generator_gemm_common.c
/******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved.                      *
* This file is part of the LIBXSMM library.                                   *
*                                                                             *
* For information on the license, see the LICENSE file.                      *
* Further information: https://github.com/hfp/libxsmm/                        *
* SPDX-License-Identifier: BSD-3-Clause                                       *
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include "generator_gemm_common.h"
#include "generator_common.h"
#include "generator_x86_instructions.h"
#include "libxsmm_main.h"

LIBXSMM_API_INTERN
void libxsmm_generator_gemm_init_micro_kernel_config_fullvector( libxsmm_micro_kernel_config*   io_micro_kernel_config,
                                                                 const unsigned int             i_arch,
                                                                 const libxsmm_gemm_descriptor* i_xgemm_desc,
                                                                 const unsigned int             i_use_masking_a_c ) {
  memset(io_micro_kernel_config, 0, sizeof(*io_micro_kernel_config)); /* avoid warning "maybe used uninitialized" */
  if ( (i_arch < LIBXSMM_X86_SSE3) || (i_arch > LIBXSMM_X86_ALLFEAT) ) {
    io_micro_kernel_config->instruction_set = LIBXSMM_X86_GENERIC;
    io_micro_kernel_config->vector_reg_count = 0;
    io_micro_kernel_config->use_masking_a_c = 0;
    io_micro_kernel_config->vector_name = 'a';
    io_micro_kernel_config->vector_length = 0;
    io_micro_kernel_config->datatype_size = 0;
    io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
  } else if ( i_arch <= LIBXSMM_X86_SSE4 ) {
    io_micro_kernel_config->instruction_set = LIBXSMM_X86_SSE3;
    io_micro_kernel_config->vector_reg_count = 16;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'x';
    if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 2;
      io_micro_kernel_config->datatype_size = 8;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPD;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPD;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVDDUP;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVAPD;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVUPD;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULPD;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDPD;
    } else {
      io_micro_kernel_config->vector_length = 4;
      io_micro_kernel_config->datatype_size = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPS;
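      /* With the selections made in this SSE F32 branch (B handling follows
       * just below), one k-step of the emitted kernel pairs an explicit
       * multiply with a separate add, since SSE has no FMA. A rough, purely
       * illustrative sketch, where (%rax)/(%rbx) stand in for whatever A/B
       * pointer registers the GP register mapping assigns:
       *
       *   movups (%rax), %xmm0        ; a_vmove_instruction
       *   movss  (%rbx), %xmm1        ; b_vmove_instruction
       *   shufps $0, %xmm1, %xmm1     ; b_shuff_instruction, broadcast b
       *   mulps  %xmm0, %xmm1         ; vmul_instruction
       *   addps  %xmm1, %xmm2         ; vadd_instruction, accumulate into C
       */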
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_SHUFPS;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVAPS;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPS;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULPS;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDPS;
    }
  } else if ( i_arch <= LIBXSMM_X86_AVX2 ) {
    io_micro_kernel_config->instruction_set = i_arch;
    io_micro_kernel_config->vector_reg_count = 16;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'y';
    if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 4;
      io_micro_kernel_config->datatype_size = 8;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPD;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
      if ( i_arch == LIBXSMM_X86_AVX ) {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPD;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
      } else {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PD;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
      }
    } else {
      io_micro_kernel_config->vector_length = 8;
      io_micro_kernel_config->datatype_size = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
      if ( i_arch == LIBXSMM_X86_AVX ) {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPS;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
      } else {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PS;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
      }
    }
  } else if ( i_arch <= LIBXSMM_X86_ALLFEAT ) {
    io_micro_kernel_config->instruction_set = i_arch;
    io_micro_kernel_config->vector_reg_count = 32;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'z';
    if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 8;
      io_micro_kernel_config->datatype_size = 8;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
        if ( i_use_masking_a_c == 0 ) {
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPD;
        } else {
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
        }
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PD;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
    } else if ( LIBXSMM_GEMM_PRECISION_F32 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 16;
      io_micro_kernel_config->datatype_size = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        if ( i_use_masking_a_c == 0 ) {
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
        } else {
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        }
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PS;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
    } else if ( LIBXSMM_GEMM_PRECISION_I16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      /* C is 32bit, so we treat all 3 matrices as 32bit element arrays */
      io_micro_kernel_config->vector_length = 16;
      io_micro_kernel_config->datatype_size = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VPBROADCASTD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        if ( i_use_masking_a_c == 0 ) {
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
        } else {
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        }
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VPDPWSSD;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VPADDD;
    } else if ( LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      /* C is 32bit, so we treat all 3 matrices as 32bit element arrays */
      io_micro_kernel_config->vector_length = 16;
      io_micro_kernel_config->datatype_size = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VPBROADCASTD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        if ( i_use_masking_a_c == 0 ) {
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
        } else {
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        }
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VPDPBUSD;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VPADDD;
    } else if ( LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      /* C is 32bit, so we treat all 3 matrices as 32bit element arrays */
      io_micro_kernel_config->vector_length = 16;
      io_micro_kernel_config->datatype_size = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VPBROADCASTD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        if ( i_use_masking_a_c == 0 ) {
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
        } else {
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        }
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VDPBF16PS;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
    } else {
      /* shouldn't happen as we caught this case earlier */
      io_micro_kernel_config->instruction_set = LIBXSMM_X86_GENERIC;
      io_micro_kernel_config->vector_reg_count = 0;
      io_micro_kernel_config->use_masking_a_c = 0;
      io_micro_kernel_config->vector_name = 'a';
      io_micro_kernel_config->vector_length = 0;
      io_micro_kernel_config->datatype_size = 0;
      io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
    }
  } else {
    /* that should not happen */
  }
  io_micro_kernel_config->prefetch_instruction = LIBXSMM_X86_INSTR_PREFETCHT1;
  io_micro_kernel_config->alu_add_instruction = LIBXSMM_X86_INSTR_ADDQ;
  io_micro_kernel_config->alu_sub_instruction = LIBXSMM_X86_INSTR_SUBQ;
  io_micro_kernel_config->alu_cmp_instruction = LIBXSMM_X86_INSTR_CMPQ;
  io_micro_kernel_config->alu_jmp_instruction = LIBXSMM_X86_INSTR_JL;
  io_micro_kernel_config->alu_mov_instruction = LIBXSMM_X86_INSTR_MOVQ;
}

LIBXSMM_API_INTERN
void libxsmm_generator_gemm_init_micro_kernel_config_halfvector( libxsmm_micro_kernel_config*   io_micro_kernel_config,
                                                                 const unsigned int             i_arch,
                                                                 const libxsmm_gemm_descriptor* i_xgemm_desc,
                                                                 const unsigned int             i_use_masking_a_c ) {
  if ( (i_arch < LIBXSMM_X86_SSE3) || (i_arch > LIBXSMM_X86_ALLFEAT) ) {
    io_micro_kernel_config->instruction_set = LIBXSMM_X86_GENERIC;
    io_micro_kernel_config->vector_reg_count = 0;
    io_micro_kernel_config->use_masking_a_c = 0;
    io_micro_kernel_config->vector_name = 'a';
    io_micro_kernel_config->vector_length = 0;
    io_micro_kernel_config->datatype_size = 0;
    io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
  } else if ( i_arch <= LIBXSMM_X86_SSE4 ) {
#if !defined(NDEBUG)
    fprintf(stderr, "LIBXSMM WARNING, libxsmm_generator_gemm_init_micro_kernel_config_halfvector, redirecting to scalar, please fix the generation code!!!\n");
#endif
    libxsmm_generator_gemm_init_micro_kernel_config_scalar( io_micro_kernel_config, i_arch, i_xgemm_desc, i_use_masking_a_c );
  } else if ( i_arch <= LIBXSMM_X86_AVX2 ) {
    io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX;
    io_micro_kernel_config->vector_reg_count = 16;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'x';
    if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 2;
      io_micro_kernel_config->datatype_size = 8;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVDDUP;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPD;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
      if ( i_arch == LIBXSMM_X86_AVX ) {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPD;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
      } else {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PD;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
      }
    } else {
      io_micro_kernel_config->vector_length = 4;
      io_micro_kernel_config->datatype_size = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
      if ( i_arch == LIBXSMM_X86_AVX ) {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPS;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
      } else {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PS;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
      }
    }
  } else if ( i_arch <= LIBXSMM_X86_ALLFEAT ) {
#if !defined(NDEBUG)
    fprintf(stderr, "LIBXSMM WARNING, libxsmm_generator_gemm_init_micro_kernel_config_halfvector, AVX512 redirecting to fullvector!\n");
#endif
    libxsmm_generator_gemm_init_micro_kernel_config_fullvector( io_micro_kernel_config, i_arch, i_xgemm_desc, i_use_masking_a_c );
  } else {
    /* should not happen */
  }
  io_micro_kernel_config->prefetch_instruction = LIBXSMM_X86_INSTR_PREFETCHT1;
  io_micro_kernel_config->alu_add_instruction = LIBXSMM_X86_INSTR_ADDQ;
  io_micro_kernel_config->alu_sub_instruction = LIBXSMM_X86_INSTR_SUBQ;
  io_micro_kernel_config->alu_cmp_instruction = LIBXSMM_X86_INSTR_CMPQ;
  io_micro_kernel_config->alu_jmp_instruction = LIBXSMM_X86_INSTR_JL;
  io_micro_kernel_config->alu_mov_instruction = LIBXSMM_X86_INSTR_MOVQ;
}

LIBXSMM_API_INTERN
void libxsmm_generator_gemm_init_micro_kernel_config_scalar( libxsmm_micro_kernel_config*   io_micro_kernel_config,
                                                             const unsigned int             i_arch,
                                                             const libxsmm_gemm_descriptor* i_xgemm_desc,
                                                             const unsigned int             i_use_masking_a_c ) {
  if ( ( i_arch < LIBXSMM_X86_SSE3 ) || ( i_arch > LIBXSMM_X86_ALLFEAT ) ) {
    io_micro_kernel_config->instruction_set = LIBXSMM_X86_GENERIC;
    io_micro_kernel_config->vector_reg_count = 0;
    io_micro_kernel_config->use_masking_a_c = 0;
    io_micro_kernel_config->vector_name = 'a';
    io_micro_kernel_config->vector_length = 0;
    io_micro_kernel_config->datatype_size = 0;
    io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
  } else if ( i_arch <= LIBXSMM_X86_SSE4 ) {
    io_micro_kernel_config->instruction_set = LIBXSMM_X86_SSE3;
    io_micro_kernel_config->vector_reg_count = 16;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'x';
    if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 1;
      io_micro_kernel_config->datatype_size = 8;
      io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVSD;
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVSD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVSD;
      io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVSD;
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULSD;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDSD;
    } else {
      io_micro_kernel_config->vector_length = 1;
      io_micro_kernel_config->datatype_size = 4;
      io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
      io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVSS;
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPS;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULSS;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDSS;
    }
  } else if ( i_arch <= LIBXSMM_X86_ALLFEAT ) {
    io_micro_kernel_config->instruction_set = i_arch;
    io_micro_kernel_config->vector_reg_count = 16;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'x';
    if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 1;
      io_micro_kernel_config->datatype_size = 8;
      io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
      io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVSD;
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
      if ( i_arch == LIBXSMM_X86_AVX ) {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULSD;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDSD;
      } else {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231SD;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
      }
    } else {
      io_micro_kernel_config->vector_length = 1;
      io_micro_kernel_config->datatype_size = 4;
      io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
      io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVSS;
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
      if ( i_arch == LIBXSMM_X86_AVX ) {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULSS;
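        /* on plain AVX the scalar update c += a * b needs this explicit
           vmulss/vaddss pair; the AVX2+ path below fuses both into a single
           vfmadd231ss and leaves vadd_instruction undefined */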
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDSS;
      } else {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231SS;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
      }
    }
  } else {
    /* should not happen */
  }
  io_micro_kernel_config->prefetch_instruction = LIBXSMM_X86_INSTR_PREFETCHT1;
  io_micro_kernel_config->alu_add_instruction = LIBXSMM_X86_INSTR_ADDQ;
  io_micro_kernel_config->alu_sub_instruction = LIBXSMM_X86_INSTR_SUBQ;
  io_micro_kernel_config->alu_cmp_instruction = LIBXSMM_X86_INSTR_CMPQ;
  io_micro_kernel_config->alu_jmp_instruction = LIBXSMM_X86_INSTR_JL;
  io_micro_kernel_config->alu_mov_instruction = LIBXSMM_X86_INSTR_MOVQ;
}

LIBXSMM_API_INTERN
void libxsmm_generator_gemm_add_flop_counter( libxsmm_generated_code*        io_generated_code,
                                              const libxsmm_gemm_descriptor* i_xgemm_desc ) {
  if ( io_generated_code->code_type == 0 ) {
    char l_new_code[512];
    const unsigned int l_max_code_length = sizeof(l_new_code) - 1;
    int l_code_length = 0;
    /* in source-code generation mode this appends a guarded update of the
       global flop counter to the generated kernel, i.e.:
         #ifndef NDEBUG
         #ifdef _OPENMP
         #pragma omp atomic
         #endif
         libxsmm_num_total_flops += 2*m*n*k;
         #endif */
    l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "#ifndef NDEBUG\n" );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "#ifdef _OPENMP\n" );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "#pragma omp atomic\n" );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "#endif\n" );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "libxsmm_num_total_flops += %u;\n", 2u * i_xgemm_desc->m * i_xgemm_desc->n * i_xgemm_desc->k);
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "#endif\n" );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  }
}

LIBXSMM_API_INTERN
void libxsmm_generator_gemm_header_kloop( libxsmm_generated_code*            io_generated_code,
                                          libxsmm_loop_label_tracker*        io_loop_label_tracker,
                                          const libxsmm_gp_reg_mapping*      i_gp_reg_mapping,
                                          const libxsmm_micro_kernel_config* i_micro_kernel_config,
                                          const unsigned int                 i_m_blocking,
                                          const unsigned int                 i_k_blocking ) {
  LIBXSMM_UNUSED(i_m_blocking);
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_kloop, 0);
  libxsmm_x86_instruction_register_jump_back_label( io_generated_code, io_loop_label_tracker );
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_kloop, i_k_blocking);
}

LIBXSMM_API_INTERN
void libxsmm_generator_gemm_footer_kloop( libxsmm_generated_code*            io_generated_code,
                                          libxsmm_loop_label_tracker*        io_loop_label_tracker,
                                          const libxsmm_gp_reg_mapping*      i_gp_reg_mapping,
                                          const libxsmm_micro_kernel_config* i_micro_kernel_config,
                                          const libxsmm_gemm_descriptor*     i_xgemm_desc,
                                          const unsigned int                 i_m_blocking,
                                          const unsigned int                 i_max_blocked_k,
                                          const unsigned int                 i_kloop_complete ) {
  LIBXSMM_UNUSED(i_m_blocking);
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_kloop, i_max_blocked_k );
  libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker );
  if ( i_kloop_complete != 0 ) {
    int l_b_offset = 0;
    if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) > 0 ) {
      l_b_offset = i_xgemm_desc->ldb * i_xgemm_desc->k * i_micro_kernel_config->datatype_size;
    } else {
      l_b_offset = i_xgemm_desc->k * i_micro_kernel_config->datatype_size;
    }
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_b, l_b_offset );
  }
}

LIBXSMM_API_INTERN
void libxsmm_generator_gemm_header_reduceloop( libxsmm_generated_code*            io_generated_code,
                                               libxsmm_loop_label_tracker*        io_loop_label_tracker,
                                               const libxsmm_gp_reg_mapping*      i_gp_reg_mapping,
                                               const libxsmm_micro_kernel_config* i_micro_kernel_config ) {
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_reduce_loop, 0);
  libxsmm_x86_instruction_register_jump_back_label( io_generated_code, io_loop_label_tracker );
}

LIBXSMM_API_INTERN
void libxsmm_generator_gemm_footer_reduceloop( libxsmm_generated_code*            io_generated_code,
                                               libxsmm_loop_label_tracker*        io_loop_label_tracker,
                                               const libxsmm_gp_reg_mapping*      i_gp_reg_mapping,
                                               const libxsmm_micro_kernel_config* i_micro_kernel_config,
                                               const libxsmm_gemm_descriptor*     i_xgemm_desc) {
  LIBXSMM_UNUSED(i_xgemm_desc);
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_reduce_loop, 1);
  libxsmm_x86_instruction_alu_reg( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_reduce_count, i_gp_reg_mapping->gp_reg_reduce_loop);
  libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker );
}

LIBXSMM_API_INTERN
void libxsmm_generator_gemm_header_nloop( libxsmm_generated_code*            io_generated_code,
                                          libxsmm_loop_label_tracker*        io_loop_label_tracker,
                                          const libxsmm_gp_reg_mapping*      i_gp_reg_mapping,
                                          const libxsmm_micro_kernel_config* i_micro_kernel_config,
                                          const unsigned int                 i_n_blocking) {
  libxsmm_x86_instruction_register_jump_back_label( io_generated_code, io_loop_label_tracker );
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_nloop, i_n_blocking );
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_mloop, 0 );
}

LIBXSMM_API_INTERN
void libxsmm_generator_gemm_footer_nloop( libxsmm_generated_code*            io_generated_code,
                                          libxsmm_loop_label_tracker*        io_loop_label_tracker,
                                          const libxsmm_gp_reg_mapping*      i_gp_reg_mapping,
                                          const libxsmm_micro_kernel_config* i_micro_kernel_config,
                                          const libxsmm_gemm_descriptor*     i_xgemm_desc,
                                          const unsigned int                 i_n_blocking,
                                          const unsigned int                 i_n_done ) {
  if ( LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) {
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c,
                                     (i_n_blocking*(i_xgemm_desc->ldc)*(i_micro_kernel_config->datatype_size/2)) - ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size/2)) );
  } else if ( LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) {
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c,
                                     (i_n_blocking*(i_xgemm_desc->ldc)*(i_micro_kernel_config->datatype_size/4)) - ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size/4)) );
  } else {
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c,
                                     (i_n_blocking*(i_xgemm_desc->ldc)*(i_micro_kernel_config->datatype_size)) - ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) );
  }
  /* B prefetch */
  if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_BL2_VIA_C ||
       i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ||
       i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD ) {
    if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) == 0 ) {
      unsigned int l_type_scaling;
      if ( (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype )) ||
           (LIBXSMM_GEMM_PRECISION_I16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype )) ) {
        l_type_scaling = 2;
      } else if ( LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
        l_type_scaling = 4;
      } else {
        l_type_scaling = 1;
      }
      libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_b_prefetch,
                                       (i_n_blocking*(i_xgemm_desc->ldc)*(i_micro_kernel_config->datatype_size/l_type_scaling)) - ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size/l_type_scaling)) );
    }
  }
#if 0
  if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
       i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c_prefetch,
                                     (i_n_blocking*(i_xgemm_desc->ldc)*(i_micro_kernel_config->datatype_size)) - ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) );
  }
#endif
  if (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS) {
    /* handle trans B */
    int l_b_offset = 0;
    if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) > 0 ) {
      l_b_offset = i_n_blocking * i_micro_kernel_config->datatype_size;
    } else {
      l_b_offset = i_n_blocking * i_xgemm_desc->ldb * i_micro_kernel_config->datatype_size;
    }
    libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
    libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
    libxsmm_generator_gemm_header_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config );
    libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_a, i_gp_reg_mapping->gp_reg_reduce_loop, 8, 0, i_gp_reg_mapping->gp_reg_help_0, 0 );
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_help_0, ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) );
    libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_a, i_gp_reg_mapping->gp_reg_reduce_loop, 8, 0, i_gp_reg_mapping->gp_reg_help_0, 1 );
    libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_b, i_gp_reg_mapping->gp_reg_reduce_loop, 8, 0, i_gp_reg_mapping->gp_reg_help_0, 0 );
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_help_0, l_b_offset );
    libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_b, i_gp_reg_mapping->gp_reg_reduce_loop, 8, 0, i_gp_reg_mapping->gp_reg_help_0, 1 );
    if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 ||
         i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ) {
      libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction,
                                       i_gp_reg_mapping->gp_reg_a_prefetch, i_gp_reg_mapping->gp_reg_reduce_loop, 8, 0, i_gp_reg_mapping->gp_reg_help_0, 0 );
      libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_help_0, ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) );
      libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_a_prefetch, i_gp_reg_mapping->gp_reg_reduce_loop, 8, 0, i_gp_reg_mapping->gp_reg_help_0, 1 );
    }
    libxsmm_generator_gemm_footer_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config, i_xgemm_desc);
    libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
    libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
  } else {
    /* handle trans B */
    int l_b_offset = 0;
    if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) > 0 ) {
      l_b_offset = i_n_blocking * i_micro_kernel_config->datatype_size;
    } else {
      l_b_offset = i_n_blocking * i_xgemm_desc->ldb * i_micro_kernel_config->datatype_size;
    }
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_b, l_b_offset );
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_a, ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) );
    if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 ||
         i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ) {
      libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_a_prefetch, ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) );
    }
  }
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_nloop, i_n_done );
  libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker );
}

LIBXSMM_API_INTERN
void libxsmm_generator_gemm_header_mloop( libxsmm_generated_code*            io_generated_code,
                                          libxsmm_loop_label_tracker*        io_loop_label_tracker,
                                          const libxsmm_gp_reg_mapping*      i_gp_reg_mapping,
                                          const libxsmm_micro_kernel_config* i_micro_kernel_config,
                                          const unsigned int                 i_m_blocking ) {
  libxsmm_x86_instruction_register_jump_back_label( io_generated_code, io_loop_label_tracker );
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_mloop, i_m_blocking );
}

LIBXSMM_API_INTERN
void libxsmm_generator_gemm_footer_mloop( libxsmm_generated_code*            io_generated_code,
                                          libxsmm_loop_label_tracker*        io_loop_label_tracker,
                                          const libxsmm_gp_reg_mapping*      i_gp_reg_mapping,
                                          const libxsmm_micro_kernel_config* i_micro_kernel_config,
                                          const libxsmm_gemm_descriptor*     i_xgemm_desc,
                                          const unsigned int                 i_m_blocking,
                                          const unsigned int                 i_m_done ) {
  /* advance C pointer */
  if ( LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) {
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c, i_m_blocking*(i_micro_kernel_config->datatype_size/2) );
  } else if ( LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) {
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c, i_m_blocking*(i_micro_kernel_config->datatype_size/4) );
  } else {
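    /* default case: C is stored at full datatype_size (e.g. F32/F64), so the
       C pointer advances by i_m_blocking elements; for an illustrative FP32
       kernel with i_m_blocking = 16 this adds 64 bytes */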
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c, i_m_blocking*(i_micro_kernel_config->datatype_size) );
  }
  /* C prefetch */
#if 0
  if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
       i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c_prefetch, i_m_blocking*(i_micro_kernel_config->datatype_size) );
  }
#endif
  /* B prefetch */
  if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_BL2_VIA_C ||
       i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ||
       i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD ) {
    if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) == 0 ) {
      unsigned int l_type_scaling;
      if ( (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype )) ||
           (LIBXSMM_GEMM_PRECISION_I16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype )) ) {
        l_type_scaling = 2;
      } else if ( LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
        l_type_scaling = 4;
      } else {
        l_type_scaling = 1;
      }
      libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_b_prefetch, i_m_blocking*(i_micro_kernel_config->datatype_size/l_type_scaling) );
    }
  }
  /* A prefetch */
  if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 ||
       i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C) {
    if (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS) {
      if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 ) {
        libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
        libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
        libxsmm_generator_gemm_header_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config );
        libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_a_prefetch, i_gp_reg_mapping->gp_reg_reduce_loop, 8, 0, i_gp_reg_mapping->gp_reg_help_0, 0 );
        libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_help_0, ((i_xgemm_desc->k) * (i_micro_kernel_config->datatype_size) * (i_xgemm_desc->lda) ) - (i_m_blocking * (i_micro_kernel_config->datatype_size)) );
        libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_a_prefetch, i_gp_reg_mapping->gp_reg_reduce_loop, 8, 0, i_gp_reg_mapping->gp_reg_help_0, 1 );
        libxsmm_generator_gemm_footer_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config, i_xgemm_desc);
        libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
        libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
      }
    } else {
      libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_a_prefetch, ((i_xgemm_desc->k) * (i_micro_kernel_config->datatype_size) * (i_xgemm_desc->lda) ) - (i_m_blocking * (i_micro_kernel_config->datatype_size)) );
    }
  }
  /* advance A pointer */
  if (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS) {
    libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
    libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
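    /* batch-reduce kernels address A through an array of pointers; the reduce
       loop generated below visits every batch entry, loads the stored A
       pointer, rewinds it by k*lda elements while keeping the m-block advance
       of i_m_blocking elements, and writes it back */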
    libxsmm_generator_gemm_header_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config );
    libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_a, i_gp_reg_mapping->gp_reg_reduce_loop, 8, 0, i_gp_reg_mapping->gp_reg_help_0, 0 );
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_help_0, ((i_xgemm_desc->k) * (i_micro_kernel_config->datatype_size) * (i_xgemm_desc->lda) ) - (i_m_blocking * (i_micro_kernel_config->datatype_size)) );
    libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_a, i_gp_reg_mapping->gp_reg_reduce_loop, 8, 0, i_gp_reg_mapping->gp_reg_help_0, 1 );
    libxsmm_generator_gemm_footer_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config, i_xgemm_desc);
    libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
    libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
  } else {
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_a, ((i_xgemm_desc->k) * (i_micro_kernel_config->datatype_size) * (i_xgemm_desc->lda) ) - (i_m_blocking * (i_micro_kernel_config->datatype_size)) );
  }
  /* loop handling */
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_mloop, i_m_done );
  libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker );
}

LIBXSMM_API_INTERN
void libxsmm_generator_gemm_load_C( libxsmm_generated_code*            io_generated_code,
                                    const libxsmm_gp_reg_mapping*      i_gp_reg_mapping,
                                    const libxsmm_micro_kernel_config* i_micro_kernel_config,
                                    const libxsmm_gemm_descriptor*     i_xgemm_desc,
                                    const unsigned int                 i_m_blocking,
                                    const unsigned int                 i_n_blocking ) {
  unsigned int l_m_blocking, l_vec_reg_acc_start;
  /* register blocking counter in n */
  unsigned int l_n = 0;
  /* register blocking counter in m */
  unsigned int l_m = 0;
  assert(0 < i_micro_kernel_config->vector_length);
  /* deriving register blocking from kernel config */
  l_m_blocking = ( i_m_blocking % i_micro_kernel_config->vector_length == 0 ) ? i_m_blocking/i_micro_kernel_config->vector_length : (i_m_blocking/i_micro_kernel_config->vector_length)+1;
  /* start register of accumulator */
  l_vec_reg_acc_start = i_micro_kernel_config->vector_reg_count - (i_n_blocking * l_m_blocking);
#if !defined(NDEBUG)
  /* run a few checks to see whether the requested code can be generated;
     these checks are skipped in release mode, so bad things might happen... */
  if (i_micro_kernel_config->instruction_set == LIBXSMM_X86_SSE3 ||
      i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX ||
      i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX2 ) {
    if ( (i_n_blocking > 3) || (i_n_blocking < 1) || (i_m_blocking < 1) ) {
      LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
      return;
    }
  } else if ( i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512_CORE ) {
    if ( (i_n_blocking > 30) || (i_n_blocking < 1) || (l_m_blocking != 1) ) {
      LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
      return;
    }
  } else if ( i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CORE ) {
    if ( (i_n_blocking > 30) || (i_n_blocking < 1) || (l_m_blocking < 1) || (l_m_blocking > 6) ) {
      LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
      return;
    }
  } else {}
#if 0
  if ( i_m_blocking % i_micro_kernel_config->vector_length != 0 ) {
    LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_M_BLOCK );
    return;
  }
#endif
#endif /*!defined(NDEBUG)*/
  /* load C accumulator */
  if (0 == (LIBXSMM_GEMM_FLAG_BETA_0 & i_xgemm_desc->flags)) { /* Beta=1 */
    /* pure BF16 kernel */
    if ( ( (i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CORE) &&
           (i_micro_kernel_config->instruction_set <= LIBXSMM_X86_ALLFEAT) ) &&
         ( (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) &&
           (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) {
      /* we add when scaling during conversion to FP32 */
      for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
        for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
          /* load 16 bit values into ymm portion of the register */
          if ( (i_micro_kernel_config->use_masking_a_c != 0) && ( l_m == (l_m_blocking - 1) ) ) {
            libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VMOVDQU16,
                                              i_gp_reg_mapping->gp_reg_c, LIBXSMM_X86_GP_REG_UNDEF, 0,
                                              ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2),
                                              'z', 0, 2, 1, 0 );
          } else {
            libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, i_micro_kernel_config->c_vmove_instruction,
                                              i_gp_reg_mapping->gp_reg_c, LIBXSMM_X86_GP_REG_UNDEF, 0,
                                              ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2),
                                              'y', 0, 0, 1, 0 );
          }
          /* convert 16 bit values into 32 bit (integer convert) */
          libxsmm_x86_instruction_vec_compute_convert( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VPMOVSXWD,
                                                       i_micro_kernel_config->vector_name, 0, LIBXSMM_X86_VEC_REG_UNDEF,
                                                       l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), LIBXSMM_X86_VEC_REG_UNDEF);
          /* shift 16 bits to the left to generate valid FP32 numbers */
          libxsmm_x86_instruction_vec_shuffle_reg(io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VPSLLD,
                                                  i_micro_kernel_config->vector_name,
                                                  l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
                                                  l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
                                                  LIBXSMM_X86_VEC_REG_UNDEF, 16);
        }
      }
    /* pure int8 kernel */
    } else if ( ( (i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CORE) &&
                  (i_micro_kernel_config->instruction_set <= LIBXSMM_X86_ALLFEAT) ) &&
                ( (LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) &&
                  (LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) {
      /* we need to up convert int8 to int32 */
      for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
        for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
          /* load 8 bit values into xmm portion of the register */
          if ( (i_micro_kernel_config->use_masking_a_c != 0) && ( l_m == (l_m_blocking - 1) ) ) {
            libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VMOVDQU8,
                                              i_gp_reg_mapping->gp_reg_c, LIBXSMM_X86_GP_REG_UNDEF, 0,
                                              ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/4),
                                              'z', 0, 2, 1, 0 );
          } else {
            libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, i_micro_kernel_config->c_vmove_instruction,
                                              i_gp_reg_mapping->gp_reg_c, LIBXSMM_X86_GP_REG_UNDEF, 0,
                                              ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/4),
                                              'x', 0, 0, 1, 0 );
          }
          /* convert 8 bit values into 32 bit (integer convert) */
          if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_C_UNSIGNED) != 0 ) {
            libxsmm_x86_instruction_vec_compute_convert( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VPMOVZXBD,
                                                         i_micro_kernel_config->vector_name, 0, LIBXSMM_X86_VEC_REG_UNDEF,
                                                         l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), LIBXSMM_X86_VEC_REG_UNDEF);
          } else {
            libxsmm_x86_instruction_vec_compute_convert( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VPMOVSXBD,
                                                         i_micro_kernel_config->vector_name, 0, LIBXSMM_X86_VEC_REG_UNDEF,
                                                         l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), LIBXSMM_X86_VEC_REG_UNDEF);
          }
        }
      }
    } else {
      /* adding to C, so let's load C */
      for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
        for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
          /* we only mask the last m-blocked load */
          libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, i_micro_kernel_config->c_vmove_instruction,
                                            i_gp_reg_mapping->gp_reg_c, LIBXSMM_X86_GP_REG_UNDEF, 0,
                                            ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size),
                                            i_micro_kernel_config->vector_name,
                                            l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
                                            ( l_m == (l_m_blocking - 1) ) ? i_micro_kernel_config->use_masking_a_c : 0, 1, 0 );
        }
#if 0
        if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
             i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
          for (l_m = 0; l_m < l_m_blocking; l_m++ ) {
            libxsmm_x86_instruction_prefetch( io_generated_code, i_micro_kernel_config->prefetch_instruction,
                                              i_gp_reg_mapping->gp_reg_c_prefetch, LIBXSMM_X86_GP_REG_UNDEF, 0,
                                              ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size));
          }
        }
#endif
      }
    }
  } else {
    /* overwriting C, so let's xor-out the accumulator */
    for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
      for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
        libxsmm_x86_instruction_vec_compute_reg( io_generated_code, i_micro_kernel_config->instruction_set, i_micro_kernel_config->vxor_instruction,
                                                 i_micro_kernel_config->vector_name,
                                                 l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
                                                 l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
                                                 l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) );
      }
#if 0
      if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
           i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
        for (l_m = 0; l_m < l_m_blocking; l_m++ ) {
          libxsmm_x86_instruction_prefetch( io_generated_code, i_micro_kernel_config->prefetch_instruction,
                                            i_gp_reg_mapping->gp_reg_c_prefetch, LIBXSMM_X86_GP_REG_UNDEF, 0,
                                            ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size));
        }
      }
#endif
    }
  }
}

LIBXSMM_API_INTERN
void libxsmm_generator_gemm_store_C( libxsmm_generated_code*            io_generated_code,
                                     const libxsmm_gp_reg_mapping*      i_gp_reg_mapping,
                                     const libxsmm_micro_kernel_config* i_micro_kernel_config,
                                     const libxsmm_gemm_descriptor*     i_xgemm_desc,
                                     const unsigned int                 i_m_blocking,
                                     const unsigned int                 i_n_blocking ) {
  /* deriving register blocking from kernel config */
  unsigned int l_m_blocking = ( i_m_blocking % i_micro_kernel_config->vector_length == 0 ) ? i_m_blocking/i_micro_kernel_config->vector_length : (i_m_blocking/i_micro_kernel_config->vector_length)+1;
  /* register blocking counter in n */
  unsigned int l_n = 0;
  /* register blocking counter in m */
  unsigned int l_m = 0;
  /* start register of accumulator */
  unsigned int l_vec_reg_acc_start = i_micro_kernel_config->vector_reg_count - (i_n_blocking * l_m_blocking);
  /* select store instruction */
  unsigned int l_vstore = (LIBXSMM_GEMM_FLAG_ALIGN_C_NTS_HINT == (LIBXSMM_GEMM_FLAG_ALIGN_C_NTS_HINT & i_xgemm_desc->flags))
                          ? i_micro_kernel_config->c_vmove_nts_instruction : i_micro_kernel_config->c_vmove_instruction;
  /* @TODO fix this test */
#if !defined(NDEBUG)
  if (i_micro_kernel_config->instruction_set == LIBXSMM_X86_SSE3 ||
      i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX ||
      i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX2 ) {
    if ( (i_n_blocking > 3) || (i_n_blocking < 1) || (i_m_blocking < 1) ) {
      LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
      return;
    }
  } else if ( i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512_CORE ) {
    if ( (i_n_blocking > 30) || (i_n_blocking < 1) || (i_m_blocking != i_micro_kernel_config->vector_length) ) {
      LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
      return;
    }
  } else if ( i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CORE ) {
    if ( (i_n_blocking > 30) || (i_n_blocking < 1) || (l_m_blocking < 1) || (l_m_blocking > 6) ) {
      LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
      return;
    }
  } else {}
#if 0
  if ( i_m_blocking % i_micro_kernel_config->vector_length != 0 ) {
    LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_M_BLOCK );
    return;
  }
#endif
#endif
  if ( ( (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CORE) ||
         (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CLX) ) &&
       ( (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) &&
         (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) {
    /* init stack with helper variables for SW-based RNE rounding;
       on the FP32 bit pattern x this computes x + 0x7fff + ((x >> 16) & 1)
       before truncating to the upper 16 bits, except for NaN/Inf inputs,
       which are only truncated */
    /* push 0x7f800000 on the stack, naninf masking */
    libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_MOVQ, i_gp_reg_mapping->gp_reg_help_2, 0x7f800000);
    libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 );
    /* push 0x00010000 on the stack, fixup masking */
    libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_MOVQ, i_gp_reg_mapping->gp_reg_help_2, 0x00010000);
    libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 );
    /* push 0x00007fff on the stack, rneadd */
    libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_MOVQ, i_gp_reg_mapping->gp_reg_help_2, 0x00007fff);
    libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 );
    /* push 0x00000001 on the stack, fixup */
    libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_MOVQ, i_gp_reg_mapping->gp_reg_help_2, 0x00000001);
    libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 );
    /* storing downconverted and rounded C accumulator */
    for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
      for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
        unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n);
        /* and with naninf */
        libxsmm_x86_instruction_vec_compute_mem( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VPANDD, 1,
                                                 LIBXSMM_X86_GP_REG_RSP, LIBXSMM_X86_GP_REG_UNDEF, 0, 24,
                                                 i_micro_kernel_config->vector_name, reg_X, 0 );
        /* and with fixup */
        libxsmm_x86_instruction_vec_compute_mem( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VPANDD, 1,
                                                 LIBXSMM_X86_GP_REG_RSP, LIBXSMM_X86_GP_REG_UNDEF, 0, 16,
                                                 i_micro_kernel_config->vector_name, reg_X, 1 );
        /* compute naninf mask k7 */
        libxsmm_x86_instruction_vec_compute_mem_mask( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VPCMPD, 1,
                                                      LIBXSMM_X86_GP_REG_RSP, LIBXSMM_X86_GP_REG_UNDEF, 0, 24,
i_micro_kernel_config->vector_name, 0, LIBXSMM_X86_VEC_REG_UNDEF, 4, 7, 0 ); /* compute fixup mask k6 */ libxsmm_x86_instruction_vec_compute_mem_mask( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VPCMPD, 1, LIBXSMM_X86_GP_REG_RSP, LIBXSMM_X86_GP_REG_UNDEF, 0, 16, i_micro_kernel_config->vector_name, 1, LIBXSMM_X86_VEC_REG_UNDEF, 0, 6, 0 ); /* load rneadd */ libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VBROADCASTSS, LIBXSMM_X86_GP_REG_RSP, LIBXSMM_X86_GP_REG_UNDEF, 0, 8, i_micro_kernel_config->vector_name, 0, 0, 1, 0 ); /* load fixup */ libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VBROADCASTSS, LIBXSMM_X86_GP_REG_RSP, LIBXSMM_X86_GP_REG_UNDEF, 0, 0, i_micro_kernel_config->vector_name, 1, 0, 1, 0 ); /* compute the rounding constant: rneadd plus the fixup on tie lanes (mask k6) */ libxsmm_x86_instruction_vec_compute_reg_mask( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VPADDD, i_micro_kernel_config->vector_name, 1, 0, 0, LIBXSMM_X86_IMM_UNDEF, 6, 0 ); /* add the rounding constant to the accumulator, skipping NaN/Inf lanes (mask k7) */ libxsmm_x86_instruction_vec_compute_reg_mask( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VPADDD, i_micro_kernel_config->vector_name, 0, reg_X, reg_X, LIBXSMM_X86_IMM_UNDEF, 7, 0 ); /* shift the FP32 values right by 16 bits */ libxsmm_x86_instruction_vec_shuffle_reg(io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VPSRAD, i_micro_kernel_config->vector_name, reg_X, reg_X, LIBXSMM_X86_VEC_REG_UNDEF, 16); /* truncate the 32-bit lanes to 16 bit (VPMOVDW) */ libxsmm_x86_instruction_vec_compute_convert( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VPMOVDW, i_micro_kernel_config->vector_name, reg_X, LIBXSMM_X86_VEC_REG_UNDEF, 0, LIBXSMM_X86_VEC_REG_UNDEF); /* store 16 bit values into ymm portion of the register */ if ( (i_micro_kernel_config->use_masking_a_c != 0) && ( l_m == (l_m_blocking - 1) ) ) { libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VMOVDQU16, i_gp_reg_mapping->gp_reg_c, LIBXSMM_X86_GP_REG_UNDEF, 0, ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2), 'z', 0, 2, 0, 1 ); } else { libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, l_vstore, i_gp_reg_mapping->gp_reg_c, LIBXSMM_X86_GP_REG_UNDEF, 0, ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2), 'y', 0, 0, 0, 1 ); } } } /* clean up the stack: pop the four helper constants */ libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 ); libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 ); libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 ); libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 ); } else if ( ( (i_micro_kernel_config->instruction_set <= LIBXSMM_X86_ALLFEAT) && (i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CPX) ) && ( (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) { /* storing downconverted and rounded C accumulator */ for ( l_n = 0; l_n < i_n_blocking; l_n++ ) { unsigned int l_m_2_blocking = (l_m_blocking/2)*2; l_m = 0; if ( i_micro_kernel_config->use_masking_a_c != 0 ) { for ( l_m = 0
; l_m < l_m_blocking; l_m++ ) { unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n); libxsmm_x86_instruction_vec_compute_convert( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VCVTNEPS2BF16, i_micro_kernel_config->vector_name, reg_X, LIBXSMM_X86_VEC_REG_UNDEF, 0, 0); /* store 16 bit values into ymm portion of the register */ if ( l_m == (l_m_blocking - 1) ) { libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VMOVDQU16, i_gp_reg_mapping->gp_reg_c, LIBXSMM_X86_GP_REG_UNDEF, 0, ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2), 'z', 0, 2, 0, 1 ); } else { libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, l_vstore, i_gp_reg_mapping->gp_reg_c, LIBXSMM_X86_GP_REG_UNDEF, 0, ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2), 'y', 0, 0, 0, 1 ); } } } else { for (; l_m < l_m_2_blocking; l_m+=2 ) { unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n); unsigned int reg_X2 = l_vec_reg_acc_start + l_m+1 + (l_m_blocking * l_n); libxsmm_x86_instruction_vec_compute_convert( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VCVTNE2PS2BF16, i_micro_kernel_config->vector_name, reg_X, reg_X2, 0, 0); libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, l_vstore, i_gp_reg_mapping->gp_reg_c, LIBXSMM_X86_GP_REG_UNDEF, 0, ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2), 'z', 0, 0, 0, 1 ); } for (; l_m < l_m_blocking; l_m++ ) { unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n); libxsmm_x86_instruction_vec_compute_convert( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VCVTNEPS2BF16, i_micro_kernel_config->vector_name, reg_X, LIBXSMM_X86_VEC_REG_UNDEF, 0, 0); libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, l_vstore, i_gp_reg_mapping->gp_reg_c, LIBXSMM_X86_GP_REG_UNDEF, 0, ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2), 'y', 0, 0, 0, 1 ); } } } } else if ( ( (i_micro_kernel_config->instruction_set <= LIBXSMM_X86_ALLFEAT) && (i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CORE) ) && ( (LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) { /* pick the right instructions */ unsigned int inst_f32_i32 = ( ( i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_C_UNSIGNED ) != 0 ) ? LIBXSMM_X86_INSTR_VCVTPS2UDQ : LIBXSMM_X86_INSTR_VCVTPS2DQ; unsigned int inst_i32_i8 = ( ( i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_C_UNSIGNED ) != 0 ) ?
LIBXSMM_X86_INSTR_VPMOVUSDB : LIBXSMM_X86_INSTR_VPMOVSDB; /* there are cases where we need to load the scaling factor's address from the stack argument list */ if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_OFFSET) != 0 ) { libxsmm_x86_instruction_load_arg_to_reg( io_generated_code, 0, i_gp_reg_mapping->gp_reg_scf ); } /* loading scf into register 3 */ libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VBROADCASTSS, i_gp_reg_mapping->gp_reg_scf, LIBXSMM_X86_GP_REG_UNDEF, 0, 0, i_micro_kernel_config->vector_name, 3, 0, 1, 0 ); /* Zero out register 0 to perform ReLU */ libxsmm_x86_instruction_vec_compute_reg( io_generated_code, i_micro_kernel_config->instruction_set, i_micro_kernel_config->vxor_instruction, i_micro_kernel_config->vector_name, 0, 0, 0); /* storing downconverted and rounded C accumulator */ for ( l_n = 0; l_n < i_n_blocking; l_n++ ) { for ( l_m = 0; l_m < l_m_blocking; l_m++ ) { unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n); /* Convert result to F32 */ libxsmm_x86_instruction_vec_compute_reg( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VCVTDQ2PS, i_micro_kernel_config->vector_name, reg_X, reg_X, LIBXSMM_X86_VEC_REG_UNDEF); /* Multiply with scaling factor */ libxsmm_x86_instruction_vec_compute_reg( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VMULPS, i_micro_kernel_config->vector_name, reg_X, 3, reg_X ); /* Perform ReLU */ libxsmm_x86_instruction_vec_compute_reg( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VMAXPS, i_micro_kernel_config->vector_name, reg_X, 0, reg_X); /* Round result to int32 */ libxsmm_x86_instruction_vec_compute_convert( io_generated_code, i_micro_kernel_config->instruction_set, inst_f32_i32, i_micro_kernel_config->vector_name, reg_X, LIBXSMM_X86_VEC_REG_UNDEF, reg_X, 0); /* down-convert to int8 */ libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, inst_i32_i8, i_gp_reg_mapping->gp_reg_c, LIBXSMM_X86_GP_REG_UNDEF, 0, ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/4), i_micro_kernel_config->vector_name, reg_X, ( ( l_m == (l_m_blocking - 1)) && ( i_micro_kernel_config->use_masking_a_c != 0 ) ) ? 2 : 0, 0, 1 ); } } } else { /* storing C accumulator */ for ( l_n = 0; l_n < i_n_blocking; l_n++ ) { for ( l_m = 0; l_m < l_m_blocking; l_m++ ) { libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, l_vstore, i_gp_reg_mapping->gp_reg_c, LIBXSMM_X86_GP_REG_UNDEF, 0, ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size), i_micro_kernel_config->vector_name, l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), ( l_m == (l_m_blocking - 1) ) ?
i_micro_kernel_config->use_masking_a_c : 0, 0, 1 ); } if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_BL2_VIA_C || i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C || i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD ) { if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) == 0 ) { /* determining how many prefetches we need in M direction as we just need one prefetch per cache line */ unsigned int l_m_advance = 64 / ((i_micro_kernel_config->vector_length) * (i_micro_kernel_config->datatype_size)); /* 64: hardcoded cache line length */ for (l_m = 0; l_m < l_m_blocking; l_m += l_m_advance ) { libxsmm_x86_instruction_prefetch( io_generated_code, i_micro_kernel_config->prefetch_instruction, i_gp_reg_mapping->gp_reg_b_prefetch, LIBXSMM_X86_GP_REG_UNDEF, 0, ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size)); } } } } } } LIBXSMM_API_INTERN void libxsmm_generator_gemm_initialize_avx512_mask( libxsmm_generated_code* io_generated_code, const unsigned int i_gp_reg_tmp, const libxsmm_gemm_descriptor* i_xgemm_desc, const unsigned int i_mask_count ) { unsigned int l_mask; /* init full mask */ if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) { l_mask = 0xff; } else { l_mask = 0xffff; } /* shift right by "inverse" remainder */ l_mask = l_mask >> i_mask_count; /* move mask to GP register */ libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_MOVQ, i_gp_reg_tmp, l_mask ); if ( ( io_generated_code->arch >= LIBXSMM_X86_AVX512 ) && ( io_generated_code->arch <= LIBXSMM_X86_ALLFEAT ) ) { libxsmm_x86_instruction_mask_move( io_generated_code, LIBXSMM_X86_INSTR_KMOVW, i_gp_reg_tmp, LIBXSMM_X86_AVX512_MASK, 0 ); if ( ( LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && ( LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) { libxsmm_x86_instruction_mask_move( io_generated_code, LIBXSMM_X86_INSTR_KMOVD, i_gp_reg_tmp, 2, 0 ); } else if ( ( LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && ( LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) { libxsmm_x86_instruction_mask_move( io_generated_code, LIBXSMM_X86_INSTR_KMOVQ, i_gp_reg_tmp, 2, 0 ); } else { /* no additional mask is needed */ } } else { /* shouldn't happen */ LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_ARCH ); return; } }
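/* Illustrative scalar sketch (not part of LIBXSMM): what the SW-based RNE
   sequence in libxsmm_generator_gemm_store_C computes per FP32 lane when
   down-converting to BF16. The four stack constants map to: 0x7f800000
   (NaN/Inf detection, mask k7), 0x00010000 (LSB of the kept BF16 mantissa,
   mask k6), 0x00007fff (rounding bias, "rneadd") and 0x00000001 (tie-breaking
   "fixup"). The helper name and the use of <stdint.h>/<string.h> are
   assumptions made for this sketch; the block is kept disabled. */
#if 0
#include <stdint.h>
#include <string.h>
static unsigned short sketch_fp32_to_bf16_rne( float i_value ) {
  uint32_t l_bits;
  memcpy( &l_bits, &i_value, sizeof(l_bits) );
  /* NaN/Inf lanes (exponent all ones) are excluded from rounding (k7) */
  if ( ( l_bits & 0x7f800000u ) != 0x7f800000u ) {
    /* add 0x7fff plus 1 when bit 16 is set (k6): round to nearest even */
    l_bits += 0x00007fffu + ( ( l_bits >> 16 ) & 1u );
  }
  /* VPSRAD by 16 followed by VPMOVDW keeps the upper 16 bits */
  return (unsigned short)( l_bits >> 16 );
}
#endif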
shear.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS H H EEEEE AAA RRRR % % SS H H E A A R R % % SSS HHHHH EEE AAAAA RRRR % % SS H H E A A R R % % SSSSS H H EEEEE A A R R % % % % % % MagickCore Methods to Shear or Rotate an Image by an Arbitrary Angle % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % The XShearImage() and YShearImage() methods are based on the paper "A Fast % Algorithm for General Raster Rotation" by Alan W. Paeth, Graphics % Interface '86 (Vancouver). ShearRotateImage() is adapted from a similar % method based on the Paeth paper written by Michael Halle of the Spatial % Imaging Group, MIT Media Lab. % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache-private.h" #include "MagickCore/channel.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/matrix.h" #include "MagickCore/memory_.h" #include "MagickCore/list.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/nt-base-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/quantum.h" #include "MagickCore/resource_.h" #include "MagickCore/shear.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/transform.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C r o p T o F i t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CropToFitImage() crops the sheared image to the bounding box defined by the % width, height, and shearing angles. % % The format of the CropToFitImage method is: % % MagickBooleanType CropToFitImage(Image **image, % const double x_shear,const double y_shear, % const double width,const double height, % const MagickBooleanType rotate,ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o x_shear, y_shear, width, height: Defines a region of the image to crop. % % o rotate: MagickTrue when cropping a rotated image; the X shear is then % accounted for twice, as in ShearRotateImage(). % % o exception: return any errors or warnings in this structure.
% */ static MagickBooleanType CropToFitImage(Image **image, const double x_shear,const double y_shear, const double width,const double height, const MagickBooleanType rotate,ExceptionInfo *exception) { Image *crop_image; PointInfo extent[4], min, max; RectangleInfo geometry, page; ssize_t i; /* Calculate the rotated image size. */ extent[0].x=(double) (-width/2.0); extent[0].y=(double) (-height/2.0); extent[1].x=(double) width/2.0; extent[1].y=(double) (-height/2.0); extent[2].x=(double) (-width/2.0); extent[2].y=(double) height/2.0; extent[3].x=(double) width/2.0; extent[3].y=(double) height/2.0; for (i=0; i < 4; i++) { extent[i].x+=x_shear*extent[i].y; extent[i].y+=y_shear*extent[i].x; if (rotate != MagickFalse) extent[i].x+=x_shear*extent[i].y; extent[i].x+=(double) (*image)->columns/2.0; extent[i].y+=(double) (*image)->rows/2.0; } min=extent[0]; max=extent[0]; for (i=1; i < 4; i++) { if (min.x > extent[i].x) min.x=extent[i].x; if (min.y > extent[i].y) min.y=extent[i].y; if (max.x < extent[i].x) max.x=extent[i].x; if (max.y < extent[i].y) max.y=extent[i].y; } geometry.x=CastDoubleToLong(ceil(min.x-0.5)); geometry.y=CastDoubleToLong(ceil(min.y-0.5)); geometry.width=(size_t) CastDoubleToLong(floor(max.x-min.x+0.5)); geometry.height=(size_t) CastDoubleToLong(floor(max.y-min.y+0.5)); page=(*image)->page; (void) ParseAbsoluteGeometry("0x0+0+0",&(*image)->page); crop_image=CropImage(*image,&geometry,exception); if (crop_image == (Image *) NULL) return(MagickFalse); crop_image->page=page; *image=DestroyImage(*image); *image=crop_image; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s k e w I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DeskewImage() removes skew from the image. Skew is an artifact that % occurs in scanned images because of the camera being misaligned, % imperfections in the scanning or surface, or simply because the paper was % not placed completely flat when scanned. % % The result will be auto-cropped if the artifact "deskew:auto-crop" is % defined, while the amount the image is to be deskewed, in degrees, is also % saved as the artifact "deskew:angle". % % The format of the DeskewImage method is: % % Image *DeskewImage(const Image *image,const double threshold, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold: separate background from foreground. % % o exception: return any errors or warnings in this structure.
% */ static void RadonProjection(const Image *image,MatrixInfo *source_matrixs, MatrixInfo *destination_matrixs,const ssize_t sign,size_t *projection) { MatrixInfo *swap; MatrixInfo *p, *q; ssize_t x; size_t step; p=source_matrixs; q=destination_matrixs; for (step=1; step < GetMatrixColumns(p); step*=2) { for (x=0; x < (ssize_t) GetMatrixColumns(p); x+=2*(ssize_t) step) { ssize_t i; ssize_t y; unsigned short element, neighbor; for (i=0; i < (ssize_t) step; i++) { for (y=0; y < (ssize_t) (GetMatrixRows(p)-i-1); y++) { if (GetMatrixElement(p,x+i,y,&element) == MagickFalse) continue; if (GetMatrixElement(p,x+i+step,y+i,&neighbor) == MagickFalse) continue; neighbor+=element; if (SetMatrixElement(q,x+2*i,y,&neighbor) == MagickFalse) continue; if (GetMatrixElement(p,x+i+step,y+i+1,&neighbor) == MagickFalse) continue; neighbor+=element; if (SetMatrixElement(q,x+2*i+1,y,&neighbor) == MagickFalse) continue; } for ( ; y < (ssize_t) (GetMatrixRows(p)-i); y++) { if (GetMatrixElement(p,x+i,y,&element) == MagickFalse) continue; if (GetMatrixElement(p,x+i+step,y+i,&neighbor) == MagickFalse) continue; neighbor+=element; if (SetMatrixElement(q,x+2*i,y,&neighbor) == MagickFalse) continue; if (SetMatrixElement(q,x+2*i+1,y,&element) == MagickFalse) continue; } for ( ; y < (ssize_t) GetMatrixRows(p); y++) { if (GetMatrixElement(p,x+i,y,&element) == MagickFalse) continue; if (SetMatrixElement(q,x+2*i,y,&element) == MagickFalse) continue; if (SetMatrixElement(q,x+2*i+1,y,&element) == MagickFalse) continue; } } } swap=p; p=q; q=swap; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,GetMatrixColumns(p),1) #else magick_unreferenced(image); #endif for (x=0; x < (ssize_t) GetMatrixColumns(p); x++) { ssize_t y; size_t sum; sum=0; for (y=0; y < (ssize_t) (GetMatrixRows(p)-1); y++) { ssize_t delta; unsigned short element, neighbor; if (GetMatrixElement(p,x,y,&element) == MagickFalse) continue; if (GetMatrixElement(p,x,y+1,&neighbor) == MagickFalse) continue; delta=(ssize_t) element-(ssize_t) neighbor; sum+=delta*delta; } projection[GetMatrixColumns(p)+sign*x-1]=sum; } } static MagickBooleanType RadonTransform(const Image *image, const double threshold,size_t *projection,ExceptionInfo *exception) { CacheView *image_view; MatrixInfo *destination_matrixs, *source_matrixs; MagickBooleanType status; size_t count, width; ssize_t j, y; unsigned char c; unsigned short bits[256]; for (width=1; width < ((image->columns+7)/8); width<<=1) ; source_matrixs=AcquireMatrixInfo(width,image->rows,sizeof(unsigned short), exception); destination_matrixs=AcquireMatrixInfo(width,image->rows, sizeof(unsigned short),exception); if ((source_matrixs == (MatrixInfo *) NULL) || (destination_matrixs == (MatrixInfo *) NULL)) { if (destination_matrixs != (MatrixInfo *) NULL) destination_matrixs=DestroyMatrixInfo(destination_matrixs); if (source_matrixs != (MatrixInfo *) NULL) source_matrixs=DestroyMatrixInfo(source_matrixs); return(MagickFalse); } if (NullMatrix(source_matrixs) == MagickFalse) { destination_matrixs=DestroyMatrixInfo(destination_matrixs); source_matrixs=DestroyMatrixInfo(source_matrixs); return(MagickFalse); } for (j=0; j < 256; j++) { c=(unsigned char) j; for (count=0; c != 0; c>>=1) count+=c & 0x01; bits[j]=(unsigned short) count; } status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; 
y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; ssize_t i, x; size_t bit, byte; unsigned short value; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } bit=0; byte=0; i=(ssize_t) (image->columns+7)/8; for (x=0; x < (ssize_t) image->columns; x++) { byte<<=1; if (((MagickRealType) GetPixelRed(image,p) < threshold) || ((MagickRealType) GetPixelGreen(image,p) < threshold) || ((MagickRealType) GetPixelBlue(image,p) < threshold)) byte|=0x01; bit++; if (bit == 8) { value=bits[byte]; (void) SetMatrixElement(source_matrixs,--i,y,&value); bit=0; byte=0; } p+=GetPixelChannels(image); } if (bit != 0) { byte<<=(8-bit); value=bits[byte]; (void) SetMatrixElement(source_matrixs,--i,y,&value); } } RadonProjection(image,source_matrixs,destination_matrixs,-1,projection); (void) NullMatrix(source_matrixs); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; ssize_t i, x; size_t bit, byte; unsigned short value; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } bit=0; byte=0; i=0; for (x=0; x < (ssize_t) image->columns; x++) { byte<<=1; if (((MagickRealType) GetPixelRed(image,p) < threshold) || ((MagickRealType) GetPixelGreen(image,p) < threshold) || ((MagickRealType) GetPixelBlue(image,p) < threshold)) byte|=0x01; bit++; if (bit == 8) { value=bits[byte]; (void) SetMatrixElement(source_matrixs,i++,y,&value); bit=0; byte=0; } p+=GetPixelChannels(image); } if (bit != 0) { byte<<=(8-bit); value=bits[byte]; (void) SetMatrixElement(source_matrixs,i++,y,&value); } } RadonProjection(image,source_matrixs,destination_matrixs,1,projection); image_view=DestroyCacheView(image_view); destination_matrixs=DestroyMatrixInfo(destination_matrixs); source_matrixs=DestroyMatrixInfo(source_matrixs); return(MagickTrue); } static void GetImageBackgroundColor(Image *image,const ssize_t offset, ExceptionInfo *exception) { CacheView *image_view; PixelInfo background; double count; ssize_t y; /* Compute average background color. 
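Only pixels within 'offset' pixels of the image border contribute to the
average; interior pixels are skipped.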
*/ if (offset <= 0) return; GetPixelInfo(image,&background); count=0.0; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; if ((y >= offset) && (y < ((ssize_t) image->rows-offset))) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) continue; for (x=0; x < (ssize_t) image->columns; x++) { if ((x >= offset) && (x < ((ssize_t) image->columns-offset))) continue; background.red+=QuantumScale*GetPixelRed(image,p); background.green+=QuantumScale*GetPixelGreen(image,p); background.blue+=QuantumScale*GetPixelBlue(image,p); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) background.alpha+=QuantumScale*GetPixelAlpha(image,p); count++; p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); image->background_color.red=(double) ClampToQuantum(QuantumRange* background.red/count); image->background_color.green=(double) ClampToQuantum(QuantumRange* background.green/count); image->background_color.blue=(double) ClampToQuantum(QuantumRange* background.blue/count); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->background_color.alpha=(double) ClampToQuantum(QuantumRange* background.alpha/count); } MagickExport Image *DeskewImage(const Image *image,const double threshold, ExceptionInfo *exception) { AffineMatrix affine_matrix; const char *artifact; double degrees; Image *clone_image, *crop_image, *deskew_image, *median_image; MagickBooleanType status; RectangleInfo geometry; ssize_t i; size_t max_projection, *projection, width; ssize_t skew; /* Compute deskew angle. */ for (width=1; width < ((image->columns+7)/8); width<<=1) ; projection=(size_t *) AcquireQuantumMemory((size_t) (2*width-1), sizeof(*projection)); if (projection == (size_t *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); status=RadonTransform(image,threshold,projection,exception); if (status == MagickFalse) { projection=(size_t *) RelinquishMagickMemory(projection); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } max_projection=0; skew=0; for (i=0; i < (ssize_t) (2*width-1); i++) { if (projection[i] > max_projection) { skew=i-(ssize_t) width+1; max_projection=projection[i]; } } projection=(size_t *) RelinquishMagickMemory(projection); degrees=RadiansToDegrees(-atan((double) skew/width/8)); if (image->debug != MagickFalse) (void) LogMagickEvent(TransformEvent,GetMagickModule(), " Deskew angle: %g",degrees); /* Deskew image. */ clone_image=CloneImage(image,0,0,MagickTrue,exception); if (clone_image == (Image *) NULL) return((Image *) NULL); { char angle[MagickPathExtent]; (void) FormatLocaleString(angle,MagickPathExtent,"%.20g",degrees); (void) SetImageArtifact(clone_image,"deskew:angle",angle); } (void) SetImageVirtualPixelMethod(clone_image,BackgroundVirtualPixelMethod, exception); affine_matrix.sx=cos(DegreesToRadians(fmod((double) degrees,360.0))); affine_matrix.rx=sin(DegreesToRadians(fmod((double) degrees,360.0))); affine_matrix.ry=(-sin(DegreesToRadians(fmod((double) degrees,360.0)))); affine_matrix.sy=cos(DegreesToRadians(fmod((double) degrees,360.0))); affine_matrix.tx=0.0; affine_matrix.ty=0.0; artifact=GetImageArtifact(image,"deskew:auto-crop"); if (IsStringTrue(artifact) == MagickFalse) { deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception); clone_image=DestroyImage(clone_image); return(deskew_image); } /* Auto-crop image. 
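The crop geometry is the bounding box of a 3x3 median-filtered copy of the
deskewed image, so isolated noise pixels do not inflate the box.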
*/ GetImageBackgroundColor(clone_image,(ssize_t) StringToLong(artifact), exception); deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception); clone_image=DestroyImage(clone_image); if (deskew_image == (Image *) NULL) return((Image *) NULL); median_image=StatisticImage(deskew_image,MedianStatistic,3,3,exception); if (median_image == (Image *) NULL) { deskew_image=DestroyImage(deskew_image); return((Image *) NULL); } geometry=GetImageBoundingBox(median_image,exception); median_image=DestroyImage(median_image); if (image->debug != MagickFalse) (void) LogMagickEvent(TransformEvent,GetMagickModule()," Deskew geometry: " "%.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double) geometry.height,(double) geometry.x,(double) geometry.y); crop_image=CropImage(deskew_image,&geometry,exception); deskew_image=DestroyImage(deskew_image); return(crop_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n t e g r a l R o t a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IntegralRotateImage() rotates the image by an integral multiple of 90 % degrees. It allocates the memory necessary for the new Image structure and % returns a pointer to the rotated image. % % The format of the IntegralRotateImage method is: % % Image *IntegralRotateImage(const Image *image,size_t rotations, % ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o rotations: Specifies the number of 90 degree rotations. % */ MagickExport Image *IntegralRotateImage(const Image *image,size_t rotations, ExceptionInfo *exception) { #define RotateImageTag "Rotate/Image" CacheView *image_view, *rotate_view; Image *rotate_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo page; /* Initialize rotated image attributes. */ assert(image != (Image *) NULL); page=image->page; rotations%=4; switch (rotations) { case 0: default: { rotate_image=CloneImage(image,0,0,MagickTrue,exception); break; } case 2: { rotate_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); break; } case 1: case 3: { rotate_image=CloneImage(image,image->rows,image->columns,MagickTrue, exception); break; } } if (rotate_image == (Image *) NULL) return((Image *) NULL); if (rotations == 0) return(rotate_image); /* Integral rotate the image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); rotate_view=AcquireAuthenticCacheView(rotate_image,exception); switch (rotations) { case 1: { size_t tile_height, tile_width; ssize_t tile_y; /* Rotate 90 degrees.
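The source is processed in strips of tile_height rows; each column of a
strip becomes a row segment of the destination.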
*/ GetPixelCacheTileSize(image,&tile_width,&tile_height); tile_width=image->columns; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,rotate_image,image->rows/tile_height,1) #endif for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t) tile_height) { ssize_t tile_x; if (status == MagickFalse) continue; tile_x=0; for ( ; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t) tile_width) { MagickBooleanType sync; const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t y; size_t height, width; width=tile_width; if ((tile_width+tile_x) > image->columns) width=(size_t) (tile_width-(tile_x+tile_width-image->columns)); height=tile_height; if ((tile_height+tile_y) > image->rows) height=(size_t) (tile_height-(tile_y+tile_height-image->rows)); p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height, exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (y=0; y < (ssize_t) width; y++) { const Quantum *magick_restrict tile_pixels; ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(rotate_view,(ssize_t) (rotate_image->columns-(tile_y+height)),y+tile_x,height,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } tile_pixels=p+((height-1)*width+y)*GetPixelChannels(image); for (x=0; x < (ssize_t) height; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait rotate_traits = GetPixelChannelTraits(rotate_image, channel); if ((traits == UndefinedPixelTrait) || (rotate_traits == UndefinedPixelTrait)) continue; SetPixelChannel(rotate_image,channel,tile_pixels[i],q); } tile_pixels-=width*GetPixelChannels(image); q+=GetPixelChannels(rotate_image); } sync=SyncCacheViewAuthenticPixels(rotate_view,exception); if (sync == MagickFalse) status=MagickFalse; } } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } (void) SetImageProgress(image,RotateImageTag,(MagickOffsetType) image->rows-1,image->rows); Swap(page.width,page.height); Swap(page.x,page.y); if (page.width != 0) page.x=(ssize_t) (page.width-rotate_image->columns-page.x); break; } case 2: { ssize_t y; /* Rotate 180 degrees. 
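Source row y is written to destination row (rows-y-1) with its pixels in
reverse order.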
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,rotate_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(rotate_view,0,(ssize_t) (image->rows-y- 1),image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } q+=GetPixelChannels(rotate_image)*image->columns; for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; q-=GetPixelChannels(rotate_image); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait rotate_traits = GetPixelChannelTraits(rotate_image, channel); if ((traits == UndefinedPixelTrait) || (rotate_traits == UndefinedPixelTrait)) continue; SetPixelChannel(rotate_image,channel,p[i],q); } p+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(rotate_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,RotateImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } (void) SetImageProgress(image,RotateImageTag,(MagickOffsetType) image->rows-1,image->rows); if (page.width != 0) page.x=(ssize_t) (page.width-rotate_image->columns-page.x); if (page.height != 0) page.y=(ssize_t) (page.height-rotate_image->rows-page.y); break; } case 3: { size_t tile_height, tile_width; ssize_t tile_y; /* Rotate 270 degrees. 
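The mirror of the 90 degree case: each strip column again becomes a
destination row segment, traversed in the opposite direction.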
*/ GetPixelCacheTileSize(image,&tile_width,&tile_height); tile_width=image->columns; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,rotate_image,image->rows/tile_height,1) #endif for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t) tile_height) { ssize_t tile_x; if (status == MagickFalse) continue; tile_x=0; for ( ; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t) tile_width) { MagickBooleanType sync; const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t y; size_t height, width; width=tile_width; if ((tile_width+tile_x) > image->columns) width=(size_t) (tile_width-(tile_x+tile_width-image->columns)); height=tile_height; if ((tile_height+tile_y) > image->rows) height=(size_t) (tile_height-(tile_y+tile_height-image->rows)); p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height, exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (y=0; y < (ssize_t) width; y++) { const Quantum *magick_restrict tile_pixels; ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(rotate_view,tile_y,(ssize_t) (y+ rotate_image->rows-(tile_x+width)),height,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } tile_pixels=p+((width-1)-y)*GetPixelChannels(image); for (x=0; x < (ssize_t) height; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait rotate_traits = GetPixelChannelTraits(rotate_image, channel); if ((traits == UndefinedPixelTrait) || (rotate_traits == UndefinedPixelTrait)) continue; SetPixelChannel(rotate_image,channel,tile_pixels[i],q); } tile_pixels+=width*GetPixelChannels(image); q+=GetPixelChannels(rotate_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_IntegralRotateImage) #endif sync=SyncCacheViewAuthenticPixels(rotate_view,exception); if (sync == MagickFalse) status=MagickFalse; } } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } (void) SetImageProgress(image,RotateImageTag,(MagickOffsetType) image->rows-1,image->rows); Swap(page.width,page.height); Swap(page.x,page.y); if (page.height != 0) page.y=(ssize_t) (page.height-rotate_image->rows-page.y); break; } default: break; } rotate_view=DestroyCacheView(rotate_view); image_view=DestroyCacheView(image_view); rotate_image->type=image->type; rotate_image->page=page; if (status == MagickFalse) rotate_image=DestroyImage(rotate_image); return(rotate_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + X S h e a r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % XShearImage() shears the image in the X direction with a shear angle of % 'degrees'. Positive angles shear counter-clockwise (right-hand rule), and % negative angles shear clockwise. Angles are measured relative to a vertical % Y-axis. X shears will widen an image creating 'empty' triangles on the left % and right sides of the source image. 
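% Each row is displaced independently; the fractional part of the per-row
% displacement is resolved by area-weighted blending of adjacent pixels.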
% % The format of the XShearImage method is: % % MagickBooleanType XShearImage(Image *image,const double degrees, % const size_t width,const size_t height, % const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o degrees: A double representing the shearing angle along the X % axis. % % o width, height, x_offset, y_offset: Defines a region of the image % to shear. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType XShearImage(Image *image,const double degrees, const size_t width,const size_t height,const ssize_t x_offset, const ssize_t y_offset,ExceptionInfo *exception) { #define XShearImageTag "XShear/Image" typedef enum { LEFT, RIGHT } ShearDirection; CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo background; ssize_t y; /* X shear image. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=MagickTrue; background=image->background_color; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,height,1) #endif for (y=0; y < (ssize_t) height; y++) { PixelInfo pixel, source, destination; double area, displacement; Quantum *magick_restrict p, *magick_restrict q; ssize_t i; ShearDirection direction; ssize_t step; if (status == MagickFalse) continue; p=GetCacheViewAuthenticPixels(image_view,0,y_offset+y,image->columns,1, exception); if (p == (Quantum *) NULL) { status=MagickFalse; continue; } p+=x_offset*GetPixelChannels(image); displacement=degrees*(double) (y-height/2.0); if (displacement == 0.0) continue; if (displacement > 0.0) direction=RIGHT; else { displacement*=(-1.0); direction=LEFT; } step=CastDoubleToLong(floor((double) displacement)); area=(double) (displacement-step); step++; pixel=background; GetPixelInfo(image,&source); GetPixelInfo(image,&destination); switch (direction) { case LEFT: { /* Transfer pixels left-to-right. */ if (step > x_offset) break; q=p-step*GetPixelChannels(image); for (i=0; i < (ssize_t) width; i++) { if ((x_offset+i) < step) { p+=GetPixelChannels(image); GetPixelInfoPixel(image,p,&pixel); q+=GetPixelChannels(image); continue; } GetPixelInfoPixel(image,p,&source); CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha, &source,(double) GetPixelAlpha(image,p),area,&destination); SetPixelViaPixelInfo(image,&destination,q); GetPixelInfoPixel(image,p,&pixel); p+=GetPixelChannels(image); q+=GetPixelChannels(image); } CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha, &background,(double) background.alpha,area,&destination); SetPixelViaPixelInfo(image,&destination,q); q+=GetPixelChannels(image); for (i=0; i < (step-1); i++) { SetPixelViaPixelInfo(image,&background,q); q+=GetPixelChannels(image); } break; } case RIGHT: { /* Transfer pixels right-to-left. 
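The row is walked from its right edge so that each destination pixel is
written after the source pixel it depends on has been read.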
*/ p+=width*GetPixelChannels(image); q=p+step*GetPixelChannels(image); for (i=0; i < (ssize_t) width; i++) { p-=GetPixelChannels(image); q-=GetPixelChannels(image); if ((size_t) (x_offset+width+step-i) > image->columns) continue; GetPixelInfoPixel(image,p,&source); CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha, &source,(double) GetPixelAlpha(image,p),area,&destination); SetPixelViaPixelInfo(image,&destination,q); GetPixelInfoPixel(image,p,&pixel); } CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha, &background,(double) background.alpha,area,&destination); q-=GetPixelChannels(image); SetPixelViaPixelInfo(image,&destination,q); for (i=0; i < (step-1); i++) { q-=GetPixelChannels(image); SetPixelViaPixelInfo(image,&background,q); } break; } } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,XShearImageTag,progress,height); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + Y S h e a r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % YShearImage shears the image in the Y direction with a shear angle of % 'degrees'. Positive angles shear counter-clockwise (right-hand rule), and % negative angles shear clockwise. Angles are measured relative to a % horizontal X-axis. Y shears will increase the height of an image creating % 'empty' triangles on the top and bottom of the source image. % % The format of the YShearImage method is: % % MagickBooleanType YShearImage(Image *image,const double degrees, % const size_t width,const size_t height, % const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o degrees: A double representing the shearing angle along the Y % axis. % % o width, height, x_offset, y_offset: Defines a region of the image % to shear. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType YShearImage(Image *image,const double degrees, const size_t width,const size_t height,const ssize_t x_offset, const ssize_t y_offset,ExceptionInfo *exception) { #define YShearImageTag "YShear/Image" typedef enum { UP, DOWN } ShearDirection; CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo background; ssize_t x; /* Y Shear image. 
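Structurally identical to XShearImage(), but columns are shifted instead of
rows.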
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=MagickTrue; progress=0; background=image->background_color; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,width,1) #endif for (x=0; x < (ssize_t) width; x++) { double area, displacement; PixelInfo pixel, source, destination; Quantum *magick_restrict p, *magick_restrict q; ssize_t i; ShearDirection direction; ssize_t step; if (status == MagickFalse) continue; p=GetCacheViewAuthenticPixels(image_view,x_offset+x,0,1,image->rows, exception); if (p == (Quantum *) NULL) { status=MagickFalse; continue; } p+=y_offset*GetPixelChannels(image); displacement=degrees*(double) (x-width/2.0); if (displacement == 0.0) continue; if (displacement > 0.0) direction=DOWN; else { displacement*=(-1.0); direction=UP; } step=CastDoubleToLong(floor((double) displacement)); area=(double) (displacement-step); step++; pixel=background; GetPixelInfo(image,&source); GetPixelInfo(image,&destination); switch (direction) { case UP: { /* Transfer pixels top-to-bottom. */ if (step > y_offset) break; q=p-step*GetPixelChannels(image); for (i=0; i < (ssize_t) height; i++) { if ((y_offset+i) < step) { p+=GetPixelChannels(image); GetPixelInfoPixel(image,p,&pixel); q+=GetPixelChannels(image); continue; } GetPixelInfoPixel(image,p,&source); CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha, &source,(double) GetPixelAlpha(image,p),area, &destination); SetPixelViaPixelInfo(image,&destination,q); GetPixelInfoPixel(image,p,&pixel); p+=GetPixelChannels(image); q+=GetPixelChannels(image); } CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha, &background,(double) background.alpha,area,&destination); SetPixelViaPixelInfo(image,&destination,q); q+=GetPixelChannels(image); for (i=0; i < (step-1); i++) { SetPixelViaPixelInfo(image,&background,q); q+=GetPixelChannels(image); } break; } case DOWN: { /* Transfer pixels bottom-to-top. 
*/ p+=height*GetPixelChannels(image); q=p+step*GetPixelChannels(image); for (i=0; i < (ssize_t) height; i++) { p-=GetPixelChannels(image); q-=GetPixelChannels(image); if ((size_t) (y_offset+height+step-i) > image->rows) continue; GetPixelInfoPixel(image,p,&source); CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha, &source,(double) GetPixelAlpha(image,p),area, &destination); SetPixelViaPixelInfo(image,&destination,q); GetPixelInfoPixel(image,p,&pixel); } CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha, &background,(double) background.alpha,area,&destination); q-=GetPixelChannels(image); SetPixelViaPixelInfo(image,&destination,q); for (i=0; i < (step-1); i++) { q-=GetPixelChannels(image); SetPixelViaPixelInfo(image,&background,q); } break; } } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,YShearImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h e a r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShearImage() creates a new image that is a sheared copy of an existing % one. Shearing slides one edge of an image along the X or Y axis, creating % a parallelogram. An X direction shear slides an edge along the X axis, % while a Y direction shear slides an edge along the Y axis. The amount of % the shear is controlled by a shear angle. For X direction shears, x_shear % is measured relative to the Y axis, and similarly, for Y direction shears % y_shear is measured relative to the X axis. Empty triangles left over from % shearing the image are filled with the background color defined by member % 'background_color' of the image. ShearImage() allocates the memory % necessary for the new Image structure and returns a pointer to the new image. % % ShearImage() is based on the paper "A Fast Algorithm for General Raster % Rotation" by Alan W. Paeth. % % The format of the ShearImage method is: % % Image *ShearImage(const Image *image,const double x_shear, % const double y_shear,ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o x_shear, y_shear: Specifies the number of degrees to shear the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ShearImage(const Image *image,const double x_shear, const double y_shear,ExceptionInfo *exception) { Image *integral_image, *shear_image; MagickBooleanType status; PointInfo shear; RectangleInfo border_info, bounds; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((x_shear != 0.0) && (fmod(x_shear,90.0) == 0.0)) ThrowImageException(ImageError,"AngleIsDiscontinuous"); if ((y_shear != 0.0) && (fmod(y_shear,90.0) == 0.0)) ThrowImageException(ImageError,"AngleIsDiscontinuous"); /* Initialize shear angle.
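The shear angles are converted to slopes here: shear.x = -tan(x_shear) and
shear.y = tan(y_shear).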
*/ integral_image=CloneImage(image,0,0,MagickTrue,exception); if (integral_image == (Image *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); shear.x=(-tan(DegreesToRadians(fmod(x_shear,360.0)))); shear.y=tan(DegreesToRadians(fmod(y_shear,360.0))); if ((shear.x == 0.0) && (shear.y == 0.0)) return(integral_image); if (SetImageStorageClass(integral_image,DirectClass,exception) == MagickFalse) { integral_image=DestroyImage(integral_image); return(integral_image); } if (integral_image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel,exception); /* Compute image size. */ bounds.width=image->columns+CastDoubleToLong(floor(fabs(shear.x)* image->rows+0.5)); bounds.x=CastDoubleToLong(ceil((double) image->columns+((fabs(shear.x)* image->rows)-image->columns)/2.0-0.5)); bounds.y=CastDoubleToLong(ceil((double) image->rows+((fabs(shear.y)* bounds.width)-image->rows)/2.0-0.5)); /* Surround image with border. */ integral_image->border_color=integral_image->background_color; integral_image->compose=CopyCompositeOp; border_info.width=(size_t) bounds.x; border_info.height=(size_t) bounds.y; shear_image=BorderImage(integral_image,&border_info,image->compose,exception); integral_image=DestroyImage(integral_image); if (shear_image == (Image *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); /* Shear the image. */ if (shear_image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(shear_image,OpaqueAlphaChannel,exception); status=XShearImage(shear_image,shear.x,image->columns,image->rows,bounds.x, (ssize_t) (shear_image->rows-image->rows)/2,exception); if (status == MagickFalse) { shear_image=DestroyImage(shear_image); return((Image *) NULL); } status=YShearImage(shear_image,shear.y,bounds.width,image->rows,(ssize_t) (shear_image->columns-bounds.width)/2,bounds.y,exception); if (status == MagickFalse) { shear_image=DestroyImage(shear_image); return((Image *) NULL); } status=CropToFitImage(&shear_image,shear.x,shear.y,(MagickRealType) image->columns,(MagickRealType) image->rows,MagickFalse,exception); shear_image->alpha_trait=image->alpha_trait; shear_image->compose=image->compose; shear_image->page.width=0; shear_image->page.height=0; if (status == MagickFalse) shear_image=DestroyImage(shear_image); return(shear_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h e a r R o t a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShearRotateImage() creates a new image that is a rotated copy of an existing % one. Positive angles rotate counter-clockwise (right-hand rule), while % negative angles rotate clockwise. Rotated images are usually larger than % the originals and have 'empty' triangular corners. Empty % triangles left over from shearing the image are filled with the background % color defined by member 'background_color' of the image. ShearRotateImage % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % ShearRotateImage() is based on the paper "A Fast Algorithm for General % Raster Rotation" by Alan W. Paeth. ShearRotateImage is adapted from a % similar method based on the Paeth paper written by Michael Halle of the % Spatial Imaging Group, MIT Media Lab.
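% The residual angle (after folding out integral 90 degree rotations) is
% decomposed into three shears: an X shear of -tan(angle/2), a Y shear of
% sin(angle), and a second X shear of -tan(angle/2).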
% % The format of the ShearRotateImage method is: % % Image *ShearRotateImage(const Image *image,const double degrees, % ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o degrees: Specifies the number of degrees to rotate the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ShearRotateImage(const Image *image,const double degrees, ExceptionInfo *exception) { Image *integral_image, *rotate_image; MagickBooleanType status; MagickRealType angle; PointInfo shear; RectangleInfo border_info, bounds; size_t height, rotations, shear_width, width; /* Adjust rotation angle. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); angle=fmod(degrees,360.0); if (angle < -45.0) angle+=360.0; for (rotations=0; angle > 45.0; rotations++) angle-=90.0; rotations%=4; /* Calculate shear equations. */ integral_image=IntegralRotateImage(image,rotations,exception); if (integral_image == (Image *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); shear.x=(-tan((double) DegreesToRadians(angle)/2.0)); shear.y=sin((double) DegreesToRadians(angle)); if ((shear.x == 0.0) && (shear.y == 0.0)) return(integral_image); if (SetImageStorageClass(integral_image,DirectClass,exception) == MagickFalse) { integral_image=DestroyImage(integral_image); return(integral_image); } if (integral_image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel,exception); /* Compute maximum bounds for 3 shear operations. */ width=integral_image->columns; height=integral_image->rows; bounds.width=(size_t) floor(fabs((double) height*shear.x)+width+0.5); bounds.height=(size_t) floor(fabs((double) bounds.width*shear.y)+height+0.5); shear_width=(size_t) floor(fabs((double) bounds.height*shear.x)+ bounds.width+0.5); bounds.x=CastDoubleToLong(floor((double) ((shear_width > bounds.width) ? width : bounds.width-shear_width+2)/2.0+0.5)); bounds.y=CastDoubleToLong(floor(((double) bounds.height-height+2)/2.0+0.5)); /* Surround image with a border. */ integral_image->border_color=integral_image->background_color; integral_image->compose=CopyCompositeOp; border_info.width=(size_t) bounds.x; border_info.height=(size_t) bounds.y; rotate_image=BorderImage(integral_image,&border_info,image->compose, exception); integral_image=DestroyImage(integral_image); if (rotate_image == (Image *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); /* Rotate the image. 
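Apply the three shears in sequence (X, then Y, then X again), then crop the
result to the rotated bounding box.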
*/ status=XShearImage(rotate_image,shear.x,width,height,bounds.x,(ssize_t) (rotate_image->rows-height)/2,exception); if (status == MagickFalse) { rotate_image=DestroyImage(rotate_image); return((Image *) NULL); } status=YShearImage(rotate_image,shear.y,bounds.width,height,(ssize_t) (rotate_image->columns-bounds.width)/2,bounds.y,exception); if (status == MagickFalse) { rotate_image=DestroyImage(rotate_image); return((Image *) NULL); } status=XShearImage(rotate_image,shear.x,bounds.width,bounds.height,(ssize_t) (rotate_image->columns-bounds.width)/2,(ssize_t) (rotate_image->rows- bounds.height)/2,exception); if (status == MagickFalse) { rotate_image=DestroyImage(rotate_image); return((Image *) NULL); } status=CropToFitImage(&rotate_image,shear.x,shear.y,(MagickRealType) width, (MagickRealType) height,MagickTrue,exception); rotate_image->alpha_trait=image->alpha_trait; rotate_image->compose=image->compose; rotate_image->page.width=0; rotate_image->page.height=0; if (status == MagickFalse) rotate_image=DestroyImage(rotate_image); return(rotate_image); }
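/*
  Illustrative sketch (not a MagickCore API): the three-shear identity that
  ShearRotateImage() relies on, applied to a single point.  With
  a = -tan(angle/2) and b = sin(angle), an X shear, a Y shear, and a second
  X shear compose to a rotation by 'angle':

    [1 a] [1 0] [1 a]   [ cos(angle) -sin(angle) ]
    [0 1] [b 1] [0 1] = [ sin(angle)  cos(angle) ]

  The helper name below is hypothetical and exists only to demonstrate the
  arithmetic; the block is kept disabled.
*/
#if 0
#include <math.h>

static void SketchShearRotatePoint(const double degrees,const double x,
  const double y,double *rotated_x,double *rotated_y)
{
  const double angle=DegreesToRadians(degrees);
  const double a=(-tan(angle/2.0));
  const double b=sin(angle);
  double px=x, py=y;

  px+=a*py;   /* first X shear */
  py+=b*px;   /* Y shear */
  px+=a*py;   /* second X shear */
  *rotated_x=px;  /* equals x*cos(angle)-y*sin(angle) */
  *rotated_y=py;  /* equals x*sin(angle)+y*cos(angle) */
}
#endif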
octree_mex.c
/* Copyright (c) 2012 by Marcin Krotkiewski, University of Oslo See ../License.txt for License Agreement. */ /* libutils headers */ #include <libutils/config.h> #include <libutils/utils.h> #include <libutils/parallel.h> #include <libmatlab/mesh.h> #include <libmatlab/mexparams.h> /* system headers */ #include <stdlib.h> #include <stdio.h> #include "octree_opts.h" /* maximum integer value that can be stored in a double */ /* such that all smaller integer values can also be stored in a double */ #define MAX_TREE_DEPTH MIN(53, (sizeof(Uint)*CHAR_BIT)) #define ROOT_DEPTH (MAX_TREE_DEPTH-1) #define MAX_VAL (double)(MaxUint ^ (MaxUint<<ROOT_DEPTH)) #define EMPTY_ELID ((dimType)-1) #ifndef NDIM #define NDIM 3 #endif /* different string constants for quadtrees and octrees */ #if NDIM==3 #define QTREE_STR_ID "oTreeMP" #define QTREE_STR "otree" #define QUADTREE_STR "octree" #define QUADRANT_STR "octant" #else #define QTREE_STR_ID "qTreeMP" #define QTREE_STR "qtree" #define QUADTREE_STR "quadtree" #define QUADRANT_STR "quadrant" #endif Double MACHEPS; Int vtk_write2d(char *model_name, dimType *elems, Double *nodes, dimType *celldata, dimType nnod, dimType nel, dimType nnodel); Int vtk_write3d(char *model_name, dimType *elems, Double *nodes, dimType *celldata, dimType nnod, dimType nel, dimType nnodel); #if NDIM==2 #define NCHILDREN 4 typedef struct { Double x, y; } t_node_coords; #else #define NCHILDREN 8 typedef struct { Double x, y, z; } t_node_coords; #endif /* quadrant structure */ #define EMPTY_QUADRANT ((dimType)-1) typedef struct _t_quadrant t_quadrant; struct _t_quadrant { Uint x_code; Uint y_code; #if NDIM==3 Uint z_code; #endif Uint level; size_t parent; size_t children[NCHILDREN]; dimType n_points; /* how many points are in the quadrant */ dimType point_id[]; /* point id of the points in quadrant. 
*/ /* this thing is actually an array of n_leaf_points point ids */ }; /* quadtree structure */ typedef struct { char name[8]; dimType n_leaves; dimType n_quadrants; dimType n_leaf_points; size_t quadrant_size; dimType n_points; Double xmin, xmax, iextentx; Double ymin, ymax, iextenty; #if NDIM==3 Double zmin, zmax, iextentz; #endif } t_quadtree; size_t header_size = sizeof(t_quadtree); /* memory pool structure */ typedef struct { /* complete memory allocated, including the header */ /* and the subsequent list of quadrants pointed to by base_ptr */ char *head_ptr; /* base_ptr is the pointer to the quadrant array */ /* We can not use t_quadrant since the type definition is incomplete, */ /* therefore pointer arithmetic on t_quadrant* is not defined */ char *base_ptr; /* quadrant_size (size of the t_quadrant structure) */ /* depends on the n_leaf_points specified at runtime */ size_t quadrant_size; size_t size; size_t realloc_size; dimType ptr; } t_mempool; #define EMPTY_MEMPOOL_STRUCT {NULL,NULL,0,0,0,0} /* global variables */ /* search statistics */ static dimType n_leaves = 1; static Ulong n_elems_searched = 0; static Double avg_elems_searched = 0; static Ulong n_max_elems_searched = 0; /* lists of elements to be searched */ /* while looking for the element containing a marker */ static Uint nlists = 0; static dimType *slist[1024] = {0}; static size_t *slist_size[1024] = {0}; static Uint initialized = 0; /* free list structure */ void quadtree_mex_cleanup(void) { Uint i; for(i=0; i<nlists; i++) { if(slist[i]) { mfree(slist[i], sizeof(dimType)*slist_size[i][0]); } } } /* compute the quadrant pointer from the base memory pool address */ /* and quadrant offset */ #define CHILD_POINTER(node, n, mempool) \ (t_quadrant*)(mempool->base_ptr + node->children[n]) /* allocate and initialize new leaf quadrant */ /* reallocate memory pool if necessary */ #if NDIM==3 STATIC INLINE void create_child(t_quadrant **dest, Uint n, Uint _x_code, Uint _y_code, Uint _z_code, t_mempool *mempool) #else STATIC INLINE void create_child(t_quadrant **dest, Uint n, Uint _x_code, Uint _y_code, t_mempool *mempool) #endif { t_quadrant *child = NULL; size_t offset = (char*)dest[0] - mempool->base_ptr; if(mempool->ptr == mempool->size){ mempool->size += mempool->realloc_size; mrealloc(mempool->head_ptr, header_size + mempool->size*mempool->quadrant_size, mempool->realloc_size*mempool->quadrant_size); mempool->base_ptr = mempool->head_ptr + header_size; dest[0] = (t_quadrant*)(mempool->base_ptr + offset); } dest[0]->children[n] = (size_t)mempool->ptr*mempool->quadrant_size; mempool->ptr++; child = CHILD_POINTER(dest[0], n, mempool); child->x_code = _x_code; child->y_code = _y_code; #if NDIM==3 child->z_code = _z_code; #endif child->level = dest[0]->level-1; child->parent = offset; child->children[0]= (size_t)EMPTY_QUADRANT; child->n_points = 0; child->point_id[0]= EMPTY_ELID; } t_quadrant *quadtree_locate_codes(t_quadrant *dest, t_node_coords coords, t_mempool *mempool) { /* node coordinates out of bounds - point outside of the quadrant */ Double x = (Double)dest->x_code/MAX_VAL; Double y = (Double)dest->y_code/MAX_VAL; Uint x_code; Uint y_code; #if NDIM==3 Double z = (Double)dest->z_code/MAX_VAL; Uint z_code; #endif Double d = 1.0/(Double)(MAX_TREE_DEPTH-dest->level); Uint level; Uint bit; Uint child; /* check if the node belongs to this quadrant, or any of its children */ if(coords.x<x || coords.x>x+d || coords.y<y || coords.y>y+d) return NULL; /* fix the case where point is located at the domain boundary */ 
  if(coords.x==1.0) coords.x = coords.x-MACHEPS;
  if(coords.y==1.0) coords.y = coords.y-MACHEPS;

  x_code = (Uint)(coords.x*MAX_VAL);
  y_code = (Uint)(coords.y*MAX_VAL);

  /* the same for the Z-dimension */
#if NDIM==3
  if(coords.z<z || coords.z>z+d) return NULL;
  if(coords.z==1.0) coords.z = coords.z-MACHEPS;
  z_code = (Uint)(coords.z*MAX_VAL);
#endif

  level = dest->level-1;
  bit = (Uint)1 << level;

  while((dest)->children[0] != EMPTY_QUADRANT){
#if NDIM==3
    child =
      ((x_code & bit) !=0)    |
      ((y_code & bit) !=0)<<1 |
      ((z_code & bit) !=0)<<2 ;
#else
    child =
      ((x_code & bit) !=0)    |
      ((y_code & bit) !=0)<<1;
#endif
    dest = CHILD_POINTER(dest, child, mempool);
    bit >>= 1;
  }

  return dest;
}


/* Incrementally build a quadtree from nodes. */
/* Add nodes in sequence, quadtree structure is refined in the process. */
/* Internally the quadtree structure is built from a normalized domain, */
/* i.e., coordinates from [0,1]. The coordinates of added points */
/* are normalized as we go. */
void quadtree_add_node(t_quadrant *dest, Double *nodes, dimType point_id,
                       dimType n_leaf_points, dimType *n_qtree_points, t_mempool *mempool)
{
  t_node_coords coords;

  coords.x = nodes[point_id*NDIM+0];
  coords.y = nodes[point_id*NDIM+1];
#if NDIM==3
  coords.z = nodes[point_id*NDIM+2];
#endif

  {
    /* normalize coordinates */
    t_quadtree *tree = (t_quadtree *)mempool->head_ptr;
    coords.x = (coords.x - tree->xmin)*tree->iextentx; /* i.e., / (tree->xmax - tree->xmin) */
    coords.y = (coords.y - tree->ymin)*tree->iextenty; /* i.e., / (tree->ymax - tree->ymin) */
#if NDIM==3
    coords.z = (coords.z - tree->zmin)*tree->iextentz; /* i.e., / (tree->zmax - tree->zmin) */
#endif
  }

  /* look for the destination quadrant */
  if(dest->children[0] != EMPTY_QUADRANT){
    dest = quadtree_locate_codes(dest, coords, mempool);
  }

  /* nothing to do - point outside of the quadrant domain */
  if(!dest) {
#if NDIM==3
    USERERROR(QTREE_STR": point outside of domain: (%lf, %lf, %lf)", MUTILS_INVALID_PARAMETER,
              nodes[point_id*NDIM+0], nodes[point_id*NDIM+1], nodes[point_id*NDIM+2]);
#else
    USERERROR(QTREE_STR": point outside of domain: (%lf, %lf)", MUTILS_INVALID_PARAMETER,
              nodes[point_id*NDIM+0], nodes[point_id*NDIM+1]);
#endif
    return;
  }

  /* if the quadrant has free space simply add the node */
  if(dest->n_points < n_leaf_points){
    dest->point_id[dest->n_points++] = point_id;
    (*n_qtree_points)++;
    return;
  }

  /* safeguard - quadtree maximum level exceeded */
  if(dest->level==0) {
    USERWARNING(QTREE_STR": maximum tree level exceeded (too much local refinement).\n Point %"PRI_DIMTYPE" not added to quadtree.",
                MUTILS_INTEGER_OVERFLOW, point_id);
    return;
  }

  /* split the leaf (dest) and reassign the nodes to new quadtree leaves */
  /* do not clear the node information in the parent */
  /* useful when leaves are empty and we want to have */
  /* some information about nearby points/elements during search */
  {
    Uint bit = (Uint)1<<(dest->level-1);
#if NDIM==2
    create_child((&dest), 0, (dest->x_code)      , (dest->y_code)      , mempool);
    create_child((&dest), 1, (dest->x_code) | bit, (dest->y_code)      , mempool);
    create_child((&dest), 2, (dest->x_code)      , (dest->y_code) | bit, mempool);
    create_child((&dest), 3, (dest->x_code) | bit, (dest->y_code) | bit, mempool);

    /* update number of leaves */
    n_leaves += 3;
#else
    create_child((&dest), 0, (dest->x_code)      , (dest->y_code)      ,
                 (dest->z_code)      , mempool);
    create_child((&dest), 1, (dest->x_code) | bit, (dest->y_code)      ,
                 (dest->z_code)      , mempool);
    create_child((&dest), 2, (dest->x_code)      , (dest->y_code) | bit,
                 (dest->z_code)      , mempool);
    create_child((&dest), 3, (dest->x_code) | bit, (dest->y_code) | bit,
(dest->z_code) , mempool); create_child((&dest), 4, (dest->x_code) , (dest->y_code) , (dest->z_code) | bit, mempool); create_child((&dest), 5, (dest->x_code) | bit, (dest->y_code) , (dest->z_code) | bit, mempool); create_child((&dest), 6, (dest->x_code) , (dest->y_code) | bit, (dest->z_code) | bit, mempool); create_child((&dest), 7, (dest->x_code) | bit, (dest->y_code) | bit, (dest->z_code) | bit, mempool); /* update number of leaves */ n_leaves += 7; #endif } /* add the old nodes directly to the correct child quadrant */ { dimType ptid; t_quadrant *child; Uint childid; Uint bit = (Uint)1 << (dest->level-1); Uint x_code, y_code; #if NDIM==3 Uint z_code; #endif /* NOTE: memory pool might have been reallocated, refresh tree pointer! */ t_quadtree *tree = (t_quadtree *)mempool->head_ptr; for(ptid=0; ptid<n_leaf_points; ptid++){ coords.x = nodes[(size_t)dest->point_id[ptid]*NDIM+0]; coords.y = nodes[(size_t)dest->point_id[ptid]*NDIM+1]; /* normalize coordinates */ coords.x = (coords.x - tree->xmin)*tree->iextentx; //(tree->xmax - tree->xmin); coords.y = (coords.y - tree->ymin)*tree->iextenty; //(tree->ymax - tree->ymin); x_code = (Uint)(coords.x*MAX_VAL); y_code = (Uint)(coords.y*MAX_VAL); #if NDIM==3 coords.z = nodes[(size_t)dest->point_id[ptid]*NDIM+2]; coords.z = (coords.z - tree->zmin)*tree->iextentz; //(tree->zmax - tree->zmin); z_code = (Uint)(coords.z*MAX_VAL); childid = ((x_code & bit) !=0) | ((y_code & bit) !=0)<<1 | ((z_code & bit) !=0)<<2 ; #else childid = ((x_code & bit) !=0) | ((y_code & bit) !=0)<<1; #endif child = CHILD_POINTER(dest, childid, mempool); child->point_id[child->n_points++] = dest->point_id[ptid]; } } /* add the new node recursively */ quadtree_add_node(dest, nodes, point_id, n_leaf_points, n_qtree_points, mempool); } /* linearize the quadtree - extract leaves in Z-curve ordering */ #ifdef _MSC_VER #pragma auto_inline(off) #endif void quadtree_extract_leaves(t_quadrant *dest, t_quadrant **tree_leaves, dimType *itree_leaves, t_mempool *mempool) { /* store the leaves */ if(dest->children[0]==EMPTY_QUADRANT){ tree_leaves[*itree_leaves] = dest; (*itree_leaves)++; return; } /* traverse the subtrees */ quadtree_extract_leaves(CHILD_POINTER(dest, 0, mempool), tree_leaves, itree_leaves, mempool); quadtree_extract_leaves(CHILD_POINTER(dest, 1, mempool), tree_leaves, itree_leaves, mempool); quadtree_extract_leaves(CHILD_POINTER(dest, 2, mempool), tree_leaves, itree_leaves, mempool); quadtree_extract_leaves(CHILD_POINTER(dest, 3, mempool), tree_leaves, itree_leaves, mempool); #if NDIM==3 quadtree_extract_leaves(CHILD_POINTER(dest, 4, mempool), tree_leaves, itree_leaves, mempool); quadtree_extract_leaves(CHILD_POINTER(dest, 5, mempool), tree_leaves, itree_leaves, mempool); quadtree_extract_leaves(CHILD_POINTER(dest, 6, mempool), tree_leaves, itree_leaves, mempool); quadtree_extract_leaves(CHILD_POINTER(dest, 7, mempool), tree_leaves, itree_leaves, mempool); #endif } /* linearize the quadtree - extract points in Z-curve ordering */ #ifdef _MSC_VER #pragma auto_inline(off) #endif void quadtree_extract_points(t_quadrant *dest, dimType *points, dimType *points_ptr, t_mempool *mempool) { dimType i; /* copy point data from the leaves */ if(dest->children[0]==EMPTY_QUADRANT){ if(dest->n_points){ for(i=0; i<dest->n_points; i++){ points[(*points_ptr)+i] = dest->point_id[i]+ONE_BASED_INDEX; } (*points_ptr) += dest->n_points; } return; } /* traverse the subtrees */ quadtree_extract_points(CHILD_POINTER(dest, 0, mempool), points, points_ptr, mempool); 
quadtree_extract_points(CHILD_POINTER(dest, 1, mempool), points, points_ptr, mempool); quadtree_extract_points(CHILD_POINTER(dest, 2, mempool), points, points_ptr, mempool); quadtree_extract_points(CHILD_POINTER(dest, 3, mempool), points, points_ptr, mempool); #if NDIM==3 quadtree_extract_points(CHILD_POINTER(dest, 4, mempool), points, points_ptr, mempool); quadtree_extract_points(CHILD_POINTER(dest, 5, mempool), points, points_ptr, mempool); quadtree_extract_points(CHILD_POINTER(dest, 6, mempool), points, points_ptr, mempool); quadtree_extract_points(CHILD_POINTER(dest, 7, mempool), points, points_ptr, mempool); #endif } #define ENQUEUE_ERR_MSG "Integer overflow in quadtree_locate_element at memory reallocation." #define ENQUEUE_NEIGHBOR(n) \ if((n)!=EMPTY_ELID){ \ if(thr_slist_nel==thr_slist_size){ \ size_t size; \ uint64_t temp; \ safemult_u(sizeof(dimType), 2, temp, ENQUEUE_ERR_MSG); \ safemult_u(temp, thr_slist_size, temp, ENQUEUE_ERR_MSG); \ managed_type_cast(size_t, size, temp, ENQUEUE_ERR_MSG); \ mrealloc(slist[thrid],sizeof(dimType)*2*thr_slist_size, sizeof(dimType)*thr_slist_size); \ slist_size[thrid][0] *= 2; \ thr_slist = slist[thrid]; \ thr_slist_size = slist_size[thrid][0]; \ } \ thr_slist[thr_slist_nel++] = (n); \ } \ #if NDIM==2 #include "point_in_triangle.c" #else #include "point_in_tetrahedron.c" #endif /***********************************************************/ /* MATLAB INTERFACE */ /***********************************************************/ mxArray *qtree2mex(t_quadtree *tree, size_t tree_size){ #define n_fieldnames 5 const char *fieldnames[n_fieldnames] = {QTREE_STR, "n_leaves", "n_leaf_points", "n_"QUADRANT_STR"s", "n_points"}; mxArray *outp = mxCreateStructMatrix(1, 1, n_fieldnames, fieldnames); mxArray *field; Uint n = 0; mxClassID class_id; field = mxCreateNumericMatrix(0, 0, mxUINT8_CLASS,mxREAL); mxSetData(field, (void*)tree); mxSetN(field, 1); mxSetM(field, tree_size); mxSetField(outp, 0, fieldnames[n++], field); get_matlab_class(dimType, class_id); field = mxCreateNumericMatrix(1,1,class_id,mxREAL); ((dimType*)mxGetData(field))[0] = tree->n_leaves; mxSetField(outp, 0, fieldnames[n++], field); field = mxCreateNumericMatrix(1,1,class_id,mxREAL); ((dimType*)mxGetData(field))[0] = tree->n_leaf_points; mxSetField(outp, 0, fieldnames[n++], field); field = mxCreateNumericMatrix(1,1,class_id,mxREAL); ((dimType*)mxGetData(field))[0] = tree->n_quadrants; mxSetField(outp, 0, fieldnames[n++], field); field = mxCreateNumericMatrix(1,1,class_id,mxREAL); ((dimType*)mxGetData(field))[0] = tree->n_points; mxSetField(outp, 0, fieldnames[n++], field); return outp; } t_quadtree* mex2qtree(const mxArray *qtree_struct){ mxArray *field; t_quadtree *qtree; if(!mxIsStruct(qtree_struct)){ USERERROR(QTREE_STR"_struct is not a structure", MUTILS_INVALID_PARAMETER); } /* quadtree memory pointer */ field = mxGetField(qtree_struct, 0, QTREE_STR); if(!field){ USERERROR(QTREE_STR"_struct is not a valid "QUADTREE_STR, MUTILS_INVALID_PARAMETER); } qtree = (t_quadtree*)mxGetData(field); /* verify the contents - memory area header */ qtree->name[7] = 0; if(strcmp(qtree->name, QTREE_STR_ID)){ USERERROR(QTREE_STR"_struct is not a valid "QUADTREE_STR" - invalid header", MUTILS_INVALID_PARAMETER); } return qtree; } mxArray *mex_quadtree_create(int nargin, const mxArray *pargin[]) { size_t m, n; char buff[256]; Double *points = NULL; dimType n_points; dimType n_dim; dimType i; int arg = 1; dimType n_leaf_points; /* domain size */ Double xmin, xmax; Double ymin, ymax; #if NDIM==3 Double zmin, zmax; 
#endif t_mempool mempool = {NULL,NULL,0,0,0,0}; size_t initial_size = 0; t_quadtree *qtree = NULL; t_quadrant *root = NULL; dimType n_qtree_points = 0; size_t pow2m1; if(!initialized){ initialized = 1; mexAtExit(quadtree_mex_cleanup); } #if NDIM==3 if(nargin<8){ USERERROR("Usage: "QUADTREE_STR" = "QUADTREE_STR"('create', POINTS, xmin, xmax, ymin, ymax, zmin, zmax, [max_points_in_leaf])", MUTILS_INVALID_PARAMETER); } #else if(nargin<6){ USERERROR("Usage: "QUADTREE_STR" = "QUADTREE_STR"('create', POINTS, xmin, xmax, ymin, ymax, [max_points_in_leaf])", MUTILS_INVALID_PARAMETER); } #endif /* POINTS */ { char _buff[10]; sprintf(_buff, "%d", NDIM); m = NDIM; n = 0; points = mex_get_matrix(Double, pargin[arg++], &m, &n, "POINTS", _buff, "number of points", 0); } SNPRINTF(buff, 255, "No dimensions of 'POINTS' can be larger than %"PRI_DIMTYPE, MaxDimType); managed_type_cast(dimType, n_dim, m, buff); managed_type_cast(dimType, n_points, n, buff); /* domain extents */ m = 1; xmin = mex_get_matrix(Double, pargin[arg++], &m, &m, "xmin", "1", "1", 0)[0]; xmax = mex_get_matrix(Double, pargin[arg++], &m, &m, "xmax", "1", "1", 0)[0]; ymin = mex_get_matrix(Double, pargin[arg++], &m, &m, "ymin", "1", "1", 0)[0]; ymax = mex_get_matrix(Double, pargin[arg++], &m, &m, "ymax", "1", "1", 0)[0]; #if NDIM==3 zmin = mex_get_matrix(Double, pargin[arg++], &m, &m, "zmin", "1", "1", 0)[0]; zmax = mex_get_matrix(Double, pargin[arg++], &m, &m, "zmax", "1", "1", 0)[0]; #endif /* maximum number of points in quadrant */ if(nargin>arg){ n_leaf_points = mex_get_integer_scalar(dimType, pargin[arg++], "max_points_in_leaf", 0, 0); arg++; } else { n_leaf_points = 1; } if(n_leaf_points>n_points) n_leaf_points = n_points; if(n_leaf_points<1) n_leaf_points = 1; tic(); /* setup the memory pool */ /* Allocate roughly the correct amount of memory */ /* for the case when points are spread uniformly in space. 
*/ pow2m1 = pow2m1_roundup(n_leaf_points); initial_size = (size_t)n_points*2/(pow2m1+1); mempool.size = initial_size; mempool.realloc_size = mempool.size/2; mempool.ptr = 1; mempool.quadrant_size = sizeof(t_quadrant) + sizeof(dimType)*n_leaf_points; mcalloc(mempool.head_ptr, header_size + mempool.size*mempool.quadrant_size); mempool.base_ptr = mempool.head_ptr + header_size; /* set real domain dimensions for coordinate normalization */ qtree = (t_quadtree*)mempool.head_ptr; qtree->xmin = xmin; qtree->xmax = xmax; qtree->ymin = ymin; qtree->ymax = ymax; qtree->iextentx = 1.0/(qtree->xmax - qtree->xmin); qtree->iextenty = 1.0/(qtree->ymax - qtree->ymin); #if NDIM==3 qtree->zmin = zmin; qtree->zmax = zmax; qtree->iextentz = 1.0/(qtree->zmax - qtree->zmin); #endif /* add root quadrant */ root = (t_quadrant*)mempool.base_ptr; root->level = ROOT_DEPTH; root->x_code = 0; root->y_code = 0; #if NDIM==3 root->z_code = 0; #endif root->n_points = 0; root->children[0] = (size_t)EMPTY_QUADRANT; root->parent = (size_t)EMPTY_QUADRANT; n_leaves = 1; /* run */ n_qtree_points = 0; for(i=0; i<n_points; i++){ /* memory pool can be reallocated in quadtree_add_node */ root = (t_quadrant*)mempool.base_ptr; quadtree_add_node(root, points, i, n_leaf_points, &n_qtree_points, &mempool); } ntoc("actual work"); /* fill the memory header */ qtree = (t_quadtree*)mempool.head_ptr; strncpy(qtree->name, QTREE_STR_ID, 8); qtree->n_leaves = n_leaves; qtree->n_quadrants = mempool.ptr; qtree->n_leaf_points = n_leaf_points; qtree->quadrant_size = mempool.quadrant_size; qtree->n_points = n_qtree_points; #if 0 if(n_qtree_points != n_points){ #if NDIM==3 MESSAGE("Some of the points were outside of the specified domain:\n\n\ (xmin=%.1e, xmax=%.1e, ymin=%.1e, ymax=%.1e, zmin=%.1e, zmax=%.1e)\n\n\ and were not added to the "QUADTREE_STR".\n \ Please specify correct domain extents.", xmin, xmax, ymin, ymax, zmin, zmax); #else MESSAGE("Some of the points were outside of the specified domain:\n\n\ (xmin=%.1e, xmax=%.1e, ymin=%.1e, ymax=%.1e)\n\nand were not added to the "QUADTREE_STR".\n \ Please specify correct domain extents.", xmin, xmax, ymin, ymax); #endif } #endif /* reallocate memory using MATLAB's allocation routines */ { t_quadtree *_qtree; mmalloc_global(_qtree, header_size + mempool.size*mempool.quadrant_size); memcpy(_qtree, qtree, header_size + mempool.size*mempool.quadrant_size); mfree(qtree, header_size + mempool.size*mempool.quadrant_size); qtree = _qtree; mpersistent(qtree, header_size + mempool.size*mempool.quadrant_size); } return qtree2mex(qtree, header_size + mempool.size*mempool.quadrant_size); } mxArray *mex_quadtree_locate(int nargin, const mxArray *pargin[]) { size_t m, n; char buff[256]; Uint arg = 1; dimType *element_map = NULL; dimType n_dim; t_quadtree *tree = NULL; t_mempool mempool = EMPTY_MEMPOOL_STRUCT; t_mesh mesh = EMPTY_MESH_STRUCT; Ulong n_markers; Double *markers; dimType *elids = NULL; mxArray *outp = NULL; t_opts opts; if(!initialized){ initialized = 1; mexAtExit(quadtree_mex_cleanup); #ifdef ROBUST_PREDICATES exactinit(); #endif } if(nargin<4){ USERERROR("Usage: [MAP, stats] = "QUADTREE_STR"('locate', "QUADTREE_STR", MESH, MARKERS, [MAP], [opts])", MUTILS_INVALID_PARAMETER); } /* qtree structure */ tree = mex2qtree(pargin[arg++]); mempool.head_ptr = (char*)tree; mempool.base_ptr = mempool.head_ptr + header_size; mempool.quadrant_size = tree->quadrant_size; mempool.ptr = tree->n_quadrants; /* triangular mesh structure */ mesh = mex2mesh(pargin[arg++], NDIM); if(!mesh.neighbors){ USERERROR("MESH 
must contain NEIGHBORS information", MUTILS_INVALID_MESH); return NULL; } /* MARKERS */ { char _buff[10]; sprintf(_buff, "%d", NDIM); m = NDIM; n = 0; markers = mex_get_matrix(Double, pargin[arg++], &m, &n, "MARKERS", _buff, "number of markers", 0); } SNPRINTF(buff, 255, "No dimensions of 'MARKERS' can be larger than %"PRI_ULONG, MaxUlong); managed_type_cast(dimType, n_dim, m, buff); managed_type_cast(Ulong, n_markers, n, buff); /* optional - previous marker-to-element map to use */ if(nargin>=5){ m = 1; n = n_markers; element_map = mex_get_matrix(dimType, pargin[arg++], &m, &n, "MAP", "1", "number of markers", 1); } /* options */ if(nargin>=6){ opts = mex2opts(pargin[5]); } else { opts = mex2opts(NULL); } /* optional - inplace map. Existing MAP input will be overwritten and returned as output. */ /* Not allowed in MATLAB, so be careful and make sure MAP is not used elsewhere/linked to. */ if(opts.inplace && element_map){ opts.inplace = opts.inplace!=0; } if(opts.inplace){ outp = (mxArray *)pargin[4]; elids = element_map; } /* MEX output, needs to be global and persistent */ if(!outp){ mcalloc_global(elids, sizeof(dimType)*n_markers); } n_elems_searched = 0; n_max_elems_searched = 0; /* use default/environment defined number of threads */ parallel_set_num_threads(opts.nthreads); #ifdef USE_OPENMP #pragma omp parallel #endif { Ulong i; Uint thrid, nthr; Ulong marker_start, marker_end; dimType elid = EMPTY_ELID; Ulong nel_searched; t_quadrant *quadrant = NULL; Ulong thr_n_elems_searched = 0; Ulong thr_n_max_elems_searched = 0; t_quadrant *root = NULL; Ulong blk_size; Ulong *map; mcalloc(map, sizeof(Ulong)*mesh.n_elems); /* locate the markers in the elements using the quadtree */ root = (t_quadrant*)mempool.base_ptr; parallel_get_info(&thrid, &nthr); if(opts.cpu_affinity) affinity_bind(thrid, opts.cpu_start + thrid); blk_size = n_markers/nthr+1; marker_start = blk_size*thrid; marker_end = blk_size*(thrid+1); marker_end = MIN(n_markers, marker_end); /* global list initialization */ nlists = MAX(nlists, nthr); if(slist[thrid]==NULL){ /* allocate a lot to avoid page sharing between threads */ mmalloc(slist[thrid], sizeof(dimType)*4096); mmalloc(slist_size[thrid], sizeof(size_t)*4096); slist_size[thrid][0] = 4096; } for(i=marker_start; i<marker_end; i++){ elid = EMPTY_ELID; /* prefetch markers - non-temporal to make space */ /* for the qtree structure in the CPU caches */ /* if(i+16<marker_end) _mm_prefetch(((char*)markers)+(i+16), _MM_HINT_NTA); */ if(element_map){ elid = element_map[i]; if(elid<ONE_BASED_INDEX || elid-ONE_BASED_INDEX>=mesh.n_elems) elid = EMPTY_ELID; else elid -= ONE_BASED_INDEX; } if(elid==EMPTY_ELID){ /* Locate the quadrant. */ /* quadrant is needed only to get some 'nearby' element id. */ /* The correct element containing the marker is located */ /* by searching the element neighbors. */ /* uptree traversal does not speed up things at all */ /* even if the input points are reasonably sorted */ /* quadrant = quadtree_locate_sorted(quadrant, markers[i*2+0], markers[i*2+1], &mempool); */ t_node_coords coords; /* normalize coordinates */ coords.x = (markers[(size_t)i*NDIM+0] - tree->xmin)*tree->iextentx; //(tree->xmax - tree->xmin); coords.y = (markers[(size_t)i*NDIM+1] - tree->ymin)*tree->iextenty; //(tree->ymax - tree->ymin); #if NDIM==3 coords.z = (markers[(size_t)i*NDIM+2] - tree->zmin)*tree->iextentz; //(tree->zmax - tree->zmin); #endif quadrant = quadtree_locate_codes(root, coords, &mempool); if(quadrant){ /* Find a nearby node in the quadtree. 
*/ /* If the given quadrant is empty, */ /* return a node stored in first non-empty parent */ elid = quadrant->point_id[0]; while(elid==EMPTY_ELID){ if(quadrant->parent == EMPTY_QUADRANT) break; quadrant = (t_quadrant*)(quadrant->parent + mempool.base_ptr); elid = quadrant->point_id[0]; } } } /* find containing element */ /* NOTE: coordinate normalization not needed here since we do a mesh search, */ /* not a quadtree search. */ #if NDIM==3 elid = quadtree_locate_tet(elid, i+1, markers+(size_t)i*NDIM, mesh, map, &nel_searched, thrid); #else elid = quadtree_locate_tri(elid, i+1, markers+(size_t)i*NDIM, mesh, map, &nel_searched, thrid); #endif elids[i] = ONE_BASED_INDEX + elid; thr_n_elems_searched += nel_searched; thr_n_max_elems_searched = MAX(thr_n_max_elems_searched, nel_searched); } #ifdef USE_OPENMP #pragma omp atomic #endif n_elems_searched += thr_n_elems_searched; #ifdef USE_OPENMP #pragma omp critical #endif n_max_elems_searched = MAX(n_max_elems_searched, thr_n_max_elems_searched); mfree(map, sizeof(Ulong)*mesh.n_elems); } avg_elems_searched = (Double)n_elems_searched/n_markers; if(!outp) outp = mex_set_matrix(dimType, elids, 1, n_markers); return outp; } mxArray *mex_quadtree_reorder(int nargin, const mxArray *pargin[]) { Uint arg = 1; t_quadtree *tree = NULL; t_mempool mempool = EMPTY_MEMPOOL_STRUCT; dimType *I; dimType points_ptr = 0; mxArray *outp; if(!initialized){ initialized = 1; mexAtExit(quadtree_mex_cleanup); } if(nargin<2){ USERERROR("Usage: I = "QUADTREE_STR"('reorder', "QUADTREE_STR")", MUTILS_INVALID_PARAMETER); } tree = mex2qtree(pargin[arg++]); mempool.head_ptr = (char*)tree; mempool.base_ptr = mempool.head_ptr + header_size; mempool.quadrant_size = tree->quadrant_size; mempool.ptr = tree->n_quadrants; /* MEX output, needs to be global and persistent */ mmalloc_global(I, sizeof(dimType)*tree->n_points); /* extract nodes in the Z-ordering */ quadtree_extract_points((t_quadrant*)mempool.base_ptr, I, &points_ptr, &mempool); outp = mex_set_matrix(dimType, I, 1, points_ptr); return outp; } void mex_vtkwrite(int nargin, const mxArray *pargin[]) { dimType i; /* prepare vtk data */ t_quadrant **tree_leaves; dimType itree_leaves = 0; Uint arg = 1; t_quadtree *tree; t_mempool mempool = EMPTY_MEMPOOL_STRUCT; t_quadrant *root; Double *vtk_nodes; dimType *vtk_elems; dimType *vtk_celld; dimType n_cells = 0; char fname[512]; if(!initialized){ initialized = 1; mexAtExit(quadtree_mex_cleanup); } if(nargin<2){ USERERROR("Usage: "QUADTREE_STR"('vtkwrite', "QUADTREE_STR", [file_name])", MUTILS_INVALID_PARAMETER); } /* quadtree */ tree = mex2qtree(pargin[arg++]); mempool.head_ptr = (char*)tree; mempool.base_ptr = mempool.head_ptr + header_size; mempool.quadrant_size = tree->quadrant_size; mempool.ptr = tree->n_quadrants; /* file name */ if(nargin>2){ if(!mxIsChar(pargin[arg])) USERERROR("'file_name' must be a string", MUTILS_INVALID_PARAMETER); if(0!=mxGetString(pargin[arg], fname, 511)) USERERROR("file_name too long, can be maximum 511 characters.", MUTILS_INVALID_PARAMETER); } else { sprintf(fname, "%s", QUADTREE_STR); } root = (t_quadrant*)mempool.base_ptr; mcalloc(tree_leaves, sizeof(t_quadrant*)*n_leaves); quadtree_extract_leaves(root, tree_leaves, &itree_leaves, &mempool); mcalloc(vtk_nodes, sizeof(Double)*NCHILDREN*n_leaves*NDIM); mcalloc(vtk_elems, sizeof(dimType)*NCHILDREN*n_leaves); mcalloc(vtk_celld, sizeof(dimType)*n_leaves); for(i=0; i<n_leaves; i++){ Double mix, miy, max, may; Double dx, dy; #if NDIM==3 Double miz, maz, dz; #endif dx = 
(1L<<(tree_leaves[i]->level))/MAX_VAL; dy = (1L<<(tree_leaves[i]->level))/MAX_VAL; mix = tree_leaves[i]->x_code/MAX_VAL; miy = tree_leaves[i]->y_code/MAX_VAL; max = dx + mix; may = dy + miy; dx = dx*0.03; dy = dy*0.03; #if NDIM==3 dz = (1L<<(tree_leaves[i]->level))/MAX_VAL; miz = tree_leaves[i]->z_code/MAX_VAL; maz = dz + miz; dz = dz*0.03; #endif vtk_nodes[i*NCHILDREN*NDIM + 0*NDIM + 0] = mix+dx; vtk_nodes[i*NCHILDREN*NDIM + 0*NDIM + 1] = miy+dy; vtk_nodes[i*NCHILDREN*NDIM + 1*NDIM + 0] = max-dx; vtk_nodes[i*NCHILDREN*NDIM + 1*NDIM + 1] = miy+dy; vtk_nodes[i*NCHILDREN*NDIM + 2*NDIM + 0] = max-dx; vtk_nodes[i*NCHILDREN*NDIM + 2*NDIM + 1] = may-dy; vtk_nodes[i*NCHILDREN*NDIM + 3*NDIM + 0] = mix+dx; vtk_nodes[i*NCHILDREN*NDIM + 3*NDIM + 1] = may-dy; vtk_elems[i*NCHILDREN + 0] = i*NCHILDREN + 0; vtk_elems[i*NCHILDREN + 1] = i*NCHILDREN + 1; vtk_elems[i*NCHILDREN + 2] = i*NCHILDREN + 2; vtk_elems[i*NCHILDREN + 3] = i*NCHILDREN + 3; #if NDIM==3 /* add Z-coordinate to first 4 nodes */ vtk_nodes[i*NCHILDREN*NDIM + 0*NDIM + 2] = miz+dz; vtk_nodes[i*NCHILDREN*NDIM + 1*NDIM + 2] = miz+dz; vtk_nodes[i*NCHILDREN*NDIM + 2*NDIM + 2] = miz+dz; vtk_nodes[i*NCHILDREN*NDIM + 3*NDIM + 2] = miz+dz; /* add 4 more nodes */ vtk_nodes[i*NCHILDREN*NDIM + 4*NDIM + 0] = mix+dx; vtk_nodes[i*NCHILDREN*NDIM + 4*NDIM + 1] = miy+dy; vtk_nodes[i*NCHILDREN*NDIM + 4*NDIM + 2] = maz-dz; vtk_nodes[i*NCHILDREN*NDIM + 5*NDIM + 0] = max-dx; vtk_nodes[i*NCHILDREN*NDIM + 5*NDIM + 1] = miy+dy; vtk_nodes[i*NCHILDREN*NDIM + 5*NDIM + 2] = maz-dz; vtk_nodes[i*NCHILDREN*NDIM + 6*NDIM + 0] = max-dx; vtk_nodes[i*NCHILDREN*NDIM + 6*NDIM + 1] = may-dy; vtk_nodes[i*NCHILDREN*NDIM + 6*NDIM + 2] = maz-dz; vtk_nodes[i*NCHILDREN*NDIM + 7*NDIM + 0] = mix+dx; vtk_nodes[i*NCHILDREN*NDIM + 7*NDIM + 1] = may-dy; vtk_nodes[i*NCHILDREN*NDIM + 7*NDIM + 2] = maz-dz; vtk_elems[i*NCHILDREN + 4] = i*NCHILDREN + 4; vtk_elems[i*NCHILDREN + 5] = i*NCHILDREN + 5; vtk_elems[i*NCHILDREN + 6] = i*NCHILDREN + 6; vtk_elems[i*NCHILDREN + 7] = i*NCHILDREN + 7; #endif vtk_celld[i] = tree_leaves[i]->n_points; n_cells += vtk_celld[i]; } #if NDIM==3 vtk_write3d(fname, vtk_elems, vtk_nodes, vtk_celld, n_leaves*NCHILDREN, n_leaves, NCHILDREN); #else vtk_write2d(fname, vtk_elems, vtk_nodes, vtk_celld, n_leaves*NCHILDREN, n_leaves, NCHILDREN); #endif mfree(tree_leaves, sizeof(t_quadrant*)*n_leaves); mfree(vtk_nodes, sizeof(Double)*NCHILDREN*n_leaves*NDIM); mfree(vtk_elems, sizeof(dimType)*NCHILDREN*n_leaves); mfree(vtk_celld, sizeof(dimType)*n_leaves); } Int vtk_write2d(char *model_name, dimType *elems, Double *nodes, dimType *celldata, dimType nnod, dimType nel, dimType nnodel) { FILE *out_vtk; Ulong i; char file_name[512+4]; sprintf(file_name, "%s.vtk", model_name); out_vtk=fopen(file_name, "w"); fprintf(out_vtk,"# vtk DataFile Version 3.0\n"); fprintf(out_vtk,"my cool data\n"); fprintf(out_vtk,"ASCII\n"); fprintf(out_vtk,"DATASET UNSTRUCTURED_GRID\n"); fprintf(out_vtk,"POINTS %"PRI_DIMTYPE" double\n", nnod); for (i=0;i<nnod;i++){ fprintf(out_vtk,"%lf %lf 0.0\n", nodes[2*i+0], nodes[2*i+1]); } fprintf(out_vtk,"CELLS %"PRI_DIMTYPE" %"PRI_DIMTYPE"\n", nel, (1+nnodel)*nel); for (i=0;i<nel;i++){ fprintf(out_vtk,"4 %"PRI_DIMTYPE" %"PRI_DIMTYPE" %"PRI_DIMTYPE" %"PRI_DIMTYPE"\n", elems[nnodel*i+0], elems[nnodel*i+1], elems[nnodel*i+2], elems[nnodel*i+3]); } fprintf(out_vtk,"CELL_TYPES %"PRI_DIMTYPE"\n", nel); for (i=0;i<nel;i++){ fprintf(out_vtk,"9\n"); } fprintf(out_vtk,"CELL_DATA %"PRI_DIMTYPE"\n", nel); fprintf(out_vtk,"SCALARS n_nodes_in_quadrant long 1\n"); 
fprintf(out_vtk,"LOOKUP_TABLE default\n"); for (i=0;i<nel;i++){ fprintf(out_vtk,"%"PRI_DIMTYPE"\n", celldata[i]); } fclose(out_vtk); return 0; } Int vtk_write3d(char *model_name, dimType *elems, Double *nodes, dimType *celldata, dimType nnod, dimType nel, dimType nnodel) { FILE *out_vtk; Ulong i; char file_name[512+4]; sprintf(file_name, "%s.vtk", model_name); out_vtk=fopen(file_name, "w"); fprintf(out_vtk,"# vtk DataFile Version 3.0\n"); fprintf(out_vtk,"my cool data\n"); fprintf(out_vtk,"ASCII\n"); fprintf(out_vtk,"DATASET UNSTRUCTURED_GRID\n"); fprintf(out_vtk,"POINTS %"PRI_DIMTYPE" double\n", nnod); for (i=0;i<nnod;i++){ fprintf(out_vtk,"%lf %lf %lf\n", nodes[3*i+0], nodes[3*i+1], nodes[3*i+2]); } fprintf(out_vtk,"CELLS %"PRI_DIMTYPE" %"PRI_DIMTYPE"\n", nel, (1+nnodel)*nel); for (i=0;i<nel;i++){ fprintf(out_vtk,"%"PRI_DIMTYPE" %"PRI_DIMTYPE" %"PRI_DIMTYPE" %"PRI_DIMTYPE" %"PRI_DIMTYPE " %"PRI_DIMTYPE" %"PRI_DIMTYPE" %"PRI_DIMTYPE" %"PRI_DIMTYPE"\n", nnodel, elems[nnodel*i+0], elems[nnodel*i+1], elems[nnodel*i+2], elems[nnodel*i+3], elems[nnodel*i+4], elems[nnodel*i+5], elems[nnodel*i+6], elems[nnodel*i+7]); } fprintf(out_vtk,"CELL_TYPES %"PRI_DIMTYPE"\n", nel); for (i=0;i<nel;i++){ fprintf(out_vtk,"12\n"); } fprintf(out_vtk,"CELL_DATA %"PRI_DIMTYPE"\n", nel); fprintf(out_vtk,"SCALARS n_nodes_in_quadrant long 1\n"); fprintf(out_vtk,"LOOKUP_TABLE default\n"); for (i=0;i<nel;i++){ fprintf(out_vtk,"%"PRI_DIMTYPE"\n", celldata[i]); } fclose(out_vtk); return 0; } mxArray *mex_quadtree_stats(void) { #undef n_fieldnames #define n_fieldnames 4 const char *fieldnames[n_fieldnames] = {"n_elems_searched", "avg_elems_searched", "n_max_elems_searched", "list_size"}; mxArray *outp = mxCreateStructMatrix(1, 1, n_fieldnames, fieldnames); mxArray *field; Uint n = 0; field = mxCreateNumericMatrix(1,1,mxUINT64_CLASS,mxREAL); ((Ulong*)mxGetData(field))[0] = n_elems_searched; mxSetField(outp, 0, fieldnames[n++], field); field = mxCreateNumericMatrix(1,1,mxDOUBLE_CLASS,mxREAL); ((double*)mxGetData(field))[0] = avg_elems_searched; mxSetField(outp, 0, fieldnames[n++], field); field = mxCreateNumericMatrix(1,1,mxUINT64_CLASS,mxREAL); ((Ulong*)mxGetData(field))[0] = n_max_elems_searched; mxSetField(outp, 0, fieldnames[n++], field); field = mxCreateNumericMatrix(1,1,mxUINT64_CLASS,mxREAL); if(nlists){ ((Ulong*)mxGetData(field))[0] = slist_size[0][0]; } else { ((Ulong*)mxGetData(field))[0] = -1; } mxSetField(outp, 0, fieldnames[n++], field); return outp; } void mexFunction(int nargout, mxArray *pargout [ ], int nargin, const mxArray *pargin[]) { int arg = 0; char cmd[256]; /* get machine epsilon */ MACHEPS = macheps(); if (nargin < 1) MEXHELP; /* command */ { if(!mxIsChar(pargin[arg])){ USERERROR("command parameter must be a string", MUTILS_INVALID_PARAMETER); } mxGetString(pargin[arg], cmd, 255); arg++; } if(!strcmp(cmd, "create")){ if(nargout>0){ pargout[0] = mex_quadtree_create(nargin, pargin); } DEBUG_STATISTICS; return; } if(!strcmp(cmd, "vtkwrite")){ mex_vtkwrite(nargin, pargin); DEBUG_STATISTICS; return; } if(!strcmp(cmd, "locate")){ if(nargout>0){ pargout[0] = mex_quadtree_locate(nargin, pargin); } if(nargout>1){ pargout[1] = mex_quadtree_stats(); } DEBUG_STATISTICS; return; } if(!strcmp(cmd, "reorder")){ if(nargout>0){ pargout[0] = mex_quadtree_reorder(nargin, pargin); } DEBUG_STATISTICS; return; } USERERROR("unknown command", MUTILS_INVALID_PARAMETER); }
unified_shared_memory.c
// RUN: %libomptarget-compile-generic -fopenmp-version=51
// RUN: %libomptarget-run-generic 2>&1 \
// RUN: | %fcheck-generic

// REQUIRES: unified_shared_memory

#include <stdio.h>

// The runtime considers unified shared memory to be always present.
#pragma omp requires unified_shared_memory

int main() {
  int i;

  // CHECK-NOT: Libomptarget
#pragma omp target data map(alloc: i)
#pragma omp target map(present, alloc: i)
  ;

  // CHECK: i is present
  fprintf(stderr, "i is present\n");

  // CHECK-NOT: Libomptarget
#pragma omp target map(present, alloc: i)
  ;

  // CHECK: i is present
  fprintf(stderr, "i is present\n");

  return 0;
}
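// Note added for contrast (not part of the original test): without the
// `requires unified_shared_memory` directive above, a `present` map on data
// that was never mapped is a runtime error, and libomptarget aborts with a
// "Libomptarget" diagnostic -- which is what the CHECK-NOT lines guard
// against here.  A minimal sketch of the failing shape:
//
//   int j;
//   #pragma omp target map(present, alloc: j) // no enclosing target data;
//   ;                                         // fails unless j is present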
pr69568.c
/* PR hsa/69568 */

typedef float float2 __attribute__ ((vector_size (8)));

float2 *output;

void __attribute__((noinline, noclone))
foo (int n, float2 *a, int workgroup_size)
{
  int i;

#pragma omp target map(from:a[:n]) firstprivate(n, workgroup_size)
#pragma omp teams thread_limit(workgroup_size)
#pragma omp distribute parallel for shared(a) firstprivate(n) private(i)
  for (i = 0; i < n; i++)
    {
      float2 v;
      v[0] = i;
      v[1] = 1+i;
      a[i] = v;
    }
}

int
main (int argc, char **argv)
{
  int n = 32;
  float2 *a = __builtin_malloc (sizeof (float2) * n);
  int i;

  __builtin_memset (a, 0, sizeof (float2) * n);
  foo (n, a, 32);

  for (i = 0; i < n; i++)
    {
      float2 v = a[i];
      /* Use a floating-point abs: __builtin_abs would truncate the
	 difference to int and hide deviations smaller than 1.0.  */
      if (__builtin_fabsf (v[0] - i) > 0.1
	  || __builtin_fabsf (v[1] - i - 1) > 0.1)
	{
	  __builtin_abort ();
	  return 1;
	}
    }
  return 0;
}
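/* Note added for illustration (not part of the original test): float2 is a
   GNU vector_size type, so v[0]/v[1] address the two float lanes and
   arithmetic applies element-wise.  A tiny host-side sketch using the
   typedef above; FLOAT2_DEMO is a hypothetical guard:  */
#ifdef FLOAT2_DEMO
static float2
scale2 (float2 v, float s)
{
  float2 w = { s, s };	/* broadcast the scalar into both lanes */
  return v * w;		/* element-wise multiply */
}
#endif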
scaffold.c
/* Copyright 2007, 2008 Daniel Zerbino (zerbino@ebi.ac.uk) This file is part of Velvet. Velvet is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Velvet is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Velvet; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <stdlib.h> #include <stdio.h> #include <time.h> #include <math.h> #include <sys/time.h> #ifdef _OPENMP #include <omp.h> #endif #include "globals.h" #include "graph.h" #include "concatenatedGraph.h" #include "recycleBin.h" #include "locallyCorrectedGraph.h" #include "passageMarker.h" #include "readSet.h" #include "utility.h" #include "scaffold.h" #define BLOCK_SIZE 100000 #define LN2 1.4 static int PEBBLE_ROUND_NUM = 0; typedef struct readOccurence_st ReadOccurence; static double paired_exp_fraction = 0.1; struct connection_st { Node *destination; Connection *right; Connection *left; Connection *twin; float distance; float variance; IDnum direct_count; IDnum paired_count; unsigned char clean; } ATTRIBUTE_PACKED; struct readOccurence_st { IDnum position; IDnum offset; IDnum nodeID; } ATTRIBUTE_PACKED; // Global params static IDnum UNRELIABLE_CONNECTION_CUTOFF = 5; // Global pointers static Graph *graph; static Connection **scaffold = NULL; static RecycleBin *connectionMemory = NULL; static boolean estimated[CATEGORIES + 1]; #ifdef _OPENMP #define READS_PER_LOCK 32 /* Array of reads locks */ static omp_lock_t *readsLocks = NULL; /* Array of per-node locks */ static omp_lock_t *nodeLocks = NULL; static void createReadsLocks() { Coordinate nbLocks; Coordinate lockIndex; if (readsLocks) free (readsLocks); nbLocks = 1 + sequenceCount(graph) / READS_PER_LOCK; readsLocks = mallocOrExit(nbLocks, omp_lock_t); #pragma omp parallel for for (lockIndex = 0; lockIndex < nbLocks; lockIndex++) omp_init_lock(readsLocks + lockIndex); } static inline void lockRead(IDnum readID) { omp_set_lock (readsLocks + readID / READS_PER_LOCK); } static inline void unLockRead(IDnum readID) { omp_unset_lock (readsLocks + readID / READS_PER_LOCK); } static void createNodeLocks(Graph *graph) { IDnum nbNodes; IDnum nodeIndex; nbNodes = nodeCount(graph) + 1; if (nodeLocks) free (nodeLocks); nodeLocks = mallocOrExit(nbNodes, omp_lock_t); #pragma omp parallel for for (nodeIndex = 0; nodeIndex < nbNodes; nodeIndex++) omp_init_lock(nodeLocks + nodeIndex); } /* Tries to avoid deadlocking */ static inline void lockTwoNodes(IDnum nodeID, IDnum node2ID) { if (nodeID < 0) nodeID = -nodeID; if (node2ID < 0) node2ID = -node2ID; /* Lock lowest ID first to avoid deadlocks */ if (nodeID < node2ID) { omp_set_lock (nodeLocks + nodeID); omp_set_lock (nodeLocks + node2ID); } else { omp_set_lock (nodeLocks + node2ID); omp_set_lock (nodeLocks + nodeID); } } static inline void unLockTwoNodes(IDnum nodeID, IDnum node2ID) { if (nodeID < 0) nodeID = -nodeID; if (node2ID < 0) node2ID = -node2ID; omp_unset_lock (nodeLocks + nodeID); omp_unset_lock (nodeLocks + node2ID); } #endif static Connection *allocateConnection() { Connection *connect; #ifdef _OPENMP #pragma omp critical { #endif if (connectionMemory == NULL) 
connectionMemory = newRecycleBin(sizeof(Connection), BLOCK_SIZE); connect = allocatePointer(connectionMemory); #ifdef _OPENMP } #endif connect->destination = NULL; connect->clean = false; return connect; } static void deallocateConnection(Connection * connect) { deallocatePointer(connectionMemory, connect); } Node * getConnectionDestination(Connection * connect) { return connect->destination; } Connection * getNextConnection(Connection * connect) { return connect->right; } Connection * getTwinConnection(Connection * connect) { return connect->twin; } Coordinate getConnectionDistance(Connection * connect) { return (Coordinate) connect->distance; } double getConnectionVariance(Connection * connect) { return connect->variance; } IDnum getConnectionDirectCount(Connection * connect) { return connect->direct_count; } IDnum getConnectionPairedCount(Connection * connect) { return connect->paired_count; } Connection * getConnection(Node * node) { return scaffold[getNodeID(node) + nodeCount(graph)]; } void incrementConnectionDistance(Connection * connect, Coordinate increment) { connect->distance += increment; } static double norm(double X) { return 0.4 * exp(-X * X / 2); } static double normInt(double X, double Y) { return (erf(0.7 * Y) - erf(0.7 * X)) / 2; } static IDnum expectedNumberOfConnections(IDnum IDA, Connection * connect, IDnum ** counts, Category cat) { Node *A = getNodeInGraph(graph, IDA); Node *B = connect->destination; double left, middle, right; Coordinate longLength, shortLength, D; IDnum longCount; double M, N, O, P; Coordinate mu = getInsertLength(graph, cat); double sigma = sqrt(getInsertLength_var(graph, cat)); double result; if (mu <= 0) return 0; if (getNodeLength(A) < getNodeLength(B)) { longLength = getNodeLength(B); shortLength = getNodeLength(A); longCount = counts[cat][getNodeID(B) + nodeCount(graph)]; } else { longLength = getNodeLength(A); shortLength = getNodeLength(B); longCount = counts[cat][IDA + nodeCount(graph)]; } D = getConnectionDistance(connect) - (longLength + shortLength) / 2; M = (D - mu) / sigma; N = (D + shortLength - mu) / sigma; O = (D + longLength - mu) / sigma; P = (D + shortLength + longLength - mu) / sigma; left = ((norm(M) - norm(N)) - M * normInt(M, N)) * sigma; middle = shortLength * normInt(N, O); right = ((norm(O) - norm(P)) - P * normInt(O, P)) * (-sigma); result = (longCount * (left + middle + right)) / longLength; if (result > 0) return (IDnum) result; else return 0; } void destroyConnection(Connection * connect, IDnum nodeID) { Connection *previous, *next; //velvetLog("Destroying connection from %li to %li\n", nodeID, getNodeID(connect->destination)); if (connect == NULL) return; previous = connect->left; next = connect->right; if (previous != NULL) previous->right = next; if (next != NULL) next->left = previous; if (scaffold[nodeID + nodeCount(graph)] == connect) scaffold[nodeID + nodeCount(graph)] = next; if (connect->twin != NULL) { connect->twin->twin = NULL; destroyConnection(connect->twin, getNodeID(connect->destination)); } deallocateConnection(connect); } static boolean testConnection(IDnum IDA, Connection *connect, IDnum **counts, boolean *shadows) { IDnum total = 0; Category cat; // Spare unique -> undetermined node connections if (!getUniqueness(connect->destination)) return true; // Destroy tenuous connections if (connect->paired_count + connect->direct_count < UNRELIABLE_CONNECTION_CUTOFF) return false; for (cat = 0; cat < CATEGORIES; cat++) if (!shadows[cat] || cat <= PEBBLE_ROUND_NUM) total += expectedNumberOfConnections(IDA, 
connect, counts, cat); // Remove inconsistent connections return connect->paired_count >= total * paired_exp_fraction; } static IDnum *computeReadToNodeCounts(Coordinate *totalCount) { IDnum nodeIndex; IDnum maxNodeIndex = 2 * nodeCount(graph) + 1; IDnum maxReadIndex = sequenceCount(graph) + 1; IDnum *readNodeCounts = callocOrExit(maxReadIndex, IDnum); unsigned char *readMarker = callocOrExit(1 + maxReadIndex / 8, unsigned char); Coordinate total = 0; velvetLog("Computing read to node mapping array sizes\n"); #ifdef _OPENMP #pragma omp parallel for reduction(+:total) #endif for (nodeIndex = 0; nodeIndex < maxNodeIndex; nodeIndex++) { Node *node; ShortReadMarker *nodeArray; IDnum nodeReadCount; IDnum readIndex; node = getNodeInGraph(graph, nodeIndex - nodeCount(graph)); if (node == NULL) continue; nodeArray = getNodeReads(node, graph); nodeReadCount = getNodeReadCount(node, graph); // Short reads for (readIndex = 0; readIndex < nodeReadCount; readIndex++) { ShortReadMarker *shortMarker; IDnum readID; shortMarker = getShortReadMarkerAtIndex(nodeArray, readIndex); readID = getShortReadMarkerID(shortMarker); #ifdef _OPENMP #pragma omp atomic #endif readNodeCounts[readID]++; total++; } } for (nodeIndex = 0; nodeIndex < maxNodeIndex; nodeIndex++) { Node *node; PassageMarkerI marker; node = getNodeInGraph(graph, nodeIndex - nodeCount(graph)); if (node == NULL) continue; // Long reads for (marker = getMarker(node); marker != NULL_IDX; marker = getNextInNode(marker)) { IDnum readIndex = getPassageMarkerSequenceID(marker);; if (readIndex < 0) continue; const unsigned int idx = readIndex / 8; const unsigned int mask = 1 << (readIndex & 7); if (readMarker[idx] & mask) continue; readNodeCounts[readIndex]++; total++; readMarker[idx] |= mask; } // Clean up marker array for (marker = getMarker(node); marker != NULL_IDX; marker = getNextInNode(marker)) { IDnum readIndex = getPassageMarkerSequenceID(marker); if (readIndex > 0) // No need to go bit-wise readMarker[readIndex / 8] = 0; } } *totalCount = total; free(readMarker); return readNodeCounts; } static ReadOccurence **allocateReadToNodeTables(IDnum * readNodeCounts, Coordinate totalCount, ReadOccurence **readNodesArray) { Coordinate offset = 0; IDnum readIndex; IDnum maxReadIndex = sequenceCount(graph) + 1; ReadOccurence **readNodes = callocOrExit(maxReadIndex, ReadOccurence *); *readNodesArray = callocOrExit(totalCount, ReadOccurence); for (readIndex = 1; readIndex < maxReadIndex; readIndex++) { if (readNodeCounts[readIndex] != 0) { readNodes[readIndex] = *readNodesArray + offset; offset += readNodeCounts[readIndex]; readNodeCounts[readIndex] = 0; } } return readNodes; } static void computePartialReadToNodeMappingShort(IDnum nodeID, ReadOccurence ** readNodes, IDnum * readNodeCounts) { ShortReadMarker *shortMarker; IDnum index, readIndex; ReadOccurence *readArray, *readOccurence; Node *node = getNodeInGraph(graph, nodeID); ShortReadMarker *nodeArray = getNodeReads(node, graph); IDnum nodeReadCount = getNodeReadCount(node, graph); for (index = 0; index < nodeReadCount; index++) { shortMarker = getShortReadMarkerAtIndex(nodeArray, index); readIndex = getShortReadMarkerID(shortMarker); readArray = readNodes[readIndex]; #ifdef _OPENMP lockRead(readIndex); #endif readOccurence = &readArray[readNodeCounts[readIndex]]; readOccurence->nodeID = nodeID; readOccurence->position = getShortReadMarkerPosition(shortMarker); readOccurence->offset = getShortReadMarkerOffset(shortMarker); readNodeCounts[readIndex]++; #ifdef _OPENMP unLockRead(readIndex); #endif } } 
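/* Exposition note: the readMarker buffers threaded through the long-read
 * code below are plain bitsets; bit (readIndex & 7) of byte readIndex / 8
 * records whether a read was already seen on the current node.  The two
 * helpers below are hypothetical -- added only to document the idiom, and
 * not called by Velvet itself: */
static inline boolean testReadMarkerBit(const unsigned char *bits, IDnum readIndex)
{
	return (bits[readIndex / 8] >> (readIndex & 7)) & 1;
}

static inline void setReadMarkerBit(unsigned char *bits, IDnum readIndex)
{
	bits[readIndex / 8] |= (unsigned char) (1 << (readIndex & 7));
}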
static void computePartialReadToNodeMappingLong(IDnum nodeID, ReadOccurence ** readNodes, IDnum * readNodeCounts, unsigned char *readMarker, ReadSet * reads) { IDnum readIndex; ReadOccurence *readArray, *readOccurence; Node *node = getNodeInGraph(graph, nodeID); PassageMarkerI marker; for (marker = getMarker(node); marker != NULL_IDX; marker = getNextInNode(marker)) { readIndex = getPassageMarkerSequenceID(marker); if (readIndex <= 0 || reads->categories[readIndex - 1] == REFERENCE) continue; const unsigned int idx = readIndex / 8; const unsigned int mask = 1 << (readIndex & 7); if (readMarker[idx] & mask) { readArray = readNodes[readIndex]; readOccurence = &readArray[readNodeCounts[readIndex] - 1]; readOccurence->position = -1; readOccurence->offset = -1; } else { readArray = readNodes[readIndex]; readOccurence = &readArray[readNodeCounts[readIndex]]; readOccurence->nodeID = nodeID; readOccurence->position = getStartOffset(marker); readOccurence->offset = getPassageMarkerStart(marker); readNodeCounts[readIndex]++; readMarker[idx] |= mask; } } for (marker = getMarker(node); marker != NULL_IDX; marker = getNextInNode(marker)) { readIndex = getPassageMarkerSequenceID(marker); if (readIndex > 0) // No need to go bit-wise readMarker[readIndex / 8] = 0; } } static ReadOccurence **computeReadToNodeMappings(IDnum * readNodeCounts, ReadSet * reads, Coordinate totalCount, ReadOccurence **readNodesArray) { unsigned char *readMarker; IDnum nodeID; IDnum nodes = nodeCount(graph); ReadOccurence **readNodes = allocateReadToNodeTables(readNodeCounts, totalCount, readNodesArray); velvetLog("Computing read to node mappings\n"); #ifdef _OPENMP createReadsLocks(); #pragma omp parallel for #endif for (nodeID = -nodes; nodeID <= nodes; nodeID++) if (nodeID != 0 && getNodeInGraph(graph, nodeID)) computePartialReadToNodeMappingShort(nodeID, readNodes, readNodeCounts); #ifdef _OPENMP free(readsLocks); readsLocks = NULL; #endif readMarker = callocOrExit(1 + sequenceCount(graph) / 8, unsigned char); for (nodeID = -nodes; nodeID <= nodes; nodeID++) if (nodeID != 0 && getNodeInGraph(graph, nodeID)) computePartialReadToNodeMappingLong(nodeID, readNodes, readNodeCounts, readMarker, reads); free(readMarker); return readNodes; } static unsigned char * countCoOccurences(IDnum * coOccurencesCount, ReadOccurence ** readNodes, IDnum * readNodeCounts, IDnum * readPairs, Category * cats) { IDnum readIndex, readPairIndex; IDnum readNodeCount; IDnum readOccurenceIndex, readPairOccurenceIndex; ReadOccurence * readOccurence, *readPairOccurence; unsigned char *interestingReads = callocOrExit(1 + sequenceCount(graph) / 8, unsigned char); Category libID; for (libID = 0; libID < CATEGORIES + 1; libID++) coOccurencesCount[libID] = 0; for (readIndex = 0; readIndex < sequenceCount(graph); readIndex++) { // Eliminating dodgy, unpaired, already counted or user-specified reads if ( readPairs[readIndex] < readIndex || getInsertLength(graph, cats[readIndex]) > -1) continue; // Check for co-occurence // We know that for each read the read occurences are ordered by increasing node ID // Therefore one list is followed by increasing index, whereas the other is followed // by decreasing index libID = cats[readIndex] / 2; readPairIndex = readPairs[readIndex]; readOccurenceIndex = 0; readOccurence = readNodes[readIndex + 1]; readNodeCount = readNodeCounts[readIndex + 1]; readPairOccurenceIndex = readNodeCounts[readPairIndex + 1] - 1; readPairOccurence = &(readNodes[readPairIndex + 1][readPairOccurenceIndex]); while (readOccurenceIndex < 
readNodeCount && readPairOccurenceIndex >= 0) { if (readOccurence->nodeID == -readPairOccurence->nodeID) { if (readOccurence->position > 0 && readPairOccurence->position > 0) { coOccurencesCount[libID]++; interestingReads[readIndex / 8] |= 1 << (readIndex & 7); break; } else { readOccurence++; readOccurenceIndex++; readPairOccurence--; readPairOccurenceIndex--; } } else if (readOccurence->nodeID < -readPairOccurence->nodeID) { readOccurence++; readOccurenceIndex++; } else { readPairOccurence--; readPairOccurenceIndex--; } } } return interestingReads; } static void measureCoOccurences(IDnum ** coOccurences, unsigned char * interestingReads, ReadOccurence ** readNodes, IDnum * readNodeCounts, IDnum * readPairs, Category * cats) { IDnum coOccurencesIndex[CATEGORIES + 1]; IDnum observationIndex; IDnum readIndex, readPairIndex; IDnum readNodeCount; IDnum readOccurenceIndex, readPairOccurenceIndex; ReadOccurence * readOccurence, *readPairOccurence; Category libID; for (libID = 0; libID < CATEGORIES + 1; libID++) coOccurencesIndex[libID] = 0; for (readIndex = 0; readIndex < sequenceCount(graph); readIndex++) { // Eliminating dodgy, unpaired, already counted or user-specified reads if (!(interestingReads[readIndex / 8] & (1 << (readIndex & 7)))) continue; // Find co-occurence // We know that for each read the read occurences are ordered by increasing node ID libID = cats[readIndex]/2; readPairIndex = readPairs[readIndex]; observationIndex = coOccurencesIndex[libID]; readOccurence = readNodes[readIndex + 1]; readOccurenceIndex = 0; readNodeCount = readNodeCounts[readIndex + 1]; readPairOccurenceIndex = readNodeCounts[readPairIndex + 1] - 1; readPairOccurence = &(readNodes[readPairIndex + 1][readPairOccurenceIndex]); while (readOccurenceIndex < readNodeCount && readPairOccurenceIndex >= 0) { if (readOccurence->nodeID == -readPairOccurence->nodeID) { if (readOccurence->position > 0 && readPairOccurence->position > 0) { coOccurences[libID][observationIndex] = getNodeLength(getNodeInGraph(graph, readOccurence->nodeID)) + getWordLength(graph) - 1 - (readOccurence->position - readOccurence->offset) - (readPairOccurence->position - readPairOccurence->offset); coOccurencesIndex[libID]++; break; } else { readOccurence++; readOccurenceIndex++; readPairOccurence--; readPairOccurenceIndex--; } } else if (readOccurence->nodeID < -readPairOccurence->nodeID) { readOccurence++; readOccurenceIndex++; } else { readPairOccurence--; readPairOccurenceIndex--; } } } } int compareReadOccurences(const void *A, const void * B) { IDnum * cA = (IDnum *) A; IDnum * cB = (IDnum *) B; if (*cA > *cB) return 1; if (*cA == *cB) return 0; return -1; } static void estimateLibraryInsertLength(IDnum * coOccurences, IDnum coOccurencesCount, Category libID) { Coordinate median, variance; IDnum index; int counter = 0; qsort(coOccurences, coOccurencesCount, sizeof(IDnum), compareReadOccurences); median = coOccurences[coOccurencesCount / 2]; // Modified variance around the median (proxy for expected value) // interval censoring variance = 0; for (index = 0; index < coOccurencesCount; index++) { if (coOccurences[index] > 0 && coOccurences[index] < 5 * median) { variance += (coOccurences[index] - median) * (coOccurences[index] - median); counter++; } } if (counter) variance /= counter; else { variance = 0; for (index = 0; index < coOccurencesCount; index++) variance += (coOccurences[index] - median) * (coOccurences[index] - median); variance /= coOccurencesCount; } // To avoid subsequent divisions by zero if (variance == 0) variance = 1; 
velvetLog("Paired-end library %i has length: %lli, sample standard deviation: %lli\n", libID + 1, (long long) median, (long long) sqrt(variance)); setInsertLengths(graph, libID, median, sqrt(variance)); estimated[libID] = true; } static void estimateLibraryInsertLengths(IDnum ** coOccurences, IDnum * coOccurencesCounts) { Category libID; for (libID = 0; libID < CATEGORIES + 1; libID++) estimated[libID] = false; for (libID = 0; libID < CATEGORIES + 1; libID++) if (coOccurencesCounts[libID] > 0) estimateLibraryInsertLength(coOccurences[libID], coOccurencesCounts[libID], libID); } static void estimateMissingInsertLengths(ReadOccurence ** readNodes, IDnum * readNodeCounts, IDnum * readPairs, Category * cats) { IDnum * coOccurences[CATEGORIES + 1]; IDnum coOccurencesCounts[CATEGORIES + 1]; Category libID; velvetLog("Estimating library insert lengths...\n"); unsigned char * interestingReads = countCoOccurences(coOccurencesCounts, readNodes, readNodeCounts, readPairs, cats); for (libID = 0; libID < CATEGORIES + 1; libID++) coOccurences[libID] = callocOrExit(coOccurencesCounts[libID], IDnum); measureCoOccurences(coOccurences, interestingReads, readNodes, readNodeCounts, readPairs, cats); estimateLibraryInsertLengths(coOccurences, coOccurencesCounts); for (libID = 0; libID < CATEGORIES + 1; libID++) free(coOccurences[libID]); free(interestingReads); velvetLog("Done\n"); } static void createTwinConnection(IDnum nodeID, IDnum node2ID, Connection * connect) { Connection *newConnection = allocateConnection(); IDnum nodeIndex = nodeID + nodeCount(graph); // Fill in newConnection->distance = connect->distance; newConnection->variance = connect->variance; newConnection->direct_count = connect->direct_count; newConnection->paired_count = connect->paired_count; newConnection->destination = getNodeInGraph(graph, node2ID); // Batch to twin newConnection->twin = connect; connect->twin = newConnection; // Insert in scaffold newConnection->left = NULL; newConnection->right = scaffold[nodeIndex]; if (scaffold[nodeIndex] != NULL) scaffold[nodeIndex]->left = newConnection; scaffold[nodeIndex] = newConnection; } Connection *createNewConnection(IDnum nodeID, IDnum node2ID, IDnum direct_count, IDnum paired_count, Coordinate distance, double variance) { Node *destination = getNodeInGraph(graph, node2ID); IDnum nodeIndex = nodeID + nodeCount(graph); Connection *connect = allocateConnection(); // Fill in connect->destination = destination; connect->direct_count = direct_count; connect->paired_count = paired_count; connect->distance = (double) distance; connect->variance = variance; // Insert in scaffold connect->left = NULL; connect->right = scaffold[nodeIndex]; if (scaffold[nodeIndex] != NULL) scaffold[nodeIndex]->left = connect; scaffold[nodeIndex] = connect; // Event. 
pair up to twin if (getUniqueness(destination)) createTwinConnection(node2ID, nodeID, connect); else connect->twin = NULL; return connect; } void readjustConnection(Connection * connect, Coordinate distance, double variance, IDnum direct_count, IDnum paired_count) { connect->direct_count += direct_count; connect->paired_count += paired_count; connect->distance = (variance * connect->distance + distance * connect->variance) / (variance + connect->variance); connect->variance = (variance * connect->variance) / (variance + connect->variance); if (connect->twin != NULL) { connect->twin->distance = connect->distance; connect->twin->variance = connect->variance; connect->twin->direct_count = connect->direct_count; connect->twin->paired_count = connect->paired_count; } } ////////////////////////////////////// // Splay tree function for Connections ////////////////////////////////////// /* This function can be called only if K2 has a left child */ /* Perform a rotate between a node (K2) and its left child */ /* Update heights, then return new root */ static Connection *connectionSingleRotateWithLeft(Connection * K2) { Connection *K1; K1 = K2->left; K2->left = K1->right; K1->right = K2; return K1; /* New root */ } /* This function can be called only if K1 has a right child */ /* Perform a rotate between a node (K1) and its right child */ /* Update heights, then return new root */ static Connection *connectionSingleRotateWithRight(Connection * K1) { Connection *K2; K2 = K1->right; K1->right = K2->left; K2->left = K1; return K2; /* New root */ } /* Top-down splay procedure, */ /* not requiring destination to be in tree */ static Connection *splayConnection(Connection * T, IDnum nodeID) { Connection Header; Connection *LeftTreeMax, *RightTreeMin; if (T == NULL) return NULL; Header.left = Header.right = NULL; LeftTreeMax = RightTreeMin = &Header; while (nodeID != getNodeID(T->destination)) { if (nodeID < getNodeID(T->destination)) { if (T->left == NULL) break; if (nodeID < getNodeID(T->left->destination)) T = connectionSingleRotateWithLeft(T); if (T->left == NULL) break; /* Link right */ RightTreeMin->left = T; RightTreeMin = T; T = T->left; } else { if (T->right == NULL) break; if (nodeID > getNodeID(T->right->destination)) T = connectionSingleRotateWithRight(T); if (T->right == NULL) break; /* Link left */ LeftTreeMax->right = T; LeftTreeMax = T; T = T->right; } } /* while nodeID != T->destination */ /* Reassemble */ LeftTreeMax->right = T->left; RightTreeMin->left = T->right; T->left = Header.right; T->right = Header.left; return T; } static Connection* findOrCreateConnection(IDnum nodeID, IDnum node2ID) { Connection **T; Connection *newConnection; IDnum nodeIndex; nodeIndex = nodeID + nodeCount(graph); T = scaffold + nodeIndex; if (*T == NULL) { newConnection = allocateConnection(); newConnection->left = NULL; newConnection->right = NULL; *T = newConnection; } else { IDnum destID; *T = splayConnection(*T, node2ID); destID = getNodeID((*T)->destination); if (destID == node2ID) newConnection = *T; else { newConnection = allocateConnection(); if (node2ID < destID) { newConnection->left = (*T)->left; newConnection->right = *T; (*T)->left = NULL; } else if (node2ID > destID) { newConnection->right = (*T)->right; newConnection->left = *T; (*T)->right = NULL; } *T = newConnection; } } return newConnection; } static Connection* findConnection(IDnum nodeID, IDnum node2ID) { Connection **T; IDnum nodeIndex; nodeIndex = nodeID + nodeCount(graph); T = scaffold + nodeIndex; if (*T == NULL) return NULL; else { 
IDnum destID; *T = splayConnection(*T, node2ID); destID = getNodeID((*T)->destination); if (destID == node2ID) return *T; } return NULL; } RecycleBin *connectionStackMemory = NULL; typedef struct ConnectionStack_st ConnectionStack; struct ConnectionStack_st { Connection *connection; ConnectionStack *next; }; #ifdef _OPENMP static void initConnectionStackMemory(void) { int n = omp_get_max_threads(); #pragma omp critical { if (connectionStackMemory == NULL) connectionStackMemory = newRecycleBinArray(n, sizeof(ConnectionStack), BLOCK_SIZE); } } #endif static ConnectionStack *allocateConnectionStack(void) { #ifdef _OPENMP #ifdef DEBUG if (connectionStackMemory == NULL) { velvetLog("The memory for connection stack seems uninitialised, " "this is probably a bug, aborting.\n"); abort(); } #endif return allocatePointer(getRecycleBinInArray(connectionStackMemory, omp_get_thread_num())); #else if (connectionStackMemory == NULL) connectionStackMemory = newRecycleBin(sizeof(ConnectionStack), BLOCK_SIZE); return allocatePointer(connectionStackMemory); #endif } static void deallocateConnectionStack(ConnectionStack *stack) { #ifdef _OPENMP deallocatePointer(getRecycleBinInArray(connectionStackMemory, omp_get_thread_num()), stack); #else deallocatePointer(connectionStackMemory, stack); #endif } static void destroyConnectionStackMemory(void) { #ifdef _OPENMP destroyRecycleBinArray(connectionStackMemory); #else destroyRecycleBin(connectionStackMemory); #endif connectionStackMemory = NULL; } static void pushConnectionStack(ConnectionStack **stack, Connection *connection) { ConnectionStack *newElement; newElement = allocateConnectionStack(); newElement->connection = connection; newElement->next = *stack; *stack = newElement; } static Connection *popConnectionStack(ConnectionStack **stack) { ConnectionStack *nextElement; Connection *connection; if (*stack == NULL) return NULL; nextElement = (*stack)->next; connection = (*stack)->connection; deallocateConnectionStack(*stack); *stack = nextElement; return connection; } static void splayToList(Connection **connection) { ConnectionStack *stack = NULL; Connection *current; Connection *list = NULL; if (*connection == NULL) return; for (current = *connection; current != NULL; current = popConnectionStack(&stack)) { Connection *right; Connection *left; right = current->right; if (right != NULL) pushConnectionStack(&stack, right); left = current->left; if (left != NULL) pushConnectionStack(&stack, left); if (list != NULL) list->left = current; current->right = list; list = current; } list->left = NULL; *connection = list; } static void setAllConnectionsClean(void) { IDnum nodeID; IDnum nodes = nodeCount(graph); #ifdef _OPENMP #pragma omp parallel for #endif for (nodeID = 2 * nodes; nodeID >= 0; nodeID--) { ConnectionStack *stack = NULL; Connection **connect; Connection *current; connect = scaffold + nodeID; if (*connect == NULL) continue; for (current = *connect; current != NULL; current = popConnectionStack(&stack)) { Connection *right; Connection *left; current->clean = true; right = current->right; if (right != NULL) pushConnectionStack(&stack, right); left = current->left; if (left != NULL) pushConnectionStack(&stack, left); } } } static void fillNewConnectionInTree(Connection *connect, Node *destination, IDnum direct_count, IDnum paired_count, Coordinate distance, double variance) { connect->destination = destination; connect->direct_count = direct_count; connect->paired_count = paired_count; connect->distance = (double)distance; connect->variance = variance; } 
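/*
 * Illustrative aside (not part of Velvet): readjustConnectionInTree()
 * below, like readjustConnection() above, merges two independent
 * estimates of the same gap by inverse-variance weighting,
 *
 *     d' = (v2*d1 + v1*d2) / (v1 + v2),   v' = v1*v2 / (v1 + v2),
 *
 * i.e. each estimate is weighted by the other's variance, and the pooled
 * variance shrinks with every observation. A minimal sketch of the same
 * update on bare doubles (the helper name is hypothetical):
 */
static inline void exampleMergeEstimates(double *d1, double *v1,
					 double d2, double v2)
{
	/* Weighted mean: the lower-variance estimate dominates */
	*d1 = (v2 * *d1 + *v1 * d2) / (v2 + *v1);
	/* Pooled variance: harmonic combination, always <= min(v1, v2) */
	*v1 = (v2 * *v1) / (v2 + *v1);
}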
static void readjustConnectionInTree(Connection *connect, IDnum direct_count, IDnum paired_count, Coordinate distance, double variance) { connect->direct_count += direct_count; connect->paired_count += paired_count; connect->distance = (variance * connect->distance + distance * connect->variance) / (variance + connect->variance); connect->variance = (variance * connect->variance) / (variance + connect->variance); if (connect->twin != NULL) { connect->twin->direct_count = connect->direct_count; connect->twin->paired_count = connect->paired_count; connect->twin->distance = connect->distance; connect->twin->variance = connect->variance; } } static void createTwinConnectionInTree(IDnum nodeID, IDnum node2ID, Connection *connect) { Connection *newConnection; newConnection = findOrCreateConnection(nodeID, node2ID); if (newConnection->destination == NULL) { fillNewConnectionInTree(newConnection, getNodeInGraph(graph, node2ID), connect->direct_count, connect->paired_count, (Coordinate)connect->distance, connect->variance); // Batch to twin newConnection->twin = connect; connect->twin = newConnection; } else readjustConnectionInTree(newConnection, connect->direct_count, connect->paired_count, (Coordinate)connect->distance, connect->variance); } static void createConnection(IDnum nodeID, IDnum node2ID, IDnum direct_count, IDnum paired_count, Coordinate distance, double variance) { Connection *connect; if (getUniqueness(getNodeInGraph(graph, node2ID)) && node2ID < nodeID) { return; } #ifdef _OPENMP lockTwoNodes(nodeID, node2ID); #endif connect = findOrCreateConnection(nodeID, node2ID); if (connect->destination == NULL) { Node *destination = getNodeInGraph(graph, node2ID); fillNewConnectionInTree(connect, destination, direct_count, paired_count, distance, variance); if (getUniqueness(destination)) createTwinConnectionInTree(node2ID, nodeID, connect); else connect->twin = NULL; } else readjustConnectionInTree(connect, direct_count, paired_count, distance, variance); #ifdef _OPENMP unLockTwoNodes(nodeID, node2ID); #endif } static void projectFromSingleRead(Node * node, ReadOccurence * readOccurence, Coordinate position, Coordinate offset, Coordinate length) { Coordinate distance = 0; Node *target = getNodeInGraph(graph, -readOccurence->nodeID); double variance = 1; if (target == getTwinNode(node) || target == node) return; if (position < 0) { variance += getNodeLength(node) * getNodeLength(node) / 16; // distance += 0; } else { // variance += 0; distance += position - getNodeLength(node) / 2; } if (readOccurence->position < 0) { variance += getNodeLength(target) * getNodeLength(target) / 16; //distance += 0; } else { // variance += 0; distance += -readOccurence->position + getNodeLength(target) / 2; } if (readOccurence->offset < 0 || offset < 0) { variance += length * length / 16; //distance += 0; } else { // variance += 0; distance += readOccurence->offset - offset; } // Relative ordering if (offset > 0 && readOccurence->offset > 0) { if (offset < readOccurence->offset) { if (distance - getNodeLength(node)/2 - getNodeLength(target)/2 < -10) ; else if (distance < getNodeLength(node)/2 + getNodeLength(target)/2) createConnection(getNodeID(node), getNodeID(target), 1, 0, getNodeLength(node)/2 + getNodeLength(target)/2, variance); else createConnection(getNodeID(node), getNodeID(target), 1, 0, distance, variance); } else if (offset > readOccurence->offset) { if (-distance - getNodeLength(node)/2 - getNodeLength(target)/2 < -10) ; else if (-distance < getNodeLength(node)/2 + getNodeLength(target)/2) 
createConnection(-getNodeID(node), -getNodeID(target), 1, 0,
						 getNodeLength(node)/2 + getNodeLength(target)/2,
						 variance);
			else
				createConnection(-getNodeID(node), -getNodeID(target),
						 1, 0, -distance, variance);
		}
	} else if (offset > 0 && position > 0) {
		if (distance - offset > -getNodeLength(node)/2
		    && distance - offset + length > getNodeLength(node)/2)
			createConnection(getNodeID(node), getNodeID(target), 1, 0,
					 getNodeLength(node)/2 + getNodeLength(target)/2,
					 variance);
		else if (distance - offset < -getNodeLength(node)/2
			 && distance - offset + length < getNodeLength(node)/2)
			createConnection(-getNodeID(node), -getNodeID(target), 1, 0,
					 getNodeLength(node)/2 + getNodeLength(target)/2,
					 variance);
		else {
			createConnection(getNodeID(node), getNodeID(target), 1, 0,
					 getNodeLength(node)/2 + getNodeLength(target)/2,
					 variance);
			createConnection(-getNodeID(node), -getNodeID(target), 1, 0,
					 getNodeLength(node)/2 + getNodeLength(target)/2,
					 variance);
		}
	} else if (readOccurence->offset > 0 && readOccurence->position > 0) {
		/* NB: the else-if chain mirrors the symmetric branch above;
		   without the `else`, the trailing branch would also run
		   whenever the first test succeeds, recording the same
		   evidence twice. */
		if (-distance - readOccurence->offset > -getNodeLength(target)/2
		    && -distance - readOccurence->offset + length > getNodeLength(target)/2)
			createConnection(-getNodeID(node), -getNodeID(target), 1, 0,
					 getNodeLength(node)/2 + getNodeLength(target)/2,
					 variance);
		else if (-distance - readOccurence->offset < -getNodeLength(target)/2
			 && -distance - readOccurence->offset + length < getNodeLength(target)/2)
			createConnection(getNodeID(node), getNodeID(target), 1, 0,
					 getNodeLength(node)/2 + getNodeLength(target)/2,
					 variance);
		else {
			createConnection(getNodeID(node), getNodeID(target), 1, 0,
					 getNodeLength(node)/2 + getNodeLength(target)/2,
					 variance);
			createConnection(-getNodeID(node), -getNodeID(target), 1, 0,
					 getNodeLength(node)/2 + getNodeLength(target)/2,
					 variance);
		}
	} else {
		createConnection(getNodeID(node), getNodeID(target), 1, 0,
				 getNodeLength(node)/2 + getNodeLength(target)/2,
				 variance);
		createConnection(-getNodeID(node), -getNodeID(target), 1, 0,
				 getNodeLength(node)/2 + getNodeLength(target)/2,
				 variance);
	}
}

static void projectFromReadPair(Node * node, ReadOccurence * readOccurence,
				Coordinate position, Coordinate offset,
				Coordinate insertLength,
				double insertVariance, boolean doMatePairs)
{
	Coordinate distance = insertLength;
	/* NB: Coordinate is an integer type, so any fractional part of the
	   accumulated variance is truncated within this function. */
	Coordinate variance = insertVariance;
	Node *target = getNodeInGraph(graph, readOccurence->nodeID);
	IDnum nodeID;
	IDnum node2ID;

	if (target == getTwinNode(node) || target == node)
		return;

	nodeID = getNodeID(node);
	node2ID = getNodeID(target);
	if (getUniqueness(target) && node2ID < nodeID)
		return;

	// Check if a conflicting PE (or MP from a smaller size lib) connection
	// already exists
	if (doMatePairs) {
		Connection *reverseConnect;
#ifdef _OPENMP
		lockTwoNodes(nodeID, node2ID);
#endif
		reverseConnect = findConnection(-nodeID, -node2ID);
#ifdef _OPENMP
		unLockTwoNodes(nodeID, node2ID);
#endif
		if (reverseConnect != NULL && reverseConnect->clean
		    && reverseConnect->paired_count + reverseConnect->direct_count
		       >= UNRELIABLE_CONNECTION_CUTOFF)
			return;
	}

	if (position < 0) {
		variance += getNodeLength(node) * getNodeLength(node) / 16;
		// distance += 0;
	} else {
		// variance += 0;
		distance += position - offset - getNodeLength(node) / 2;
	}

	if (readOccurence->position < 0) {
		variance += getNodeLength(target) * getNodeLength(target) / 16;
		//distance += 0;
	} else {
		// variance += 0;
		distance += readOccurence->position - readOccurence->offset
			    - getNodeLength(target) / 2;
	}

	if (distance - getNodeLength(node)/2 - getNodeLength(target)/2
	    < -6 * sqrt(insertVariance))
		return;
	else if (distance <
getNodeLength(node)/2 + getNodeLength(target)/2) distance = getNodeLength(node)/2 + getNodeLength(target)/2; createConnection(nodeID, node2ID, 0, 1, distance, variance); } static void projectFromShortRead(Node * node, ShortReadMarker * shortMarker, IDnum * readPairs, Category * cats, ReadOccurence ** readNodes, IDnum * readNodeCounts, ShortLength * lengths, boolean * shadows, boolean doMatePairs, Category thisCat) { IDnum index; IDnum readIndex = getShortReadMarkerID(shortMarker); ReadOccurence *readArray; IDnum readPairIndex; Category cat; Coordinate position = getShortReadMarkerPosition(shortMarker); Coordinate offset = getShortReadMarkerOffset(shortMarker); Coordinate length = lengths[getShortReadMarkerID(shortMarker) - 1]; Coordinate insertLength; double insertVariance; // Going through single-read information if (!doMatePairs && readNodeCounts[readIndex] > 1) { readArray = readNodes[readIndex]; for (index = 0; index < readNodeCounts[readIndex]; index++) projectFromSingleRead(node, &readArray[index], position, offset, length); } // Going through paired read information if (readPairs == NULL) return; readPairIndex = readPairs[readIndex - 1] + 1; if (readPairIndex == 0) return; cat = cats[readIndex - 1]; insertLength = getInsertLength(graph, cat); insertVariance = getInsertLength_var(graph, cat); cat /= 2; if (shadows[cat] && cat > PEBBLE_ROUND_NUM) return; if (!shadows[cat] && !doMatePairs) { readArray = readNodes[readPairIndex]; for (index = 0; index < readNodeCounts[readPairIndex]; index++) projectFromReadPair(node, &readArray[index], position, offset, insertLength, insertVariance, false); } else if (shadows[cat] && doMatePairs && cat == thisCat) { readArray = readNodes[readPairIndex]; for (index = 0; index < readNodeCounts[readPairIndex]; index++) projectFromReadPair(node, &readArray[index], position, offset, insertLength, insertVariance, true); } } static void projectFromLongRead(Node * node, PassageMarkerI marker, IDnum * readPairs, Category * cats, ReadOccurence ** readNodes, IDnum * readNodeCounts, ShortLength * lengths) { IDnum index; IDnum readIndex = getPassageMarkerSequenceID(marker); ReadOccurence *readArray; IDnum readPairIndex; Category cat; Coordinate position = getStartOffset(marker); Coordinate offset = getPassageMarkerStart(marker); Coordinate length = lengths[getPassageMarkerSequenceID(marker) - 1]; Coordinate insertLength; double insertVariance; // Going through single-read information if (readNodeCounts[readIndex] > 1 && position > 0) { readArray = readNodes[readIndex]; for (index = 0; index < readNodeCounts[readIndex]; index++) projectFromSingleRead(node, &readArray[index], position, offset, length); } // Going through paired read information if (readPairs == NULL) return; readPairIndex = readPairs[readIndex - 1] + 1; if (readPairIndex == 0) return; cat = cats[readIndex - 1]; insertLength = getInsertLength(graph, cat); insertVariance = getInsertLength_var(graph, cat); readArray = readNodes[readPairIndex]; for (index = 0; index < readNodeCounts[readPairIndex]; index++) projectFromReadPair(node, &readArray[index], position, offset, insertLength, insertVariance, false); } static void projectFromNode(IDnum nodeID, ReadOccurence ** readNodes, IDnum * readNodeCounts, IDnum * readPairs, Category * cats, boolean * dubious, ShortLength * lengths, boolean * shadows, boolean doMatePairs, Category thisCat) { IDnum index; ShortReadMarker *nodeArray, *shortMarker; PassageMarkerI marker; Node *node; IDnum nodeReadCount; node = getNodeInGraph(graph, nodeID); if (node == NULL || 
!getUniqueness(node)) return; nodeArray = getNodeReads(node, graph); nodeReadCount = getNodeReadCount(node, graph); for (index = 0; index < nodeReadCount; index++) { shortMarker = getShortReadMarkerAtIndex(nodeArray, index); if (dubious[getShortReadMarkerID(shortMarker) - 1]) continue; projectFromShortRead(node, shortMarker, readPairs, cats, readNodes, readNodeCounts, lengths, shadows, doMatePairs, thisCat); } if (!doMatePairs) for (marker = getMarker(node); marker != NULL_IDX; marker = getNextInNode(marker)) { if (getPassageMarkerSequenceID(marker) > 0) projectFromLongRead(node, marker, readPairs, cats, readNodes, readNodeCounts, lengths); } } static Connection **computeNodeToNodeMappings(ReadOccurence ** readNodes, IDnum * readNodeCounts, IDnum * readPairs, Category * cats, boolean * dubious, boolean * shadows, ShortLength * lengths) { IDnum nodeID; IDnum nodes = nodeCount(graph); struct timeval start, end, diff; Category cat; boolean hasShadow; scaffold = callocOrExit(2 * nodes + 1, Connection *); velvetLog("Computing direct node to node mappings\n"); gettimeofday(&start, NULL); #ifdef _OPENMP createNodeLocks(graph); int threads = omp_get_max_threads(); if (threads > 32) threads = 32; #pragma omp parallel for num_threads(threads) #endif for (nodeID = -nodes; nodeID <= nodes; nodeID++) { if (nodeID % 10000 == 0) velvetLog("Scaffolding node %li\n", (long) nodeID); projectFromNode(nodeID, readNodes, readNodeCounts, readPairs, cats, dubious, lengths, shadows, false, 0); } #ifdef _OPENMP initConnectionStackMemory(); #endif hasShadow = false; for (cat = 0; cat < CATEGORIES; cat++) if (shadows[cat]) { hasShadow = true; break; } if (hasShadow) { for (cat = 0; cat < CATEGORIES; cat++) { setAllConnectionsClean(); if (!shadows[cat]) continue; velvetLog("Scaffolding MP library %i\n", cat); #ifdef _OPENMP #pragma omp parallel for #endif for (nodeID = -nodes; nodeID <= nodes; nodeID++) projectFromNode(nodeID, readNodes, readNodeCounts, readPairs, cats, dubious, lengths, shadows, true, cat); } } #ifdef _OPENMP #pragma omp parallel for #endif for (nodeID = 2 * nodes; nodeID >= 0; nodeID--) splayToList(scaffold + nodeID); destroyConnectionStackMemory(); #ifdef _OPENMP free(nodeLocks); nodeLocks = NULL; #endif gettimeofday(&end, NULL); timersub(&end, &start, &diff); velvetLog(" === Nodes Scaffolded in %ld.%06ld s\n", (long) diff.tv_sec, (long) diff.tv_usec); PEBBLE_ROUND_NUM++; return scaffold; } static IDnum **countShortReads(Graph * graph, ReadSet * reads) { IDnum **counts = callocOrExit(CATEGORIES + 1, IDnum *); Category cat; IDnum nodeIndex; IDnum nodes = nodeCount(graph); Node *node; ShortReadMarker *array, *marker; IDnum readCount, readIndex, readID; // Allocate memory where needed for (cat = 0; cat <= CATEGORIES; cat++) if (getInsertLength(graph, cat) > 0) counts[cat] = callocOrExit(2 * nodeCount(graph) + 1, IDnum); // Start fillin' for (nodeIndex = 0; nodeIndex < 2 * nodes + 1; nodeIndex++) { node = getNodeInGraph(graph, nodeIndex - nodes); if (node == NULL || !getUniqueness(node)) continue; array = getNodeReads(node, graph); readCount = getNodeReadCount(node, graph); for (readIndex = 0; readIndex < readCount; readIndex++) { marker = getShortReadMarkerAtIndex(array, readIndex); readID = getShortReadMarkerID(marker); cat = reads->categories[readID - 1]; if (cat % 2 == 1 && counts[cat / 2] != NULL) counts[cat / 2][nodeIndex]++; } } return counts; } static void removeUnreliableConnections(ReadSet * reads, boolean *shadows) { IDnum maxNodeIndex = nodeCount(graph) * 2 + 1; IDnum index; Connection 
*connect, *next; Category cat; IDnum **counts = countShortReads(graph, reads); IDnum nodes = nodeCount(graph); for (index = 0; index < maxNodeIndex; index++) { for (connect = scaffold[index]; connect != NULL; connect = next) { next = connect->right; if (!testConnection(index - nodes, connect, counts, shadows)) destroyConnection(connect, index - nodes); } } // Free memory for (cat = 0; cat <= CATEGORIES; cat++) if (counts[cat]) free(counts[cat]); free(counts); } void printConnections(ReadSet * reads, boolean * shadows) { IDnum maxNodeIndex = nodeCount(graph) * 2 + 1; IDnum index; Connection *connect, *next; Node *node; IDnum **counts = countShortReads(graph, reads); IDnum nodes = nodeCount(graph); Category cat; puts("CONNECT IDA IDB dcount pcount dist lengthA lengthB var countA countB coordA coordB real exp distance test"); for (index = 0; index < maxNodeIndex; index++) { node = getNodeInGraph(graph, index - nodeCount(graph)); for (connect = scaffold[index]; connect != NULL; connect = next) { next = getNextConnection(connect); printf ("CONNECT %ld %ld %ld %ld %lld %lld %lld %f %ld %ld", (long) index - nodeCount(graph), (long) getNodeID(connect->destination), (long) connect->direct_count, (long) connect->paired_count, (long long) getConnectionDistance(connect), (long long) getNodeLength(node), (long long) getNodeLength(connect->destination), connect->variance, (long) getNodeReadCount(node, graph), (long) getNodeReadCount(connect->destination, graph)); if (markerCount(node) == 1 && markerCount(connect->destination) == 1) printf(" %lld %lld %lld", (long long) getPassageMarkerFinish(getMarker (node)), (long long) getPassageMarkerFinish(getMarker (connect-> destination)), (long long) (getPassageMarkerFinish (getMarker(node)) - getPassageMarkerFinish (getMarker (connect->destination)))); else printf(" ? ? 
?"); printf(" %ld", (long) expectedNumberOfConnections(index - nodeCount (graph), connect, counts, 0)); printf(" %lld", (long long) (getConnectionDistance(connect) - (getNodeLength(node) + getNodeLength (connect->destination)) / 2)); if (testConnection(index - nodes, connect, counts, shadows)) puts(" OK"); else puts(" NG"); } } for (cat = 0; cat <= CATEGORIES; cat++) if (counts[cat]) free(counts[cat]); free(counts); } void buildScaffold(Graph * argGraph, ReadSet * reads, boolean * dubious, boolean * shadows) { IDnum *readPairs; Category *cats; IDnum *readNodeCounts; ReadOccurence **readNodes; ReadOccurence *readNodesArray = NULL; ShortLength *lengths = getSequenceLengths(reads, getWordLength(argGraph)); Coordinate totalCount = 0; graph = argGraph; readPairs = reads->mateReads; cats = reads->categories; // Prepare primary scaffold readNodeCounts = computeReadToNodeCounts(&totalCount); readNodes = computeReadToNodeMappings(readNodeCounts, reads, totalCount, &readNodesArray); estimateMissingInsertLengths(readNodes, readNodeCounts, readPairs, cats); scaffold = computeNodeToNodeMappings(readNodes, readNodeCounts, readPairs, cats, dubious, shadows, lengths); removeUnreliableConnections(reads, shadows); free(readNodesArray); free(readNodes); free(readNodeCounts); free(lengths); } //DEBUG void printScaffold(Graph * argGraph, ReadSet * reads, boolean * dubious, boolean * shadows) { IDnum *readPairs; Category *cats; IDnum *readNodeCounts; ReadOccurence **readNodes; ReadOccurence *readNodesArray = NULL; ShortLength *lengths = getSequenceLengths(reads, getWordLength(argGraph)); Coordinate totalCount = 0; graph = argGraph; readPairs = reads->mateReads; cats = reads->categories; // Prepare primary scaffold readNodeCounts = computeReadToNodeCounts(&totalCount); readNodes = computeReadToNodeMappings(readNodeCounts, reads, totalCount, &readNodesArray); estimateMissingInsertLengths(readNodes, readNodeCounts, readPairs, cats); scaffold = computeNodeToNodeMappings(readNodes, readNodeCounts, readPairs, cats, dubious, shadows, lengths); printConnections(reads, shadows); free(readNodesArray); free(readNodes); free(readNodeCounts); free(lengths); cleanScaffoldMemory(); } void setUnreliableConnectionCutoff(int val) { UNRELIABLE_CONNECTION_CUTOFF = (IDnum) val; } void cleanScaffoldMemory() { Category libID; for (libID = 0; libID < CATEGORIES + 1; libID++) if (estimated[libID]) setInsertLengths(graph, libID, -1, -1); destroyRecycleBin(connectionMemory); free(scaffold); connectionMemory = NULL; } void setPairedExpFraction(double x) { paired_exp_fraction = x; }
info.c
// RUN: %libomptarget-compile-nvptx64-nvidia-cuda -gline-tables-only && env LIBOMPTARGET_INFO=23 %libomptarget-run-nvptx64-nvidia-cuda 2>&1 | %fcheck-nvptx64-nvidia-cuda -allow-empty -check-prefix=INFO #include <stdio.h> #include <omp.h> #define N 64 int main() { int A[N]; int B[N]; int C[N]; int val = 1; // INFO: CUDA device 0 info: Device supports up to {{.*}} CUDA blocks and {{.*}} threads with a warp size of {{.*}} // INFO: Libomptarget device 0 info: Entering OpenMP data region at info.c:33:1 with 3 arguments: // INFO: Libomptarget device 0 info: alloc(A[0:64])[256] // INFO: Libomptarget device 0 info: tofrom(B[0:64])[256] // INFO: Libomptarget device 0 info: to(C[0:64])[256] // INFO: Libomptarget device 0 info: OpenMP Host-Device pointer mappings after block at info.c:33:1: // INFO: Libomptarget device 0 info: Host Ptr Target Ptr Size (B) RefCount Declaration // INFO: Libomptarget device 0 info: {{.*}} {{.*}} 256 1 C[0:64] at info.c:11:7 // INFO: Libomptarget device 0 info: {{.*}} {{.*}} 256 1 B[0:64] at info.c:10:7 // INFO: Libomptarget device 0 info: {{.*}} {{.*}} 256 1 A[0:64] at info.c:9:7 // INFO: Libomptarget device 0 info: Entering OpenMP kernel at info.c:34:1 with 1 arguments: // INFO: Libomptarget device 0 info: firstprivate(val)[4] // INFO: CUDA device 0 info: Launching kernel {{.*}} with {{.*}} and {{.*}} threads in {{.*}} mode // INFO: Libomptarget device 0 info: OpenMP Host-Device pointer mappings after block at info.c:34:1: // INFO: Libomptarget device 0 info: Host Ptr Target Ptr Size (B) RefCount Declaration // INFO: Libomptarget device 0 info: 0x{{.*}} 0x{{.*}} 256 1 C[0:64] at info.c:11:7 // INFO: Libomptarget device 0 info: 0x{{.*}} 0x{{.*}} 256 1 B[0:64] at info.c:10:7 // INFO: Libomptarget device 0 info: 0x{{.*}} 0x{{.*}} 256 1 A[0:64] at info.c:9:7 // INFO: Libomptarget device 0 info: Exiting OpenMP data region at info.c:33:1 #pragma omp target data map(alloc:A[0:N]) map(tofrom:B[0:N]) map(to:C[0:N]) #pragma omp target firstprivate(val) { val = 1; } return 0; }
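/*
 * Illustrative aside (not part of the libomptarget test suite): the map
 * kinds checked above behave as follows -- "alloc" reserves device memory
 * without copying, "to" copies host-to-device on entry, "tofrom" copies
 * both ways, and firstprivate scalars are passed by value. A minimal
 * standalone program (hypothetical; compile with e.g.
 * clang -fopenmp -fopenmp-targets=nvptx64-nvidia-cuda) that exercises
 * "to"/"from" and is checkable without FileCheck:
 */
#include <stdio.h>

#define M 8

int main(void) {
  int in[M], out[M];
  for (int i = 0; i < M; ++i)
    in[i] = i;

  // "to": host->device copy only; "from": device->host copy on exit.
  // Without an offload device the region simply runs on the host.
#pragma omp target map(to: in[0:M]) map(from: out[0:M])
  for (int i = 0; i < M; ++i)
    out[i] = 2 * in[i];

  printf("out[3] = %d\n", out[3]); // expect 6
  return 0;
}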
DRB003-antidep2-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* A two-level loop nest with loop carried anti-dependence on the outer level. Data race pair: a[i][j]@67:7 vs. a[i+1][j]@67:18 */ #include <stdio.h> int main(int argc,char *argv[]) { int i, j; int len = 20; double a[20][20]; for (i=0; i< len; i++) for (j=0; j<len; j++) a[i][j] = 0.5; #pragma omp parallel for private(j) for (i = 0; i < len - 1; i += 1) { for (j = 0; j < len ; j += 1) { a[i][j] += a[i + 1][j]; } } printf ("a[10][10]=%f\n", a[10][10]); return 0; }
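/*
 * Illustrative aside (not part of DataRaceBench): the race above is a
 * loop-carried anti-dependence -- iteration i reads a[i+1][j] while
 * iteration i+1 writes it. In the serial loop every a[i+1][j] is read
 * before it is overwritten, so reading from an untouched copy preserves
 * the serial result while making iterations independent. A hypothetical
 * race-free rewrite:
 */
#include <stdio.h>

int main(void)
{
  int i, j;
  int len = 20;
  double a[20][20], b[20][20];

  for (i = 0; i < len; i++)
    for (j = 0; j < len; j++)
      a[i][j] = b[i][j] = 0.5;

  /* All reads go to b, all writes to a: no loop-carried dependence. */
#pragma omp parallel for private(j)
  for (i = 0; i < len - 1; i += 1)
    for (j = 0; j < len; j += 1)
      a[i][j] += b[i + 1][j];

  printf("a[10][10]=%f\n", a[10][10]); /* 1.000000, matching serial */
  return 0;
}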
omp-low.c
/* Lowering pass for OpenMP directives. Converts OpenMP directives into explicit calls to the runtime library (libgomp) and data marshalling to implement data sharing and copying clauses. Contributed by Diego Novillo <dnovillo@redhat.com> Copyright (C) 2005, 2006 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "tree.h" #include "rtl.h" #include "tree-gimple.h" #include "tree-inline.h" #include "langhooks.h" #include "diagnostic.h" #include "tree-flow.h" #include "timevar.h" #include "flags.h" #include "function.h" #include "expr.h" #include "toplev.h" #include "tree-pass.h" #include "ggc.h" #include "except.h" /* Lowering of OpenMP parallel and workshare constructs proceeds in two phases. The first phase scans the function looking for OMP statements and then for variables that must be replaced to satisfy data sharing clauses. The second phase expands code for the constructs, as well as re-gimplifying things when variables have been replaced with complex expressions. Final code generation is done by pass_expand_omp. The flowgraph is scanned for parallel regions which are then moved to a new function, to be invoked by the thread library. */ /* Context structure. Used to store information about each parallel directive in the code. */ typedef struct omp_context { /* This field must be at the beginning, as we do "inheritance": Some callback functions for tree-inline.c (e.g., omp_copy_decl) receive a copy_body_data pointer that is up-casted to an omp_context pointer. */ copy_body_data cb; /* The tree of contexts corresponding to the encountered constructs. */ struct omp_context *outer; tree stmt; /* Map variables to fields in a structure that allows communication between sending and receiving threads. */ splay_tree field_map; tree record_type; tree sender_decl; tree receiver_decl; /* A chain of variables to add to the top-level block surrounding the construct. In the case of a parallel, this is in the child function. */ tree block_vars; /* What to do with variables with implicitly determined sharing attributes. */ enum omp_clause_default_kind default_kind; /* Nesting depth of this context. Used to beautify error messages re invalid gotos. The outermost ctx is depth 1, with depth 0 being reserved for the main body of the function. */ int depth; /* True if this parallel directive is nested within another. */ bool is_nested; } omp_context; /* A structure describing the main elements of a parallel loop. 
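V is the iteration variable; N1 and N2 the initial and final values, with
   N2 adjusted so that COND_CODE is always LT_EXPR or GT_EXPR; STEP the
   increment, negated for decrementing loops; CHUNK_SIZE the schedule
   chunk expression, if any (see extract_omp_for_data below).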
*/ struct omp_for_data { tree v, n1, n2, step, chunk_size, for_stmt; enum tree_code cond_code; tree pre; bool have_nowait, have_ordered; enum omp_clause_schedule_kind sched_kind; }; static splay_tree all_contexts; static int parallel_nesting_level; struct omp_region *root_omp_region; static void scan_omp (tree *, omp_context *); static void lower_omp (tree *, omp_context *); static tree lookup_decl_in_outer_ctx (tree, omp_context *); static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *); /* Find an OpenMP clause of type KIND within CLAUSES. */ static tree find_omp_clause (tree clauses, enum omp_clause_code kind) { for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses)) if (OMP_CLAUSE_CODE (clauses) == kind) return clauses; return NULL_TREE; } /* Return true if CTX is for an omp parallel. */ static inline bool is_parallel_ctx (omp_context *ctx) { return TREE_CODE (ctx->stmt) == OMP_PARALLEL; } /* Return true if REGION is a combined parallel+workshare region. */ static inline bool is_combined_parallel (struct omp_region *region) { return region->is_combined_parallel; } /* Extract the header elements of parallel loop FOR_STMT and store them into *FD. */ static void extract_omp_for_data (tree for_stmt, struct omp_for_data *fd) { tree t; fd->for_stmt = for_stmt; fd->pre = NULL; t = OMP_FOR_INIT (for_stmt); gcc_assert (TREE_CODE (t) == MODIFY_EXPR); fd->v = TREE_OPERAND (t, 0); gcc_assert (DECL_P (fd->v)); gcc_assert (TREE_CODE (TREE_TYPE (fd->v)) == INTEGER_TYPE); fd->n1 = TREE_OPERAND (t, 1); t = OMP_FOR_COND (for_stmt); fd->cond_code = TREE_CODE (t); gcc_assert (TREE_OPERAND (t, 0) == fd->v); fd->n2 = TREE_OPERAND (t, 1); switch (fd->cond_code) { case LT_EXPR: case GT_EXPR: break; case LE_EXPR: fd->n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->n2), fd->n2, build_int_cst (TREE_TYPE (fd->n2), 1)); fd->cond_code = LT_EXPR; break; case GE_EXPR: fd->n2 = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->n2), fd->n2, build_int_cst (TREE_TYPE (fd->n2), 1)); fd->cond_code = GT_EXPR; break; default: gcc_unreachable (); } t = OMP_FOR_INCR (fd->for_stmt); gcc_assert (TREE_CODE (t) == MODIFY_EXPR); gcc_assert (TREE_OPERAND (t, 0) == fd->v); t = TREE_OPERAND (t, 1); gcc_assert (TREE_OPERAND (t, 0) == fd->v); switch (TREE_CODE (t)) { case PLUS_EXPR: fd->step = TREE_OPERAND (t, 1); break; case MINUS_EXPR: fd->step = TREE_OPERAND (t, 1); fd->step = fold_build1 (NEGATE_EXPR, TREE_TYPE (fd->step), fd->step); break; default: gcc_unreachable (); } fd->have_nowait = fd->have_ordered = false; fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC; fd->chunk_size = NULL_TREE; for (t = OMP_FOR_CLAUSES (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t)) switch (OMP_CLAUSE_CODE (t)) { case OMP_CLAUSE_NOWAIT: fd->have_nowait = true; break; case OMP_CLAUSE_ORDERED: fd->have_ordered = true; break; case OMP_CLAUSE_SCHEDULE: fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t); fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t); break; default: break; } if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME) gcc_assert (fd->chunk_size == NULL); else if (fd->chunk_size == NULL) { /* We only need to compute a default chunk size for ordered static loops and dynamic loops. */ if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC || fd->have_ordered) fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC) ? 
integer_zero_node : integer_one_node; } } /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB is the immediate dominator of PAR_ENTRY_BB, return true if there are no data dependencies that would prevent expanding the parallel directive at PAR_ENTRY_BB as a combined parallel+workshare region. When expanding a combined parallel+workshare region, the call to the child function may need additional arguments in the case of OMP_FOR regions. In some cases, these arguments are computed out of variables passed in from the parent to the child via 'struct .omp_data_s'. For instance: #pragma omp parallel for schedule (guided, i * 4) for (j ...) Is lowered into: # BLOCK 2 (PAR_ENTRY_BB) .omp_data_o.i = i; #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598) # BLOCK 3 (WS_ENTRY_BB) .omp_data_i = &.omp_data_o; D.1667 = .omp_data_i->i; D.1598 = D.1667 * 4; #pragma omp for schedule (guided, D.1598) When we outline the parallel region, the call to the child function 'bar.omp_fn.0' will need the value D.1598 in its argument list, but that value is computed *after* the call site. So, in principle we cannot do the transformation. To see whether the code in WS_ENTRY_BB blocks the combined parallel+workshare call, we collect all the variables used in the OMP_FOR header check whether they appear on the LHS of any statement in WS_ENTRY_BB. If so, then we cannot emit the combined call. FIXME. If we had the SSA form built at this point, we could merely hoist the code in block 3 into block 2 and be done with it. But at this point we don't have dataflow information and though we could hack something up here, it is really not worth the aggravation. */ static bool workshare_safe_to_combine_p (basic_block par_entry_bb, basic_block ws_entry_bb) { struct omp_for_data fd; tree par_stmt, ws_stmt; par_stmt = last_stmt (par_entry_bb); ws_stmt = last_stmt (ws_entry_bb); if (TREE_CODE (ws_stmt) == OMP_SECTIONS) return true; gcc_assert (TREE_CODE (ws_stmt) == OMP_FOR); extract_omp_for_data (ws_stmt, &fd); /* FIXME. We give up too easily here. If any of these arguments are not constants, they will likely involve variables that have been mapped into fields of .omp_data_s for sharing with the child function. With appropriate data flow, it would be possible to see through this. */ if (!is_gimple_min_invariant (fd.n1) || !is_gimple_min_invariant (fd.n2) || !is_gimple_min_invariant (fd.step) || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size))) return false; return true; } /* Collect additional arguments needed to emit a combined parallel+workshare call. WS_STMT is the workshare directive being expanded. */ static tree get_ws_args_for (tree ws_stmt) { tree t; if (TREE_CODE (ws_stmt) == OMP_FOR) { struct omp_for_data fd; tree ws_args; extract_omp_for_data (ws_stmt, &fd); ws_args = NULL_TREE; if (fd.chunk_size) { t = fold_convert (long_integer_type_node, fd.chunk_size); ws_args = tree_cons (NULL, t, ws_args); } t = fold_convert (long_integer_type_node, fd.step); ws_args = tree_cons (NULL, t, ws_args); t = fold_convert (long_integer_type_node, fd.n2); ws_args = tree_cons (NULL, t, ws_args); t = fold_convert (long_integer_type_node, fd.n1); ws_args = tree_cons (NULL, t, ws_args); return ws_args; } else if (TREE_CODE (ws_stmt) == OMP_SECTIONS) { basic_block bb = bb_for_stmt (ws_stmt); t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs)); t = tree_cons (NULL, t, NULL); return t; } gcc_unreachable (); } /* Discover whether REGION is a combined parallel+workshare region. 
*/ static void determine_parallel_type (struct omp_region *region) { basic_block par_entry_bb, par_exit_bb; basic_block ws_entry_bb, ws_exit_bb; if (region == NULL || region->inner == NULL || region->exit == NULL || region->inner->exit == NULL) return; /* We only support parallel+for and parallel+sections. */ if (region->type != OMP_PARALLEL || (region->inner->type != OMP_FOR && region->inner->type != OMP_SECTIONS)) return; /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and WS_EXIT_BB -> PAR_EXIT_BB. */ par_entry_bb = region->entry; par_exit_bb = region->exit; ws_entry_bb = region->inner->entry; ws_exit_bb = region->inner->exit; if (single_succ (par_entry_bb) == ws_entry_bb && single_succ (ws_exit_bb) == par_exit_bb && workshare_safe_to_combine_p (par_entry_bb, ws_entry_bb) && (OMP_PARALLEL_COMBINED (last_stmt (par_entry_bb)) || (last_and_only_stmt (ws_entry_bb) && last_and_only_stmt (par_exit_bb)))) { tree ws_stmt = last_stmt (ws_entry_bb); if (region->inner->type == OMP_FOR) { /* If this is a combined parallel loop, we need to determine whether or not to use the combined library calls. There are two cases where we do not apply the transformation: static loops and any kind of ordered loop. In the first case, we already open code the loop so there is no need to do anything else. In the latter case, the combined parallel loop call would still need extra synchronization to implement ordered semantics, so there would not be any gain in using the combined call. */ tree clauses = OMP_FOR_CLAUSES (ws_stmt); tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE); if (c == NULL || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC || find_omp_clause (clauses, OMP_CLAUSE_ORDERED)) { region->is_combined_parallel = false; region->inner->is_combined_parallel = false; return; } } region->is_combined_parallel = true; region->inner->is_combined_parallel = true; region->ws_args = get_ws_args_for (ws_stmt); } } /* Return true if EXPR is variable sized. */ static inline bool is_variable_sized (tree expr) { return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr))); } /* Return true if DECL is a reference type. */ static inline bool is_reference (tree decl) { return lang_hooks.decls.omp_privatize_by_reference (decl); } /* Lookup variables in the decl or field splay trees. The "maybe" form allows for the variable form to not have been entered, otherwise we assert that the variable must have been entered. */ static inline tree lookup_decl (tree var, omp_context *ctx) { splay_tree_node n; n = splay_tree_lookup (ctx->cb.decl_map, (splay_tree_key) var); return (tree) n->value; } static inline tree maybe_lookup_decl (tree var, omp_context *ctx) { splay_tree_node n; n = splay_tree_lookup (ctx->cb.decl_map, (splay_tree_key) var); return n ? (tree) n->value : NULL_TREE; } static inline tree lookup_field (tree var, omp_context *ctx) { splay_tree_node n; n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var); return (tree) n->value; } static inline tree maybe_lookup_field (tree var, omp_context *ctx) { splay_tree_node n; n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var); return n ? (tree) n->value : NULL_TREE; } /* Return true if DECL should be copied by pointer. SHARED_P is true if DECL is to be shared. */ static bool use_pointer_for_field (tree decl, bool shared_p) { if (AGGREGATE_TYPE_P (TREE_TYPE (decl))) return true; /* We can only use copy-in/copy-out semantics for shared variables when we know the value is not accessible from an outer scope. */ if (shared_p) { /* ??? 
Trivially accessible from anywhere. But why would we even be passing an address in this case? Should we simply assert this to be false, or should we have a cleanup pass that removes these from the list of mappings? */ if (TREE_STATIC (decl) || DECL_EXTERNAL (decl)) return true; /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell without analyzing the expression whether or not its location is accessible to anyone else. In the case of nested parallel regions it certainly may be. */ if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl)) return true; /* Do not use copy-in/copy-out for variables that have their address taken. */ if (TREE_ADDRESSABLE (decl)) return true; } return false; } /* Construct a new automatic decl similar to VAR. */ static tree omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx) { tree copy = build_decl (VAR_DECL, name, type); TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var); DECL_COMPLEX_GIMPLE_REG_P (copy) = DECL_COMPLEX_GIMPLE_REG_P (var); DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var); DECL_IGNORED_P (copy) = DECL_IGNORED_P (var); TREE_USED (copy) = 1; DECL_CONTEXT (copy) = current_function_decl; DECL_SEEN_IN_BIND_EXPR_P (copy) = 1; TREE_CHAIN (copy) = ctx->block_vars; ctx->block_vars = copy; return copy; } static tree omp_copy_decl_1 (tree var, omp_context *ctx) { return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx); } /* Build tree nodes to access the field for VAR on the receiver side. */ static tree build_receiver_ref (tree var, bool by_ref, omp_context *ctx) { tree x, field = lookup_field (var, ctx); /* If the receiver record type was remapped in the child function, remap the field into the new record type. */ x = maybe_lookup_field (field, ctx); if (x != NULL) field = x; x = build_fold_indirect_ref (ctx->receiver_decl); x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL); if (by_ref) x = build_fold_indirect_ref (x); return x; } /* Build tree nodes to access VAR in the scope outer to CTX. In the case of a parallel, this is a component reference; for workshare constructs this is some variable. */ static tree build_outer_var_ref (tree var, omp_context *ctx) { tree x; if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx))) x = var; else if (is_variable_sized (var)) { x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0); x = build_outer_var_ref (x, ctx); x = build_fold_indirect_ref (x); } else if (is_parallel_ctx (ctx)) { bool by_ref = use_pointer_for_field (var, false); x = build_receiver_ref (var, by_ref, ctx); } else if (ctx->outer) x = lookup_decl (var, ctx->outer); else if (is_reference (var)) /* This can happen with orphaned constructs. If var is reference, it is possible it is shared and as such valid. */ x = var; else gcc_unreachable (); if (is_reference (var)) x = build_fold_indirect_ref (x); return x; } /* Build tree nodes to access the field for VAR on the sender side. */ static tree build_sender_ref (tree var, omp_context *ctx) { tree field = lookup_field (var, ctx); return build3 (COMPONENT_REF, TREE_TYPE (field), ctx->sender_decl, field, NULL); } /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */ static void install_var_field (tree var, bool by_ref, omp_context *ctx) { tree field, type; gcc_assert (!splay_tree_lookup (ctx->field_map, (splay_tree_key) var)); type = TREE_TYPE (var); if (by_ref) type = build_pointer_type (type); field = build_decl (FIELD_DECL, DECL_NAME (var), type); /* Remember what variable this field was created for. 
This does have a side effect of making dwarf2out ignore this member, so for helpful debugging we clear it later in delete_omp_context. */ DECL_ABSTRACT_ORIGIN (field) = var; insert_field_into_struct (ctx->record_type, field); splay_tree_insert (ctx->field_map, (splay_tree_key) var, (splay_tree_value) field); } static tree install_var_local (tree var, omp_context *ctx) { tree new_var = omp_copy_decl_1 (var, ctx); insert_decl_map (&ctx->cb, var, new_var); return new_var; } /* Adjust the replacement for DECL in CTX for the new context. This means copying the DECL_VALUE_EXPR, and fixing up the type. */ static void fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug) { tree new_decl, size; new_decl = lookup_decl (decl, ctx); TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb); if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug) && DECL_HAS_VALUE_EXPR_P (decl)) { tree ve = DECL_VALUE_EXPR (decl); walk_tree (&ve, copy_body_r, &ctx->cb, NULL); SET_DECL_VALUE_EXPR (new_decl, ve); DECL_HAS_VALUE_EXPR_P (new_decl) = 1; } if (!TREE_CONSTANT (DECL_SIZE (new_decl))) { size = remap_decl (DECL_SIZE (decl), &ctx->cb); if (size == error_mark_node) size = TYPE_SIZE (TREE_TYPE (new_decl)); DECL_SIZE (new_decl) = size; size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb); if (size == error_mark_node) size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl)); DECL_SIZE_UNIT (new_decl) = size; } } /* The callback for remap_decl. Search all containing contexts for a mapping of the variable; this avoids having to duplicate the splay tree ahead of time. We know a mapping doesn't already exist in the given context. Create new mappings to implement default semantics. */ static tree omp_copy_decl (tree var, copy_body_data *cb) { omp_context *ctx = (omp_context *) cb; tree new_var; if (TREE_CODE (var) == LABEL_DECL) { new_var = create_artificial_label (); DECL_CONTEXT (new_var) = current_function_decl; insert_decl_map (&ctx->cb, var, new_var); return new_var; } while (!is_parallel_ctx (ctx)) { ctx = ctx->outer; if (ctx == NULL) return var; new_var = maybe_lookup_decl (var, ctx); if (new_var) return new_var; } if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn) return var; return error_mark_node; } /* Return the parallel region associated with STMT. */ /* Debugging dumps for parallel regions. */ void dump_omp_region (FILE *, struct omp_region *, int); void debug_omp_region (struct omp_region *); void debug_all_omp_regions (void); /* Dump the parallel region tree rooted at REGION. */ void dump_omp_region (FILE *file, struct omp_region *region, int indent) { fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index, tree_code_name[region->type]); if (region->inner) dump_omp_region (file, region->inner, indent + 4); if (region->cont) { fprintf (file, "%*sbb %d: OMP_CONTINUE\n", indent, "", region->cont->index); } if (region->exit) fprintf (file, "%*sbb %d: OMP_RETURN\n", indent, "", region->exit->index); else fprintf (file, "%*s[no exit marker]\n", indent, ""); if (region->next) dump_omp_region (file, region->next, indent); } void debug_omp_region (struct omp_region *region) { dump_omp_region (stderr, region, 0); } void debug_all_omp_regions (void) { dump_omp_region (stderr, root_omp_region, 0); } /* Create a new parallel region starting at STMT inside region PARENT. 
*/ struct omp_region * new_omp_region (basic_block bb, enum tree_code type, struct omp_region *parent) { struct omp_region *region = xcalloc (1, sizeof (*region)); region->outer = parent; region->entry = bb; region->type = type; if (parent) { /* This is a nested region. Add it to the list of inner regions in PARENT. */ region->next = parent->inner; parent->inner = region; } else { /* This is a toplevel region. Add it to the list of toplevel regions in ROOT_OMP_REGION. */ region->next = root_omp_region; root_omp_region = region; } return region; } /* Release the memory associated with the region tree rooted at REGION. */ static void free_omp_region_1 (struct omp_region *region) { struct omp_region *i, *n; for (i = region->inner; i ; i = n) { n = i->next; free_omp_region_1 (i); } free (region); } /* Release the memory for the entire omp region tree. */ void free_omp_regions (void) { struct omp_region *r, *n; for (r = root_omp_region; r ; r = n) { n = r->next; free_omp_region_1 (r); } root_omp_region = NULL; } /* Create a new context, with OUTER_CTX being the surrounding context. */ static omp_context * new_omp_context (tree stmt, omp_context *outer_ctx) { omp_context *ctx = XCNEW (omp_context); splay_tree_insert (all_contexts, (splay_tree_key) stmt, (splay_tree_value) ctx); ctx->stmt = stmt; if (outer_ctx) { ctx->outer = outer_ctx; ctx->cb = outer_ctx->cb; ctx->cb.block = NULL; ctx->depth = outer_ctx->depth + 1; } else { ctx->cb.src_fn = current_function_decl; ctx->cb.dst_fn = current_function_decl; ctx->cb.src_node = cgraph_node (current_function_decl); ctx->cb.dst_node = ctx->cb.src_node; ctx->cb.src_cfun = cfun; ctx->cb.copy_decl = omp_copy_decl; ctx->cb.eh_region = -1; ctx->cb.transform_call_graph_edges = CB_CGE_MOVE; ctx->depth = 1; } ctx->cb.decl_map = splay_tree_new (splay_tree_compare_pointers, 0, 0); return ctx; } /* Destroy a omp_context data structures. Called through the splay tree value delete callback. */ static void delete_omp_context (splay_tree_value value) { omp_context *ctx = (omp_context *) value; splay_tree_delete (ctx->cb.decl_map); if (ctx->field_map) splay_tree_delete (ctx->field_map); /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before it produces corrupt debug information. */ if (ctx->record_type) { tree t; for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t)) DECL_ABSTRACT_ORIGIN (t) = NULL; } XDELETE (ctx); } /* Fix up RECEIVER_DECL with a type that has been remapped to the child context. */ static void fixup_child_record_type (omp_context *ctx) { tree f, type = ctx->record_type; /* ??? It isn't sufficient to just call remap_type here, because variably_modified_type_p doesn't work the way we expect for record types. Testing each field for whether it needs remapping and creating a new record by hand works, however. */ for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f)) if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn)) break; if (f) { tree name, new_fields = NULL; type = lang_hooks.types.make_type (RECORD_TYPE); name = DECL_NAME (TYPE_NAME (ctx->record_type)); name = build_decl (TYPE_DECL, name, type); TYPE_NAME (type) = name; for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f)) { tree new_f = copy_node (f); DECL_CONTEXT (new_f) = type; TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb); TREE_CHAIN (new_f) = new_fields; new_fields = new_f; /* Arrange to be able to look up the receiver field given the sender field. 
*/ splay_tree_insert (ctx->field_map, (splay_tree_key) f, (splay_tree_value) new_f); } TYPE_FIELDS (type) = nreverse (new_fields); layout_type (type); } TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type); } /* Instantiate decls as necessary in CTX to satisfy the data sharing specified by CLAUSES. */ static void scan_sharing_clauses (tree clauses, omp_context *ctx) { tree c, decl; bool scan_array_reductions = false; for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) { bool by_ref; switch (OMP_CLAUSE_CODE (c)) { case OMP_CLAUSE_PRIVATE: decl = OMP_CLAUSE_DECL (c); if (!is_variable_sized (decl)) install_var_local (decl, ctx); break; case OMP_CLAUSE_SHARED: gcc_assert (is_parallel_ctx (ctx)); decl = OMP_CLAUSE_DECL (c); gcc_assert (!is_variable_sized (decl)); by_ref = use_pointer_for_field (decl, true); /* Global variables don't need to be copied, the receiver side will use them directly. */ if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))) break; if (! TREE_READONLY (decl) || TREE_ADDRESSABLE (decl) || by_ref || is_reference (decl)) { install_var_field (decl, by_ref, ctx); install_var_local (decl, ctx); break; } /* We don't need to copy const scalar vars back. */ OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE); goto do_private; case OMP_CLAUSE_LASTPRIVATE: /* Let the corresponding firstprivate clause create the variable. */ if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)) break; /* FALLTHRU */ case OMP_CLAUSE_FIRSTPRIVATE: case OMP_CLAUSE_REDUCTION: decl = OMP_CLAUSE_DECL (c); do_private: if (is_variable_sized (decl)) break; else if (is_parallel_ctx (ctx) && ! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))) { by_ref = use_pointer_for_field (decl, false); install_var_field (decl, by_ref, ctx); } install_var_local (decl, ctx); break; case OMP_CLAUSE_COPYPRIVATE: if (ctx->outer) scan_omp (&OMP_CLAUSE_DECL (c), ctx->outer); /* FALLTHRU */ case OMP_CLAUSE_COPYIN: decl = OMP_CLAUSE_DECL (c); by_ref = use_pointer_for_field (decl, false); install_var_field (decl, by_ref, ctx); break; case OMP_CLAUSE_DEFAULT: ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c); break; case OMP_CLAUSE_IF: case OMP_CLAUSE_NUM_THREADS: case OMP_CLAUSE_SCHEDULE: if (ctx->outer) scan_omp (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer); break; case OMP_CLAUSE_NOWAIT: case OMP_CLAUSE_ORDERED: break; default: gcc_unreachable (); } } for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) { switch (OMP_CLAUSE_CODE (c)) { case OMP_CLAUSE_LASTPRIVATE: /* Let the corresponding firstprivate clause create the variable. */ if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)) break; /* FALLTHRU */ case OMP_CLAUSE_PRIVATE: case OMP_CLAUSE_FIRSTPRIVATE: case OMP_CLAUSE_REDUCTION: decl = OMP_CLAUSE_DECL (c); if (is_variable_sized (decl)) install_var_local (decl, ctx); fixup_remapped_decl (decl, ctx, OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE && OMP_CLAUSE_PRIVATE_DEBUG (c)); if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)) scan_array_reductions = true; break; case OMP_CLAUSE_SHARED: decl = OMP_CLAUSE_DECL (c); if (! 
is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))) fixup_remapped_decl (decl, ctx, false); break; case OMP_CLAUSE_COPYPRIVATE: case OMP_CLAUSE_COPYIN: case OMP_CLAUSE_DEFAULT: case OMP_CLAUSE_IF: case OMP_CLAUSE_NUM_THREADS: case OMP_CLAUSE_SCHEDULE: case OMP_CLAUSE_NOWAIT: case OMP_CLAUSE_ORDERED: break; default: gcc_unreachable (); } } if (scan_array_reductions) for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)) { scan_omp (&OMP_CLAUSE_REDUCTION_INIT (c), ctx); scan_omp (&OMP_CLAUSE_REDUCTION_MERGE (c), ctx); } } /* Create a new name for omp child function. Returns an identifier. */ static GTY(()) unsigned int tmp_ompfn_id_num; static tree create_omp_child_function_name (void) { tree name = DECL_ASSEMBLER_NAME (current_function_decl); size_t len = IDENTIFIER_LENGTH (name); char *tmp_name, *prefix; prefix = alloca (len + sizeof ("_omp_fn")); memcpy (prefix, IDENTIFIER_POINTER (name), len); strcpy (prefix + len, "_omp_fn"); #ifndef NO_DOT_IN_LABEL prefix[len] = '.'; #elif !defined NO_DOLLAR_IN_LABEL prefix[len] = '$'; #endif ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix, tmp_ompfn_id_num++); return get_identifier (tmp_name); } /* Build a decl for the omp child function. It'll not contain a body yet, just the bare decl. */ static void create_omp_child_function (omp_context *ctx) { tree decl, type, name, t; name = create_omp_child_function_name (); type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE); decl = build_decl (FUNCTION_DECL, name, type); decl = lang_hooks.decls.pushdecl (decl); ctx->cb.dst_fn = decl; TREE_STATIC (decl) = 1; TREE_USED (decl) = 1; DECL_ARTIFICIAL (decl) = 1; DECL_IGNORED_P (decl) = 0; TREE_PUBLIC (decl) = 0; DECL_UNINLINABLE (decl) = 1; DECL_EXTERNAL (decl) = 0; DECL_CONTEXT (decl) = NULL_TREE; DECL_INITIAL (decl) = make_node (BLOCK); t = build_decl (RESULT_DECL, NULL_TREE, void_type_node); DECL_ARTIFICIAL (t) = 1; DECL_IGNORED_P (t) = 1; DECL_RESULT (decl) = t; t = build_decl (PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node); DECL_ARTIFICIAL (t) = 1; DECL_ARG_TYPE (t) = ptr_type_node; DECL_CONTEXT (t) = current_function_decl; TREE_USED (t) = 1; DECL_ARGUMENTS (decl) = t; ctx->receiver_decl = t; /* Allocate memory for the function structure. The call to allocate_struct_function clobbers CFUN, so we need to restore it afterward. */ allocate_struct_function (decl); DECL_SOURCE_LOCATION (decl) = EXPR_LOCATION (ctx->stmt); cfun->function_end_locus = EXPR_LOCATION (ctx->stmt); cfun = ctx->cb.src_cfun; } /* Scan an OpenMP parallel directive. */ static void scan_omp_parallel (tree *stmt_p, omp_context *outer_ctx) { omp_context *ctx; tree name; /* Ignore parallel directives with empty bodies, unless there are copyin clauses. 
*/ if (optimize > 0 && empty_body_p (OMP_PARALLEL_BODY (*stmt_p)) && find_omp_clause (OMP_CLAUSES (*stmt_p), OMP_CLAUSE_COPYIN) == NULL) { *stmt_p = build_empty_stmt (); return; } ctx = new_omp_context (*stmt_p, outer_ctx); if (parallel_nesting_level > 1) ctx->is_nested = true; ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0); ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED; ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE); name = create_tmp_var_name (".omp_data_s"); name = build_decl (TYPE_DECL, name, ctx->record_type); TYPE_NAME (ctx->record_type) = name; create_omp_child_function (ctx); OMP_PARALLEL_FN (*stmt_p) = ctx->cb.dst_fn; scan_sharing_clauses (OMP_PARALLEL_CLAUSES (*stmt_p), ctx); scan_omp (&OMP_PARALLEL_BODY (*stmt_p), ctx); if (TYPE_FIELDS (ctx->record_type) == NULL) ctx->record_type = ctx->receiver_decl = NULL; else { layout_type (ctx->record_type); fixup_child_record_type (ctx); } } /* Scan an OpenMP loop directive. */ static void scan_omp_for (tree *stmt_p, omp_context *outer_ctx) { omp_context *ctx; tree stmt; stmt = *stmt_p; ctx = new_omp_context (stmt, outer_ctx); scan_sharing_clauses (OMP_FOR_CLAUSES (stmt), ctx); scan_omp (&OMP_FOR_PRE_BODY (stmt), ctx); scan_omp (&OMP_FOR_INIT (stmt), ctx); scan_omp (&OMP_FOR_COND (stmt), ctx); scan_omp (&OMP_FOR_INCR (stmt), ctx); scan_omp (&OMP_FOR_BODY (stmt), ctx); } /* Scan an OpenMP sections directive. */ static void scan_omp_sections (tree *stmt_p, omp_context *outer_ctx) { tree stmt; omp_context *ctx; stmt = *stmt_p; ctx = new_omp_context (stmt, outer_ctx); scan_sharing_clauses (OMP_SECTIONS_CLAUSES (stmt), ctx); scan_omp (&OMP_SECTIONS_BODY (stmt), ctx); } /* Scan an OpenMP single directive. */ static void scan_omp_single (tree *stmt_p, omp_context *outer_ctx) { tree stmt = *stmt_p; omp_context *ctx; tree name; ctx = new_omp_context (stmt, outer_ctx); ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0); ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE); name = create_tmp_var_name (".omp_copy_s"); name = build_decl (TYPE_DECL, name, ctx->record_type); TYPE_NAME (ctx->record_type) = name; scan_sharing_clauses (OMP_SINGLE_CLAUSES (stmt), ctx); scan_omp (&OMP_SINGLE_BODY (stmt), ctx); if (TYPE_FIELDS (ctx->record_type) == NULL) ctx->record_type = NULL; else layout_type (ctx->record_type); } /* Check OpenMP nesting restrictions. 
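Warn about work-sharing, master, ordered and critical constructs that
   the spec does not allow to be closely nested inside one another.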
*/ static void check_omp_nesting_restrictions (tree t, omp_context *ctx) { switch (TREE_CODE (t)) { case OMP_FOR: case OMP_SECTIONS: case OMP_SINGLE: for (; ctx != NULL; ctx = ctx->outer) switch (TREE_CODE (ctx->stmt)) { case OMP_FOR: case OMP_SECTIONS: case OMP_SINGLE: case OMP_ORDERED: case OMP_MASTER: warning (0, "work-sharing region may not be closely nested inside " "of work-sharing, critical, ordered or master region"); return; case OMP_PARALLEL: return; default: break; } break; case OMP_MASTER: for (; ctx != NULL; ctx = ctx->outer) switch (TREE_CODE (ctx->stmt)) { case OMP_FOR: case OMP_SECTIONS: case OMP_SINGLE: warning (0, "master region may not be closely nested inside " "of work-sharing region"); return; case OMP_PARALLEL: return; default: break; } break; case OMP_ORDERED: for (; ctx != NULL; ctx = ctx->outer) switch (TREE_CODE (ctx->stmt)) { case OMP_CRITICAL: warning (0, "ordered region may not be closely nested inside " "of critical region"); return; case OMP_FOR: if (find_omp_clause (OMP_CLAUSES (ctx->stmt), OMP_CLAUSE_ORDERED) == NULL) warning (0, "ordered region must be closely nested inside " "a loop region with an ordered clause"); return; case OMP_PARALLEL: return; default: break; } break; case OMP_CRITICAL: for (; ctx != NULL; ctx = ctx->outer) if (TREE_CODE (ctx->stmt) == OMP_CRITICAL && OMP_CRITICAL_NAME (t) == OMP_CRITICAL_NAME (ctx->stmt)) { warning (0, "critical region may not be nested inside a critical " "region with the same name"); return; } break; default: break; } } /* Callback for walk_stmts used to scan for OpenMP directives at TP. */ static tree scan_omp_1 (tree *tp, int *walk_subtrees, void *data) { struct walk_stmt_info *wi = data; omp_context *ctx = wi->info; tree t = *tp; if (EXPR_HAS_LOCATION (t)) input_location = EXPR_LOCATION (t); /* Check the OpenMP nesting restrictions. */ if (OMP_DIRECTIVE_P (t) && ctx != NULL) check_omp_nesting_restrictions (t, ctx); *walk_subtrees = 0; switch (TREE_CODE (t)) { case OMP_PARALLEL: parallel_nesting_level++; scan_omp_parallel (tp, ctx); parallel_nesting_level--; break; case OMP_FOR: scan_omp_for (tp, ctx); break; case OMP_SECTIONS: scan_omp_sections (tp, ctx); break; case OMP_SINGLE: scan_omp_single (tp, ctx); break; case OMP_SECTION: case OMP_MASTER: case OMP_ORDERED: case OMP_CRITICAL: ctx = new_omp_context (*tp, ctx); scan_omp (&OMP_BODY (*tp), ctx); break; case BIND_EXPR: { tree var; *walk_subtrees = 1; for (var = BIND_EXPR_VARS (t); var ; var = TREE_CHAIN (var)) insert_decl_map (&ctx->cb, var, var); } break; case VAR_DECL: case PARM_DECL: case LABEL_DECL: case RESULT_DECL: if (ctx) *tp = remap_decl (t, &ctx->cb); break; default: if (ctx && TYPE_P (t)) *tp = remap_type (t, &ctx->cb); else if (!DECL_P (t)) *walk_subtrees = 1; break; } return NULL_TREE; } /* Scan all the statements starting at STMT_P. CTX contains context information about the OpenMP directives and clauses found during the scan. */ static void scan_omp (tree *stmt_p, omp_context *ctx) { location_t saved_location; struct walk_stmt_info wi; memset (&wi, 0, sizeof (wi)); wi.callback = scan_omp_1; wi.info = ctx; wi.want_bind_expr = (ctx != NULL); wi.want_locations = true; saved_location = input_location; walk_stmts (&wi, stmt_p); input_location = saved_location; } /* Re-gimplification and code generation routines. */ /* Build a call to GOMP_barrier. 
*/

static void
build_omp_barrier (tree *stmt_list)
{
  tree t;

  t = built_in_decls[BUILT_IN_GOMP_BARRIER];
  t = build_function_call_expr (t, NULL);
  gimplify_and_add (t, stmt_list);
}

/* If a context was created for STMT when it was scanned, return it.  */

static omp_context *
maybe_lookup_ctx (tree stmt)
{
  splay_tree_node n;
  n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
  return n ? (omp_context *) n->value : NULL;
}

/* Find the mapping for DECL in CTX or the immediately enclosing
   context that has a mapping for DECL.

   If CTX is a nested parallel directive, we may have to use the decl
   mappings created in CTX's parent context.  Suppose that we have the
   following parallel nesting (variable UIDs shown for clarity):

	iD.1562 = 0;
	#omp parallel shared(iD.1562)		-> outer parallel
	  iD.1562 = iD.1562 + 1;

	  #omp parallel shared (iD.1562)	-> inner parallel
	    iD.1562 = iD.1562 - 1;

   Each parallel structure will create a distinct .omp_data_s structure
   for copying iD.1562 in/out of the directive:

	outer parallel	.omp_data_s.1.i -> iD.1562
	inner parallel	.omp_data_s.2.i -> iD.1562

   A shared variable mapping will produce a copy-out operation before
   the parallel directive and a copy-in operation after it.  So, in this
   case we would have:

	iD.1562 = 0;
	.omp_data_o.1.i = iD.1562;
	#omp parallel shared(iD.1562)		-> outer parallel
	  .omp_data_i.1 = &.omp_data_o.1
	  .omp_data_i.1->i = .omp_data_i.1->i + 1;

	  .omp_data_o.2.i = iD.1562;		-> **
	  #omp parallel shared(iD.1562)		-> inner parallel
	    .omp_data_i.2 = &.omp_data_o.2
	    .omp_data_i.2->i = .omp_data_i.2->i - 1;

   ** This is a problem.  The symbol iD.1562 cannot be referenced
   inside the body of the outer parallel region.  But since we are
   emitting this copy operation while expanding the inner parallel
   directive, we need to access the CTX structure of the outer parallel
   directive to get the correct mapping:

	  .omp_data_o.2.i = .omp_data_i.1->i

   Since there may be other workshare or parallel directives enclosing
   the parallel directive, it may be necessary to walk up the context
   parent chain.  This is not a problem in general because nested
   parallelism happens only rarely.  */

static tree
lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t;
  omp_context *up;

  gcc_assert (ctx->is_nested);

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  gcc_assert (t || is_global_var (decl));

  return t ? t : decl;
}

/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
   in outer contexts.  */

static tree
maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t = NULL;
  omp_context *up;

  if (ctx->is_nested)
    for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
      t = maybe_lookup_decl (decl, up);

  return t ? t : decl;
}

/* Construct the initialization value for reduction CLAUSE.
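   For instance (merely restating the switch below): reduction(+:x),
   reduction(-:x), reduction(|:x), reduction(^:x) and reduction(||:x)
   initialize the private copy to 0; reduction(*:x) and
   reduction(&&:x) to 1; reduction(&:x) to ~0; reduction(max:x) to
   the minimum value of the type, or to -INF for a floating type
   whose mode honors infinities; reduction(min:x) symmetrically to
   the maximum.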
*/ tree omp_reduction_init (tree clause, tree type) { switch (OMP_CLAUSE_REDUCTION_CODE (clause)) { case PLUS_EXPR: case MINUS_EXPR: case BIT_IOR_EXPR: case BIT_XOR_EXPR: case TRUTH_OR_EXPR: case TRUTH_ORIF_EXPR: case TRUTH_XOR_EXPR: case NE_EXPR: return fold_convert (type, integer_zero_node); case MULT_EXPR: case TRUTH_AND_EXPR: case TRUTH_ANDIF_EXPR: case EQ_EXPR: return fold_convert (type, integer_one_node); case BIT_AND_EXPR: return fold_convert (type, integer_minus_one_node); case MAX_EXPR: if (SCALAR_FLOAT_TYPE_P (type)) { REAL_VALUE_TYPE max, min; if (HONOR_INFINITIES (TYPE_MODE (type))) { real_inf (&max); real_arithmetic (&min, NEGATE_EXPR, &max, NULL); } else real_maxval (&min, 1, TYPE_MODE (type)); return build_real (type, min); } else { gcc_assert (INTEGRAL_TYPE_P (type)); return TYPE_MIN_VALUE (type); } case MIN_EXPR: if (SCALAR_FLOAT_TYPE_P (type)) { REAL_VALUE_TYPE max; if (HONOR_INFINITIES (TYPE_MODE (type))) real_inf (&max); else real_maxval (&max, 0, TYPE_MODE (type)); return build_real (type, max); } else { gcc_assert (INTEGRAL_TYPE_P (type)); return TYPE_MAX_VALUE (type); } default: gcc_unreachable (); } } /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN, from the receiver (aka child) side and initializers for REFERENCE_TYPE private variables. Initialization statements go in ILIST, while calls to destructors go in DLIST. */ static void lower_rec_input_clauses (tree clauses, tree *ilist, tree *dlist, omp_context *ctx) { tree_stmt_iterator diter; tree c, dtor, copyin_seq, x, args, ptr; bool copyin_by_ref = false; bool lastprivate_firstprivate = false; int pass; *dlist = alloc_stmt_list (); diter = tsi_start (*dlist); copyin_seq = NULL; /* Do all the fixed sized types in the first pass, and the variable sized types in the second pass. This makes sure that the scalar arguments to the variable sized types are processed before we use them in the variable sized operations. */ for (pass = 0; pass < 2; ++pass) { for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c)) { enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c); tree var, new_var; bool by_ref; switch (c_kind) { case OMP_CLAUSE_PRIVATE: if (OMP_CLAUSE_PRIVATE_DEBUG (c)) continue; break; case OMP_CLAUSE_SHARED: if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL) { gcc_assert (is_global_var (OMP_CLAUSE_DECL (c))); continue; } case OMP_CLAUSE_FIRSTPRIVATE: case OMP_CLAUSE_COPYIN: case OMP_CLAUSE_REDUCTION: break; case OMP_CLAUSE_LASTPRIVATE: if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)) { lastprivate_firstprivate = true; if (pass != 0) continue; } break; default: continue; } new_var = var = OMP_CLAUSE_DECL (c); if (c_kind != OMP_CLAUSE_COPYIN) new_var = lookup_decl (var, ctx); if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN) { if (pass != 0) continue; } else if (is_variable_sized (var)) { /* For variable sized types, we need to allocate the actual storage here. Call alloca and store the result in the pointer decl that we created elsewhere. 
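   For example (identifiers invented for this illustration), a
   privatized VLA 'int a[n]' is represented by a pointer replacement
   decl a.5 whose DECL_VALUE_EXPR is *a.5; the statements emitted
   below then amount to:

	a.5 = (int (*)[n]) __builtin_alloca (sizeof (int) * n);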
*/ if (pass == 0) continue; ptr = DECL_VALUE_EXPR (new_var); gcc_assert (TREE_CODE (ptr) == INDIRECT_REF); ptr = TREE_OPERAND (ptr, 0); gcc_assert (DECL_P (ptr)); x = TYPE_SIZE_UNIT (TREE_TYPE (new_var)); args = tree_cons (NULL, x, NULL); x = built_in_decls[BUILT_IN_ALLOCA]; x = build_function_call_expr (x, args); x = fold_convert (TREE_TYPE (ptr), x); x = build2 (MODIFY_EXPR, void_type_node, ptr, x); gimplify_and_add (x, ilist); } else if (is_reference (var)) { /* For references that are being privatized for Fortran, allocate new backing storage for the new pointer variable. This allows us to avoid changing all the code that expects a pointer to something that expects a direct variable. Note that this doesn't apply to C++, since reference types are disallowed in data sharing clauses there, except for NRV optimized return values. */ if (pass == 0) continue; x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var))); if (TREE_CONSTANT (x)) { const char *name = NULL; if (DECL_NAME (var)) name = IDENTIFIER_POINTER (DECL_NAME (new_var)); x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)), name); gimple_add_tmp_var (x); x = build_fold_addr_expr_with_type (x, TREE_TYPE (new_var)); } else { args = tree_cons (NULL, x, NULL); x = built_in_decls[BUILT_IN_ALLOCA]; x = build_function_call_expr (x, args); x = fold_convert (TREE_TYPE (new_var), x); } x = build2 (MODIFY_EXPR, void_type_node, new_var, x); gimplify_and_add (x, ilist); new_var = build_fold_indirect_ref (new_var); } else if (c_kind == OMP_CLAUSE_REDUCTION && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)) { if (pass == 0) continue; } else if (pass != 0) continue; switch (OMP_CLAUSE_CODE (c)) { case OMP_CLAUSE_SHARED: /* Shared global vars are just accessed directly. */ if (is_global_var (new_var)) break; /* Set up the DECL_VALUE_EXPR for shared variables now. This needs to be delayed until after fixup_child_record_type so that we get the correct type during the dereference. */ by_ref = use_pointer_for_field (var, true); x = build_receiver_ref (var, by_ref, ctx); SET_DECL_VALUE_EXPR (new_var, x); DECL_HAS_VALUE_EXPR_P (new_var) = 1; /* ??? If VAR is not passed by reference, and the variable hasn't been initialized yet, then we'll get a warning for the store into the omp_data_s structure. Ideally, we'd be able to notice this and not store anything at all, but we're generating code too early. Suppress the warning. 
*/ if (!by_ref) TREE_NO_WARNING (var) = 1; break; case OMP_CLAUSE_LASTPRIVATE: if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)) break; /* FALLTHRU */ case OMP_CLAUSE_PRIVATE: x = lang_hooks.decls.omp_clause_default_ctor (c, new_var); if (x) gimplify_and_add (x, ilist); /* FALLTHRU */ do_dtor: x = lang_hooks.decls.omp_clause_dtor (c, new_var); if (x) { dtor = x; gimplify_stmt (&dtor); tsi_link_before (&diter, dtor, TSI_SAME_STMT); } break; case OMP_CLAUSE_FIRSTPRIVATE: x = build_outer_var_ref (var, ctx); x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x); gimplify_and_add (x, ilist); goto do_dtor; break; case OMP_CLAUSE_COPYIN: by_ref = use_pointer_for_field (var, false); x = build_receiver_ref (var, by_ref, ctx); x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x); append_to_statement_list (x, &copyin_seq); copyin_by_ref |= by_ref; break; case OMP_CLAUSE_REDUCTION: if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)) { gimplify_and_add (OMP_CLAUSE_REDUCTION_INIT (c), ilist); OMP_CLAUSE_REDUCTION_INIT (c) = NULL; } else { x = omp_reduction_init (c, TREE_TYPE (new_var)); gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE); x = build2 (MODIFY_EXPR, void_type_node, new_var, x); gimplify_and_add (x, ilist); } break; default: gcc_unreachable (); } } } /* The copyin sequence is not to be executed by the main thread, since that would result in self-copies. Perhaps not visible to scalars, but it certainly is to C++ operator=. */ if (copyin_seq) { x = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM]; x = build_function_call_expr (x, NULL); x = build2 (NE_EXPR, boolean_type_node, x, build_int_cst (TREE_TYPE (x), 0)); x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL); gimplify_and_add (x, ilist); } /* If any copyin variable is passed by reference, we must ensure the master thread doesn't modify it before it is copied over in all threads. Similarly for variables in both firstprivate and lastprivate clauses we need to ensure the lastprivate copying happens after firstprivate copying in all threads. */ if (copyin_by_ref || lastprivate_firstprivate) build_omp_barrier (ilist); } /* Generate code to implement the LASTPRIVATE clauses. This is used for both parallel and workshare constructs. PREDICATE may be NULL if it's always true. */ static void lower_lastprivate_clauses (tree clauses, tree predicate, tree *stmt_list, omp_context *ctx) { tree sub_list, x, c; /* Early exit if there are no lastprivate clauses. */ clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE); if (clauses == NULL) { /* If this was a workshare clause, see if it had been combined with its parallel. In that case, look for the clauses on the parallel statement itself. */ if (is_parallel_ctx (ctx)) return; ctx = ctx->outer; if (ctx == NULL || !is_parallel_ctx (ctx)) return; clauses = find_omp_clause (OMP_PARALLEL_CLAUSES (ctx->stmt), OMP_CLAUSE_LASTPRIVATE); if (clauses == NULL) return; } sub_list = alloc_stmt_list (); for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c)) { tree var, new_var; if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LASTPRIVATE) continue; var = OMP_CLAUSE_DECL (c); new_var = lookup_decl (var, ctx); x = build_outer_var_ref (var, ctx); if (is_reference (var)) new_var = build_fold_indirect_ref (new_var); x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var); append_to_statement_list (x, &sub_list); } if (predicate) x = build3 (COND_EXPR, void_type_node, predicate, sub_list, NULL); else x = sub_list; gimplify_and_add (x, stmt_list); } /* Generate code to implement the REDUCTION clauses. 
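   For example (invented names), a lone scalar clause such as
   reduction(+:sum) becomes an atomic read-modify-write of the outer
   variable,

	#pragma omp atomic
	  sum = sum + sum.17;

   where sum.17 is the thread's private accumulator, while multiple
   clauses or placeholder (array) reductions bracket the merge
   statements with calls to GOMP_atomic_start and GOMP_atomic_end.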
*/ static void lower_reduction_clauses (tree clauses, tree *stmt_list, omp_context *ctx) { tree sub_list = NULL, x, c; int count = 0; /* First see if there is exactly one reduction clause. Use OMP_ATOMIC update in that case, otherwise use a lock. */ for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION) { if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)) { /* Never use OMP_ATOMIC for array reductions. */ count = -1; break; } count++; } if (count == 0) return; for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c)) { tree var, ref, new_var; enum tree_code code; if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION) continue; var = OMP_CLAUSE_DECL (c); new_var = lookup_decl (var, ctx); if (is_reference (var)) new_var = build_fold_indirect_ref (new_var); ref = build_outer_var_ref (var, ctx); code = OMP_CLAUSE_REDUCTION_CODE (c); /* reduction(-:var) sums up the partial results, so it acts identically to reduction(+:var). */ if (code == MINUS_EXPR) code = PLUS_EXPR; if (count == 1) { tree addr = build_fold_addr_expr (ref); addr = save_expr (addr); ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr); x = fold_build2 (code, TREE_TYPE (ref), ref, new_var); x = build2 (OMP_ATOMIC, void_type_node, addr, x); gimplify_and_add (x, stmt_list); return; } if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)) { tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c); if (is_reference (var)) ref = build_fold_addr_expr (ref); SET_DECL_VALUE_EXPR (placeholder, ref); DECL_HAS_VALUE_EXPR_P (placeholder) = 1; gimplify_and_add (OMP_CLAUSE_REDUCTION_MERGE (c), &sub_list); OMP_CLAUSE_REDUCTION_MERGE (c) = NULL; OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL; } else { x = build2 (code, TREE_TYPE (ref), ref, new_var); ref = build_outer_var_ref (var, ctx); x = build2 (MODIFY_EXPR, void_type_node, ref, x); append_to_statement_list (x, &sub_list); } } x = built_in_decls[BUILT_IN_GOMP_ATOMIC_START]; x = build_function_call_expr (x, NULL); gimplify_and_add (x, stmt_list); gimplify_and_add (sub_list, stmt_list); x = built_in_decls[BUILT_IN_GOMP_ATOMIC_END]; x = build_function_call_expr (x, NULL); gimplify_and_add (x, stmt_list); } /* Generate code to implement the COPYPRIVATE clauses. */ static void lower_copyprivate_clauses (tree clauses, tree *slist, tree *rlist, omp_context *ctx) { tree c; for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c)) { tree var, ref, x; bool by_ref; if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE) continue; var = OMP_CLAUSE_DECL (c); by_ref = use_pointer_for_field (var, false); ref = build_sender_ref (var, ctx); x = (ctx->is_nested) ? lookup_decl_in_outer_ctx (var, ctx) : var; x = by_ref ? build_fold_addr_expr (x) : x; x = build2 (MODIFY_EXPR, void_type_node, ref, x); gimplify_and_add (x, slist); ref = build_receiver_ref (var, by_ref, ctx); if (is_reference (var)) { ref = build_fold_indirect_ref (ref); var = build_fold_indirect_ref (var); } x = lang_hooks.decls.omp_clause_assign_op (c, var, ref); gimplify_and_add (x, rlist); } } /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE, and REDUCTION from the sender (aka parent) side. 
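   For illustration (field and variable names invented): with
   firstprivate(x) the parent stores the incoming value into the
   marshalling record before launching the children,

	.omp_data_o.1.x = x;

   while with lastprivate(x) it reads the result back out after the
   region,

	x = .omp_data_o.1.x;

   and fields passed by reference store &x on the way in instead.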
*/ static void lower_send_clauses (tree clauses, tree *ilist, tree *olist, omp_context *ctx) { tree c; for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c)) { tree val, ref, x, var; bool by_ref, do_in = false, do_out = false; switch (OMP_CLAUSE_CODE (c)) { case OMP_CLAUSE_FIRSTPRIVATE: case OMP_CLAUSE_COPYIN: case OMP_CLAUSE_LASTPRIVATE: case OMP_CLAUSE_REDUCTION: break; default: continue; } var = val = OMP_CLAUSE_DECL (c); if (ctx->is_nested) var = lookup_decl_in_outer_ctx (val, ctx); if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN && is_global_var (var)) continue; if (is_variable_sized (val)) continue; by_ref = use_pointer_for_field (val, false); switch (OMP_CLAUSE_CODE (c)) { case OMP_CLAUSE_FIRSTPRIVATE: case OMP_CLAUSE_COPYIN: do_in = true; break; case OMP_CLAUSE_LASTPRIVATE: if (by_ref || is_reference (val)) { if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)) continue; do_in = true; } else do_out = true; break; case OMP_CLAUSE_REDUCTION: do_in = true; do_out = !(by_ref || is_reference (val)); break; default: gcc_unreachable (); } if (do_in) { ref = build_sender_ref (val, ctx); x = by_ref ? build_fold_addr_expr (var) : var; x = build2 (MODIFY_EXPR, void_type_node, ref, x); gimplify_and_add (x, ilist); } if (do_out) { ref = build_sender_ref (val, ctx); x = build2 (MODIFY_EXPR, void_type_node, var, ref); gimplify_and_add (x, olist); } } } /* Generate code to implement SHARED from the sender (aka parent) side. This is trickier, since OMP_PARALLEL_CLAUSES doesn't list things that got automatically shared. */ static void lower_send_shared_vars (tree *ilist, tree *olist, omp_context *ctx) { tree var, ovar, nvar, f, x; if (ctx->record_type == NULL) return; for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f)) { ovar = DECL_ABSTRACT_ORIGIN (f); nvar = maybe_lookup_decl (ovar, ctx); if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar)) continue; var = ovar; /* If CTX is a nested parallel directive. Find the immediately enclosing parallel or workshare construct that contains a mapping for OVAR. */ if (ctx->is_nested) var = lookup_decl_in_outer_ctx (ovar, ctx); if (use_pointer_for_field (ovar, true)) { x = build_sender_ref (ovar, ctx); var = build_fold_addr_expr (var); x = build2 (MODIFY_EXPR, void_type_node, x, var); gimplify_and_add (x, ilist); } else { x = build_sender_ref (ovar, ctx); x = build2 (MODIFY_EXPR, void_type_node, x, var); gimplify_and_add (x, ilist); x = build_sender_ref (ovar, ctx); x = build2 (MODIFY_EXPR, void_type_node, var, x); gimplify_and_add (x, olist); } } } /* Build the function calls to GOMP_parallel_start etc to actually generate the parallel operation. REGION is the parallel region being expanded. BB is the block where to insert the code. WS_ARGS will be set if this is a call to a combined parallel+workshare construct, it contains the list of additional arguments needed by the workshare construct. */ static void expand_parallel_call (struct omp_region *region, basic_block bb, tree entry_stmt, tree ws_args) { tree t, args, val, cond, c, list, clauses; block_stmt_iterator si; int start_ix; clauses = OMP_PARALLEL_CLAUSES (entry_stmt); push_gimplify_context (); /* Determine what flavor of GOMP_parallel_start we will be emitting. 
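   For example, a combined parallel loop with schedule(dynamic)
   selects GOMP_parallel_loop_dynamic_start (the static, guided and
   runtime schedules pick their corresponding entry points), a
   combined parallel sections region selects
   GOMP_parallel_sections_start, and anything else falls back to
   plain GOMP_parallel_start.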
*/ start_ix = BUILT_IN_GOMP_PARALLEL_START; if (is_combined_parallel (region)) { switch (region->inner->type) { case OMP_FOR: start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START + region->inner->sched_kind; break; case OMP_SECTIONS: start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START; break; default: gcc_unreachable (); } } /* By default, the value of NUM_THREADS is zero (selected at run time) and there is no conditional. */ cond = NULL_TREE; val = build_int_cst (unsigned_type_node, 0); c = find_omp_clause (clauses, OMP_CLAUSE_IF); if (c) cond = OMP_CLAUSE_IF_EXPR (c); c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS); if (c) val = OMP_CLAUSE_NUM_THREADS_EXPR (c); /* Ensure 'val' is of the correct type. */ val = fold_convert (unsigned_type_node, val); /* If we found the clause 'if (cond)', build either (cond != 0) or (cond ? val : 1u). */ if (cond) { block_stmt_iterator si; cond = gimple_boolify (cond); if (integer_zerop (val)) val = build2 (EQ_EXPR, unsigned_type_node, cond, build_int_cst (TREE_TYPE (cond), 0)); else { basic_block cond_bb, then_bb, else_bb; edge e; tree t, then_lab, else_lab, tmp; tmp = create_tmp_var (TREE_TYPE (val), NULL); e = split_block (bb, NULL); cond_bb = e->src; bb = e->dest; remove_edge (e); then_bb = create_empty_bb (cond_bb); else_bb = create_empty_bb (then_bb); then_lab = create_artificial_label (); else_lab = create_artificial_label (); t = build3 (COND_EXPR, void_type_node, cond, build_and_jump (&then_lab), build_and_jump (&else_lab)); si = bsi_start (cond_bb); bsi_insert_after (&si, t, BSI_CONTINUE_LINKING); si = bsi_start (then_bb); t = build1 (LABEL_EXPR, void_type_node, then_lab); bsi_insert_after (&si, t, BSI_CONTINUE_LINKING); t = build2 (MODIFY_EXPR, void_type_node, tmp, val); bsi_insert_after (&si, t, BSI_CONTINUE_LINKING); si = bsi_start (else_bb); t = build1 (LABEL_EXPR, void_type_node, else_lab); bsi_insert_after (&si, t, BSI_CONTINUE_LINKING); t = build2 (MODIFY_EXPR, void_type_node, tmp, build_int_cst (unsigned_type_node, 1)); bsi_insert_after (&si, t, BSI_CONTINUE_LINKING); make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE); make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE); make_edge (then_bb, bb, EDGE_FALLTHRU); make_edge (else_bb, bb, EDGE_FALLTHRU); val = tmp; } list = NULL_TREE; val = get_formal_tmp_var (val, &list); si = bsi_start (bb); bsi_insert_after (&si, list, BSI_CONTINUE_LINKING); } list = NULL_TREE; args = tree_cons (NULL, val, NULL); t = OMP_PARALLEL_DATA_ARG (entry_stmt); if (t == NULL) t = null_pointer_node; else t = build_fold_addr_expr (t); args = tree_cons (NULL, t, args); t = build_fold_addr_expr (OMP_PARALLEL_FN (entry_stmt)); args = tree_cons (NULL, t, args); if (ws_args) args = chainon (args, ws_args); t = built_in_decls[start_ix]; t = build_function_call_expr (t, args); gimplify_and_add (t, &list); t = OMP_PARALLEL_DATA_ARG (entry_stmt); if (t == NULL) t = null_pointer_node; else t = build_fold_addr_expr (t); args = tree_cons (NULL, t, NULL); t = build_function_call_expr (OMP_PARALLEL_FN (entry_stmt), args); gimplify_and_add (t, &list); t = built_in_decls[BUILT_IN_GOMP_PARALLEL_END]; t = build_function_call_expr (t, NULL); gimplify_and_add (t, &list); si = bsi_last (bb); bsi_insert_after (&si, list, BSI_CONTINUE_LINKING); pop_gimplify_context (NULL_TREE); } /* If exceptions are enabled, wrap *STMT_P in a MUST_NOT_THROW catch handler. This prevents programs from violating the structured block semantics with throws. 
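   Schematically (shown in C++-like form; the actual trees are a
   TRY_CATCH_EXPR wrapping an EH_FILTER_EXPR), the body becomes:

	try
	  {
	    BODY;
	  }
	catch (...)
	  {
	    __builtin_trap ();	<- or the language's cleanup action
	  }

   so an exception escaping the structured block terminates the
   program instead of unwinding across the region boundary.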
*/ static void maybe_catch_exception (tree *stmt_p) { tree f, t; if (!flag_exceptions) return; if (lang_protect_cleanup_actions) t = lang_protect_cleanup_actions (); else { t = built_in_decls[BUILT_IN_TRAP]; t = build_function_call_expr (t, NULL); } f = build2 (EH_FILTER_EXPR, void_type_node, NULL, NULL); EH_FILTER_MUST_NOT_THROW (f) = 1; gimplify_and_add (t, &EH_FILTER_FAILURE (f)); t = build2 (TRY_CATCH_EXPR, void_type_node, *stmt_p, NULL); append_to_statement_list (f, &TREE_OPERAND (t, 1)); *stmt_p = NULL; append_to_statement_list (t, stmt_p); } /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */ static tree list2chain (tree list) { tree t; for (t = list; t; t = TREE_CHAIN (t)) { tree var = TREE_VALUE (t); if (TREE_CHAIN (t)) TREE_CHAIN (var) = TREE_VALUE (TREE_CHAIN (t)); else TREE_CHAIN (var) = NULL_TREE; } return list ? TREE_VALUE (list) : NULL_TREE; } /* Remove barriers in REGION->EXIT's block. Note that this is only valid for OMP_PARALLEL regions. Since the end of a parallel region is an implicit barrier, any workshare inside the OMP_PARALLEL that left a barrier at the end of the OMP_PARALLEL region can now be removed. */ static void remove_exit_barrier (struct omp_region *region) { block_stmt_iterator si; basic_block exit_bb; edge_iterator ei; edge e; tree t; exit_bb = region->exit; /* If the parallel region doesn't return, we don't have REGION->EXIT block at all. */ if (! exit_bb) return; /* The last insn in the block will be the parallel's OMP_RETURN. The workshare's OMP_RETURN will be in a preceding block. The kinds of statements that can appear in between are extremely limited -- no memory operations at all. Here, we allow nothing at all, so the only thing we allow to precede this OMP_RETURN is a label. */ si = bsi_last (exit_bb); gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN); bsi_prev (&si); if (!bsi_end_p (si) && TREE_CODE (bsi_stmt (si)) != LABEL_EXPR) return; FOR_EACH_EDGE (e, ei, exit_bb->preds) { si = bsi_last (e->src); if (bsi_end_p (si)) continue; t = bsi_stmt (si); if (TREE_CODE (t) == OMP_RETURN) OMP_RETURN_NOWAIT (t) = 1; } } static void remove_exit_barriers (struct omp_region *region) { if (region->type == OMP_PARALLEL) remove_exit_barrier (region); if (region->inner) { region = region->inner; remove_exit_barriers (region); while (region->next) { region = region->next; remove_exit_barriers (region); } } } /* Expand the OpenMP parallel directive starting at REGION. */ static void expand_omp_parallel (struct omp_region *region) { basic_block entry_bb, exit_bb, new_bb; struct function *child_cfun, *saved_cfun; tree child_fn, block, t, ws_args; block_stmt_iterator si; tree entry_stmt; edge e; bool do_cleanup_cfg = false; entry_stmt = last_stmt (region->entry); child_fn = OMP_PARALLEL_FN (entry_stmt); child_cfun = DECL_STRUCT_FUNCTION (child_fn); saved_cfun = cfun; entry_bb = region->entry; exit_bb = region->exit; if (is_combined_parallel (region)) ws_args = region->ws_args; else ws_args = NULL_TREE; if (child_cfun->cfg) { /* Due to inlining, it may happen that we have already outlined the region, in which case all we need to do is make the sub-graph unreachable and emit the parallel call. 
*/ edge entry_succ_e, exit_succ_e; block_stmt_iterator si; entry_succ_e = single_succ_edge (entry_bb); si = bsi_last (entry_bb); gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_PARALLEL); bsi_remove (&si, true); new_bb = entry_bb; remove_edge (entry_succ_e); if (exit_bb) { exit_succ_e = single_succ_edge (exit_bb); make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU); } do_cleanup_cfg = true; } else { /* If the parallel region needs data sent from the parent function, then the very first statement (except possible tree profile counter updates) of the parallel body is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since &.OMP_DATA_O is passed as an argument to the child function, we need to replace it with the argument as seen by the child function. In most cases, this will end up being the identity assignment .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had a function call that has been inlined, the original PARM_DECL .OMP_DATA_I may have been converted into a different local variable. In which case, we need to keep the assignment. */ if (OMP_PARALLEL_DATA_ARG (entry_stmt)) { basic_block entry_succ_bb = single_succ (entry_bb); block_stmt_iterator si; for (si = bsi_start (entry_succ_bb); ; bsi_next (&si)) { tree stmt, arg; gcc_assert (!bsi_end_p (si)); stmt = bsi_stmt (si); if (TREE_CODE (stmt) != MODIFY_EXPR) continue; arg = TREE_OPERAND (stmt, 1); STRIP_NOPS (arg); if (TREE_CODE (arg) == ADDR_EXPR && TREE_OPERAND (arg, 0) == OMP_PARALLEL_DATA_ARG (entry_stmt)) { if (TREE_OPERAND (stmt, 0) == DECL_ARGUMENTS (child_fn)) bsi_remove (&si, true); else TREE_OPERAND (stmt, 1) = DECL_ARGUMENTS (child_fn); break; } } } /* Declare local variables needed in CHILD_CFUN. */ block = DECL_INITIAL (child_fn); BLOCK_VARS (block) = list2chain (child_cfun->unexpanded_var_list); DECL_SAVED_TREE (child_fn) = single_succ (entry_bb)->stmt_list; /* Reset DECL_CONTEXT on locals and function arguments. */ for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t)) DECL_CONTEXT (t) = child_fn; for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t)) DECL_CONTEXT (t) = child_fn; /* Split ENTRY_BB at OMP_PARALLEL so that it can be moved to the child function. */ si = bsi_last (entry_bb); t = bsi_stmt (si); gcc_assert (t && TREE_CODE (t) == OMP_PARALLEL); bsi_remove (&si, true); e = split_block (entry_bb, t); entry_bb = e->dest; single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU; /* Move the parallel region into CHILD_CFUN. We need to reset dominance information because the expansion of the inner regions has invalidated it. */ free_dominance_info (CDI_DOMINATORS); new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb); if (exit_bb) single_succ_edge (new_bb)->flags = EDGE_FALLTHRU; cgraph_add_new_function (child_fn); /* Convert OMP_RETURN into a RETURN_EXPR. */ if (exit_bb) { si = bsi_last (exit_bb); gcc_assert (!bsi_end_p (si) && TREE_CODE (bsi_stmt (si)) == OMP_RETURN); t = build1 (RETURN_EXPR, void_type_node, NULL); bsi_insert_after (&si, t, BSI_SAME_STMT); bsi_remove (&si, true); } } /* Emit a library call to launch the children threads. */ expand_parallel_call (region, new_bb, entry_stmt, ws_args); if (do_cleanup_cfg) { /* Clean up the unreachable sub-graph we created above. */ free_dominance_info (CDI_DOMINATORS); free_dominance_info (CDI_POST_DOMINATORS); cleanup_tree_cfg (); } } /* A subroutine of expand_omp_for. Generate code for a parallel loop with any schedule. 
Given parameters: for (V = N1; V cond N2; V += STEP) BODY; where COND is "<" or ">", we generate pseudocode more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0); if (more) goto L0; else goto L3; L0: V = istart0; iend = iend0; L1: BODY; V += STEP; if (V cond iend) goto L1; else goto L2; L2: if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3; L3: If this is a combined omp parallel loop, instead of the call to GOMP_loop_foo_start, we emit 'goto L3'. */ static void expand_omp_for_generic (struct omp_region *region, struct omp_for_data *fd, enum built_in_function start_fn, enum built_in_function next_fn) { tree l0, l1, l2 = NULL, l3 = NULL; tree type, istart0, iend0, iend; tree t, args, list; basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb; basic_block l2_bb = NULL, l3_bb = NULL; block_stmt_iterator si; bool in_combined_parallel = is_combined_parallel (region); type = TREE_TYPE (fd->v); istart0 = create_tmp_var (long_integer_type_node, ".istart0"); iend0 = create_tmp_var (long_integer_type_node, ".iend0"); iend = create_tmp_var (type, NULL); TREE_ADDRESSABLE (istart0) = 1; TREE_ADDRESSABLE (iend0) = 1; gcc_assert ((region->cont != NULL) ^ (region->exit == NULL)); entry_bb = region->entry; l0_bb = create_empty_bb (entry_bb); l1_bb = single_succ (entry_bb); l0 = tree_block_label (l0_bb); l1 = tree_block_label (l1_bb); cont_bb = region->cont; exit_bb = region->exit; if (cont_bb) { l2_bb = create_empty_bb (cont_bb); l3_bb = single_succ (cont_bb); l2 = tree_block_label (l2_bb); l3 = tree_block_label (l3_bb); } si = bsi_last (entry_bb); gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR); if (!in_combined_parallel) { /* If this is not a combined parallel loop, emit a call to GOMP_loop_foo_start in ENTRY_BB. */ list = alloc_stmt_list (); t = build_fold_addr_expr (iend0); args = tree_cons (NULL, t, NULL); t = build_fold_addr_expr (istart0); args = tree_cons (NULL, t, args); if (fd->chunk_size) { t = fold_convert (long_integer_type_node, fd->chunk_size); args = tree_cons (NULL, t, args); } t = fold_convert (long_integer_type_node, fd->step); args = tree_cons (NULL, t, args); t = fold_convert (long_integer_type_node, fd->n2); args = tree_cons (NULL, t, args); t = fold_convert (long_integer_type_node, fd->n1); args = tree_cons (NULL, t, args); t = build_function_call_expr (built_in_decls[start_fn], args); t = get_formal_tmp_var (t, &list); if (cont_bb) { t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l0), build_and_jump (&l3)); append_to_statement_list (t, &list); } bsi_insert_after (&si, list, BSI_SAME_STMT); } bsi_remove (&si, true); /* Iteration setup for sequential loop goes in L0_BB. */ list = alloc_stmt_list (); t = fold_convert (type, istart0); t = build2 (MODIFY_EXPR, void_type_node, fd->v, t); gimplify_and_add (t, &list); t = fold_convert (type, iend0); t = build2 (MODIFY_EXPR, void_type_node, iend, t); gimplify_and_add (t, &list); si = bsi_start (l0_bb); bsi_insert_after (&si, list, BSI_CONTINUE_LINKING); /* Handle the rare case where BODY doesn't ever return. */ if (cont_bb == NULL) { remove_edge (single_succ_edge (entry_bb)); make_edge (entry_bb, l0_bb, EDGE_FALLTHRU); make_edge (l0_bb, l1_bb, EDGE_FALLTHRU); return; } /* Code to control the increment and predicate for the sequential loop goes in the first half of EXIT_BB (we split EXIT_BB so that we can inherit all the edges going out of the loop body). 
*/ list = alloc_stmt_list (); t = build2 (PLUS_EXPR, type, fd->v, fd->step); t = build2 (MODIFY_EXPR, void_type_node, fd->v, t); gimplify_and_add (t, &list); t = build2 (fd->cond_code, boolean_type_node, fd->v, iend); t = get_formal_tmp_var (t, &list); t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l1), build_and_jump (&l2)); append_to_statement_list (t, &list); si = bsi_last (cont_bb); bsi_insert_after (&si, list, BSI_SAME_STMT); gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE); bsi_remove (&si, true); /* Emit code to get the next parallel iteration in L2_BB. */ list = alloc_stmt_list (); t = build_fold_addr_expr (iend0); args = tree_cons (NULL, t, NULL); t = build_fold_addr_expr (istart0); args = tree_cons (NULL, t, args); t = build_function_call_expr (built_in_decls[next_fn], args); t = get_formal_tmp_var (t, &list); t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l0), build_and_jump (&l3)); append_to_statement_list (t, &list); si = bsi_start (l2_bb); bsi_insert_after (&si, list, BSI_CONTINUE_LINKING); /* Add the loop cleanup function. */ si = bsi_last (exit_bb); if (OMP_RETURN_NOWAIT (bsi_stmt (si))) t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT]; else t = built_in_decls[BUILT_IN_GOMP_LOOP_END]; t = build_function_call_expr (t, NULL); bsi_insert_after (&si, t, BSI_SAME_STMT); bsi_remove (&si, true); /* Connect the new blocks. */ remove_edge (single_succ_edge (entry_bb)); if (in_combined_parallel) make_edge (entry_bb, l2_bb, EDGE_FALLTHRU); else { make_edge (entry_bb, l0_bb, EDGE_TRUE_VALUE); make_edge (entry_bb, l3_bb, EDGE_FALSE_VALUE); } make_edge (l0_bb, l1_bb, EDGE_FALLTHRU); remove_edge (single_succ_edge (cont_bb)); make_edge (cont_bb, l1_bb, EDGE_TRUE_VALUE); make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE); make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE); make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE); } /* A subroutine of expand_omp_for. Generate code for a parallel loop with static schedule and no specified chunk size. Given parameters: for (V = N1; V cond N2; V += STEP) BODY; where COND is "<" or ">", we generate pseudocode if (cond is <) adj = STEP - 1; else adj = STEP + 1; n = (adj + N2 - N1) / STEP; q = n / nthreads; q += (q * nthreads != n); s0 = q * threadid; e0 = min(s0 + q, n); if (s0 >= e0) goto L2; else goto L0; L0: V = s0 * STEP + N1; e = e0 * STEP + N1; L1: BODY; V += STEP; if (V cond e) goto L1; L2: */ static void expand_omp_for_static_nochunk (struct omp_region *region, struct omp_for_data *fd) { tree l0, l1, l2, n, q, s0, e0, e, t, nthreads, threadid; tree type, list; basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb; basic_block fin_bb; block_stmt_iterator si; type = TREE_TYPE (fd->v); entry_bb = region->entry; seq_start_bb = create_empty_bb (entry_bb); body_bb = single_succ (entry_bb); cont_bb = region->cont; fin_bb = single_succ (cont_bb); exit_bb = region->exit; l0 = tree_block_label (seq_start_bb); l1 = tree_block_label (body_bb); l2 = tree_block_label (fin_bb); /* Iteration space partitioning goes in ENTRY_BB. 
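   A worked example with invented numbers: for N1=0, N2=10, STEP=1,
   cond '<' and nthreads=4, we get n = (0 + 10 - 0) / 1 = 10 and
   q = 10 / 4 = 2, bumped to 3 because 2 * 4 != 10.  Thread 0 then
   covers [0,3), thread 1 [3,6), thread 2 [6,9) and thread 3 [9,10);
   a thread whose slice is empty (s0 >= e0) jumps straight to L2.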
*/ list = alloc_stmt_list (); t = built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS]; t = build_function_call_expr (t, NULL); t = fold_convert (type, t); nthreads = get_formal_tmp_var (t, &list); t = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM]; t = build_function_call_expr (t, NULL); t = fold_convert (type, t); threadid = get_formal_tmp_var (t, &list); fd->n1 = fold_convert (type, fd->n1); if (!is_gimple_val (fd->n1)) fd->n1 = get_formal_tmp_var (fd->n1, &list); fd->n2 = fold_convert (type, fd->n2); if (!is_gimple_val (fd->n2)) fd->n2 = get_formal_tmp_var (fd->n2, &list); fd->step = fold_convert (type, fd->step); if (!is_gimple_val (fd->step)) fd->step = get_formal_tmp_var (fd->step, &list); t = build_int_cst (type, (fd->cond_code == LT_EXPR ? -1 : 1)); t = fold_build2 (PLUS_EXPR, type, fd->step, t); t = fold_build2 (PLUS_EXPR, type, t, fd->n2); t = fold_build2 (MINUS_EXPR, type, t, fd->n1); t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step); t = fold_convert (type, t); if (is_gimple_val (t)) n = t; else n = get_formal_tmp_var (t, &list); t = build2 (TRUNC_DIV_EXPR, type, n, nthreads); q = get_formal_tmp_var (t, &list); t = build2 (MULT_EXPR, type, q, nthreads); t = build2 (NE_EXPR, type, t, n); t = build2 (PLUS_EXPR, type, q, t); q = get_formal_tmp_var (t, &list); t = build2 (MULT_EXPR, type, q, threadid); s0 = get_formal_tmp_var (t, &list); t = build2 (PLUS_EXPR, type, s0, q); t = build2 (MIN_EXPR, type, t, n); e0 = get_formal_tmp_var (t, &list); t = build2 (GE_EXPR, boolean_type_node, s0, e0); t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l2), build_and_jump (&l0)); append_to_statement_list (t, &list); si = bsi_last (entry_bb); gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR); bsi_insert_after (&si, list, BSI_SAME_STMT); bsi_remove (&si, true); /* Setup code for sequential iteration goes in SEQ_START_BB. */ list = alloc_stmt_list (); t = fold_convert (type, s0); t = build2 (MULT_EXPR, type, t, fd->step); t = build2 (PLUS_EXPR, type, t, fd->n1); t = build2 (MODIFY_EXPR, void_type_node, fd->v, t); gimplify_and_add (t, &list); t = fold_convert (type, e0); t = build2 (MULT_EXPR, type, t, fd->step); t = build2 (PLUS_EXPR, type, t, fd->n1); e = get_formal_tmp_var (t, &list); si = bsi_start (seq_start_bb); bsi_insert_after (&si, list, BSI_CONTINUE_LINKING); /* The code controlling the sequential loop replaces the OMP_CONTINUE. */ list = alloc_stmt_list (); t = build2 (PLUS_EXPR, type, fd->v, fd->step); t = build2 (MODIFY_EXPR, void_type_node, fd->v, t); gimplify_and_add (t, &list); t = build2 (fd->cond_code, boolean_type_node, fd->v, e); t = get_formal_tmp_var (t, &list); t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l1), build_and_jump (&l2)); append_to_statement_list (t, &list); si = bsi_last (cont_bb); gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE); bsi_insert_after (&si, list, BSI_SAME_STMT); bsi_remove (&si, true); /* Replace the OMP_RETURN with a barrier, or nothing. */ si = bsi_last (exit_bb); if (!OMP_RETURN_NOWAIT (bsi_stmt (si))) { list = alloc_stmt_list (); build_omp_barrier (&list); bsi_insert_after (&si, list, BSI_SAME_STMT); } bsi_remove (&si, true); /* Connect all the blocks. */ make_edge (seq_start_bb, body_bb, EDGE_FALLTHRU); remove_edge (single_succ_edge (entry_bb)); make_edge (entry_bb, fin_bb, EDGE_TRUE_VALUE); make_edge (entry_bb, seq_start_bb, EDGE_FALSE_VALUE); make_edge (cont_bb, body_bb, EDGE_TRUE_VALUE); find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE; } /* A subroutine of expand_omp_for. 
Generate code for a parallel loop with static schedule and a specified chunk size. Given parameters: for (V = N1; V cond N2; V += STEP) BODY; where COND is "<" or ">", we generate pseudocode if (cond is <) adj = STEP - 1; else adj = STEP + 1; n = (adj + N2 - N1) / STEP; trip = 0; L0: s0 = (trip * nthreads + threadid) * CHUNK; e0 = min(s0 + CHUNK, n); if (s0 < n) goto L1; else goto L4; L1: V = s0 * STEP + N1; e = e0 * STEP + N1; L2: BODY; V += STEP; if (V cond e) goto L2; else goto L3; L3: trip += 1; goto L0; L4: */ static void expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd) { tree l0, l1, l2, l3, l4, n, s0, e0, e, t; tree trip, nthreads, threadid; tree type; basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb; basic_block trip_update_bb, cont_bb, fin_bb; tree list; block_stmt_iterator si; type = TREE_TYPE (fd->v); entry_bb = region->entry; iter_part_bb = create_empty_bb (entry_bb); seq_start_bb = create_empty_bb (iter_part_bb); body_bb = single_succ (entry_bb); cont_bb = region->cont; trip_update_bb = create_empty_bb (cont_bb); fin_bb = single_succ (cont_bb); exit_bb = region->exit; l0 = tree_block_label (iter_part_bb); l1 = tree_block_label (seq_start_bb); l2 = tree_block_label (body_bb); l3 = tree_block_label (trip_update_bb); l4 = tree_block_label (fin_bb); /* Trip and adjustment setup goes in ENTRY_BB. */ list = alloc_stmt_list (); t = built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS]; t = build_function_call_expr (t, NULL); t = fold_convert (type, t); nthreads = get_formal_tmp_var (t, &list); t = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM]; t = build_function_call_expr (t, NULL); t = fold_convert (type, t); threadid = get_formal_tmp_var (t, &list); fd->n1 = fold_convert (type, fd->n1); if (!is_gimple_val (fd->n1)) fd->n1 = get_formal_tmp_var (fd->n1, &list); fd->n2 = fold_convert (type, fd->n2); if (!is_gimple_val (fd->n2)) fd->n2 = get_formal_tmp_var (fd->n2, &list); fd->step = fold_convert (type, fd->step); if (!is_gimple_val (fd->step)) fd->step = get_formal_tmp_var (fd->step, &list); fd->chunk_size = fold_convert (type, fd->chunk_size); if (!is_gimple_val (fd->chunk_size)) fd->chunk_size = get_formal_tmp_var (fd->chunk_size, &list); t = build_int_cst (type, (fd->cond_code == LT_EXPR ? -1 : 1)); t = fold_build2 (PLUS_EXPR, type, fd->step, t); t = fold_build2 (PLUS_EXPR, type, t, fd->n2); t = fold_build2 (MINUS_EXPR, type, t, fd->n1); t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step); t = fold_convert (type, t); if (is_gimple_val (t)) n = t; else n = get_formal_tmp_var (t, &list); t = build_int_cst (type, 0); trip = get_initialized_tmp_var (t, &list, NULL); si = bsi_last (entry_bb); gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR); bsi_insert_after (&si, list, BSI_SAME_STMT); bsi_remove (&si, true); /* Iteration space partitioning goes in ITER_PART_BB. */ list = alloc_stmt_list (); t = build2 (MULT_EXPR, type, trip, nthreads); t = build2 (PLUS_EXPR, type, t, threadid); t = build2 (MULT_EXPR, type, t, fd->chunk_size); s0 = get_formal_tmp_var (t, &list); t = build2 (PLUS_EXPR, type, s0, fd->chunk_size); t = build2 (MIN_EXPR, type, t, n); e0 = get_formal_tmp_var (t, &list); t = build2 (LT_EXPR, boolean_type_node, s0, n); t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l1), build_and_jump (&l4)); append_to_statement_list (t, &list); si = bsi_start (iter_part_bb); bsi_insert_after (&si, list, BSI_CONTINUE_LINKING); /* Setup code for sequential iteration goes in SEQ_START_BB. 
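   Continuing with invented numbers: for n=10, CHUNK=3 and
   nthreads=2, trip 0 gives thread 0 the window [0,3) and thread 1
   [3,6); trip 1 gives them [6,9) and [9,10); on trip 2 every
   s0 >= n, so both threads exit through L4.  As in the no-chunk
   variant, V = s0 * STEP + N1 and e = e0 * STEP + N1 map each
   window back to the user's iteration space.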
*/ list = alloc_stmt_list (); t = fold_convert (type, s0); t = build2 (MULT_EXPR, type, t, fd->step); t = build2 (PLUS_EXPR, type, t, fd->n1); t = build2 (MODIFY_EXPR, void_type_node, fd->v, t); gimplify_and_add (t, &list); t = fold_convert (type, e0); t = build2 (MULT_EXPR, type, t, fd->step); t = build2 (PLUS_EXPR, type, t, fd->n1); e = get_formal_tmp_var (t, &list); si = bsi_start (seq_start_bb); bsi_insert_after (&si, list, BSI_CONTINUE_LINKING); /* The code controlling the sequential loop goes in CONT_BB, replacing the OMP_CONTINUE. */ list = alloc_stmt_list (); t = build2 (PLUS_EXPR, type, fd->v, fd->step); t = build2 (MODIFY_EXPR, void_type_node, fd->v, t); gimplify_and_add (t, &list); t = build2 (fd->cond_code, boolean_type_node, fd->v, e); t = get_formal_tmp_var (t, &list); t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l2), build_and_jump (&l3)); append_to_statement_list (t, &list); si = bsi_last (cont_bb); gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE); bsi_insert_after (&si, list, BSI_SAME_STMT); bsi_remove (&si, true); /* Trip update code goes into TRIP_UPDATE_BB. */ list = alloc_stmt_list (); t = build_int_cst (type, 1); t = build2 (PLUS_EXPR, type, trip, t); t = build2 (MODIFY_EXPR, void_type_node, trip, t); gimplify_and_add (t, &list); si = bsi_start (trip_update_bb); bsi_insert_after (&si, list, BSI_CONTINUE_LINKING); /* Replace the OMP_RETURN with a barrier, or nothing. */ si = bsi_last (exit_bb); if (!OMP_RETURN_NOWAIT (bsi_stmt (si))) { list = alloc_stmt_list (); build_omp_barrier (&list); bsi_insert_after (&si, list, BSI_SAME_STMT); } bsi_remove (&si, true); /* Connect the new blocks. */ remove_edge (single_succ_edge (entry_bb)); make_edge (entry_bb, iter_part_bb, EDGE_FALLTHRU); make_edge (iter_part_bb, seq_start_bb, EDGE_TRUE_VALUE); make_edge (iter_part_bb, fin_bb, EDGE_FALSE_VALUE); make_edge (seq_start_bb, body_bb, EDGE_FALLTHRU); remove_edge (single_succ_edge (cont_bb)); make_edge (cont_bb, body_bb, EDGE_TRUE_VALUE); make_edge (cont_bb, trip_update_bb, EDGE_FALSE_VALUE); make_edge (trip_update_bb, iter_part_bb, EDGE_FALLTHRU); } /* Expand the OpenMP loop defined by REGION. */ static void expand_omp_for (struct omp_region *region) { struct omp_for_data fd; push_gimplify_context (); extract_omp_for_data (last_stmt (region->entry), &fd); region->sched_kind = fd.sched_kind; if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC && !fd.have_ordered && region->cont && region->exit) { if (fd.chunk_size == NULL) expand_omp_for_static_nochunk (region, &fd); else expand_omp_for_static_chunk (region, &fd); } else { int fn_index = fd.sched_kind + fd.have_ordered * 4; int start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index; int next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index; expand_omp_for_generic (region, &fd, start_ix, next_ix); } pop_gimplify_context (NULL); } /* Expand code for an OpenMP sections directive. In pseudo code, we generate v = GOMP_sections_start (n); L0: switch (v) { case 0: goto L2; case 1: section 1; goto L1; case 2: ... case n: ... default: abort (); } L1: v = GOMP_sections_next (); goto L0; L2: reduction; If this is a combined parallel sections, replace the call to GOMP_sections_start with 'goto L1'. 
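   For example (fragment invented), a directive with two sections

	#pragma omp sections
	{
	  #pragma omp section
	    a ();
	  #pragma omp section
	    b ();
	}

   yields a switch whose cases 1 and 2 run a () and b (), whose
   case 0 jumps to L2 when no work remains, and whose default case
   traps.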
*/ static void expand_omp_sections (struct omp_region *region) { tree label_vec, l0, l1, l2, t, u, v, sections_stmt; unsigned i, len; basic_block entry_bb, exit_bb, l0_bb, l1_bb, l2_bb, default_bb; block_stmt_iterator si; struct omp_region *inner; edge e; entry_bb = region->entry; l0_bb = create_empty_bb (entry_bb); l0 = tree_block_label (l0_bb); gcc_assert ((region->cont != NULL) ^ (region->exit == NULL)); l1_bb = region->cont; if (l1_bb) { l2_bb = single_succ (l1_bb); default_bb = create_empty_bb (l1_bb->prev_bb); l1 = tree_block_label (l1_bb); } else { l2_bb = create_empty_bb (l0_bb); default_bb = l2_bb; l1 = NULL; } l2 = tree_block_label (l2_bb); exit_bb = region->exit; v = create_tmp_var (unsigned_type_node, ".section"); /* We will build a switch() with enough cases for all the OMP_SECTION regions, a '0' case to handle the end of more work and a default case to abort if something goes wrong. */ len = EDGE_COUNT (entry_bb->succs); label_vec = make_tree_vec (len + 2); /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the OMP_SECTIONS statement. */ si = bsi_last (entry_bb); sections_stmt = bsi_stmt (si); gcc_assert (TREE_CODE (sections_stmt) == OMP_SECTIONS); if (!is_combined_parallel (region)) { /* If we are not inside a combined parallel+sections region, call GOMP_sections_start. */ t = build_int_cst (unsigned_type_node, len); t = tree_cons (NULL, t, NULL); u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START]; t = build_function_call_expr (u, t); t = build2 (MODIFY_EXPR, void_type_node, v, t); bsi_insert_after (&si, t, BSI_SAME_STMT); } bsi_remove (&si, true); /* The switch() statement replacing OMP_SECTIONS goes in L0_BB. */ si = bsi_start (l0_bb); t = build3 (SWITCH_EXPR, void_type_node, v, NULL, label_vec); bsi_insert_after (&si, t, BSI_CONTINUE_LINKING); t = build3 (CASE_LABEL_EXPR, void_type_node, build_int_cst (unsigned_type_node, 0), NULL, l2); TREE_VEC_ELT (label_vec, 0) = t; make_edge (l0_bb, l2_bb, 0); /* Convert each OMP_SECTION into a CASE_LABEL_EXPR. */ for (inner = region->inner, i = 1; inner; inner = inner->next, ++i) { basic_block s_entry_bb, s_exit_bb; s_entry_bb = inner->entry; s_exit_bb = inner->exit; t = tree_block_label (s_entry_bb); u = build_int_cst (unsigned_type_node, i); u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t); TREE_VEC_ELT (label_vec, i) = u; si = bsi_last (s_entry_bb); gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SECTION); gcc_assert (i < len || OMP_SECTION_LAST (bsi_stmt (si))); bsi_remove (&si, true); e = single_pred_edge (s_entry_bb); e->flags = 0; redirect_edge_pred (e, l0_bb); single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU; if (s_exit_bb == NULL) continue; si = bsi_last (s_exit_bb); gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN); bsi_remove (&si, true); single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU; } /* Error handling code goes in DEFAULT_BB. */ t = tree_block_label (default_bb); u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t); TREE_VEC_ELT (label_vec, len + 1) = u; make_edge (l0_bb, default_bb, 0); si = bsi_start (default_bb); t = built_in_decls[BUILT_IN_TRAP]; t = build_function_call_expr (t, NULL); bsi_insert_after (&si, t, BSI_CONTINUE_LINKING); /* Code to get the next section goes in L1_BB. 
*/ if (l1_bb) { si = bsi_last (l1_bb); gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE); t = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT]; t = build_function_call_expr (t, NULL); t = build2 (MODIFY_EXPR, void_type_node, v, t); bsi_insert_after (&si, t, BSI_SAME_STMT); bsi_remove (&si, true); } /* Cleanup function replaces OMP_RETURN in EXIT_BB. */ if (exit_bb) { si = bsi_last (exit_bb); if (OMP_RETURN_NOWAIT (bsi_stmt (si))) t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT]; else t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END]; t = build_function_call_expr (t, NULL); bsi_insert_after (&si, t, BSI_SAME_STMT); bsi_remove (&si, true); } /* Connect the new blocks. */ if (is_combined_parallel (region)) { /* If this was a combined parallel+sections region, we did not emit a GOMP_sections_start in the entry block, so we just need to jump to L1_BB to get the next section. */ make_edge (entry_bb, l1_bb, EDGE_FALLTHRU); } else make_edge (entry_bb, l0_bb, EDGE_FALLTHRU); if (l1_bb) { e = single_succ_edge (l1_bb); redirect_edge_succ (e, l0_bb); e->flags = EDGE_FALLTHRU; } } /* Expand code for an OpenMP single directive. We've already expanded much of the code, here we simply place the GOMP_barrier call. */ static void expand_omp_single (struct omp_region *region) { basic_block entry_bb, exit_bb; block_stmt_iterator si; bool need_barrier = false; entry_bb = region->entry; exit_bb = region->exit; si = bsi_last (entry_bb); /* The terminal barrier at the end of a GOMP_single_copy sequence cannot be removed. We need to ensure that the thread that entered the single does not exit before the data is copied out by the other threads. */ if (find_omp_clause (OMP_SINGLE_CLAUSES (bsi_stmt (si)), OMP_CLAUSE_COPYPRIVATE)) need_barrier = true; gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SINGLE); bsi_remove (&si, true); single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU; si = bsi_last (exit_bb); if (!OMP_RETURN_NOWAIT (bsi_stmt (si)) || need_barrier) { tree t = alloc_stmt_list (); build_omp_barrier (&t); bsi_insert_after (&si, t, BSI_SAME_STMT); } bsi_remove (&si, true); single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU; } /* Generic expansion for OpenMP synchronization directives: master, ordered and critical. All we need to do here is remove the entry and exit markers for REGION. */ static void expand_omp_synch (struct omp_region *region) { basic_block entry_bb, exit_bb; block_stmt_iterator si; entry_bb = region->entry; exit_bb = region->exit; si = bsi_last (entry_bb); gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SINGLE || TREE_CODE (bsi_stmt (si)) == OMP_MASTER || TREE_CODE (bsi_stmt (si)) == OMP_ORDERED || TREE_CODE (bsi_stmt (si)) == OMP_CRITICAL); bsi_remove (&si, true); single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU; if (exit_bb) { si = bsi_last (exit_bb); gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN); bsi_remove (&si, true); single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU; } } /* Expand the parallel region tree rooted at REGION. Expansion proceeds in depth-first order. Innermost regions are expanded first. This way, parallel regions that require a new function to be created (e.g., OMP_PARALLEL) can be expanded without having any internal dependencies in their body. 
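   For instance, in '#pragma omp parallel' containing '#pragma omp
   for', the inner loop region is expanded into its GOMP_loop_*
   calls first, so that by the time expand_omp_parallel outlines the
   body into the child function, that body is already plain GIMPLE
   with no OMP directives left in it.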
*/ static void expand_omp (struct omp_region *region) { while (region) { if (region->inner) expand_omp (region->inner); switch (region->type) { case OMP_PARALLEL: expand_omp_parallel (region); break; case OMP_FOR: expand_omp_for (region); break; case OMP_SECTIONS: expand_omp_sections (region); break; case OMP_SECTION: /* Individual omp sections are handled together with their parent OMP_SECTIONS region. */ break; case OMP_SINGLE: expand_omp_single (region); break; case OMP_MASTER: case OMP_ORDERED: case OMP_CRITICAL: expand_omp_synch (region); break; default: gcc_unreachable (); } region = region->next; } } /* Helper for build_omp_regions. Scan the dominator tree starting at block BB. PARENT is the region that contains BB. */ static void build_omp_regions_1 (basic_block bb, struct omp_region *parent) { block_stmt_iterator si; tree stmt; basic_block son; si = bsi_last (bb); if (!bsi_end_p (si) && OMP_DIRECTIVE_P (bsi_stmt (si))) { struct omp_region *region; enum tree_code code; stmt = bsi_stmt (si); code = TREE_CODE (stmt); if (code == OMP_RETURN) { /* STMT is the return point out of region PARENT. Mark it as the exit point and make PARENT the immediately enclosing region. */ gcc_assert (parent); region = parent; region->exit = bb; parent = parent->outer; /* If REGION is a parallel region, determine whether it is a combined parallel+workshare region. */ if (region->type == OMP_PARALLEL) determine_parallel_type (region); } else if (code == OMP_CONTINUE) { gcc_assert (parent); parent->cont = bb; } else { /* Otherwise, this directive becomes the parent for a new region. */ region = new_omp_region (bb, code, parent); parent = region; } } for (son = first_dom_son (CDI_DOMINATORS, bb); son; son = next_dom_son (CDI_DOMINATORS, son)) build_omp_regions_1 (son, parent); } /* Scan the CFG and build a tree of OMP regions. Return the root of the OMP region tree. */ static void build_omp_regions (void) { gcc_assert (root_omp_region == NULL); calculate_dominance_info (CDI_DOMINATORS); build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL); } /* Main entry point for expanding OMP-GIMPLE into runtime calls. */ static unsigned int execute_expand_omp (void) { build_omp_regions (); if (!root_omp_region) return 0; if (dump_file) { fprintf (dump_file, "\nOMP region tree\n\n"); dump_omp_region (dump_file, root_omp_region, 0); fprintf (dump_file, "\n"); } remove_exit_barriers (root_omp_region); expand_omp (root_omp_region); free_dominance_info (CDI_DOMINATORS); free_dominance_info (CDI_POST_DOMINATORS); cleanup_tree_cfg (); free_omp_regions (); return 0; } static bool gate_expand_omp (void) { return flag_openmp != 0 && errorcount == 0; } struct tree_opt_pass pass_expand_omp = { "ompexp", /* name */ gate_expand_omp, /* gate */ execute_expand_omp, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ 0, /* tv_id */ PROP_gimple_any, /* properties_required */ PROP_gimple_lomp, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_dump_func, /* todo_flags_finish */ 0 /* letter */ }; /* Routines to lower OpenMP directives into OMP-GIMPLE. */ /* Lower the OpenMP sections directive in *STMT_P. 
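   Schematically, the directive is rebuilt as the sequence

	<clause setup (ilist)>
	OMP_SECTIONS
	BIND_EXPR containing the section bodies (+ lastprivate)
	OMP_CONTINUE
	<reduction merges (olist)>
	<destructors (dlist)>
	OMP_RETURN

   which pass_expand_omp later turns into the switch-based form
   documented above expand_omp_sections.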
*/ static void lower_omp_sections (tree *stmt_p, omp_context *ctx) { tree new_stmt, stmt, body, bind, block, ilist, olist, new_body; tree t, dlist; tree_stmt_iterator tsi; unsigned i, len; stmt = *stmt_p; push_gimplify_context (); dlist = NULL; ilist = NULL; lower_rec_input_clauses (OMP_SECTIONS_CLAUSES (stmt), &ilist, &dlist, ctx); tsi = tsi_start (OMP_SECTIONS_BODY (stmt)); for (len = 0; !tsi_end_p (tsi); len++, tsi_next (&tsi)) continue; tsi = tsi_start (OMP_SECTIONS_BODY (stmt)); body = alloc_stmt_list (); for (i = 0; i < len; i++, tsi_next (&tsi)) { omp_context *sctx; tree sec_start, sec_end; sec_start = tsi_stmt (tsi); sctx = maybe_lookup_ctx (sec_start); gcc_assert (sctx); append_to_statement_list (sec_start, &body); lower_omp (&OMP_SECTION_BODY (sec_start), sctx); append_to_statement_list (OMP_SECTION_BODY (sec_start), &body); OMP_SECTION_BODY (sec_start) = NULL; if (i == len - 1) { tree l = alloc_stmt_list (); lower_lastprivate_clauses (OMP_SECTIONS_CLAUSES (stmt), NULL, &l, ctx); append_to_statement_list (l, &body); OMP_SECTION_LAST (sec_start) = 1; } sec_end = make_node (OMP_RETURN); append_to_statement_list (sec_end, &body); } block = make_node (BLOCK); bind = build3 (BIND_EXPR, void_type_node, NULL, body, block); olist = NULL_TREE; lower_reduction_clauses (OMP_SECTIONS_CLAUSES (stmt), &olist, ctx); pop_gimplify_context (NULL_TREE); record_vars_into (ctx->block_vars, ctx->cb.dst_fn); new_stmt = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL); TREE_SIDE_EFFECTS (new_stmt) = 1; new_body = alloc_stmt_list (); append_to_statement_list (ilist, &new_body); append_to_statement_list (stmt, &new_body); append_to_statement_list (bind, &new_body); t = make_node (OMP_CONTINUE); append_to_statement_list (t, &new_body); append_to_statement_list (olist, &new_body); append_to_statement_list (dlist, &new_body); maybe_catch_exception (&new_body); t = make_node (OMP_RETURN); OMP_RETURN_NOWAIT (t) = !!find_omp_clause (OMP_SECTIONS_CLAUSES (stmt), OMP_CLAUSE_NOWAIT); append_to_statement_list (t, &new_body); BIND_EXPR_BODY (new_stmt) = new_body; OMP_SECTIONS_BODY (stmt) = NULL; *stmt_p = new_stmt; } /* A subroutine of lower_omp_single. Expand the simple form of an OMP_SINGLE, without a copyprivate clause: if (GOMP_single_start ()) BODY; [ GOMP_barrier (); ] -> unless 'nowait' is present. FIXME. It may be better to delay expanding the logic of this until pass_expand_omp. The expanded logic may make the job more difficult to a synchronization analysis pass. */ static void lower_omp_single_simple (tree single_stmt, tree *pre_p) { tree t; t = built_in_decls[BUILT_IN_GOMP_SINGLE_START]; t = build_function_call_expr (t, NULL); t = build3 (COND_EXPR, void_type_node, t, OMP_SINGLE_BODY (single_stmt), NULL); gimplify_and_add (t, pre_p); } /* A subroutine of lower_omp_single. Expand the simple form of an OMP_SINGLE, with a copyprivate clause: #pragma omp single copyprivate (a, b, c) Create a new structure to hold copies of 'a', 'b' and 'c' and emit: { if ((copyout_p = GOMP_single_copy_start ()) == NULL) { BODY; copyout.a = a; copyout.b = b; copyout.c = c; GOMP_single_copy_end (&copyout); } else { a = copyout_p->a; b = copyout_p->b; c = copyout_p->c; } GOMP_barrier (); } FIXME. It may be better to delay expanding the logic of this until pass_expand_omp. The expanded logic may make the job more difficult to a synchronization analysis pass. 
*/ static void lower_omp_single_copy (tree single_stmt, tree *pre_p, omp_context *ctx) { tree ptr_type, t, args, l0, l1, l2, copyin_seq; ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o"); ptr_type = build_pointer_type (ctx->record_type); ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i"); l0 = create_artificial_label (); l1 = create_artificial_label (); l2 = create_artificial_label (); t = built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START]; t = build_function_call_expr (t, NULL); t = fold_convert (ptr_type, t); t = build2 (MODIFY_EXPR, void_type_node, ctx->receiver_decl, t); gimplify_and_add (t, pre_p); t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl, build_int_cst (ptr_type, 0)); t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l0), build_and_jump (&l1)); gimplify_and_add (t, pre_p); t = build1 (LABEL_EXPR, void_type_node, l0); gimplify_and_add (t, pre_p); append_to_statement_list (OMP_SINGLE_BODY (single_stmt), pre_p); copyin_seq = NULL; lower_copyprivate_clauses (OMP_SINGLE_CLAUSES (single_stmt), pre_p, &copyin_seq, ctx); t = build_fold_addr_expr (ctx->sender_decl); args = tree_cons (NULL, t, NULL); t = built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END]; t = build_function_call_expr (t, args); gimplify_and_add (t, pre_p); t = build_and_jump (&l2); gimplify_and_add (t, pre_p); t = build1 (LABEL_EXPR, void_type_node, l1); gimplify_and_add (t, pre_p); append_to_statement_list (copyin_seq, pre_p); t = build1 (LABEL_EXPR, void_type_node, l2); gimplify_and_add (t, pre_p); } /* Expand code for an OpenMP single directive. */ static void lower_omp_single (tree *stmt_p, omp_context *ctx) { tree t, bind, block, single_stmt = *stmt_p, dlist; push_gimplify_context (); block = make_node (BLOCK); *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block); TREE_SIDE_EFFECTS (bind) = 1; lower_rec_input_clauses (OMP_SINGLE_CLAUSES (single_stmt), &BIND_EXPR_BODY (bind), &dlist, ctx); lower_omp (&OMP_SINGLE_BODY (single_stmt), ctx); append_to_statement_list (single_stmt, &BIND_EXPR_BODY (bind)); if (ctx->record_type) lower_omp_single_copy (single_stmt, &BIND_EXPR_BODY (bind), ctx); else lower_omp_single_simple (single_stmt, &BIND_EXPR_BODY (bind)); OMP_SINGLE_BODY (single_stmt) = NULL; append_to_statement_list (dlist, &BIND_EXPR_BODY (bind)); maybe_catch_exception (&BIND_EXPR_BODY (bind)); t = make_node (OMP_RETURN); OMP_RETURN_NOWAIT (t) = !!find_omp_clause (OMP_SINGLE_CLAUSES (single_stmt), OMP_CLAUSE_NOWAIT); append_to_statement_list (t, &BIND_EXPR_BODY (bind)); pop_gimplify_context (bind); BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars); BLOCK_VARS (block) = BIND_EXPR_VARS (bind); } /* Expand code for an OpenMP master directive. 
*/ static void lower_omp_master (tree *stmt_p, omp_context *ctx) { tree bind, block, stmt = *stmt_p, lab = NULL, x; push_gimplify_context (); block = make_node (BLOCK); *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block); TREE_SIDE_EFFECTS (bind) = 1; append_to_statement_list (stmt, &BIND_EXPR_BODY (bind)); x = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM]; x = build_function_call_expr (x, NULL); x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node); x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab)); gimplify_and_add (x, &BIND_EXPR_BODY (bind)); lower_omp (&OMP_MASTER_BODY (stmt), ctx); maybe_catch_exception (&OMP_MASTER_BODY (stmt)); append_to_statement_list (OMP_MASTER_BODY (stmt), &BIND_EXPR_BODY (bind)); OMP_MASTER_BODY (stmt) = NULL; x = build1 (LABEL_EXPR, void_type_node, lab); gimplify_and_add (x, &BIND_EXPR_BODY (bind)); x = make_node (OMP_RETURN); OMP_RETURN_NOWAIT (x) = 1; append_to_statement_list (x, &BIND_EXPR_BODY (bind)); pop_gimplify_context (bind); BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars); BLOCK_VARS (block) = BIND_EXPR_VARS (bind); } /* Expand code for an OpenMP ordered directive. */ static void lower_omp_ordered (tree *stmt_p, omp_context *ctx) { tree bind, block, stmt = *stmt_p, x; push_gimplify_context (); block = make_node (BLOCK); *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block); TREE_SIDE_EFFECTS (bind) = 1; append_to_statement_list (stmt, &BIND_EXPR_BODY (bind)); x = built_in_decls[BUILT_IN_GOMP_ORDERED_START]; x = build_function_call_expr (x, NULL); gimplify_and_add (x, &BIND_EXPR_BODY (bind)); lower_omp (&OMP_ORDERED_BODY (stmt), ctx); maybe_catch_exception (&OMP_ORDERED_BODY (stmt)); append_to_statement_list (OMP_ORDERED_BODY (stmt), &BIND_EXPR_BODY (bind)); OMP_ORDERED_BODY (stmt) = NULL; x = built_in_decls[BUILT_IN_GOMP_ORDERED_END]; x = build_function_call_expr (x, NULL); gimplify_and_add (x, &BIND_EXPR_BODY (bind)); x = make_node (OMP_RETURN); OMP_RETURN_NOWAIT (x) = 1; append_to_statement_list (x, &BIND_EXPR_BODY (bind)); pop_gimplify_context (bind); BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars); BLOCK_VARS (block) = BIND_EXPR_VARS (bind); } /* Gimplify an OMP_CRITICAL statement. This is a relatively simple substitution of a couple of function calls. But in the NAMED case, requires that languages coordinate a symbol name. It is therefore best put here in common code. 
*/ static GTY((param1_is (tree), param2_is (tree))) splay_tree critical_name_mutexes; static void lower_omp_critical (tree *stmt_p, omp_context *ctx) { tree bind, block, stmt = *stmt_p; tree t, lock, unlock, name; name = OMP_CRITICAL_NAME (stmt); if (name) { tree decl, args; splay_tree_node n; if (!critical_name_mutexes) critical_name_mutexes = splay_tree_new_ggc (splay_tree_compare_pointers); n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name); if (n == NULL) { char *new_str; decl = create_tmp_var_raw (ptr_type_node, NULL); new_str = ACONCAT ((".gomp_critical_user_", IDENTIFIER_POINTER (name), NULL)); DECL_NAME (decl) = get_identifier (new_str); TREE_PUBLIC (decl) = 1; TREE_STATIC (decl) = 1; DECL_COMMON (decl) = 1; DECL_ARTIFICIAL (decl) = 1; DECL_IGNORED_P (decl) = 1; cgraph_varpool_finalize_decl (decl); splay_tree_insert (critical_name_mutexes, (splay_tree_key) name, (splay_tree_value) decl); } else decl = (tree) n->value; args = tree_cons (NULL, build_fold_addr_expr (decl), NULL); lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START]; lock = build_function_call_expr (lock, args); args = tree_cons (NULL, build_fold_addr_expr (decl), NULL); unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END]; unlock = build_function_call_expr (unlock, args); } else { lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START]; lock = build_function_call_expr (lock, NULL); unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END]; unlock = build_function_call_expr (unlock, NULL); } push_gimplify_context (); block = make_node (BLOCK); *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block); TREE_SIDE_EFFECTS (bind) = 1; append_to_statement_list (stmt, &BIND_EXPR_BODY (bind)); gimplify_and_add (lock, &BIND_EXPR_BODY (bind)); lower_omp (&OMP_CRITICAL_BODY (stmt), ctx); maybe_catch_exception (&OMP_CRITICAL_BODY (stmt)); append_to_statement_list (OMP_CRITICAL_BODY (stmt), &BIND_EXPR_BODY (bind)); OMP_CRITICAL_BODY (stmt) = NULL; gimplify_and_add (unlock, &BIND_EXPR_BODY (bind)); t = make_node (OMP_RETURN); OMP_RETURN_NOWAIT (t) = 1; append_to_statement_list (t, &BIND_EXPR_BODY (bind)); pop_gimplify_context (bind); BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars); BLOCK_VARS (block) = BIND_EXPR_VARS (bind); } /* A subroutine of lower_omp_for. Generate code to emit the predicate for a lastprivate clause. Given a loop control predicate of (V cond N2), we gate the clause on (!(V cond N2)). The lowered form is appended to *DLIST, iterator initialization is appended to *BODY_P. */ static void lower_omp_for_lastprivate (struct omp_for_data *fd, tree *body_p, tree *dlist, struct omp_context *ctx) { tree clauses, cond, stmts, vinit, t; enum tree_code cond_code; cond_code = fd->cond_code; cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR; /* When possible, use a strict equality expression. This can let VRP type optimizations deduce the value and remove a copy. */ if (host_integerp (fd->step, 0)) { HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->step); if (step == 1 || step == -1) cond_code = EQ_EXPR; } cond = build2 (cond_code, boolean_type_node, fd->v, fd->n2); clauses = OMP_FOR_CLAUSES (fd->for_stmt); stmts = NULL; lower_lastprivate_clauses (clauses, cond, &stmts, ctx); if (stmts != NULL) { append_to_statement_list (stmts, dlist); /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */ vinit = fd->n1; if (cond_code == EQ_EXPR && host_integerp (fd->n2, 0) && ! 
integer_zerop (fd->n2)) vinit = build_int_cst (TREE_TYPE (fd->v), 0); /* Initialize the iterator variable, so that threads that don't execute any iterations don't execute the lastprivate clauses by accident. */ t = build2 (MODIFY_EXPR, void_type_node, fd->v, vinit); gimplify_and_add (t, body_p); } } /* Lower code for an OpenMP loop directive. */ static void lower_omp_for (tree *stmt_p, omp_context *ctx) { tree t, stmt, ilist, dlist, new_stmt, *body_p, *rhs_p; struct omp_for_data fd; stmt = *stmt_p; push_gimplify_context (); lower_omp (&OMP_FOR_PRE_BODY (stmt), ctx); lower_omp (&OMP_FOR_BODY (stmt), ctx); /* Move declaration of temporaries in the loop body before we make it go away. */ if (TREE_CODE (OMP_FOR_BODY (stmt)) == BIND_EXPR) record_vars_into (BIND_EXPR_VARS (OMP_FOR_BODY (stmt)), ctx->cb.dst_fn); new_stmt = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL); TREE_SIDE_EFFECTS (new_stmt) = 1; body_p = &BIND_EXPR_BODY (new_stmt); /* The pre-body and input clauses go before the lowered OMP_FOR. */ ilist = NULL; dlist = NULL; append_to_statement_list (OMP_FOR_PRE_BODY (stmt), body_p); lower_rec_input_clauses (OMP_FOR_CLAUSES (stmt), body_p, &dlist, ctx); /* Lower the header expressions. At this point, we can assume that the header is of the form: #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3) We just need to make sure that VAL1, VAL2 and VAL3 are lowered using the .omp_data_s mapping, if needed. */ rhs_p = &TREE_OPERAND (OMP_FOR_INIT (stmt), 1); if (!is_gimple_min_invariant (*rhs_p)) *rhs_p = get_formal_tmp_var (*rhs_p, body_p); rhs_p = &TREE_OPERAND (OMP_FOR_COND (stmt), 1); if (!is_gimple_min_invariant (*rhs_p)) *rhs_p = get_formal_tmp_var (*rhs_p, body_p); rhs_p = &TREE_OPERAND (TREE_OPERAND (OMP_FOR_INCR (stmt), 1), 1); if (!is_gimple_min_invariant (*rhs_p)) *rhs_p = get_formal_tmp_var (*rhs_p, body_p); /* Once lowered, extract the bounds and clauses. */ extract_omp_for_data (stmt, &fd); lower_omp_for_lastprivate (&fd, body_p, &dlist, ctx); append_to_statement_list (stmt, body_p); append_to_statement_list (OMP_FOR_BODY (stmt), body_p); t = make_node (OMP_CONTINUE); append_to_statement_list (t, body_p); /* After the loop, add exit clauses. */ lower_reduction_clauses (OMP_FOR_CLAUSES (stmt), body_p, ctx); append_to_statement_list (dlist, body_p); maybe_catch_exception (body_p); /* Region exit marker goes at the end of the loop body. */ t = make_node (OMP_RETURN); OMP_RETURN_NOWAIT (t) = fd.have_nowait; append_to_statement_list (t, body_p); pop_gimplify_context (NULL_TREE); record_vars_into (ctx->block_vars, ctx->cb.dst_fn); OMP_FOR_BODY (stmt) = NULL_TREE; OMP_FOR_PRE_BODY (stmt) = NULL_TREE; *stmt_p = new_stmt; } /* Callback for walk_stmts. Check if *TP only contains OMP_FOR or OMP_PARALLEL. */ static tree check_combined_parallel (tree *tp, int *walk_subtrees, void *data) { struct walk_stmt_info *wi = data; int *info = wi->info; *walk_subtrees = 0; switch (TREE_CODE (*tp)) { case OMP_FOR: case OMP_SECTIONS: *info = *info == 0 ? 1 : -1; break; default: *info = -1; break; } return NULL; } /* Lower the OpenMP parallel directive in *STMT_P. CTX holds context information for the directive. 
*/ static void lower_omp_parallel (tree *stmt_p, omp_context *ctx) { tree clauses, par_bind, par_body, new_body, bind; tree olist, ilist, par_olist, par_ilist; tree stmt, child_fn, t; stmt = *stmt_p; clauses = OMP_PARALLEL_CLAUSES (stmt); par_bind = OMP_PARALLEL_BODY (stmt); par_body = BIND_EXPR_BODY (par_bind); child_fn = ctx->cb.dst_fn; if (!OMP_PARALLEL_COMBINED (stmt)) { struct walk_stmt_info wi; int ws_num = 0; memset (&wi, 0, sizeof (wi)); wi.callback = check_combined_parallel; wi.info = &ws_num; wi.val_only = true; walk_stmts (&wi, &par_bind); if (ws_num == 1) OMP_PARALLEL_COMBINED (stmt) = 1; } push_gimplify_context (); par_olist = NULL_TREE; par_ilist = NULL_TREE; lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx); lower_omp (&par_body, ctx); lower_reduction_clauses (clauses, &par_olist, ctx); /* Declare all the variables created by mapping and the variables declared in the scope of the parallel body. */ record_vars_into (ctx->block_vars, child_fn); record_vars_into (BIND_EXPR_VARS (par_bind), child_fn); if (ctx->record_type) { ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_data_o"); OMP_PARALLEL_DATA_ARG (stmt) = ctx->sender_decl; } olist = NULL_TREE; ilist = NULL_TREE; lower_send_clauses (clauses, &ilist, &olist, ctx); lower_send_shared_vars (&ilist, &olist, ctx); /* Once all the expansions are done, sequence all the different fragments inside OMP_PARALLEL_BODY. */ bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL); append_to_statement_list (ilist, &BIND_EXPR_BODY (bind)); new_body = alloc_stmt_list (); if (ctx->record_type) { t = build_fold_addr_expr (ctx->sender_decl); /* fixup_child_record_type might have changed receiver_decl's type. */ t = fold_convert (TREE_TYPE (ctx->receiver_decl), t); t = build2 (MODIFY_EXPR, void_type_node, ctx->receiver_decl, t); append_to_statement_list (t, &new_body); } append_to_statement_list (par_ilist, &new_body); append_to_statement_list (par_body, &new_body); append_to_statement_list (par_olist, &new_body); maybe_catch_exception (&new_body); t = make_node (OMP_RETURN); append_to_statement_list (t, &new_body); OMP_PARALLEL_BODY (stmt) = new_body; append_to_statement_list (stmt, &BIND_EXPR_BODY (bind)); append_to_statement_list (olist, &BIND_EXPR_BODY (bind)); *stmt_p = bind; pop_gimplify_context (NULL_TREE); } /* Pass *TP back through the gimplifier within the context determined by WI. This handles replacement of DECL_VALUE_EXPR, as well as adjusting the flags on ADDR_EXPR. */ static void lower_regimplify (tree *tp, struct walk_stmt_info *wi) { enum gimplify_status gs; tree pre = NULL; if (wi->is_lhs) gs = gimplify_expr (tp, &pre, NULL, is_gimple_lvalue, fb_lvalue); else if (wi->val_only) gs = gimplify_expr (tp, &pre, NULL, is_gimple_val, fb_rvalue); else gs = gimplify_expr (tp, &pre, NULL, is_gimple_formal_tmp_var, fb_rvalue); gcc_assert (gs == GS_ALL_DONE); if (pre) tsi_link_before (&wi->tsi, pre, TSI_SAME_STMT); } /* Copy EXP into a temporary. Insert the initialization statement before TSI. */ static tree init_tmp_var (tree exp, tree_stmt_iterator *tsi) { tree t, stmt; t = create_tmp_var (TREE_TYPE (exp), NULL); if (TREE_CODE (TREE_TYPE (t)) == COMPLEX_TYPE) DECL_COMPLEX_GIMPLE_REG_P (t) = 1; stmt = build2 (MODIFY_EXPR, TREE_TYPE (t), t, exp); SET_EXPR_LOCUS (stmt, EXPR_LOCUS (tsi_stmt (*tsi))); tsi_link_before (tsi, stmt, TSI_SAME_STMT); return t; } /* Similarly, but copy from the temporary and insert the statement after the iterator. 
*/ static tree save_tmp_var (tree exp, tree_stmt_iterator *tsi) { tree t, stmt; t = create_tmp_var (TREE_TYPE (exp), NULL); if (TREE_CODE (TREE_TYPE (t)) == COMPLEX_TYPE) DECL_COMPLEX_GIMPLE_REG_P (t) = 1; stmt = build2 (MODIFY_EXPR, TREE_TYPE (t), exp, t); SET_EXPR_LOCUS (stmt, EXPR_LOCUS (tsi_stmt (*tsi))); tsi_link_after (tsi, stmt, TSI_SAME_STMT); return t; } /* Callback for walk_stmts. Lower the OpenMP directive pointed by TP. */ static tree lower_omp_1 (tree *tp, int *walk_subtrees, void *data) { struct walk_stmt_info *wi = data; omp_context *ctx = wi->info; tree t = *tp; /* If we have issued syntax errors, avoid doing any heavy lifting. Just replace the OpenMP directives with a NOP to avoid confusing RTL expansion. */ if (errorcount && OMP_DIRECTIVE_P (*tp)) { *tp = build_empty_stmt (); return NULL_TREE; } *walk_subtrees = 0; switch (TREE_CODE (*tp)) { case OMP_PARALLEL: ctx = maybe_lookup_ctx (t); lower_omp_parallel (tp, ctx); break; case OMP_FOR: ctx = maybe_lookup_ctx (t); gcc_assert (ctx); lower_omp_for (tp, ctx); break; case OMP_SECTIONS: ctx = maybe_lookup_ctx (t); gcc_assert (ctx); lower_omp_sections (tp, ctx); break; case OMP_SINGLE: ctx = maybe_lookup_ctx (t); gcc_assert (ctx); lower_omp_single (tp, ctx); break; case OMP_MASTER: ctx = maybe_lookup_ctx (t); gcc_assert (ctx); lower_omp_master (tp, ctx); break; case OMP_ORDERED: ctx = maybe_lookup_ctx (t); gcc_assert (ctx); lower_omp_ordered (tp, ctx); break; case OMP_CRITICAL: ctx = maybe_lookup_ctx (t); gcc_assert (ctx); lower_omp_critical (tp, ctx); break; case VAR_DECL: if (ctx && DECL_HAS_VALUE_EXPR_P (t)) { lower_regimplify (&t, wi); if (wi->val_only) { if (wi->is_lhs) t = save_tmp_var (t, &wi->tsi); else t = init_tmp_var (t, &wi->tsi); } *tp = t; } break; case ADDR_EXPR: if (ctx) lower_regimplify (tp, wi); break; case ARRAY_REF: case ARRAY_RANGE_REF: case REALPART_EXPR: case IMAGPART_EXPR: case COMPONENT_REF: case VIEW_CONVERT_EXPR: if (ctx) lower_regimplify (tp, wi); break; case INDIRECT_REF: if (ctx) { wi->is_lhs = false; wi->val_only = true; lower_regimplify (&TREE_OPERAND (t, 0), wi); } break; default: if (!TYPE_P (t) && !DECL_P (t)) *walk_subtrees = 1; break; } return NULL_TREE; } static void lower_omp (tree *stmt_p, omp_context *ctx) { struct walk_stmt_info wi; memset (&wi, 0, sizeof (wi)); wi.callback = lower_omp_1; wi.info = ctx; wi.val_only = true; wi.want_locations = true; walk_stmts (&wi, stmt_p); } /* Main entry point. */ static unsigned int execute_lower_omp (void) { all_contexts = splay_tree_new (splay_tree_compare_pointers, 0, delete_omp_context); scan_omp (&DECL_SAVED_TREE (current_function_decl), NULL); gcc_assert (parallel_nesting_level == 0); if (all_contexts->root) lower_omp (&DECL_SAVED_TREE (current_function_decl), NULL); if (all_contexts) { splay_tree_delete (all_contexts); all_contexts = NULL; } return 0; } static bool gate_lower_omp (void) { return flag_openmp != 0; } struct tree_opt_pass pass_lower_omp = { "omplower", /* name */ gate_lower_omp, /* gate */ execute_lower_omp, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ 0, /* tv_id */ PROP_gimple_any, /* properties_required */ PROP_gimple_lomp, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_dump_func, /* todo_flags_finish */ 0 /* letter */ }; /* The following is a utility to diagnose OpenMP structured block violations. It is not part of the "omplower" pass, as that's invoked too late. It should be invoked by the respective front ends after gimplification. 
*/

static splay_tree all_labels;

/* Check for mismatched contexts and generate an error if needed.  Return
   true if an error is detected.  */

static bool
diagnose_sb_0 (tree *stmt_p, tree branch_ctx, tree label_ctx)
{
  bool exit_p = true;

  if ((label_ctx ? TREE_VALUE (label_ctx) : NULL) == branch_ctx)
    return false;

  /* Try to avoid confusing the user by producing an error message with
     the correct "exit" or "enter" verbiage.  We prefer "exit" unless we
     can show that LABEL_CTX is nested within BRANCH_CTX.  */
  if (branch_ctx == NULL)
    exit_p = false;
  else
    {
      while (label_ctx)
	{
	  if (TREE_VALUE (label_ctx) == branch_ctx)
	    {
	      exit_p = false;
	      break;
	    }
	  label_ctx = TREE_CHAIN (label_ctx);
	}
    }

  if (exit_p)
    error ("invalid exit from OpenMP structured block");
  else
    error ("invalid entry to OpenMP structured block");

  *stmt_p = build_empty_stmt ();
  return true;
}

/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
   where in the tree each label is found.  */

static tree
diagnose_sb_1 (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = data;
  tree context = (tree) wi->info;
  tree inner_context;
  tree t = *tp;

  *walk_subtrees = 0;
  switch (TREE_CODE (t))
    {
    case OMP_PARALLEL:
    case OMP_SECTIONS:
    case OMP_SINGLE:
      walk_tree (&OMP_CLAUSES (t), diagnose_sb_1, wi, NULL);
      /* FALLTHRU */
    case OMP_SECTION:
    case OMP_MASTER:
    case OMP_ORDERED:
    case OMP_CRITICAL:
      /* The minimal context here is just a tree of statements.  */
      inner_context = tree_cons (NULL, t, context);
      wi->info = inner_context;
      walk_stmts (wi, &OMP_BODY (t));
      wi->info = context;
      break;

    case OMP_FOR:
      walk_tree (&OMP_FOR_CLAUSES (t), diagnose_sb_1, wi, NULL);
      inner_context = tree_cons (NULL, t, context);
      wi->info = inner_context;
      walk_tree (&OMP_FOR_INIT (t), diagnose_sb_1, wi, NULL);
      walk_tree (&OMP_FOR_COND (t), diagnose_sb_1, wi, NULL);
      walk_tree (&OMP_FOR_INCR (t), diagnose_sb_1, wi, NULL);
      walk_stmts (wi, &OMP_FOR_PRE_BODY (t));
      walk_stmts (wi, &OMP_FOR_BODY (t));
      wi->info = context;
      break;

    case LABEL_EXPR:
      splay_tree_insert (all_labels, (splay_tree_key) LABEL_EXPR_LABEL (t),
			 (splay_tree_value) context);
      break;

    default:
      break;
    }

  return NULL_TREE;
}

/* Pass 2: Check each branch and see if its context differs from that of
   the destination label's context.  */

static tree
diagnose_sb_2 (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = data;
  tree context = (tree) wi->info;
  splay_tree_node n;
  tree t = *tp;

  *walk_subtrees = 0;
  switch (TREE_CODE (t))
    {
    case OMP_PARALLEL:
    case OMP_SECTIONS:
    case OMP_SINGLE:
      walk_tree (&OMP_CLAUSES (t), diagnose_sb_2, wi, NULL);
      /* FALLTHRU */
    case OMP_SECTION:
    case OMP_MASTER:
    case OMP_ORDERED:
    case OMP_CRITICAL:
      wi->info = t;
      walk_stmts (wi, &OMP_BODY (t));
      wi->info = context;
      break;

    case OMP_FOR:
      walk_tree (&OMP_FOR_CLAUSES (t), diagnose_sb_2, wi, NULL);
      wi->info = t;
      walk_tree (&OMP_FOR_INIT (t), diagnose_sb_2, wi, NULL);
      walk_tree (&OMP_FOR_COND (t), diagnose_sb_2, wi, NULL);
      walk_tree (&OMP_FOR_INCR (t), diagnose_sb_2, wi, NULL);
      walk_stmts (wi, &OMP_FOR_PRE_BODY (t));
      walk_stmts (wi, &OMP_FOR_BODY (t));
      wi->info = context;
      break;

    case GOTO_EXPR:
      {
	tree lab = GOTO_DESTINATION (t);
	if (TREE_CODE (lab) != LABEL_DECL)
	  break;

	n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	diagnose_sb_0 (tp, context, n ?
(tree) n->value : NULL_TREE); } break; case SWITCH_EXPR: { tree vec = SWITCH_LABELS (t); int i, len = TREE_VEC_LENGTH (vec); for (i = 0; i < len; ++i) { tree lab = CASE_LABEL (TREE_VEC_ELT (vec, i)); n = splay_tree_lookup (all_labels, (splay_tree_key) lab); if (diagnose_sb_0 (tp, context, (tree) n->value)) break; } } break; case RETURN_EXPR: diagnose_sb_0 (tp, context, NULL_TREE); break; default: break; } return NULL_TREE; } void diagnose_omp_structured_block_errors (tree fndecl) { tree save_current = current_function_decl; struct walk_stmt_info wi; current_function_decl = fndecl; all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0); memset (&wi, 0, sizeof (wi)); wi.callback = diagnose_sb_1; walk_stmts (&wi, &DECL_SAVED_TREE (fndecl)); memset (&wi, 0, sizeof (wi)); wi.callback = diagnose_sb_2; wi.want_locations = true; wi.want_return_expr = true; walk_stmts (&wi, &DECL_SAVED_TREE (fndecl)); splay_tree_delete (all_labels); all_labels = NULL; current_function_decl = save_current; } #include "gt-omp-low.h"
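/* Illustration (not part of omp-low.c): a minimal translation unit that
   trips the structured-block diagnostics above when compiled with
   -fopenmp.  Pass 1 (diagnose_sb_1) records the label `inside' with the
   parallel region as its context; pass 2 (diagnose_sb_2) then reaches the
   GOTO_EXPR with a NULL branch context, so diagnose_sb_0 takes the
   exit_p = false path and reports
   "invalid entry to OpenMP structured block".  */

void
bad_jump (int n)
{
  if (n)
    goto inside;	/* rejected: jumps into the structured block */

#pragma omp parallel
  {
  inside:
    n++;		/* label recorded with the parallel region as context */
  }
}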
utility_ops.h
#ifndef CAFFE2_OPERATORS_UTILITY_OPS_H_ #define CAFFE2_OPERATORS_UTILITY_OPS_H_ #include <fstream> #include <sstream> #include "caffe2/core/context.h" #include "caffe2/core/logging.h" #include "caffe2/core/operator.h" #include "caffe2/utils/math.h" namespace caffe2 { const char kPrintFileExtension[] = ".log"; template <class Context> class PrintOp final : public Operator<Context> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; USE_DISPATCH_HELPER; PrintOp(const OperatorDef& operator_def, Workspace* ws) : Operator<Context>(operator_def, ws), to_file_(OperatorBase::GetSingleArgument<int>("to_file", 0)), limit_(OperatorBase::GetSingleArgument<int>("limit", 0)) { if (limit_ == 0) { limit_ = INT_MAX; } if (to_file_) { // We will output to file instead of printing on screen. const string& target_folder = ws->RootFolder(); // We will write each individual tensor to its individual file. log_file_.reset(new std::ofstream( target_folder + "/" + def().input(0) + kPrintFileExtension, std::ofstream::out | std::ofstream::trunc)); CHECK(log_file_->good()) << "Failed to open PrintOp file for tensor " << def().input(0) << ". rdstate() = " << log_file_->rdstate(); } } ~PrintOp() { if (log_file_.get()) { log_file_->close(); } } bool RunOnDevice() override { // special-case empty tensors since they may have no meta() if (Input(0).size() == 0) { if (to_file_) { (*log_file_) << std::endl; } else { LOG(INFO) << MetaStr(); } return true; } if (OperatorBase::InputIsType<TensorCPU>(0)) { return DispatchHelper< TensorTypes<float, double, int, long, bool, std::string>>::call( this, OperatorBase::Input<TensorCPU>(0)); } else { return DispatchHelper<TensorTypes<float, double, int, long, bool>>::call( this, Input(0)); } } private: std::string MetaStr() { std::stringstream meta_stream; meta_stream << "Tensor " << def().input(0) << " ("; for (const auto dim : Input(0).dims()) { meta_stream << dim << ","; } meta_stream << "): "; return meta_stream.str(); } template <typename T> bool DoRunWithType() { // A simple strategy to copy tensor if needed, and have the tensor pointer // pointing to the right instantiation. Note that tensor_copy_if_needed // will handle memory deallocation itself so no smart pointer is needed. const TensorCPU* tensor; TensorCPU tensor_copy_if_needed; if (OperatorBase::InputIsType<TensorCPU>(0)) { tensor = &OperatorBase::Input<TensorCPU>(0); } else { tensor_copy_if_needed.CopyFrom(Input(0), &context_); // Make sure that the copy is finished. context_.FinishDeviceComputation(); tensor = &tensor_copy_if_needed; } std::stringstream values_stream; // One most likely doesn't want to print int64-number of items for visual // inspection, so we cast down to int here. int total_count = std::min(tensor->size(), TIndex(limit_)); const T* tensor_data = tensor->template data<T>(); for (int i = 0; i < total_count - 1; ++i) { values_stream << tensor_data[i] << ","; } // We do not add a comma after the last item. values_stream << tensor_data[total_count - 1]; if (to_file_) { (*log_file_) << values_stream.str() << std::endl; } else { // Log to console. LOG(INFO) << MetaStr() << values_stream.str(); } return true; } private: bool to_file_; int limit_; std::unique_ptr<std::ofstream> log_file_; }; /** * @brief Alias op makes the output and the input share the same underlying * storage. * * WARNING: in general, in caffe2's operator interface different tensors should * have different underlying storage, which is the assumption made by * components such as the dependency engine and memory optimization. 
Thus, in * normal situations you should not use the AliasOp, especially in a normal * forward-backward pass. * * The Alias op is provided so one can achieve true asynchrony, such as * Hogwild, in a graph. But make sure you understand all the implications * similar to multi-thread computation before you use it explicitly. */ template <class Context> class AliasOp final : public Operator<Context> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; USE_SIMPLE_CTOR_DTOR(AliasOp); bool RunOnDevice() override { auto& input = Input(0); DCHECK_GT(input.size(), 0); Output(0)->ResizeLike(input); Output(0)->ShareData(input); return true; } }; template <class Context> class FlattenOp : public Operator<Context> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; USE_SIMPLE_CTOR_DTOR(FlattenOp); bool RunOnDevice() override { auto& input = Input(0); auto* output = Output(0); DCHECK_GT(input.size(), 0); output->Resize(vector<TIndex>{input.dim(0), input.size() / input.dim(0)}); context_.template CopyBytes<Context, Context>( input.nbytes(), input.raw_data(), output->raw_mutable_data(input.meta())); return true; } }; // Output gets the data of input(0), but reshapes it like input(1). template <class Context> class ResizeLikeOp : public Operator<Context> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; USE_SIMPLE_CTOR_DTOR(ResizeLikeOp); bool RunOnDevice() override { auto& input0 = Input(0); auto& input1 = Input(1); auto* output = Output(0); DCHECK_EQ(input0.size(), input1.size()); output->ResizeLike(Input(1)); context_.template CopyBytes<Context, Context>( input0.nbytes(), input0.raw_data(), output->raw_mutable_data(input0.meta())); return true; } }; template <typename T, class Context> class SumOp : public Operator<Context> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; USE_SIMPLE_CTOR_DTOR(SumOp); bool RunOnDevice() override { auto& input0 = Input(0); auto* output = Output(0); if (InputSize() == 1) { output->CopyFrom(input0, &context_); return true; } output->ResizeLike(input0); T* output_data = output->template mutable_data<T>(); // Dimension checking for (int i = 1; i < InputSize(); ++i) { CHECK(output->dims() == Input(i).dims()) << ProtoDebugString(def()) << "\n" << output->dims() << "\n" << "Input " << i << ": " << Input(i).dims(); } // Add the first two - works if in-place or not. math::Add( output->size(), input0.template data<T>(), Input(1).template data<T>(), output_data, &context_); // Add remaining. for (int i = 2; i < InputSize(); ++i) { math::Add( output->size(), output_data, Input(i).template data<T>(), output_data, &context_); } return true; } }; // WeightedSumOp computes the weighted sum of several tensors. The input should // be in the form X_0, weight_0, X_1, weight_1, ... where X_i all have the same // shape, and weight_i are size 1 tensors that specifies the weight of each // vector. Note that if one wants to do in-place computation, it could only be // done with X_0 also as the output, but not other X_i. 
template <typename T, class Context> class WeightedSumOp : public Operator<Context> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; USE_SIMPLE_CTOR_DTOR(WeightedSumOp); bool RunOnDevice() override { DCHECK_EQ(InputSize() % 2, 0); auto& X0 = Input(0); auto& weight0 = Input(1); DCHECK_GT(X0.size(), 0); DCHECK_EQ(weight0.size(), 1); int size = X0.size(); auto* output = Output(0); output->ResizeLike(X0); math::Scale<T, Context>( size, weight0.template data<T>(), X0.template data<T>(), output->template mutable_data<T>(), &context_); for (int i = 2; i < InputSize(); i += 2) { auto& X = Input(i); // Do a check: if the input is the same as output, we have a problem - // in-place update should always only happen with the zeroth input. if (&X == output) { LOG(ERROR) << "Input #" << i << " is the same as output. " << "If you want to do in-place updates, put the output as " << "input #0."; return false; } auto& weight = Input(i + 1); DCHECK_EQ(X.size(), size); DCHECK_EQ(weight.size(), 1); math::Axpy<T, Context>( size, weight.template data<T>(), X.template data<T>(), output->template mutable_data<T>(), &context_); } return true; } }; /** * @brief Update slices of the tensor in-place with weighted sum. * * ScatterWeightedSumOp is similar to WeightedSum and computes the weighted sum * of several tensors. The first tensor has to be in-place and only slices of it * on the first dimension as indexed by INDICES will be updated. * * Input: * X_0 - tensor to be updated * weight_0 - scalar weight for X_0, applied only to slices affected, * INDICES - 1-D list of indices on the first dimension of X_0 that need to be * updated * X_1 - update slices, has to have shape of len(INDICES) + shape(X_0)[1:] * weight_1 - scalar weight for X_1 update * X_2, weight_2, ... * * Output: * X_0 - has to be exactly the same tensor as the input 0 * * Note: The op pretty much ignores the exact shapes of the input arguments and * cares only about sizes. It's done for performance consideration to avoid * unnecessary reshapes. Only first dimension of X_0 is important, let's call it * N. If M is the total size of X_0 and K is the size of INDICES then X_i is * assumed to be of shape K x (M / N) regardless of the real shape. * * Note: Each update in INDICES is applied independently which means that if * duplicated elements are present in INDICES the corresponding slice of X_0 * will be scaled multiple times. Manual collapsing of INDICES is required * beforehand if necessary. * * Note: Updates are applied sequentially by inputs which might have undesired * consequences if the input tensor is accessed concurrently by different op * (e.g. when doing Hogwild). Other threads might see intermediate results even * on individual slice level, e.g. X_0 scaled by weight_0 but without any * updates applied. 
* * For now really works only on CPU because of INDICES access */ template <typename T, class Context> class ScatterWeightedSumOp : public Operator<Context> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; USE_SIMPLE_CTOR_DTOR(ScatterWeightedSumOp); USE_DISPATCH_HELPER; bool RunOnDevice() override { return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(this, Input(2)); } private: template <typename Index> bool DoRunWithType() { TIndex block_size = Input(0).size_from_dim(1); return DispatchHelper<FixedSizes<1>, Index>::call(this, block_size); } template <typename Index, int FixedSize> bool DoRunWithSize() { DCHECK_EQ(InputSize() % 2, 1); auto& X0 = Input(0); auto& weight0 = Input(1); auto& indices = Input(2); auto* output = Output(0); CHECK_EQ(&X0, output) << "In place operation is required"; DCHECK_GT(X0.size(), 0); DCHECK_GT(X0.ndim(), 0) << "X0 has to be at least the vector"; DCHECK_EQ(weight0.size(), 1); TIndex M = X0.size(); TIndex N = X0.dim(0); TIndex K = indices.size(); TIndex block_size = M / N; T* data = output->template mutable_data<T>(); const Index* idxs = indices.template data<Index>(); T w0 = *weight0.template data<T>(); // It's most likely a constant so exact comparison is fine if (w0 != 1.0) { for (int i = 0; i < K; ++i) { Index idx = idxs[i]; DCHECK(0 <= idx && idx < N) << "Index out of bounds: " << idx << ", range 0 to " << N; math::Scale<T, Context, FixedSize>( block_size, w0, data + block_size * idx, data + block_size * idx, &context_); } } for (int inp = 3; inp < InputSize(); inp += 2) { auto& X = Input(inp); auto& weight = Input(inp + 1); DCHECK_EQ(X.size(), block_size * K); DCHECK_EQ(weight.size(), 1); const T* x_data = X.template data<T>(); T w = *weight.template data<T>(); for (int i = 0; i < K; ++i) { Index idx = idxs[i]; // double-checking the indices, but it's fine as it's DCHECK only DCHECK(0 <= idx && idx < N) << "Index out of bounds: " << idx << ", range 0 to " << N; math::Axpy<T, Context, FixedSize>( block_size, w, x_data + block_size * i, data + block_size * idx, &context_); } } return true; } }; /** * @brief Update slices of the tensor in-place by overriding. * * Input: * DATA - tensor to be updated * INDICES - 1-D list of indices on the first dimension of X_0 that need to be * updated * SLICES - update slices, has to have shape of len(INDICES) + shape(X_0)[1:] * * Output: * DATA - has to be exactly the same tensor as the input 0 * * Note: The op pretty much ignores the exact shapes of the input arguments and * cares only about sizes. It's done for performance consideration to avoid * unnecessary reshapes. Only first dimension of X_0 is important, let's call it * N. If M is the total size of X_0 and K is the size of INDICES then X_i is * assumed to be of shape K x (M / N) regardless of the real shape. * * Note: Each update in INDICES is applied independently which means that if * duplicated elements are present in INDICES arbitrary one will win. 
* * For now really works only on CPU because of INDICES access */ template <typename T, class Context> class ScatterAssignOp : public Operator<Context> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; USE_SIMPLE_CTOR_DTOR(ScatterAssignOp); bool RunOnDevice() override { // Use run-time polymorphism auto& indices = Input(INDICES); if (indices.template IsType<int32_t>()) { DoRun<int32_t>(); } else if (indices.template IsType<int64_t>()) { DoRun<int64_t>(); } else { LOG(FATAL) << "Unsupported type of INDICES in ScatterAssignOp: " << indices.meta().name(); } return true; } private: template <typename Index> void DoRun() { auto& input = Input(DATA); auto& indices = Input(INDICES); auto& slices = Input(SLICES); auto* output = Output(0); CHECK_EQ(&input, output) << "In place operation is required"; DCHECK_GT(input.ndim(), 0) << "X0 has to be at least the vector"; TIndex M = input.size(); TIndex N = input.dim(0); TIndex K = indices.size(); TIndex block_size = M / N; DCHECK_EQ(slices.size(), block_size * K); // TODO(dzhulgakov): it can be made to work with arbitrary data type by // using raw_mutable_data T* data = output->template mutable_data<T>(); const Index* idxs = indices.template data<Index>(); const T* slicesData = slices.template data<T>(); #pragma omp parallel for for (int i = 0; i < K; ++i) { Index idx = idxs[i]; // double-checking the indices, but it's fine as it's DCHECK only DCHECK(0 <= idx && idx < N) << "Index out of bounds: " << idx << ", range 0 to " << N; context_.template Copy<T, Context, Context>( block_size, slicesData + block_size * i, data + block_size * idx); } } INPUT_TAGS(DATA, INDICES, SLICES); }; template <class Context, class DstContext, class SrcContext> class CopyOp : public Operator<Context> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; USE_SIMPLE_CTOR_DTOR(CopyOp); bool RunOnDevice() override { auto& input = OperatorBase::Input<Tensor<SrcContext>>(0); auto* output = OperatorBase::Output<Tensor<DstContext>>(0); output->ResizeLike(input); this->context_.template CopyBytes<SrcContext, DstContext>( input.nbytes(), input.raw_data(), output->raw_mutable_data(input.meta())); return true; } }; template <class Context> class LengthsToSegmentIdsOp : public Operator<Context> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; USE_SIMPLE_CTOR_DTOR(LengthsToSegmentIdsOp); bool RunOnDevice() override { return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(this, Input(0)); } template <typename Index> bool DoRunWithType() { auto& input = Input(0); auto* output = Output(0); auto* input_data = input.template data<Index>(); CHECK_EQ(input.dims().size(), 1) << "Input must be a vector."; auto total_length = std::accumulate(input_data, input_data + input.size(), 0); output->Resize(total_length); auto* output_data = output->template mutable_data<int32_t>(); int pos = 0; for (int i = 0; i < input.size(); ++i) { auto len = input_data[i]; std::fill(output_data, output_data + len, i); output_data += len; } return true; } }; template <class Context> class SegmentIdsToLengthsOp : public Operator<Context> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; USE_SIMPLE_CTOR_DTOR(SegmentIdsToLengthsOp); bool RunOnDevice() override { return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(this, Input(0)); } template <typename Index> bool DoRunWithType() { auto& input = Input(0); CHECK_EQ(input.dims().size(), 1) << "Input must be a vector."; auto* input_data = input.template data<Index>(); auto input_size = input.size(); auto* output = Output(0); // segment id starts from 0 auto num_segments = input_size ? 
input_data[input_size - 1] + 1 : 0; output->Resize(num_segments); auto* output_data = output->template mutable_data<int64_t>(); std::fill(output_data, output_data + num_segments, 0); for (int64_t i = 0; i < input_size; i++) { output_data[input_data[i]] += 1; } return true; } }; template <class SIndex, class Context> class SliceOp : public Operator<Context> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; USE_SIMPLE_CTOR_DTOR(SliceOp); bool RunOnDevice() override { auto* output = Output(0); auto& data = Input(0); auto& starts = Input(1); auto& ends = Input(2); auto* starts_data = starts.template data<SIndex>(); auto* ends_data = ends.template data<SIndex>(); CHECK_EQ(starts.ndim(), 1); CHECK_EQ(ends.ndim(), 1); CHECK_LE(data.ndim(), starts.size()); CHECK_EQ(starts.size(), ends.size()); std::vector<SIndex> starts_idx(data.ndim()); std::vector<SIndex> ends_idx(data.ndim()); std::vector<SIndex> dst_sizes(data.ndim()); for (int i = 0; i < data.ndim(); ++i) { if (i >= starts.size()) { starts_idx[i] = 0; ends_idx[i] = data.dims()[i]; continue; } auto start = starts_data[i]; auto end = ends_data[i]; if (start < 0) { start = data.dims()[i] + 1 + start; } if (end < 0) { end = data.dims()[i] + 1 + end; } CHECK_GE(start, 0); CHECK_GE(end, 0); CHECK_LT(start, data.dims()[i]); CHECK_LE(end, data.dims()[i]); CHECK_GE(end, start); starts_idx[i] = start; ends_idx[i] = end; dst_sizes[i] = end - start; } // for now only supports slicing in 1 dimension int dim = -1; for (int i = 0; i < data.ndim(); ++i) { if (starts_idx[i] > 0 || ends_idx[i] < data.dims()[i]) { CHECK_EQ(dim, -1) << "Currently only possible to slice in 1 dimension."; dim = i; } } if (dim == -1) { output->CopyFrom(data, &context_); return true; } auto unit = std::accumulate( data.dims().begin() + dim + 1, data.dims().end(), 1, std::multiplies<SIndex>()); auto num_blocks = std::accumulate( data.dims().begin(), data.dims().begin() + dim, 1, std::multiplies<SIndex>()); output->Resize(dst_sizes); auto* src_bytes = (char*)data.raw_data(); auto* dst_bytes = (char*)output->raw_mutable_data(data.meta()); auto src_nbytes = data.nbytes(); auto dst_nbytes = output->nbytes(); auto src_block_size = unit * data.dims()[dim]; auto dst_block_size = unit * (ends_idx[dim] - starts_idx[dim]); auto src_offset = unit * starts_idx[dim]; if (num_blocks == 0 || dst_block_size == 0) { return true; } if (data.meta().copy()) { CHECK(false) << "Complex types not supported yet."; } else { auto itemsize = data.meta().itemsize(); auto src_block_size_bytes = itemsize * src_block_size; auto dst_block_size_bytes = itemsize * dst_block_size; auto src_offset_bytes = src_bytes + itemsize * src_offset; auto dst_offset_bytes = dst_bytes; for (int i = 0; i < num_blocks; ++i) { DCHECK_LE( src_offset_bytes + dst_block_size_bytes, src_bytes + src_nbytes); DCHECK_LE( dst_offset_bytes + dst_block_size_bytes, dst_bytes + dst_nbytes); this->context_.template CopyBytes<Context, Context>( dst_block_size_bytes, (void*)src_offset_bytes, (void*)dst_offset_bytes); src_offset_bytes += src_block_size_bytes; dst_offset_bytes += dst_block_size_bytes; } } return true; } DISABLE_COPY_AND_ASSIGN(SliceOp); }; template <class Context> class HasElementsOp : public Operator<Context> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; USE_SIMPLE_CTOR_DTOR(HasElementsOp); bool RunOnDevice() override { auto& input = Input(0); auto* output = OperatorBase::Output<TensorCPU>(0); output->Resize(std::vector<TIndex>{}); *output->template mutable_data<bool>() = input.size() > 0; return true; } }; template <class Context> class 
IsEmptyOp : public Operator<Context> {
 public:
  USE_OPERATOR_CONTEXT_FUNCTIONS;
  USE_SIMPLE_CTOR_DTOR(IsEmptyOp);

  bool RunOnDevice() override {
    auto& input = Input(0);
    auto* output = OperatorBase::Output<TensorCPU>(0);
    output->Resize(std::vector<TIndex>{});
    *output->template mutable_data<bool>() = (input.size() == 0);
    return true;
  }
};

// RecordShapeOp records the shape of the input tensor to a vector of int. You
// mostly don't need this operator explicitly, and it is mostly used in the
// autodiff process.
template <class Context>
class ShapeOp : public Operator<Context> {
 public:
  USE_OPERATOR_CONTEXT_FUNCTIONS;
  USE_SIMPLE_CTOR_DTOR(ShapeOp);

  bool RunOnDevice() override {
    auto& input = Input(0);
    auto* output = OperatorBase::Output<TensorCPU>(0);
    output->Resize(input.ndim());
    TIndex* output_data = output->template mutable_data<TIndex>();
    for (int i = 0; i < input.ndim(); ++i) {
      output_data[i] = input.dim(i);
    }
    return true;
  }
};

template <class Context>
class SqueezeOp : public Operator<Context> {
 public:
  USE_OPERATOR_CONTEXT_FUNCTIONS;
  SqueezeOp(const OperatorDef& operator_def, Workspace* ws)
      : Operator<Context>(operator_def, ws),
        dims_(OperatorBase::GetRepeatedArgument<int>("dims")) {
    auto originalSize = dims_.size();
    std::sort(dims_.begin(), dims_.end());
    // std::unique only moves the unique elements to the front; the leftover
    // tail has to be erased as well, otherwise dims_.size() never shrinks
    // and the warning below can never fire.
    dims_.erase(std::unique(dims_.begin(), dims_.end()), dims_.end());
    if (dims_.size() < originalSize) {
      LOG(WARNING) << "Parameter `dims` has repeated dimensions.";
    }
    CHECK(dims_.empty() || dims_.front() >= 0)
        << "Dimension ids must be non-negative.";
  }

  bool RunOnDevice() override {
    auto& input = Input(0);
    auto* output = Output(0);
    output->CopyFrom(input, &context_);
    if (dims_.empty()) {
      return true;
    }
    CHECK_GE(input.dims().size(), dims_.back() + 1)
        << "Input needs at least " << (dims_.back() + 1) << " dimensions.";
    int j = 0;
    std::vector<int> newDims;
    for (int i = 0; i < input.dims().size(); ++i) {
      if (j < dims_.size() && dims_[j] == i) {
        CHECK_EQ(input.dims()[i], 1)
            << "Dimension " << i << " of input must be 1.";
        ++j;
        continue;
      }
      newDims.push_back(input.dims().at(i));
    }
    output->Reshape(newDims);
    return true;
  }

 private:
  vector<int> dims_;

 public:
  DISABLE_COPY_AND_ASSIGN(SqueezeOp);
};

template <class Context>
class ExpandDimsOp : public Operator<Context> {
 public:
  USE_OPERATOR_CONTEXT_FUNCTIONS;
  ExpandDimsOp(const OperatorDef& operator_def, Workspace* ws)
      : Operator<Context>(operator_def, ws),
        dims_(OperatorBase::GetRepeatedArgument<int>("dims")) {
    auto originalSize = dims_.size();
    std::sort(dims_.begin(), dims_.end());
    // Same as in SqueezeOp: erase the tail left behind by std::unique.
    dims_.erase(std::unique(dims_.begin(), dims_.end()), dims_.end());
    if (dims_.size() < originalSize) {
      LOG(WARNING) << "Parameter `dims` has repeated dimensions.";
    }
    CHECK(dims_.empty() || dims_.front() >= 0)
        << "Dimension ids must be non-negative.";
  }

  bool RunOnDevice() override {
    auto& input = Input(0);
    auto* output = Output(0);
    output->CopyFrom(input, &context_);
    if (dims_.empty()) {
      return true;
    }
    auto newDims = input.dims();
    CHECK_GE(input.dims().size() + dims_.size(), dims_.back() + 1)
        << "Input needs at least " << (1 + dims_.back() - dims_.size())
        << " dimensions given `dims`.";
    for (const auto dim : dims_) {
      newDims.insert(newDims.begin() + dim, 1);
    }
    output->Reshape(newDims);
    return true;
  }

 private:
  vector<int> dims_;
};

template <class Context>
class GatherOp : public Operator<Context> {
 public:
  USE_OPERATOR_CONTEXT_FUNCTIONS;
  USE_SIMPLE_CTOR_DTOR(GatherOp);

  bool RunOnDevice() override {
    return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
        this, OperatorBase::Input<TensorCPU>(INDICES));
  }

  template <typename Index>
  bool DoRunWithType() {
    // If we end up using
it on GPU doint O(N) memcpy is probably not best :) // TODO: implement prefetching if it starts mattering (TF does it) auto& data = Input(DATA); auto& indices = Input(INDICES); auto* output = Output(0); CHECK_GE(data.ndim(), 1) << "DATA should be at least 1-D"; auto shape = indices.dims(); shape.insert(shape.end(), data.dims().begin() + 1, data.dims().end()); output->Resize(shape); int block_size = data.size() / data.dim(0); auto block_bytesize = data.size_from_dim(1) * data.meta().itemsize(); CAFFE_ENFORCE( block_bytesize == data.nbytes() / data.dim(0), "block_bytesize should be consistent with data dim"); int N = indices.size(); auto src_base = static_cast<const char*>(data.raw_data()); const Index* idxs = indices.template data<Index>(); auto out = static_cast<char*>(output->raw_mutable_data(data.meta())); for (int i = 0; i < N; ++i) { auto src = src_base + idxs[i] * block_bytesize; context_.template CopyItems<Context, Context>( data.meta(), block_size, src, out + block_bytesize * i); } return true; } INPUT_TAGS(DATA, INDICES); }; // Since we just do copying, consider untemplating it on T and using raw_data() /** * Deduplicates input indices vector and optionally produces reverse remapping. * Current implementation produces a sorted list but it's not guaranteed in * general. */ template <class Context> class UniqueOp : public Operator<Context> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; USE_SIMPLE_CTOR_DTOR(UniqueOp); bool RunOnDevice() override { // Use run-time polymorphism auto& input = Input(0); if (input.template IsType<int32_t>()) { DoRun<int32_t>(); } else if (input.template IsType<int64_t>()) { DoRun<int64_t>(); } else { LOG(FATAL) << "Unsupported type of input in Unique: " << input.meta().name(); } return true; } private: vector<int> order_; template <typename T> void DoRun() { auto& inputTensor = Input(0); // use dim32 to enforce that it's fine to have remapping of type int int N = inputTensor.dim32(0); CHECK_EQ(inputTensor.ndim(), 1) << "Input should be a vector"; auto* uniqueTensor = Output(UNIQUE); int* remapping = nullptr; if (REMAPPING < OutputSize()) { auto* remappingTensor = Output(REMAPPING); remappingTensor->ResizeLike(inputTensor); remapping = remappingTensor->template mutable_data<int>(); } const T* input = inputTensor.template data<T>(); // TODO(dzhulgakov): if perf becomes an issue consider doing hash table // instead of sorting order_.resize(N); std::iota(order_.begin(), order_.end(), 0); std::sort(order_.begin(), order_.end(), [input](const int x, const int y) { return input[x] < input[y]; }); int K = N; for (int i = 1; i < N; ++i) { K -= input[order_[i]] == input[order_[i - 1]]; } uniqueTensor->Resize(K); T* unique = uniqueTensor->template mutable_data<T>(); K = 0; T prev = -1; for (int i = 0; i < N; ++i) { if (i == 0 || prev != input[order_[i]]) { prev = unique[K++] = input[order_[i]]; } if (remapping) { remapping[order_[i]] = K - 1; } } } public: OUTPUT_TAGS(UNIQUE, REMAPPING); }; template <class Context> class AndOp final : public Operator<Context> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; AndOp(const OperatorDef& operator_def, Workspace* ws) : Operator<Context>(operator_def, ws) {} bool RunOnDevice() override { const auto* i1 = Input(0).template data<bool>(); const auto* i2 = Input(1).template data<bool>(); auto* output = Output(0); output->Resize(std::vector<int>{}); *output->template mutable_data<bool>() = (*i1 && *i2); return true; } }; } // namespace caffe2 #endif // CAFFE2_OPERATORS_UTILITY_OPS_H_
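// Standalone sketch (not part of this header): the argsort-based
// deduplication that UniqueOp::DoRun implements, restated on plain
// std::vector so it can be tried without the Caffe2 tensor API.  The
// function name is illustrative only.
#include <algorithm>
#include <numeric>
#include <vector>

// Returns the sorted unique values of `in`; remap[i] gives the position of
// in[i] inside the returned vector, mirroring the UNIQUE/REMAPPING outputs.
std::vector<int> unique_with_remapping(const std::vector<int>& in,
                                       std::vector<int>* remap) {
  const int N = static_cast<int>(in.size());
  std::vector<int> order(N);
  std::iota(order.begin(), order.end(), 0);
  // Argsort: order the indices by the values they point at.
  std::sort(order.begin(), order.end(),
            [&in](int x, int y) { return in[x] < in[y]; });
  std::vector<int> unique;
  if (remap) {
    remap->resize(N);
  }
  for (int i = 0; i < N; ++i) {
    // Start a new unique entry whenever the sorted value changes.
    if (i == 0 || in[order[i]] != in[order[i - 1]]) {
      unique.push_back(in[order[i]]);
    }
    if (remap) {
      (*remap)[order[i]] = static_cast<int>(unique.size()) - 1;
    }
  }
  return unique;
}
// Example: unique_with_remapping({3, 1, 3, 2}, &r) returns {1, 2, 3}
// with r == {2, 0, 2, 1}.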
layer_norm_cpu.h
/*
 * Copyright (c) 2016 Marcin Junczys-Dowmunt, the University of Edinburgh, Adam
 * Mickiewicz University
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * All or part of this file was contributed by Intel under license:
 * Copyright (C) 2017-2018 Intel Corporation
 * SPDX-License-Identifier: MIT
 *
 * Function LayerNormCPUKernel is adapted from Marian
 * https://github.com/marian-nmt/marian-dev/blob/master/src/tensors/cpu/tensor_operators.cpp
 */

#ifndef MXNET_OPERATOR_NN_LAYER_NORM_CPU_H_
#define MXNET_OPERATOR_NN_LAYER_NORM_CPU_H_

namespace mxnet {
namespace op {

/* CPU optimized kernel for LayerNorm assuming axis = -1.
 * Data is the underlying storage data type.
 * Accum is the type to use for accumulation.
 *   Apparently there isn't a reduction operator for half_t and anyway it isn't
 *   efficient to use on the CPU, so use float for reduction of half_t.
 *
 * width is the number of values being summed to compute a mean.
 * instances is how many independent layer normalization problems are packed
 * into the tensors.
 *
 * Inputs:
 * data is instances x width
 * gamma is width
 * beta is width
 *
 * Outputs:
 * out is instances x width, can be same as data
 * mean is instances: means of each problem
 * std is instances: standard deviation of each problem
 */
template <typename Data,
          typename Accum = typename
            /* By default accumulate in float32 for float16. Otherwise use same type. */
            std::conditional<std::is_same<mshadow::half::half_t, Data>::value,
                             float,
                             Data>::type>
void LayerNormCPUKernel(size_t width,
                        size_t instances,
                        Data eps,
                        const Data* data,
                        const Data* gamma,
                        const Data* beta,
                        Data* out,
                        Data* mean,
                        Data* std) {
  // Parallelize over independent instances to normalize.
  // MSVC says index variable in OpenMP 'for' statement must have signed
  // integral type.
  const mshadow::index_t signed_instances =
      static_cast<mshadow::index_t>(instances);
#pragma omp parallel for
  for (nnvm::dim_t j = 0; j < signed_instances; ++j) {
    const Data* from = data + j * width;
    // Sum the values to compute mean.
    Accum sum = 0.f;
#pragma omp simd reduction(+ : sum)
    for (size_t i = 0; i < width; ++i) {
      sum += from[i];
    }
    Accum mean_value = sum / width;
    mean[j] = static_cast<Data>(mean_value);
    // Sum squares from mean to compute stddev.
    Accum squares = 0.f;
#pragma omp simd reduction(+ : squares)
    for (size_t i = 0; i < width; ++i) {
      Accum off = from[i] - mean_value;
      squares += off * off;
    }
    Accum sigma = std::sqrt(squares / width + eps);
    std[j] = static_cast<Data>(sigma);
    // Write normalized values.
    Data* to = out + j * width;
#pragma omp simd
    for (size_t i = 0; i < width; ++i) {
      to[i] = (from[i] - mean_value) * gamma[i] / sigma + beta[i];
    }
  }
}

}  // namespace op
}  // namespace mxnet

#endif  // MXNET_OPERATOR_NN_LAYER_NORM_CPU_H_
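// Hypothetical usage sketch (not part of this header): normalize two rows of
// four floats with identity gamma and zero beta.  Assumes it is compiled in a
// translation unit that already pulls in the MXNet/mshadow headers the kernel
// relies on, with OpenMP enabled; the function name is illustrative only.
#include <vector>

void layer_norm_cpu_demo() {
  const size_t width = 4, instances = 2;
  std::vector<float> data = {1.f, 2.f, 3.f, 4.f, 10.f, 20.f, 30.f, 40.f};
  std::vector<float> gamma(width, 1.f), beta(width, 0.f);
  std::vector<float> out(width * instances), mean(instances), stdev(instances);
  mxnet::op::LayerNormCPUKernel<float>(
      width, instances, 1e-5f,
      data.data(), gamma.data(), beta.data(),
      out.data(), mean.data(), stdev.data());
  // Expect mean == {2.5, 25.0}; each row of `out` now has roughly zero mean
  // and unit standard deviation.
}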
OMParrayAdd.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

int main (int argc, char** argv) {
    int *A, *B, *C;     // array pointers
    int N = 0;          // size of arrays
    int i;              // iterator
    int id;             // thread id
    double start, stop; // timers

    // Let thread 0 read N; the implicit barrier at the end of the parallel
    // region guarantees N is visible to all threads afterwards.
    #pragma omp parallel private(id)
    {
        id = omp_get_thread_num();
        if (id == 0) {
            printf("\nInsert N, size of A and B arrays: ");
            if (scanf("%d", &N) != 1) {
                N = 0;
            }
        }
    }
    if (N <= 0) {
        fprintf(stderr, "Invalid N.\n");
        return EXIT_FAILURE;
    }

    A = malloc(N * sizeof(int));
    B = malloc(N * sizeof(int));
    C = malloc(N * sizeof(int));
    if (A == NULL || B == NULL || C == NULL) {
        fprintf(stderr, "Allocation failed.\n");
        return EXIT_FAILURE;
    }

    for (i = 0; i < N; i++) {
        A[i] = rand() % 10;
        B[i] = rand() % 10;
    }

    start = omp_get_wtime();
    // Each thread adds its share of the elements; the printf only serves to
    // show the iteration-to-thread mapping.
    #pragma omp parallel for private(id)
    for (i = 0; i < N; i++) {
        id = omp_get_thread_num();
        printf("\ntask: %d, i: %d", id, i);
        C[i] = A[i] + B[i];
    }
    stop = omp_get_wtime();

    printf("\n\nA:\n");
    for (i = 0; i < N; i++) {
        printf("%4d", A[i]);
    }
    printf("\n\nB:\n");
    for (i = 0; i < N; i++) {
        printf("%4d", B[i]);
    }
    printf("\n\nC:\n");
    for (i = 0; i < N; i++) {
        printf("%4d", C[i]);
    }

    printf("\n\nRun time: %.6fs\n\n", stop - start);

    free(A);
    free(B);
    free(C);
    return 0;
}
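/* Hypothetical build/run session for the program above; OpenMP support must
   be enabled explicitly when compiling with GCC:

       gcc -fopenmp -O2 OMParrayAdd.c -o omp_array_add
       OMP_NUM_THREADS=4 ./omp_array_add

   Note that the per-iteration printf inside the timed loop serializes the
   threads on stdout, so the reported run time largely measures I/O rather
   than the addition; drop it before using the timer for scaling
   experiments.  */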
paint.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP AAA IIIII N N TTTTT % % P P A A I NN N T % % PPPP AAAAA I N N N T % % P A A I N NN T % % P A A IIIII N N T % % % % % % Methods to Paint on an Image % % % % Software Design % % Cristy % % July 1998 % % % % % % Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/resource_.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F l o o d f i l l P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FloodfillPaintImage() changes the color value of any pixel that matches % target and is an immediate neighbor. If the method FillToBorderMethod is % specified, the color value is changed for any neighbor pixel that does not % match the bordercolor member of image. % % By default target must match a particular pixel color exactly. However, % in many cases two colors may differ by a small amount. The fuzz member of % image defines how much tolerance is acceptable to consider two colors as % the same. For example, set fuzz to 10 and the color red at intensities of % 100 and 102 respectively are now interpreted as the same color for the % purposes of the floodfill. % % The format of the FloodfillPaintImage method is: % % MagickBooleanType FloodfillPaintImage(Image *image, % const DrawInfo *draw_info,const PixelInfo target, % const ssize_t x_offset,const ssize_t y_offset, % const MagickBooleanType invert,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o target: the RGB value of the target color. % % o x_offset,y_offset: the starting location of the operation. % % o invert: paint any pixel that does not match the target color. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType FloodfillPaintImage(Image *image, const DrawInfo *draw_info,const PixelInfo *target,const ssize_t x_offset, const ssize_t y_offset,const MagickBooleanType invert, ExceptionInfo *exception) { #define MaxStacksize 131072UL #define PushSegmentStack(up,left,right,delta) \ { \ if (s >= (segment_stack+MaxStacksize)) \ ThrowBinaryException(DrawError,"SegmentStackOverflow",image->filename) \ else \ { \ if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \ { \ s->x1=(double) (left); \ s->y1=(double) (up); \ s->x2=(double) (right); \ s->y2=(double) (delta); \ s++; \ } \ } \ } CacheView *floodplane_view, *image_view; Image *floodplane_image; MagickBooleanType skip, status; MemoryInfo *segment_info; PixelInfo fill_color, pixel; register SegmentInfo *s; SegmentInfo *segment_stack; ssize_t offset, start, x, x1, x2, y; /* Check boundary conditions. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickSignature); if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns)) return(MagickFalse); if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows)) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(image,sRGBColorspace,exception); if ((image->alpha_trait != BlendPixelTrait) && (draw_info->fill.alpha_trait == BlendPixelTrait)) (void) SetImageAlpha(image,OpaqueAlpha,exception); /* Set floodfill state. */ floodplane_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (floodplane_image == (Image *) NULL) return(MagickFalse); floodplane_image->alpha_trait=UndefinedPixelTrait; floodplane_image->colorspace=GRAYColorspace; (void) QueryColorCompliance("#000",AllCompliance, &floodplane_image->background_color,exception); (void) SetImageBackgroundColor(floodplane_image,exception); segment_info=AcquireVirtualMemory(MaxStacksize,sizeof(*segment_stack)); if (segment_info == (MemoryInfo *) NULL) { floodplane_image=DestroyImage(floodplane_image); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } segment_stack=(SegmentInfo *) GetVirtualMemoryBlob(segment_info); /* Push initial segment on stack. */ status=MagickTrue; x=x_offset; y=y_offset; start=0; s=segment_stack; PushSegmentStack(y,x,x,1); PushSegmentStack(y+1,x,x,-1); GetPixelInfo(image,&pixel); image_view=AcquireVirtualCacheView(image,exception); floodplane_view=AcquireAuthenticCacheView(floodplane_image,exception); while (s > segment_stack) { register const Quantum *restrict p; register Quantum *restrict q; register ssize_t x; /* Pop segment off stack. */ s--; x1=(ssize_t) s->x1; x2=(ssize_t) s->x2; offset=(ssize_t) s->y2; y=(ssize_t) s->y1+offset; /* Recolor neighboring pixels. 
*/ p=GetCacheViewVirtualPixels(image_view,0,y,(size_t) (x1+1),1,exception); q=GetCacheViewAuthenticPixels(floodplane_view,0,y,(size_t) (x1+1),1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; p+=x1*GetPixelChannels(image); q+=x1*GetPixelChannels(floodplane_image); for (x=x1; x >= 0; x--) { if (GetPixelGray(floodplane_image,q) != 0) break; GetPixelInfoPixel(image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,target) == invert) break; SetPixelGray(floodplane_image,QuantumRange,q); p-=GetPixelChannels(image); q-=GetPixelChannels(floodplane_image); } if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse) break; skip=x >= x1 ? MagickTrue : MagickFalse; if (skip == MagickFalse) { start=x+1; if (start < x1) PushSegmentStack(y,start,x1-1,-offset); x=x1+1; } do { if (skip == MagickFalse) { if (x < (ssize_t) image->columns) { p=GetCacheViewVirtualPixels(image_view,x,y,image->columns-x,1, exception); q=GetCacheViewAuthenticPixels(floodplane_view,x,y,image->columns- x,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for ( ; x < (ssize_t) image->columns; x++) { if (GetPixelGray(floodplane_image,q) != 0) break; GetPixelInfoPixel(image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,target) == invert) break; SetPixelGray(floodplane_image,QuantumRange,q); p+=GetPixelChannels(image); q+=GetPixelChannels(floodplane_image); } status=SyncCacheViewAuthenticPixels(floodplane_view,exception); if (status == MagickFalse) break; } PushSegmentStack(y,start,x-1,offset); if (x > (x2+1)) PushSegmentStack(y,x2+1,x-1,-offset); } skip=MagickFalse; x++; if (x <= x2) { p=GetCacheViewVirtualPixels(image_view,x,y,(size_t) (x2-x+1),1, exception); q=GetCacheViewAuthenticPixels(floodplane_view,x,y,(size_t) (x2-x+1),1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for ( ; x <= x2; x++) { if (GetPixelGray(floodplane_image,q) != 0) break; GetPixelInfoPixel(image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,target) != invert) break; p+=GetPixelChannels(image); q+=GetPixelChannels(floodplane_image); } } start=x; } while (x <= x2); } for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *restrict p; register Quantum *restrict q; register ssize_t x; /* Tile fill color onto floodplane. */ p=GetCacheViewVirtualPixels(floodplane_view,0,y,image->columns,1,exception); q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelGray(floodplane_image,p) != 0) { (void) GetFillColor(draw_info,x,y,&fill_color,exception); SetPixelInfoPixel(image,&fill_color,q); } p+=GetPixelChannels(floodplane_image); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) break; } floodplane_view=DestroyCacheView(floodplane_view); image_view=DestroyCacheView(image_view); segment_info=RelinquishVirtualMemory(segment_info); floodplane_image=DestroyImage(floodplane_image); return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G r a d i e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GradientImage() applies a continuously smooth color transitions along a % vector from one color to another. 
%
%  Note, the interface of this method will change in the future to support
%  more than one transition.
%
%  The format of the GradientImage method is:
%
%      MagickBooleanType GradientImage(Image *image,const GradientType type,
%        const SpreadMethod method,const PixelInfo *start_color,
%        const PixelInfo *stop_color,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o type: the gradient type: linear or radial.
%
%    o method: the gradient spread method: pad, reflect, or repeat.
%
%    o start_color: the start color.
%
%    o stop_color: the stop color.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static inline double MagickMax(const double x,const double y)
{
  return(x > y ? x : y);
}

MagickExport MagickBooleanType GradientImage(Image *image,
  const GradientType type,const SpreadMethod method,
  const PixelInfo *start_color,const PixelInfo *stop_color,
  ExceptionInfo *exception)
{
  DrawInfo
    *draw_info;

  GradientInfo
    *gradient;

  MagickBooleanType
    status;

  register ssize_t
    i;

  /*
    Set gradient start-stop end points.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(start_color != (const PixelInfo *) NULL);
  assert(stop_color != (const PixelInfo *) NULL);
  draw_info=AcquireDrawInfo();
  gradient=(&draw_info->gradient);
  gradient->type=type;
  gradient->bounding_box.width=image->columns;
  gradient->bounding_box.height=image->rows;
  gradient->gradient_vector.x2=(double) image->columns-1.0;
  gradient->gradient_vector.y2=(double) image->rows-1.0;
  if ((type == LinearGradient) && (gradient->gradient_vector.y2 != 0.0))
    gradient->gradient_vector.x2=0.0;
  gradient->center.x=(double) gradient->gradient_vector.x2/2.0;
  gradient->center.y=(double) gradient->gradient_vector.y2/2.0;
  gradient->radius=MagickMax(gradient->center.x,gradient->center.y);
  gradient->spread=method;
  /*
    Define the gradient to fill between the stops.
  */
  gradient->number_stops=2;
  gradient->stops=(StopInfo *) AcquireQuantumMemory(gradient->number_stops,
    sizeof(*gradient->stops));
  if (gradient->stops == (StopInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) ResetMagickMemory(gradient->stops,0,gradient->number_stops*
    sizeof(*gradient->stops));
  for (i=0; i < (ssize_t) gradient->number_stops; i++)
    GetPixelInfo(image,&gradient->stops[i].color);
  gradient->stops[0].color=(*start_color);
  gradient->stops[0].offset=0.0;
  gradient->stops[1].color=(*stop_color);
  gradient->stops[1].offset=1.0;
  /*
    Draw a gradient on the image.
  */
  (void) SetImageColorspace(image,start_color->colorspace,exception);
  status=DrawGradientImage(image,draw_info,exception);
  draw_info=DestroyDrawInfo(draw_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     O i l P a i n t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OilPaintImage() applies a special effect filter that simulates an oil
%  painting.  Each pixel is replaced by the most frequent color occurring
%  in a circular region defined by radius.
%
%  The format of the OilPaintImage method is:
%
%      Image *OilPaintImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the circular neighborhood.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static size_t **DestroyHistogramThreadSet(size_t **histogram)
{
  register ssize_t
    i;

  assert(histogram != (size_t **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (histogram[i] != (size_t *) NULL)
      histogram[i]=(size_t *) RelinquishMagickMemory(histogram[i]);
  histogram=(size_t **) RelinquishMagickMemory(histogram);
  return(histogram);
}

static size_t **AcquireHistogramThreadSet(const size_t count)
{
  register ssize_t
    i;

  size_t
    **histogram,
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  histogram=(size_t **) AcquireQuantumMemory(number_threads,
    sizeof(*histogram));
  if (histogram == (size_t **) NULL)
    return((size_t **) NULL);
  (void) ResetMagickMemory(histogram,0,number_threads*sizeof(*histogram));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    histogram[i]=(size_t *) AcquireQuantumMemory(count,sizeof(**histogram));
    if (histogram[i] == (size_t *) NULL)
      return(DestroyHistogramThreadSet(histogram));
  }
  return(histogram);
}

MagickExport Image *OilPaintImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define NumberPaintBins  256
#define OilPaintImageTag  "OilPaint/Image"

  CacheView
    *image_view,
    *paint_view;

  Image
    *linear_image,
    *paint_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    **histograms,
    width;

  ssize_t
    center,
    y;

  /*
    Initialize painted image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  width=GetOptimalKernelWidth2D(radius,sigma);
  linear_image=CloneImage(image,0,0,MagickTrue,exception);
  paint_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if ((linear_image == (Image *) NULL) || (paint_image == (Image *) NULL))
    {
      if (linear_image != (Image *) NULL)
        linear_image=DestroyImage(linear_image);
      if (paint_image != (Image *) NULL)
        paint_image=DestroyImage(paint_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(paint_image,DirectClass,exception) == MagickFalse)
    {
      linear_image=DestroyImage(linear_image);
      paint_image=DestroyImage(paint_image);
      return((Image *) NULL);
    }
  histograms=AcquireHistogramThreadSet(NumberPaintBins);
  if (histograms == (size_t **) NULL)
    {
      linear_image=DestroyImage(linear_image);
      paint_image=DestroyImage(paint_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Oil paint image.
*/ status=MagickTrue; progress=0; center=(ssize_t) GetPixelChannels(linear_image)*(linear_image->columns+width)* (width/2L)+GetPixelChannels(linear_image)*(width/2L); image_view=AcquireVirtualCacheView(linear_image,exception); paint_view=AcquireAuthenticCacheView(paint_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(linear_image,paint_image,linear_image->rows,1) #endif for (y=0; y < (ssize_t) linear_image->rows; y++) { register const Quantum *restrict p; register Quantum *restrict q; register size_t *histogram; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t) (width/2L),linear_image->columns+width,width,exception); q=QueueCacheViewAuthenticPixels(paint_view,0,y,paint_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } histogram=histograms[GetOpenMPThreadId()]; for (x=0; x < (ssize_t) linear_image->columns; x++) { register ssize_t i, u; size_t count; ssize_t j, k, n, v; /* Assign most frequent color. */ k=0; j=0; count=0; (void) ResetMagickMemory(histogram,0,NumberPaintBins* sizeof(*histogram)); for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { n=(ssize_t) ScaleQuantumToChar(ClampToQuantum(GetPixelIntensity( linear_image,p+GetPixelChannels(linear_image)*(u+k)))); histogram[n]++; if (histogram[n] > count) { j=k+u; count=histogram[n]; } } k+=(ssize_t) (linear_image->columns+width); } for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++) { PixelChannel channel=GetPixelChannelChannel(linear_image,i); PixelTrait traits=GetPixelChannelTraits(linear_image,channel); PixelTrait paint_traits=GetPixelChannelTraits(paint_image,channel); if ((traits == UndefinedPixelTrait) || (paint_traits == UndefinedPixelTrait)) continue; if (((paint_traits & CopyPixelTrait) != 0) || (GetPixelReadMask(linear_image,p) == 0)) { SetPixelChannel(paint_image,channel,p[center+i],q); continue; } SetPixelChannel(paint_image,channel,p[j*GetPixelChannels(linear_image)+ i],q); } p+=GetPixelChannels(linear_image); q+=GetPixelChannels(paint_image); } if (SyncCacheViewAuthenticPixels(paint_view,exception) == MagickFalse) status=MagickFalse; if (linear_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_OilPaintImage) #endif proceed=SetImageProgress(linear_image,OilPaintImageTag,progress++, linear_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } paint_view=DestroyCacheView(paint_view); image_view=DestroyCacheView(image_view); histograms=DestroyHistogramThreadSet(histograms); linear_image=DestroyImage(linear_image); if (status == MagickFalse) paint_image=DestroyImage(paint_image); return(paint_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O p a q u e P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OpaquePaintImage() changes any pixel that matches color with the color % defined by fill. % % By default color must match a particular pixel color exactly. However, in % many cases two colors may differ by a small amount. Fuzz defines how much % tolerance is acceptable to consider two colors as the same. 
For example, % set fuzz to 10 and the color red at intensities of 100 and 102 respectively % are now interpreted as the same color. % % The format of the OpaquePaintImage method is: % % MagickBooleanType OpaquePaintImage(Image *image, % const PixelInfo *target,const PixelInfo *fill, % const MagickBooleanType invert,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o target: the RGB value of the target color. % % o fill: the replacement color. % % o invert: paint any pixel that does not match the target color. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType OpaquePaintImage(Image *image, const PixelInfo *target,const PixelInfo *fill,const MagickBooleanType invert, ExceptionInfo *exception) { #define OpaquePaintImageTag "Opaque/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo zero; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); assert(target != (PixelInfo *) NULL); assert(fill != (PixelInfo *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if ((IsGrayColorspace(image->colorspace) != MagickFalse) && (IsPixelInfoGray(fill) == MagickFalse)) (void) SetImageColorspace(image,sRGBColorspace,exception); if ((fill->alpha_trait == BlendPixelTrait) && (image->alpha_trait != BlendPixelTrait)) (void) SetImageAlpha(image,OpaqueAlpha,exception); /* Make image color opaque. */ status=MagickTrue; progress=0; GetPixelInfo(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; register Quantum *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,target) != invert) SetPixelInfoPixel(image,fill,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_OpaquePaintImage) #endif proceed=SetImageProgress(image,OpaquePaintImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s p a r e n t P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransparentPaintImage() changes the opacity value associated with any pixel % that matches color to the value defined by opacity. % % By default color must match a particular pixel color exactly. However, in % many cases two colors may differ by a small amount. Fuzz defines how much % tolerance is acceptable to consider two colors as the same. 
For example,
%  set fuzz to 10: the color red at intensities of 100 and 102 is then
%  interpreted as the same color.
%
%  The format of the TransparentPaintImage method is:
%
%      MagickBooleanType TransparentPaintImage(Image *image,
%        const PixelInfo *target,const Quantum opacity,
%        const MagickBooleanType invert,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o target: the target color.
%
%    o opacity: the replacement opacity value.
%
%    o invert: paint any pixel that does not match the target color.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType TransparentPaintImage(Image *image,
  const PixelInfo *target,const Quantum opacity,const MagickBooleanType invert,
  ExceptionInfo *exception)
{
#define TransparentPaintImageTag  "Transparent/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(target != (PixelInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (image->alpha_trait != BlendPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  /*
    Make image color transparent.
  */
  status=MagickTrue;
  progress=0;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register ssize_t
      x;

    register Quantum
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      if (IsFuzzyEquivalencePixelInfo(&pixel,target) != invert)
        SetPixelAlpha(image,opacity,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransparentPaintImage)
#endif
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     T r a n s p a r e n t P a i n t I m a g e C h r o m a                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransparentPaintImageChroma() changes the opacity value associated with any
%  pixel that matches color to the value defined by opacity.
%
%  As there is a single fuzz value for all the channels,
%  TransparentPaintImage() is not suitable for operations like chroma keying,
%  where the tolerance for similarity can differ per color component (RGB).
%  This method therefore takes two target pixels (one low and one high), and
%  every pixel of the image that lies between these two targets is made
%  transparent.
% % The format of the TransparentPaintImageChroma method is: % % MagickBooleanType TransparentPaintImageChroma(Image *image, % const PixelInfo *low,const PixelInfo *high,const Quantum opacity, % const MagickBooleanType invert,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o low: the low target color. % % o high: the high target color. % % o opacity: the replacement opacity value. % % o invert: paint any pixel that does not match the target color. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType TransparentPaintImageChroma(Image *image, const PixelInfo *low,const PixelInfo *high,const Quantum opacity, const MagickBooleanType invert,ExceptionInfo *exception) { #define TransparentPaintImageTag "Transparent/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); assert(high != (PixelInfo *) NULL); assert(low != (PixelInfo *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (image->alpha_trait != BlendPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); /* Make image color transparent. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType match; PixelInfo pixel; register Quantum *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } GetPixelInfo(image,&pixel); for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); match=((pixel.red >= low->red) && (pixel.red <= high->red) && (pixel.green >= low->green) && (pixel.green <= high->green) && (pixel.blue >= low->blue) && (pixel.blue <= high->blue)) ? MagickTrue : MagickFalse; if (match != invert) SetPixelAlpha(image,opacity,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransparentPaintImageChroma) #endif proceed=SetImageProgress(image,TransparentPaintImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); }
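/* A minimal MagickCore driver showing how FloodfillPaintImage() above might be
 * called (a sketch, not part of paint.c: the "red" fill, the (0,0) seed, and
 * the file names are placeholders; assumes the MagickCore 7 API used
 * throughout this file and link flags from `pkg-config --libs MagickCore`). */
#include <stdio.h>
#include <string.h>
#include <MagickCore/MagickCore.h>

int main(int argc, char **argv)
{
  if (argc != 3)
    {
      (void) fprintf(stderr, "usage: %s input output\n", argv[0]);
      return 1;
    }
  MagickCoreGenesis(argv[0], MagickTrue);
  ExceptionInfo *exception = AcquireExceptionInfo();
  ImageInfo *image_info = CloneImageInfo((ImageInfo *) NULL);
  (void) strcpy(image_info->filename, argv[1]);
  Image *image = ReadImage(image_info, exception);
  if (image == (Image *) NULL)
    return 1;
  DrawInfo *draw_info = CloneDrawInfo(image_info, (DrawInfo *) NULL);
  (void) QueryColorCompliance("red", AllCompliance, &draw_info->fill,
    exception);
  PixelInfo target;  /* seed the fill with the color found at pixel (0,0) */
  (void) GetOneVirtualPixelInfo(image, TileVirtualPixelMethod, 0, 0, &target,
    exception);
  (void) FloodfillPaintImage(image, draw_info, &target, 0, 0, MagickFalse,
    exception);
  (void) strcpy(image->filename, argv[2]);
  (void) WriteImage(image_info, image, exception);
  draw_info = DestroyDrawInfo(draw_info);
  image = DestroyImage(image);
  image_info = DestroyImageInfo(image_info);
  exception = DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return 0;
}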
simplify.c
struct am { int a; }; struct am bar() { // function returning struct am. struct am hi; return hi; } struct am (* caller)(); // pointer to function returning struct am. struct am (*weird(int something))() { // function that takes an int and returns pointer to function returning struct am. return caller; } int foo() { return 0; } int main() { { struct am4 { int x; } p; int a3; typedef struct { int x5; } x3, x4; } int a = foo(), b = 10 && 11; caller = bar; int hello = 0 > 1? (weird+0)(10+2)().a : 3; l1: a + hello + b; #pragma omp master a = b; #pragma omp single { int x = 0; x++; } #pragma omp for for (a = 10; a < 11; a++) { a = a + 1; } } int intfoo(int arg) { return arg; } int simplify() { int a = foo(), b = 10 && 11; int c, d; c = 2 || 3; d = 3 && 4; c = (3,4); c = 3 ? 0:1; c++; c + foo(); intfoo(2 + 3); foo(); return 0; }
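/* A readability note on the declarator exercised above (a hedged aside, not
 * part of the test file: `am2`, `am_fn`, `caller2`, and `weird2` are
 * illustrative names).  `struct am (*weird(int))()` declares a function taking
 * an int and returning a pointer to a function that returns struct am; the
 * typedef form below has the same shape. */
struct am2 { int a; };

typedef struct am2 (*am_fn)();   /* pointer to function returning struct am2 */

struct am2 (*caller2)();         /* the same shape, spelled without the typedef */

am_fn weird2(int something) {    /* same shape as weird() in the test above */
  return caller2;
}

int main(void) {
  am_fn f = weird2(0);
  return f == caller2 ? 0 : 1;   /* both hold the (null) initial value */
}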
GB_unaryop__minv_int64_fp64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_int64_fp64 // op(A') function: GB_tran__minv_int64_fp64 // C type: int64_t // A type: double // cast: int64_t cij ; GB_CAST_SIGNED(cij,aij,64) // unaryop: cij = GB_IMINV_SIGNED (aij, 64) #define GB_ATYPE \ double #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_SIGNED (x, 64) ; // casting #define GB_CASTING(z, aij) \ int64_t z ; GB_CAST_SIGNED(z,aij,64) ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_INT64 || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_int64_fp64 ( int64_t *Cx, // Cx and Ax may be aliased double *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_int64_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
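//------------------------------------------------------------------------------
// For a single entry, the GB_CAST_OP macro chain above (GB_GETA, GB_CASTING,
// GB_OP) reads a double, casts it to int64_t, and applies the integer
// multiplicative inverse.  A plain-C paraphrase follows (an illustration only:
// cast_signed_64 and iminv_signed_64 are stand-ins; the real GB_CAST_SIGNED
// and GB_IMINV_SIGNED macros in GB.h also handle NaN, overflow, and
// division-by-zero cases).
//------------------------------------------------------------------------------

#include <stdint.h>
#include <stdio.h>

static int64_t cast_signed_64 (double x)   { return ((int64_t) x) ; }
static int64_t iminv_signed_64 (int64_t x) { return (x == 0 ? 0 : 1 / x) ; }

int main (void)
{
    double  Ax [3] = { 1.0, 2.9, -4.0 } ;
    int64_t Cx [3] ;
    for (int p = 0 ; p < 3 ; p++)
    {
        double  aij = Ax [p] ;                  // GB_GETA (aij, Ax, pA)
        int64_t z   = cast_signed_64 (aij) ;    // GB_CASTING (z, aij)
        Cx [p] = iminv_signed_64 (z) ;          // GB_OP (GB_CX (pC), z)
    }
    // prints "1 0 0": integer minv truncates (1/1 = 1, 1/2 = 0, 1/-4 = 0)
    printf ("%lld %lld %lld\n", (long long) Cx [0], (long long) Cx [1],
        (long long) Cx [2]) ;
    return (0) ;
}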
data.c
#include "data.h" #include "utils.h" #include "image.h" #include "opencl.h" #include <stdio.h> #include <stdlib.h> #include <string.h> pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; list *get_paths(char *filename) { //if (filename) filename[strcspn(filename, "\n\r")] = 0; char *pos; if ((pos=strchr(filename, '\r')) != NULL) *pos = '\0'; if ((pos=strchr(filename, '\n')) != NULL) *pos = '\0'; char *path; FILE *file = fopen(filename, "r"); if(!file) file_error(filename); list *lines = make_list(); while((path=fgetl(file))){ list_insert(lines, path); } fclose(file); return lines; } /* char **get_random_paths_indexes(char **paths, int n, int m, int *indexes) { char **random_paths = calloc(n, sizeof(char*)); int i; pthread_mutex_lock(&mutex); for(i = 0; i < n; ++i){ int index = rand()%m; indexes[i] = index; random_paths[i] = paths[index]; //if(i == 0) printf("%s\n", paths[index]); } pthread_mutex_unlock(&mutex); return random_paths; } */ char **get_random_paths(char **paths, int n, int m) { char **random_paths = calloc(n, sizeof(char*)); int i; pthread_mutex_lock(&mutex); for(i = 0; i < n; ++i){ int index = rand()%m; random_paths[i] = paths[index]; //if(i == 0) printf("%s\n", paths[index]); } pthread_mutex_unlock(&mutex); return random_paths; } char **find_replace_paths(char **paths, int n, char *find, char *replace) { char **replace_paths = calloc(n, sizeof(char*)); int i; for(i = 0; i < n; ++i){ char replaced[4096]; find_replace(paths[i], find, replace, replaced); replace_paths[i] = copy_string(replaced); } return replace_paths; } matrix load_image_paths_gray(char **paths, int n, int w, int h) { int i; matrix X; X.rows = n; X.vals = calloc(X.rows, sizeof(float*)); X.cols = 0; for(i = 0; i < n; ++i){ image im = load_image(paths[i], w, h, 3); image gray = grayscale_image(im); free_image(im); im = gray; X.vals[i] = im.data; X.cols = im.h*im.w*im.c; } return X; } matrix load_image_paths(char **paths, int n, int w, int h) { int i; matrix X; X.rows = n; X.vals = calloc(X.rows, sizeof(float*)); X.cols = 0; for(i = 0; i < n; ++i){ image im = load_image_color(paths[i], w, h); X.vals[i] = im.data; X.cols = im.h*im.w*im.c; } return X; } matrix load_image_augment_paths(char **paths, int n, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center) { int i; matrix X; X.rows = n; X.vals = calloc(X.rows, sizeof(float*)); X.cols = 0; for(i = 0; i < n; ++i){ image im = load_image_color(paths[i], 0, 0); image crop; if(center){ crop = center_crop_image(im, size, size); } else { crop = random_augment_image(im, angle, aspect, min, max, size, size); } int flip = rand()%2; if (flip) flip_image(crop); random_distort_image(crop, hue, saturation, exposure); /* show_image(im, "orig"); show_image(crop, "crop"); cvWaitKey(0); */ //grayscale_image_3c(crop); free_image(im); X.vals[i] = crop.data; X.cols = crop.h*crop.w*crop.c; } return X; } box_label *read_boxes(char *filename, int *n) { //if (filename) filename[strcspn(filename, "\n\r")] = 0; char *pos; if ((pos=strchr(filename, '\r')) != NULL) *pos = '\0'; if ((pos=strchr(filename, '\n')) != NULL) *pos = '\0'; FILE *file = fopen(filename, "r"); if(!file) file_error(filename); float x, y, h, w; int id; int count = 0; int size = 64; box_label *boxes = calloc(size, sizeof(box_label)); while(fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5){ if(count == size) { size = size * 2; boxes = realloc(boxes, size*sizeof(box_label)); } boxes[count].id = id; boxes[count].x = x; boxes[count].y = y; boxes[count].h = h; 
boxes[count].w = w; boxes[count].left = x - w/2; boxes[count].right = x + w/2; boxes[count].top = y - h/2; boxes[count].bottom = y + h/2; ++count; } fclose(file); *n = count; return boxes; } void randomize_boxes(box_label *b, int n) { int i; for(i = 0; i < n; ++i){ box_label swap = b[i]; int index = rand()%n; b[i] = b[index]; b[index] = swap; } } void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip) { int i; for(i = 0; i < n; ++i){ if(boxes[i].x == 0 && boxes[i].y == 0) { boxes[i].x = 999999; boxes[i].y = 999999; boxes[i].w = 999999; boxes[i].h = 999999; continue; } boxes[i].left = boxes[i].left * sx - dx; boxes[i].right = boxes[i].right * sx - dx; boxes[i].top = boxes[i].top * sy - dy; boxes[i].bottom = boxes[i].bottom* sy - dy; if(flip){ float swap = boxes[i].left; boxes[i].left = 1. - boxes[i].right; boxes[i].right = 1. - swap; } boxes[i].left = constrain(0, 1, boxes[i].left); boxes[i].right = constrain(0, 1, boxes[i].right); boxes[i].top = constrain(0, 1, boxes[i].top); boxes[i].bottom = constrain(0, 1, boxes[i].bottom); boxes[i].x = (boxes[i].left+boxes[i].right)/2; boxes[i].y = (boxes[i].top+boxes[i].bottom)/2; boxes[i].w = (boxes[i].right - boxes[i].left); boxes[i].h = (boxes[i].bottom - boxes[i].top); boxes[i].w = constrain(0, 1, boxes[i].w); boxes[i].h = constrain(0, 1, boxes[i].h); } } void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy) { char labelpath[4096]; find_replace(path, "images", "labels", labelpath); find_replace(labelpath, "JPEGImages", "labels", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); int count = 0; box_label *boxes = read_boxes(labelpath, &count); randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip); float x,y,w,h; int id; int i; for (i = 0; i < count && i < 90; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; if (w < .0 || h < .0) continue; int index = (4+classes) * i; truth[index++] = x; truth[index++] = y; truth[index++] = w; truth[index++] = h; if (id < classes) truth[index+id] = 1; } free(boxes); } void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy) { char labelpath[4096]; find_replace(path, "images", "labels", labelpath); find_replace(labelpath, "JPEGImages", "labels", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".png", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); int count = 0; box_label *boxes = read_boxes(labelpath, &count); randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip); float x,y,w,h; int id; int i; for (i = 0; i < count; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; if (w < .005 || h < .005) continue; int col = (int)(x*num_boxes); int row = (int)(y*num_boxes); x = x*num_boxes - col; y = y*num_boxes - row; int index = (col+row*num_boxes)*(5+classes); if (truth[index]) continue; truth[index++] = 1; if (id < classes) truth[index+id] = 1; index += classes; truth[index++] = x; truth[index++] = y; truth[index++] = w; truth[index++] = h; } free(boxes); } void load_rle(image im, int *rle, int n) { int count = 0; int curr = 0; int i,j; for(i = 0; i < n; ++i){ for(j = 0; j < rle[i]; ++j){ im.data[count++] 
= curr; } curr = 1 - curr; } for(; count < im.h*im.w*im.c; ++count){ im.data[count] = curr; } } void or_image(image src, image dest, int c) { int i; for(i = 0; i < src.w*src.h; ++i){ if(src.data[i]) dest.data[dest.w*dest.h*c + i] = 1; } } void exclusive_image(image src) { int k, j, i; int s = src.w*src.h; for(k = 0; k < src.c-1; ++k){ for(i = 0; i < s; ++i){ if (src.data[k*s + i]){ for(j = k+1; j < src.c; ++j){ src.data[j*s + i] = 0; } } } } } box bound_image(image im) { int x,y; int minx = im.w; int miny = im.h; int maxx = 0; int maxy = 0; for(y = 0; y < im.h; ++y){ for(x = 0; x < im.w; ++x){ if(im.data[y*im.w + x]){ minx = (x < minx) ? x : minx; miny = (y < miny) ? y : miny; maxx = (x > maxx) ? x : maxx; maxy = (y > maxy) ? y : maxy; } } } box b = {minx, miny, maxx-minx + 1, maxy-miny + 1}; //printf("%f %f %f %f\n", b.x, b.y, b.w, b.h); return b; } void fill_truth_iseg(char *path, int num_boxes, float *truth, int classes, int w, int h, augment_args aug, int flip, int mw, int mh) { char labelpath[4096]; find_replace(path, "images", "mask", labelpath); find_replace(labelpath, "JPEGImages", "mask", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); FILE *file = fopen(labelpath, "r"); if(!file) file_error(labelpath); char buff[32788]; int id; int i = 0; int j; image part = make_image(w, h, 1); while((fscanf(file, "%d %s", &id, buff) == 2) && i < num_boxes){ int n = 0; int *rle = read_intlist(buff, &n, 0); load_rle(part, rle, n); image sized = rotate_crop_image(part, aug.rad, aug.scale, aug.w, aug.h, aug.dx, aug.dy, aug.aspect); if(flip) flip_image(sized); image mask = resize_image(sized, mw, mh); truth[i*(mw*mh+1)] = id; for(j = 0; j < mw*mh; ++j){ truth[i*(mw*mh + 1) + 1 + j] = mask.data[j]; } ++i; free_image(mask); free_image(sized); free(rle); } if(i < num_boxes) truth[i*(mw*mh+1)] = -1; fclose(file); free_image(part); } void fill_truth_mask(char *path, int num_boxes, float *truth, int classes, int w, int h, augment_args aug, int flip, int mw, int mh) { char labelpath[4096]; find_replace(path, "images", "mask", labelpath); find_replace(labelpath, "JPEGImages", "mask", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); FILE *file = fopen(labelpath, "r"); if(!file) file_error(labelpath); char buff[32788]; int id; int i = 0; image part = make_image(w, h, 1); while((fscanf(file, "%d %s", &id, buff) == 2) && i < num_boxes){ int n = 0; int *rle = read_intlist(buff, &n, 0); load_rle(part, rle, n); image sized = rotate_crop_image(part, aug.rad, aug.scale, aug.w, aug.h, aug.dx, aug.dy, aug.aspect); if(flip) flip_image(sized); box b = bound_image(sized); if(b.w > 0){ image crop = crop_image(sized, b.x, b.y, b.w, b.h); image mask = resize_image(crop, mw, mh); truth[i*(4 + mw*mh + 1) + 0] = (b.x + b.w/2.)/sized.w; truth[i*(4 + mw*mh + 1) + 1] = (b.y + b.h/2.)/sized.h; truth[i*(4 + mw*mh + 1) + 2] = b.w/sized.w; truth[i*(4 + mw*mh + 1) + 3] = b.h/sized.h; int j; for(j = 0; j < mw*mh; ++j){ truth[i*(4 + mw*mh + 1) + 4 + j] = mask.data[j]; } truth[i*(4 + mw*mh + 1) + 4 + mw*mh] = id; free_image(crop); free_image(mask); ++i; } free_image(sized); free(rle); } fclose(file); free_image(part); } void fill_truth_detection(char *path, int num_boxes, float *truth, int classes, int flip, float dx, float dy, float sx, float sy) { char labelpath[4096]; find_replace(path, "images", 
"labels", labelpath); find_replace(labelpath, "JPEGImages", "labels", labelpath); find_replace(labelpath, "raw", "labels", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".png", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); int count = 0; box_label *boxes = read_boxes(labelpath, &count); randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip); if(count > num_boxes) count = num_boxes; float x,y,w,h; int id; int i; int sub = 0; for (i = 0; i < count; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; if ((w < .001 || h < .001)) { ++sub; continue; } truth[(i-sub)*5+0] = x; truth[(i-sub)*5+1] = y; truth[(i-sub)*5+2] = w; truth[(i-sub)*5+3] = h; truth[(i-sub)*5+4] = id; } free(boxes); } #define NUMCHARS 37 void print_letters(float *pred, int n) { int i; for(i = 0; i < n; ++i){ int index = max_index(pred+i*NUMCHARS, NUMCHARS); printf("%c", int_to_alphanum(index)); } printf("\n"); } void fill_truth_captcha(char *path, int n, float *truth) { char *begin = strrchr(path, '/'); ++begin; int i; for(i = 0; i < strlen(begin) && i < n && begin[i] != '.'; ++i){ int index = alphanum_to_int(begin[i]); if(index > 35) printf("Bad %c\n", begin[i]); truth[i*NUMCHARS+index] = 1; } for(;i < n; ++i){ truth[i*NUMCHARS + NUMCHARS-1] = 1; } } data load_data_captcha(char **paths, int n, int m, int k, int w, int h) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.y = make_matrix(n, k*NUMCHARS); int i; for(i = 0; i < n; ++i){ fill_truth_captcha(paths[i], k, d.y.vals[i]); } if(m) free(paths); return d; } data load_data_captcha_encode(char **paths, int n, int m, int w, int h) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.X.cols = 17100; d.y = d.X; if(m) free(paths); return d; } void fill_truth(char *path, char **labels, int k, float *truth) { int i; memset(truth, 0, k*sizeof(float)); int count = 0; for(i = 0; i < k; ++i){ if(strstr(path, labels[i])){ truth[i] = 1; ++count; //printf("%s %s %d\n", path, labels[i], i); } } if(count != 1 && (k != 1 || count != 0)) printf("Too many or too few labels: %d, %s\n", count, path); } void fill_hierarchy(float *truth, int k, tree *hierarchy) { int j; for(j = 0; j < k; ++j){ if(truth[j]){ int parent = hierarchy->parent[j]; while(parent >= 0){ truth[parent] = 1; parent = hierarchy->parent[parent]; } } } int i; int count = 0; for(j = 0; j < hierarchy->groups; ++j){ //printf("%d\n", count); int mask = 1; for(i = 0; i < hierarchy->group_size[j]; ++i){ if(truth[count + i]){ mask = 0; break; } } if (mask) { for(i = 0; i < hierarchy->group_size[j]; ++i){ truth[count + i] = SECRET_NUM; } } count += hierarchy->group_size[j]; } } matrix load_regression_labels_paths(char **paths, int n, int k) { matrix y = make_matrix(n, k); int i,j; for(i = 0; i < n; ++i){ char labelpath[4096]; find_replace(paths[i], "images", "labels", labelpath); find_replace(labelpath, "JPEGImages", "labels", labelpath); find_replace(labelpath, ".BMP", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); find_replace(labelpath, ".JPeG", ".txt", labelpath); find_replace(labelpath, ".Jpeg", ".txt", labelpath); find_replace(labelpath, ".PNG", ".txt", labelpath); find_replace(labelpath, ".TIF", ".txt", labelpath); find_replace(labelpath, 
".bmp", ".txt", labelpath); find_replace(labelpath, ".jpeg", ".txt", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".png", ".txt", labelpath); find_replace(labelpath, ".tif", ".txt", labelpath); FILE *file = fopen(labelpath, "r"); for(j = 0; j < k; ++j){ fscanf(file, "%f", &(y.vals[i][j])); } fclose(file); } return y; } matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierarchy) { matrix y = make_matrix(n, k); int i; for(i = 0; i < n && labels; ++i){ fill_truth(paths[i], labels, k, y.vals[i]); if(hierarchy){ fill_hierarchy(y.vals[i], k, hierarchy); } } return y; } matrix load_tags_paths(char **paths, int n, int k) { matrix y = make_matrix(n, k); int i; //int count = 0; for(i = 0; i < n; ++i){ char label[4096]; find_replace(paths[i], "images", "labels", label); find_replace(label, ".jpg", ".txt", label); FILE *file = fopen(label, "r"); if (!file) continue; //++count; int tag; while(fscanf(file, "%d", &tag) == 1){ if(tag < k){ y.vals[i][tag] = 1; } } fclose(file); } //printf("%d/%d\n", count, n); return y; } char **get_labels(char *filename) { list *plist = get_paths(filename); char **labels = (char **)list_to_array(plist); free_list(plist); return labels; } void free_data(data d) { if(!d.shallow){ free_matrix(d.X); free_matrix(d.y); }else{ free(d.X.vals); free(d.y.vals); #ifdef GPU_FETCH if (d.X.valsb) free(d.X.valsb); if (d.X.valsb_gpu.ptr) opencl_free_gpu_only(d.X.valsb_gpu); if (d.y.valsb) free(d.y.valsb); if (d.y.valsb_gpu.ptr) opencl_free_gpu_only(d.y.valsb_gpu); #endif } } image get_segmentation_image(char *path, int w, int h, int classes) { char labelpath[4096]; find_replace(path, "images", "mask", labelpath); find_replace(labelpath, "JPEGImages", "mask", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); image mask = make_image(w, h, classes); FILE *file = fopen(labelpath, "r"); if(!file) file_error(labelpath); char buff[32788]; int id; image part = make_image(w, h, 1); while(fscanf(file, "%d %s", &id, buff) == 2){ int n = 0; int *rle = read_intlist(buff, &n, 0); load_rle(part, rle, n); or_image(part, mask, id); free(rle); } //exclusive_image(mask); fclose(file); free_image(part); return mask; } image get_segmentation_image2(char *path, int w, int h, int classes) { char labelpath[4096]; find_replace(path, "images", "mask", labelpath); find_replace(labelpath, "JPEGImages", "mask", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); image mask = make_image(w, h, classes+1); int i; for(i = 0; i < w*h; ++i){ mask.data[w*h*classes + i] = 1; } FILE *file = fopen(labelpath, "r"); if(!file) file_error(labelpath); char buff[32788]; int id; image part = make_image(w, h, 1); while(fscanf(file, "%d %s", &id, buff) == 2){ int n = 0; int *rle = read_intlist(buff, &n, 0); load_rle(part, rle, n); or_image(part, mask, id); for(i = 0; i < w*h; ++i){ if(part.data[i]) mask.data[w*h*classes + i] = 0; } free(rle); } //exclusive_image(mask); fclose(file); free_image(part); return mask; } data load_data_seg(int n, char **paths, int m, int w, int h, int classes, int min, int max, float angle, float aspect, float hue, float saturation, float exposure, int div) { char **random_paths = get_random_paths(paths, n, m); int i; data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = calloc(d.X.rows, sizeof(float*)); 
d.X.cols = h*w*3; d.y.rows = n; d.y.cols = h*w*classes/div/div; d.y.vals = calloc(d.X.rows, sizeof(float*)); for(i = 0; i < n; ++i){ image orig = load_image_color(random_paths[i], 0, 0); augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h); image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect); int flip = rand()%2; if(flip) flip_image(sized); random_distort_image(sized, hue, saturation, exposure); d.X.vals[i] = sized.data; image mask = get_segmentation_image(random_paths[i], orig.w, orig.h, classes); //image mask = make_image(orig.w, orig.h, classes+1); image sized_m = rotate_crop_image(mask, a.rad, a.scale/div, a.w/div, a.h/div, a.dx/div, a.dy/div, a.aspect); if(flip) flip_image(sized_m); d.y.vals[i] = sized_m.data; free_image(orig); free_image(mask); /* image rgb = mask_to_rgb(sized_m, classes); show_image(rgb, "part"); show_image(sized, "orig"); cvWaitKey(0); free_image(rgb); */ } free(random_paths); return d; } data load_data_iseg(int n, char **paths, int m, int w, int h, int classes, int boxes, int div, int min, int max, float angle, float aspect, float hue, float saturation, float exposure) { char **random_paths = get_random_paths(paths, n, m); int i; data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = calloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*3; d.y = make_matrix(n, (((w/div)*(h/div))+1)*boxes); for(i = 0; i < n; ++i){ image orig = load_image_color(random_paths[i], 0, 0); augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h); image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect); int flip = rand()%2; if(flip) flip_image(sized); random_distort_image(sized, hue, saturation, exposure); d.X.vals[i] = sized.data; //show_image(sized, "image"); fill_truth_iseg(random_paths[i], boxes, d.y.vals[i], classes, orig.w, orig.h, a, flip, w/div, h/div); free_image(orig); /* image rgb = mask_to_rgb(sized_m, classes); show_image(rgb, "part"); show_image(sized, "orig"); cvWaitKey(0); free_image(rgb); */ } free(random_paths); return d; } data load_data_mask(int n, char **paths, int m, int w, int h, int classes, int boxes, int coords, int min, int max, float angle, float aspect, float hue, float saturation, float exposure) { char **random_paths = get_random_paths(paths, n, m); int i; data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = calloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*3; d.y = make_matrix(n, (coords+1)*boxes); for(i = 0; i < n; ++i){ image orig = load_image_color(random_paths[i], 0, 0); augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h); image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect); int flip = rand()%2; if(flip) flip_image(sized); random_distort_image(sized, hue, saturation, exposure); d.X.vals[i] = sized.data; //show_image(sized, "image"); fill_truth_mask(random_paths[i], boxes, d.y.vals[i], classes, orig.w, orig.h, a, flip, 14, 14); free_image(orig); /* image rgb = mask_to_rgb(sized_m, classes); show_image(rgb, "part"); show_image(sized, "orig"); cvWaitKey(0); free_image(rgb); */ } free(random_paths); return d; } data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure) { char **random_paths = get_random_paths(paths, n, m); int i; data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = calloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*3; int k = size*size*(5+classes); d.y = make_matrix(n, k); for(i = 0; i < n; ++i){ image orig 
= load_image_color(random_paths[i], 0, 0); int oh = orig.h; int ow = orig.w; int dw = (ow*jitter); int dh = (oh*jitter); int pleft = rand_uniform(-dw, dw); int pright = rand_uniform(-dw, dw); int ptop = rand_uniform(-dh, dh); int pbot = rand_uniform(-dh, dh); int swidth = ow - pleft - pright; int sheight = oh - ptop - pbot; float sx = (float)swidth / ow; float sy = (float)sheight / oh; int flip = rand()%2; image cropped = crop_image(orig, pleft, ptop, swidth, sheight); float dx = ((float)pleft/ow)/sx; float dy = ((float)ptop /oh)/sy; image sized = resize_image(cropped, w, h); if(flip) flip_image(sized); random_distort_image(sized, hue, saturation, exposure); d.X.vals[i] = sized.data; fill_truth_region(random_paths[i], d.y.vals[i], classes, size, flip, dx, dy, 1./sx, 1./sy); free_image(orig); free_image(cropped); } free(random_paths); return d; } data load_data_compare(int n, char **paths, int m, int classes, int w, int h) { if(m) paths = get_random_paths(paths, 2*n, m); int i,j; data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = calloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*6; int k = 2*(classes); d.y = make_matrix(n, k); for(i = 0; i < n; ++i){ image im1 = load_image_color(paths[i*2], w, h); image im2 = load_image_color(paths[i*2+1], w, h); d.X.vals[i] = calloc(d.X.cols, sizeof(float)); memcpy(d.X.vals[i], im1.data, h*w*3*sizeof(float)); memcpy(d.X.vals[i] + h*w*3, im2.data, h*w*3*sizeof(float)); int id; float iou; char imlabel1[4096]; char imlabel2[4096]; find_replace(paths[i*2], "imgs", "labels", imlabel1); find_replace(imlabel1, "jpg", "txt", imlabel1); FILE *fp1 = fopen(imlabel1, "r"); while(fscanf(fp1, "%d %f", &id, &iou) == 2){ if (d.y.vals[i][2*id] < iou) d.y.vals[i][2*id] = iou; } find_replace(paths[i*2+1], "imgs", "labels", imlabel2); find_replace(imlabel2, "jpg", "txt", imlabel2); FILE *fp2 = fopen(imlabel2, "r"); while(fscanf(fp2, "%d %f", &id, &iou) == 2){ if (d.y.vals[i][2*id + 1] < iou) d.y.vals[i][2*id + 1] = iou; } for (j = 0; j < classes; ++j){ if (d.y.vals[i][2*j] > .5 && d.y.vals[i][2*j+1] < .5){ d.y.vals[i][2*j] = 1; d.y.vals[i][2*j+1] = 0; } else if (d.y.vals[i][2*j] < .5 && d.y.vals[i][2*j+1] > .5){ d.y.vals[i][2*j] = 0; d.y.vals[i][2*j+1] = 1; } else { d.y.vals[i][2*j] = SECRET_NUM; d.y.vals[i][2*j+1] = SECRET_NUM; } } fclose(fp1); fclose(fp2); free_image(im1); free_image(im2); } if(m) free(paths); return d; } data load_data_swag(char **paths, int n, int classes, float jitter) { int index = rand()%n; char *random_path = paths[index]; image orig = load_image_color(random_path, 0, 0); int h = orig.h; int w = orig.w; data d = {0}; d.shallow = 0; d.w = w; d.h = h; d.X.rows = 1; d.X.vals = calloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*3; int k = (4+classes)*90; d.y = make_matrix(1, k); int dw = w*jitter; int dh = h*jitter; int pleft = rand_uniform(-dw, dw); int pright = rand_uniform(-dw, dw); int ptop = rand_uniform(-dh, dh); int pbot = rand_uniform(-dh, dh); int swidth = w - pleft - pright; int sheight = h - ptop - pbot; float sx = (float)swidth / w; float sy = (float)sheight / h; int flip = rand()%2; image cropped = crop_image(orig, pleft, ptop, swidth, sheight); float dx = ((float)pleft/w)/sx; float dy = ((float)ptop /h)/sy; image sized = resize_image(cropped, w, h); if(flip) flip_image(sized); d.X.vals[0] = sized.data; fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1./sx, 1./sy); free_image(orig); free_image(cropped); return d; } data load_data_detection(int n, char **paths, int m, int w, int h, int boxes, int classes, float jitter, float 
hue, float saturation, float exposure) { char **random_paths = get_random_paths(paths, n, m); int i; data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = calloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*3; d.y = make_matrix(n, 5*boxes); #ifdef GPU_FETCH if (n && !d.X.valsb) d.X.valsb = calloc(n*d.X.rows*d.X.cols, sizeof(float)); if (n && !d.X.valsb_gpu.ptr) d.X.valsb_gpu = opencl_make_array(d.X.valsb, n*d.X.rows*d.X.cols); if (n && !d.y.valsb) d.y.valsb = calloc(n*d.y.rows*d.y.cols, sizeof(float)); if (n && !d.y.valsb_gpu.ptr) d.y.valsb_gpu = opencl_make_array(d.y.valsb, n*d.y.rows*d.y.cols); #endif for(i = 0; i < n; ++i){ image orig = load_image_color(random_paths[i], 0, 0); image sized = make_image(w, h, orig.c); fill_image(sized, .5); float dw = jitter * orig.w; float dh = jitter * orig.h; float new_ar = (orig.w + rand_uniform(-dw, dw)) / (orig.h + rand_uniform(-dh, dh)); //float scale = rand_uniform(.25, 2); float scale = 1; float nw, nh; if(new_ar < 1){ nh = scale * h; nw = nh * new_ar; } else { nw = scale * w; nh = nw / new_ar; } float dx = rand_uniform(0, w - nw); float dy = rand_uniform(0, h - nh); place_image(orig, nw, nh, dx, dy, sized); random_distort_image(sized, hue, saturation, exposure); int flip = rand()%2; if(flip) flip_image(sized); d.X.vals[i] = sized.data; fill_truth_detection(random_paths[i], boxes, d.y.vals[i], classes, flip, -dx/w, -dy/h, nw/w, nh/h); free_image(orig); #ifdef GPU_FETCH if (n && d.X.valsb) memcpy(d.X.valsb+i*d.X.cols, d.X.vals[i], d.X.cols*sizeof(float)); if (n && d.y.valsb) memcpy(d.y.valsb+i*d.y.cols, d.y.vals[i], d.y.cols*sizeof(float)); #endif } free(random_paths); #ifdef GPU_FETCH if (n && d.X.valsb) opencl_push_array_map(d.X.valsb_gpu, d.X.valsb, n*d.X.rows*d.X.cols); if (n && d.y.valsb) opencl_push_array_map(d.y.valsb_gpu, d.y.valsb, n*d.y.rows*d.y.cols); #endif return d; } void *load_thread(void *ptr) { //printf("Loading data: %d\n", rand()); load_args a = *(struct load_args*)ptr; if(a.exposure == 0) a.exposure = 1; if(a.saturation == 0) a.saturation = 1; if(a.aspect == 0) a.aspect = 1; if (a.type == OLD_CLASSIFICATION_DATA){ *a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h); } else if (a.type == REGRESSION_DATA){ *a.d = load_data_regression(a.paths, a.n, a.m, a.classes, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure); } else if (a.type == CLASSIFICATION_DATA){ *a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.center); } else if (a.type == SUPER_DATA){ *a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale); } else if (a.type == WRITING_DATA){ *a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h); } else if (a.type == ISEG_DATA){ *a.d = load_data_iseg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.num_boxes, a.scale, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure); } else if (a.type == INSTANCE_DATA){ *a.d = load_data_mask(a.n, a.paths, a.m, a.w, a.h, a.classes, a.num_boxes, a.coords, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure); } else if (a.type == SEGMENTATION_DATA){ *a.d = load_data_seg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.scale); } else if (a.type == REGION_DATA){ *a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure); } else if (a.type == DETECTION_DATA){ *a.d = load_data_detection(a.n, a.paths, 
a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure); } else if (a.type == SWAG_DATA){ *a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter); } else if (a.type == COMPARE_DATA){ *a.d = load_data_compare(a.n, a.paths, a.m, a.classes, a.w, a.h); } else if (a.type == IMAGE_DATA){ *(a.im) = load_image_color(a.path, 0, 0); *(a.resized) = resize_image(*(a.im), a.w, a.h); } else if (a.type == LETTERBOX_DATA){ *(a.im) = load_image_color(a.path, 0, 0); *(a.resized) = letterbox_image(*(a.im), a.w, a.h); } else if (a.type == TAG_DATA){ *a.d = load_data_tag(a.paths, a.n, a.m, a.classes, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure); } free(ptr); return 0; } pthread_t load_data_in_thread(load_args args) { pthread_t thread; struct load_args *ptr = calloc(1, sizeof(struct load_args)); *ptr = args; if(pthread_create(&thread, 0, load_thread, ptr)) error("Thread creation failed"); return thread; } void *load_threads(void *ptr) { int i; load_args args = *(load_args *)ptr; if (args.threads == 0) args.threads = 1; data *out = args.d; int total = args.n; free(ptr); data *buffers = calloc(args.threads, sizeof(data)); pthread_t *threads = calloc(args.threads, sizeof(pthread_t)); for(i = 0; i < args.threads; ++i){ args.d = buffers + i; args.n = (i+1) * total/args.threads - i * total/args.threads; threads[i] = load_data_in_thread(args); } for(i = 0; i < args.threads; ++i){ pthread_join(threads[i], 0); } *out = concat_datas(buffers, args.threads); out->shallow = 0; for(i = 0; i < args.threads; ++i){ buffers[i].shallow = 1; free_data(buffers[i]); } free(buffers); free(threads); return 0; } void load_data_blocking(load_args args) { struct load_args *ptr = calloc(1, sizeof(struct load_args)); *ptr = args; load_thread(ptr); } pthread_t load_data(load_args args) { pthread_t thread; struct load_args *ptr = calloc(1, sizeof(struct load_args)); *ptr = args; if(pthread_create(&thread, 0, load_threads, ptr)) error("Thread creation failed"); return thread; } data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h) { if(m) paths = get_random_paths(paths, n, m); char **replace_paths = find_replace_paths(paths, n, ".png", "-label.png"); data d = {0}; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.y = load_image_paths_gray(replace_paths, n, out_w, out_h); if(m) free(paths); int i; for(i = 0; i < n; ++i) free(replace_paths[i]); free(replace_paths); return d; } data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.y = load_labels_paths(paths, n, labels, k, 0); if(m) free(paths); return d; } /* data load_data_study(char **paths, int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure) { data d = {0}; d.indexes = calloc(n, sizeof(int)); if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes); d.shallow = 0; d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure); d.y = load_labels_paths(paths, n, labels, k); if(m) free(paths); return d; } */ data load_data_super(char **paths, int n, int m, int w, int h, int scale) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; int i; d.X.rows = n; d.X.vals = calloc(n, sizeof(float*)); d.X.cols = w*h*3; d.y.rows = n; d.y.vals = calloc(n, sizeof(float*)); d.y.cols = w*scale * h*scale * 3; for(i = 0; i < n; 
++i){ image im = load_image_color(paths[i], 0, 0); image crop = random_crop_image(im, w*scale, h*scale); int flip = rand()%2; if (flip) flip_image(crop); image resize = resize_image(crop, w, h); d.X.vals[i] = resize.data; d.y.vals[i] = crop.data; free_image(im); } if(m) free(paths); return d; } data load_data_regression(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, 0); d.y = load_regression_labels_paths(paths, n, k); if(m) free(paths); return d; } data select_data(data *orig, int *inds) { data d = {0}; d.shallow = 1; d.w = orig[0].w; d.h = orig[0].h; d.X.rows = orig[0].X.rows; d.y.rows = orig[0].X.rows; d.X.cols = orig[0].X.cols; d.y.cols = orig[0].y.cols; d.X.vals = calloc(orig[0].X.rows, sizeof(float *)); d.y.vals = calloc(orig[0].y.rows, sizeof(float *)); int i; for(i = 0; i < d.X.rows; ++i){ d.X.vals[i] = orig[inds[i]].X.vals[i]; d.y.vals[i] = orig[inds[i]].y.vals[i]; } return d; } data *tile_data(data orig, int divs, int size) { data *ds = calloc(divs*divs, sizeof(data)); int i, j; #pragma omp parallel for for(i = 0; i < divs*divs; ++i){ data d; d.shallow = 0; d.w = orig.w/divs * size; d.h = orig.h/divs * size; d.X.rows = orig.X.rows; d.X.cols = d.w*d.h*3; d.X.vals = calloc(d.X.rows, sizeof(float*)); d.y = copy_matrix(orig.y); #pragma omp parallel for for(j = 0; j < orig.X.rows; ++j){ int x = (i%divs) * orig.w / divs - (d.w - orig.w/divs)/2; int y = (i/divs) * orig.h / divs - (d.h - orig.h/divs)/2; image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[j]); d.X.vals[j] = crop_image(im, x, y, d.w, d.h).data; } ds[i] = d; } return ds; } data resize_data(data orig, int w, int h) { data d = {0}; d.shallow = 0; d.w = w; d.h = h; int i; d.X.rows = orig.X.rows; d.X.cols = w*h*3; d.X.vals = calloc(d.X.rows, sizeof(float*)); d.y = copy_matrix(orig.y); #pragma omp parallel for for(i = 0; i < orig.X.rows; ++i){ image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[i]); d.X.vals[i] = resize_image(im, w, h).data; } return d; } data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.w=size; d.h=size; d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, center); d.y = load_labels_paths(paths, n, labels, k, hierarchy); if(m) free(paths); return d; } data load_data_tag(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.w = size; d.h = size; d.shallow = 0; d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, 0); d.y = load_tags_paths(paths, n, k); if(m) free(paths); return d; } matrix concat_matrix(matrix m1, matrix m2) { int i, count = 0; matrix m; m.cols = m1.cols; m.rows = m1.rows+m2.rows; m.vals = calloc(m1.rows + m2.rows, sizeof(float*)); for(i = 0; i < m1.rows; ++i){ m.vals[count++] = m1.vals[i]; } for(i = 0; i < m2.rows; ++i){ m.vals[count++] = m2.vals[i]; } #ifdef GPU_FETCH if (m.valsb_gpu.len != m1.rows*m1.cols + m2.rows*m2.cols) { m.valsb = calloc(m1.rows * m1.cols + m2.rows 
* m2.cols, sizeof(float)); } count = 0; for (i = 0; i < m1.rows * m1.cols; ++i) { m.valsb[count++] = m1.valsb[i]; } for (i = 0; i < m2.rows * m2.cols; ++i) { m.valsb[count++] = m2.valsb[i]; } if (m.valsb_gpu.len != m1.rows*m1.cols + m2.rows*m2.cols) { if (count && m.valsb) m.valsb_gpu = opencl_make_array(m.valsb, m.rows * m.cols); if (count && m.valsb) opencl_push_array_map(m.valsb_gpu, m.valsb, m.rows * m.cols); } #endif return m; } data concat_data(data d1, data d2) { data d = {0}; d.shallow = 1; d.X = concat_matrix(d1.X, d2.X); d.y = concat_matrix(d1.y, d2.y); d.w = d1.w; d.h = d1.h; return d; } data concat_datas(data *d, int n) { int i; data out = {0}; for(i = 0; i < n; ++i){ data new = concat_data(d[i], out); free_data(out); out = new; } return out; } data load_categorical_data_csv(char *filename, int target, int k) { data d = {0}; d.shallow = 0; matrix X = csv_to_matrix(filename); float *truth_1d = pop_column(&X, target); float **truth = one_hot_encode(truth_1d, X.rows, k); matrix y; y.rows = X.rows; y.cols = k; y.vals = truth; d.X = X; d.y = y; free(truth_1d); return d; } data load_cifar10_data(char *filename) { data d = {0}; d.shallow = 0; long i,j; matrix X = make_matrix(10000, 3072); matrix y = make_matrix(10000, 10); d.X = X; d.y = y; FILE *fp = fopen(filename, "rb"); if(!fp) file_error(filename); for(i = 0; i < 10000; ++i){ unsigned char bytes[3073]; fread(bytes, 1, 3073, fp); int class = bytes[0]; y.vals[i][class] = 1; for(j = 0; j < X.cols; ++j){ X.vals[i][j] = (double)bytes[j+1]; } } scale_data_rows(d, 1./255); //normalize_data_rows(d); fclose(fp); return d; } void get_random_batch(data d, int n, float *X, float *y) { int j; for(j = 0; j < n; ++j){ int index = rand()%d.X.rows; memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float)); memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float)); } } void get_next_batch(data d, int n, int offset, float *X, float *y) { int j; for(j = 0; j < n; ++j){ int index = offset + j; memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float)); if(y) memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float)); } } #ifdef GPU_FETCH void get_next_batch_gpu(data d, int n, int offset, cl_mem_ext X, cl_mem_ext y) { if (d.X.vals) copy_offset_gpu(n*d.X.cols, d.X.valsb_gpu, offset+n*d.X.cols, 1, X, 0, 1); if (d.y.vals) copy_offset_gpu(n*d.y.cols, d.y.valsb_gpu, offset+n*d.y.cols, 1, y, 0, 1); } #endif void smooth_data(data d) { int i, j; float scale = 1. 
/ d.y.cols; float eps = .1; for(i = 0; i < d.y.rows; ++i){ for(j = 0; j < d.y.cols; ++j){ d.y.vals[i][j] = eps * scale + (1-eps) * d.y.vals[i][j]; } } } data load_all_cifar10() { data d = {0}; d.shallow = 0; int i,j,b; matrix X = make_matrix(50000, 3072); matrix y = make_matrix(50000, 10); d.X = X; d.y = y; for(b = 0; b < 5; ++b){ char buff[256]; sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1); FILE *fp = fopen(buff, "rb"); if(!fp) file_error(buff); for(i = 0; i < 10000; ++i){ unsigned char bytes[3073]; fread(bytes, 1, 3073, fp); int class = bytes[0]; y.vals[i+b*10000][class] = 1; for(j = 0; j < X.cols; ++j){ X.vals[i+b*10000][j] = (double)bytes[j+1]; } } fclose(fp); } //normalize_data_rows(d); scale_data_rows(d, 1./255); smooth_data(d); return d; } data load_go(char *filename) { FILE *fp = fopen(filename, "rb"); matrix X = make_matrix(3363059, 361); matrix y = make_matrix(3363059, 361); int row, col; if(!fp) file_error(filename); char *label; int count = 0; while((label = fgetl(fp))){ int i; if(count == X.rows){ X = resize_matrix(X, count*2); y = resize_matrix(y, count*2); } sscanf(label, "%d %d", &row, &col); char *board = fgetl(fp); int index = row*19 + col; y.vals[count][index] = 1; for(i = 0; i < 19*19; ++i){ float val = 0; if(board[i] == '1') val = 1; else if(board[i] == '2') val = -1; X.vals[count][i] = val; } ++count; free(label); free(board); } X = resize_matrix(X, count); y = resize_matrix(y, count); data d = {0}; d.shallow = 0; d.X = X; d.y = y; fclose(fp); return d; } void randomize_data(data d) { int i; for(i = d.X.rows-1; i > 0; --i){ int index = rand()%i; float *swap = d.X.vals[index]; d.X.vals[index] = d.X.vals[i]; d.X.vals[i] = swap; swap = d.y.vals[index]; d.y.vals[index] = d.y.vals[i]; d.y.vals[i] = swap; } } void scale_data_rows(data d, float s) { int i; for(i = 0; i < d.X.rows; ++i){ scale_array(d.X.vals[i], d.X.cols, s); } } void translate_data_rows(data d, float s) { int i; for(i = 0; i < d.X.rows; ++i){ translate_array(d.X.vals[i], d.X.cols, s); } } data copy_data(data d) { data c = {0}; c.w = d.w; c.h = d.h; c.shallow = 0; c.num_boxes = d.num_boxes; c.boxes = d.boxes; c.X = copy_matrix(d.X); c.y = copy_matrix(d.y); return c; } void normalize_data_rows(data d) { int i; for(i = 0; i < d.X.rows; ++i){ normalize_array(d.X.vals[i], d.X.cols); } } data get_data_part(data d, int part, int total) { data p = {0}; p.shallow = 1; p.X.rows = d.X.rows * (part + 1) / total - d.X.rows * part / total; p.y.rows = d.y.rows * (part + 1) / total - d.y.rows * part / total; p.X.cols = d.X.cols; p.y.cols = d.y.cols; p.X.vals = d.X.vals + d.X.rows * part / total; p.y.vals = d.y.vals + d.y.rows * part / total; return p; } data get_random_data(data d, int num) { data r = {0}; r.shallow = 1; r.X.rows = num; r.y.rows = num; r.X.cols = d.X.cols; r.y.cols = d.y.cols; r.X.vals = calloc(num, sizeof(float *)); r.y.vals = calloc(num, sizeof(float *)); int i; for(i = 0; i < num; ++i){ int index = rand()%d.X.rows; r.X.vals[i] = d.X.vals[index]; r.y.vals[i] = d.y.vals[index]; } return r; } data *split_data(data d, int part, int total) { data *split = calloc(2, sizeof(data)); int i; int start = part*d.X.rows/total; int end = (part+1)*d.X.rows/total; data train; data test; train.shallow = test.shallow = 1; test.X.rows = test.y.rows = end-start; train.X.rows = train.y.rows = d.X.rows - (end-start); train.X.cols = test.X.cols = d.X.cols; train.y.cols = test.y.cols = d.y.cols; train.X.vals = calloc(train.X.rows, sizeof(float*)); test.X.vals = calloc(test.X.rows, sizeof(float*)); 
train.y.vals = calloc(train.y.rows, sizeof(float*)); test.y.vals = calloc(test.y.rows, sizeof(float*)); for(i = 0; i < start; ++i){ train.X.vals[i] = d.X.vals[i]; train.y.vals[i] = d.y.vals[i]; } for(i = start; i < end; ++i){ test.X.vals[i-start] = d.X.vals[i]; test.y.vals[i-start] = d.y.vals[i]; } for(i = end; i < d.X.rows; ++i){ train.X.vals[i-(end-start)] = d.X.vals[i]; train.y.vals[i-(end-start)] = d.y.vals[i]; } split[0] = train; split[1] = test; return split; }
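The loaders above are normally consumed through the load_args / load_data() pair: load_data() spawns load_threads(), which fans the request out over args.threads workers and concatenates the partial results into *args.d. A minimal double-buffering sketch of that pattern follows; it uses only the API defined above, and the batch size, input dimensions, and augmentation constants are hypothetical placeholders, not values taken from this file.

/* Sketch: prefetch the next chunk on a worker thread while the current one is
 * consumed.  Uses load_args, load_data(), free_data() from above; all numeric
 * settings are hypothetical. */
#include <pthread.h>

void train_loop_sketch(char **paths, int num_paths)
{
    load_args args = {0};
    args.paths = paths;
    args.m = num_paths;
    args.n = 64;                  /* images per fetched chunk (hypothetical) */
    args.w = args.h = 416;        /* network input size (hypothetical) */
    args.num_boxes = 30;
    args.classes = 80;
    args.jitter = .2;
    args.hue = .1;
    args.saturation = 1.5;
    args.exposure = 1.5;
    args.threads = 8;
    args.type = DETECTION_DATA;

    data buffer;
    args.d = &buffer;
    pthread_t loader = load_data(args);      /* runs load_threads() */

    int iter;
    for(iter = 0; iter < 1000; ++iter){
        pthread_join(loader, 0);             /* wait for the prefetched chunk */
        data train = buffer;                 /* take ownership of its arrays */
        loader = load_data(args);            /* immediately start the next fetch */
        /* ... forward/backward pass over `train` would go here ... */
        free_data(train);
    }
    pthread_join(loader, 0);
    free_data(buffer);
}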
simpson_omp.c
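/* Orientation: this program integrates exp(-x1^2 - ... - x5^2) over the unit
 * 5-cube by applying composite Simpson's rule independently in each of the
 * five dimensions; only the outermost dimension is parallelized with OpenMP.
 * Build with something like `cc -O2 -fopenmp simpson_omp.c -lm` (the flags are
 * an assumption, not taken from the source). */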
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <time.h>
#include <sys/time.h>
#include <omp.h>

#define THREAD_NUM 16
#define D 5 /* dimension of the integral (not referenced below) */

/* separable 5-D Gaussian integrand */
double f(double x1, double x2, double x3, double x4, double x5)
{
    return exp(-x1*x1 - x2*x2 - x3*x3 - x4*x4 - x5*x5);
}

int main(int argc, char **argv)
{
    double a = 0.0;
    double b = 1.0;

    assert(argc == 2);
    int n = atoi(argv[1]);      /* subintervals per dimension */

    double delta = (b - a) / n; /* subinterval width */
    double integral = 0.0;
    double h = delta / 2.0;     /* half-width: each subinterval is sampled at 0, h, 2h */

    /* composite Simpson weights within one subinterval: h/3, 4h/3, h/3 */
    double w[3];
    w[0] = h / 3.0;
    w[1] = 4.0 * w[0];
    w[2] = w[0];

    clock_t ts = clock(); /* CPU time, summed over all threads */
    struct timeval start, end;
    gettimeofday(&start, NULL);

    omp_set_num_threads(THREAD_NUM);

    /* Parallelize the outermost dimension only; the reduction clause gives
     * each thread a private partial sum instead of an atomic update per
     * sample, which would serialize the innermost loop. */
    #pragma omp parallel for reduction(+:integral)
    for (int i5 = 0; i5 < n; i5++) {
      for (int j5 = 0; j5 < 3; j5++) {
        double x5 = a + i5 * delta + j5 * h;
        for (int i4 = 0; i4 < n; i4++) {
          for (int j4 = 0; j4 < 3; j4++) {
            double x4 = a + i4 * delta + j4 * h;
            for (int i3 = 0; i3 < n; i3++) {
              for (int j3 = 0; j3 < 3; j3++) {
                double x3 = a + i3 * delta + j3 * h;
                for (int i2 = 0; i2 < n; i2++) {
                  for (int j2 = 0; j2 < 3; j2++) {
                    double x2 = a + i2 * delta + j2 * h;
                    for (int i1 = 0; i1 < n; i1++) {
                      for (int j1 = 0; j1 < 3; j1++) {
                        double x1 = a + i1 * delta + j1 * h;
                        integral += w[j1] * w[j2] * w[j3] * w[j4] * w[j5]
                                    * f(x1, x2, x3, x4, x5);
                      }
                    }
                  }
                }
              }
            }
          }
        }
      }
    }

    gettimeofday(&end, NULL);
    double elapsed = (end.tv_sec - start.tv_sec)
                     + (end.tv_usec - start.tv_usec) / 1.e6;
    ts = clock() - ts;

    printf("%ld clocks (%lf seconds)\n", (long)ts, elapsed);
    printf("integral is: %lf\n", integral);
    return 0;
}
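Because the integrand is separable, the exact value factors as (integral_0^1 e^{-x^2} dx)^5 = ((sqrt(pi)/2) * erf(1))^5, which gives a quick check on the printed result. A standalone reference calculator, not part of the original file (link with -lm):

/* Closed-form reference for simpson_omp.c: the integrand factors, so the 5-D
 * integral is the 1-D result raised to the 5th power. */
#include <stdio.h>
#include <math.h>

int main(void)
{
    const double pi = acos(-1.0);
    double one_dim = 0.5 * sqrt(pi) * erf(1.0); /* integral of e^{-x^2} over [0,1] */
    printf("exact: %.10f\n", pow(one_dim, 5.0)); /* ~0.2323227 */
    return 0;
}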
sstruct_matrix.c
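/* Orientation: a semi-structured (SStruct) matrix is stored in two pieces.
 * The P-matrix below is an nvars x nvars array of StructMatrix blocks, one
 * per (variable, to-variable) pair with stencil couplings; anything that
 * cannot be expressed as a stencil on a single part -- inter-part couplings
 * and explicit graph entries -- lands in the U-matrix, an IJ/ParCSR matrix.
 * Throughout, action > 0 means add-to values, action = 0 set, action < 0 get. */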
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * Member functions for hypre_SStructPMatrix class. * *****************************************************************************/ #include "_hypre_sstruct_mv.h" #include "_hypre_struct_mv.hpp" /*========================================================================== * SStructPMatrix routines *==========================================================================*/ /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructPMatrixRef( hypre_SStructPMatrix *matrix, hypre_SStructPMatrix **matrix_ref ) { hypre_SStructPMatrixRefCount(matrix) ++; *matrix_ref = matrix; return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructPMatrixCreate( MPI_Comm comm, hypre_SStructPGrid *pgrid, hypre_SStructStencil **stencils, hypre_SStructPMatrix **pmatrix_ptr ) { hypre_SStructPMatrix *pmatrix; HYPRE_Int nvars; HYPRE_Int **smaps; hypre_StructStencil ***sstencils; hypre_StructMatrix ***smatrices; HYPRE_Int **symmetric; hypre_StructStencil *sstencil; HYPRE_Int *vars; hypre_Index *sstencil_shape; HYPRE_Int sstencil_size; HYPRE_Int new_dim; HYPRE_Int *new_sizes; hypre_Index **new_shapes; HYPRE_Int size; hypre_StructGrid *sgrid; HYPRE_Int vi, vj; HYPRE_Int i, j, k; pmatrix = hypre_TAlloc(hypre_SStructPMatrix, 1, HYPRE_MEMORY_HOST); hypre_SStructPMatrixComm(pmatrix) = comm; hypre_SStructPMatrixPGrid(pmatrix) = pgrid; hypre_SStructPMatrixStencils(pmatrix) = stencils; nvars = hypre_SStructPGridNVars(pgrid); hypre_SStructPMatrixNVars(pmatrix) = nvars; /* create sstencils */ smaps = hypre_TAlloc(HYPRE_Int *, nvars, HYPRE_MEMORY_HOST); sstencils = hypre_TAlloc(hypre_StructStencil **, nvars, HYPRE_MEMORY_HOST); new_sizes = hypre_TAlloc(HYPRE_Int, nvars, HYPRE_MEMORY_HOST); new_shapes = hypre_TAlloc(hypre_Index *, nvars, HYPRE_MEMORY_HOST); size = 0; for (vi = 0; vi < nvars; vi++) { sstencils[vi] = hypre_TAlloc(hypre_StructStencil *, nvars, HYPRE_MEMORY_HOST); for (vj = 0; vj < nvars; vj++) { sstencils[vi][vj] = NULL; new_sizes[vj] = 0; } sstencil = hypre_SStructStencilSStencil(stencils[vi]); vars = hypre_SStructStencilVars(stencils[vi]); sstencil_shape = hypre_StructStencilShape(sstencil); sstencil_size = hypre_StructStencilSize(sstencil); smaps[vi] = hypre_TAlloc(HYPRE_Int, sstencil_size, HYPRE_MEMORY_HOST); for (i = 0; i < sstencil_size; i++) { j = vars[i]; new_sizes[j]++; } for (vj = 0; vj < nvars; vj++) { if (new_sizes[vj]) { new_shapes[vj] = hypre_TAlloc(hypre_Index, new_sizes[vj], HYPRE_MEMORY_HOST); new_sizes[vj] = 0; } } for (i = 0; i < sstencil_size; i++) { j = vars[i]; k = new_sizes[j]; hypre_CopyIndex(sstencil_shape[i], new_shapes[j][k]); smaps[vi][i] = k; new_sizes[j]++; } new_dim = hypre_StructStencilNDim(sstencil); for (vj = 0; vj < nvars; vj++) { if (new_sizes[vj]) { sstencils[vi][vj] = hypre_StructStencilCreate(new_dim, new_sizes[vj], new_shapes[vj]); } size = hypre_max(size, new_sizes[vj]); } } 
hypre_SStructPMatrixSMaps(pmatrix) = smaps; hypre_SStructPMatrixSStencils(pmatrix) = sstencils; hypre_TFree(new_sizes, HYPRE_MEMORY_HOST); hypre_TFree(new_shapes, HYPRE_MEMORY_HOST); /* create smatrices */ smatrices = hypre_TAlloc(hypre_StructMatrix **, nvars, HYPRE_MEMORY_HOST); for (vi = 0; vi < nvars; vi++) { smatrices[vi] = hypre_TAlloc(hypre_StructMatrix *, nvars, HYPRE_MEMORY_HOST); for (vj = 0; vj < nvars; vj++) { smatrices[vi][vj] = NULL; if (sstencils[vi][vj] != NULL) { sgrid = hypre_SStructPGridSGrid(pgrid, vi); smatrices[vi][vj] = hypre_StructMatrixCreate(comm, sgrid, sstencils[vi][vj]); } } } hypre_SStructPMatrixSMatrices(pmatrix) = smatrices; /* create symmetric */ symmetric = hypre_TAlloc(HYPRE_Int *, nvars, HYPRE_MEMORY_HOST); for (vi = 0; vi < nvars; vi++) { symmetric[vi] = hypre_TAlloc(HYPRE_Int, nvars, HYPRE_MEMORY_HOST); for (vj = 0; vj < nvars; vj++) { symmetric[vi][vj] = 0; } } hypre_SStructPMatrixSymmetric(pmatrix) = symmetric; hypre_SStructPMatrixSEntriesSize(pmatrix) = size; hypre_SStructPMatrixSEntries(pmatrix) = hypre_TAlloc(HYPRE_Int, size, HYPRE_MEMORY_HOST); hypre_SStructPMatrixRefCount(pmatrix) = 1; *pmatrix_ptr = pmatrix; return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructPMatrixDestroy( hypre_SStructPMatrix *pmatrix ) { hypre_SStructStencil **stencils; HYPRE_Int nvars; HYPRE_Int **smaps; hypre_StructStencil ***sstencils; hypre_StructMatrix ***smatrices; HYPRE_Int **symmetric; HYPRE_Int vi, vj; if (pmatrix) { hypre_SStructPMatrixRefCount(pmatrix) --; if (hypre_SStructPMatrixRefCount(pmatrix) == 0) { stencils = hypre_SStructPMatrixStencils(pmatrix); nvars = hypre_SStructPMatrixNVars(pmatrix); smaps = hypre_SStructPMatrixSMaps(pmatrix); sstencils = hypre_SStructPMatrixSStencils(pmatrix); smatrices = hypre_SStructPMatrixSMatrices(pmatrix); symmetric = hypre_SStructPMatrixSymmetric(pmatrix); for (vi = 0; vi < nvars; vi++) { HYPRE_SStructStencilDestroy(stencils[vi]); hypre_TFree(smaps[vi], HYPRE_MEMORY_HOST); for (vj = 0; vj < nvars; vj++) { hypre_StructStencilDestroy(sstencils[vi][vj]); hypre_StructMatrixDestroy(smatrices[vi][vj]); } hypre_TFree(sstencils[vi], HYPRE_MEMORY_HOST); hypre_TFree(smatrices[vi], HYPRE_MEMORY_HOST); hypre_TFree(symmetric[vi], HYPRE_MEMORY_HOST); } hypre_TFree(stencils, HYPRE_MEMORY_HOST); hypre_TFree(smaps, HYPRE_MEMORY_HOST); hypre_TFree(sstencils, HYPRE_MEMORY_HOST); hypre_TFree(smatrices, HYPRE_MEMORY_HOST); hypre_TFree(symmetric, HYPRE_MEMORY_HOST); hypre_TFree(hypre_SStructPMatrixSEntries(pmatrix), HYPRE_MEMORY_HOST); hypre_TFree(pmatrix, HYPRE_MEMORY_HOST); } } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructPMatrixInitialize( hypre_SStructPMatrix *pmatrix ) { HYPRE_Int nvars = hypre_SStructPMatrixNVars(pmatrix); HYPRE_Int **symmetric = hypre_SStructPMatrixSymmetric(pmatrix); hypre_StructMatrix *smatrix; HYPRE_Int vi, vj; /* HYPRE_Int num_ghost[2*HYPRE_MAXDIM]; */ /* HYPRE_Int vi, vj, d, ndim; */ #if 0 ndim = hypre_SStructPMatrixNDim(pmatrix); /* RDF: Why are the ghosts being reset to one? Maybe it needs to be at least * one to set shared coefficients correctly, but not exactly one? 
*/ for (d = 0; d < ndim; d++) { num_ghost[2 * d] = num_ghost[2 * d + 1] = 1; } #endif for (vi = 0; vi < nvars; vi++) { for (vj = 0; vj < nvars; vj++) { smatrix = hypre_SStructPMatrixSMatrix(pmatrix, vi, vj); if (smatrix != NULL) { HYPRE_StructMatrixSetSymmetric(smatrix, symmetric[vi][vj]); /* hypre_StructMatrixSetNumGhost(smatrix, num_ghost); */ hypre_StructMatrixInitialize(smatrix); /* needed to get AddTo accumulation correct between processors */ hypre_StructMatrixClearGhostValues(smatrix); } } } hypre_SStructPMatrixAccumulated(pmatrix) = 0; return hypre_error_flag; } /*-------------------------------------------------------------------------- * (action > 0): add-to values * (action = 0): set values * (action < 0): get values *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructPMatrixSetValues( hypre_SStructPMatrix *pmatrix, hypre_Index index, HYPRE_Int var, HYPRE_Int nentries, HYPRE_Int *entries, HYPRE_Complex *values, HYPRE_Int action ) { hypre_SStructStencil *stencil = hypre_SStructPMatrixStencil(pmatrix, var); HYPRE_Int *smap = hypre_SStructPMatrixSMap(pmatrix, var); HYPRE_Int *vars = hypre_SStructStencilVars(stencil); hypre_StructMatrix *smatrix; hypre_BoxArray *grid_boxes; hypre_Box *box, *grow_box; HYPRE_Int *sentries; HYPRE_Int i; smatrix = hypre_SStructPMatrixSMatrix(pmatrix, var, vars[entries[0]]); sentries = hypre_SStructPMatrixSEntries(pmatrix); for (i = 0; i < nentries; i++) { sentries[i] = smap[entries[i]]; } /* set values inside the grid */ hypre_StructMatrixSetValues(smatrix, index, nentries, sentries, values, action, -1, 0); /* set (AddTo/Get) or clear (Set) values outside the grid in ghost zones */ if (action != 0) { /* AddTo/Get */ hypre_SStructPGrid *pgrid = hypre_SStructPMatrixPGrid(pmatrix); hypre_Index varoffset; HYPRE_Int done = 0; grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(smatrix)); hypre_ForBoxI(i, grid_boxes) { box = hypre_BoxArrayBox(grid_boxes, i); if (hypre_IndexInBox(index, box)) { done = 1; break; } } if (!done) { grow_box = hypre_BoxCreate(hypre_BoxArrayNDim(grid_boxes)); hypre_SStructVariableGetOffset(hypre_SStructPGridVarType(pgrid, var), hypre_SStructPGridNDim(pgrid), varoffset); hypre_ForBoxI(i, grid_boxes) { box = hypre_BoxArrayBox(grid_boxes, i); hypre_CopyBox(box, grow_box); hypre_BoxGrowByIndex(grow_box, varoffset); if (hypre_IndexInBox(index, grow_box)) { hypre_StructMatrixSetValues(smatrix, index, nentries, sentries, values, action, i, 1); break; } } hypre_BoxDestroy(grow_box); } } else { /* Set */ grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(smatrix)); hypre_ForBoxI(i, grid_boxes) { box = hypre_BoxArrayBox(grid_boxes, i); if (!hypre_IndexInBox(index, box)) { hypre_StructMatrixClearValues(smatrix, index, nentries, sentries, i, 1); } } } return hypre_error_flag; } /*-------------------------------------------------------------------------- * (action > 0): add-to values * (action = 0): set values * (action < 0): get values * (action =-2): get values and zero out *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructPMatrixSetBoxValues( hypre_SStructPMatrix *pmatrix, hypre_Box *set_box, HYPRE_Int var, HYPRE_Int nentries, HYPRE_Int *entries, hypre_Box *value_box, HYPRE_Complex *values, HYPRE_Int action ) { HYPRE_Int ndim = hypre_SStructPMatrixNDim(pmatrix); hypre_SStructStencil *stencil = hypre_SStructPMatrixStencil(pmatrix, var); HYPRE_Int *smap = hypre_SStructPMatrixSMap(pmatrix, var); HYPRE_Int *vars = 
hypre_SStructStencilVars(stencil); hypre_StructMatrix *smatrix; hypre_BoxArray *grid_boxes; HYPRE_Int *sentries; HYPRE_Int i, j; smatrix = hypre_SStructPMatrixSMatrix(pmatrix, var, vars[entries[0]]); sentries = hypre_SStructPMatrixSEntries(pmatrix); for (i = 0; i < nentries; i++) { sentries[i] = smap[entries[i]]; } /* set values inside the grid */ hypre_StructMatrixSetBoxValues(smatrix, set_box, value_box, nentries, sentries, values, action, -1, 0); /* TODO: Why need DeviceSync? */ #if defined(HYPRE_USING_GPU) hypre_SyncCudaDevice(hypre_handle()); #endif /* set (AddTo/Get) or clear (Set) values outside the grid in ghost zones */ if (action != 0) { /* AddTo/Get */ hypre_SStructPGrid *pgrid = hypre_SStructPMatrixPGrid(pmatrix); hypre_Index varoffset; hypre_BoxArray *left_boxes, *done_boxes, *temp_boxes; hypre_Box *left_box, *done_box, *int_box; hypre_SStructVariableGetOffset(hypre_SStructPGridVarType(pgrid, var), hypre_SStructPGridNDim(pgrid), varoffset); grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(smatrix)); left_boxes = hypre_BoxArrayCreate(1, ndim); done_boxes = hypre_BoxArrayCreate(2, ndim); temp_boxes = hypre_BoxArrayCreate(0, ndim); /* done_box always points to the first box in done_boxes */ done_box = hypre_BoxArrayBox(done_boxes, 0); /* int_box always points to the second box in done_boxes */ int_box = hypre_BoxArrayBox(done_boxes, 1); hypre_CopyBox(set_box, hypre_BoxArrayBox(left_boxes, 0)); hypre_BoxArraySetSize(left_boxes, 1); hypre_SubtractBoxArrays(left_boxes, grid_boxes, temp_boxes); hypre_BoxArraySetSize(done_boxes, 0); hypre_ForBoxI(i, grid_boxes) { hypre_SubtractBoxArrays(left_boxes, done_boxes, temp_boxes); hypre_BoxArraySetSize(done_boxes, 1); hypre_CopyBox(hypre_BoxArrayBox(grid_boxes, i), done_box); hypre_BoxGrowByIndex(done_box, varoffset); hypre_ForBoxI(j, left_boxes) { left_box = hypre_BoxArrayBox(left_boxes, j); hypre_IntersectBoxes(left_box, done_box, int_box); hypre_StructMatrixSetBoxValues(smatrix, int_box, value_box, nentries, sentries, values, action, i, 1); } } hypre_BoxArrayDestroy(left_boxes); hypre_BoxArrayDestroy(done_boxes); hypre_BoxArrayDestroy(temp_boxes); } else { /* Set */ hypre_BoxArray *diff_boxes; hypre_Box *grid_box, *diff_box; grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(smatrix)); diff_boxes = hypre_BoxArrayCreate(0, ndim); hypre_ForBoxI(i, grid_boxes) { grid_box = hypre_BoxArrayBox(grid_boxes, i); hypre_BoxArraySetSize(diff_boxes, 0); hypre_SubtractBoxes(set_box, grid_box, diff_boxes); hypre_ForBoxI(j, diff_boxes) { diff_box = hypre_BoxArrayBox(diff_boxes, j); hypre_StructMatrixClearBoxValues(smatrix, diff_box, nentries, sentries, i, 1); } } hypre_BoxArrayDestroy(diff_boxes); } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructPMatrixAccumulate( hypre_SStructPMatrix *pmatrix ) { hypre_SStructPGrid *pgrid = hypre_SStructPMatrixPGrid(pmatrix); HYPRE_Int nvars = hypre_SStructPMatrixNVars(pmatrix); HYPRE_Int ndim = hypre_SStructPGridNDim(pgrid); HYPRE_SStructVariable *vartypes = hypre_SStructPGridVarTypes(pgrid); hypre_StructMatrix *smatrix; hypre_Index varoffset; HYPRE_Int num_ghost[2 * HYPRE_MAXDIM]; hypre_StructGrid *sgrid; HYPRE_Int vi, vj, d; hypre_CommInfo *comm_info; hypre_CommPkg *comm_pkg; hypre_CommHandle *comm_handle; /* if values already accumulated, just return */ if (hypre_SStructPMatrixAccumulated(pmatrix)) { return hypre_error_flag; } for (vi = 0; vi < 
nvars; vi++) { for (vj = 0; vj < nvars; vj++) { smatrix = hypre_SStructPMatrixSMatrix(pmatrix, vi, vj); if (smatrix != NULL) { sgrid = hypre_StructMatrixGrid(smatrix); /* assumes vi and vj vartypes are the same */ hypre_SStructVariableGetOffset(vartypes[vi], ndim, varoffset); for (d = 0; d < ndim; d++) { num_ghost[2 * d] = num_ghost[2 * d + 1] = hypre_IndexD(varoffset, d); } /* accumulate values from AddTo */ hypre_CreateCommInfoFromNumGhost(sgrid, num_ghost, &comm_info); hypre_CommPkgCreate(comm_info, hypre_StructMatrixDataSpace(smatrix), hypre_StructMatrixDataSpace(smatrix), hypre_StructMatrixNumValues(smatrix), NULL, 1, hypre_StructMatrixComm(smatrix), &comm_pkg); hypre_InitializeCommunication(comm_pkg, hypre_StructMatrixData(smatrix), hypre_StructMatrixData(smatrix), 1, 0, &comm_handle); hypre_FinalizeCommunication(comm_handle); hypre_CommInfoDestroy(comm_info); hypre_CommPkgDestroy(comm_pkg); } } } hypre_SStructPMatrixAccumulated(pmatrix) = 1; return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructPMatrixAssemble( hypre_SStructPMatrix *pmatrix ) { HYPRE_Int nvars = hypre_SStructPMatrixNVars(pmatrix); hypre_StructMatrix *smatrix; HYPRE_Int vi, vj; hypre_SStructPMatrixAccumulate(pmatrix); for (vi = 0; vi < nvars; vi++) { for (vj = 0; vj < nvars; vj++) { smatrix = hypre_SStructPMatrixSMatrix(pmatrix, vi, vj); if (smatrix != NULL) { hypre_StructMatrixClearGhostValues(smatrix); hypre_StructMatrixAssemble(smatrix); } } } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructPMatrixSetSymmetric( hypre_SStructPMatrix *pmatrix, HYPRE_Int var, HYPRE_Int to_var, HYPRE_Int symmetric ) { HYPRE_Int **pmsymmetric = hypre_SStructPMatrixSymmetric(pmatrix); HYPRE_Int vstart = var; HYPRE_Int vsize = 1; HYPRE_Int tstart = to_var; HYPRE_Int tsize = 1; HYPRE_Int v, t; if (var == -1) { vstart = 0; vsize = hypre_SStructPMatrixNVars(pmatrix); } if (to_var == -1) { tstart = 0; tsize = hypre_SStructPMatrixNVars(pmatrix); } for (v = vstart; v < vsize; v++) { for (t = tstart; t < tsize; t++) { pmsymmetric[v][t] = symmetric; } } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructPMatrixPrint( const char *filename, hypre_SStructPMatrix *pmatrix, HYPRE_Int all ) { HYPRE_Int nvars = hypre_SStructPMatrixNVars(pmatrix); hypre_StructMatrix *smatrix; HYPRE_Int vi, vj; char new_filename[255]; for (vi = 0; vi < nvars; vi++) { for (vj = 0; vj < nvars; vj++) { smatrix = hypre_SStructPMatrixSMatrix(pmatrix, vi, vj); if (smatrix != NULL) { hypre_sprintf(new_filename, "%s.%02d.%02d", filename, vi, vj); hypre_StructMatrixPrint(new_filename, smatrix, all); } } } return hypre_error_flag; } /*========================================================================== * SStructUMatrix routines *==========================================================================*/ /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructUMatrixInitialize( hypre_SStructMatrix *matrix ) { HYPRE_Int ndim = hypre_SStructMatrixNDim(matrix); HYPRE_IJMatrix ijmatrix = 
hypre_SStructMatrixIJMatrix(matrix); HYPRE_Int matrix_type = hypre_SStructMatrixObjectType(matrix); hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix); hypre_SStructGrid *grid = hypre_SStructGraphGrid(graph); HYPRE_Int nparts = hypre_SStructGraphNParts(graph); hypre_SStructPGrid **pgrids = hypre_SStructGraphPGrids(graph); hypre_SStructStencil ***stencils = hypre_SStructGraphStencils(graph); HYPRE_Int nUventries = hypre_SStructGraphNUVEntries(graph); HYPRE_Int *iUventries = hypre_SStructGraphIUVEntries(graph); hypre_SStructUVEntry **Uventries = hypre_SStructGraphUVEntries(graph); HYPRE_Int **nvneighbors = hypre_SStructGridNVNeighbors(grid); hypre_StructGrid *sgrid; hypre_SStructStencil *stencil; HYPRE_Int *split; HYPRE_Int nvars; HYPRE_Int nrows, rowstart, nnzs ; HYPRE_Int part, var, entry, b, m, mi; HYPRE_Int *row_sizes; HYPRE_Int max_row_size; hypre_BoxArray *boxes; hypre_Box *box; hypre_Box *ghost_box; hypre_IndexRef start; hypre_Index loop_size, stride; HYPRE_IJMatrixSetObjectType(ijmatrix, HYPRE_PARCSR); #ifdef HYPRE_USING_OPENMP HYPRE_IJMatrixSetOMPFlag(ijmatrix, 1); /* Use OpenMP */ #endif if (matrix_type == HYPRE_SSTRUCT || matrix_type == HYPRE_STRUCT) { rowstart = hypre_SStructGridGhstartRank(grid); nrows = hypre_SStructGridGhlocalSize(grid) ; } else /* matrix_type == HYPRE_PARCSR */ { rowstart = hypre_SStructGridStartRank(grid); nrows = hypre_SStructGridLocalSize(grid); } /* set row sizes */ m = 0; max_row_size = 0; ghost_box = hypre_BoxCreate(ndim); row_sizes = hypre_CTAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST); hypre_SetIndex(stride, 1); for (part = 0; part < nparts; part++) { nvars = hypre_SStructPGridNVars(pgrids[part]); for (var = 0; var < nvars; var++) { sgrid = hypre_SStructPGridSGrid(pgrids[part], var); stencil = stencils[part][var]; split = hypre_SStructMatrixSplit(matrix, part, var); nnzs = 0; for (entry = 0; entry < hypre_SStructStencilSize(stencil); entry++) { if (split[entry] == -1) { nnzs++; } } #if 0 /* TODO: For now, assume stencil is full/complete */ if (hypre_SStructMatrixSymmetric(matrix)) { nnzs = 2 * nnzs - 1; } #endif boxes = hypre_StructGridBoxes(sgrid); hypre_ForBoxI(b, boxes) { box = hypre_BoxArrayBox(boxes, b); hypre_CopyBox(box, ghost_box); if (matrix_type == HYPRE_SSTRUCT || matrix_type == HYPRE_STRUCT) { hypre_BoxGrowByArray(ghost_box, hypre_StructGridNumGhost(sgrid)); } start = hypre_BoxIMin(box); hypre_BoxGetSize(box, loop_size); zypre_BoxLoop1Begin(hypre_SStructMatrixNDim(matrix), loop_size, ghost_box, start, stride, mi); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,mi) HYPRE_SMP_SCHEDULE #endif zypre_BoxLoop1For(mi) { row_sizes[m + mi] = nnzs; } zypre_BoxLoop1End(mi); m += hypre_BoxVolume(ghost_box); } max_row_size = hypre_max(max_row_size, nnzs); if (nvneighbors[part][var]) { max_row_size = hypre_max(max_row_size, hypre_SStructStencilSize(stencil)); } } } hypre_BoxDestroy(ghost_box); /* GEC0902 essentially for each UVentry we figure out how many extra columns * we need to add to the rowsizes */ /* RDF: THREAD? 
*/ for (entry = 0; entry < nUventries; entry++) { mi = iUventries[entry]; m = hypre_SStructUVEntryRank(Uventries[mi]) - rowstart; if ((m > -1) && (m < nrows)) { row_sizes[m] += hypre_SStructUVEntryNUEntries(Uventries[mi]); max_row_size = hypre_max(max_row_size, row_sizes[m]); } } /* ZTODO: Update row_sizes based on neighbor off-part couplings */ HYPRE_IJMatrixSetRowSizes (ijmatrix, (const HYPRE_Int *) row_sizes); hypre_TFree(row_sizes, HYPRE_MEMORY_HOST); hypre_SStructMatrixTmpSize(matrix) = max_row_size; hypre_SStructMatrixTmpRowCoords(matrix) = hypre_CTAlloc(HYPRE_BigInt, max_row_size, HYPRE_MEMORY_HOST); hypre_SStructMatrixTmpColCoords(matrix) = hypre_CTAlloc(HYPRE_BigInt, max_row_size, HYPRE_MEMORY_HOST); hypre_SStructMatrixTmpCoeffs(matrix) = hypre_CTAlloc(HYPRE_Complex, max_row_size, HYPRE_MEMORY_HOST); hypre_SStructMatrixTmpRowCoordsDevice(matrix) = hypre_CTAlloc(HYPRE_BigInt, max_row_size, HYPRE_MEMORY_DEVICE); hypre_SStructMatrixTmpColCoordsDevice(matrix) = hypre_CTAlloc(HYPRE_BigInt, max_row_size, HYPRE_MEMORY_DEVICE); hypre_SStructMatrixTmpCoeffsDevice(matrix) = hypre_CTAlloc(HYPRE_Complex, max_row_size, HYPRE_MEMORY_DEVICE); HYPRE_IJMatrixInitialize(ijmatrix); return hypre_error_flag; } /*-------------------------------------------------------------------------- * (action > 0): add-to values * (action = 0): set values * (action < 0): get values * * 9/09 - AB: modified to use the box manager - here we need to check the * neighbor box manager also *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructUMatrixSetValues( hypre_SStructMatrix *matrix, HYPRE_Int part, hypre_Index index, HYPRE_Int var, HYPRE_Int nentries, HYPRE_Int *entries, HYPRE_Complex *values, HYPRE_Int action ) { HYPRE_Int ndim = hypre_SStructMatrixNDim(matrix); HYPRE_IJMatrix ijmatrix = hypre_SStructMatrixIJMatrix(matrix); hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix); hypre_SStructGrid *grid = hypre_SStructGraphGrid(graph); hypre_SStructGrid *dom_grid = hypre_SStructGraphDomainGrid(graph); hypre_SStructStencil *stencil = hypre_SStructGraphStencil(graph, part, var); HYPRE_Int *vars = hypre_SStructStencilVars(stencil); hypre_Index *shape = hypre_SStructStencilShape(stencil); HYPRE_Int size = hypre_SStructStencilSize(stencil); hypre_IndexRef offset; hypre_Index to_index; hypre_SStructUVEntry *Uventry; hypre_BoxManEntry *boxman_entry; hypre_SStructBoxManInfo *entry_info; HYPRE_BigInt row_coord; HYPRE_BigInt *col_coords; HYPRE_Int ncoeffs; HYPRE_Complex *coeffs; HYPRE_Int i, entry; HYPRE_BigInt Uverank; HYPRE_Int matrix_type = hypre_SStructMatrixObjectType(matrix); HYPRE_Complex *h_values; hypre_SStructGridFindBoxManEntry(grid, part, index, var, &boxman_entry); /* if not local, check neighbors */ if (boxman_entry == NULL) { hypre_SStructGridFindNborBoxManEntry(grid, part, index, var, &boxman_entry); } if (boxman_entry == NULL) { hypre_error_in_arg(1); hypre_error_in_arg(2); hypre_error_in_arg(3); return hypre_error_flag; } else { hypre_BoxManEntryGetInfo(boxman_entry, (void **) &entry_info); } hypre_SStructBoxManEntryGetGlobalRank(boxman_entry, index, &row_coord, matrix_type); col_coords = hypre_SStructMatrixTmpColCoords(matrix); coeffs = hypre_SStructMatrixTmpCoeffs(matrix); /* RL: copy values to host since the following for-loop is on CPU */ if ( hypre_GetActualMemLocation(HYPRE_MEMORY_DEVICE) != hypre_MEMORY_HOST ) { h_values = hypre_TAlloc(HYPRE_Complex, nentries, HYPRE_MEMORY_HOST); hypre_TMemcpy(h_values, values, HYPRE_Complex, nentries, 
HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); } else { h_values = values; } /* RL: TODO Port it to GPU? */ ncoeffs = 0; for (i = 0; i < nentries; i++) { entry = entries[i]; if (entry < size) { /* stencil entries */ offset = shape[entry]; hypre_AddIndexes(index, offset, ndim, to_index); hypre_SStructGridFindBoxManEntry(dom_grid, part, to_index, vars[entry], &boxman_entry); /* if not local, check neighbors */ if (boxman_entry == NULL) { hypre_SStructGridFindNborBoxManEntry(dom_grid, part, to_index, vars[entry], &boxman_entry); } if (boxman_entry != NULL) { hypre_SStructBoxManEntryGetGlobalRank(boxman_entry, to_index, &col_coords[ncoeffs], matrix_type); coeffs[ncoeffs] = h_values[i]; ncoeffs++; } } else { /* non-stencil entries */ entry -= size; hypre_SStructGraphGetUVEntryRank(graph, part, var, index, &Uverank); if (Uverank > -1) { Uventry = hypre_SStructGraphUVEntry(graph, Uverank); col_coords[ncoeffs] = hypre_SStructUVEntryToRank(Uventry, entry); coeffs[ncoeffs] = h_values[i]; ncoeffs++; } } } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_BigInt *d_row_coords = hypre_SStructMatrixTmpRowCoordsDevice(matrix); HYPRE_BigInt *d_col_coords = hypre_SStructMatrixTmpColCoordsDevice(matrix); HYPRE_Complex *d_coeffs = hypre_SStructMatrixTmpCoeffsDevice(matrix); if ( hypre_GetExecPolicy1(hypre_IJMatrixMemoryLocation(ijmatrix)) == HYPRE_EXEC_DEVICE ) { hypreDevice_BigIntFilln(d_row_coords, ncoeffs, row_coord); hypre_TMemcpy(d_col_coords, col_coords, HYPRE_BigInt, ncoeffs, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); hypre_TMemcpy(d_coeffs, coeffs, HYPRE_Complex, ncoeffs, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); if (action > 0) { HYPRE_IJMatrixAddToValues(ijmatrix, ncoeffs, NULL, d_row_coords, (const HYPRE_BigInt *) d_col_coords, (const HYPRE_Complex *) d_coeffs); } else if (action > -1) { HYPRE_IJMatrixSetValues(ijmatrix, ncoeffs, NULL, d_row_coords, (const HYPRE_BigInt *) d_col_coords, (const HYPRE_Complex *) d_coeffs); } else { // RL:TODO HYPRE_IJMatrixGetValues(ijmatrix, 1, &ncoeffs, &row_coord, col_coords, values); } } else #endif { if (action > 0) { HYPRE_IJMatrixAddToValues(ijmatrix, 1, &ncoeffs, &row_coord, (const HYPRE_BigInt *) col_coords, (const HYPRE_Complex *) coeffs); } else if (action > -1) { HYPRE_IJMatrixSetValues(ijmatrix, 1, &ncoeffs, &row_coord, (const HYPRE_BigInt *) col_coords, (const HYPRE_Complex *) coeffs); } else { HYPRE_IJMatrixGetValues(ijmatrix, 1, &ncoeffs, &row_coord, col_coords, values); } } if (h_values != values) { hypre_TFree(h_values, HYPRE_MEMORY_HOST); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * Note: Entries must all be of type stencil or non-stencil, but not both. * * (action > 0): add-to values * (action = 0): set values * (action < 0): get values * * 9/09 - AB: modified to use the box manager- here we need to check the * neighbor box manager also * * To illustrate what is computed below before calling IJSetValues2(), consider * the following example of a 5-pt stencil (c,w,e,s,n) on a 3x2 grid (the 'x' in * arrays 'cols' and 'ijvalues' indicates "no data"): * * nrows = 6 * ncols = 3 4 3 3 4 3 * rows = 0 1 2 3 4 5 * row_indexes = 0 5 10 15 20 25 * cols = . . . x x . . . . x . . . x x . . . x x . . . . x . . . x x * ijvalues = . . . x x . . . . x . . . x x . . . x x . . . . x . . . 
x x * entry = c e n c w e n c w n c e s c w e s c w s *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructUMatrixSetBoxValues( hypre_SStructMatrix *matrix, HYPRE_Int part, hypre_Box *set_box, HYPRE_Int var, HYPRE_Int nentries, HYPRE_Int *entries, hypre_Box *value_box, HYPRE_Complex *values, HYPRE_Int action ) { HYPRE_Int ndim = hypre_SStructMatrixNDim(matrix); HYPRE_IJMatrix ijmatrix = hypre_SStructMatrixIJMatrix(matrix); hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix); hypre_SStructGrid *grid = hypre_SStructGraphGrid(graph); hypre_SStructGrid *dom_grid = hypre_SStructGraphDomainGrid(graph); hypre_SStructStencil *stencil = hypre_SStructGraphStencil(graph, part, var); HYPRE_Int *vars = hypre_SStructStencilVars(stencil); hypre_Index *shape = hypre_SStructStencilShape(stencil); HYPRE_Int size = hypre_SStructStencilSize(stencil); hypre_IndexRef offset; hypre_BoxManEntry **boxman_entries; HYPRE_Int nboxman_entries; hypre_BoxManEntry **boxman_to_entries; HYPRE_Int nboxman_to_entries; HYPRE_Int nrows; HYPRE_Int *ncols, *row_indexes;; HYPRE_BigInt *rows, *cols; HYPRE_Complex *ijvalues; hypre_Box *box; hypre_Box *to_box; hypre_Box *map_box; hypre_Box *int_box; hypre_Index index, stride, loop_size; hypre_IndexRef start; hypre_Index rs, cs; HYPRE_BigInt row_base, col_base; HYPRE_Int ei, entry, ii, jj; HYPRE_Int matrix_type = hypre_SStructMatrixObjectType(matrix); box = hypre_BoxCreate(ndim); /*------------------------------------------ * all stencil entries *------------------------------------------*/ if (entries[0] < size) { to_box = hypre_BoxCreate(ndim); map_box = hypre_BoxCreate(ndim); int_box = hypre_BoxCreate(ndim); nrows = hypre_BoxVolume(set_box); ncols = hypre_CTAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_DEVICE); rows = hypre_CTAlloc(HYPRE_BigInt, nrows, HYPRE_MEMORY_DEVICE); row_indexes = hypre_CTAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_DEVICE); cols = hypre_CTAlloc(HYPRE_BigInt, nrows * nentries, HYPRE_MEMORY_DEVICE); ijvalues = hypre_CTAlloc(HYPRE_Complex, nrows * nentries, HYPRE_MEMORY_DEVICE); hypre_SetIndex(stride, 1); hypre_SStructGridIntersect(grid, part, var, set_box, -1, &boxman_entries, &nboxman_entries); for (ii = 0; ii < nboxman_entries; ii++) { hypre_SStructBoxManEntryGetStrides(boxman_entries[ii], rs, matrix_type); hypre_CopyBox(set_box, box); hypre_BoxManEntryGetExtents(boxman_entries[ii], hypre_BoxIMin(map_box), hypre_BoxIMax(map_box)); hypre_IntersectBoxes(box, map_box, int_box); hypre_CopyBox(int_box, box); /* For each index in 'box', compute a row of length <= nentries and * insert it into an nentries-length segment of 'cols' and 'ijvalues'. * This may result in gaps, but IJSetValues2() is designed for that. 
*/ nrows = hypre_BoxVolume(box); #undef DEVICE_VAR #define DEVICE_VAR is_device_ptr(ncols,row_indexes) hypre_LoopBegin(nrows, i) { ncols[i] = 0; row_indexes[i] = i * nentries; } hypre_LoopEnd() #undef DEVICE_VAR #define DEVICE_VAR for (ei = 0; ei < nentries; ei++) { entry = entries[ei]; hypre_CopyBox(box, to_box); offset = shape[entry]; hypre_BoxShiftPos(to_box, offset); hypre_SStructGridIntersect(dom_grid, part, vars[entry], to_box, -1, &boxman_to_entries, &nboxman_to_entries); for (jj = 0; jj < nboxman_to_entries; jj++) { hypre_SStructBoxManEntryGetStrides(boxman_to_entries[jj], cs, matrix_type); hypre_BoxManEntryGetExtents(boxman_to_entries[jj], hypre_BoxIMin(map_box), hypre_BoxIMax(map_box)); hypre_IntersectBoxes(to_box, map_box, int_box); hypre_CopyIndex(hypre_BoxIMin(int_box), index); hypre_SStructBoxManEntryGetGlobalRank(boxman_to_entries[jj], index, &col_base, matrix_type); hypre_BoxShiftNeg(int_box, offset); hypre_CopyIndex(hypre_BoxIMin(int_box), index); hypre_SStructBoxManEntryGetGlobalRank(boxman_entries[ii], index, &row_base, matrix_type); start = hypre_BoxIMin(int_box); hypre_BoxGetSize(int_box, loop_size); #if defined(HYPRE_USING_GPU) hypre_assert(ndim <= 3); HYPRE_Int rs_0, rs_1, rs_2; HYPRE_Int cs_0, cs_1, cs_2; if (ndim > 0) { rs_0 = rs[0]; cs_0 = cs[0]; } if (ndim > 1) { rs_1 = rs[1]; cs_1 = cs[1]; } if (ndim > 2) { rs_2 = rs[2]; cs_2 = cs[2]; } #endif #undef DEVICE_VAR #define DEVICE_VAR is_device_ptr(ncols,rows,cols,ijvalues,values) hypre_BoxLoop2Begin(ndim, loop_size, box, start, stride, mi, value_box, start, stride, vi); { hypre_Index index; HYPRE_Int ci; hypre_BoxLoopGetIndex(index); ci = mi * nentries + ncols[mi]; rows[mi] = row_base; cols[ci] = col_base; #if defined(HYPRE_USING_GPU) if (ndim > 0) { rows[mi] += index[0] * rs_0; cols[ci] += index[0] * cs_0; } if (ndim > 1) { rows[mi] += index[1] * rs_1; cols[ci] += index[1] * cs_1; } if (ndim > 2) { rows[mi] += index[2] * rs_2; cols[ci] += index[2] * cs_2; } #else HYPRE_Int d; for (d = 0; d < ndim; d++) { rows[mi] += index[d] * rs[d]; cols[ci] += index[d] * cs[d]; } #endif ijvalues[ci] = values[ei + vi * nentries]; ncols[mi]++; } hypre_BoxLoop2End(mi, vi); #undef DEVICE_VAR #define DEVICE_VAR } /* end loop through boxman to entries */ hypre_TFree(boxman_to_entries, HYPRE_MEMORY_HOST); } /* end of ei nentries loop */ if (action > 0) { HYPRE_IJMatrixAddToValues2(ijmatrix, nrows, ncols, (const HYPRE_BigInt *) rows, (const HYPRE_Int *) row_indexes, (const HYPRE_BigInt *) cols, (const HYPRE_Complex *) ijvalues); } else if (action > -1) { HYPRE_IJMatrixSetValues2(ijmatrix, nrows, ncols, (const HYPRE_BigInt *) rows, (const HYPRE_Int *) row_indexes, (const HYPRE_BigInt *) cols, (const HYPRE_Complex *) ijvalues); } else { HYPRE_IJMatrixGetValues(ijmatrix, nrows, ncols, rows, cols, values); } } /* end loop through boxman entries */ hypre_TFree(boxman_entries, HYPRE_MEMORY_HOST); hypre_TFree(ncols, HYPRE_MEMORY_DEVICE); hypre_TFree(rows, HYPRE_MEMORY_DEVICE); hypre_TFree(row_indexes, HYPRE_MEMORY_DEVICE); hypre_TFree(cols, HYPRE_MEMORY_DEVICE); hypre_TFree(ijvalues, HYPRE_MEMORY_DEVICE); hypre_BoxDestroy(to_box); hypre_BoxDestroy(map_box); hypre_BoxDestroy(int_box); } /*------------------------------------------ * non-stencil entries *------------------------------------------*/ else { /* RDF: THREAD (Check safety on UMatrixSetValues call) */ hypre_BoxGetSize(set_box, loop_size); hypre_SerialBoxLoop0Begin(ndim, loop_size); { zypre_BoxLoopGetIndex(index); hypre_AddIndexes(index, hypre_BoxIMin(set_box), ndim, index); 
hypre_SStructUMatrixSetValues(matrix, part, index, var, nentries, entries, values, action); values += nentries; } hypre_SerialBoxLoop0End(); } hypre_BoxDestroy(box); return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructUMatrixAssemble( hypre_SStructMatrix *matrix ) { HYPRE_IJMatrix ijmatrix = hypre_SStructMatrixIJMatrix(matrix); HYPRE_IJMatrixAssemble(ijmatrix); HYPRE_IJMatrixGetObject( ijmatrix, (void **) &hypre_SStructMatrixParCSRMatrix(matrix)); return hypre_error_flag; } /*========================================================================== * SStructMatrix routines *==========================================================================*/ /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructMatrixRef( hypre_SStructMatrix *matrix, hypre_SStructMatrix **matrix_ref ) { hypre_SStructMatrixRefCount(matrix) ++; *matrix_ref = matrix; return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructMatrixSplitEntries( hypre_SStructMatrix *matrix, HYPRE_Int part, HYPRE_Int var, HYPRE_Int nentries, HYPRE_Int *entries, HYPRE_Int *nSentries_ptr, HYPRE_Int **Sentries_ptr, HYPRE_Int *nUentries_ptr, HYPRE_Int **Uentries_ptr ) { hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix); HYPRE_Int *split = hypre_SStructMatrixSplit(matrix, part, var); hypre_SStructStencil *stencil = hypre_SStructGraphStencil(graph, part, var); HYPRE_Int entry; HYPRE_Int i; HYPRE_Int nSentries = 0; HYPRE_Int *Sentries = hypre_SStructMatrixSEntries(matrix); HYPRE_Int nUentries = 0; HYPRE_Int *Uentries = hypre_SStructMatrixUEntries(matrix); for (i = 0; i < nentries; i++) { entry = entries[i]; if (entry < hypre_SStructStencilSize(stencil)) { /* stencil entries */ if (split[entry] > -1) { Sentries[nSentries] = split[entry]; nSentries++; } else { Uentries[nUentries] = entry; nUentries++; } } else { /* non-stencil entries */ Uentries[nUentries] = entry; nUentries++; } } *nSentries_ptr = nSentries; *Sentries_ptr = Sentries; *nUentries_ptr = nUentries; *Uentries_ptr = Uentries; return hypre_error_flag; } /*-------------------------------------------------------------------------- * (action > 0): add-to values * (action = 0): set values * (action < 0): get values * (action =-2): get values and zero out *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructMatrixSetValues( HYPRE_SStructMatrix matrix, HYPRE_Int part, HYPRE_Int *index, HYPRE_Int var, HYPRE_Int nentries, HYPRE_Int *entries, HYPRE_Complex *values, HYPRE_Int action ) { HYPRE_Int ndim = hypre_SStructMatrixNDim(matrix); hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix); hypre_SStructGrid *grid = hypre_SStructGraphGrid(graph); HYPRE_Int **nvneighbors = hypre_SStructGridNVNeighbors(grid); HYPRE_Int *Sentries; HYPRE_Int *Uentries; HYPRE_Int nSentries; HYPRE_Int nUentries; hypre_SStructPMatrix *pmatrix; hypre_Index cindex; hypre_SStructMatrixSplitEntries(matrix, part, var, nentries, entries, &nSentries, &Sentries, &nUentries, &Uentries); hypre_CopyToCleanIndex(index, ndim, cindex); /* S-matrix */ if (nSentries > 0) { pmatrix = hypre_SStructMatrixPMatrix(matrix, part); 
hypre_SStructPMatrixSetValues(pmatrix, cindex, var, nSentries, Sentries, values, action); /* put inter-part couplings in UMatrix and zero them out in PMatrix * (possibly in ghost zones) */ if (nvneighbors[part][var] > 0) { hypre_Box *set_box; HYPRE_Int d; /* This creates boxes with zeroed-out extents */ set_box = hypre_BoxCreate(ndim); for (d = 0; d < ndim; d++) { hypre_BoxIMinD(set_box, d) = cindex[d]; hypre_BoxIMaxD(set_box, d) = cindex[d]; } hypre_SStructMatrixSetInterPartValues(matrix, part, set_box, var, nSentries, entries, set_box, values, action); hypre_BoxDestroy(set_box); } } /* U-matrix */ if (nUentries > 0) { hypre_SStructUMatrixSetValues(matrix, part, cindex, var, nUentries, Uentries, values, action); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * (action > 0): add-to values * (action = 0): set values * (action < 0): get values * (action =-2): get values and zero out *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructMatrixSetBoxValues( HYPRE_SStructMatrix matrix, HYPRE_Int part, hypre_Box *set_box, HYPRE_Int var, HYPRE_Int nentries, HYPRE_Int *entries, hypre_Box *value_box, HYPRE_Complex *values, HYPRE_Int action ) { hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix); hypre_SStructGrid *grid = hypre_SStructGraphGrid(graph); HYPRE_Int **nvneighbors = hypre_SStructGridNVNeighbors(grid); HYPRE_Int *Sentries; HYPRE_Int *Uentries; HYPRE_Int nSentries; HYPRE_Int nUentries; hypre_SStructPMatrix *pmatrix; hypre_SStructMatrixSplitEntries(matrix, part, var, nentries, entries, &nSentries, &Sentries, &nUentries, &Uentries); /* S-matrix */ if (nSentries > 0) { pmatrix = hypre_SStructMatrixPMatrix(matrix, part); hypre_SStructPMatrixSetBoxValues(pmatrix, set_box, var, nSentries, Sentries, value_box, values, action); /* put inter-part couplings in UMatrix and zero them out in PMatrix * (possibly in ghost zones) */ if (nvneighbors[part][var] > 0) { hypre_SStructMatrixSetInterPartValues(matrix, part, set_box, var, nSentries, entries, value_box, values, action); } } /* U-matrix */ if (nUentries > 0) { hypre_SStructUMatrixSetBoxValues(matrix, part, set_box, var, nUentries, Uentries, value_box, values, action); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * Put inter-part couplings in UMatrix and zero them out in PMatrix (possibly in * ghost zones). Assumes that all entries are stencil entries. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructMatrixSetInterPartValues( HYPRE_SStructMatrix matrix, HYPRE_Int part, hypre_Box *set_box, HYPRE_Int var, HYPRE_Int nentries, HYPRE_Int *entries, hypre_Box *value_box, HYPRE_Complex *values, HYPRE_Int action ) { HYPRE_Int ndim = hypre_SStructMatrixNDim(matrix); hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix); hypre_SStructGrid *grid = hypre_SStructGraphGrid(graph); hypre_SStructPMatrix *pmatrix; hypre_SStructPGrid *pgrid; hypre_SStructStencil *stencil; hypre_Index *shape; HYPRE_Int *smap; HYPRE_Int *vars, frvartype, tovartype; hypre_StructMatrix *smatrix; hypre_Box *box, *ibox0, *ibox1, *tobox, *frbox; hypre_Index stride, loop_size; hypre_IndexRef offset, start; hypre_BoxManEntry **frentries, **toentries; hypre_SStructBoxManInfo *frinfo, *toinfo; HYPRE_Complex *tvalues = NULL; HYPRE_Int tvalues_size = 0; HYPRE_Int nfrentries, ntoentries, frpart, topart; HYPRE_Int entry, sentry, ei, fri, toi; pmatrix = hypre_SStructMatrixPMatrix(matrix, part); pgrid = hypre_SStructPMatrixPGrid(pmatrix); frvartype = hypre_SStructPGridVarType(pgrid, var); box = hypre_BoxCreate(ndim); ibox0 = hypre_BoxCreate(ndim); ibox1 = hypre_BoxCreate(ndim); tobox = hypre_BoxCreate(ndim); frbox = hypre_BoxCreate(ndim); stencil = hypre_SStructPMatrixStencil(pmatrix, var); smap = hypre_SStructPMatrixSMap(pmatrix, var); shape = hypre_SStructStencilShape(stencil); vars = hypre_SStructStencilVars(stencil); hypre_SetIndex(stride, 1); for (ei = 0; ei < nentries; ei++) { entry = entries[ei]; sentry = smap[entry]; offset = shape[entry]; smatrix = hypre_SStructPMatrixSMatrix(pmatrix, var, vars[entry]); tovartype = hypre_SStructPGridVarType(pgrid, vars[entry]); /* shift box in the stencil offset direction */ hypre_CopyBox(set_box, box); hypre_AddIndexes(hypre_BoxIMin(box), offset, ndim, hypre_BoxIMin(box)); hypre_AddIndexes(hypre_BoxIMax(box), offset, ndim, hypre_BoxIMax(box)); /* get "to" entries */ hypre_SStructGridIntersect(grid, part, vars[entry], box, -1, &toentries, &ntoentries); for (toi = 0; toi < ntoentries; toi++) { hypre_BoxManEntryGetExtents( toentries[toi], hypre_BoxIMin(tobox), hypre_BoxIMax(tobox)); hypre_IntersectBoxes(box, tobox, ibox0); if (hypre_BoxVolume(ibox0)) { hypre_SStructBoxManEntryGetPart(toentries[toi], part, &topart); /* shift ibox0 back */ hypre_SubtractIndexes(hypre_BoxIMin(ibox0), offset, ndim, hypre_BoxIMin(ibox0)); hypre_SubtractIndexes(hypre_BoxIMax(ibox0), offset, ndim, hypre_BoxIMax(ibox0)); /* get "from" entries */ hypre_SStructGridIntersect(grid, part, var, ibox0, -1, &frentries, &nfrentries); for (fri = 0; fri < nfrentries; fri++) { /* don't set couplings within the same part unless possibly for * cell data (to simplify periodic conditions for users) */ hypre_SStructBoxManEntryGetPart(frentries[fri], part, &frpart); if (topart == frpart) { if ( (frvartype != HYPRE_SSTRUCT_VARIABLE_CELL) || (tovartype != HYPRE_SSTRUCT_VARIABLE_CELL) ) { continue; } hypre_BoxManEntryGetInfo(frentries[fri], (void **) &frinfo); hypre_BoxManEntryGetInfo(toentries[toi], (void **) &toinfo); if ( hypre_SStructBoxManInfoType(frinfo) == hypre_SStructBoxManInfoType(toinfo) ) { continue; } } hypre_BoxManEntryGetExtents( frentries[fri], hypre_BoxIMin(frbox), hypre_BoxIMax(frbox)); hypre_IntersectBoxes(ibox0, frbox, ibox1); if (hypre_BoxVolume(ibox1)) { HYPRE_Int tvalues_new_size = hypre_BoxVolume(ibox1); tvalues = hypre_TReAlloc_v2(tvalues, HYPRE_Complex, tvalues_size, HYPRE_Complex, tvalues_new_size, 
HYPRE_MEMORY_DEVICE); tvalues_size = tvalues_new_size; if (action >= 0) { /* set or add */ /* copy values into tvalues */ start = hypre_BoxIMin(ibox1); hypre_BoxGetSize(ibox1, loop_size); #undef DEVICE_VAR #define DEVICE_VAR is_device_ptr(tvalues,values) hypre_BoxLoop2Begin(ndim, loop_size, ibox1, start, stride, mi, value_box, start, stride, vi); { tvalues[mi] = values[ei + vi * nentries]; } hypre_BoxLoop2End(mi, vi); #undef DEVICE_VAR #define DEVICE_VAR /* put values into UMatrix */ hypre_SStructUMatrixSetBoxValues( matrix, part, ibox1, var, 1, &entry, ibox1, tvalues, action); /* zero out values in PMatrix (possibly in ghost) */ hypre_StructMatrixClearBoxValues( smatrix, ibox1, 1, &sentry, -1, 1); } else { /* get */ /* get values from UMatrix */ hypre_SStructUMatrixSetBoxValues( matrix, part, ibox1, var, 1, &entry, ibox1, tvalues, action); /* copy tvalues into values */ start = hypre_BoxIMin(ibox1); hypre_BoxGetSize(ibox1, loop_size); #undef DEVICE_VAR #define DEVICE_VAR is_device_ptr(tvalues,values) hypre_BoxLoop2Begin(ndim, loop_size, ibox1, start, stride, mi, value_box, start, stride, vi); { values[ei + vi * nentries] = tvalues[mi]; } hypre_BoxLoop2End(mi, vi); #undef DEVICE_VAR #define DEVICE_VAR } /* end if action */ } /* end if nonzero ibox1 */ } /* end of "from" boxman entries loop */ hypre_TFree(frentries, HYPRE_MEMORY_HOST); } /* end if nonzero ibox0 */ } /* end of "to" boxman entries loop */ hypre_TFree(toentries, HYPRE_MEMORY_HOST); } /* end of entries loop */ hypre_BoxDestroy(box); hypre_BoxDestroy(ibox0); hypre_BoxDestroy(ibox1); hypre_BoxDestroy(tobox); hypre_BoxDestroy(frbox); hypre_TFree(tvalues, HYPRE_MEMORY_DEVICE); return hypre_error_flag; }
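/*
 * A self-contained illustration of the entry-splitting idea used by
 * hypre_SStructMatrixSplitEntries above: stencil entries whose split[]
 * value is >= 0 go to the structured (S) matrix under their mapped index,
 * while everything else (split[] == -1, or non-stencil entries) goes to
 * the unstructured (U) matrix.  This is a minimal sketch with hypothetical
 * inputs, not part of the hypre API; STENCIL_SIZE and the split map below
 * are made up for the example.
 */
#include <stdio.h>

#define STENCIL_SIZE 5

static void split_entries(const int *split, int nentries, const int *entries,
                          int *nS, int *Sentries, int *nU, int *Uentries)
{
   int i;
   *nS = 0;
   *nU = 0;
   for (i = 0; i < nentries; i++)
   {
      int entry = entries[i];
      if (entry < STENCIL_SIZE && split[entry] > -1)
      {
         Sentries[(*nS)++] = split[entry];   /* structured (stencil) part */
      }
      else
      {
         Uentries[(*nU)++] = entry;          /* unstructured part */
      }
   }
}

int main(void)
{
   /* Hypothetical split map: entries 0-2 live in the S-matrix
      (renumbered 0-2); entries 3-4 were moved to the U-matrix. */
   int split[STENCIL_SIZE] = { 0, 1, 2, -1, -1 };
   int entries[] = { 0, 3, 4, 2, 6 };        /* entry 6 is non-stencil */
   int S[8], U[8], nS, nU, i;

   split_entries(split, 5, entries, &nS, S, &nU, U);
   printf("S:");
   for (i = 0; i < nS; i++) printf(" %d", S[i]);
   printf("\nU:");
   for (i = 0; i < nU; i++) printf(" %d", U[i]);
   printf("\n");
   return 0;
}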
PoW.c
// Copyright (c) 2016-2018 Ulord Foundation Ltd. #include "PoW.h" #include <stdio.h> #include <stdint.h> #include <string.h> #include <stdlib.h> #include <assert.h> #ifndef MAC_OSX #include <omp.h> #endif #include "my_time.h" #include "common.h" #include "my_rand48_r.h" #include "oneWayFunction.h" // #define SSE_VERSION /* * Step 1: Initialize working memory. */ void initWorkMemory(uint8_t *input, uint32_t inputLen, uint8_t *Maddr, const uint32_t K) { uint32_t i, j; uint8_t a[OUTPUT_LEN], b[OUTPUT_LEN]; funcInfor[0].func(input, inputLen, a); uint64_t randSeed[4] = {0, 0, 0, 0}; #ifndef SSE_VERSION struct my_rand48_data randBuffer[4]; #else struct vrand48_data randBuffer[2]; #endif const uint32_t iterNum = WORK_MEMORY_SIZE >> 5; for (i = 0; i < iterNum; ++i) { if (i % K) { #ifndef SSE_VERSION uint64_t num = 0; for (j = 0; j < 4; ++j) { my_rand64_r(&randBuffer[j], &num); memcpy(b + (j << 3), (uint8_t *)&num, 8*sizeof(uint8_t)); } #else vrand64(b, randBuffer); #endif uint8_t shift_num; uint8_t result[OUTPUT_LEN]; reduce_bit((uint8_t *)&i, 4, (uint8_t *)&shift_num, 8); rrs(b, OUTPUT_LEN, result, shift_num); memcpy(Maddr + (i << 5), result, OUTPUT_LEN*sizeof(uint8_t)); for (j = 0; j < 32; ++j) { a[j] ^= result[j]; } } else { uint8_t t = 0, shift_num = 0; reduce_bit(a, 32, (uint8_t *)&t, 8); t = (t & 0x0f) ^ (t >> 4); reduce_bit((uint8_t *)&i, 4, (uint8_t *)&shift_num, 8); uint8_t a_rrs[INPUT_LEN]; rrs(a, OUTPUT_LEN, a_rrs, shift_num); funcInfor[t].func(a_rrs, 32, a); reduce_bit(a, 8, (uint8_t *)&randSeed[0], 48); reduce_bit(a + 8, 8, (uint8_t *)&randSeed[1], 48); reduce_bit(a + 16, 8, (uint8_t *)&randSeed[2], 48); reduce_bit(a + 24, 8, (uint8_t *)&randSeed[3], 48); #ifndef SSE_VERSION my_seed48_r(randSeed[0], &randBuffer[0]); my_seed48_r(randSeed[1], &randBuffer[1]); my_seed48_r(randSeed[2], &randBuffer[2]); my_seed48_r(randSeed[3], &randBuffer[3]); #else vseed48(randSeed , &randBuffer[0]); vseed48(randSeed + 2, &randBuffer[1]); #endif memcpy(Maddr + (i << 5), a, 32*sizeof(uint8_t)); } } } /* * Step 2: Modify the working memory contents. */ void modifyWorkMemory(uint8_t *Maddr, const uint32_t L, const uint32_t C, uint8_t *result) { uint32_t i, j; uint8_t a[OUTPUT_LEN], b[64]; funcInfor[0].func(Maddr + WORK_MEMORY_SIZE - 32, 32, a); memcpy(result, a, OUTPUT_LEN*sizeof(uint8_t)); uint64_t r = 0; reduce_bit(a, 32, (uint8_t *)&r, 64); const uint32_t iterNum = L << 6; for (i = 0; i < C; ++i) { uint64_t randSeed = 0; reduce_bit(a, 32, (uint8_t *)&randSeed, 48); struct my_rand48_data randBuffer; my_seed48_r(randSeed, &randBuffer); uint8_t t1, t2, s; uint64_t randNum = 0, base = 0; for (j = 0; j < iterNum; ++j) { my_rand48_r(&randBuffer, &randNum); base = randNum + r; uint64_t offset = 0; reduce_bit((uint8_t *)&r, 8, (uint8_t *)&offset, 8); offset = (offset << 8) + 1; uint64_t addr1 = (base + WORK_MEMORY_SIZE - offset) % WORK_MEMORY_SIZE; uint64_t addr2 = (base + offset) % WORK_MEMORY_SIZE; t1 = Maddr[addr1]; t2 = Maddr[addr2]; s = a[j & 0x1f]; Maddr[addr1] = t2 ^ s; Maddr[addr2] = t1 ^ s; b[j & 0x3f] = t1 ^ t2; r = r + s + t1 + t2; } uint8_t t = 0; reduce_bit((uint8_t *)&r, 8, (uint8_t *)&t, 8); t = (t & 0x0f) ^ (t >> 4); reduce_bit(b, 64, a, 256); uint8_t shift_num = 0; uint64_t ir = r + i; reduce_bit((uint8_t *)&ir, 8, (uint8_t *)&shift_num, 8); uint8_t a_rrs[INPUT_LEN]; rrs(a, OUTPUT_LEN, a_rrs, shift_num); funcInfor[t].func(a_rrs, 32, a); for (j = 0; j < OUTPUT_LEN; ++j) { result[j] ^= a[j]; } } } /* * Step 3: Calculate the final result. 
*/ void calculateFinalResult(uint8_t *Maddr, uint8_t *c, const uint32_t D, uint8_t *result) { uint32_t i = 0, j = 0, k = 0; memcpy(result, c, OUTPUT_LEN*sizeof(uint8_t)); const uint32_t num = (WORK_MEMORY_SIZE >> 5) - 1; uint32_t it = 0; uint8_t result_rrs[OUTPUT_LEN]; while(1) { uint8_t t = 0, shift_num = 0; uint32_t d = 0; reduce_bit(result, 32, (uint8_t *)&t, 8); t = (t & 0x0f) ^ (t >> 4); reduce_bit(result, 32, (uint8_t *)&d, D); ++d; for (j = 0; j < d; ++j) { uint32_t index = i << 5; for (k = 0; k < 32; ++k) { result[k] ^= Maddr[index + k]; } ++i; if (i == num) { it = i + t; reduce_bit((uint8_t *)&it, 4, (uint8_t *)&shift_num, 8); rrs(result, OUTPUT_LEN, result_rrs, shift_num); funcInfor[0].func(result_rrs, 32, result); return; } } it = t + i; reduce_bit((uint8_t *)&it, 4, (uint8_t *)&shift_num, 8); rrs(result, OUTPUT_LEN, result_rrs, shift_num); funcInfor[t].func(result_rrs, 32, result); } } /* * Correctness & Performance test for Proof of work */ void testPowFunction(uint8_t *mess, uint32_t messLen, const int64_t iterNum) { int64_t j; uint32_t inputLen = messLen; uint8_t input[INPUT_LEN], output[OUTPUT_LEN]; memset(input, 0, INPUT_LEN*sizeof(uint8_t)); memcpy(input, mess, messLen*sizeof(char)); // Init all one-way function initOneWayFunction(); uint8_t *Maddr = (uint8_t *)malloc(64 * WORK_MEMORY_SIZE*sizeof(uint8_t)); assert(NULL != Maddr); memset(Maddr, 0, 64 * WORK_MEMORY_SIZE*sizeof(uint8_t)); printf("****************************** Correctness test (PoW function) ******************************\n"); printf("Test message: %s\n", mess); powFunction(input, inputLen, Maddr, output); view_data_u8("PoW", output, OUTPUT_LEN); printf("*********************************************************************************************\n"); /* printf("*************************************************** Performance test (PoW function) ***************************************************\n"); uint8_t *result = (uint8_t *)malloc(iterNum * OUTPUT_LEN * sizeof(uint8_t)); assert(NULL != result); memset(result, 0, iterNum * OUTPUT_LEN * sizeof(uint8_t)); uint32_t threadNumArr[] = {1, 4, 8, 12, 16, 20, 24, 32, 48, 64}; uint32_t threadNumTypes = sizeof(threadNumArr) / sizeof(uint32_t); printf(" %-18s", "Algorithm"); for (uint32_t ix = 0; ix < threadNumTypes; ++ix) printf("%12d", threadNumArr[ix]); printf("\n"); printf("00 %-18s\t", "PoW"); for (uint32_t ix = 0; ix < threadNumTypes; ++ix) { omp_set_num_threads(threadNumArr[ix]); double startTime = get_wall_time(); if (threadNumArr[ix] == 1) { for (j = 0; j < iterNum; ++j) { powFunction(input, inputLen, Maddr, result + j * OUTPUT_LEN); } } else { #pragma omp parallel for firstprivate(input), private(j) shared(result) for (j = 0; j < iterNum; ++j) { powFunction(input, inputLen, Maddr + omp_get_thread_num() * WORK_MEMORY_SIZE, result + j * OUTPUT_LEN); } } double endTime = get_wall_time(); double costTime = endTime - startTime; printf("%5.0f bps ", iterNum / costTime); fflush(stdout); // Check result for (j = 0; j < iterNum; j += 1) { if (memcmp(output, result + j * OUTPUT_LEN, OUTPUT_LEN)) { printf("Thread num: %d, j: %ld\n", threadNumArr[ix], j); view_data_u8("output", output, OUTPUT_LEN); view_data_u8("result", result + j * OUTPUT_LEN, OUTPUT_LEN); abort(); } } } printf("\n"); printf("***************************************************************************************************************************************\n"); if (NULL != result) { free(result); result = NULL; } */ if (NULL != Maddr) { free(Maddr); Maddr = NULL; } } #define OUTPUT_BUFFER_SIZE 
(32 * 1024UL * 1024UL)
#define MAX_TEST_INPUT_LEN 140
#define MAX_OUT_FILE_NAME_LEN 25

const char testInputCase[][MAX_TEST_INPUT_LEN] = {
    "",
    "HelloWorld",
    "0123456789"
};

void powNistTest(const char *outFileName)
{
    const uint64_t iterNum = 1024UL * 1024UL;
    // const uint64_t iterNum = 1024UL;
    uint8_t *outputBuffer = (uint8_t *)malloc(OUTPUT_BUFFER_SIZE * sizeof(uint8_t));
    assert(NULL != outputBuffer);
    memset(outputBuffer, 0, OUTPUT_BUFFER_SIZE * sizeof(uint8_t));

    uint8_t *Maddr = (uint8_t *)malloc(WORK_MEMORY_SIZE*sizeof(uint8_t));
    assert(NULL != Maddr);
    memset(Maddr, 0, WORK_MEMORY_SIZE*sizeof(uint8_t));

    initOneWayFunction();

    uint32_t testInputCaseNum = sizeof(testInputCase) / sizeof(const char [MAX_TEST_INPUT_LEN]);
    for (uint32_t testCaseIx = 0; testCaseIx < testInputCaseNum; ++testCaseIx)
    {
        char curOutFileName[MAX_OUT_FILE_NAME_LEN] = "";
        sprintf(curOutFileName, "%s-%u.txt", outFileName, testCaseIx);
        FILE *fp = NULL;
        if (NULL != (fp = fopen(curOutFileName, "wb")))
        {
            const uint32_t testInputCaseLen = strlen((char *)testInputCase[testCaseIx]);
            uint8_t input[MAX_TEST_INPUT_LEN];
            memset(input, 0, MAX_TEST_INPUT_LEN*sizeof(uint8_t));
            memcpy(input, testInputCase[testCaseIx], testInputCaseLen*sizeof(uint8_t));

            double startTime = get_wall_time();
            powFunction(input, testInputCaseLen, Maddr, outputBuffer);
            // Chain the hashes: each 32-byte output becomes the next input.
            for (uint64_t i = 1, j = 0; i < iterNum; ++i)
            {
                memcpy(input, outputBuffer + j, OUTPUT_LEN * sizeof(uint8_t));
                j += OUTPUT_LEN;
                powFunction(input, OUTPUT_LEN, Maddr, outputBuffer + j);
                /*
                if (j == OUTPUT_BUFFER_SIZE)
                {
                    fwrite(outputBuffer, sizeof(uint8_t), OUTPUT_BUFFER_SIZE / sizeof(uint8_t), fp);
                    j = 0;
                }
                */
            }
            double endTime = get_wall_time();
            double costTime = endTime - startTime;
            fprintf(stdout, "TestCaseIx: %u, Input: %s, IterNum: %llu, Time: %4.2f, Performance: %5.2f bps\n",
                    testCaseIx, testInputCase[testCaseIx], (unsigned long long)iterNum, costTime,
                    ((double)(iterNum * OUTPUT_LEN)) / costTime);
            fflush(stdout);

            fwrite(outputBuffer, sizeof(uint8_t), OUTPUT_BUFFER_SIZE / sizeof(uint8_t), fp);
            fclose(fp);
        }
        else
        {
            fprintf(stderr, "Error: Open %s failed!\n", curOutFileName);
            abort();
        }
    }

    if (NULL != outputBuffer) { free(outputBuffer); outputBuffer = NULL; }
    if (NULL != Maddr) { free(Maddr); Maddr = NULL; }
}

void helloHash(const uint8_t *mess, uint32_t messLen, uint8_t output[OUTPUT_LEN])
{
    if (messLen != INPUT_LEN)
    {
        // Callers are expected to always pass INPUT_LEN bytes.
        printf("helloHash: Invalid message length %u\n", messLen);
        return;
    }

    uint32_t inputLen = messLen;
    uint8_t input[INPUT_LEN];
    memset(input, 0, INPUT_LEN*sizeof(uint8_t));
    memcpy(input, mess, inputLen*sizeof(char));

    uint8_t *Maddr = (uint8_t *)malloc(WORK_MEMORY_SIZE*sizeof(uint8_t)); // 1024*1024*1
    assert(NULL != Maddr);
    memset(Maddr, 0, WORK_MEMORY_SIZE*sizeof(uint8_t));

    // printf("Test message: %s\n", mess);
    powFunction(input, inputLen, Maddr, output);
    // view_data_u8("PoW", output, OUTPUT_LEN);

    if (NULL != Maddr) { free(Maddr); Maddr = NULL; }
}

// Draw two consecutive 48-bit LCG states and fold them into one 64-bit value.
int my_rand64_r (struct my_rand48_data *buffer, uint64_t *result)
{
    uint64_t X = buffer->__x;
    X = (X * buffer->__a + buffer->__c) & 0xffffffffffffULL;
    buffer->__x = X;
    buffer->__x = (X * buffer->__a + buffer->__c) & 0xffffffffffffULL;
    X ^= buffer->__x << 16;
    *result = X;
    return 0;
}

// Seed the generator with the classic drand48 multiplier and increment.
int my_seed48_r (uint64_t seedval, struct my_rand48_data *buffer)
{
    buffer->__x = seedval & 0xffffffffffffULL;
    buffer->__a = 0x5deece66dULL;
    buffer->__c = 0xb;
    return 0;
}

void powFunction(uint8_t *input, uint32_t inputLen, uint8_t *Maddr, uint8_t *output)
{
    uint8_t c[OUTPUT_LEN];

    // Step 1: Initialize working memory.
    initWorkMemory(input, inputLen, Maddr, 128);
    // view_data_u8("Maddr", Maddr, OUTPUT_LEN);

    // Step 2: Modify the working memory contents.
    modifyWorkMemory(Maddr, 4, WORK_MEMORY_SIZE >> 11, c);
    // view_data_u8("c", c, OUTPUT_LEN);

    // Step 3: Calculate the final result.
    calculateFinalResult(Maddr, c, 8, output);
    // view_data_u8("output", output, OUTPUT_LEN);
}

// Advance the 48-bit linear congruential generator by one step.
int my_rand48_r (struct my_rand48_data *buffer, uint64_t *result)
{
    *result = (buffer->__x * buffer->__a + buffer->__c) & 0xffffffffffffULL;
    buffer->__x = *result;
    return 0;
}
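/*
 * Standalone usage sketch for the rand48-style generator above.  The
 * constants a = 0x5deece66d, c = 0xb and the 48-bit mask are the classic
 * POSIX drand48 parameters, X_{n+1} = (a*X_n + c) mod 2^48; my_rand64_r
 * widens this to 64 bits by folding two consecutive states together.
 * The state layout below mirrors what my_rand48_r.h is assumed to
 * declare; names suffixed _sketch are illustrative, not project code.
 */
#include <stdio.h>
#include <stdint.h>

struct rand48_state_sketch { uint64_t x, a, c; };

static void seed48_sketch(uint64_t seedval, struct rand48_state_sketch *s)
{
    s->x = seedval & 0xffffffffffffULL;
    s->a = 0x5deece66dULL;   /* drand48 multiplier */
    s->c = 0xbULL;           /* drand48 increment  */
}

static uint64_t rand64_sketch(struct rand48_state_sketch *s)
{
    /* Two consecutive 48-bit states, folded into one 64-bit value. */
    uint64_t X = (s->x * s->a + s->c) & 0xffffffffffffULL;
    s->x = (X * s->a + s->c) & 0xffffffffffffULL;
    return X ^ (s->x << 16);
}

int main(void)
{
    struct rand48_state_sketch s;
    seed48_sketch(0x1234abcdULL, &s);
    for (int i = 0; i < 4; ++i)
        printf("%016llx\n", (unsigned long long)rand64_sketch(&s));
    return 0;
}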
GB_unaryop__minv_int16_int32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_int16_int32 // op(A') function: GB_tran__minv_int16_int32 // C type: int16_t // A type: int32_t // cast: int16_t cij = (int16_t) aij // unaryop: cij = GB_IMINV_SIGNED (aij, 16) #define GB_ATYPE \ int32_t #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_SIGNED (x, 16) ; // casting #define GB_CASTING(z, aij) \ int16_t z = (int16_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_INT16 || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_int16_int32 ( int16_t *Cx, // Cx and Ax may be aliased int32_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_int16_int32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
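/*
 * A plain-C analogue of the generated kernel above: typecast each entry
 * of A, apply a unary op, and parallelize across entries.  The
 * iminv_sketch() below is a simplified stand-in for GB_IMINV_SIGNED
 * (integer "multiplicative inverse", here 1/x with 1/0 mapped to 0 for
 * simplicity); the real GraphBLAS macro may differ in its edge-case
 * handling.  This is an illustrative sketch, not generated code.
 */
#include <stdio.h>
#include <stdint.h>

static inline int16_t iminv_sketch(int16_t x)
{
    return (x == 0) ? 0 : (int16_t)(1 / x);
}

static void unop_minv_int16_int32(int16_t *Cx, const int32_t *Ax,
                                  int64_t anz, int nthreads)
{
    int64_t p;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0; p < anz; p++)
    {
        int16_t z = (int16_t) Ax[p];   /* cast: int32 -> int16 */
        Cx[p] = iminv_sketch(z);       /* unary op */
    }
}

int main(void)
{
    int32_t Ax[4] = { 1, -1, 2, 0 };
    int16_t Cx[4];
    unop_minv_int16_int32(Cx, Ax, 4, 2);
    for (int p = 0; p < 4; p++) printf("%d ", (int) Cx[p]);
    printf("\n");   /* prints: 1 -1 0 0 */
    return 0;
}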
magsac.h
#pragma once #include <limits> #include <chrono> #include <memory> #include "model.h" #include "model_score.h" #include "sampler.h" #include "uniform_sampler.h" #include <math.h> #include "gamma_values.cpp" #ifdef _WIN32 #include <ppl.h> #endif template <class DatumType, class ModelEstimator> class MAGSAC { public: enum Version { // The original version of MAGSAC. It works well, however, can be quite slow in many cases. MAGSAC_ORIGINAL, // The recently proposed MAGSAC++ algorithm which keeps the accuracy of the original MAGSAC but is often orders of magnitude faster. MAGSAC_PLUS_PLUS }; MAGSAC(const Version magsac_version_ = Version::MAGSAC_PLUS_PLUS) : time_limit(std::numeric_limits<double>::max()), // desired_fps(-1), iteration_limit(std::numeric_limits<size_t>::max()), maximum_threshold(10.0), apply_post_processing(true), mininum_iteration_number(50), partition_number(5), core_number(1), number_of_irwls_iters(1), interrupting_threshold(1.0), last_iteration_number(0), log_confidence(0), point_number(0), magsac_version(magsac_version_) { } ~MAGSAC() {} // A function to run MAGSAC. bool run( const cv::Mat &points_, // The input data points const double confidence_, // The required confidence in the results ModelEstimator& estimator_, // The model estimator gcransac::sampler::Sampler<cv::Mat, size_t> &sampler_, // The sampler used gcransac::Model &obtained_model_, // The estimated model parameters int &iteration_number_, // The number of iterations done ModelScore &model_score_); // The score of the estimated model // A function to set the maximum inlier-outlier threshold void setMaximumThreshold(const double maximum_threshold_) { maximum_threshold = maximum_threshold_; } // A function to set the inlier-outlier threshold used for speeding up the procedure // and for determining the required number of iterations. void setReferenceThreshold(const double threshold_) { interrupting_threshold = threshold_; } double getReferenceThreshold() { return interrupting_threshold; } // Setting the flag determining if post-processing is needed void applyPostProcessing(bool value_) { apply_post_processing = value_; } // A function to set the maximum number of iterations void setIterationLimit(size_t iteration_limit_) { iteration_limit = iteration_limit_; } // A function to set the minimum number of iterations void setMinimumIterationNumber(size_t mininum_iteration_number_) { mininum_iteration_number = mininum_iteration_number_; } // A function to set the number of cores used in the original MAGSAC algorithm. // In MAGSAC++, it is not used. Note that when multiple MAGSACs run in parallel, // it is beneficial to keep the core number one for each independent MAGSAC. // Otherwise, the threads will act weirdly. void setCoreNumber(size_t core_number_) { if (magsac_version == MAGSAC_PLUS_PLUS) fprintf(stderr, "Setting the core number for MAGSAC++ is deprecated."); core_number = core_number_; } // Setting the number of partitions used in the original MAGSAC algorithm // to speed up the procedure. In MAGSAC++, this parameter is not used. void setPartitionNumber(size_t partition_number_) { if (magsac_version == MAGSAC_PLUS_PLUS) fprintf(stderr, "Setting the partition number for MAGSAC++ is deprecated."); partition_number = partition_number_; } // A function to set a desired minimum frames-per-second (FPS) value. void setFPS(int fps_) { desired_fps = fps_; // The required FPS. // The time limit which the FPS implies time_limit = fps_ <= 0 ? 
std::numeric_limits<double>::max() : 1.0 / fps_; } // The post-processing algorithm applying sigma-consensus to the input model once. bool postProcessing( const cv::Mat &points, // All data points const gcransac::Model &so_far_the_best_model, // The input model to be improved gcransac::Model &output_model, // The improved model parameters ModelScore &output_score, // The score of the improved model const ModelEstimator &estimator); // The model estimator // The function determining the quality/score of a model using the original MAGSAC // criterion. Note that this function is significantly slower than the quality // function of MAGSAC++. void getModelQuality( const cv::Mat& points_, // All data points const gcransac::Model& model_, // The input model const ModelEstimator& estimator_, // The model estimator double& marginalized_iteration_number_, // The required number of iterations marginalized over the noise scale double& score_); // The score/quality of the model // The function determining the quality/score of a // model using the MAGSAC++ criterion. void getModelQualityPlusPlus( const cv::Mat &points_, // All data points const gcransac::Model &model_, // The model parameter const ModelEstimator &estimator_, // The model estimator class double &score_, // The score to be calculated const double &previous_best_score_); // The score of the previous so-far-the-best model size_t number_of_irwls_iters; protected: Version magsac_version; // The version of MAGSAC used size_t iteration_limit; // Maximum number of iterations allowed size_t mininum_iteration_number; // Minimum number of iteration before terminating double maximum_threshold; // The maximum sigma value size_t core_number; // Number of core used in sigma-consensus double time_limit; // A time limit after the algorithm is interrupted int desired_fps; // The desired FPS (TODO: not tested with MAGSAC) bool apply_post_processing; // Decides if the post-processing step should be applied int point_number; // The current point number int last_iteration_number; // The iteration number implied by the last run of sigma-consensus double log_confidence; // The logarithm of the required confidence size_t partition_number; // Number of partitions used to speed up sigma-consensus double interrupting_threshold; // A threshold to speed up MAGSAC by interrupting the sigma-consensus procedure whenever there is no chance of being better than the previous so-far-the-best model bool sigmaConsensus( const cv::Mat& points_, const gcransac::Model& model_, gcransac::Model& refined_model_, ModelScore& score_, const ModelEstimator& estimator_, const ModelScore& best_score_); bool sigmaConsensusPlusPlus( const cv::Mat &points_, const gcransac::Model& model_, gcransac::Model& refined_model_, ModelScore &score_, const ModelEstimator &estimator_, const ModelScore &best_score_); }; template <class DatumType, class ModelEstimator> bool MAGSAC<DatumType, ModelEstimator>::run( const cv::Mat& points_, const double confidence_, ModelEstimator& estimator_, gcransac::sampler::Sampler<cv::Mat, size_t> &sampler_, gcransac::Model& obtained_model_, int& iteration_number_, ModelScore &model_score_) { // Initialize variables std::chrono::time_point<std::chrono::system_clock> start, end; // Variables for time measuring: start and end times std::chrono::duration<double> elapsed_seconds; // Variables for time measuring: elapsed time log_confidence = log(1.0 - confidence_); // The logarithm of 1 - confidence point_number = points_.rows; // Number of points const int sample_size = 
estimator_.sampleSize(); // The sample size required for the estimation size_t max_iteration = iteration_limit; // The maximum number of iterations initialized to the iteration limit int iteration = 0; // Current number of iterations gcransac::Model so_far_the_best_model; // Current best model ModelScore so_far_the_best_score; // The score of the current best model std::unique_ptr<size_t[]> minimal_sample(new size_t[sample_size]); // The sample used for the estimation std::vector<size_t> pool(points_.rows); for (size_t point_idx = 0; point_idx < point_number; ++point_idx) pool[point_idx] = point_idx; if (points_.rows < sample_size) { fprintf(stderr, "There are not enough points for applying robust estimation. Minimum is %d; while %d are given.\n", sample_size, points_.rows); return false; } // Set the start time variable if there is some time limit set if (desired_fps > -1) start = std::chrono::system_clock::now(); constexpr size_t max_unsuccessful_model_generations = 50; // Main MAGSAC iteration while (mininum_iteration_number > iteration || iteration < max_iteration) { // Increase the current iteration number ++iteration; // Sample a minimal subset std::vector<gcransac::Model> models; // The set of estimated models size_t unsuccessful_model_generations = 0; // The number of unsuccessful model generations // Try to select a minimal sample and estimate the implied model parameters while (++unsuccessful_model_generations < max_unsuccessful_model_generations) { // Get a minimal sample randomly if (!sampler_.sample(pool, // The index pool from which the minimal sample can be selected minimal_sample.get(), // The minimal sample sample_size)) // The size of a minimal sample continue; // Check if the selected sample is valid before estimating the model // parameters which usually takes more time. if (!estimator_.isValidSample(points_, // All points minimal_sample.get())) // The current sample continue; // Estimate the model from the minimal sample if (estimator_.estimateModel(points_, // All data points minimal_sample.get(), // The selected minimal sample &models)) // The estimated models break; } // If the method was not able to generate any usable models, break the cycle. 
iteration += unsuccessful_model_generations - 1; // Select the so-far-the-best from the estimated models for (const auto &model : models) { ModelScore score; // The score of the current model gcransac::Model refined_model; // The refined model parameters // Apply sigma-consensus to refine the model parameters by marginalizing over the noise level sigma bool success; if (magsac_version == Version::MAGSAC_ORIGINAL) success = sigmaConsensus(points_, model, refined_model, score, estimator_, so_far_the_best_score); else success = sigmaConsensusPlusPlus(points_, model, refined_model, score, estimator_, so_far_the_best_score); // Continue if the model was rejected if (!success || score.score == -1) continue; // Save the iteration number when the current model is found score.iteration = iteration; // Update the best model parameters if needed if (so_far_the_best_score < score) { so_far_the_best_model = refined_model; // Update the best model parameters so_far_the_best_score = score; // Update the best model's score max_iteration = MIN(max_iteration, last_iteration_number); // Update the max iteration number, but do not allow to increase } } // Update the time parameters if a time limit is set if (desired_fps > -1) { end = std::chrono::system_clock::now(); elapsed_seconds = end - start; // Interrupt if the time limit is exceeded if (elapsed_seconds.count() > time_limit) break; } } // Apply sigma-consensus as a post processing step if needed and the estimated model is valid if (apply_post_processing) { // TODO } obtained_model_ = so_far_the_best_model; iteration_number_ = iteration; model_score_ = so_far_the_best_score; return so_far_the_best_score.score > 0; } template <class DatumType, class ModelEstimator> bool MAGSAC<DatumType, ModelEstimator>::postProcessing( const cv::Mat &points_, const gcransac::Model &model_, gcransac::Model &refined_model_, ModelScore &refined_score_, const ModelEstimator &estimator_) { fprintf(stderr, "Sigma-consensus++ is not implemented yet as post-processing.\n"); return false; } template <class DatumType, class ModelEstimator> bool MAGSAC<DatumType, ModelEstimator>::sigmaConsensus( const cv::Mat &points_, const gcransac::Model& model_, gcransac::Model& refined_model_, ModelScore &score_, const ModelEstimator &estimator_, const ModelScore &best_score_) { // Set up the parameters constexpr double L = 1.05; constexpr double k = ModelEstimator::getSigmaQuantile(); constexpr double threshold_to_sigma_multiplier = 1.0 / k; constexpr size_t sample_size = estimator_.sampleSize(); static auto comparator = [](std::pair<double, int> left, std::pair<double, int> right) { return left.first < right.first; }; const int point_number = points_.rows; double current_maximum_sigma = this->maximum_threshold; // Calculating the residuals std::vector< std::pair<double, size_t> > all_residuals; all_residuals.reserve(point_number); // If it is not the first run, consider the previous best and interrupt the validation when there is no chance of being better if (best_score_.inlier_number > 0) { // Number of inliers which should be exceeded int points_remaining = best_score_.inlier_number; // Collect the points which are closer than the threshold which the maximum sigma implies for (int point_idx = 0; point_idx < point_number; ++point_idx) { // Calculate the residual of the current point const double residual = estimator_.residual(points_.row(point_idx), model_); if (current_maximum_sigma > residual) { // Store the residual of the current point and its index 
all_residuals.emplace_back(std::make_pair(residual, point_idx)); // Count points which are closer than a reference threshold to speed up the procedure if (residual < interrupting_threshold) --points_remaining; } // Interrupt if there is no chance of being better // TODO: replace this part by SPRT test if (point_number - point_idx < points_remaining) return false; } // Store the number of really close inliers just to speed up the procedure // by interrupting the next verifications. score_.inlier_number = best_score_.inlier_number - points_remaining; } else { // The number of really close points size_t points_close = 0; // Collect the points which are closer than the threshold which the maximum sigma implies for (size_t point_idx = 0; point_idx < point_number; ++point_idx) { // Calculate the residual of the current point const double residual = estimator_.residual(points_.row(point_idx), model_); if (current_maximum_sigma > residual) { // Store the residual of the current point and its index all_residuals.emplace_back(std::make_pair(residual, point_idx)); // Count points which are closer than a reference threshold to speed up the procedure if (residual < interrupting_threshold) ++points_close; } } // Store the number of really close inliers just to speed up the procedure // by interrupting the next verifications. score_.inlier_number = points_close; } std::vector<gcransac::Model> sigma_models; std::vector<size_t> sigma_inliers; std::vector<double> final_weights; // The number of possible inliers const size_t possible_inlier_number = all_residuals.size(); // Sort the residuals in ascending order std::sort(all_residuals.begin(), all_residuals.end(), comparator); // The maximum threshold is set to be slightly bigger than the distance of the // farthest possible inlier. current_maximum_sigma = all_residuals.back().first + std::numeric_limits<double>::epsilon(); const double sigma_step = current_maximum_sigma / partition_number; last_iteration_number = 10000; score_.score = 0; // The weights calculated by each parallel process std::vector<std::vector<double>> point_weights_par(partition_number, std::vector<double>(possible_inlier_number, 0)); // If OpenMP is used, calculate things in parallel #ifdef USE_OPENMP #pragma omp parallel for num_threads(core_number) for (int partition_idx = 0; partition_idx < partition_number; ++partition_idx) { // The maximum sigma value in the current partition const double max_sigma = (partition_idx + 1) * sigma_step; // Find the last element which has smaller distance than 'max_threshold' // Since the vector is ordered binary search can be used to find that particular element. 
const auto &last_element = std::upper_bound(all_residuals.begin(), all_residuals.end(), std::make_pair(max_sigma, 0), comparator); const size_t sigma_inlier_number = last_element - all_residuals.begin(); // Put the indices into a vector std::vector<size_t> sigma_inliers; sigma_inliers.reserve(sigma_inlier_number); // Store the points which are closer than the current sigma limit for (size_t relative_point_idx = 0; relative_point_idx < sigma_inlier_number; ++relative_point_idx) sigma_inliers.emplace_back(all_residuals[relative_point_idx].second); // Check if there are enough inliers to fit a model if (sigma_inliers.size() > sample_size) { // Estimating the model which the current set of inliers imply std::vector<gcransac::Model> sigma_models; estimator_.estimateModelNonminimal(points_, &(sigma_inliers)[0], sigma_inlier_number, &sigma_models); // If the estimation was successful calculate the implied probabilities if (sigma_models.size() == 1) { const double max_sigma_squared_2 = 2 * max_sigma * max_sigma; double residual_i_2, // The residual of the i-th point probability_i; // The probability of the i-th point // Iterate through all points to estimate the related probabilities for (size_t relative_point_idx = 0; relative_point_idx < sigma_inliers.size(); ++relative_point_idx) { // TODO: Replace with Chi-square instead of normal distribution const size_t &point_idx = sigma_inliers[relative_point_idx]; // Calculate the residual of the current point residual_i_2 = estimator_.squaredResidual(points_.row(point_idx), sigma_models[0]); // Calculate the probability of the i-th point assuming Gaussian distribution // TODO: replace by Chi-square distribution probability_i = exp(-residual_i_2 / max_sigma_squared_2); // Store the probability of the i-th point coming from the current partition point_weights_par[partition_idx][relative_point_idx] += probability_i; } } } } #else fprintf(stderr, "Not implemented yet.\n"); #endif // The weights used for the final weighted least-squares fitting final_weights.reserve(possible_inlier_number); // Collect all points which has higher probability of being inlier than zero sigma_inliers.reserve(possible_inlier_number); for (size_t point_idx = 0; point_idx < possible_inlier_number; ++point_idx) { // Calculate the weight of the current point double weight = 0.0; for (size_t partition_idx = 0; partition_idx < partition_number; ++partition_idx) weight += point_weights_par[partition_idx][point_idx]; // If the weight is approx. zero, continue. 
if (weight < std::numeric_limits<double>::epsilon()) continue; // Store the index and weight of the current point sigma_inliers.emplace_back(all_residuals[point_idx].second); final_weights.emplace_back(weight); } // If there are fewer inliers than the size of the minimal sample interupt the procedure if (sigma_inliers.size() < sample_size) return false; // Estimate the model parameters using weighted least-squares fitting if (!estimator_.estimateModelNonminimal( points_, // All input points &(sigma_inliers)[0], // Points which have higher than 0 probability of being inlier static_cast<int>(sigma_inliers.size()), // Number of possible inliers &sigma_models, // Estimated models &(final_weights)[0])) // Weights of points return false; bool is_model_updated = false; if (sigma_models.size() == 1 && // If only a single model is estimated estimator_.isValidModel(*std::prev(sigma_models.end()), points_, sigma_inliers, &(sigma_inliers)[0], interrupting_threshold, is_model_updated)) // and it is valid { // Return the refined model refined_model_ = sigma_models.back(); // Calculate the score of the model and the implied iteration number double marginalized_iteration_number; getModelQuality(points_, // All the input points refined_model_, // The estimated model estimator_, // The estimator marginalized_iteration_number, // The marginalized inlier ratio score_.score); // The marginalized score if (marginalized_iteration_number < 0 || std::isnan(marginalized_iteration_number)) last_iteration_number = std::numeric_limits<int>::max(); else last_iteration_number = static_cast<int>(round(marginalized_iteration_number)); return true; } return false; } template <class DatumType, class ModelEstimator> bool MAGSAC<DatumType, ModelEstimator>::sigmaConsensusPlusPlus( const cv::Mat &points_, const gcransac::Model& model_, gcransac::Model& refined_model_, ModelScore &score_, const ModelEstimator &estimator_, const ModelScore &best_score_) { // The degrees of freedom of the data from which the model is estimated. // E.g., for models coming from point correspondences (x1,y1,x2,y2), it is 4. constexpr size_t degrees_of_freedom = ModelEstimator::getDegreesOfFreedom(); // A 0.99 quantile of the Chi^2-distribution to convert sigma values to residuals constexpr double k = ModelEstimator::getSigmaQuantile(); // A multiplier to convert residual values to sigmas constexpr double threshold_to_sigma_multiplier = 1.0 / k; // Calculating k^2 / 2 which will be used for the estimation and, // due to being constant, it is better to calculate it a priori. constexpr double squared_k_per_2 = k * k / 2.0; // Calculating (DoF - 1) / 2 which will be used for the estimation and, // due to being constant, it is better to calculate it a priori. constexpr double dof_minus_one_per_two = (degrees_of_freedom - 1.0) / 2.0; // TODO: check constexpr double C = ModelEstimator::getC(); // The size of a minimal sample used for the estimation constexpr size_t sample_size = estimator_.sampleSize(); // Calculating 2^(DoF - 1) which will be used for the estimation and, // due to being constant, it is better to calculate it a priori. static const double two_ad_dof = std::pow(2.0, dof_minus_one_per_two); // Calculating C * 2^(DoF - 1) which will be used for the estimation and, // due to being constant, it is better to calculate it a priori. static const double C_times_two_ad_dof = C * two_ad_dof; // Calculating the gamma value of (DoF - 1) / 2 which will be used for the estimation and, // due to being constant, it is better to calculate it a priori. 
static const double gamma_value = tgamma(dof_minus_one_per_two); // Calculating the upper incomplete gamma value of (DoF - 1) / 2 with k^2 / 2. constexpr double gamma_k = ModelEstimator::getUpperIncompleteGammaOfK(); // Calculating the lower incomplete gamma value of (DoF - 1) / 2 which will be used for the estimation and, // due to being constant, it is better to calculate it a priori. static const double gamma_difference = gamma_value - gamma_k; // The number of points provided const int point_number = points_.rows; // The manually set maximum inlier-outlier threshold double current_maximum_sigma = this->maximum_threshold; // Calculating the pairs of (residual, point index). std::vector< std::pair<double, size_t> > residuals; // Occupy the maximum required memory to avoid doing it later. residuals.reserve(point_number); // If it is not the first run, consider the previous best and interrupt the validation when there is no chance of being better if (best_score_.inlier_number > 0) { // Number of points close to the previous so-far-the-best model. // This model should have more inliers. int points_remaining = best_score_.inlier_number; // Collect the points which are closer than the threshold which the maximum sigma implies for (int point_idx = 0; point_idx < point_number; ++point_idx) { // Calculate the residual of the current point const double residual = estimator_.residual(points_.row(point_idx), model_); if (current_maximum_sigma > residual) { // Store the residual of the current point and its index residuals.emplace_back(std::make_pair(residual, point_idx)); // all_residuals.emplace_back(std::make_pair(residual * threshold_to_sigma_multiplier, point_idx)); // Count points which are closer than a reference threshold to speed up the procedure if (residual < interrupting_threshold) --points_remaining; } // Interrupt if there is no chance of being better // TODO: replace this part by SPRT test if (point_number - point_idx < points_remaining) return false; } // Store the number of really close inliers just to speed up the procedure // by interrupting the next verifications. score_.inlier_number = best_score_.inlier_number - points_remaining; } else { // The number of really close points size_t points_close = 0; // Collect the points which are closer than the threshold which the maximum sigma implies for (size_t point_idx = 0; point_idx < point_number; ++point_idx) { // Calculate the residual of the current point const double residual = estimator_.residual(points_.row(point_idx), model_); if (current_maximum_sigma > residual) { // Store the residual of the current point and its index residuals.emplace_back(std::make_pair(residual, point_idx)); // Count points which are closer than a reference threshold to speed up the procedure if (residual < interrupting_threshold) ++points_close; } } // Store the number of really close inliers just to speed up the procedure // by interrupting the next verifications. 
score_.inlier_number = points_close; } // Models fit by weighted least-squares fitting std::vector<gcransac::Model> sigma_models; // Points used in the weighted least-squares fitting std::vector<size_t> sigma_inliers; // Weights used in the the weighted least-squares fitting std::vector<double> sigma_weights; // Number of points considered in the fitting const size_t possible_inlier_number = residuals.size(); // Occupy the memory to avoid doing it inside the calculation possibly multiple times sigma_inliers.reserve(possible_inlier_number); // Occupy the memory to avoid doing it inside the calculation possibly multiple times sigma_weights.reserve(possible_inlier_number); // Calculate 2 * \sigma_{max}^2 a priori const double squared_sigma_max_2 = current_maximum_sigma * current_maximum_sigma * 2.0; // Divide C * 2^(DoF - 1) by \sigma_{max} a priori const double one_over_sigma = C_times_two_ad_dof / current_maximum_sigma; // Calculate the weight of a point with 0 residual (i.e., fitting perfectly) a priori const double weight_zero = one_over_sigma * gamma_difference; // Initialize the polished model with the initial one gcransac::Model polished_model = model_; // A flag to determine if the initial model has been updated bool updated = false; // Do the iteratively re-weighted least squares fitting for (size_t iterations = 0; iterations < number_of_irwls_iters; ++iterations) { // If the current iteration is not the first, the set of possibly inliers // (i.e., points closer than the maximum threshold) have to be recalculated. if (iterations > 0) { // The number of points close to the model size_t points_close = 0; // Remove everything from the residual vector residuals.clear(); // Collect the points which are closer than the maximum threshold for (size_t point_idx = 0; point_idx < point_number; ++point_idx) { // Calculate the residual of the current point const double residual = estimator_.residual(points_.row(point_idx), polished_model); if (current_maximum_sigma > residual) { // Store the residual of the current point and its index residuals.emplace_back(std::make_pair(residual, point_idx)); // Count points which are closer than a reference threshold to speed up the procedure if (residual < interrupting_threshold) ++points_close; } } // Store the number of really close inliers just to speed up the procedure // by interrupting the next verifications. 
			score_.inlier_number = points_close;

			// Number of points closer than the threshold
			const size_t possible_inlier_number = residuals.size();

			// Clear the inliers and weights
			sigma_inliers.clear();
			sigma_weights.clear();

			// Occupy the memory for the inliers and weights
			sigma_inliers.reserve(possible_inlier_number);
			sigma_weights.reserve(possible_inlier_number);
		}

		// Calculate the weight of each point
		for (size_t res_idx = 0; res_idx < residuals.size(); ++res_idx)
		{
			const std::pair<double, size_t> &pair = residuals[res_idx];
			const double &residual = pair.first;
			const size_t &idx = pair.second;

			// The weight
			double weight = 0.0;
			// If the residual is ~0, the point fits perfectly and it is handled differently
			if (residual < std::numeric_limits<double>::epsilon())
				weight = weight_zero;
			else
			{
				// Calculate the squared residual
				const double squared_residual = residual * residual;
				// Get the position of the gamma value in the lookup table
				size_t x = round(precision_of_stored_gammas * squared_residual / squared_sigma_max_2);

				// If the sought gamma value is not stored in the lookup, return the closest element
				if (stored_gamma_number < x)
					x = stored_gamma_number;

				// Calculate the weight of the point
				weight = one_over_sigma * (stored_gamma_values[x] - gamma_k);
			}

			// Store the index and weight of the current point; the two vectors
			// must stay parallel since they are passed together to the fitting.
			sigma_inliers.emplace_back(idx);
			sigma_weights.emplace_back(weight);
		}

		// If there are fewer points close to the model than the size of a
		// minimal sample, terminate.
		if (sigma_inliers.size() < sample_size)
			return false;

		// Estimate the model parameters using weighted least-squares fitting
		if (!estimator_.estimateModelNonminimal(
			points_, // All input points
			&(sigma_inliers)[0], // Points which have higher than 0 probability of being inlier
			static_cast<int>(sigma_inliers.size()), // Number of possible inliers
			&sigma_models, // Estimated models
			&(sigma_weights)[0])) // Weights of points
		{
			// If the estimation failed and no iteration was ever successful,
			// terminate with failure.
			if (iterations == 0)
				return false;
			// Otherwise, if at least one iteration was successful,
			// simply break the loop.
break; } // Update the model parameters polished_model = sigma_models[0]; // Clear the vector of models and keep only the best sigma_models.clear(); // The model has been updated updated = true; } bool is_model_updated = false; if (updated && // If the model has been updated estimator_.isValidModel(polished_model, points_, sigma_inliers, &(sigma_inliers[0]), interrupting_threshold, is_model_updated)) // and it is valid { // Return the refined model refined_model_ = polished_model; // Calculate the score of the model and the implied iteration number double marginalized_iteration_number; getModelQualityPlusPlus(points_, // All the input points refined_model_, // The estimated model estimator_, // The estimator score_.score, // The marginalized score best_score_.score); // The score of the previous so-far-the-best model // Update the iteration number last_iteration_number = log_confidence / log(1.0 - std::pow(static_cast<double>(score_.inlier_number) / point_number, sample_size)); return true; } return false; } template <class DatumType, class ModelEstimator> void MAGSAC<DatumType, ModelEstimator>::getModelQualityPlusPlus( const cv::Mat &points_, // All data points const gcransac::Model &model_, // The model parameter const ModelEstimator &estimator_, // The model estimator class double &score_, // The score to be calculated const double &previous_best_score_) // The score of the previous so-far-the-best model { // The degrees of freedom of the data from which the model is estimated. // E.g., for models coming from point correspondences (x1,y1,x2,y2), it is 4. constexpr size_t degrees_of_freedom = ModelEstimator::getDegreesOfFreedom(); // A 0.99 quantile of the Chi^2-distribution to convert sigma values to residuals constexpr double k = ModelEstimator::getSigmaQuantile(); // A multiplier to convert residual values to sigmas constexpr double threshold_to_sigma_multiplier = 1.0 / k; // Calculating k^2 / 2 which will be used for the estimation and, // due to being constant, it is better to calculate it a priori. constexpr double squared_k_per_2 = k * k / 2.0; // Calculating (DoF - 1) / 2 which will be used for the estimation and, // due to being constant, it is better to calculate it a priori. constexpr double dof_minus_one_per_two = (degrees_of_freedom - 1.0) / 2.0; // Calculating (DoF + 1) / 2 which will be used for the estimation and, // due to being constant, it is better to calculate it a priori. constexpr double dof_plus_one_per_two = (degrees_of_freedom + 1.0) / 2.0; // TODO: check constexpr double C = 0.25; // Calculating 2^(DoF - 1) which will be used for the estimation and, // due to being constant, it is better to calculate it a priori. static const double two_ad_dof_minus_one = std::pow(2.0, dof_minus_one_per_two); // Calculating 2^(DoF + 1) which will be used for the estimation and, // due to being constant, it is better to calculate it a priori. 
static const double two_ad_dof_plus_one = std::pow(2.0, dof_plus_one_per_two); // Calculate the gamma value of k constexpr double gamma_value_of_k = ModelEstimator::getUpperIncompleteGammaOfK(); // Calculate the lower incomplete gamma value of k constexpr double lower_gamma_value_of_k = ModelEstimator::getLowerIncompleteGammaOfK(); // The number of points provided const int point_number = points_.rows; // The previous best loss const double previous_best_loss = 1.0 / previous_best_score_; // Convert the maximum threshold to a sigma value const double maximum_sigma = threshold_to_sigma_multiplier * maximum_threshold; // Calculate the squared maximum sigma const double maximum_sigma_2 = maximum_sigma * maximum_sigma; // Calculate \sigma_{max}^2 / 2 const double maximum_sigma_2_per_2 = maximum_sigma_2 / 2.0; // Calculate 2 * \sigma_{max}^2 const double maximum_sigma_2_times_2 = maximum_sigma_2 * 2.0; // Calculate the loss implied by an outlier const double outlier_loss = maximum_sigma * two_ad_dof_minus_one * lower_gamma_value_of_k; // Calculating 2^(DoF + 1) / \sigma_{max} which will be used for the estimation and, // due to being constant, it is better to calculate it a priori. const double two_ad_dof_plus_one_per_maximum_sigma = two_ad_dof_plus_one / maximum_sigma; // The loss which a point implies double loss = 0.0, // The total loss regarding the current model total_loss = 0.0; // Iterate through all points to calculate the implied loss for (size_t point_idx = 0; point_idx < point_number; ++point_idx) { // Calculate the residual of the current point const double residual = estimator_.residualForScoring(points_.row(point_idx), model_.descriptor); // If the residual is smaller than the maximum threshold, consider it outlier // and add the loss implied to the total loss. if (maximum_threshold < residual) loss = outlier_loss; else // Otherwise, consider the point inlier, and calculate the implied loss { // Calculate the squared residual const double squared_residual = residual * residual; // Divide the residual by the 2 * \sigma^2 const double squared_residual_per_sigma = squared_residual / maximum_sigma_2_times_2; // Get the position of the gamma value in the lookup table size_t x = round(precision_of_stored_incomplete_gammas * squared_residual_per_sigma); // If the sought gamma value is not stored in the lookup, return the closest element if (stored_incomplete_gamma_number < x) x = stored_incomplete_gamma_number; // Calculate the loss implied by the current point loss = maximum_sigma_2_per_2 * stored_lower_incomplete_gamma_values[x] + squared_residual / 4.0 * (stored_complete_gamma_values[x] - gamma_value_of_k); loss = loss * two_ad_dof_plus_one_per_maximum_sigma; } // Update the total loss total_loss += loss; // Break the validation if there is no chance of being better than the previous // so-far-the-best model. 
if (previous_best_loss < total_loss) break; } // Calculate the score of the model from the total loss score_ = 1.0 / total_loss; } template <class DatumType, class ModelEstimator> void MAGSAC<DatumType, ModelEstimator>::getModelQuality( const cv::Mat &points_, // All data points const gcransac::Model &model_, // The model parameter const ModelEstimator &estimator_, // The model estimator class double &marginalized_iteration_number_, // The marginalized iteration number to be calculated double &score_) // The score to be calculated { // Set up the parameters constexpr size_t sample_size = estimator_.sampleSize(); const size_t point_number = points_.rows; // Getting the inliers std::vector<std::pair<double, size_t>> all_residuals; all_residuals.reserve(point_number); double max_distance = 0; for (size_t point_idx = 0; point_idx < point_number; ++point_idx) { // Calculate the residual of the current point const double residual = estimator_.residualForScoring(points_.row(point_idx), model_.descriptor); // If the residual is smaller than the maximum threshold, add it to the set of possible inliers if (maximum_threshold > residual) { max_distance = MAX(max_distance, residual); all_residuals.emplace_back(std::make_pair(residual, point_idx)); } } // Set the maximum distance to be slightly bigger than that of the farthest possible inlier max_distance = max_distance + std::numeric_limits<double>::epsilon(); // Number of possible inliers const size_t possible_inlier_number = all_residuals.size(); // The extent of a partition const double threshold_step = max_distance / partition_number; // The maximum threshold considered in each partition std::vector<double> thresholds(partition_number); std::vector<double> thresholds_squared(partition_number); std::vector<double> thresholds_2_squared(partition_number); // Calculating the thresholds for each partition for (size_t i = 0; i < partition_number; ++i) { thresholds[i] = (i + 1) * threshold_step; thresholds_squared[i] = thresholds[i] * thresholds[i]; thresholds_2_squared[i] = 2 * thresholds_squared[i]; } double residual_i, // Residual of the i-th point residual_i_squared, // Squared residual of the i-th poin probability_i; // Probability of the i-th point given the model std::vector<double> inliers(partition_number, 0), // RANSAC score for each partition probabilities(partition_number, 1); // Probabilities for each partition for (size_t point_idx = 0; point_idx < possible_inlier_number; ++point_idx) { residual_i = all_residuals[point_idx].first; residual_i_squared = residual_i * residual_i; for (size_t i = 0; i < partition_number; ++i) { if (residual_i < thresholds[i]) { probability_i = 1.0 - residual_i_squared / thresholds_squared[i]; ++inliers[i]; probabilities[i] += probability_i; } } } score_ = 0; marginalized_iteration_number_ = 0.0; for (auto i = 0; i < partition_number; ++i) { score_ += probabilities[i]; marginalized_iteration_number_ += log_confidence / log(1.0 - std::pow(inliers[i] / point_number, sample_size)); } marginalized_iteration_number_ = marginalized_iteration_number_ / partition_number; }
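/*
 * The iteration bound used throughout run(), sigmaConsensusPlusPlus(), and
 * getModelQuality() above is the standard RANSAC termination criterion:
 * with inlier ratio w, minimal sample size m, and required confidence eta,
 * the number of iterations needed is k = log(1 - eta) / log(1 - w^m).
 * A minimal sketch in C; the example numbers (7-point samples, 40% inliers)
 * are hypothetical.
 */
#include <stdio.h>
#include <math.h>

static double required_iterations(double inlier_ratio, int sample_size,
                                  double confidence)
{
    /* Probability that one random minimal sample is all-inlier. */
    double p_good = pow(inlier_ratio, sample_size);
    if (p_good <= 0.0) return INFINITY;   /* no inliers: never terminates */
    if (p_good >= 1.0) return 1.0;        /* all inliers: one sample suffices */
    return log(1.0 - confidence) / log(1.0 - p_good);
}

int main(void)
{
    /* E.g. fundamental-matrix estimation with 7-point samples, 40% inliers,
       and 0.99 confidence. */
    printf("%.0f iterations\n", ceil(required_iterations(0.40, 7, 0.99)));
    return 0;
}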
threshold.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD % % T H H R R E SS H H O O L D D % % T HHHHH RRRR EEE SSS HHHHH O O L D D % % T H H R R E SS H H O O L D D % % T H H R R EEEEE SSSSS H H OOO LLLLL DDDD % % % % % % MagickCore Image Threshold Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/property.h" #include "MagickCore/blob.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/configure.h" #include "MagickCore/constitute.h" #include "MagickCore/decorate.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/effect.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/montage.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/shear.h" #include "MagickCore/signature-private.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" #include "MagickCore/xml-tree.h" #include "MagickCore/xml-tree-private.h" /* Define declarations. */ #define ThresholdsFilename "thresholds.xml" /* Typedef declarations. */ struct _ThresholdMap { char *map_id, *description; size_t width, height; ssize_t divisor, *levels; }; /* Static declarations. 
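  The built-in map below provides the minimal "threshold" (1x1, non-dither)
  and "checks" (2x2 checkerboard, dither) patterns, so ordered dithering keeps
  working even when no thresholds.xml configuration file can be loaded.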
*/ #if MAGICKCORE_ZERO_CONFIGURATION_SUPPORT #include "MagickCore/threshold-map.h" #else static const char *const BuiltinMap= "<?xml version=\"1.0\"?>" "<thresholds>" " <threshold map=\"threshold\" alias=\"1x1\">" " <description>Threshold 1x1 (non-dither)</description>" " <levels width=\"1\" height=\"1\" divisor=\"2\">" " 1" " </levels>" " </threshold>" " <threshold map=\"checks\" alias=\"2x1\">" " <description>Checkerboard 2x1 (dither)</description>" " <levels width=\"2\" height=\"2\" divisor=\"3\">" " 1 2" " 2 1" " </levels>" " </threshold>" "</thresholds>"; #endif /* Forward declarations. */ static ThresholdMap *GetThresholdMapFile(const char *,const char *,const char *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveThresholdImage() selects an individual threshold for each pixel % based on the range of intensity values in its local neighborhood. This % allows for thresholding of an image whose global intensity histogram % doesn't contain distinctive peaks. % % The format of the AdaptiveThresholdImage method is: % % Image *AdaptiveThresholdImage(const Image *image,const size_t width, % const size_t height,const double bias,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width: the width of the local neighborhood. % % o height: the height of the local neighborhood. % % o bias: the mean bias. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AdaptiveThresholdImage(const Image *image, const size_t width,const size_t height,const double bias, ExceptionInfo *exception) { #define AdaptiveThresholdImageTag "AdaptiveThreshold/Image" CacheView *image_view, *threshold_view; Image *threshold_image; MagickBooleanType status; MagickOffsetType progress; MagickSizeType number_pixels; ssize_t y; /* Initialize threshold image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); threshold_image=CloneImage(image,0,0,MagickTrue,exception); if (threshold_image == (Image *) NULL) return((Image *) NULL); if ((width == 0) || (height == 0)) return(threshold_image); status=SetImageStorageClass(threshold_image,DirectClass,exception); if (status == MagickFalse) { threshold_image=DestroyImage(threshold_image); return((Image *) NULL); } /* Threshold image. 
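    The sliding-window mean below is maintained incrementally: channel_sum
    holds the running total of the width x height neighborhood for each
    channel, while channel_bias caches the column that leaves the window as
    it slides right, so each pixel costs one column subtraction plus one
    column addition instead of a full re-summation.  A pixel becomes 0 when
    it is at most the locally computed mean (offset by the bias parameter)
    and QuantumRange otherwise.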
*/ status=MagickTrue; progress=0; number_pixels=(MagickSizeType) width*height; image_view=AcquireVirtualCacheView(image,exception); threshold_view=AcquireAuthenticCacheView(threshold_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,threshold_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double channel_bias[MaxPixelChannels], channel_sum[MaxPixelChannels]; register const Quantum *magick_restrict p, *magick_restrict pixels; register Quantum *magick_restrict q; register ssize_t i, x; ssize_t center, u, v; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t) (height/2L),image->columns+width,height,exception); q=QueueCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns, 1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } center=(ssize_t) GetPixelChannels(image)*(image->columns+width)*(height/2L)+ GetPixelChannels(image)*(width/2); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image, channel); if ((traits == UndefinedPixelTrait) || (threshold_traits == UndefinedPixelTrait)) continue; if ((threshold_traits & CopyPixelTrait) != 0) { SetPixelChannel(threshold_image,channel,p[center+i],q); continue; } pixels=p; channel_bias[channel]=0.0; channel_sum[channel]=0.0; for (v=0; v < (ssize_t) height; v++) { for (u=0; u < (ssize_t) width; u++) { if (u == (ssize_t) (width-1)) channel_bias[channel]+=pixels[i]; channel_sum[channel]+=pixels[i]; pixels+=GetPixelChannels(image); } pixels+=GetPixelChannels(image)*image->columns; } } for (x=0; x < (ssize_t) image->columns; x++) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double mean; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image, channel); if ((traits == UndefinedPixelTrait) || (threshold_traits == UndefinedPixelTrait)) continue; if ((threshold_traits & CopyPixelTrait) != 0) { SetPixelChannel(threshold_image,channel,p[center+i],q); continue; } channel_sum[channel]-=channel_bias[channel]; channel_bias[channel]=0.0; pixels=p; for (v=0; v < (ssize_t) height; v++) { channel_bias[channel]+=pixels[i]; pixels+=(width-1)*GetPixelChannels(image); channel_sum[channel]+=pixels[i]; pixels+=GetPixelChannels(image)*(image->columns+1); } mean=(double) (channel_sum[channel]/number_pixels+bias); SetPixelChannel(threshold_image,channel,(Quantum) ((double) p[center+i] <= mean ? 
0 : QuantumRange),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(threshold_image); } if (SyncCacheViewAuthenticPixels(threshold_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,AdaptiveThresholdImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } threshold_image->type=image->type; threshold_view=DestroyCacheView(threshold_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) threshold_image=DestroyImage(threshold_image); return(threshold_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoThresholdImage() automatically performs image thresholding % dependent on which method you specify. % % The format of the AutoThresholdImage method is: % % MagickBooleanType AutoThresholdImage(Image *image, % const AutoThresholdMethod method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image to auto-threshold. % % o method: choose from Kapur, OTSU, or Triangle. % % o exception: return any errors or warnings in this structure. % */ static double KapurThreshold(const Image *image,const double *histogram, ExceptionInfo *exception) { #define MaxIntensity 255 double *black_entropy, *cumulative_histogram, entropy, epsilon, maximum_entropy, *white_entropy; register ssize_t i, j; size_t threshold; /* Compute optimal threshold from the entopy of the histogram. */ cumulative_histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*cumulative_histogram)); black_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*black_entropy)); white_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*white_entropy)); if ((cumulative_histogram == (double *) NULL) || (black_entropy == (double *) NULL) || (white_entropy == (double *) NULL)) { if (white_entropy != (double *) NULL) white_entropy=(double *) RelinquishMagickMemory(white_entropy); if (black_entropy != (double *) NULL) black_entropy=(double *) RelinquishMagickMemory(black_entropy); if (cumulative_histogram != (double *) NULL) cumulative_histogram=(double *) RelinquishMagickMemory(cumulative_histogram); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(-1.0); } /* Entropy for black and white parts of the histogram. */ cumulative_histogram[0]=histogram[0]; for (i=1; i <= MaxIntensity; i++) cumulative_histogram[i]=cumulative_histogram[i-1]+histogram[i]; epsilon=MagickMinimumValue; for (j=0; j <= MaxIntensity; j++) { /* Black entropy. */ black_entropy[j]=0.0; if (cumulative_histogram[j] > epsilon) { entropy=0.0; for (i=0; i <= j; i++) if (histogram[i] > epsilon) entropy-=histogram[i]/cumulative_histogram[j]* log(histogram[i]/cumulative_histogram[j]); black_entropy[j]=entropy; } /* White entropy. */ white_entropy[j]=0.0; if ((1.0-cumulative_histogram[j]) > epsilon) { entropy=0.0; for (i=j+1; i <= MaxIntensity; i++) if (histogram[i] > epsilon) entropy-=histogram[i]/(1.0-cumulative_histogram[j])* log(histogram[i]/(1.0-cumulative_histogram[j])); white_entropy[j]=entropy; } } /* Find histogram bin with maximum entropy. 
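    Kapur's criterion: the selected threshold j maximizes the sum of the
    entropies of the background distribution histogram[0..j] and of the
    foreground distribution histogram[j+1..MaxIntensity] computed above; the
    result is returned as a percentage of the intensity range.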
*/ maximum_entropy=black_entropy[0]+white_entropy[0]; threshold=0; for (j=1; j <= MaxIntensity; j++) if ((black_entropy[j]+white_entropy[j]) > maximum_entropy) { maximum_entropy=black_entropy[j]+white_entropy[j]; threshold=(size_t) j; } /* Free resources. */ white_entropy=(double *) RelinquishMagickMemory(white_entropy); black_entropy=(double *) RelinquishMagickMemory(black_entropy); cumulative_histogram=(double *) RelinquishMagickMemory(cumulative_histogram); return(100.0*threshold/MaxIntensity); } static double OTSUThreshold(const Image *image,const double *histogram, ExceptionInfo *exception) { double max_sigma, *myu, *omega, *probability, *sigma, threshold; register ssize_t i; /* Compute optimal threshold from maximization of inter-class variance. */ myu=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*myu)); omega=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*omega)); probability=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*probability)); sigma=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*sigma)); if ((myu == (double *) NULL) || (omega == (double *) NULL) || (probability == (double *) NULL) || (sigma == (double *) NULL)) { if (sigma != (double *) NULL) sigma=(double *) RelinquishMagickMemory(sigma); if (probability != (double *) NULL) probability=(double *) RelinquishMagickMemory(probability); if (omega != (double *) NULL) omega=(double *) RelinquishMagickMemory(omega); if (myu != (double *) NULL) myu=(double *) RelinquishMagickMemory(myu); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(-1.0); } /* Calculate probability density. */ for (i=0; i <= (ssize_t) MaxIntensity; i++) probability[i]=histogram[i]; /* Generate probability of graylevels and mean value for separation. */ omega[0]=probability[0]; myu[0]=0.0; for (i=1; i <= (ssize_t) MaxIntensity; i++) { omega[i]=omega[i-1]+probability[i]; myu[i]=myu[i-1]+i*probability[i]; } /* Sigma maximization: inter-class variance and compute optimal threshold. */ threshold=0; max_sigma=0.0; for (i=0; i < (ssize_t) MaxIntensity; i++) { sigma[i]=0.0; if ((omega[i] != 0.0) && (omega[i] != 1.0)) sigma[i]=pow(myu[MaxIntensity]*omega[i]-myu[i],2.0)/(omega[i]*(1.0- omega[i])); if (sigma[i] > max_sigma) { max_sigma=sigma[i]; threshold=(double) i; } } /* Free resources. */ myu=(double *) RelinquishMagickMemory(myu); omega=(double *) RelinquishMagickMemory(omega); probability=(double *) RelinquishMagickMemory(probability); sigma=(double *) RelinquishMagickMemory(sigma); return(100.0*threshold/MaxIntensity); } static double TriangleThreshold(const double *histogram) { double a, b, c, count, distance, inverse_ratio, max_distance, segment, x1, x2, y1, y2; register ssize_t i; ssize_t end, max, start, threshold; /* Compute optimal threshold with triangle algorithm. */ start=0; /* find start bin, first bin not zero count */ for (i=0; i <= (ssize_t) MaxIntensity; i++) if (histogram[i] > 0.0) { start=i; break; } end=0; /* find end bin, last bin not zero count */ for (i=(ssize_t) MaxIntensity; i >= 0; i--) if (histogram[i] > 0.0) { end=i; break; } max=0; /* find max bin, bin with largest count */ count=0.0; for (i=0; i <= (ssize_t) MaxIntensity; i++) if (histogram[i] > count) { max=i; count=histogram[i]; } /* Compute threshold at split point. 
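    A line is drawn from the histogram peak (max, histogram[max]) toward the
    empty end of the histogram on the longer side of the peak; a, b, and c
    below are the implicit coefficients of that line, and the threshold is
    the bin whose normal distance to the line is largest on that side.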
*/ x1=(double) max; y1=histogram[max]; x2=(double) end; if ((max-start) >= (end-max)) x2=(double) start; y2=0.0; a=y1-y2; b=x2-x1; c=(-1.0)*(a*x1+b*y1); inverse_ratio=1.0/sqrt(a*a+b*b+c*c); threshold=0; max_distance=0.0; if (x2 == (double) start) for (i=start; i < max; i++) { segment=inverse_ratio*(a*i+b*histogram[i]+c); distance=sqrt(segment*segment); if ((distance > max_distance) && (segment > 0.0)) { threshold=i; max_distance=distance; } } else for (i=end; i > max; i--) { segment=inverse_ratio*(a*i+b*histogram[i]+c); distance=sqrt(segment*segment); if ((distance > max_distance) && (segment < 0.0)) { threshold=i; max_distance=distance; } } return(100.0*threshold/MaxIntensity); } MagickExport MagickBooleanType AutoThresholdImage(Image *image, const AutoThresholdMethod method,ExceptionInfo *exception) { CacheView *image_view; char property[MagickPathExtent]; double gamma, *histogram, sum, threshold; MagickBooleanType status; register ssize_t i; ssize_t y; /* Form histogram. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*histogram)); if (histogram == (double *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=MagickTrue; (void) memset(histogram,0,(MaxIntensity+1UL)*sizeof(*histogram)); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { double intensity = GetPixelIntensity(image,p); histogram[ScaleQuantumToChar(ClampToQuantum(intensity))]++; p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); /* Normalize histogram. */ sum=0.0; for (i=0; i <= (ssize_t) MaxIntensity; i++) sum+=histogram[i]; gamma=PerceptibleReciprocal(sum); for (i=0; i <= (ssize_t) MaxIntensity; i++) histogram[i]=gamma*histogram[i]; /* Discover threshold from histogram. */ switch (method) { case KapurThresholdMethod: { threshold=KapurThreshold(image,histogram,exception); break; } case OTSUThresholdMethod: default: { threshold=OTSUThreshold(image,histogram,exception); break; } case TriangleThresholdMethod: { threshold=TriangleThreshold(histogram); break; } } histogram=(double *) RelinquishMagickMemory(histogram); if (threshold < 0.0) status=MagickFalse; if (status == MagickFalse) return(MagickFalse); /* Threshold image. */ (void) FormatLocaleString(property,MagickPathExtent,"%g%%",threshold); (void) SetImageProperty(image,"auto-threshold:threshold",property,exception); return(BilevelImage(image,QuantumRange*threshold/100.0,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B i l e v e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BilevelImage() changes the value of individual pixels based on the % intensity of each pixel channel. The result is a high-contrast image. % % More precisely each channel value of the image is 'thresholded' so that if % it is equal to or less than the given value it is set to zero, while any % value greater than that give is set to it maximum or QuantumRange. 
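%
%  For example, to threshold at 50% of the quantum range one might call:
%
%      (void) BilevelImage(image,0.5*QuantumRange,exception);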
%
%  This function is what is used to implement the "-threshold" operator for
%  the command line API.
%
%  If the default channel setting is given the image is thresholded using just
%  the gray 'intensity' of the image, rather than the individual channels.
%
%  The format of the BilevelImage method is:
%
%      MagickBooleanType BilevelImage(Image *image,const double threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: define the threshold value.
%
%    o exception: return any errors or warnings in this structure.
%
%  Aside: You can get the same results as this operator by using LevelImages()
%  with the 'threshold' value for both the black_point and the white_point.
%
*/
MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) == MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  /*
    Bilevel threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        q[i]=(Quantum) (pixel <= threshold ? 0 : QuantumRange);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B l a c k T h r e s h o l d I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BlackThresholdImage() is like ThresholdImage() but forces all pixels below
%  the threshold into black while leaving all pixels at or above the threshold
%  unchanged.
%
%  The format of the BlackThresholdImage method is:
%
%      MagickBooleanType BlackThresholdImage(Image *image,
%        const char *threshold,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: define the threshold value.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BlackThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  GetPixelInfo(image,&threshold);
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.rho;
  threshold.blue=geometry_info.rho;
  threshold.black=geometry_info.rho;
  threshold.alpha=100.0;
  if ((flags & SigmaValue) != 0)
    threshold.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    threshold.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    threshold.alpha=geometry_info.psi;
  if (threshold.colorspace == CMYKColorspace)
    {
      if ((flags & PsiValue) != 0)
        threshold.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        threshold.alpha=geometry_info.chi;
    }
  if ((flags & PercentValue) != 0)
    {
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.black*=(MagickRealType) (QuantumRange/100.0);
      threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
    }
  /*
    Black threshold image.
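    The geometry string parsed above maps rho, sigma, xi, and psi onto the
    red, green, blue, and alpha thresholds in turn (with psi as black and chi
    as alpha for CMYK); a trailing '%' rescales the values from percentages
    to the quantum range.  Channels falling below their threshold are forced
    to black in the loop that follows.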
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double pixel; register ssize_t i; pixel=GetPixelIntensity(image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (image->channel_mask != DefaultChannels) pixel=(double) q[i]; if (pixel < GetPixelInfoChannel(&threshold,channel)) q[i]=(Quantum) 0; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l a m p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClampImage() set each pixel whose value is below zero to zero and any the % pixel whose value is above the quantum range to the quantum range (e.g. % 65535) otherwise the pixel value remains unchanged. % % The format of the ClampImage method is: % % MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception) { #define ClampImageTag "Clamp/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) { register ssize_t i; register PixelInfo *magick_restrict q; q=image->colormap; for (i=0; i < (ssize_t) image->colors; i++) { q->red=(double) ClampPixel(q->red); q->green=(double) ClampPixel(q->green); q->blue=(double) ClampPixel(q->blue); q->alpha=(double) ClampPixel(q->alpha); q++; } return(SyncImage(image,exception)); } /* Clamp image. 
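    Every updated channel of each pixel is clamped in place to the
    representable range [0, QuantumRange]; colormapped images were already
    handled above by clamping the colormap entries directly.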
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampPixel((MagickRealType) q[i]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ClampImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o l o r T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ColorThresholdImage() forces all pixels in the color range to white % otherwise black. % % The format of the ColorThresholdImage method is: % % MagickBooleanType ColorThresholdImage(Image *image, % const PixelInfo *start_color,const PixelInfo *stop_color, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o start_color, stop_color: define the start and stop color range. Any % pixel within the range returns white otherwise black. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ColorThresholdImage(Image *image, const PixelInfo *start_color,const PixelInfo *stop_color, ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo start, stop; ssize_t y; /* Color threshold image. 
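    The start/stop colors are first converted into the image's colorspace
    (HCL, HSB, HSL, HSV, HWB, or Lab as appropriate) and scaled to the
    quantum range; the image is then reduced to a two-entry colormap, and a
    pixel maps to index 1 (white) only when every updated channel lies
    inside the [start, stop] interval, otherwise to index 0 (black).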
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=AcquireImageColormap(image,2,exception); if (status == MagickFalse) return(status); start=(*start_color); stop=(*stop_color); switch (image->colorspace) { case HCLColorspace: { ConvertRGBToHCL(start_color->red,start_color->green,start_color->blue, &start.red,&start.green,&start.blue); ConvertRGBToHCL(stop_color->red,stop_color->green,stop_color->blue, &stop.red,&stop.green,&stop.blue); break; } case HSBColorspace: { ConvertRGBToHSB(start_color->red,start_color->green,start_color->blue, &start.red,&start.green,&start.blue); ConvertRGBToHSB(stop_color->red,stop_color->green,stop_color->blue, &stop.red,&stop.green,&stop.blue); break; } case HSLColorspace: { ConvertRGBToHSL(start_color->red,start_color->green,start_color->blue, &start.red,&start.green,&start.blue); ConvertRGBToHSL(stop_color->red,stop_color->green,stop_color->blue, &stop.red,&stop.green,&stop.blue); break; } case HSVColorspace: { ConvertRGBToHSV(start_color->red,start_color->green,start_color->blue, &start.red,&start.green,&start.blue); ConvertRGBToHSV(stop_color->red,stop_color->green,stop_color->blue, &stop.red,&stop.green,&stop.blue); break; } case HWBColorspace: { ConvertRGBToHWB(start_color->red,start_color->green,start_color->blue, &start.red,&start.green,&start.blue); ConvertRGBToHWB(stop_color->red,stop_color->green,stop_color->blue, &stop.red,&stop.green,&stop.blue); break; } case LabColorspace: { ConvertRGBToLab(start_color->red,start_color->green,start_color->blue, &start.red,&start.green,&start.blue); ConvertRGBToLab(stop_color->red,stop_color->green,stop_color->blue, &stop.red,&stop.green,&stop.blue); break; } default: { start.red*=QuantumScale; start.green*=QuantumScale; start.blue*=QuantumScale; stop.red*=QuantumScale; stop.green*=QuantumScale; stop.blue*=QuantumScale; break; } } start.red*=QuantumRange; start.green*=QuantumRange; start.blue*=QuantumRange; stop.red*=QuantumRange; stop.green*=QuantumRange; stop.blue*=QuantumRange; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickBooleanType foreground = MagickTrue; register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if ((q[i] < GetPixelInfoChannel(&start,channel)) || (q[i] > GetPixelInfoChannel(&stop,channel))) foreground=MagickFalse; } SetPixelIndex(image,(Quantum) (foreground != MagickFalse ? 
1 : 0),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); image->colorspace=sRGBColorspace; return(SyncImage(image,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y T h r e s h o l d M a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyThresholdMap() de-allocate the given ThresholdMap % % The format of the ListThresholdMaps method is: % % ThresholdMap *DestroyThresholdMap(Threshold *map) % % A description of each parameter follows. % % o map: Pointer to the Threshold map to destroy % */ MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map) { assert(map != (ThresholdMap *) NULL); if (map->map_id != (char *) NULL) map->map_id=DestroyString(map->map_id); if (map->description != (char *) NULL) map->description=DestroyString(map->description); if (map->levels != (ssize_t *) NULL) map->levels=(ssize_t *) RelinquishMagickMemory(map->levels); map=(ThresholdMap *) RelinquishMagickMemory(map); return(map); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t T h r e s h o l d M a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetThresholdMap() loads and searches one or more threshold map files for the % map matching the given name or alias. % % The format of the GetThresholdMap method is: % % ThresholdMap *GetThresholdMap(const char *map_id, % ExceptionInfo *exception) % % A description of each parameter follows. % % o map_id: ID of the map to look for. % % o exception: return any errors or warnings in this structure. % */ MagickExport ThresholdMap *GetThresholdMap(const char *map_id, ExceptionInfo *exception) { ThresholdMap *map; map=GetThresholdMapFile(BuiltinMap,"built-in",map_id,exception); if (map != (ThresholdMap *) NULL) return(map); #if !MAGICKCORE_ZERO_CONFIGURATION_SUPPORT { const StringInfo *option; LinkedListInfo *options; options=GetConfigureOptions(ThresholdsFilename,exception); option=(const StringInfo *) GetNextValueInLinkedList(options); while (option != (const StringInfo *) NULL) { map=GetThresholdMapFile((const char *) GetStringInfoDatum(option), GetStringInfoPath(option),map_id,exception); if (map != (ThresholdMap *) NULL) break; option=(const StringInfo *) GetNextValueInLinkedList(options); } options=DestroyConfigureOptions(options); } #endif return(map); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t T h r e s h o l d M a p F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetThresholdMapFile() look for a given threshold map name or alias in the % given XML file data, and return the allocated the map when found. % % The format of the ListThresholdMaps method is: % % ThresholdMap *GetThresholdMap(const char *xml,const char *filename, % const char *map_id,ExceptionInfo *exception) % % A description of each parameter follows. % % o xml: The threshold map list in XML format. % % o filename: The threshold map XML filename. 
% % o map_id: ID of the map to look for in XML list. % % o exception: return any errors or warnings in this structure. % */ static ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename, const char *map_id,ExceptionInfo *exception) { char *p; const char *attribute, *content; double value; register ssize_t i; ThresholdMap *map; XMLTreeInfo *description, *levels, *threshold, *thresholds; (void) LogMagickEvent(ConfigureEvent,GetMagickModule(), "Loading threshold map file \"%s\" ...",filename); map=(ThresholdMap *) NULL; thresholds=NewXMLTree(xml,exception); if (thresholds == (XMLTreeInfo *) NULL) return(map); for (threshold=GetXMLTreeChild(thresholds,"threshold"); threshold != (XMLTreeInfo *) NULL; threshold=GetNextXMLTreeTag(threshold)) { attribute=GetXMLTreeAttribute(threshold,"map"); if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0)) break; attribute=GetXMLTreeAttribute(threshold,"alias"); if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0)) break; } if (threshold == (XMLTreeInfo *) NULL) { thresholds=DestroyXMLTree(thresholds); return(map); } description=GetXMLTreeChild(threshold,"description"); if (description == (XMLTreeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingElement", "<description>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); return(map); } levels=GetXMLTreeChild(threshold,"levels"); if (levels == (XMLTreeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingElement", "<levels>, map \"%s\"", map_id); thresholds=DestroyXMLTree(thresholds); return(map); } map=(ThresholdMap *) AcquireCriticalMemory(sizeof(*map)); map->map_id=(char *) NULL; map->description=(char *) NULL; map->levels=(ssize_t *) NULL; attribute=GetXMLTreeAttribute(threshold,"map"); if (attribute != (char *) NULL) map->map_id=ConstantString(attribute); content=GetXMLTreeContent(description); if (content != (char *) NULL) map->description=ConstantString(content); attribute=GetXMLTreeAttribute(levels,"width"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute", "<levels width>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->width=StringToUnsignedLong(attribute); if (map->width == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute", "<levels width>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } attribute=GetXMLTreeAttribute(levels,"height"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute", "<levels height>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->height=StringToUnsignedLong(attribute); if (map->height == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute", "<levels height>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } attribute=GetXMLTreeAttribute(levels,"divisor"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute", "<levels divisor>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->divisor=(ssize_t) StringToLong(attribute); if 
(map->divisor < 2) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute", "<levels divisor>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } content=GetXMLTreeContent(levels); if (content == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingContent", "<levels>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->levels=(ssize_t *) AcquireQuantumMemory((size_t) map->width,map->height* sizeof(*map->levels)); if (map->levels == (ssize_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap"); for (i=0; i < (ssize_t) (map->width*map->height); i++) { map->levels[i]=(ssize_t) strtol(content,&p,10); if (p == content) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidContent", "<level> too few values, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } if ((map->levels[i] < 0) || (map->levels[i] > map->divisor)) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidContent", "<level> %.20g out of range, map \"%s\"", (double) map->levels[i],map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } content=p; } value=(double) strtol(content,&p,10); (void) value; if (p != content) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidContent", "<level> too many values, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } thresholds=DestroyXMLTree(thresholds); return(map); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + L i s t T h r e s h o l d M a p F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ListThresholdMapFile() lists the threshold maps and their descriptions % in the given XML file data. % % The format of the ListThresholdMaps method is: % % MagickBooleanType ListThresholdMaps(FILE *file,const char*xml, % const char *filename,ExceptionInfo *exception) % % A description of each parameter follows. % % o file: An pointer to the output FILE. % % o xml: The threshold map list in XML format. % % o filename: The threshold map XML filename. % % o exception: return any errors or warnings in this structure. 
% */ MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml, const char *filename,ExceptionInfo *exception) { const char *alias, *content, *map; XMLTreeInfo *description, *threshold, *thresholds; assert( xml != (char *) NULL ); assert( file != (FILE *) NULL ); (void) LogMagickEvent(ConfigureEvent,GetMagickModule(), "Loading threshold map file \"%s\" ...",filename); thresholds=NewXMLTree(xml,exception); if ( thresholds == (XMLTreeInfo *) NULL ) return(MagickFalse); (void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description"); (void) FormatLocaleFile(file, "----------------------------------------------------\n"); threshold=GetXMLTreeChild(thresholds,"threshold"); for ( ; threshold != (XMLTreeInfo *) NULL; threshold=GetNextXMLTreeTag(threshold)) { map=GetXMLTreeAttribute(threshold,"map"); if (map == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute", "<map>"); thresholds=DestroyXMLTree(thresholds); return(MagickFalse); } alias=GetXMLTreeAttribute(threshold,"alias"); description=GetXMLTreeChild(threshold,"description"); if (description == (XMLTreeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingElement", "<description>, map \"%s\"",map); thresholds=DestroyXMLTree(thresholds); return(MagickFalse); } content=GetXMLTreeContent(description); if (content == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingContent", "<description>, map \"%s\"", map); thresholds=DestroyXMLTree(thresholds); return(MagickFalse); } (void) FormatLocaleFile(file,"%-16s %-12s %s\n",map,alias ? alias : "", content); } thresholds=DestroyXMLTree(thresholds); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L i s t T h r e s h o l d M a p s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ListThresholdMaps() lists the threshold maps and their descriptions % as defined by "threshold.xml" to a file. % % The format of the ListThresholdMaps method is: % % MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception) % % A description of each parameter follows. % % o file: An pointer to the output FILE. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ListThresholdMaps(FILE *file, ExceptionInfo *exception) { const StringInfo *option; LinkedListInfo *options; MagickStatusType status; status=MagickTrue; if (file == (FILE *) NULL) file=stdout; options=GetConfigureOptions(ThresholdsFilename,exception); (void) FormatLocaleFile(file, "\n Threshold Maps for Ordered Dither Operations\n"); option=(const StringInfo *) GetNextValueInLinkedList(options); while (option != (const StringInfo *) NULL) { (void) FormatLocaleFile(file,"\nPath: %s\n\n",GetStringInfoPath(option)); status&=ListThresholdMapFile(file,(const char *) GetStringInfoDatum(option), GetStringInfoPath(option),exception); option=(const StringInfo *) GetNextValueInLinkedList(options); } options=DestroyConfigureOptions(options); return(status != 0 ? 
MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O r d e r e d D i t h e r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OrderedDitherImage() will perform a ordered dither based on a number % of pre-defined dithering threshold maps, but over multiple intensity % levels, which can be different for different channels, according to the % input argument. % % The format of the OrderedDitherImage method is: % % MagickBooleanType OrderedDitherImage(Image *image, % const char *threshold_map,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold_map: A string containing the name of the threshold dither % map to use, followed by zero or more numbers representing the number % of color levels to dither between. % % Any level number less than 2 will be equivalent to 2, and means only % binary dithering will be applied to each color channel. % % No numbers also means a 2 level (bitmap) dither will be applied to all % channels, while a single number is the number of levels applied to each % channel in sequence. More numbers will be applied in turn to each of % the color channels. % % For example: "o3x3,6" will generate a 6 level posterization of the % image with an ordered 3x3 diffused pixel dither being applied between % each level. While checker,8,8,4 will produce a 332 colormaped image % with only a single checkerboard hash pattern (50% grey) between each % color level, to basically double the number of color levels with % a bare minimim of dithering. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType OrderedDitherImage(Image *image, const char *threshold_map,ExceptionInfo *exception) { #define DitherImageTag "Dither/Image" CacheView *image_view; char token[MagickPathExtent]; const char *p; double levels[CompositePixelChannel]; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; ThresholdMap *map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (threshold_map == (const char *) NULL) return(MagickTrue); p=(char *) threshold_map; while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) && (*p != '\0')) p++; threshold_map=p; while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) && (*p != '\0')) { if ((p-threshold_map) >= (MagickPathExtent-1)) break; token[p-threshold_map]=(*p); p++; } token[p-threshold_map]='\0'; map=GetThresholdMap(token,exception); if (map == (ThresholdMap *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","ordered-dither",threshold_map); return(MagickFalse); } for (i=0; i < MaxPixelChannels; i++) levels[i]=2.0; p=strchr((char *) threshold_map,','); if ((p != (char *) NULL) && (isdigit((int) ((unsigned char) *(++p))) != 0)) { (void) GetNextToken(p,&p,MagickPathExtent,token); for (i=0; (i < MaxPixelChannels); i++) levels[i]=StringToDouble(token,(char **) NULL); for (i=0; (*p != '\0') && (i < MaxPixelChannels); i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); levels[i]=StringToDouble(token,(char **) NULL); } } for (i=0; i 
< MaxPixelChannels; i++)
    if (fabs(levels[i]) >= 1)
      levels[i]-=1.0;
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      ssize_t
        n;

      n=0;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        ssize_t
          level,
          threshold;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (fabs(levels[n]) < MagickEpsilon)
          {
            n++;
            continue;
          }
        threshold=(ssize_t) (QuantumScale*q[i]*(levels[n]*(map->divisor-1)+1));
        level=threshold/(map->divisor-1);
        threshold-=level*(map->divisor-1);
        q[i]=ClampToQuantum((double) (level+(threshold >=
          map->levels[(x % map->width)+map->width*(y % map->height)]))*
          QuantumRange/levels[n]);
        n++;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,DitherImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  map=DestroyThresholdMap(map);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     P e r c e p t i b l e I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PerceptibleImage() sets each pixel whose value is less than |epsilon| to
%  epsilon or -epsilon (whichever is closer); otherwise the pixel value
%  remains unchanged.
%
%  The format of the PerceptibleImage method is:
%
%      MagickBooleanType PerceptibleImage(Image *image,const double epsilon,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o epsilon: the epsilon threshold (e.g. 1.0e-9).
%
%    o exception: return any errors or warnings in this structure.
%
*/
static inline Quantum PerceptibleThreshold(const Quantum quantum,
  const double epsilon)
{
  double
    sign;

  sign=(double) quantum < 0.0 ?
-1.0 : 1.0; if ((sign*quantum) >= epsilon) return(quantum); return((Quantum) (sign*epsilon)); } MagickExport MagickBooleanType PerceptibleImage(Image *image, const double epsilon,ExceptionInfo *exception) { #define PerceptibleImageTag "Perceptible/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) { register ssize_t i; register PixelInfo *magick_restrict q; q=image->colormap; for (i=0; i < (ssize_t) image->colors; i++) { q->red=(double) PerceptibleThreshold(ClampToQuantum(q->red), epsilon); q->green=(double) PerceptibleThreshold(ClampToQuantum(q->green), epsilon); q->blue=(double) PerceptibleThreshold(ClampToQuantum(q->blue), epsilon); q->alpha=(double) PerceptibleThreshold(ClampToQuantum(q->alpha), epsilon); q++; } return(SyncImage(image,exception)); } /* Perceptible image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; q[i]=PerceptibleThreshold(q[i],epsilon); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,PerceptibleImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R a n d o m T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RandomThresholdImage() changes the value of individual pixels based on the % intensity of each pixel compared to a random threshold. The result is a % low-contrast, two color image. % % The format of the RandomThresholdImage method is: % % MagickBooleanType RandomThresholdImage(Image *image, % const char *thresholds,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o low,high: Specify the high and low thresholds. These values range from % 0 to QuantumRange. % % o exception: return any errors or warnings in this structure. 
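%
%  For example, to randomize thresholds between 5% and 95% of the quantum
%  range one might call:
%
%      (void) RandomThresholdImage(image,0.05*QuantumRange,0.95*QuantumRange,
%        exception);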
% */ MagickExport MagickBooleanType RandomThresholdImage(Image *image, const double min_threshold, const double max_threshold,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo threshold; RandomInfo **magick_restrict random_info; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); GetPixelInfo(image,&threshold); /* Random threshold image. */ status=MagickTrue; progress=0; random_info=AcquireRandomInfoThreadSet(); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double threshold; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if ((double) q[i] < min_threshold) threshold=min_threshold; else if ((double) q[i] > max_threshold) threshold=max_threshold; else threshold=(double) (QuantumRange* GetPseudoRandomValue(random_info[id])); q[i]=(double) q[i] <= threshold ? 0 : QuantumRange; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); random_info=DestroyRandomInfoThreadSet(random_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R a n g e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RangeThresholdImage() applies soft and hard thresholding. % % The format of the RangeThresholdImage method is: % % MagickBooleanType RangeThresholdImage(Image *image, % const double low_black,const double low_white,const double high_white, % const double high_black,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o low_black: Define the minimum black threshold value. % % o low_white: Define the minimum white threshold value. % % o high_white: Define the maximum white threshold value. % % o high_black: Define the maximum black threshold value. % % o exception: return any errors or warnings in this structure. 
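%
%  The four values define a trapezoid over the intensity range: values below
%  low_black or above high_black become black, values between low_white and
%  high_white become white, and the two remaining intervals ramp linearly
%  between black and white.  For example:
%
%      (void) RangeThresholdImage(image,0.10*QuantumRange,0.40*QuantumRange,
%        0.60*QuantumRange,0.90*QuantumRange,exception);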
% */ MagickExport MagickBooleanType RangeThresholdImage(Image *image, const double low_black,const double low_white,const double high_white, const double high_black,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) TransformImageColorspace(image,sRGBColorspace,exception); /* Range threshold image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double pixel; register ssize_t i; pixel=GetPixelIntensity(image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (image->channel_mask != DefaultChannels) pixel=(double) q[i]; if (pixel < low_black) q[i]=(Quantum) 0; else if ((pixel >= low_black) && (pixel < low_white)) q[i]=ClampToQuantum(QuantumRange* PerceptibleReciprocal(low_white-low_black)*(pixel-low_black)); else if ((pixel >= low_white) && (pixel <= high_white)) q[i]=QuantumRange; else if ((pixel > high_white) && (pixel <= high_black)) q[i]=ClampToQuantum(QuantumRange*PerceptibleReciprocal( high_black-high_white)*(high_black-pixel)); else if (pixel > high_black) q[i]=(Quantum) 0; else q[i]=(Quantum) 0; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W h i t e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WhiteThresholdImage() is like ThresholdImage() but forces all pixels above % the threshold into white while leaving all pixels at or below the threshold % unchanged. % % The format of the WhiteThresholdImage method is: % % MagickBooleanType WhiteThresholdImage(Image *image, % const char *threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold: Define the threshold value. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType WhiteThresholdImage(Image *image, const char *thresholds,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType progress; PixelInfo threshold; MagickStatusType flags; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (thresholds == (const char *) NULL) return(MagickTrue); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) TransformImageColorspace(image,sRGBColorspace,exception); GetPixelInfo(image,&threshold); flags=ParseGeometry(thresholds,&geometry_info); threshold.red=geometry_info.rho; threshold.green=geometry_info.rho; threshold.blue=geometry_info.rho; threshold.black=geometry_info.rho; threshold.alpha=100.0; if ((flags & SigmaValue) != 0) threshold.green=geometry_info.sigma; if ((flags & XiValue) != 0) threshold.blue=geometry_info.xi; if ((flags & PsiValue) != 0) threshold.alpha=geometry_info.psi; if (threshold.colorspace == CMYKColorspace) { if ((flags & PsiValue) != 0) threshold.black=geometry_info.psi; if ((flags & ChiValue) != 0) threshold.alpha=geometry_info.chi; } if ((flags & PercentValue) != 0) { threshold.red*=(MagickRealType) (QuantumRange/100.0); threshold.green*=(MagickRealType) (QuantumRange/100.0); threshold.blue*=(MagickRealType) (QuantumRange/100.0); threshold.black*=(MagickRealType) (QuantumRange/100.0); threshold.alpha*=(MagickRealType) (QuantumRange/100.0); } /* White threshold image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double pixel; register ssize_t i; pixel=GetPixelIntensity(image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (image->channel_mask != DefaultChannels) pixel=(double) q[i]; if (pixel > GetPixelInfoChannel(&threshold,channel)) q[i]=QuantumRange; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); }
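/*
  Illustrative sketch (not part of the file above, and not the ImageMagick
  API): RangeThresholdImage() maps intensity through a trapezoidal ramp --
  0 below low_black, a linear rise to full scale across
  [low_black,low_white], full scale through [low_white,high_white], a
  linear fall across [high_white,high_black], and 0 above high_black.
  RANGE stands in for QuantumRange here, and the PerceptibleReciprocal()
  zero-guard is omitted, so this assumes low_black < low_white and
  high_white < high_black.
*/
#include <stdio.h>

#define RANGE 255.0  /* stand-in for QuantumRange in this sketch */

static double range_ramp(double pixel,double low_black,double low_white,
  double high_white,double high_black)
{
  if (pixel < low_black)
    return(0.0);
  if (pixel < low_white)
    return(RANGE*(pixel-low_black)/(low_white-low_black));
  if (pixel <= high_white)
    return(RANGE);
  if (pixel <= high_black)
    return(RANGE*(high_black-pixel)/(high_black-high_white));
  return(0.0);
}

int main(void)
{
  double
    p;

  for (p=0.0; p <= 255.0; p+=51.0)
    printf("%6.1f -> %6.1f\n",p,range_ramp(p,32.0,64.0,192.0,224.0));
  return(0);
}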
main.c
/* Copyright (C) 2010 The Trustees of Indiana University.                  */
/*                                                                         */
/* Use, modification and distribution is subject to the Boost Software     */
/* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */
/* http://www.boost.org/LICENSE_1_0.txt)                                   */
/*                                                                         */
/*  Authors: Jeremiah Willcock                                             */
/*           Andrew Lumsdaine                                              */

/* These need to be before any possible inclusions of stdint.h or inttypes.h.
 * */
#ifndef __STDC_LIMIT_MACROS
#define __STDC_LIMIT_MACROS
#endif
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif

#include "../generator/make_graph.h"
#include "../generator/utils.h"
#include "common.h"
#include <math.h>
#include <mpi.h>
#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdio.h>
#include <limits.h>
#include <stdint.h>
#include <inttypes.h>

static int compare_doubles(const void* a, const void* b) {
  double aa = *(const double*)a;
  double bb = *(const double*)b;
  return (aa < bb) ? -1 : (aa == bb) ? 0 : 1;
}

enum {s_minimum, s_firstquartile, s_median, s_thirdquartile, s_maximum, s_mean, s_std, s_LAST};

static void get_statistics(const double x[], int n, double r[s_LAST]) {
  double temp;
  int i;
  /* Compute mean. */
  temp = 0;
  for (i = 0; i < n; ++i) temp += x[i];
  temp /= n;
  r[s_mean] = temp;
  /* Compute std. dev. */
  temp = 0;
  for (i = 0; i < n; ++i) temp += (x[i] - r[s_mean]) * (x[i] - r[s_mean]);
  temp /= n - 1;
  r[s_std] = sqrt(temp);
  /* Sort x. */
  double* xx = (double*)xmalloc(n * sizeof(double));
  memcpy(xx, x, n * sizeof(double));
  qsort(xx, n, sizeof(double), compare_doubles);
  /* Get order statistics. */
  r[s_minimum] = xx[0];
  r[s_firstquartile] = (xx[(n - 1) / 4] + xx[n / 4]) * .5;
  r[s_median] = (xx[(n - 1) / 2] + xx[n / 2]) * .5;
  r[s_thirdquartile] = (xx[n - 1 - (n - 1) / 4] + xx[n - 1 - n / 4]) * .5;
  r[s_maximum] = xx[n - 1];
  /* Clean up. */
  free(xx);
}

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  setup_globals();

  /* Parse arguments. */
  int SCALE = 16;
  int edgefactor = 16; /* nedges / nvertices, i.e., .5 * avg. degree */
  if (argc >= 2) SCALE = atoi(argv[1]);
  if (argc >= 3) edgefactor = atoi(argv[2]);
  if (argc <= 1 || argc >= 4 || SCALE == 0 || edgefactor == 0) {
    if (rank == 0) {
      fprintf(stderr, "Usage: %s SCALE edgefactor\n  SCALE = log_2(# vertices) [integer, required]\n  edgefactor = (# edges) / (# vertices) = .5 * (average vertex degree) [integer, defaults to 16]\n(Random number seed and Kronecker initiator are in main.c)\n", argv[0]);
    }
    MPI_Abort(MPI_COMM_WORLD, 1);
  }
  uint64_t seed1 = 2, seed2 = 3;

  const char* filename = getenv("TMPFILE");
  /* If filename is NULL, store data in memory */

  tuple_graph tg;
  tg.nglobaledges = (int64_t)(edgefactor) << SCALE;
  int64_t nglobalverts = (int64_t)(1) << SCALE;

  tg.data_in_file = (filename != NULL);
  if (tg.data_in_file) {
    MPI_File_set_errhandler(MPI_FILE_NULL, MPI_ERRORS_ARE_FATAL);
    MPI_File_open(MPI_COMM_WORLD, (char*)filename, MPI_MODE_RDWR | MPI_MODE_CREATE | MPI_MODE_EXCL | MPI_MODE_DELETE_ON_CLOSE | MPI_MODE_UNIQUE_OPEN, MPI_INFO_NULL, &tg.edgefile);
    MPI_File_set_size(tg.edgefile, tg.nglobaledges * sizeof(packed_edge));
    MPI_File_set_view(tg.edgefile, 0, packed_edge_mpi_type, packed_edge_mpi_type, "native", MPI_INFO_NULL);
    MPI_File_set_atomicity(tg.edgefile, 0);
  }

  /* Make the raw graph edges. */
  /* Get roots for BFS runs, plus maximum vertex with non-zero degree (used by
   * validator.
*/ int num_bfs_roots = 64; int64_t* bfs_roots = (int64_t*)xmalloc(num_bfs_roots * sizeof(int64_t)); int64_t max_used_vertex = 0; double make_graph_start = MPI_Wtime(); { /* Spread the two 64-bit numbers into five nonzero values in the correct * range. */ uint_fast32_t seed[5]; make_mrg_seed(seed1, seed2, seed); /* As the graph is being generated, also keep a bitmap of vertices with * incident edges. We keep a grid of processes, each row of which has a * separate copy of the bitmap (distributed among the processes in the * row), and then do an allreduce at the end. This scheme is used to avoid * non-local communication and reading the file separately just to find BFS * roots. */ MPI_Offset nchunks_in_file = (tg.nglobaledges + FILE_CHUNKSIZE - 1) / FILE_CHUNKSIZE; int64_t bitmap_size_in_bytes = int64_min(BITMAPSIZE, (nglobalverts + CHAR_BIT - 1) / CHAR_BIT); if (bitmap_size_in_bytes * size * CHAR_BIT < nglobalverts) { bitmap_size_in_bytes = (nglobalverts + size * CHAR_BIT - 1) / (size * CHAR_BIT); } int ranks_per_row = ((nglobalverts + CHAR_BIT - 1) / CHAR_BIT + bitmap_size_in_bytes - 1) / bitmap_size_in_bytes; int nrows = size / ranks_per_row; int my_row = -1, my_col = -1; unsigned char* restrict has_edge = NULL; MPI_Comm cart_comm; { int dims[2] = {size / ranks_per_row, ranks_per_row}; int periods[2] = {0, 0}; MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 1, &cart_comm); } int in_generating_rectangle = 0; if (cart_comm != MPI_COMM_NULL) { in_generating_rectangle = 1; { int dims[2], periods[2], coords[2]; MPI_Cart_get(cart_comm, 2, dims, periods, coords); my_row = coords[0]; my_col = coords[1]; } MPI_Comm this_col; MPI_Comm_split(cart_comm, my_col, my_row, &this_col); MPI_Comm_free(&cart_comm); has_edge = (unsigned char*)xMPI_Alloc_mem(bitmap_size_in_bytes); memset(has_edge, 0, bitmap_size_in_bytes); /* Every rank in a given row creates the same vertices (for updating the * bitmap); only one writes them to the file (or final memory buffer). */ packed_edge* buf = (packed_edge*)xmalloc(FILE_CHUNKSIZE * sizeof(packed_edge)); MPI_Offset block_limit = (nchunks_in_file + nrows - 1) / nrows; /* fprintf(stderr, "%d: nchunks_in_file = %" PRId64 ", block_limit = %" PRId64 " in grid of %d rows, %d cols\n", rank, (int64_t)nchunks_in_file, (int64_t)block_limit, nrows, ranks_per_row); */ if (tg.data_in_file) { tg.edgememory_size = 0; tg.edgememory = NULL; } else { int my_pos = my_row + my_col * nrows; int last_pos = (tg.nglobaledges % ((int64_t)FILE_CHUNKSIZE * nrows * ranks_per_row) != 0) ? (tg.nglobaledges / FILE_CHUNKSIZE) % (nrows * ranks_per_row) : -1; int64_t edges_left = tg.nglobaledges % FILE_CHUNKSIZE; int64_t nedges = FILE_CHUNKSIZE * (tg.nglobaledges / ((int64_t)FILE_CHUNKSIZE * nrows * ranks_per_row)) + FILE_CHUNKSIZE * (my_pos < (tg.nglobaledges / FILE_CHUNKSIZE) % (nrows * ranks_per_row)) + (my_pos == last_pos ? edges_left : 0); /* fprintf(stderr, "%d: nedges = %" PRId64 " of %" PRId64 "\n", rank, (int64_t)nedges, (int64_t)tg.nglobaledges); */ tg.edgememory_size = nedges; tg.edgememory = (packed_edge*)xmalloc(nedges * sizeof(packed_edge)); } MPI_Offset block_idx; for (block_idx = 0; block_idx < block_limit; ++block_idx) { /* fprintf(stderr, "%d: On block %d of %d\n", rank, (int)block_idx, (int)block_limit); */ MPI_Offset start_edge_index = int64_min(FILE_CHUNKSIZE * (block_idx * nrows + my_row), tg.nglobaledges); MPI_Offset edge_count = int64_min(tg.nglobaledges - start_edge_index, FILE_CHUNKSIZE); packed_edge* actual_buf = (!tg.data_in_file && block_idx % ranks_per_row == my_col) ? 
tg.edgememory + FILE_CHUNKSIZE * (block_idx / ranks_per_row) : buf; /* fprintf(stderr, "%d: My range is [%" PRId64 ", %" PRId64 ") %swriting into index %" PRId64 "\n", rank, (int64_t)start_edge_index, (int64_t)(start_edge_index + edge_count), (my_col == (block_idx % ranks_per_row)) ? "" : "not ", (int64_t)(FILE_CHUNKSIZE * (block_idx / ranks_per_row))); */ if (!tg.data_in_file && block_idx % ranks_per_row == my_col) { assert (FILE_CHUNKSIZE * (block_idx / ranks_per_row) + edge_count <= tg.edgememory_size); } generate_kronecker_range(seed, SCALE, start_edge_index, start_edge_index + edge_count, actual_buf); if (tg.data_in_file && my_col == (block_idx % ranks_per_row)) { /* Try to spread writes among ranks */ MPI_File_write_at(tg.edgefile, start_edge_index, actual_buf, edge_count, packed_edge_mpi_type, MPI_STATUS_IGNORE); } ptrdiff_t i; #ifdef _OPENMP #pragma omp parallel for #endif for (i = 0; i < edge_count; ++i) { int64_t src = get_v0_from_edge(&actual_buf[i]); int64_t tgt = get_v1_from_edge(&actual_buf[i]); if (src == tgt) continue; if (src / bitmap_size_in_bytes / CHAR_BIT == my_col) { #ifdef _OPENMP #pragma omp atomic #endif has_edge[(src / CHAR_BIT) % bitmap_size_in_bytes] |= (1 << (src % CHAR_BIT)); } if (tgt / bitmap_size_in_bytes / CHAR_BIT == my_col) { #ifdef _OPENMP #pragma omp atomic #endif has_edge[(tgt / CHAR_BIT) % bitmap_size_in_bytes] |= (1 << (tgt % CHAR_BIT)); } } } free(buf); #if 0 /* The allreduce for each root acts like we did this: */ MPI_Allreduce(MPI_IN_PLACE, has_edge, bitmap_size_in_bytes, MPI_UNSIGNED_CHAR, MPI_BOR, this_col); #endif MPI_Comm_free(&this_col); } else { tg.edgememory = NULL; tg.edgememory_size = 0; } MPI_Allreduce(&tg.edgememory_size, &tg.max_edgememory_size, 1, MPI_INT64_T, MPI_MAX, MPI_COMM_WORLD); /* Find roots and max used vertex */ { uint64_t counter = 0; int bfs_root_idx; for (bfs_root_idx = 0; bfs_root_idx < num_bfs_roots; ++bfs_root_idx) { int64_t root; while (1) { double d[2]; make_random_numbers(2, seed1, seed2, counter, d); root = (int64_t)((d[0] + d[1]) * nglobalverts) % nglobalverts; counter += 2; if (counter > 2 * nglobalverts) break; int is_duplicate = 0; int i; for (i = 0; i < bfs_root_idx; ++i) { if (root == bfs_roots[i]) { is_duplicate = 1; break; } } if (is_duplicate) continue; /* Everyone takes the same path here */ int root_ok = 0; if (in_generating_rectangle && (root / CHAR_BIT / bitmap_size_in_bytes) == my_col) { root_ok = (has_edge[(root / CHAR_BIT) % bitmap_size_in_bytes] & (1 << (root % CHAR_BIT))) != 0; } MPI_Allreduce(MPI_IN_PLACE, &root_ok, 1, MPI_INT, MPI_LOR, MPI_COMM_WORLD); if (root_ok) break; } bfs_roots[bfs_root_idx] = root; } num_bfs_roots = bfs_root_idx; /* Find maximum non-zero-degree vertex. */ { int64_t i; max_used_vertex = 0; if (in_generating_rectangle) { for (i = bitmap_size_in_bytes * CHAR_BIT; i > 0; --i) { if (i > nglobalverts) continue; if (has_edge[(i - 1) / CHAR_BIT] & (1 << ((i - 1) % CHAR_BIT))) { max_used_vertex = (i - 1) + my_col * CHAR_BIT * bitmap_size_in_bytes; break; } } } MPI_Allreduce(MPI_IN_PLACE, &max_used_vertex, 1, MPI_INT64_T, MPI_MAX, MPI_COMM_WORLD); } } if (in_generating_rectangle) { MPI_Free_mem(has_edge); } if (tg.data_in_file) { MPI_File_sync(tg.edgefile); } } double make_graph_stop = MPI_Wtime(); double make_graph_time = make_graph_stop - make_graph_start; if (rank == 0) { /* Not an official part of the results */ fprintf(stderr, "graph_generation: %f s\n", make_graph_time); } /* Make user's graph data structure. 
*/ double data_struct_start = MPI_Wtime(); make_graph_data_structure(&tg); double data_struct_stop = MPI_Wtime(); double data_struct_time = data_struct_stop - data_struct_start; if (rank == 0) { /* Not an official part of the results */ fprintf(stderr, "construction_time: %f s\n", data_struct_time); } /* Number of edges visited in each BFS; a double so get_statistics can be * used directly. */ double* edge_counts = (double*)xmalloc(num_bfs_roots * sizeof(double)); /* Run BFS. */ int validation_passed = 1; double* bfs_times = (double*)xmalloc(num_bfs_roots * sizeof(double)); double* validate_times = (double*)xmalloc(num_bfs_roots * sizeof(double)); uint64_t nlocalverts = get_nlocalverts_for_pred(); int64_t* pred = (int64_t*)xMPI_Alloc_mem(nlocalverts * sizeof(int64_t)); int bfs_root_idx; for (bfs_root_idx = 0; bfs_root_idx < num_bfs_roots; ++bfs_root_idx) { int64_t root = bfs_roots[bfs_root_idx]; if (rank == 0) fprintf(stderr, "Running BFS %d\n", bfs_root_idx); /* Clear the pred array. */ memset(pred, 0, nlocalverts * sizeof(int64_t)); /* Do the actual BFS. */ double bfs_start = MPI_Wtime(); run_bfs(root, &pred[0]); double bfs_stop = MPI_Wtime(); bfs_times[bfs_root_idx] = bfs_stop - bfs_start; if (rank == 0) fprintf(stderr, "Time for BFS %d is %f\n", bfs_root_idx, bfs_times[bfs_root_idx]); /* Validate result. */ if (rank == 0) fprintf(stderr, "Validating BFS %d\n", bfs_root_idx); double validate_start = MPI_Wtime(); int64_t edge_visit_count; int validation_passed_one = validate_bfs_result(&tg, max_used_vertex + 1, nlocalverts, root, pred, &edge_visit_count); double validate_stop = MPI_Wtime(); validate_times[bfs_root_idx] = validate_stop - validate_start; if (rank == 0) fprintf(stderr, "Validate time for BFS %d is %f\n", bfs_root_idx, validate_times[bfs_root_idx]); edge_counts[bfs_root_idx] = (double)edge_visit_count; if (rank == 0) fprintf(stderr, "TEPS for BFS %d is %g\n", bfs_root_idx, edge_visit_count / bfs_times[bfs_root_idx]); if (!validation_passed_one) { validation_passed = 0; if (rank == 0) fprintf(stderr, "Validation failed for this BFS root; skipping rest.\n"); break; } } MPI_Free_mem(pred); free(bfs_roots); free_graph_data_structure(); if (tg.data_in_file) { MPI_File_close(&tg.edgefile); } else { free(tg.edgememory); tg.edgememory = NULL; } /* Print results. 
*/ if (rank == 0) { if (!validation_passed) { fprintf(stdout, "No results printed for invalid run.\n"); } else { int i; fprintf(stdout, "SCALE: %d\n", SCALE); fprintf(stdout, "edgefactor: %d\n", edgefactor); fprintf(stdout, "NBFS: %d\n", num_bfs_roots); fprintf(stdout, "graph_generation: %g\n", make_graph_time); fprintf(stdout, "num_mpi_processes: %d\n", size); fprintf(stdout, "construction_time: %g\n", data_struct_time); double stats[s_LAST]; get_statistics(bfs_times, num_bfs_roots, stats); fprintf(stdout, "min_time: %g\n", stats[s_minimum]); fprintf(stdout, "firstquartile_time: %g\n", stats[s_firstquartile]); fprintf(stdout, "median_time: %g\n", stats[s_median]); fprintf(stdout, "thirdquartile_time: %g\n", stats[s_thirdquartile]); fprintf(stdout, "max_time: %g\n", stats[s_maximum]); fprintf(stdout, "mean_time: %g\n", stats[s_mean]); fprintf(stdout, "stddev_time: %g\n", stats[s_std]); get_statistics(edge_counts, num_bfs_roots, stats); fprintf(stdout, "min_nedge: %.11g\n", stats[s_minimum]); fprintf(stdout, "firstquartile_nedge: %.11g\n", stats[s_firstquartile]); fprintf(stdout, "median_nedge: %.11g\n", stats[s_median]); fprintf(stdout, "thirdquartile_nedge: %.11g\n", stats[s_thirdquartile]); fprintf(stdout, "max_nedge: %.11g\n", stats[s_maximum]); fprintf(stdout, "mean_nedge: %.11g\n", stats[s_mean]); fprintf(stdout, "stddev_nedge: %.11g\n", stats[s_std]); double* secs_per_edge = (double*)xmalloc(num_bfs_roots * sizeof(double)); for (i = 0; i < num_bfs_roots; ++i) secs_per_edge[i] = bfs_times[i] / edge_counts[i]; get_statistics(secs_per_edge, num_bfs_roots, stats); fprintf(stdout, "min_TEPS: %g\n", 1. / stats[s_maximum]); fprintf(stdout, "firstquartile_TEPS: %g\n", 1. / stats[s_thirdquartile]); fprintf(stdout, "median_TEPS: %g\n", 1. / stats[s_median]); fprintf(stdout, "thirdquartile_TEPS: %g\n", 1. / stats[s_firstquartile]); fprintf(stdout, "max_TEPS: %g\n", 1. / stats[s_minimum]); fprintf(stdout, "harmonic_mean_TEPS: %g\n", 1. / stats[s_mean]); /* Formula from: * Title: The Standard Errors of the Geometric and Harmonic Means and * Their Application to Index Numbers * Author(s): Nilan Norris * Source: The Annals of Mathematical Statistics, Vol. 11, No. 4 (Dec., 1940), pp. 445-448 * Publisher(s): Institute of Mathematical Statistics * Stable URL: http://www.jstor.org/stable/2235723 * (same source as in specification). */ fprintf(stdout, "harmonic_stddev_TEPS: %g\n", stats[s_std] / (stats[s_mean] * stats[s_mean] * sqrt(num_bfs_roots - 1))); free(secs_per_edge); secs_per_edge = NULL; free(edge_counts); edge_counts = NULL; get_statistics(validate_times, num_bfs_roots, stats); fprintf(stdout, "min_validate: %g\n", stats[s_minimum]); fprintf(stdout, "firstquartile_validate: %g\n", stats[s_firstquartile]); fprintf(stdout, "median_validate: %g\n", stats[s_median]); fprintf(stdout, "thirdquartile_validate: %g\n", stats[s_thirdquartile]); fprintf(stdout, "max_validate: %g\n", stats[s_maximum]); fprintf(stdout, "mean_validate: %g\n", stats[s_mean]); fprintf(stdout, "stddev_validate: %g\n", stats[s_std]); #if 0 for (i = 0; i < num_bfs_roots; ++i) { fprintf(stdout, "Run %3d: %g s, validation %g s\n", i + 1, bfs_times[i], validate_times[i]); } #endif } } free(bfs_times); free(validate_times); cleanup_globals(); MPI_Finalize(); return 0; }
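/* Illustrative sketch (not part of the file above): TEPS is averaged
 * harmonically, exactly as the driver does -- take the arithmetic mean of
 * seconds-per-edge and invert it.  The standard error uses the Norris
 * (1940) formula cited above.  The per-root times and edge counts here
 * are made-up sample values. */
#include <stdio.h>
#include <math.h>

int main(void) {
  double times[] = {0.10, 0.12, 0.11, 0.09};     /* hypothetical BFS times (s) */
  double edges[] = {1.0e6, 1.1e6, 0.9e6, 1.0e6}; /* hypothetical edge visits */
  int i, n = 4;
  double mean = 0, var = 0;
  for (i = 0; i < n; ++i) mean += times[i] / edges[i];
  mean /= n;                                     /* mean seconds per edge */
  for (i = 0; i < n; ++i) {
    double d = times[i] / edges[i] - mean;
    var += d * d;
  }
  var /= n - 1;                                  /* sample variance */
  printf("harmonic_mean_TEPS: %g\n", 1. / mean);
  /* Norris: std. error of the harmonic mean = s / (m^2 * sqrt(n-1)) */
  printf("harmonic_stddev_TEPS: %g\n", sqrt(var) / (mean * mean * sqrt(n - 1.)));
  return 0;
}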
Tanh.c
#ifndef TH_GENERIC_FILE #define TH_GENERIC_FILE "generic/Tanh.c" #else void THNN_(Tanh_updateOutput)( THNNState *state, THTensor *input, THTensor *output) { THTensor_(tanh)(output, input); } void THNN_(Tanh_updateGradInput)( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THTensor *output) { THNN_CHECK_SHAPE(output, gradOutput); THTensor_(resizeAs)(gradInput, output); if (output->nDimension == 1 || !THTensor_(isContiguous)(output) || !THTensor_(isContiguous)(gradOutput) || !THTensor_(isContiguous)(gradInput)) { TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, output, real z = *output_data; \ *gradInput_data = *gradOutput_data * (1. - z*z); ); } else { real* ptr_gradOutput = THTensor_(data)(gradOutput); real* ptr_gradInput = THTensor_(data)(gradInput); real* ptr_output = THTensor_(data)(output); long i; #pragma omp parallel for private(i) for (i = 0; i < THTensor_(nElement)(gradInput); i++) { real z = ptr_output[i]; ptr_gradInput[i] = ptr_gradOutput[i] * (1. - z*z); } } } #endif
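/* Illustrative sketch (standalone, not Torch code): the backward pass above
 * relies on the identity d/dx tanh(x) = 1 - tanh(x)^2, which is why it only
 * needs the saved output z and never the original input.  This checks the
 * identity against a central finite difference. */
#include <stdio.h>
#include <math.h>

int main(void) {
  double x = 0.7, h = 1e-6;
  double z = tanh(x);
  double analytic = 1. - z * z;                       /* gradient from output */
  double numeric = (tanh(x + h) - tanh(x - h)) / (2. * h);
  printf("analytic %.9f vs numeric %.9f\n", analytic, numeric);
  return 0;
}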
omp_parallel_copyin.c
<ompts:test> <ompts:testdescription>Test which checks the omp parallel copyin directive.</ompts:testdescription> <ompts:ompversion>3.0</ompts:ompversion> <ompts:directive>omp parallel copyin</ompts:directive> <ompts:dependences>omp critical,omp threadprivate</ompts:dependences> <ompts:testcode> #include <stdio.h> #include <stdlib.h> #include "omp_testsuite.h" static int sum1 = 789; #pragma omp threadprivate(sum1) int <ompts:testcode:functionname>omp_parallel_copyin</ompts:testcode:functionname>(FILE * logFile) { <ompts:orphan:vars> int sum, num_threads; </ompts:orphan:vars> int known_sum; sum = 0; sum1 = 7; num_threads = 0; #pragma omp parallel <ompts:check>copyin(sum1)</ompts:check> { /*printf("sum1=%d\n",sum1);*/ <ompts:orphan> int i; #pragma omp for for (i = 1; i < 1000; i++) { sum1 = sum1 + i; } /*end of for*/ #pragma omp critical { sum = sum + sum1; num_threads++; } /*end of critical*/ </ompts:orphan> } /* end of parallel*/ known_sum = (999 * 1000) / 2 + 7 * num_threads; return (known_sum == sum); } </ompts:testcode> </ompts:test>
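/* Illustrative sketch (not part of the test suite): a minimal demo of the
 * semantics the test above checks -- copyin broadcasts the master thread's
 * threadprivate value to every thread's private copy on entry to the
 * parallel region.  Compile with -fopenmp (or equivalent). */
#include <stdio.h>
#include <omp.h>

static int counter = 42;
#pragma omp threadprivate(counter)

int main(void)
{
  counter = 7; /* set on the master thread only */
#pragma omp parallel copyin(counter)
  {
#pragma omp critical
    printf("thread %d starts with counter=%d\n", omp_get_thread_num(), counter);
  }
  return 0;
}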
familytree_par.c
#include "familytree.h" #include <omp.h> int parallel_traverse(tree *node) { if (node == NULL) return 0; int father_iq, mother_iq; #pragma omp task shared(father_iq) father_iq = parallel_traverse(node->father); mother_iq = parallel_traverse(node->mother); #pragma omp taskwait node->IQ = compute_IQ(node->data, father_iq, mother_iq); genius[node->id] = node->IQ; return node->IQ; } int traverse(tree *node, int numThreads){ #pragma omp parallel { #pragma omp single parallel_traverse(node); } return node->IQ; }
LAGraph_bfs_pushpull.c
//------------------------------------------------------------------------------ // LAGraph_bfs_pushpull: push-pull breadth-first search //------------------------------------------------------------------------------ /* LAGraph: graph algorithms based on GraphBLAS Copyright 2020 LAGraph Contributors. (see Contributors.txt for a full list of Contributors; see ContributionInstructions.txt for information on how you can Contribute to this project). All Rights Reserved. NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT. Released under a BSD license, please see the LICENSE file distributed with this Software or contact permission@sei.cmu.edu for full terms. Created, in part, with funding and support from the United States Government. (see Acknowledgments.txt file). This program includes and/or can make use of certain third party source code, object code, documentation and other files ("Third Party Software"). See LICENSE file for more details. */ #include "LAGraph_bfs_pushpull.h" #include "../config.h" //------------------------------------------------------------------------------ // LAGraph_bfs_pushpull: direction-optimized push/pull breadth first search, // contributed by Tim Davis, Texas A&M. // LAGraph_bfs_pushpull computes the BFS of a graph from a single given // source node. The result is a vector v where v(i)=k if node i was placed // at level k in the BFS. // Usage: // info = LAGraph_bfs_pushpull (&v, &pi, A, AT, source, max_level, vsparse) ; // GrB_Vector *v: a vector containing the result, created on output. // v(i) = k is the BFS level of node i in the graph, where a source // node has v(source)=1. v(i) is implicitly zero if it is unreachable // from the source node. That is, GrB_Vector_nvals (&nreach,v) is the // size of the reachable set of the source node, for a single-source // BFS. v may be returned as sparse, or full. If full, v(i)=0 // indicates that node i was not reached. If sparse, the pattern of v // indicates the set of nodes reached. // GrB_Vector *pi: a vector containing the BFS tree, in 1-based indexing. // pi(source) = source+1 for source node. pi(i) = p+1 if p is the // parent of i. If pi is sparse, and pi(i) is not present, then node // i has not been reached. Otherwise, if pi is full, then pi(i)=0 // indicates that node i was not reached. // GrB_Matrix A: a square matrix of any type. The values of A are not // accessed. The presence of the entry A(i,j) indicates the edge // (i,j). That is, an explicit entry A(i,j)=0 is treated as an edge. // GrB_Matrix AT: an optional matrix of any type. If NULL, the algorithm // is a conventional push-only BFS. If not NULL, AT must be the // transpose of A, and a push-pull algorithm is used (NOTE: this // assumes GraphBLAS stores its matrix in CSR form; see discussion // below). Results are undefined if AT is not NULL but not identical // to the transpose of A. // int64_t source: the source node for the BFS. // int64_t max_level: An optional limit on the levels searched for the // single-source BFS. If zero, then no limit is enforced. If > 0, // then only nodes with v(i) <= max_level will be visited. 
That is:
//      1: just the source node, 2: the source and its neighbors, 3: the
//      source node, its neighbors, and their neighbors, etc.

//      bool vsparse: if the result v may remain very sparse, then set this
//      parameter to true.  If v might have many entries, set it false.  If
//      you are unsure, then set it to true.  This parameter speeds up
//      the handling of v.  If you guess wrong, there is a slight
//      performance penalty.  The results are not affected by this
//      parameter, just the performance.  This parameter is used only for
//      the single-source BFS.

// single-source BFS:
//      Given a graph A, a source node, find all nodes reachable from the
//      source node.  v(source)=1, v(i)=2 if edge (source,i) appears in the
//      graph, and so on.  If node i is not reachable from source, then
//      implicitly v(i)=0.  v is returned as a sparse vector, and v(i) is not
//      an entry in this vector.

// This algorithm can use the push-pull strategy, which requires both A and
// AT=A' to be passed in.  If the graph is known to be symmetric, then the same
// matrix A can be passed in for both arguments.  Results are undefined if AT
// is not the transpose of A.

// If only A or AT is passed in, then only a single strategy will be used: push
// or pull, but not both.  In general, push-only performs well.  A pull-only
// strategy is possible but it is exceedingly slow.  Assuming A and AT are both
// in CSR format, then (let s = source node):

//      LAGraph_bfs_pushpull (..., A, AT, s, ...) ;    // push-pull (fastest)
//      LAGraph_bfs_pushpull (..., A, NULL, s, ...) ;  // push-only (good)
//      LAGraph_bfs_pushpull (..., NULL, AT, s, ...) ; // pull-only (slow!)

// If A and AT are both in CSC format, then:

//      LAGraph_bfs_pushpull (..., A, AT, s, ...) ;    // push-pull (fastest)
//      LAGraph_bfs_pushpull (..., NULL, AT, s, ...) ; // push-only (good)
//      LAGraph_bfs_pushpull (..., A, NULL, s, ...) ;  // pull-only (slow!)

// Since the pull-only method is exceedingly slow, SuiteSparse:GraphBLAS
// detects this case and refuses to do it.

// The basic step of this algorithm computes A'*q where q is the 'queue' of
// nodes in the current level.  This can be done with GrB_vxm(q,A) = (q'*A)' =
// A'*q, or by GrB_mxv(AT,q) = AT*q = A'*q.  Both steps compute the same thing,
// just in a different way.  In GraphBLAS, unlike MATLAB, a GrB_Vector is
// simultaneously a row and column vector, so q and q' are interchangeable.

// To implement an efficient BFS using GraphBLAS, an assumption must be made in
// LAGraph about how the matrix is stored, whether by row or by column (or
// perhaps some other opaque data structure).  The storage format has a huge
// impact on the relative performance of vxm(q,A) and mxv(AT,q).

// Storing A by row, if A(i,j) is the edge (i,j), means that A(i,:) is easily
// accessible.  In terms of the graph A, this means that the out-adjacency
// list of node i can be traversed in time O(out-degree of node i).

// If AT is stored by row, then AT(i,:) is the in-adjacency list of node i,
// and traversing row i of AT can be done in O(in-degree of node i) time.

// The CSR (Compressed Sparse Row) format is the default for
// SuiteSparse:GraphBLAS, but no assumption can be made about any particular
// GraphBLAS library implementation.

// If A and AT are both stored by column instead, then A(i,:) is not easy to
// access.  Instead, A(:,i) is the easily-accessible in-adjacency of node i,
// and AT(:,i) is the out-adjacency.

// A push step requires the out-adjacencies of each node, whereas
// a pull step requires the in-adjacencies of each node.

//      vxm(q,A)  = A'*q, with A  stored by row: a push step
//      mxv(AT,q) = A'*q, with AT stored by row: a pull step
//      vxm(q,A)  = A'*q, with A  stored by col: a pull step
//      mxv(AT,q) = A'*q, with AT stored by col: a push step

// The GraphBLAS data structure is opaque.  An implementation may decide to
// store the matrix A in both formats, internally, so that it can easily
// traverse both in- and out-adjacencies of each node (equivalently, A(i,:) and
// A(:,i) can both be easily traversed).  This would make a push-pull BFS easy
// to implement using just the opaque GrB_Matrix A, but it doubles the storage.
// Deciding which format to use automatically is not a simple task,
// particularly since the decision must work well throughout GraphBLAS, not
// just for the BFS.

// MATLAB stores its sparse matrices in CSC format (Compressed Sparse Column).
// As a result, the MATLAB expression x=AT*q is a push step, computed using a
// saxpy-based algorithm internally, and x=A'*q is a pull step, computed using
// a dot product.

// SuiteSparse:GraphBLAS can store a matrix in either format, but this requires
// an extension to the GraphBLAS C API (GxB_set (A, GxB_FORMAT, f)), where
// f = GxB_BY_ROW (that is, CSR) or GxB_BY_COL (that is, CSC).  The library
// could be augmented in the future with f = GxB_BY_BOTH.  It currently does
// not select the format automatically.  As a result, if GxB_set is not used,
// all its GrB_Matrix objects are stored by row (CSR).

// SuiteSparse:GraphBLAS allows the user to query (via GxB_get) and set (via
// GxB_set) the format, whether by row or by column.  The hypersparsity of
// A is selected automatically, with optional hints from the user application,
// but a selection between hypersparsity vs standard CSR and CSC has no effect
// on the push vs pull decision made here.

// The push/pull and saxpy/dot connection can be described as follows.
// Assume for these first two examples that MATLAB stores its matrices in CSR
// format, where accessing A(i,:) is fast.

// If A is stored by row, then x = vxm(q,A) = q'*A can be written in MATLAB
// notation as:

/*
    function x = vxm (q,A)
    % a push step: compute x = q'*A where q is a column vector
    x = sparse (1,n)
    for i = 1:n
        % a saxpy operation, using the ith row of A and the scalar q(i)
        x = x + q (i) * A (i,:)
    end
*/

// If AT is stored by row, then x = mxv(AT,q) = AT*q = A'*q becomes
// a dot product:

/*
    function x = mxv (AT,q)
    % a pull step: compute x = AT*q where q is a column vector
    for i = 1:n
        % a dot-product of the ith row of AT and the column vector q
        x (i) = AT (i,:) * q
    end
*/

// The above snippets describe how SuiteSparse:GraphBLAS computes vxm(q,A) and
// mxv(AT,q) by default, where A and AT are stored by row by default.  However,
// they would be very slow in MATLAB, since it stores its sparse matrices in
// CSC format.  In that case, if A is stored by column and thus accessing
// A(:,j) is efficient, then x = vxm(q,A) = q'*A becomes the dot product
// instead.  These two snippets assume the matrices are both in CSC format, and
// thus make more efficient use of MATLAB:

/*
    function x = vxm (q,A)
    % a pull step: compute x = q'*A where q is a column vector
    for j = 1:n
        % a dot product of the row vector q' and the jth column of A
        x (j) = q' * A (:,j)
    end
*/

// If AT is stored by column, then x = mxv(AT,q) is

/*
    function x = mxv (AT,q)
    % a push step: compute x = AT*q where q is a column vector
    for j = 1:n
        % a saxpy operation, using the jth column of AT and the scalar q(j)
        x = x + AT (:,j) * q (j)
    end
*/

// In MATLAB, if q is a sparse column vector and A is a sparse matrix, then
// x=A*q does in fact use a saxpy-based method, internally, and x=A'*q uses a
// dot product.  You can view the code used internally in MATLAB for its sparse
// matrix multiplication in the SuiteSparse/MATLAB_Tools/SSMULT and SFMULT
// packages, at http://suitesparse.com.

// This raises an interesting puzzle for LAGraph, which is intended to be a
// graph library that can be run on any implementation of GraphBLAS.  There are
// no mechanisms in the GraphBLAS C API for LAGraph (or other external packages
// or user applications) to provide hints to GraphBLAS.  Likewise, there are no
// query mechanisms where LAGraph can ask GraphBLAS how its matrices might be
// stored (LAGraph asks, "Is A(i,:) fast?  Or A(:,j)?  Or both?"; the answer
// from GraphBLAS is silence).  The GraphBLAS data structure is opaque, and it
// does not answer this query.

// There are two solutions to this puzzle.  The most elegant one is for
// GraphBLAS to handle all this internally, and change formats as needed.  It
// could choose to store A in both CSR and CSC format, or use an entirely
// different data structure, and it would make the decision between the push or
// pull, at each step of the BFS.  This is not a simple task since the API is
// complex.  Furthermore, the selection of the data structure for A has
// implications on all other GraphBLAS operations (submatrix assignment and
// extraction, for example).

// However, if A were to be stored in both CSR and CSC format, inside the
// opaque GraphBLAS GrB_Matrix data structure, then LAGraph_bfs_simple would
// become a push-pull BFS.

// The second solution is to allow the user application or library such as
// LAGraph to provide hints and allow it to query the GraphBLAS library.
// There are no such features in the GraphBLAS C API.

// SuiteSparse:GraphBLAS takes the second approach:  It adds two functions that
// are extensions to the API:  GxB_set changes the format (CSR or CSC), and
// GxB_get can query the format.  Even with this simplification,
// SuiteSparse:GraphBLAS uses 24 different algorithmic variants inside GrB_mxm
// (per semiring), and selects between them automatically.  By default, all of
// its matrices are stored in CSR format (either sparse or hypersparse,
// selected automatically).  So if no GxB_* extensions are used, all matrices
// are in CSR format.

// If a GraphBLAS library other than SuiteSparse:GraphBLAS is in use, this
// particular function assumes that its input matrices are in CSR format, or at
// least A(i,:) and AT(i,:) can be easily accessed.  With this assumption, it
// is the responsibility of this function to select between using a push or a
// pull, for each step in the BFS.

// The following analysis assumes CSR format, and it assumes that dot-product
// (a pull step) can terminate early via a short-circuit rule with the OR
// monoid, as soon as it encounters a TRUE value.  This cuts the time for the
// dot-product.  Not all GraphBLAS libraries may use this, but SuiteSparse:
// GraphBLAS does (in version 2.3.0 and later).  Early termination cannot be
// done for the saxpy (push step) method.

// The work done by the push method (saxpy) is very predictable.  BFS uses a
// complemented mask.  There is no simple way to exploit a complemented mask,
// and saxpy has no early termination rule.  If the set of nodes in the current
// level is q, the work is nnz(A(q,:)).  If d = nnz(A)/n is the average degree,
// this becomes d*nq where nq = length (q):

//      pushwork = d*nq

// The work done by the pull (dot product) method is less predictable.  It can
// exploit the complemented mask, and so it only computes (n-nvisited) dot
// products, if nvisited is the # of nodes visited so far (in all levels).
// With no early-termination, the dot product will take d * log2 (nq) time,
// assuming that q is large and a binary search is used internally.  That is,
// the dot product will scan through the d entries in A(i,:), and do a binary
// search for each entry in q.  To account for the higher constant of a binary
// search, log2(nq) is replaced with (3*(1+log2(nq))).  With early termination,
// d is an overestimate.  If the nodes are randomly marked, the probability of
// each node being marked is nvisited/n.  The expected number of trials until
// success, for a sequence of events with probability p, is 1/p.  Thus, the
// expected number of iterations in a dot product before an early termination
// is 1/p = n/(nvisited+1), where +1 is added to avoid a divide by zero.
// However, it cannot exceed d.  Thus, the total work for the dot product
// (pull) method can be estimated as:

//      per_dot = min (d, n / (nvisited+1))
//      pullwork = (n-nvisited) * per_dot * (3 * (1 + log2 ((double) nq)))

// The above expressions are valid for SuiteSparse:GraphBLAS v2.3.0 and later,
// and may be reasonable for other GraphBLAS implementations.  Push or pull
// is selected as the one with the least work.

// TODO: change the formula for v3.2.0

// The push/pull decision requires that both A and AT be passed in, but this
// function can use just one or the other.  If only A is passed in and AT is
// NULL, then only vxm(q,A) will be used (a push step if A is CSR, or a pull
// step if A is CSC).  If only AT is passed in and A is NULL, then only
// mxv(AT,q) will be used (a pull step if AT is CSR, or a push step if AT is
// CSC).

// In general, while a push-pull strategy is the fastest, a push-only BFS will
// give good performance.  In particular, the time to compute AT=A' plus the
// time for the push-pull BFS is typically higher than just a push-only BFS.
// This is why this function does not compute AT=A'.  To take advantage of the
// push-pull method, both A and AT must already be available, with the cost to
// construct them amortized across other computations such as this one.

// A pull-only strategy will be *exceedingly* slow.

// The input matrix A must be square.  It can be non-binary, but best
// performance will be obtained if it is GrB_BOOL.  It can have explicit
// entries equal to zero.  These are safely ignored, and are treated as
// non-edges.

// SuiteSparse:GraphBLAS can detect the CSR vs CSC format of its inputs.
// In this case, if both matrices are provided, they must be in the same
// format (both GxB_BY_ROW or both GxB_BY_COL).  If the matrices are in CSC
// format, vxm(q,A) is the pull step and mxv(AT,q) is the push step.
// If only A or AT are provided, and the result is a pull-only algorithm,
// an error is returned.
// References: // Carl Yang, Aydin Buluc, and John D. Owens. 2018. Implementing Push-Pull // Efficiently in GraphBLAS. In Proceedings of the 47th International // Conference on Parallel Processing (ICPP 2018). ACM, New York, NY, USA, // Article 89, 11 pages. DOI: https://doi.org/10.1145/3225058.3225122 // Scott Beamer, Krste Asanovic and David A. Patterson, // The GAP Benchmark Suite, http://arxiv.org/abs/1508.03619, 2015. // http://gap.cs.berkeley.edu/ #define LAGRAPH_FREE_ALL \ { \ GrB_free (&v) ; \ GrB_free (&t) ; \ GrB_free (&q) ; \ GrB_free (&pi) ; \ } #define LAGRAPH_ERROR(message,info) \ { \ fprintf (stderr, "LAGraph error: %s\n[%d]\nFile: %s Line: %d\n", \ message, info, __FILE__, __LINE__) ; \ LAGRAPH_FREE_ALL ; \ return (info) ; \ } #define LAGRAPH_MAX(x,y) (((x) > (y)) ? (x) : (y)) #define LAGRAPH_MIN(x,y) (((x) < (y)) ? (x) : (y)) GrB_Info LAGraph_bfs_pushpull // push-pull BFS, or push-only if AT = NULL ( GrB_Vector *v_output, // v(i) is the BFS level of node i in the graph GrB_Vector *pi_output, // pi(i) = p+1 if p is the parent of node i. // if NULL, the parent is not computed. GrB_Matrix A, // input graph, treated as if boolean in semiring GrB_Matrix AT, // transpose of A (optional; push-only if NULL) int64_t source, // starting node of the BFS int64_t max_level, // optional limit of # levels to search bool vsparse // if true, v is expected to be very sparse ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GrB_Info info ; GrB_Vector q = NULL ; // nodes visited at each level GrB_Vector v = NULL ; // result vector GrB_Vector t = NULL ; // temporary vector GrB_Vector pi = NULL ; // parent vector if(v_output == NULL || (A == NULL && AT == NULL)) { // required output argument is missing LAGRAPH_ERROR("required arguments are NULL", GrB_NULL_POINTER) ; } (*v_output) = NULL ; bool compute_tree = (pi_output != NULL) ; GrB_Descriptor desc_s = GrB_DESC_S ; GrB_Descriptor desc_sc = GrB_DESC_SC ; GrB_Descriptor desc_rc = GrB_DESC_RC ; GrB_Descriptor desc_r = GrB_DESC_R ; GrB_Index nrows, ncols, nvalA, ignore, nvals ; // A is provided. AT may or may not be provided GrB_Matrix_nrows(&nrows, A) ; GrB_Matrix_ncols(&ncols, A) ; GrB_Matrix_nvals(&nvalA, A) ; bool use_vxm_with_A = true ; // push/pull requires both A and AT bool push_pull = (A != NULL && AT != NULL) ; if(nrows != ncols) { // A must be square LAGRAPH_ERROR("A must be square", GrB_NULL_POINTER) ; } //-------------------------------------------------------------------------- // initializations //-------------------------------------------------------------------------- GrB_Index n = nrows ; int nthreads; Config_Option_get(Config_OPENMP_NTHREAD, &nthreads); nthreads = LAGRAPH_MIN(n / 4096, nthreads) ; nthreads = LAGRAPH_MAX(nthreads, 1) ; // just traverse from the source node max_level = (max_level <= 0) ? n : LAGRAPH_MIN(n, max_level) ; // create an empty vector v GrB_Type int_type = (n > INT32_MAX) ? GrB_INT64 : GrB_INT32 ; GrB_Vector_new(&v, int_type, n) ; // make v dense if requested int64_t vlimit = LAGRAPH_MAX(256, sqrt((double) n)) ; if(!vsparse) { // v is expected to have many entries, so convert v to dense. // If the guess is wrong, v can be made dense later on. 
GrB_assign(v, NULL, NULL, 0, GrB_ALL, n, NULL) ; } GrB_Semiring first_semiring, second_semiring ; if(compute_tree) { // create an integer vector q, and set q(source) to source+1 GrB_Vector_new(&q, int_type, n) ; GrB_Vector_setElement(q, source + 1, source) ; if(n > INT32_MAX) { // terminates as soon as it finds any parent; nondeterministic first_semiring = GxB_ANY_FIRST_INT64 ; second_semiring = GxB_ANY_SECOND_INT64 ; } else { // terminates as soon as it finds any parent; nondeterministic first_semiring = GxB_ANY_FIRST_INT32 ; second_semiring = GxB_ANY_SECOND_INT32 ; } // create the empty parent vector GrB_Vector_new(&pi, int_type, n) ; if(!vsparse) { // make pi a dense vector of all zeros GrB_assign(pi, NULL, NULL, 0, GrB_ALL, n, NULL) ; } // pi (source) = source+1 denotes a root of the BFS tree GrB_Vector_setElement(pi, source + 1, source) ; } else { // create a boolean vector q, and set q(source) to true GrB_Vector_new(&q, GrB_BOOL, n) ; GrB_Vector_setElement(q, true, source) ; // terminates as soon as it finds any pair first_semiring = GxB_ANY_PAIR_BOOL ; second_semiring = GxB_ANY_PAIR_BOOL ; } // average node degree double d = (n == 0) ? 0 : (((double) nvalA) / (double) n) ; int64_t nvisited = 0 ; // # nodes visited so far GrB_Index nq = 1 ; // number of nodes in the current level //-------------------------------------------------------------------------- // BFS traversal and label the nodes //-------------------------------------------------------------------------- for(int64_t level = 1 ; ; level++) { //---------------------------------------------------------------------- // set v to the current level, for all nodes in q //---------------------------------------------------------------------- // v<q> = level: set v(i) = level for all nodes i in q GrB_assign(v, q, NULL, level, GrB_ALL, n, desc_s) ; //---------------------------------------------------------------------- // check if done //---------------------------------------------------------------------- nvisited += nq ; if(nq == 0 || nvisited == n || level >= max_level) break ; //---------------------------------------------------------------------- // check if v should be converted to dense //---------------------------------------------------------------------- if(vsparse && nvisited > vlimit) { // Convert v from sparse to dense to speed up the rest of the work. // If this case is triggered, it would have been faster to pass in // vsparse = false on input. // v <!v> = 0 GrB_assign(v, v, NULL, 0, GrB_ALL, n, desc_sc) ; GrB_Vector_nvals(&ignore, v) ; if(compute_tree) { // Convert pi from sparse to dense, to speed up the work. 
// pi<!pi> = 0 GrB_assign(pi, pi, NULL, 0, GrB_ALL, n, desc_sc) ; GrB_Vector_nvals(&ignore, pi) ; } vsparse = false ; } //---------------------------------------------------------------------- // select push vs pull //---------------------------------------------------------------------- if(push_pull) { double pushwork = d * nq ; double expected = (double) n / (double)(nvisited + 1) ; double per_dot = LAGRAPH_MIN(d, expected) ; double binarysearch = (3 * (1 + log2((double) nq))) ; double pullwork = (n - nvisited) * per_dot * binarysearch ; use_vxm_with_A = (pushwork < pullwork) ; } //---------------------------------------------------------------------- // q = next level of the BFS //---------------------------------------------------------------------- if(use_vxm_with_A) { // q'<!v> = q'*A // this is a push step if A is in CSR format; pull if CSC GrB_vxm(q, v, NULL, first_semiring, q, A, desc_rc) ; } else { // q<!v> = AT*q // this is a pull step if AT is in CSR format; push if CSC GrB_mxv(q, v, NULL, second_semiring, AT, q, desc_rc) ; } //---------------------------------------------------------------------- // move to next level //---------------------------------------------------------------------- if(compute_tree) { //------------------------------------------------------------------ // assign parents //------------------------------------------------------------------ // q(i) currently contains the parent of node i in tree (off by one // so it won't have any zero values, for valued mask). // pi<q> = q GrB_assign(pi, q, NULL, q, GrB_ALL, n, desc_s) ; //------------------------------------------------------------------ // replace q with current node numbers //------------------------------------------------------------------ // TODO this could be a unaryop // q(i) = i+1 for all entries in q. GrB_Index *qi ; if(n > INT32_MAX) { int64_t *qx ; GxB_Vector_export(&q, &int_type, &n, &nq, &qi, (void **)(&qx), NULL) ; int nth = LAGRAPH_MIN(nq / (64 * 1024), nthreads) ; nth = LAGRAPH_MAX(nth, 1) ; #pragma omp parallel for num_threads(nth) schedule(static) for(int64_t k = 0 ; k < nq ; k++) { qx [k] = qi [k] + 1 ; } GxB_Vector_import(&q, int_type, n, nq, &qi, (void **)(&qx), NULL) ; } else { int32_t *qx ; GxB_Vector_export(&q, &int_type, &n, &nq, &qi, (void **)(&qx), NULL) ; int nth = LAGRAPH_MIN(nq / (64 * 1024), nthreads) ; nth = LAGRAPH_MAX(nth, 1) ; #pragma omp parallel for num_threads(nth) schedule(static) for(int32_t k = 0 ; k < nq ; k++) { qx [k] = qi [k] + 1 ; } GxB_Vector_import(&q, int_type, n, nq, &qi, (void **)(&qx), NULL) ; } } else { //------------------------------------------------------------------ // count the nodes in the current level //------------------------------------------------------------------ GrB_Vector_nvals(&nq, q) ; } } //-------------------------------------------------------------------------- // return the parent vector, if computed //-------------------------------------------------------------------------- if(compute_tree) { (*pi_output) = pi ; pi = NULL ; } //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- (*v_output) = v ; // return result v = NULL ; // set to NULL so LAGRAPH_FREE_ALL doesn't free it LAGRAPH_FREE_ALL ; // free all workspace (except for result v) return (GrB_SUCCESS) ; }
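// Illustrative sketch (standalone, values made up): the push-vs-pull
// selection step inside the level loop above, lifted out as a free function.
// The constants follow the work model documented in the header comments:
// push costs about d*nq, while pull costs (n-nvisited) dot products of
// expected length min(d, n/(nvisited+1)), inflated by a binary-search factor.

#include <stdio.h>
#include <math.h>
#include <stdbool.h>

static bool use_push (double d, double n, double nq, double nvisited)
{
    double pushwork = d * nq ;
    double per_dot  = fmin (d, n / (nvisited + 1)) ;
    double pullwork = (n - nvisited) * per_dot * (3 * (1 + log2 (nq))) ;
    return (pushwork < pullwork) ;
}

int main (void)
{
    double n = 1e6, d = 16 ;                            // hypothetical graph
    double frontier [5] = { 1, 1e2, 1e4, 2e5, 1e4 } ;   // nq per level
    double nvisited = 0 ;
    for (int level = 0 ; level < 5 ; level++)
    {
        printf ("level %d: nq=%g -> %s\n", level + 1, frontier [level],
            use_push (d, n, frontier [level], nvisited) ? "push" : "pull") ;
        nvisited += frontier [level] ;
    }
    return (0) ;
}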
utils.c
/* Copyright (C) 2010 The Trustees of Indiana University.                  */
/*                                                                         */
/* Use, modification and distribution is subject to the Boost Software     */
/* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */
/* http://www.boost.org/LICENSE_1_0.txt)                                   */
/*                                                                         */
/*  Authors: Jeremiah Willcock                                             */
/*           Andrew Lumsdaine                                              */

#ifndef __STDC_CONSTANT_MACROS
#define __STDC_CONSTANT_MACROS
#endif
#include "splittable_mrg.h"
#include "graph_generator.h"
#include <stdint.h>
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#ifdef __MTA__
#include <sys/mta_task.h>
#endif
#ifdef GRAPH_GENERATOR_MPI
#include <mpi.h>
#endif
#ifdef GRAPH_GENERATOR_OMP
#include <omp.h>
#endif

#include "utils.h"

#if defined(_OPENMP)
#define OMP(x_) _Pragma(x_)
#else
#define OMP(x_)
#endif

#if defined(HAVE_LIBNUMA)
#include <numa.h>
static int numa_inited = 0;
static int numa_avail = -1;
void *
xmalloc (size_t sz)
{
  void * out;
  if (!numa_inited) {
    OMP("omp critical") {
      numa_inited = 1;
      /* Normalize to a boolean: numa_available() returns a negative value
       * when NUMA is unavailable, and zero otherwise. */
      numa_avail = (numa_available () >= 0);
    }
  }
  if (numa_avail)
    out = numa_alloc (sz);
  else
    out = malloc (sz);
  if (!out) {
    fprintf(stderr, "Out of memory trying to allocate %zu byte(s)\n", sz);
    abort ();
  }
  return out;
}
void *
xcalloc (size_t n, size_t sz)
{
  void * out;
  if (!numa_inited) {
    OMP("omp critical") {
      numa_inited = 1;
      numa_avail = (numa_available () >= 0);
    }
  }
  if (numa_avail) {
    size_t to_alloc;
    to_alloc = n * sz;
    if (to_alloc < n || to_alloc < sz) {
      fprintf(stderr, "Allocation size out of range for %zu items of %zu byte(s)\n", n, sz);
      abort ();
    }
    out = numa_alloc (n * sz);
#if defined(_OPENMP)
#pragma omp parallel for
    for (size_t k = 0; k < n; ++k)
      memset ((char *) out + k * sz, 0, sz); /* void* arithmetic is non-standard */
#else
    memset (out, 0, n * sz);
#endif
  } else
    out = calloc (n, sz);
  if (!out) {
    fprintf(stderr, "Out of memory trying to allocate/clear %zu items of %zu byte(s)\n", n, sz);
    abort ();
  }
  return out;
}
void
xfree (void * p, size_t sz)
{
  if (!p) return;
  if (numa_avail > 0)
    numa_free (p, sz);
  else
    free (p);
}
#else
void *
xmalloc (size_t sz)
{
  void * out;
  out = malloc (sz);
  if (!out) {
    fprintf(stderr, "Out of memory trying to allocate %zu byte(s)\n", sz);
    abort ();
  }
  return out;
}
void *
xcalloc (size_t n, size_t sz)
{
  void * out;
  out = calloc (n, sz);
  if (!out) {
    fprintf(stderr, "Out of memory trying to allocate/clear %zu items of %zu byte(s)\n", n, sz);
    abort ();
  }
  return out;
}
void
xfree (void * p, size_t sz)
{
  free (p);
}
#endif

/* Spread the two 64-bit numbers into five nonzero values in the correct
 * range.  The arguments must be 64-bit, matching the caller in main.c: the
 * shifts below pick out bits 30..59 and 60..63, which a 32-bit argument
 * does not have (and shifting a 32-bit value by 60 is undefined behavior). */
void make_mrg_seed(uint64_t userseed1, uint64_t userseed2, uint_fast32_t* seed) {
  seed[0] = (uint32_t)(userseed1 & UINT32_C(0x3FFFFFFF)) + 1;
  seed[1] = (uint32_t)((userseed1 >> 30) & UINT32_C(0x3FFFFFFF)) + 1;
  seed[2] = (uint32_t)(userseed2 & UINT32_C(0x3FFFFFFF)) + 1;
  seed[3] = (uint32_t)((userseed2 >> 30) & UINT32_C(0x3FFFFFFF)) + 1;
  seed[4] = (uint32_t)((userseed2 >> 60) << 4) + (uint32_t)(userseed1 >> 60) + 1;
}
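/* Illustrative sketch (standalone restatement, assuming the 64-bit seed
 * signature used by the caller in main.c): how make_mrg_seed spreads two
 * user seeds into five nonzero values, each in [1, 2^30] -- 30-bit slices
 * of each seed plus one word built from the leftover high bits. */
#include <stdio.h>
#include <stdint.h>

int main(void) {
  uint64_t s1 = 2, s2 = 3; /* same defaults as main.c */
  uint_fast32_t seed[5];
  int i;
  seed[0] = (uint32_t)(s1 & UINT64_C(0x3FFFFFFF)) + 1;
  seed[1] = (uint32_t)((s1 >> 30) & UINT64_C(0x3FFFFFFF)) + 1;
  seed[2] = (uint32_t)(s2 & UINT64_C(0x3FFFFFFF)) + 1;
  seed[3] = (uint32_t)((s2 >> 30) & UINT64_C(0x3FFFFFFF)) + 1;
  seed[4] = (uint32_t)((s2 >> 60) << 4) + (uint32_t)(s1 >> 60) + 1;
  for (i = 0; i < 5; ++i)
    printf("seed[%d] = %lu\n", i, (unsigned long)seed[i]);
  return 0;
}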
compare.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO M M PPPP AAA RRRR EEEEE % % C O O MM MM P P A A R R E % % C O O M M M PPPP AAAAA RRRR EEE % % C O O M M P A A R R E % % CCCC OOO M M P A A R R EEEEE % % % % % % MagickCore Image Comparison Methods % % % % Software Design % % Cristy % % December 2003 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/compare.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/statistic.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p a r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompareImages() compares one or more pixel channels of an image to a % reconstructed image and returns the difference image. % % The format of the CompareImages method is: % % Image *CompareImages(const Image *image,const Image *reconstruct_image, % const MetricType metric,double *distortion,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o metric: the metric. % % o distortion: the computed distortion between the images. % % o exception: return any errors or warnings in this structure. % */ static size_t GetImageChannels(const Image *image) { register ssize_t i; size_t channels; channels=0; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) != 0) channels++; } return(channels == 0 ? 
(size_t) 1 : channels); } MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image, const MetricType metric,double *distortion,ExceptionInfo *exception) { CacheView *highlight_view, *image_view, *reconstruct_view; const char *artifact; double fuzz; Image *clone_image, *difference_image, *highlight_image; MagickBooleanType status; PixelInfo highlight, lowlight, masklight; RectangleInfo geometry; size_t columns, rows; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickCoreSignature); assert(distortion != (double *) NULL); *distortion=0.0; if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=GetImageDistortion(image,reconstruct_image,metric,distortion, exception); if (status == MagickFalse) return((Image *) NULL); columns=MagickMax(image->columns,reconstruct_image->columns); rows=MagickMax(image->rows,reconstruct_image->rows); SetGeometry(image,&geometry); geometry.width=columns; geometry.height=rows; clone_image=CloneImage(image,0,0,MagickTrue,exception); if (clone_image == (Image *) NULL) return((Image *) NULL); (void) SetImageMask(clone_image,ReadPixelMask,(Image *) NULL,exception); difference_image=ExtentImage(clone_image,&geometry,exception); clone_image=DestroyImage(clone_image); if (difference_image == (Image *) NULL) return((Image *) NULL); (void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel,exception); highlight_image=CloneImage(image,columns,rows,MagickTrue,exception); if (highlight_image == (Image *) NULL) { difference_image=DestroyImage(difference_image); return((Image *) NULL); } status=SetImageStorageClass(highlight_image,DirectClass,exception); if (status == MagickFalse) { difference_image=DestroyImage(difference_image); highlight_image=DestroyImage(highlight_image); return((Image *) NULL); } (void) SetImageMask(highlight_image,ReadPixelMask,(Image *) NULL,exception); (void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel,exception); (void) QueryColorCompliance("#f1001ecc",AllCompliance,&highlight,exception); artifact=GetImageArtifact(image,"compare:highlight-color"); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,&highlight,exception); (void) QueryColorCompliance("#ffffffcc",AllCompliance,&lowlight,exception); artifact=GetImageArtifact(image,"compare:lowlight-color"); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,&lowlight,exception); (void) QueryColorCompliance("#888888cc",AllCompliance,&masklight,exception); artifact=GetImageArtifact(image,"compare:masklight-color"); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,&masklight,exception); /* Generate difference image. 
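    Pixels are compared channel by channel against the fuzz threshold:
    matching pixels are painted with the lowlight color, differing pixels
    with the highlight color, and read-masked pixels with the masklight
    color; the highlight image is then composited over the extended
    difference image.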
*/ status=MagickTrue; fuzz=GetFuzzyColorDistance(image,reconstruct_image); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); highlight_view=AcquireAuthenticCacheView(highlight_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,highlight_image,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { MagickBooleanType sync; register const Quantum *magick_restrict p, *magick_restrict q; register Quantum *magick_restrict r; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); r=QueueCacheViewAuthenticPixels(highlight_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL) || (r == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; MagickStatusType difference; register ssize_t i; if ((GetPixelReadMask(image,p) == 0) || (GetPixelReadMask(reconstruct_image,q) == 0)) { SetPixelViaPixelInfo(highlight_image,&masklight,r); p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); r+=GetPixelChannels(highlight_image); continue; } difference=MagickFalse; Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q); if ((distance*distance) > fuzz) { difference=MagickTrue; break; } } if (difference == MagickFalse) SetPixelViaPixelInfo(highlight_image,&lowlight,r); else SetPixelViaPixelInfo(highlight_image,&highlight,r); p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); r+=GetPixelChannels(highlight_image); } sync=SyncCacheViewAuthenticPixels(highlight_view,exception); if (sync == MagickFalse) status=MagickFalse; } highlight_view=DestroyCacheView(highlight_view); reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); (void) CompositeImage(difference_image,highlight_image,image->compose, MagickTrue,0,0,exception); (void) SetImageAlphaChannel(difference_image,OffAlphaChannel,exception); highlight_image=DestroyImage(highlight_image); if (status == MagickFalse) difference_image=DestroyImage(difference_image); return(difference_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e D i s t o r t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageDistortion() compares one or more pixel channels of an image to a % reconstructed image and returns the specified distortion metric. % % The format of the GetImageDistortion method is: % % MagickBooleanType GetImageDistortion(const Image *image, % const Image *reconstruct_image,const MetricType metric, % double *distortion,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o metric: the metric. 
% % o distortion: the computed distortion between the images. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType GetAbsoluteDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; double fuzz; MagickBooleanType status; size_t columns, rows; ssize_t y; /* Compute the absolute difference in pixels between two images. */ status=MagickTrue; fuzz=GetFuzzyColorDistance(image,reconstruct_image); rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t j, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; continue; } (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; MagickBooleanType difference; register ssize_t i; if (GetPixelWriteMask(image,p) == 0) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } difference=MagickFalse; Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q); if ((distance*distance) > fuzz) { channel_distortion[i]++; difference=MagickTrue; } } if (difference != MagickFalse) channel_distortion[CompositePixelChannel]++; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetAbsoluteError) #endif for (j=0; j <= MaxPixelChannels; j++) distortion[j]+=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); return(status); } static MagickBooleanType GetFuzzDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; double area; MagickBooleanType status; register ssize_t j; size_t columns, rows; ssize_t y; status=MagickTrue; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); area=0.0; image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,rows,1) reduction(+:area) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; 
register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; register ssize_t i; if ((GetPixelReadMask(image,p) == 0) || (GetPixelReadMask(reconstruct_image,q) == 0)) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image, channel,q)); channel_distortion[i]+=distance*distance; channel_distortion[CompositePixelChannel]+=distance*distance; } area++; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetFuzzDistortion) #endif for (j=0; j <= MaxPixelChannels; j++) distortion[j]+=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); area=PerceptibleReciprocal(area); for (j=0; j <= MaxPixelChannels; j++) distortion[j]*=area; distortion[CompositePixelChannel]/=(double) GetImageChannels(image); distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]); return(status); } static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; double area; MagickBooleanType status; register ssize_t j; size_t columns, rows; ssize_t y; status=MagickTrue; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); area=0.0; image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,rows,1) reduction(+:area) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; continue; } (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; register ssize_t i; if ((GetPixelReadMask(image,p) == 0) || (GetPixelReadMask(reconstruct_image,q) == 0)) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) 
GetPixelChannels(image); i++) { double distance; PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance=QuantumScale*fabs(Sa*p[i]-Da*GetPixelChannel(reconstruct_image, channel,q)); channel_distortion[i]+=distance; channel_distortion[CompositePixelChannel]+=distance; } area++; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetMeanAbsoluteError) #endif for (j=0; j <= MaxPixelChannels; j++) distortion[j]+=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); area=PerceptibleReciprocal(area); for (j=0; j <= MaxPixelChannels; j++) distortion[j]*=area; distortion[CompositePixelChannel]/=(double) GetImageChannels(image); return(status); } static MagickBooleanType GetMeanErrorPerPixel(Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; MagickBooleanType status; double area, maximum_error, mean_error; size_t columns, rows; ssize_t y; status=MagickTrue; area=0.0; maximum_error=0.0; mean_error=0.0; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); for (y=0; y < (ssize_t) rows; y++) { register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; break; } for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; register ssize_t i; if ((GetPixelReadMask(image,p) == 0) || (GetPixelReadMask(reconstruct_image,q) == 0)) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance=fabs(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q)); distortion[i]+=distance; distortion[CompositePixelChannel]+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; area++; } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); image->error.mean_error_per_pixel=distortion[CompositePixelChannel]/area; image->error.normalized_mean_error=QuantumScale*QuantumScale*mean_error/area; image->error.normalized_maximum_error=QuantumScale*maximum_error; return(status); } static MagickBooleanType GetMeanSquaredDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) 
{ CacheView *image_view, *reconstruct_view; double area; MagickBooleanType status; register ssize_t j; size_t columns, rows; ssize_t y; status=MagickTrue; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); area=0.0; image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,rows,1) reduction(+:area) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; continue; } (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; register ssize_t i; if ((GetPixelReadMask(image,p) == 0) || (GetPixelReadMask(reconstruct_image,q) == 0)) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image, channel,q)); channel_distortion[i]+=distance*distance; channel_distortion[CompositePixelChannel]+=distance*distance; } area++; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetMeanSquaredError) #endif for (j=0; j <= MaxPixelChannels; j++) distortion[j]+=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); area=PerceptibleReciprocal(area); for (j=0; j <= MaxPixelChannels; j++) distortion[j]*=area; distortion[CompositePixelChannel]/=GetImageChannels(image); return(status); } static MagickBooleanType GetNormalizedCrossCorrelationDistortion( const Image *image,const Image *reconstruct_image,double *distortion, ExceptionInfo *exception) { #define SimilarityImageTag "Similarity/Image" CacheView *image_view, *reconstruct_view; ChannelStatistics *image_statistics, *reconstruct_statistics; double area; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; size_t columns, rows; ssize_t y; /* Normalize to account for variation due to lighting and exposure condition. 
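    The quantity accumulated below is the normalized cross correlation,

      NCC = sum((Sa*I - mean(I))*(Da*R - mean(R))) / (N*sigma_I*sigma_R),

    computed per channel: the mean-subtracted products are summed in the
    loops that follow, and the division by the standard deviations happens
    at the end of this function.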
*/ image_statistics=GetImageStatistics(image,exception); reconstruct_statistics=GetImageStatistics(reconstruct_image,exception); if ((image_statistics == (ChannelStatistics *) NULL) || (reconstruct_statistics == (ChannelStatistics *) NULL)) { if (image_statistics != (ChannelStatistics *) NULL) image_statistics=(ChannelStatistics *) RelinquishMagickMemory( image_statistics); if (reconstruct_statistics != (ChannelStatistics *) NULL) reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory( reconstruct_statistics); return(MagickFalse); } status=MagickTrue; progress=0; for (i=0; i <= MaxPixelChannels; i++) distortion[i]=0.0; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); area=0.0; image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); for (y=0; y < (ssize_t) rows; y++) { register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; break; } for (x=0; x < (ssize_t) columns; x++) { if ((GetPixelReadMask(image,p) == 0) || (GetPixelReadMask(reconstruct_image,q) == 0)) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } area++; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } } area=PerceptibleReciprocal(area); for (y=0; y < (ssize_t) rows; y++) { register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; break; } for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; if ((GetPixelReadMask(image,p) == 0) || (GetPixelReadMask(reconstruct_image,q) == 0)) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*(image->alpha_trait != UndefinedPixelTrait ? GetPixelAlpha(image,p) : OpaqueAlpha); Da=QuantumScale*(reconstruct_image->alpha_trait != UndefinedPixelTrait ? GetPixelAlpha(reconstruct_image,q) : OpaqueAlpha); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) { distortion[i]+=area*QuantumScale*(p[i]- image_statistics[channel].mean)*(GetPixelChannel( reconstruct_image,channel,q)- reconstruct_statistics[channel].mean); } else { distortion[i]+=area*QuantumScale*(Sa*p[i]- image_statistics[channel].mean)*(Da*GetPixelChannel( reconstruct_image,channel,q)- reconstruct_statistics[channel].mean); } } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,SimilarityImageTag,progress++,rows); if (proceed == MagickFalse) { status=MagickFalse; break; } } } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); /* Divide by the standard deviation. 
*/ distortion[CompositePixelChannel]=0.0; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double gamma; PixelChannel channel=GetPixelChannelChannel(image,i); gamma=image_statistics[channel].standard_deviation* reconstruct_statistics[channel].standard_deviation; gamma=PerceptibleReciprocal(gamma); distortion[i]=QuantumRange*gamma*distortion[i]; distortion[CompositePixelChannel]+=distortion[i]*distortion[i]; } distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]/ GetImageChannels(image)); /* Free resources. */ reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory( reconstruct_statistics); image_statistics=(ChannelStatistics *) RelinquishMagickMemory( image_statistics); return(status); } static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; MagickBooleanType status; size_t columns, rows; ssize_t y; status=MagickTrue; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t j, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; continue; } (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; register ssize_t i; if ((GetPixelReadMask(image,p) == 0) || (GetPixelReadMask(reconstruct_image,q) == 0)) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance=QuantumScale*fabs(Sa*p[i]-Da*GetPixelChannel(reconstruct_image, channel,q)); if (distance > channel_distortion[i]) channel_distortion[i]=distance; if (distance > channel_distortion[CompositePixelChannel]) channel_distortion[CompositePixelChannel]=distance; } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetPeakAbsoluteError) #endif for (j=0; j <= MaxPixelChannels; j++) if (channel_distortion[j] > distortion[j]) distortion[j]=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); return(status); } static inline double MagickLog10(const double x) { #define Log10Epsilon (1.0e-11) if (fabs(x) < Log10Epsilon) return(log10(Log10Epsilon)); return(log10(fabs(x))); } static MagickBooleanType GetPeakSignalToNoiseRatio(const Image 
*image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { MagickBooleanType status; register ssize_t i; status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception); for (i=0; i <= MaxPixelChannels; i++) if (fabs(distortion[i]) >= MagickEpsilon) distortion[i]=20.0*MagickLog10((double) 1.0/sqrt(distortion[i])); return(status); } static MagickBooleanType GetPerceptualHashDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { ChannelPerceptualHash *channel_phash, *reconstruct_phash; const char *artifact; MagickBooleanType normalize; ssize_t channel; /* Compute perceptual hash in the sRGB colorspace. */ channel_phash=GetImagePerceptualHash(image,exception); if (channel_phash == (ChannelPerceptualHash *) NULL) return(MagickFalse); reconstruct_phash=GetImagePerceptualHash(reconstruct_image,exception); if (reconstruct_phash == (ChannelPerceptualHash *) NULL) { channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory( channel_phash); return(MagickFalse); } artifact=GetImageArtifact(image,"phash:normalize"); normalize=(artifact == (const char *) NULL) || (IsStringTrue(artifact) == MagickFalse) ? MagickFalse : MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) #endif for (channel=0; channel < MaxPixelChannels; channel++) { double difference; register ssize_t i; difference=0.0; for (i=0; i < MaximumNumberOfImageMoments; i++) { double alpha, beta; register ssize_t j; for (j=0; j < (ssize_t) channel_phash[0].number_colorspaces; j++) { alpha=channel_phash[channel].phash[j][i]; beta=reconstruct_phash[channel].phash[j][i]; if (normalize == MagickFalse) difference+=(beta-alpha)*(beta-alpha); else difference=sqrt((beta-alpha)*(beta-alpha)/ channel_phash[0].number_channels); } } distortion[channel]+=difference; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetPerceptualHashDistortion) #endif distortion[CompositePixelChannel]+=difference; } /* Free resources. */ reconstruct_phash=(ChannelPerceptualHash *) RelinquishMagickMemory( reconstruct_phash); channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(channel_phash); return(MagickTrue); } static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { MagickBooleanType status; register ssize_t i; status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception); for (i=0; i <= MaxPixelChannels; i++) distortion[i]=sqrt(distortion[i]); return(status); } MagickExport MagickBooleanType GetImageDistortion(Image *image, const Image *reconstruct_image,const MetricType metric,double *distortion, ExceptionInfo *exception) { double *channel_distortion; MagickBooleanType status; size_t length; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickCoreSignature); assert(distortion != (double *) NULL); *distortion=0.0; if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); /* Get image distortion. 
*/ length=MaxPixelChannels+1; channel_distortion=(double *) AcquireQuantumMemory(length, sizeof(*channel_distortion)); if (channel_distortion == (double *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(channel_distortion,0,length* sizeof(*channel_distortion)); switch (metric) { case AbsoluteErrorMetric: { status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion, exception); break; } case FuzzErrorMetric: { status=GetFuzzDistortion(image,reconstruct_image,channel_distortion, exception); break; } case MeanAbsoluteErrorMetric: { status=GetMeanAbsoluteDistortion(image,reconstruct_image, channel_distortion,exception); break; } case MeanErrorPerPixelErrorMetric: { status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion, exception); break; } case MeanSquaredErrorMetric: { status=GetMeanSquaredDistortion(image,reconstruct_image, channel_distortion,exception); break; } case NormalizedCrossCorrelationErrorMetric: default: { status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image, channel_distortion,exception); break; } case PeakAbsoluteErrorMetric: { status=GetPeakAbsoluteDistortion(image,reconstruct_image, channel_distortion,exception); break; } case PeakSignalToNoiseRatioErrorMetric: { status=GetPeakSignalToNoiseRatio(image,reconstruct_image, channel_distortion,exception); break; } case PerceptualHashErrorMetric: { status=GetPerceptualHashDistortion(image,reconstruct_image, channel_distortion,exception); break; } case RootMeanSquaredErrorMetric: { status=GetRootMeanSquaredDistortion(image,reconstruct_image, channel_distortion,exception); break; } } *distortion=channel_distortion[CompositePixelChannel]; channel_distortion=(double *) RelinquishMagickMemory(channel_distortion); (void) FormatImageProperty(image,"distortion","%.*g",GetMagickPrecision(), *distortion); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e D i s t o r t i o n s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageDistortions() compares the pixel channels of an image to a % reconstructed image and returns the specified distortion metric for each % channel. % % The format of the GetImageDistortions method is: % % double *GetImageDistortions(const Image *image, % const Image *reconstruct_image,const MetricType metric, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o metric: the metric. % % o exception: return any errors or warnings in this structure. % */ MagickExport double *GetImageDistortions(Image *image, const Image *reconstruct_image,const MetricType metric, ExceptionInfo *exception) { double *channel_distortion; MagickBooleanType status; size_t length; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); /* Get image distortion. 
*/
  length=MaxPixelChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(channel_distortion,0,length*
    sizeof(*channel_distortion));
  status=MagickTrue;
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelErrorMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioErrorMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
  }
  if (status == MagickFalse)
    {
      channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
      return((double *) NULL);
    }
  return(channel_distortion);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s I m a g e s E q u a l                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImagesEqual() compares the pixels of two images and returns immediately
%  if any pixel is not identical.
%
%  The format of the IsImagesEqual method is:
%
%      MagickBooleanType IsImagesEqual(const Image *image,
%        const Image *reconstruct_image,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o reconstruct_image: the reconstruct image.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType IsImagesEqual(const Image *image, const Image *reconstruct_image,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; size_t columns, rows; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickCoreSignature); rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); for (y=0; y < (ssize_t) rows; y++) { register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) columns; x++) { register ssize_t i; if (GetPixelWriteMask(image,p) == 0) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance=fabs(p[i]-(double) GetPixelChannel(reconstruct_image, channel,q)); if (distance >= MagickEpsilon) break; } if (i < (ssize_t) GetPixelChannels(image)) break; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } if (x < (ssize_t) columns) break; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); return(y < (ssize_t) rows ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C o l o r M e t r i c % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageColorMetric() measures the difference between colors at each pixel % location of two images. A value other than 0 means the colors match % exactly. Otherwise an error measure is computed by summing over all % pixels in an image the distance squared in RGB space between each image % pixel and its corresponding pixel in the reconstruct image. The error % measure is assigned to these image members: % % o mean_error_per_pixel: The mean error for any single pixel in % the image. % % o normalized_mean_error: The normalized mean quantization error for % any single pixel in the image. This distance measure is normalized to % a range between 0 and 1. It is independent of the range of red, green, % and blue values in the image. % % o normalized_maximum_error: The normalized maximum quantization % error for any single pixel in the image. This distance measure is % normalized to a range between 0 and 1. It is independent of the range % of red, green, and blue values in your image. % % A small normalized mean square error, accessed as % image->normalized_mean_error, suggests the images are very similar in % spatial layout and color. 
% % The format of the SetImageColorMetric method is: % % MagickBooleanType SetImageColorMetric(Image *image, % const Image *reconstruct_image,ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageColorMetric(Image *image, const Image *reconstruct_image,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; double area, maximum_error, mean_error, mean_error_per_pixel; MagickBooleanType status; size_t columns, rows; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickCoreSignature); area=0.0; maximum_error=0.0; mean_error_per_pixel=0.0; mean_error=0.0; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); for (y=0; y < (ssize_t) rows; y++) { register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) columns; x++) { register ssize_t i; if (GetPixelWriteMask(image,p) == 0) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance=fabs(p[i]-(double) GetPixelChannel(reconstruct_image, channel,q)); if (distance >= MagickEpsilon) { mean_error_per_pixel+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; } area++; } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); image->error.mean_error_per_pixel=(double) (mean_error_per_pixel/area); image->error.normalized_mean_error=(double) (QuantumScale*QuantumScale* mean_error/area); image->error.normalized_maximum_error=(double) (QuantumScale*maximum_error); status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S i m i l a r i t y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SimilarityImage() compares the reference image of the image and returns the % best match offset. In addition, it returns a similarity image such that an % exact match location is completely white and if none of the pixels match, % black, otherwise some gray level in-between. 
%
%  The format of the SimilarityImage method is:
%
%      Image *SimilarityImage(const Image *image,const Image *reference,
%        const MetricType metric,const double similarity_threshold,
%        RectangleInfo *offset,double *similarity,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o reference: find an area of the image that closely resembles this image.
%
%    o metric: the metric.
%
%    o similarity_threshold: minimum distortion for (sub)image match.
%
%    o offset: the best match offset of the reference image within the image.
%
%    o similarity: the computed similarity between the images.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static double GetSimilarityMetric(const Image *image,const Image *reference,
  const MetricType metric,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
  double
    distortion;

  Image
    *similarity_image;

  MagickBooleanType
    status;

  RectangleInfo
    geometry;

  SetGeometry(reference,&geometry);
  geometry.x=x_offset;
  geometry.y=y_offset;
  similarity_image=CropImage(image,&geometry,exception);
  if (similarity_image == (Image *) NULL)
    return(0.0);
  distortion=0.0;
  status=GetImageDistortion(similarity_image,reference,metric,&distortion,
    exception);
  similarity_image=DestroyImage(similarity_image);
  if (status == MagickFalse)
    return(0.0);
  return(distortion);
}

MagickExport Image *SimilarityImage(const Image *image,const Image *reference,
  const MetricType metric,const double similarity_threshold,
  RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
#define SimilarityImageTag  "Similarity/Image"

  CacheView
    *similarity_view;

  Image
    *similarity_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(offset != (RectangleInfo *) NULL);
  SetGeometry(reference,offset);
  *similarity_metric=MagickMaximumValue;
  similarity_image=CloneImage(image,image->columns-reference->columns+1,
    image->rows-reference->rows+1,MagickTrue,exception);
  if (similarity_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(similarity_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      similarity_image=DestroyImage(similarity_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(similarity_image,DeactivateAlphaChannel,
    exception);
  /*
    Measure similarity of reference image against image.
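    This is an exhaustive sliding-window search: for every offset (x,y) the
    reference-sized crop of the image is scored with GetSimilarityMetric(),
    the smallest distortion and its offset are recorded, and each similarity
    pixel is set to QuantumRange*(1-similarity) so that white marks an exact
    match.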
*/
  status=MagickTrue;
  progress=0;
  similarity_view=AcquireAuthenticCacheView(similarity_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    shared(progress,status,similarity_metric) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++)
  {
    double
      similarity;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp flush(similarity_metric)
#endif
    if (*similarity_metric <= similarity_threshold)
      continue;
    q=GetCacheViewAuthenticPixels(similarity_view,0,y,similarity_image->columns,
      1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++)
    {
      register ssize_t
        i;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp flush(similarity_metric)
#endif
      if (*similarity_metric <= similarity_threshold)
        break;
      similarity=GetSimilarityMetric(image,reference,metric,x,y,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_SimilarityImage)
#endif
      {
        /*
          Both the metric rewrite and the best-offset update must share one
          critical section; otherwise the read-compare-write of
          *similarity_metric races between threads.
        */
        if ((metric == NormalizedCrossCorrelationErrorMetric) ||
            (metric == UndefinedErrorMetric))
          similarity=1.0-similarity;
        if (similarity < *similarity_metric)
          {
            offset->x=x;
            offset->y=y;
            *similarity_metric=similarity;
          }
      }
      if (metric == PerceptualHashErrorMetric)
        similarity=MagickMin(0.01*similarity,1.0);
      if (GetPixelWriteMask(similarity_image,q) == 0)
        {
          SetPixelBackgoundColor(similarity_image,q);
          q+=GetPixelChannels(similarity_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait similarity_traits=GetPixelChannelTraits(similarity_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (similarity_traits == UndefinedPixelTrait) ||
            ((similarity_traits & UpdatePixelTrait) == 0))
          continue;
        SetPixelChannel(similarity_image,channel,ClampToQuantum(QuantumRange-
          QuantumRange*similarity),q);
      }
      q+=GetPixelChannels(similarity_image);
    }
    if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SimilarityImage)
#endif
        proceed=SetImageProgress(image,SimilarityImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  similarity_view=DestroyCacheView(similarity_view);
  if (status == MagickFalse)
    similarity_image=DestroyImage(similarity_image);
  return(similarity_image);
}
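/* A minimal driver sketch (not part of compare.c): the intended call
 * sequence for the public entry points above, written as a standalone
 * program. The file names are placeholders, and the block is guarded out
 * so this translation unit is unchanged when built normally. */
#ifdef COMPARE_USAGE_SKETCH
#include <stdio.h>
#include <MagickCore/MagickCore.h>

int main(int argc,char **argv)
{
  double
    distortion = 0.0;

  ExceptionInfo
    *exception;

  Image
    *difference,
    *image,
    *reconstruct;

  ImageInfo
    *info;

  (void) argc;
  MagickCoreGenesis(*argv,MagickFalse);
  exception=AcquireExceptionInfo();
  info=CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(info->filename,"input.png",MagickPathExtent);
  image=ReadImage(info,exception);
  (void) CopyMagickString(info->filename,"reference.png",MagickPathExtent);
  reconstruct=ReadImage(info,exception);
  if ((image != (Image *) NULL) && (reconstruct != (Image *) NULL))
    {
      /*
        One metric drives both the scalar score and the visual diff image.
      */
      difference=CompareImages(image,reconstruct,RootMeanSquaredErrorMetric,
        &distortion,exception);
      (void) fprintf(stdout,"RMSE distortion: %g\n",distortion);
      if (difference != (Image *) NULL)
        difference=DestroyImage(difference);
    }
  if (reconstruct != (Image *) NULL)
    reconstruct=DestroyImage(reconstruct);
  if (image != (Image *) NULL)
    image=DestroyImage(image);
  info=DestroyImageInfo(info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}
#endif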
CDIIS.c
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <omp.h>

#include "linalg_lib_wrapper.h"
#include "TinyDFT_typedef.h"
#include "CDIIS.h"
#include "utils.h"

void TinyDFT_CDIIS(TinyDFT_p TinyDFT, const double *X_mat, const double *S_mat, const double *D_mat, double *F_mat)
{
    int    nbf      = TinyDFT->nbf;
    int    mat_size = TinyDFT->mat_size;
    int    *ipiv    = TinyDFT->DIIS_ipiv;
    double *F0_mat   = TinyDFT->F0_mat;
    double *R_mat    = TinyDFT->R_mat;
    double *B_mat    = TinyDFT->B_mat;
    double *FDS_mat  = TinyDFT->FDS_mat;
    double *DIIS_rhs = TinyDFT->DIIS_rhs;
    double *tmp_mat  = TinyDFT->tmp_mat;

    int mat_msize = DBL_MSIZE * mat_size;
    int ldB = MAX_DIIS + 1;

    if (TinyDFT->iter <= 1)
    {
        // F = X^T * F * X
        // Use tmp_mat to store X^T * F
        cblas_dgemm(
            CblasRowMajor, CblasTrans, CblasNoTrans, nbf, nbf, nbf,
            1.0, X_mat, nbf, F_mat, nbf, 0.0, tmp_mat, nbf
        );
        // Use F_mat to store X^T * F * X
        cblas_dgemm(
            CblasRowMajor, CblasNoTrans, CblasNoTrans, nbf, nbf, nbf,
            1.0, tmp_mat, nbf, X_mat, nbf, 0.0, F_mat, nbf
        );
        return;
    }

    int DIIS_idx;   // Which historic F matrix will be replaced
    if (TinyDFT->DIIS_len < MAX_DIIS)
    {
        DIIS_idx = TinyDFT->DIIS_len;
        TinyDFT->DIIS_len++;
    } else {
        DIIS_idx = TinyDFT->DIIS_bmax_id;
    }

    // FDS = F * D * S
    cblas_dgemm(
        CblasRowMajor, CblasNoTrans, CblasNoTrans, nbf, nbf, nbf,
        1.0, F_mat, nbf, D_mat, nbf, 0.0, tmp_mat, nbf
    );
    cblas_dgemm(
        CblasRowMajor, CblasNoTrans, CblasNoTrans, nbf, nbf, nbf,
        1.0, tmp_mat, nbf, S_mat, nbf, 0.0, FDS_mat, nbf
    );

    // Residual = X^T * (FDS - FDS^T) * X, use tmp_mat to store FDS - FDS^T
    #pragma omp parallel for
    for (int i = 0; i < nbf; i++)
    {
        double *tmp_i      = tmp_mat + i * nbf;
        double *FDS_mat_ri = FDS_mat + i * nbf;
        double *FDS_mat_ci = FDS_mat + i;
        #pragma omp simd
        for (int j = 0; j < nbf; j++)
            tmp_i[j] = FDS_mat_ri[j] - FDS_mat_ci[j * nbf];
    }
    // Use FDS_mat to store X^T * (FDS - FDS^T)
    cblas_dgemm(
        CblasRowMajor, CblasTrans, CblasNoTrans, nbf, nbf, nbf,
        1.0, X_mat, nbf, tmp_mat, nbf, 0.0, FDS_mat, nbf
    );
    // Use tmp_mat to store X^T * (FDS - FDS^T) * X
    cblas_dgemm(
        CblasRowMajor, CblasNoTrans, CblasNoTrans, nbf, nbf, nbf,
        1.0, FDS_mat, nbf, X_mat, nbf, 0.0, tmp_mat, nbf
    );

    // In the original MATLAB code, F_mat and its residual are treated as
    // column vectors; for performance, we treat them as row vectors here.
    // R(:, DIIS_idx) = X^T * (FDS - FDS^T) * X
    // B(i, j) = R(:, i) * R(:, j)
    // DIIS_rhs is not used yet, use it to store dot product results
    double *DIIS_dot = DIIS_rhs;
    memset(DIIS_dot, 0, DBL_MSIZE * (MAX_DIIS + 1));
    memcpy(R_mat + mat_size * DIIS_idx, tmp_mat, mat_msize);
    double *Ri = R_mat + mat_size * DIIS_idx;
    for (int j = 0; j < TinyDFT->DIIS_len; j++)
    {
        double *Rj = R_mat + mat_size * j;
        DIIS_dot[j] = cblas_ddot(mat_size, Ri, 1, Rj, 1);
    }

    // Construct the symmetric B matrix:
    // B(DIIS_idx, 1 : DIIS_len) = DIIS_dot(1 : DIIS_len);
    // B(1 : DIIS_len, DIIS_idx) = DIIS_dot(1 : DIIS_len);
    for (int i = 0; i < TinyDFT->DIIS_len; i++)
    {
        B_mat[DIIS_idx * ldB + i] = DIIS_dot[i];
        B_mat[i * ldB + DIIS_idx] = DIIS_dot[i];
    }

    // Find the old F whose residual has the largest 2-norm; it is the
    // next one to be replaced once the history buffer is full
    for (int i = 0; i < TinyDFT->DIIS_len; i++)
    {
        if (B_mat[i * ldB + i] > TinyDFT->DIIS_bmax)
        {
            TinyDFT->DIIS_bmax    = B_mat[i * ldB + i];
            TinyDFT->DIIS_bmax_id = i;
        }
    }

    // F := X^T * F * X, F0(:, DIIS_idx) = F
    // Use tmp_mat to store X^T * F
    cblas_dgemm(
        CblasRowMajor, CblasTrans, CblasNoTrans, nbf, nbf, nbf,
        1.0, X_mat, nbf, F_mat, nbf, 0.0, tmp_mat, nbf
    );
    // Use F_mat to store X^T * F * X
    cblas_dgemm(
        CblasRowMajor, CblasNoTrans, CblasNoTrans, nbf, nbf, nbf,
        1.0, tmp_mat, nbf, X_mat, nbf, 0.0, F_mat, nbf
    );
// Copy to F0 memcpy(F0_mat + mat_size * DIIS_idx, F_mat, mat_msize); // Solve the linear system memset(DIIS_rhs, 0, DBL_MSIZE * (MAX_DIIS + 1)); DIIS_rhs[TinyDFT->DIIS_len] = -1; // Copy B_mat to tmp_mat, since LAPACKE_dgesv will overwrite the input matrix memcpy(tmp_mat, B_mat, DBL_MSIZE * ldB * ldB); LAPACKE_dgesv(LAPACK_ROW_MAJOR, TinyDFT->DIIS_len + 1, 1, tmp_mat, ldB, ipiv, DIIS_rhs, 1); // Form new X^T * F * X memset(F_mat, 0, mat_msize); for (int i = 0; i < TinyDFT->DIIS_len; i++) cblas_daxpy(mat_size, DIIS_rhs[i], F0_mat + i * mat_size, 1, F_mat, 1); }
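/* A self-contained sketch (guarded out of the build) of the bordered DIIS
 * linear system that TinyDFT_CDIIS() solves above. For m stored residuals
 * R_1..R_m with B(i,j) = <R_i, R_j>, the Lagrange-multiplier system is
 *
 *   [  B  -1 ] [ c ]   [  0 ]
 *   [ -1'  0 ] [ l ] = [ -1 ]   =>   sum_i c_i = 1,
 *
 * which matches DIIS_rhs[DIIS_len] = -1 above; the TinyDFT setup code is
 * assumed to keep the -1 border row/column inside B_mat. Here the m = 2
 * case is built and solved explicitly. */
#ifdef CDIIS_USAGE_SKETCH
#include <lapacke.h>

void TinyDFT_CDIIS_sketch(void)
{
    // Bordered B for two residuals: <R1,R1>=4, <R1,R2>=1, <R2,R2>=2
    double B[3 * 3] = {
         4.0,  1.0, -1.0,
         1.0,  2.0, -1.0,
        -1.0, -1.0,  0.0
    };
    double rhs[3] = { 0.0, 0.0, -1.0 };
    int ipiv[3];
    LAPACKE_dgesv(LAPACK_ROW_MAJOR, 3, 1, B, 3, ipiv, rhs, 1);
    // rhs[0] = 0.25 and rhs[1] = 0.75 are the mixing coefficients
    // (they sum to 1); rhs[2] is the Lagrange multiplier.
}
#endif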
eratostene.c
/* Parallel sieve of Eratosthenes (OpenMP).
 * Parameters:
 *   "unsigned int * sieve": array of at least n/2 + 1 ints, allocated by
 *     the caller, in which the odd primes are marked with "1".
 *     NOTE: "sieve[i] = 1" means that the number "(i * 2) + 1" is prime
 *     (the sieve holds odd numbers only).
 *   "long unsigned n": upper bound up to which primes are computed.
 * Compilation:
 *   $ gcc -c -fopenmp -o eratostene.o eratostene.c
 */
#include <stdlib.h>
#include <omp.h>

void eratosthenes_sieve(unsigned int * sieve, long unsigned n)
{
  /* omp_get_num_threads() returns 1 outside of a parallel region;
   * omp_get_max_threads() reports the threads the loops below will use. */
  int n_threads = omp_get_max_threads();
  long unsigned chunk = (n/2)/n_threads;
  long unsigned i;
  if (chunk == 0)
    chunk = 1; /* the chunk size of a schedule clause must be positive */
  #pragma omp parallel for schedule(dynamic, chunk)
  for(i = 0; i <= n/2; ++i)
    sieve[i] = 1;
  sieve[0] = 0; /* index 0 represents the number 1, which is not prime */
  for(i = 3; i*i <= n; i += 2) {
    if(sieve[i/2] == 1) {
      long unsigned j;
      /* Only odd multiples i*j (odd j) are representable in the sieve;
       * an even j would map to the index of an unrelated odd number. */
      #pragma omp parallel for schedule(dynamic, chunk)
      for(j = i; j <= n/i; j += 2)
        sieve[(i*j)/2] = 0;
    }
  }
}
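/* Usage example for the sieve above: list the primes up to 50.
 * Build standalone with: gcc -fopenmp -DERATOSTENE_DEMO eratostene.c -o demo */
#ifdef ERATOSTENE_DEMO
#include <stdio.h>

int main(void)
{
  long unsigned n = 50;
  unsigned int *sieve = malloc((n / 2 + 1) * sizeof *sieve);
  if (sieve == NULL)
    return 1;
  eratosthenes_sieve(sieve, n);
  printf("2"); /* 2 is prime but not representable in the odd-only sieve */
  for (long unsigned i = 1; i <= n / 2; ++i)
    if (sieve[i])
      printf(" %lu", 2 * i + 1);
  printf("\n");
  free(sieve);
  return 0;
}
#endif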
schur_eliminator_impl.h
// Ceres Solver - A fast non-linear least squares minimizer // Copyright 2015 Google Inc. All rights reserved. // http://ceres-solver.org/ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Google Inc. nor the names of its contributors may be // used to endorse or promote products derived from this software without // specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. // // Author: sameeragarwal@google.com (Sameer Agarwal) // // TODO(sameeragarwal): row_block_counter can perhaps be replaced by // Chunk::start ? #ifndef CERES_INTERNAL_SCHUR_ELIMINATOR_IMPL_H_ #define CERES_INTERNAL_SCHUR_ELIMINATOR_IMPL_H_ // Eigen has an internal threshold switching between different matrix // multiplication algorithms. In particular for matrices larger than // EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD it uses a cache friendly // matrix matrix product algorithm that has a higher setup cost. For // matrix sizes close to this threshold, especially when the matrices // are thin and long, the default choice may not be optimal. This is // the case for us, as the default choice causes a 30% performance // regression when we moved from Eigen2 to Eigen3. #define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 10 // This include must come before any #ifndef check on Ceres compile options. 
#include "ceres/internal/port.h" #include <algorithm> #include <map> #include "ceres/block_random_access_matrix.h" #include "ceres/block_sparse_matrix.h" #include "ceres/block_structure.h" #include "ceres/internal/eigen.h" #include "ceres/internal/fixed_array.h" #include "ceres/internal/scoped_ptr.h" #include "ceres/invert_psd_matrix.h" #include "ceres/map_util.h" #include "ceres/schur_eliminator.h" #include "ceres/scoped_thread_token.h" #include "ceres/small_blas.h" #include "ceres/stl_util.h" #include "ceres/thread_token_provider.h" #include "Eigen/Dense" #include "glog/logging.h" #ifdef CERES_USE_TBB #include <tbb/parallel_for.h> #include <tbb/task_arena.h> #endif namespace ceres { namespace internal { template <int kRowBlockSize, int kEBlockSize, int kFBlockSize> SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::~SchurEliminator() { STLDeleteElements(&rhs_locks_); } template <int kRowBlockSize, int kEBlockSize, int kFBlockSize> void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::Init( int num_eliminate_blocks, bool assume_full_rank_ete, const CompressedRowBlockStructure* bs) { CHECK_GT(num_eliminate_blocks, 0) << "SchurComplementSolver cannot be initialized with " << "num_eliminate_blocks = 0."; num_eliminate_blocks_ = num_eliminate_blocks; assume_full_rank_ete_ = assume_full_rank_ete; const int num_col_blocks = bs->cols.size(); const int num_row_blocks = bs->rows.size(); buffer_size_ = 1; chunks_.clear(); lhs_row_layout_.clear(); int lhs_num_rows = 0; // Add a map object for each block in the reduced linear system // and build the row/column block structure of the reduced linear // system. lhs_row_layout_.resize(num_col_blocks - num_eliminate_blocks_); for (int i = num_eliminate_blocks_; i < num_col_blocks; ++i) { lhs_row_layout_[i - num_eliminate_blocks_] = lhs_num_rows; lhs_num_rows += bs->cols[i].size; } int r = 0; // Iterate over the row blocks of A, and detect the chunks. The // matrix should already have been ordered so that all rows // containing the same y block are vertically contiguous. Along // the way also compute the amount of space each chunk will need // to perform the elimination. while (r < num_row_blocks) { const int chunk_block_id = bs->rows[r].cells.front().block_id; if (chunk_block_id >= num_eliminate_blocks_) { break; } chunks_.push_back(Chunk()); Chunk& chunk = chunks_.back(); chunk.size = 0; chunk.start = r; int buffer_size = 0; const int e_block_size = bs->cols[chunk_block_id].size; // Add to the chunk until the first block in the row is // different than the one in the first row for the chunk. while (r + chunk.size < num_row_blocks) { const CompressedRow& row = bs->rows[r + chunk.size]; if (row.cells.front().block_id != chunk_block_id) { break; } // Iterate over the blocks in the row, ignoring the first // block since it is the one to be eliminated. 
for (int c = 1; c < row.cells.size(); ++c) { const Cell& cell = row.cells[c]; if (InsertIfNotPresent( &(chunk.buffer_layout), cell.block_id, buffer_size)) { buffer_size += e_block_size * bs->cols[cell.block_id].size; } } buffer_size_ = std::max(buffer_size, buffer_size_); ++chunk.size; } CHECK_GT(chunk.size, 0); r += chunk.size; } const Chunk& chunk = chunks_.back(); uneliminated_row_begins_ = chunk.start + chunk.size; if (num_threads_ > 1) { random_shuffle(chunks_.begin(), chunks_.end()); } buffer_.reset(new double[buffer_size_ * num_threads_]); // chunk_outer_product_buffer_ only needs to store e_block_size * // f_block_size, which is always less than buffer_size_, so we just // allocate buffer_size_ per thread. chunk_outer_product_buffer_.reset(new double[buffer_size_ * num_threads_]); STLDeleteElements(&rhs_locks_); rhs_locks_.resize(num_col_blocks - num_eliminate_blocks_); for (int i = 0; i < num_col_blocks - num_eliminate_blocks_; ++i) { rhs_locks_[i] = new Mutex; } } template <int kRowBlockSize, int kEBlockSize, int kFBlockSize> void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>:: Eliminate(const BlockSparseMatrix* A, const double* b, const double* D, BlockRandomAccessMatrix* lhs, double* rhs) { if (lhs->num_rows() > 0) { lhs->SetZero(); VectorRef(rhs, lhs->num_rows()).setZero(); } const CompressedRowBlockStructure* bs = A->block_structure(); const int num_col_blocks = bs->cols.size(); // Add the diagonal to the schur complement. if (D != NULL) { #ifdef CERES_USE_OPENMP #pragma omp parallel for num_threads(num_threads_) schedule(dynamic) #endif // CERES_USE_OPENMP #ifndef CERES_USE_TBB for (int i = num_eliminate_blocks_; i < num_col_blocks; ++i) { #else tbb::task_arena task_arena(num_threads_); task_arena.execute([&]{ tbb::parallel_for(num_eliminate_blocks_, num_col_blocks, [&](int i) { #endif // !CERES_USE_TBB const int block_id = i - num_eliminate_blocks_; int r, c, row_stride, col_stride; CellInfo* cell_info = lhs->GetCell(block_id, block_id, &r, &c, &row_stride, &col_stride); if (cell_info != NULL) { const int block_size = bs->cols[i].size; typename EigenTypes<Eigen::Dynamic>::ConstVectorRef diag(D + bs->cols[i].position, block_size); CeresMutexLock l(&cell_info->m); MatrixRef m(cell_info->values, row_stride, col_stride); m.block(r, c, block_size, block_size).diagonal() += diag.array().square().matrix(); } } #ifdef CERES_USE_TBB ); }); #endif // CERES_USE_TBB } ThreadTokenProvider thread_token_provider(num_threads_); #ifdef CERES_USE_OPENMP // Eliminate y blocks one chunk at a time. For each chunk, compute // the entries of the normal equations and the gradient vector block // corresponding to the y block and then apply Gaussian elimination // to them. The matrix ete stores the normal matrix corresponding to // the block being eliminated and array buffer_ contains the // non-zero blocks in the row corresponding to this y block in the // normal equations. This computation is done in // ChunkDiagonalBlockAndGradient. UpdateRhs then applies gaussian // elimination to the rhs of the normal equations, updating the rhs // of the reduced linear system by modifying rhs blocks for all the // z blocks that share a row block/residual term with the y // block. EliminateRowOuterProduct does the corresponding operation // for the lhs of the reduced linear system. 
#pragma omp parallel for num_threads(num_threads_) schedule(dynamic)
#endif // CERES_USE_OPENMP

#ifndef CERES_USE_TBB
  for (int i = 0; i < chunks_.size(); ++i) {
#else
  tbb::task_arena task_arena(num_threads_);
  task_arena.execute([&]{
  tbb::parallel_for(0, int(chunks_.size()), [&](int i) {
#endif // !CERES_USE_TBB
    const ScopedThreadToken scoped_thread_token(&thread_token_provider);
    const int thread_id = scoped_thread_token.token();
    double* buffer = buffer_.get() + thread_id * buffer_size_;
    const Chunk& chunk = chunks_[i];
    const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
    const int e_block_size = bs->cols[e_block_id].size;

    VectorRef(buffer, buffer_size_).setZero();

    typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix
        ete(e_block_size, e_block_size);

    if (D != NULL) {
      const typename EigenTypes<kEBlockSize>::ConstVectorRef
          diag(D + bs->cols[e_block_id].position, e_block_size);
      ete = diag.array().square().matrix().asDiagonal();
    } else {
      ete.setZero();
    }

    FixedArray<double, 8> g(e_block_size);
    typename EigenTypes<kEBlockSize>::VectorRef gref(g.get(), e_block_size);
    gref.setZero();

    // We are going to be computing
    //
    //   S += F'F - F'E(E'E)^{-1}E'F
    //
    // for each Chunk. The computation is broken down into a number of
    // function calls as below.

    // Compute the outer product of the e_blocks with themselves (ete
    // = E'E). Compute the product of the e_blocks with the
    // corresponding f_blocks (buffer = E'F), the gradient of the terms
    // in this chunk (g) and add the outer product of the f_blocks to
    // the Schur complement (S += F'F).
    ChunkDiagonalBlockAndGradient(
        chunk, A, b, chunk.start, &ete, g.get(), buffer, lhs);

    // Normally one wouldn't compute the inverse explicitly, but
    // e_block_size will typically be a small number like 3, in
    // which case it's much faster to compute the inverse once and
    // use it to multiply other matrices/vectors instead of doing a
    // Solve call over and over again.
    typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix inverse_ete =
        InvertPSDMatrix<kEBlockSize>(assume_full_rank_ete_, ete);

    // For the current chunk compute and update the rhs of the reduced
    // linear system.
    //
    //   rhs = F'b - F'E(E'E)^(-1) E'b
    FixedArray<double, 8> inverse_ete_g(e_block_size);
    MatrixVectorMultiply<kEBlockSize, kEBlockSize, 0>(
        inverse_ete.data(),
        e_block_size,
        e_block_size,
        g.get(),
        inverse_ete_g.get());

    UpdateRhs(chunk, A, b, chunk.start, inverse_ete_g.get(), rhs);

    // S -= F'E(E'E)^{-1}E'F
    ChunkOuterProduct(
        thread_id, bs, inverse_ete, buffer, chunk.buffer_layout, lhs);
  }
#ifdef CERES_USE_TBB
  );
  });
#endif // CERES_USE_TBB

  // For rows with no e_blocks, the Schur complement update reduces to
  // S += F'F.
NoEBlockRowsUpdate(A, b, uneliminated_row_begins_, lhs, rhs); } template <int kRowBlockSize, int kEBlockSize, int kFBlockSize> void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>:: BackSubstitute(const BlockSparseMatrix* A, const double* b, const double* D, const double* z, double* y) { const CompressedRowBlockStructure* bs = A->block_structure(); #ifdef CERES_USE_OPENMP #pragma omp parallel for num_threads(num_threads_) schedule(dynamic) #endif // CERES_USE_OPENMP #ifndef CERES_USE_TBB for (int i = 0; i < chunks_.size(); ++i) { #else tbb::task_arena task_arena(num_threads_); task_arena.execute([&]{ tbb::parallel_for(0, int(chunks_.size()), [&](int i) { #endif // !CERES_USE_TBB const Chunk& chunk = chunks_[i]; const int e_block_id = bs->rows[chunk.start].cells.front().block_id; const int e_block_size = bs->cols[e_block_id].size; double* y_ptr = y + bs->cols[e_block_id].position; typename EigenTypes<kEBlockSize>::VectorRef y_block(y_ptr, e_block_size); typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix ete(e_block_size, e_block_size); if (D != NULL) { const typename EigenTypes<kEBlockSize>::ConstVectorRef diag(D + bs->cols[e_block_id].position, e_block_size); ete = diag.array().square().matrix().asDiagonal(); } else { ete.setZero(); } const double* values = A->values(); for (int j = 0; j < chunk.size; ++j) { const CompressedRow& row = bs->rows[chunk.start + j]; const Cell& e_cell = row.cells.front(); DCHECK_EQ(e_block_id, e_cell.block_id); FixedArray<double, 8> sj(row.block.size); typename EigenTypes<kRowBlockSize>::VectorRef(sj.get(), row.block.size) = typename EigenTypes<kRowBlockSize>::ConstVectorRef (b + bs->rows[chunk.start + j].block.position, row.block.size); for (int c = 1; c < row.cells.size(); ++c) { const int f_block_id = row.cells[c].block_id; const int f_block_size = bs->cols[f_block_id].size; const int r_block = f_block_id - num_eliminate_blocks_; MatrixVectorMultiply<kRowBlockSize, kFBlockSize, -1>( values + row.cells[c].position, row.block.size, f_block_size, z + lhs_row_layout_[r_block], sj.get()); } MatrixTransposeVectorMultiply<kRowBlockSize, kEBlockSize, 1>( values + e_cell.position, row.block.size, e_block_size, sj.get(), y_ptr); MatrixTransposeMatrixMultiply <kRowBlockSize, kEBlockSize, kRowBlockSize, kEBlockSize, 1>( values + e_cell.position, row.block.size, e_block_size, values + e_cell.position, row.block.size, e_block_size, ete.data(), 0, 0, e_block_size, e_block_size); } y_block = InvertPSDMatrix<kEBlockSize>(assume_full_rank_ete_, ete) * y_block; } #ifdef CERES_USE_TBB ); }); #endif // CERES_USE_TBB } // Update the rhs of the reduced linear system. 
// Compute
//
//   F'b - F'E(E'E)^(-1) E'b

template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
UpdateRhs(const Chunk& chunk,
          const BlockSparseMatrix* A,
          const double* b,
          int row_block_counter,
          const double* inverse_ete_g,
          double* rhs) {
  const CompressedRowBlockStructure* bs = A->block_structure();
  const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
  const int e_block_size = bs->cols[e_block_id].size;

  int b_pos = bs->rows[row_block_counter].block.position;
  const double* values = A->values();
  for (int j = 0; j < chunk.size; ++j) {
    const CompressedRow& row = bs->rows[row_block_counter + j];
    const Cell& e_cell = row.cells.front();

    typename EigenTypes<kRowBlockSize>::Vector sj =
        typename EigenTypes<kRowBlockSize>::ConstVectorRef
        (b + b_pos, row.block.size);

    MatrixVectorMultiply<kRowBlockSize, kEBlockSize, -1>(
        values + e_cell.position, row.block.size, e_block_size,
        inverse_ete_g, sj.data());

    for (int c = 1; c < row.cells.size(); ++c) {
      const int block_id = row.cells[c].block_id;
      const int block_size = bs->cols[block_id].size;
      const int block = block_id - num_eliminate_blocks_;
      CeresMutexLock l(rhs_locks_[block]);
      MatrixTransposeVectorMultiply<kRowBlockSize, kFBlockSize, 1>(
          values + row.cells[c].position,
          row.block.size, block_size,
          sj.data(), rhs + lhs_row_layout_[block]);
    }
    b_pos += row.block.size;
  }
}

// Given a Chunk - set of rows with the same e_block, e.g. in the
// following Chunk with two rows.
//
//                E                   F
//      [ y11   0   0   0 |  z11     0    0   0    z51]
//      [ y12   0   0   0 |  z12   z22    0   0      0]
//
// this function computes two matrices. The diagonal block matrix
//
//   ete = y11' * y11 + y12' * y12
//
// and the off-diagonal blocks in the Gauss-Newton Hessian.
//
//   buffer = [y11' * z11 + y12' * z12, y12' * z22, y11' * z51]
//
// which are zero-compressed versions of the block sparse matrices E'E
// and E'F.
//
// and the gradient of the e_block, E'b.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
ChunkDiagonalBlockAndGradient(
    const Chunk& chunk,
    const BlockSparseMatrix* A,
    const double* b,
    int row_block_counter,
    typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix* ete,
    double* g,
    double* buffer,
    BlockRandomAccessMatrix* lhs) {
  const CompressedRowBlockStructure* bs = A->block_structure();

  int b_pos = bs->rows[row_block_counter].block.position;
  const int e_block_size = ete->rows();

  // Iterate over the rows in this chunk, for each row, compute the
  // contribution of its F blocks to the Schur complement, the
  // contribution of its E block to the matrix E'E (ete), and the
  // corresponding block in the gradient vector.
  const double* values = A->values();
  for (int j = 0; j < chunk.size; ++j) {
    const CompressedRow& row = bs->rows[row_block_counter + j];

    if (row.cells.size() > 1) {
      EBlockRowOuterProduct(A, row_block_counter + j, lhs);
    }

    // Extract the e_block, ETE += E_i' E_i
    const Cell& e_cell = row.cells.front();
    MatrixTransposeMatrixMultiply
        <kRowBlockSize, kEBlockSize, kRowBlockSize, kEBlockSize, 1>(
            values + e_cell.position, row.block.size, e_block_size,
            values + e_cell.position, row.block.size, e_block_size,
            ete->data(), 0, 0, e_block_size, e_block_size);

    // g += E_i' b_i
    MatrixTransposeVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
        values + e_cell.position, row.block.size, e_block_size,
        b + b_pos,
        g);

    // buffer = E'F. This computation is done by iterating over the
    // f_blocks for each row in the chunk.
    for (int c = 1; c < row.cells.size(); ++c) {
      const int f_block_id = row.cells[c].block_id;
      const int f_block_size = bs->cols[f_block_id].size;
      double* buffer_ptr =
          buffer + FindOrDie(chunk.buffer_layout, f_block_id);
      MatrixTransposeMatrixMultiply
          <kRowBlockSize, kEBlockSize, kRowBlockSize, kFBlockSize, 1>(
              values + e_cell.position, row.block.size, e_block_size,
              values + row.cells[c].position, row.block.size, f_block_size,
              buffer_ptr, 0, 0, e_block_size, f_block_size);
    }
    b_pos += row.block.size;
  }
}

// Compute the outer product F'E(E'E)^{-1}E'F and subtract it from the
// Schur complement matrix, i.e.
//
//   S -= F'E(E'E)^{-1}E'F.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
ChunkOuterProduct(int thread_id,
                  const CompressedRowBlockStructure* bs,
                  const Matrix& inverse_ete,
                  const double* buffer,
                  const BufferLayoutType& buffer_layout,
                  BlockRandomAccessMatrix* lhs) {
  // This is the most computationally expensive part of this
  // code. Profiling experiments reveal that the bottleneck is not the
  // computation of the right-hand matrix product, but memory
  // references to the left hand side.
  const int e_block_size = inverse_ete.rows();
  BufferLayoutType::const_iterator it1 = buffer_layout.begin();

  double* b1_transpose_inverse_ete =
      chunk_outer_product_buffer_.get() + thread_id * buffer_size_;

  // S(i,j) -= b_i' * ete^{-1} * b_j
  for (; it1 != buffer_layout.end(); ++it1) {
    const int block1 = it1->first - num_eliminate_blocks_;
    const int block1_size = bs->cols[it1->first].size;
    MatrixTransposeMatrixMultiply
        <kEBlockSize, kFBlockSize, kEBlockSize, kEBlockSize, 0>(
            buffer + it1->second, e_block_size, block1_size,
            inverse_ete.data(), e_block_size, e_block_size,
            b1_transpose_inverse_ete, 0, 0, block1_size, e_block_size);

    BufferLayoutType::const_iterator it2 = it1;
    for (; it2 != buffer_layout.end(); ++it2) {
      const int block2 = it2->first - num_eliminate_blocks_;

      int r, c, row_stride, col_stride;
      CellInfo* cell_info = lhs->GetCell(block1, block2,
                                         &r, &c,
                                         &row_stride, &col_stride);
      if (cell_info != NULL) {
        const int block2_size = bs->cols[it2->first].size;
        CeresMutexLock l(&cell_info->m);
        MatrixMatrixMultiply
            <kFBlockSize, kEBlockSize, kEBlockSize, kFBlockSize, -1>(
                b1_transpose_inverse_ete, block1_size, e_block_size,
                buffer + it2->second, e_block_size, block2_size,
                cell_info->values, r, c, row_stride, col_stride);
      }
    }
  }
}

// For rows with no e_blocks, the Schur complement update reduces to
// S += F'F. This function iterates over the rows of A with no e_block,
// and calls NoEBlockRowOuterProduct on each row.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
NoEBlockRowsUpdate(const BlockSparseMatrix* A,
                   const double* b,
                   int row_block_counter,
                   BlockRandomAccessMatrix* lhs,
                   double* rhs) {
  const CompressedRowBlockStructure* bs = A->block_structure();
  const double* values = A->values();
  for (; row_block_counter < bs->rows.size(); ++row_block_counter) {
    const CompressedRow& row = bs->rows[row_block_counter];
    for (int c = 0; c < row.cells.size(); ++c) {
      const int block_id = row.cells[c].block_id;
      const int block_size = bs->cols[block_id].size;
      const int block = block_id - num_eliminate_blocks_;
      MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
          values + row.cells[c].position, row.block.size, block_size,
          b + row.block.position,
          rhs + lhs_row_layout_[block]);
    }
    NoEBlockRowOuterProduct(A, row_block_counter, lhs);
  }
}

// A row r of A that has no e_blocks gets added to the Schur
// complement as S += r r'. This function is responsible for computing
// the contribution of a single row r to the Schur complement. It is
// very similar in structure to EBlockRowOuterProduct except for
// one difference. It does not use any of the template
// parameters. This is because the algorithm used for detecting the
// static structure of the matrix A only pays attention to rows with
// e_blocks. This is because rows without e_blocks are rare and
// typically arise from regularization terms in the original
// optimization problem, and have a very different structure than the
// rows with e_blocks. Including them in the static structure
// detection will lead to most template parameters being set to
// dynamic. Since the number of rows without e_blocks is small, the
// lack of templating is not an issue.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
NoEBlockRowOuterProduct(const BlockSparseMatrix* A,
                        int row_block_index,
                        BlockRandomAccessMatrix* lhs) {
  const CompressedRowBlockStructure* bs = A->block_structure();
  const CompressedRow& row = bs->rows[row_block_index];
  const double* values = A->values();
  for (int i = 0; i < row.cells.size(); ++i) {
    const int block1 = row.cells[i].block_id - num_eliminate_blocks_;
    DCHECK_GE(block1, 0);

    const int block1_size = bs->cols[row.cells[i].block_id].size;
    int r, c, row_stride, col_stride;
    CellInfo* cell_info = lhs->GetCell(block1, block1,
                                       &r, &c,
                                       &row_stride, &col_stride);
    if (cell_info != NULL) {
      CeresMutexLock l(&cell_info->m);
      // This multiply currently ignores the fact that this is a
      // symmetric outer product.
      MatrixTransposeMatrixMultiply
          <Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
              values + row.cells[i].position, row.block.size, block1_size,
              values + row.cells[i].position, row.block.size, block1_size,
              cell_info->values, r, c, row_stride, col_stride);
    }

    for (int j = i + 1; j < row.cells.size(); ++j) {
      const int block2 = row.cells[j].block_id - num_eliminate_blocks_;
      DCHECK_GE(block2, 0);
      DCHECK_LT(block1, block2);
      int r, c, row_stride, col_stride;
      CellInfo* cell_info = lhs->GetCell(block1, block2,
                                         &r, &c,
                                         &row_stride, &col_stride);
      if (cell_info != NULL) {
        const int block2_size = bs->cols[row.cells[j].block_id].size;
        CeresMutexLock l(&cell_info->m);
        MatrixTransposeMatrixMultiply
            <Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
                values + row.cells[i].position, row.block.size, block1_size,
                values + row.cells[j].position, row.block.size, block2_size,
                cell_info->values, r, c, row_stride, col_stride);
      }
    }
  }
}

// For a row with an e_block, compute the contribution S += F'F. This
// function has the same structure as NoEBlockRowOuterProduct, except
// that this function uses the template parameters.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
EBlockRowOuterProduct(const BlockSparseMatrix* A,
                      int row_block_index,
                      BlockRandomAccessMatrix* lhs) {
  const CompressedRowBlockStructure* bs = A->block_structure();
  const CompressedRow& row = bs->rows[row_block_index];
  const double* values = A->values();
  for (int i = 1; i < row.cells.size(); ++i) {
    const int block1 = row.cells[i].block_id - num_eliminate_blocks_;
    DCHECK_GE(block1, 0);

    const int block1_size = bs->cols[row.cells[i].block_id].size;
    int r, c, row_stride, col_stride;
    CellInfo* cell_info = lhs->GetCell(block1, block1,
                                       &r, &c,
                                       &row_stride, &col_stride);
    if (cell_info != NULL) {
      CeresMutexLock l(&cell_info->m);
      // block += b1.transpose() * b1;
      MatrixTransposeMatrixMultiply
          <kRowBlockSize, kFBlockSize, kRowBlockSize, kFBlockSize, 1>(
              values + row.cells[i].position, row.block.size, block1_size,
              values + row.cells[i].position, row.block.size, block1_size,
              cell_info->values, r, c, row_stride, col_stride);
    }

    for (int j = i + 1; j < row.cells.size(); ++j) {
      const int block2 = row.cells[j].block_id - num_eliminate_blocks_;
      DCHECK_GE(block2, 0);
      DCHECK_LT(block1, block2);
      const int block2_size = bs->cols[row.cells[j].block_id].size;
      int r, c, row_stride, col_stride;
      CellInfo* cell_info = lhs->GetCell(block1, block2,
                                         &r, &c,
                                         &row_stride, &col_stride);
      if (cell_info != NULL) {
        // block += b1.transpose() * b2;
        CeresMutexLock l(&cell_info->m);
        MatrixTransposeMatrixMultiply
            <kRowBlockSize, kFBlockSize, kRowBlockSize, kFBlockSize, 1>(
                values + row.cells[i].position, row.block.size, block1_size,
                values + row.cells[j].position, row.block.size, block2_size,
                cell_info->values, r, c, row_stride, col_stride);
      }
    }
  }
}

}  // namespace internal
}  // namespace ceres

#endif  // CERES_INTERNAL_SCHUR_ELIMINATOR_IMPL_H_
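// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of Ceres): the dense analogue of the
// block-wise computation performed by the eliminator above. Given the
// partitioned normal equations
//
//   [ E'E  E'F ] [dy]   [E'b]
//   [ F'E  F'F ] [dz] = [F'b]
//
// eliminating dy yields the reduced (Schur) system
//
//   (F'F - F'E (E'E)^{-1} E'F) dz = F'b - F'E (E'E)^{-1} E'b.
//
// All names below are local to this sketch; it only makes the algebra
// concrete on small dense matrices and is not used by the solver.
#include <Eigen/Dense>

inline void DenseSchurComplementSketch(const Eigen::MatrixXd& E,
                                       const Eigen::MatrixXd& F,
                                       const Eigen::VectorXd& b,
                                       Eigen::MatrixXd* lhs,
                                       Eigen::VectorXd* rhs) {
  const Eigen::MatrixXd ete = E.transpose() * E;  // E'E, one small PSD block.
  const Eigen::MatrixXd etf = E.transpose() * F;  // E'F, the "buffer".
  // Like the eliminator, invert the small PSD block explicitly.
  const Eigen::MatrixXd inverse_ete = ete.inverse();
  *lhs = F.transpose() * F - etf.transpose() * inverse_ete * etf;
  *rhs = F.transpose() * b -
         etf.transpose() * (inverse_ete * (E.transpose() * b));
}
// ---------------------------------------------------------------------------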
LinearSolvers.h
#ifndef USE_EIGEN
#define USE_EIGEN 0
#endif // USE_EIGEN
#ifndef USE_CHOLMOD
#define USE_CHOLMOD 0
#endif // USE_CHOLMOD

#if USE_CHOLMOD
#pragma message( "[WARNING] Need to explicitly exclude VCOMP.lib" )
#include <Cholmod/cholmod.h>
#pragma comment( lib , "CHOLMOD.lib" )
#define DLONG
#ifdef DLONG
typedef long long SOLVER_LONG;
#define CHOLMOD( name ) cholmod_l_ ## name
#else // !DLONG
typedef int SOLVER_LONG;
#define CHOLMOD( name ) cholmod_ ## name
#endif // DLONG
#endif // USE_CHOLMOD

#include <Util/SparseMatrix.h>

double SquareNorm( const double* values , int dim ){ double norm2 = 0 ; for( int i=0 ; i<dim ; i++ ) norm2 += values[i] * values[i] ; return norm2; }
double SquareNorm( const float* values , int dim ){ double norm2 = 0 ; for( int i=0 ; i<dim ; i++ ) norm2 += values[i] * values[i] ; return norm2; }
template< class Type > double SquareNorm( const Type* values , int dim ){ double norm2 = 0 ; for( int i=0 ; i<dim ; i++ ) norm2 += values[i].squareNorm() ; return norm2 ; }
double SquareDifference( const double* values1 , const double* values2 , int dim ){ double norm2 = 0 ; for( int i=0 ; i<dim ; i++ ) norm2 += ( values1[i] - values2[i] ) * ( values1[i] - values2[i] ) ; return norm2; }
double SquareDifference( const float* values1 , const float* values2 , int dim ){ double norm2 = 0 ; for( int i=0 ; i<dim ; i++ ) norm2 += ( values1[i] - values2[i] ) * ( values1[i] - values2[i] ) ; return norm2; }
template< class Type > double SquareDifference( const Type* values1 , const Type* values2 , int dim ){ double norm2 = 0 ; for( int i=0 ; i<dim ; i++ ) norm2 += ( values1[i] - values2[i] ).squareNorm() ; return norm2 ; }

// This is the conjugate gradients solver.
// The assumption is that the class SPDOperator defines a method operator()( const Real* , Real* ) which corresponds to applying a symmetric positive-definite operator.
template< class Real >
struct CGScratch
{
	Real *r , *d , *q;
	CGScratch( void ) : r(NULL) , d(NULL) , q(NULL) , _dim(0){ ; }
	CGScratch( int dim ) : r(NULL) , d(NULL) , q(NULL) , _dim(0){ resize(dim); }
	~CGScratch( void ){ resize(0); }
	void resize( int dim )
	{
		if( dim!=_dim )
		{
			if( r ) delete[] r ; r = NULL;
			if( d ) delete[] d ; d = NULL;
			if( q ) delete[] q ; q = NULL;
			if( dim ) r = new Real[dim] , d = new Real[dim] , q = new Real[dim];
			_dim = dim;
		}
	}
protected:
	int _dim;
};
template< class Real >
struct PreconditionedCGScratch : public CGScratch< Real >
{
	Real *s;
	PreconditionedCGScratch( void ) : CGScratch< Real >() , s(NULL){ ; }
	PreconditionedCGScratch( int dim ) : CGScratch< Real >() , s(NULL){ resize(dim); }
	~PreconditionedCGScratch( void ){ resize(0); }
	void resize( int dim )
	{
		if( dim!=CGScratch< Real >::_dim )
		{
			if( s ) delete[] s; s = NULL;
			if( dim ) s = new Real[dim];
		}
		CGScratch< Real >::resize( dim );
	}
};
template< class Real >
struct DiagonalPreconditioner
{
	Real* iDiagonal;
	DiagonalPreconditioner( void ) : iDiagonal(NULL) , _dim(0){ ; }
	~DiagonalPreconditioner( void ){ if( iDiagonal ) delete[] iDiagonal ; iDiagonal = NULL; }
	void set( const SparseMatrix< Real , int >& M )
	{
		if( _dim!=M.Rows() )
		{
			_dim = (int)M.Rows();
			if( iDiagonal ) delete[] iDiagonal , iDiagonal = NULL;
			if( _dim>0 ) iDiagonal = new Real[_dim];
		}
		memset( iDiagonal , 0 , sizeof(Real)*_dim );
#pragma omp parallel for
		for( int i=0 ; i<M.Rows() ; i++ )
		{
			for( int j=0 ; j<M.rowSizes[i] ; j++ ) if( M[i][j].N==i ) iDiagonal[i] += M[i][j].Value;
			iDiagonal[i] = (Real)1./iDiagonal[i];
		}
	}
	void operator()( const Real* in , Real* out ) const
	{
#pragma omp parallel for
		for( int i=0 ; i<_dim ; i++ ) out[i] = in[i] * iDiagonal[i];
	}
protected:
	int _dim;
};

template< class Real , class SPDOperator >
int SolveCG( SPDOperator& L , int iters , int dim , const Real* b , Real* x , CGScratch< Real >* scratch=NULL , double eps=1e-8 , int threads=1 , bool verbose=false )
{
	eps *= eps;
	Real *r , *d , *q;
	if( scratch ) r = scratch->r , d = scratch->d , q = scratch->q;
	else          r = new Real[dim] , d = new Real[dim] , q = new Real[dim];
	memset( r , 0 , sizeof(Real)*dim ) , memset( d , 0 , sizeof(Real)*dim ) , memset( q , 0 , sizeof(Real)*dim );
	double delta_new = 0 , delta_0;

	L( x , r );
#pragma omp parallel for num_threads( threads ) reduction( + : delta_new )
	for( int i=0 ; i<dim ; i++ ) d[i] = r[i] = b[i] - r[i] , delta_new += r[i] * r[i];

	delta_0 = delta_new;
	if( delta_new<eps )
	{
		if( !scratch ) delete[] r , delete[] d , delete[] q;
		return 0;
	}
	int ii;
	for( ii=0 ; ii<iters && delta_new>eps*delta_0 ; ii++ )
	{
		L( d , q );
		double dDotQ = 0;
#pragma omp parallel for num_threads( threads ) reduction( + : dDotQ )
		for( int i=0 ; i<dim ; i++ ) dDotQ += d[i] * q[i];
		Real alpha = Real( delta_new / dDotQ );
		double delta_old = delta_new;
		delta_new = 0;
		const int RESET_COUNT = 50;
		if( (ii%RESET_COUNT)==(RESET_COUNT-1) )
		{
#pragma omp parallel for num_threads( threads )
			for( int i=0 ; i<dim ; i++ ) x[i] += d[i] * alpha;
			L( x , r );
#pragma omp parallel for num_threads( threads ) reduction( + : delta_new )
			for( int i=0 ; i<dim ; i++ ) r[i] = b[i] - r[i] , delta_new += r[i] * r[i];
		}
		else
#pragma omp parallel for num_threads( threads ) reduction( + : delta_new )
			for( int i=0 ; i<dim ; i++ ) r[i] -= q[i] * alpha , delta_new += r[i] * r[i] , x[i] += d[i] * alpha;
		Real beta = Real( delta_new / delta_old );
#pragma omp parallel for num_threads( threads )
		for( int i=0 ; i<dim ; i++ ) d[i] = r[i] + d[i] * beta;
	}
	if( verbose )
	{
		L( x , r );
#pragma omp parallel for num_threads( threads )
		for( int i=0 ; i<dim ; i++ ) r[i] -= b[i];
		printf( "CG: %d %g -> %g\n" , ii , SquareNorm( b , dim ) , SquareNorm( r , dim ) );
	}
	if( !scratch ) delete[] r , delete[] d , delete[] q;
	return ii;
}

template< class Real , class SPDOperator , class SPDPreconditioner >
int SolvePreconditionedCG( SPDOperator& L , SPDPreconditioner& Pinverse , int iters , int dim , const Real* b , Real* x , PreconditionedCGScratch< Real >* scratch=NULL , double eps=1e-8 , int threads=1 , bool verbose=false )
{
	eps *= eps;
	Real *r , *d , *q , *s;
	if( scratch ) r = scratch->r , d = scratch->d , q = scratch->q , s = scratch->s;
	else          r = new Real[dim] , d = new Real[dim] , q = new Real[dim] , s = new Real[dim];
	memset( r , 0 , sizeof(Real)*dim ) , memset( d , 0 , sizeof(Real)*dim ) , memset( q , 0 , sizeof(Real)*dim ) , memset( s , 0 , sizeof(Real)*dim );
	double delta_new = 0 , delta_0;

	L( x , r );
#pragma omp parallel for num_threads( threads )
	for( int i=0 ; i<dim ; i++ ) r[i] = b[i] - r[i];
	Pinverse( r , d );
#pragma omp parallel for num_threads( threads ) reduction( + : delta_new )
	for( int i=0 ; i<dim ; i++ ) delta_new += r[i] * d[i];

	delta_0 = delta_new;
	if( delta_new<eps )
	{
		if( !scratch ) delete[] r , delete[] d , delete[] q , delete[] s;
		return 0;
	}
	int ii;
	for( ii=0 ; ii<iters && delta_new>eps*delta_0 ; ii++ )
	{
		L( d , q );
		double dDotQ = 0;
#pragma omp parallel for num_threads( threads ) reduction( + : dDotQ )
		for( int i=0 ; i<dim ; i++ ) dDotQ += d[i] * q[i];
		Real alpha = Real( delta_new / dDotQ );
		const int RESET_COUNT = 50;
#pragma omp parallel for num_threads( threads )
		for( int i=0 ; i<dim ; i++ ) x[i] += d[i] * alpha;
		if( (ii%RESET_COUNT)==(RESET_COUNT-1) )
		{
			L( x , r );
#pragma omp parallel for num_threads( threads )
			for( int i=0 ; i<dim ; i++ ) r[i] = b[i] - r[i];
		}
		else
#pragma omp parallel for num_threads( threads )
			for( int i=0 ; i<dim ; i++ ) r[i] -= q[i] * alpha;
		Pinverse( r , s );
		double delta_old = delta_new;
		delta_new = 0;
#pragma omp parallel for num_threads( threads ) reduction( + : delta_new )
		for( int i=0 ; i<dim ; i++ ) delta_new += r[i] * s[i];
		Real beta = Real( delta_new / delta_old );
#pragma omp parallel for num_threads( threads )
		for( int i=0 ; i<dim ; i++ ) d[i] = s[i] + d[i] * beta;
	}
	if( verbose )
	{
		L( x , r );
#pragma omp parallel for num_threads( threads )
		for( int i=0 ; i<dim ; i++ ) r[i] -= b[i];
		printf( "PCCG: %d %g -> %g\n" , ii , SquareNorm( b , dim ) , SquareNorm( r , dim ) );
	}
	if( !scratch ) delete[] r , delete[] d , delete[] q , delete[] s;
	return ii;
}

#if USE_EIGEN
#include <Eigen/Sparse>

class EigenCholeskySolver
{
	typedef Eigen::SimplicialLLT< Eigen::SparseMatrix< double > > Eigen_Solver;
	typedef Eigen::VectorXd Eigen_Vector;
	Eigen_Solver _solver;
	Eigen_Vector _eigenB , _eigenX;
public:
	template< class Real >
	EigenCholeskySolver( const SparseMatrix< Real , int >& M )
	{
		Eigen::SparseMatrix< double > eigenM( int( M.Rows() ) , int( M.Rows() ) );
		std::vector< Eigen::Triplet<double> > triplets;
		triplets.reserve( M.Entries() );
		for( int i=0 ; i<M.Rows() ; i++ ) for( int j=0 ; j<M.RowSize(i) ; j++ ) triplets.push_back( Eigen::Triplet< double >( i , M[i][j].N , M[i][j].Value ) );
		eigenM.setFromTriplets( triplets.begin() , triplets.end() );
		_solver.analyzePattern( eigenM );
		_solver.factorize( eigenM );
		if( _solver.info()!=Eigen::Success ) fprintf( stderr , "[ERROR] EigenSolver::EigenCholeskySolver Failed to factorize matrix\n" ) , exit(0);
		_eigenB.resize( M.Rows() ) , _eigenX.resize( M.Rows() );
	}
	template< class Real >
	void solve( const Real* b , Real* x )
	{
#pragma omp parallel for
		for( int i=0 ; i<_eigenB.size() ; i++ ) _eigenB[i] = b[i];
		_eigenX = _solver.solve( _eigenB );
#pragma omp parallel for
		for( int i=0 ; i<_eigenX.size() ; i++ ) x[i] = _eigenX[i];
	}
	size_t dimension( void ) const { return _eigenB.size(); }
	template< class Real >
	static void Solve( const SparseMatrix< Real , int >& M , const Real* b , Real* x ){ EigenCholeskySolver solver( M ) ; solver.solve( b , x ); }
};
class EigenCGSolver
{
	Eigen::ConjugateGradient< Eigen::SparseMatrix< double > > _solver;
	Eigen::VectorXd _eigenB , _eigenX;
	Eigen::SparseMatrix< double > _eigenM;
public:
	template< class Real >
	EigenCGSolver( const SparseMatrix< Real , int >& M )
	{
		_eigenM.resize( (int)M.Rows() , (int)M.Rows() );
		std::vector< Eigen::Triplet<double> > triplets;
		triplets.reserve( M.Entries() );
		for( int i=0 ; i<M.Rows() ; i++ ) for( int j=0 ; j<M.RowSize(i) ; j++ ) triplets.push_back( Eigen::Triplet< double >( i , M[i][j].N , M[i][j].Value ) );
		_eigenM.setFromTriplets( triplets.begin() , triplets.end() );
		// compute() performs both the symbolic analysis and the factorization.
		_solver.compute( _eigenM );
		if( _solver.info()!=Eigen::Success ) fprintf( stderr , "[ERROR] EigenSolver::EigenCGSolver Failed to factorize matrix\n" ) , exit(0);
		_eigenB.resize( M.Rows() ) , _eigenX.resize( M.Rows() );
	}

	template< class Real >
	void solve( const Real* b , Real* x , int iters )
	{
		_solver.setMaxIterations( iters );
#pragma omp parallel for
		for( int i=0 ; i<_eigenB.size() ; i++ ) _eigenB[i] = b[i] , _eigenX[i] = x[i];
		_eigenX = _solver.solveWithGuess( _eigenB , _eigenX );
#pragma omp parallel for
		for( int i=0 ; i<_eigenX.size() ; i++ ) x[i] = _eigenX[i];
	}
	size_t dimension( void ) const { return _eigenB.size(); }
	template< class Real >
	static void Solve( const SparseMatrix< Real , int >& M , const Real* b , Real* x , int iters ){ EigenCGSolver solver( M ) ; solver._solver.setMaxIterations( iters ) ; solver.solve( b , x ); }
};
#endif // USE_EIGEN

#if USE_CHOLMOD
class CholmodSolver
{
	const static bool LOWER_TRIANGULAR = true;
	int dim;
	cholmod_factor* cholmod_L;
	cholmod_dense*  cholmod_b;
	cholmod_sparse* cholmod_M;
	std::vector< bool > flaggedValues;
	template< class Real > void _init( const SparseMatrix< Real , int >& M );
	template< class Real > bool _update( const SparseMatrix< Real , int >& M );
public:
	static cholmod_common cholmod_C;
	static bool cholmod_C_set;

	template< class Real >
	CholmodSolver( const SparseMatrix< Real , int >& M );
	~CholmodSolver( void );

	template< class Real >
	void solve( ConstPointer( Real ) b , Pointer( Real ) x );
	int nonZeros( void ) const;
};
bool CholmodSolver::cholmod_C_set = false;
cholmod_common CholmodSolver::cholmod_C;

template< class Real > CholmodSolver::CholmodSolver( const SparseMatrix< Real , int >& M ){ _init( M ) , _update( M ); }
template< class Real >
void CholmodSolver::_init( const SparseMatrix< Real , int >& M )
{
	{
		if( !cholmod_C_set ) CHOLMOD(start)( &cholmod_C );
		cholmod_C_set = true;
	}
	dim = M.rows;

	int maxEntries;
	if( LOWER_TRIANGULAR )
	{
		maxEntries = (int)( ( M.Entries()-M.rows ) / 2 + M.rows );
		cholmod_M = CHOLMOD(allocate_sparse)( dim , dim , maxEntries , 0 , 1 , -1 , CHOLMOD_REAL , &cholmod_C );
	}
	else
	{
		maxEntries = (int)M.Entries();
		cholmod_M = CHOLMOD(allocate_sparse)( dim , dim , maxEntries , 0 , 1 , 0 , CHOLMOD_REAL , &cholmod_C );
	}
	cholmod_M->i = malloc( sizeof( SOLVER_LONG ) * maxEntries );
	cholmod_M->x = malloc( sizeof( double ) * maxEntries );
	SOLVER_LONG *_p = (SOLVER_LONG*)cholmod_M->p;
	SOLVER_LONG *_i = (SOLVER_LONG*)cholmod_M->i;
	int off =
0; dim = 0; for( int i=0 ; i<M.rows ; i++ ) { _p[dim++] = off; for( int j=0 ; j<M.rowSizes[i] ; j++ ) if( !LOWER_TRIANGULAR || M[i][j].N>=i ) _i[off++] = M[i][j].N; } _p[dim] = off; cholmod_L = CHOLMOD(analyze)( cholmod_M , &cholmod_C ); cholmod_b = CHOLMOD(allocate_dense)( dim , 1 , dim , cholmod_M->xtype , &cholmod_C ); } template< class Real > bool CholmodSolver::_update( const SparseMatrix< Real , int >& M ) { double *_x = (double*)cholmod_M->x; int off = 0; SOLVER_LONG *_p = (SOLVER_LONG*)cholmod_M->p; #pragma omp parallel for for( int i=0 ; i<M.rows ; i++ ) { int off = (int)_p[i]; for( int j=0 ; j<M.rowSizes[i] ; j++ ) if( !LOWER_TRIANGULAR || M[i][j].N>=i ) _x[off++] = double( M[i][j].Value ); } cholmod_C.print = 0; CHOLMOD(factorize)( cholmod_M , cholmod_L , &cholmod_C ); if( cholmod_C.status==CHOLMOD_NOT_POSDEF ) { fprintf( stderr , "[WARNING] Matrix not positive-definite\n" ); return false; } else if( cholmod_C.status==CHOLMOD_OUT_OF_MEMORY ) { fprintf( stderr , "[WARNING] CHOLMOD ran out of memory\n" ); return false; } else if( cholmod_C.status!=CHOLMOD_OK ) { fprintf( stderr , "[WARNING] CHOLMOD status not OK: %d\n" , cholmod_C.status ); return false; } return true; } CholmodSolver::~CholmodSolver( void ) { if( cholmod_L ) CHOLMOD(free_factor)( &cholmod_L , &cholmod_C ) , cholmod_L = NULL; if( cholmod_b ) CHOLMOD(free_dense )( &cholmod_b , &cholmod_C ) , cholmod_b = NULL; if( cholmod_M ) CHOLMOD(free_sparse)( &cholmod_M , &cholmod_C ) , cholmod_M = NULL; } template< class Real > void CholmodSolver::solve( ConstPointer( Real ) b , Pointer( Real ) x ) { double* _b = (double*)cholmod_b->x; for( int i=0 ; i<dim ; i++ ) _b[i] = (double)b[i]; cholmod_dense* cholmod_x = CHOLMOD(solve)( CHOLMOD_A , cholmod_L , cholmod_b , &cholmod_C ); double* _x = (double*)cholmod_x->x; for( int i=0 ; i<dim ; i++ ) x[i] = (Real)_x[i]; CHOLMOD(free_dense)( &cholmod_x , &cholmod_C ); } int CholmodSolver::nonZeros( void ) const { long long nz = 0; if( cholmod_L->xtype != CHOLMOD_PATTERN && !(cholmod_L->is_super ) ) for( int i=0 ; i<cholmod_L->n ; i++ ) nz += ((SOLVER_LONG*)cholmod_L->nz)[i]; bool examine_super = false; if( cholmod_L->xtype != CHOLMOD_PATTERN ) examine_super = true ; else examine_super = ( ((int*)cholmod_L->s)[0] != (-1)); if( examine_super ) { /* check and print each supernode */ for (int s = 0 ; s < cholmod_L->nsuper ; s++) { int k1 = ((int*)cholmod_L->super) [s] ; int k2 = ((int*)cholmod_L->super) [s+1] ; int psi = ((int*)cholmod_L->pi)[s] ; int psend = ((int*)cholmod_L->pi)[s+1] ; int nsrow = psend - psi ; int nscol = k2 - k1 ; nz += nscol * nsrow - (nscol*nscol - nscol)/2 ; } } return (int)nz; } #endif // USE_CHOLMOD
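// ---------------------------------------------------------------------------
// Editor's usage sketch (hypothetical, not part of this header): SolveCG only
// requires that SPDOperator expose operator()( const Real* , Real* ) applying
// a symmetric positive-definite operator. A diagonal operator is the simplest
// such example; all names below are local to this sketch.
struct ExampleDiagonalOperator
{
	const double* diagonal;
	int dim;
	void operator()( const double* in , double* out ) const { for( int i=0 ; i<dim ; i++ ) out[i] = diagonal[i] * in[i]; }
};
inline void ExampleSolveCG( void )
{
	const int dim = 4;
	double diagonal[] = { 1. , 2. , 3. , 4. };
	double b[] = { 1. , 1. , 1. , 1. } , x[] = { 0. , 0. , 0. , 0. };	// start from the zero guess
	ExampleDiagonalOperator L;
	L.diagonal = diagonal , L.dim = dim;
	SolveCG( L , 100 , dim , b , x );	// on convergence, x[i] ~ b[i] / diagonal[i]
}
// ---------------------------------------------------------------------------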
correlation.c
/** * correlation.c This file was adapted from PolyBench/GPU 1.0 test suite * to run on GPU with OpenMP 4.0 pragmas and OpenCL driver. * * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU * * Contacts: Marcio M Pereira <mpereira@ic.unicamp.br> * Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br> * Luís Felipe Mattos <ra107822@students.ic.unicamp.br> */ #include <assert.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef _OPENMP #include <omp.h> #endif #include "BenchmarksUtil.h" // define the error threshold for the results "not matching" #define ERROR_THRESHOLD 1.05 /* Problem size. */ #ifdef RUN_TEST #define SIZE 1100 #elif RUN_BENCHMARK #define SIZE 9600 #else #define SIZE 1000 #endif /* Problem size */ #define M SIZE #define N SIZE #define sqrt_of_array_cell(x, j) sqrt(x[j]) #define FLOAT_N 3214212.01f #define EPS 0.005f /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void init_arrays(DATA_TYPE *data) { int i, j; for (i = 0; i < (M + 1); i++) { for (j = 0; j < (N + 1); j++) { data[i * (N + 1) + j] = ((DATA_TYPE)i * j) / (M + 1); } } } void correlation(DATA_TYPE *data, DATA_TYPE *mean, DATA_TYPE *stddev, DATA_TYPE *symmat) { int i, j, j1, j2; // Determine mean of column vectors of input data matrix for (j = 1; j < (M + 1); j++) { mean[j] = 0.0; for (i = 1; i < (N + 1); i++) { mean[j] += data[i * (M + 1) + j]; } mean[j] /= (DATA_TYPE)FLOAT_N; } // Determine standard deviations of column vectors of data matrix. for (j = 1; j < (M + 1); j++) { stddev[j] = 0.0; for (i = 1; i < (N + 1); i++) { stddev[j] += (data[i * (M + 1) + j] - mean[j]) * (data[i * (M + 1) + j] - mean[j]); } stddev[j] /= FLOAT_N; stddev[j] = sqrt_of_array_cell(stddev, j); stddev[j] = stddev[j] <= EPS ? 1.0 : stddev[j]; } // i - threadIdx.x, j = threadIdx.y // Center and reduce the column vectors. for (i = 1; i < (N + 1); i++) { for (j = 1; j < (M + 1); j++) { data[i * (M + 1) + j] -= mean[j]; data[i * (M + 1) + j] /= (sqrt(FLOAT_N) * stddev[j]); } } // Calculate the m * m correlation matrix. for (j1 = 1; j1 < M; j1++) { symmat[j1 * (M + 1) + j1] = 1.0; for (j2 = j1 + 1; j2 < (M + 1); j2++) { symmat[j1 * (M + 1) + j2] = 0.0; for (i = 1; i < (N + 1); i++) { symmat[j1 * (M + 1) + j2] += (data[i * (M + 1) + j1] * data[i * (M + 1) + j2]); } symmat[j2 * (M + 1) + j1] = symmat[j1 * (M + 1) + j2]; } } symmat[M * (M + 1) + M] = 1.0; } void correlation_OMP(DATA_TYPE *data, DATA_TYPE *mean, DATA_TYPE *stddev, DATA_TYPE *symmat) { int i, j, k; #pragma omp target data map(to: data[:(M+1)*(N+1)], mean[:(M+1)], stddev[:(M+1)]) map(tofrom: symmat[:(M+1)*(N+1)]) device(DEVICE_ID) { // Determine mean of column vectors of input data matrix #pragma omp target teams distribute parallel for private(i) device(DEVICE_ID) for (j = 1; j < (M + 1); j++) { mean[j] = 0.0; for (i = 1; i < (N + 1); i++) { mean[j] += data[i * (M + 1) + j]; } mean[j] /= (DATA_TYPE)FLOAT_N; } // Determine standard deviations of column vectors of data matrix. #pragma omp target teams distribute parallel for private(i) device(DEVICE_ID) for (j = 1; j < (M + 1); j++) { stddev[j] = 0.0; for (i = 1; i < (N + 1); i++) { stddev[j] += (data[i * (M + 1) + j] - mean[j]) * (data[i * (M + 1) + j] - mean[j]); } stddev[j] /= FLOAT_N; stddev[j] = sqrt(stddev[j]); if (stddev[j] <= EPS) { stddev[j] = 1.0; } } // Center and reduce the column vectors. 
#pragma omp target teams distribute parallel for collapse(2) device(DEVICE_ID)
    for (i = 1; i < (N + 1); i++) {
      for (j = 1; j < (M + 1); j++) {
        data[i * (M + 1) + j] -= mean[j];
        data[i * (M + 1) + j] /= (sqrt(FLOAT_N) * stddev[j]);
      }
    }

    // Calculate the m * m correlation matrix.
#pragma omp target teams distribute parallel for private(j, i) device(DEVICE_ID)
    for (k = 1; k < M; k++) {
      symmat[k * (M + 1) + k] = 1.0;
      for (j = k + 1; j < (M + 1); j++) {
        symmat[k * (M + 1) + j] = 0.0;
        for (i = 1; i < (N + 1); i++) {
          symmat[k * (M + 1) + j] +=
              (data[i * (M + 1) + k] * data[i * (M + 1) + j]);
        }
        symmat[j * (M + 1) + k] = symmat[k * (M + 1) + j];
      }
    }
  }
  symmat[M * (M + 1) + M] = 1.0;
}

int compareResults(DATA_TYPE *symmat, DATA_TYPE *symmat_outputFromGpu) {
  int i, j, fail;
  fail = 0;

  for (i = 1; i < (M + 1); i++) {
    for (j = 1; j < (N + 1); j++) {
      if (percentDiff(symmat[i * (N + 1) + j],
                      symmat_outputFromGpu[i * (N + 1) + j]) >
          ERROR_THRESHOLD) {
        fail++;
        // printf("i: %d j: %d\n1: %f 2: %f\n", i, j, symmat[i*N + j],
        //        symmat_GPU[i*N + j]);
      }
    }
  }

  // print results
  printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
         "Percent: %d\n",
         ERROR_THRESHOLD, fail);

  return fail;
}

int main() {
  double t_start, t_end;
  int fail = 0;

  DATA_TYPE *data;
  DATA_TYPE *mean;
  DATA_TYPE *stddev;
  DATA_TYPE *symmat;
  DATA_TYPE *symmat_GPU;

  data = (DATA_TYPE *)malloc((M + 1) * (N + 1) * sizeof(DATA_TYPE));
  mean = (DATA_TYPE *)malloc((M + 1) * sizeof(DATA_TYPE));
  stddev = (DATA_TYPE *)malloc((M + 1) * sizeof(DATA_TYPE));
  symmat = (DATA_TYPE *)malloc((M + 1) * (N + 1) * sizeof(DATA_TYPE));
  symmat_GPU = (DATA_TYPE *)malloc((M + 1) * (N + 1) * sizeof(DATA_TYPE));

  fprintf(stdout, "<< Correlation Computation >>\n");

  init_arrays(data);

  t_start = rtclock();
  correlation_OMP(data, mean, stddev, symmat_GPU);
  t_end = rtclock();
  fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);

#ifdef RUN_TEST
  init_arrays(data);

  t_start = rtclock();
  correlation(data, mean, stddev, symmat);
  t_end = rtclock();
  fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);

  fail = compareResults(symmat, symmat_GPU);
#endif

  free(data);
  free(mean);
  free(stddev);
  free(symmat);
  free(symmat_GPU);

  return fail;
}
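/* Editor's illustrative helper (not part of the original benchmark): a direct
 * Pearson correlation between columns j1 and j2, computed from the original
 * (uncentered) data matrix, usable to spot-check individual entries of
 * symmat. Note that the kernels above use the compile-time constant FLOAT_N
 * as the sample count; the two computations agree when FLOAT_N matches the
 * actual number of samples N. The function name is local to this sketch. */
static DATA_TYPE pearson_check(const DATA_TYPE *data, int j1, int j2) {
  DATA_TYPE m1 = 0, m2 = 0, s1 = 0, s2 = 0, cov = 0;
  int i;
  /* column means */
  for (i = 1; i < (N + 1); i++) {
    m1 += data[i * (M + 1) + j1];
    m2 += data[i * (M + 1) + j2];
  }
  m1 /= (DATA_TYPE)N;
  m2 /= (DATA_TYPE)N;
  /* centered second moments and covariance; normalization cancels in the ratio */
  for (i = 1; i < (N + 1); i++) {
    DATA_TYPE d1 = data[i * (M + 1) + j1] - m1;
    DATA_TYPE d2 = data[i * (M + 1) + j2] - m2;
    s1 += d1 * d1;
    s2 += d2 * d2;
    cov += d1 * d2;
  }
  return cov / (DATA_TYPE)sqrt((double)s1 * (double)s2);
}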
WaveFunctionComponent.h
//////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Ken Esler, kpesler@gmail.com, University of Illinois at Urbana-Champaign
//                    Miguel Morales, moralessilva2@llnl.gov, Lawrence Livermore National Laboratory
//                    Jeremy McMinnis, jmcminis@gmail.com, University of Illinois at Urbana-Champaign
//                    Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
//                    Raymond Clay III, j.k.rofling@gmail.com, Lawrence Livermore National Laboratory
//                    Mark A. Berrill, berrillma@ornl.gov, Oak Ridge National Laboratory
//
// File created by: Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
//////////////////////////////////////////////////////////////////////////////////////

#ifndef QMCPLUSPLUS_WAVEFUNCTIONCOMPONENT_H
#define QMCPLUSPLUS_WAVEFUNCTIONCOMPONENT_H

#include "Message/Communicate.h"
#include "Configuration.h"
#include "Particle/ParticleSet.h"
#include "Particle/VirtualParticleSet.h"
#include "Particle/DistanceTableData.h"
#include "OhmmsData/RecordProperty.h"
#include "QMCWaveFunctions/OrbitalSetTraits.h"
#include "Particle/MCWalkerConfiguration.h"
#include "type_traits/template_types.hpp"
#ifdef QMC_CUDA
#include "type_traits/CUDATypes.h"
#endif

/**@file WaveFunctionComponent.h
 *@brief Declaration of WaveFunctionComponent
 */
namespace qmcplusplus
{
#ifdef QMC_CUDA
struct NLjob
{
  int walker;
  int elec;
  int numQuadPoints;
  NLjob(int w, int e, int n) : walker(w), elec(e), numQuadPoints(n) {}
};
#endif

///forward declaration of WaveFunctionComponent
class WaveFunctionComponent;
///forward declaration of DiffWaveFunctionComponent
class DiffWaveFunctionComponent;

typedef WaveFunctionComponent* WaveFunctionComponentPtr;
typedef DiffWaveFunctionComponent* DiffWaveFunctionComponentPtr;

/**@defgroup WaveFunctionComponent group
 * @brief Classes which constitute a many-body trial wave function
 *
 * A many-body trial wave function is
 * \f[
 *  \Psi(\{ {\bf R}\}) = \prod_i \psi_{i}(\{ {\bf R}\}),
 * \f]
 * where \f$\Psi\f$s are represented by
 * the derived classes from WaveFunctionComponent.
 */
/** @ingroup WaveFunctionComponent
 * @brief An abstract class for a component of a many-body trial wave function
 *
 * The mw_ prefix in a function name signals that it handles a batch of
 * WaveFunctionComponent objects, which are required to be base-class pointers
 * of the same derived class type.
 * All the mw_ routines must be implemented either statelessly or so that they
 * maintain the state of every walker.
 */
struct WaveFunctionComponent : public QMCTraits
{
  /** enum for an update mode */
  enum
  {
    ORB_PBYP_RATIO,   /*!< particle-by-particle ratio only */
    ORB_PBYP_ALL,     /*!< particle-by-particle, update Value-Gradient-Laplacian */
    ORB_PBYP_PARTIAL, /*!< particle-by-particle, update Value and Gradient */
    ORB_WALKER,       /*!< walker update */
    ORB_ALLWALKER     /*!< all walkers update */
  };

  typedef ParticleAttrib<ValueType> ValueVectorType;
  typedef ParticleAttrib<GradType> GradVectorType;
  typedef ParticleSet::Walker_t Walker_t;
  typedef Walker_t::WFBuffer_t WFBufferType;
  typedef Walker_t::Buffer_t BufferType;
  typedef OrbitalSetTraits<RealType>::ValueMatrix_t RealMatrix_t;
  typedef OrbitalSetTraits<ValueType>::ValueMatrix_t ValueMatrix_t;
  typedef OrbitalSetTraits<ValueType>::GradMatrix_t GradMatrix_t;
  typedef OrbitalSetTraits<ValueType>::HessType HessType;
  typedef OrbitalSetTraits<ValueType>::HessVector_t HessVector_t;

  // the value type for log(psi)
  using LogValueType = std::complex<QTFull::RealType>;
  // the value type for psi(r')/psi(r)
  using PsiValueType = QTFull::ValueType;

  /** flag to set the optimization mode */
  bool IsOptimizing;
  /** boolean to set optimization
   *
   * If true, this object is actively modified during optimization
   */
  bool Optimizable;
  /** true, if this component is fermionic */
  bool is_fermionic;
  /** current update mode */
  int UpdateMode;
  /** current \f$\log\phi \f$ */
  RealType LogValue;
  /** current phase */
  RealType PhaseValue;
  /** Pointer to the differential WaveFunctionComponent of this object
   *
   * If dPsi=0, this WaveFunctionComponent is constant with respect to the optimizable variables
   */
  DiffWaveFunctionComponentPtr dPsi;
  /** A vector for \f$ \frac{\partial \nabla \log\phi}{\partial \alpha} \f$ */
  GradVectorType dLogPsi;
  /** A vector for \f$ \frac{\partial \nabla^2 \log\phi}{\partial \alpha} \f$ */
  ValueVectorType d2LogPsi;
  /** Name of the class derived from WaveFunctionComponent */
  std::string ClassName;
  ///list of variables this WaveFunctionComponent handles
  opt_variables_type myVars;
  ///Bytes in WFBuffer
  size_t Bytes_in_WFBuffer;

  /// default constructor
  WaveFunctionComponent();
  //WaveFunctionComponent(const WaveFunctionComponent& old);

  ///default destructor
  virtual ~WaveFunctionComponent() {}

  inline void setOptimizable(bool optimizeit) { Optimizable = optimizeit; }

  virtual void resetPhaseDiff() {}

  ///assign a differential WaveFunctionComponent
  virtual void setDiffOrbital(DiffWaveFunctionComponentPtr d);

  ///assembles the full value from LogValue and PhaseValue
  ValueType getValue() const
  {
#if defined(QMC_COMPLEX)
    RealType ratioMag = std::exp(LogValue);
    return ValueType(std::cos(PhaseValue) * ratioMag, std::sin(PhaseValue) * ratioMag);
#else
    return std::exp(LogValue);
#endif
  }

  /** check in optimizable parameters
   * @param active a super set of optimizable variables
   *
   * Add the parameters this WaveFunctionComponent manages to active.
   */
  virtual void checkInVariables(opt_variables_type& active) = 0;

  /** check out optimizable variables
   *
   * Update myVars index map
   */
  virtual void checkOutVariables(const opt_variables_type& active) = 0;

  /** reset the parameters during optimizations
   */
  virtual void resetParameters(const opt_variables_type& active) = 0;

  /** print the state, e.g., optimizables */
  virtual void reportStatus(std::ostream& os) = 0;

  /** reset properties, e.g., distance tables, for a new target ParticleSet
   * @param P ParticleSet
   */
  virtual void resetTargetParticleSet(ParticleSet& P) = 0;

  /** evaluate the value of the WaveFunctionComponent from scratch
   * @param P active ParticleSet
   * @param G Gradients, \f$\nabla\ln\Psi\f$
   * @param L Laplacians, \f$\nabla^2\ln\Psi\f$
   * @return the log value
   *
   * Mainly for walker-by-walker move. The initial stage of particle-by-particle
   * move also uses this.
   */
  virtual RealType evaluateLog(ParticleSet& P,
                               ParticleSet::ParticleGradient_t& G,
                               ParticleSet::ParticleLaplacian_t& L) = 0;

  /** evaluate from scratch the same type of WaveFunctionComponent of multiple walkers
   * @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
   * @param P_list the list of ParticleSet pointers in a walker batch
   * @param G_list the list of Gradients pointers in a walker batch, \f$\nabla\ln\Psi\f$
   * @param L_list the list of Laplacians pointers in a walker batch, \f$\nabla^2\ln\Psi\f$
   */
  virtual void mw_evaluateLog(const std::vector<WaveFunctionComponent*>& WFC_list,
                              const std::vector<ParticleSet*>& P_list,
                              const std::vector<ParticleSet::ParticleGradient_t*>& G_list,
                              const std::vector<ParticleSet::ParticleLaplacian_t*>& L_list)
  {
#pragma omp parallel for
    for (int iw = 0; iw < WFC_list.size(); iw++)
      WFC_list[iw]->evaluateLog(*P_list[iw], *G_list[iw], *L_list[iw]);
  }

  /** recompute the value of the WaveFunctionComponents which require critical accuracy.
   * needed for Slater Determinants but not needed for most types of WaveFunctionComponents
   */
  virtual void recompute(ParticleSet& P) {}

  // virtual void evaluateHessian(ParticleSet& P, IndexType iat, HessType& grad_grad_psi)
  // {
  //   APP_ABORT("WaveFunctionComponent::evaluateHessian is not implemented");
  // }

  virtual void evaluateHessian(ParticleSet& P, HessVector_t& grad_grad_psi_all)
  {
    APP_ABORT("WaveFunctionComponent::evaluateHessian is not implemented in " + ClassName + " class.");
  }

  /** return the current gradient for the iat-th particle
   * @param P quantum particle set
   * @param iat particle index
   * @return the gradient of the iat-th particle
   */
  virtual GradType evalGrad(ParticleSet& P, int iat)
  {
    APP_ABORT("WaveFunctionComponent::evalGrad is not implemented in " + ClassName + " class.");
    return GradType();
  }

  /** compute the current gradients for the iat-th particle of multiple walkers
   * @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
   * @param P_list the list of ParticleSet pointers in a walker batch
   * @param iat particle index
   * @param grad_now the list of gradients in a walker batch, \f$\nabla\ln\Psi\f$
   */
  virtual void mw_evalGrad(const std::vector<WaveFunctionComponent*>& WFC_list,
                           const std::vector<ParticleSet*>& P_list,
                           int iat,
                           std::vector<GradType>& grad_now)
  {
#pragma omp parallel for
    for (int iw = 0; iw < WFC_list.size(); iw++)
      grad_now[iw] = WFC_list[iw]->evalGrad(*P_list[iw], iat);
  }

  /** compute the current gradients for the iat-th particle of multiple walkers
   * @param WFC_list the list of WaveFunctionComponent references of the same component in a walker batch
   * @param P_list the list of ParticleSet references in a walker batch
   * @param iat particle index
   * @param grad_now the list of gradients in a walker batch, \f$\nabla\ln\Psi\f$
   */
  virtual void mw_evalGrad(const std::vector<std::reference_wrapper<WaveFunctionComponent>>& WFC_list,
                           const std::vector<std::reference_wrapper<ParticleSet>>& P_list,
                           int iat,
                           std::vector<GradType>& grad_now)
  {
#pragma omp parallel for
    for (int iw = 0; iw < WFC_list.size(); iw++)
      grad_now[iw] = WFC_list[iw].get().evalGrad(P_list[iw].get(), iat);
  }

  /** return the logarithmic gradient for the iat-th particle
   * of the source particleset
   * @param P quantum particle set
   * @param iat particle index
   * @return the gradient of the iat-th particle
   */
  virtual GradType evalGradSource(ParticleSet& P, ParticleSet& source, int iat)
  {
    // unit_test_hamiltonian calls this function incorrectly; do not abort for now
    // APP_ABORT("WaveFunctionComponent::evalGradSource is not implemented");
    return GradType();
  }

  /** Adds the gradient w.r.t. the iat-th particle of the
   * source particleset (ions) of the logarithmic gradient
   * and laplacian w.r.t. the target particleset (electrons).
   * @param P quantum particle set (electrons)
   * @param source classical particle set (ions)
   * @param iat particle index of source (ion)
   * @param grad_grad the ion gradient of the electron gradient
   * @param lapl_grad the ion gradient of the electron laplacian.
   * @return the log gradient of psi w.r.t. the source particle iat
   */
  virtual GradType evalGradSource(ParticleSet& P,
                                  ParticleSet& source,
                                  int iat,
                                  TinyVector<ParticleSet::ParticleGradient_t, OHMMS_DIM>& grad_grad,
                                  TinyVector<ParticleSet::ParticleLaplacian_t, OHMMS_DIM>& lapl_grad)
  {
    return GradType();
  }

  /** evaluate the ratio of the new to old WaveFunctionComponent value and the new gradient
   * @param P the active ParticleSet
   * @param iat the index of a particle
   * @param grad_iat Gradient for the active particle
   */
  virtual ValueType ratioGrad(ParticleSet& P, int iat, GradType& grad_iat)
  {
    APP_ABORT("WaveFunctionComponent::ratioGrad is not implemented in " + ClassName + " class.");
    return ValueType();
  }

  /** compute the ratio of the new to old WaveFunctionComponent value and the new gradient of multiple walkers
   * @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
   * @param P_list the list of ParticleSet pointers in a walker batch
   * @param iat particle index
   * @param ratios the list of WF ratios of a walker batch, \f$ \Psi( \{ {\bf R}^{'} \} )/ \Psi( \{ {\bf R}\})\f$
   * @param grad_new the list of new gradients in a walker batch, \f$\nabla\ln\Psi\f$
   */
  virtual void mw_ratioGrad(const std::vector<WaveFunctionComponent*>& WFC_list,
                            const std::vector<ParticleSet*>& P_list,
                            int iat,
                            std::vector<PsiValueType>& ratios,
                            std::vector<GradType>& grad_new)
  {
#pragma omp parallel for
    for (int iw = 0; iw < WFC_list.size(); iw++)
      ratios[iw] = WFC_list[iw]->ratioGrad(*P_list[iw], iat, grad_new[iw]);
  }

  /** compute the ratio of the new to old WaveFunctionComponent value and the new gradient of multiple walkers
   * @param WFC_list the list of WaveFunctionComponent references of the same component in a walker batch
   * @param P_list the list of ParticleSet references in a walker batch
   * @param iat particle index
   * @param ratios the list of WF ratios of a walker batch, \f$ \Psi( \{ {\bf R}^{'} \} )/ \Psi( \{ {\bf R}\})\f$
   * @param grad_new the list of new gradients in a walker batch, \f$\nabla\ln\Psi\f$
   */
  virtual void mw_ratioGrad(const RefVector<WaveFunctionComponent>& WFC_list,
                            const RefVector<ParticleSet>& P_list,
                            int iat,
                            std::vector<PsiValueType>& ratios,
                            std::vector<GradType>& grad_new)
  {
    //#pragma omp parallel for
    for (int iw = 0; iw < WFC_list.size(); iw++)
      ratios[iw] = WFC_list[iw].get().ratioGrad(P_list[iw], iat, grad_new[iw]);
  }

  /** a move for the iat-th particle is accepted. Update the current content.
   * @param P target ParticleSet
   * @param iat index of the particle whose new position was proposed
   */
  virtual void acceptMove(ParticleSet& P, int iat) = 0;

  /** moves of the iat-th particle on some walkers in a batch are accepted. Update the current content.
   * Note that all the lists only include accepted walkers.
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch * @param P_list the list of ParticleSet pointers in a walker batch * @param iat particle index */ virtual void mw_acceptMove(const std::vector<WaveFunctionComponent*>& WFC_list, const std::vector<ParticleSet*>& P_list, int iat) { #pragma omp parallel for for (int iw = 0; iw < WFC_list.size(); iw++) WFC_list[iw]->acceptMove(*P_list[iw], iat); } /** complete all the delayed updates, must be called after each substep or step during pbyp move */ virtual void completeUpdates() {} /** complete all the delayed updates for all the walkers in a batch * must be called after each substep or step during pbyp move */ virtual void mw_completeUpdates(const std::vector<WaveFunctionComponent*>& WFC_list) { #pragma omp parallel for for (int iw = 0; iw < WFC_list.size(); iw++) WFC_list[iw]->completeUpdates(); } /** If a move for iat-th particle is rejected, restore to the content. * @param iat index of the particle whose new position was proposed * * Ye: hopefully we can gradually move away from restore */ virtual void restore(int iat) = 0; /** If a move for iat-th particle on some walkers in a batch is rejected, restore their contents * Note that all the lists only include rejected walkers. * @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch * @param iat index of the particle whose new position was proposed * * Ye: hopefully we can gradually move away from restore */ virtual void mw_restore(const std::vector<WaveFunctionComponent*>& WFC_list, int iat) { //#pragma omp parallel for for (int iw = 0; iw < WFC_list.size(); iw++) WFC_list[iw]->restore(iat); } /** evaluate the ratio of the new to old WaveFunctionComponent value * @param P the active ParticleSet * @param iat the index of a particle * @return \f$ \psi( \{ {\bf R}^{'} \} )/ \psi( \{ {\bf R}\})\f$ * * Specialized for particle-by-particle move */ virtual ValueType ratio(ParticleSet& P, int iat) = 0; /** compute the ratio of the new to old WaveFunctionComponent value of multiple walkers * @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch * @param P_list the list of ParticleSet pointers in a walker batch * @param iat particle index * @param ratios the list of WF ratios of a walker batch, \f$ \Psi( \{ {\bf R}^{'} \} )/ \Psi( \{ {\bf R}\})\f$ */ virtual void mw_calcRatio(const std::vector<WaveFunctionComponent*>& WFC_list, const std::vector<ParticleSet*>& P_list, int iat, std::vector<PsiValueType>& ratios) { #pragma omp parallel for for (int iw = 0; iw < WFC_list.size(); iw++) ratios[iw] = WFC_list[iw]->ratio(*P_list[iw], iat); } /** compute the ratio of the new to old WaveFunctionComponent value of multiple walkers * @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch * @param P_list the list of ParticleSet pointers in a walker batch * @param iat particle index * @param ratios the list of WF ratios of a walker batch, \f$ \Psi( \{ {\bf R}^{'} \} )/ \Psi( \{ {\bf R}\})\f$ */ virtual void mw_calcRatio(const RefVector<WaveFunctionComponent>& WFC_list, const RefVector<ParticleSet>& P_list, int iat, std::vector<PsiValueType>& ratios) { //#pragma omp parallel for for (int iw = 0; iw < WFC_list.size(); iw++) ratios[iw] = WFC_list[iw].get().ratio(P_list[iw], iat); } /** For particle-by-particle move. Requests space in the buffer * based on the data type sizes of the objects in this class. 
* @param P particle set * @param buf Anonymous storage */ virtual void registerData(ParticleSet& P, WFBufferType& buf) = 0; /** For particle-by-particle move. Requests space in the buffer * based on the data type sizes of the objects in this class. * @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch * @param P_list the list of ParticleSet pointers in a walker batch * @param buf_list Anonymous storage */ virtual void mw_registerData(const std::vector<WaveFunctionComponent*>& WFC_list, const std::vector<ParticleSet*>& P_list, const std::vector<WFBufferType*>& buf_list) { // We can't make this static but we can use a lambda with no capture to // restrict access to *this scope auto registerComponentData = [](WaveFunctionComponent& wfc, ParticleSet& pset, WFBufferType& wfb) { wfc.registerData(pset, wfb); }; for (int iw = 0; iw < WFC_list.size(); iw++) registerComponentData(*(WFC_list[iw]), *(P_list[iw]), *(buf_list[iw])); } /** For particle-by-particle move. Put the objects of this class * in the walker buffer or forward the memory cursor. * @param P particle set * @param buf Anonymous storage * @param fromscratch request recomputing the precision critical * pieces of wavefunction from scratch * @return log value of the wavefunction. */ virtual RealType updateBuffer(ParticleSet& P, WFBufferType& buf, bool fromscratch = false) = 0; /** For particle-by-particle move. Put the objects of this class * in the walker buffer or forward the memory cursor. * @param WFC_list the list of WaveFunctionComponent references of the same component in a walker batch * @param P_list the list of ParticleSet references in a walker batch * @param buf_list Anonymous storage * @param fromscratch request recomputing the precision critical * pieces of wavefunction from scratch */ virtual void mw_updateBuffer(const RefVector<WaveFunctionComponent>& WFC_list, const RefVector<ParticleSet>& P_list, const RefVector<WFBufferType>& buf_list, bool fromscratch = false) { #pragma omp parallel for for (int iw = 0; iw < WFC_list.size(); iw++) WFC_list[iw].get().updateBuffer(P_list[iw], buf_list[iw], fromscratch); } /** For particle-by-particle move. Copy data or attach memory * from a walker buffer to the objects of this class. * The log value, P.G and P.L contribution from the objects * of this class are also added. * @param P particle set * @param buf Anonymous storage */ virtual void copyFromBuffer(ParticleSet& P, WFBufferType& buf) = 0; /** For particle-by-particle move. Copy data or attach memory * from a walker buffer to the objects of this class. * @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch * @param P_list the list of ParticleSet pointers in a walker batch * @param buf_list Anonymous storage */ virtual void mw_copyFromBuffer(const std::vector<WaveFunctionComponent*>& WFC_list, const std::vector<ParticleSet*>& P_list, const std::vector<WFBufferType*>& buf_list) { #pragma omp parallel for for (int iw = 0; iw < WFC_list.size(); iw++) WFC_list[iw]->copyFromBuffer(*P_list[iw], *buf_list[iw]); } /** make clone * @param tqp target Quantum ParticleSet */ virtual WaveFunctionComponentPtr makeClone(ParticleSet& tqp) const; /** Intended as a handle to break * * */ //virtual WaveFunctionComponentPtr makeThrScope(std::vector<std::pair<int,int>>& ptcl_group_indexes) const = 0; /** Return the Chiesa kinetic energy correction */ virtual RealType KECorrection(); /** Compute derivatives of the wavefunction with respect to the optimizable * parameters.
* @param P particle set * @param optvars optimizable parameters * @param dlogpsi array of derivatives of the log of the wavefunction * @param dhpsioverpsi array of derivatives of the Laplacian of the wavefunction divided by the wavefunction. * Note that this does not use the Laplacian of the log of the wavefunction, as in evaluateLog. * Also the factor of -1/2 from the kinetic energy must be included here. The 1/m * factor is applied in TrialWaveFunction. */ virtual void evaluateDerivatives(ParticleSet& P, const opt_variables_type& optvars, std::vector<ValueType>& dlogpsi, std::vector<ValueType>& dhpsioverpsi); /** Compute derivatives of the wavefunction with respect to the optimizable * parameters * @param P particle set * @param optvars optimizable parameters * @param dlogpsi array of derivatives of the log of the wavefunction * Note: this function differs from the evaluateDerivatives function in that it only computes * the derivative of the log of the wavefunction. */ virtual void evaluateDerivativesWF(ParticleSet& P, const opt_variables_type& optvars, std::vector<ValueType>& dlogpsi); virtual void multiplyDerivsByOrbR(std::vector<ValueType>& dlogpsi) { RealType myrat = std::exp(LogValue) * std::cos(PhaseValue); for (int j = 0; j < myVars.size(); j++) { int loc = myVars.where(j); dlogpsi[loc] *= myrat; } } /** Calculates the derivatives of \grad(\textrm{log}(\psi)) with respect to the optimizable parameters; the dot product of this with the passed-in G_in gradient vector is then returned as dgradlogpsi. */ virtual void evaluateGradDerivatives(const ParticleSet::ParticleGradient_t& G_in, std::vector<ValueType>& dgradlogpsi) { APP_ABORT("Need specialization of WaveFunctionComponent::evaluateGradDerivatives in " + ClassName + " class.\n"); } virtual void finalizeOptimization() {} /** evaluate the ratios of one virtual move with respect to all the particles * @param P reference particleset * @param ratios \f$ ratios[i]=\{{\bf R}\}\rightarrow {r_0,\cdots,r_i^p=pos,\cdots,r_{N-1}}\f$ */ virtual void evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios); /** evaluate ratios needed by the non-local PP * @param VP VirtualParticleSet * @param ratios ratios at the new positions VP.R[k] of particle VP.refPtcl */ virtual void evaluateRatios(VirtualParticleSet& VP, std::vector<ValueType>& ratios); /** evaluate ratios needed by the non-local PP * @param VP VirtualParticleSet * @param ratios ratios at the new positions VP.R[k] of particle VP.refPtcl * @param dratios \f$\partial_{\alpha}(\ln \Psi ({\bf R}^{\prime}) - \ln \Psi ({\bf R})) \f$ */ virtual void evaluateDerivRatios(VirtualParticleSet& VP, const opt_variables_type& optvars, std::vector<ValueType>& ratios, Matrix<ValueType>& dratios); ///////////////////////////////////////////////////// // Functions for vectorized evaluation and updates // ///////////////////////////////////////////////////// #ifdef QMC_CUDA using CTS = CUDAGlobalTypes; virtual void freeGPUmem() {} virtual void recompute(MCWalkerConfiguration& W, bool firstTime) {} virtual void reserve(PointerPool<gpu::device_vector<CTS::ValueType>>& pool, int kblocksize) {} /** Evaluate the log of the WF for all walkers * @param walkers vector of all walkers * @param logPsi output vector of log(psi) */ virtual void addLog(MCWalkerConfiguration& W, std::vector<RealType>& logPsi) { APP_ABORT("Need specialization of WaveFunctionComponent::addLog for " + ClassName + ".\n Required CUDA functionality not implemented.
Contact developers.\n"); } /** Evaluate the wave-function ratio w.r.t. moving particle iat * for all walkers * @param walkers vector of all walkers * @param iat particle which is moving * @param psi_ratios output vector with psi_new/psi_old */ virtual void ratio(MCWalkerConfiguration& W, int iat, std::vector<ValueType>& psi_ratios) { APP_ABORT("Need specialization of WaveFunctionComponent::ratio for " + ClassName + ".\n Required CUDA functionality not implemented. Contact developers.\n"); } // Returns the WF ratio and gradient w.r.t. iat for each walker // in the respective vectors virtual void ratio(MCWalkerConfiguration& W, int iat, std::vector<ValueType>& psi_ratios, std::vector<GradType>& grad) { APP_ABORT("Need specialization of WaveFunctionComponent::ratio for " + ClassName + ".\n Required CUDA functionality not implemented. Contact developers.\n"); } virtual void ratio(MCWalkerConfiguration& W, int iat, std::vector<ValueType>& psi_ratios, std::vector<GradType>& grad, std::vector<ValueType>& lapl) { APP_ABORT("Need specialization of WaveFunctionComponent::ratio for " + ClassName + ".\n Required CUDA functionality not implemented. Contact developers.\n"); } virtual void calcRatio(MCWalkerConfiguration& W, int iat, std::vector<ValueType>& psi_ratios, std::vector<GradType>& grad, std::vector<ValueType>& lapl) { APP_ABORT("Need specialization of WaveFunctionComponent::calcRatio for " + ClassName + ".\n Required CUDA functionality not implemented. Contact developers.\n"); } virtual void addRatio(MCWalkerConfiguration& W, int iat, int k, std::vector<ValueType>& psi_ratios, std::vector<GradType>& grad, std::vector<ValueType>& lapl) { APP_ABORT("Need specialization of WaveFunctionComponent::addRatio for " + ClassName + ".\n Required CUDA functionality not implemented. Contact developers.\n"); } virtual void ratio(std::vector<Walker_t*>& walkers, std::vector<int>& iatList, std::vector<PosType>& rNew, std::vector<ValueType>& psi_ratios, std::vector<GradType>& grad, std::vector<ValueType>& lapl) { APP_ABORT("Need specialization of WaveFunctionComponent::ratio for " + ClassName + ".\n Required CUDA functionality not implemented. Contact developers.\n"); } virtual void addGradient(MCWalkerConfiguration& W, int iat, std::vector<GradType>& grad) { APP_ABORT("Need specialization of WaveFunctionComponent::addGradient for " + ClassName + ".\n Required CUDA functionality not implemented. Contact developers.\n"); } virtual void calcGradient(MCWalkerConfiguration& W, int iat, int k, std::vector<GradType>& grad) { APP_ABORT("Need specialization of WaveFunctionComponent::calcGradient for " + ClassName + ".\n Required CUDA functionality not implemented. Contact developers.\n"); } virtual void gradLapl(MCWalkerConfiguration& W, GradMatrix_t& grads, ValueMatrix_t& lapl) { APP_ABORT("Need specialization of WaveFunctionComponent::gradLapl for " + ClassName + ".\n Required CUDA functionality not implemented. Contact developers.\n"); } virtual void det_lookahead(MCWalkerConfiguration& W, std::vector<ValueType>& psi_ratios, std::vector<GradType>& grad, std::vector<ValueType>& lapl, int iat, int k, int kd, int nw) { APP_ABORT("Need specialization of WaveFunctionComponent::det_lookahead for " + ClassName + ".\n Required CUDA functionality not implemented. 
Contact developers.\n"); } virtual void update(MCWalkerConfiguration* W, std::vector<Walker_t*>& walkers, int iat, std::vector<bool>* acc, int k) { APP_ABORT("Need specialization of WaveFunctionComponent::update for " + ClassName + ".\n Required CUDA functionality not implemented. Contact developers.\n"); } virtual void update(const std::vector<Walker_t*>& walkers, const std::vector<int>& iatList) { APP_ABORT("Need specialization of WaveFunctionComponent::update for " + ClassName + ".\n Required CUDA functionality not implemented. Contact developers.\n"); } virtual void NLratios(MCWalkerConfiguration& W, std::vector<NLjob>& jobList, std::vector<PosType>& quadPoints, std::vector<ValueType>& psi_ratios) { APP_ABORT("Need specialization of WaveFunctionComponent::NLratios for " + ClassName + ".\n Required CUDA functionality not implemented. Contact developers.\n"); } virtual void NLratios(MCWalkerConfiguration& W, gpu::device_vector<CUDA_PRECISION*>& Rlist, gpu::device_vector<int*>& ElecList, gpu::device_vector<int>& NumCoreElecs, gpu::device_vector<CUDA_PRECISION*>& QuadPosList, gpu::device_vector<CUDA_PRECISION*>& RatioList, int numQuadPoints) { APP_ABORT("Need specialization of WaveFunctionComponent::NLratios for " + ClassName + ".\n Required CUDA functionality not implemented. Contact developers.\n"); } virtual void evaluateDerivatives(MCWalkerConfiguration& W, const opt_variables_type& optvars, RealMatrix_t& dgrad_logpsi, RealMatrix_t& dhpsi_over_psi) { APP_ABORT("Need specialization of WaveFunctionComponent::evaluateDerivatives for " + ClassName + ".\n Required CUDA functionality not implemented. Contact developers.\n"); } #endif }; } // namespace qmcplusplus #endif
knucleotide.c
// The Computer Language Benchmarks Game // http://benchmarksgame.alioth.debian.org/ // // Contributed by Jeremy Zerfas // This controls the maximum length for each set of oligonucleotide frequencies // and each oligonucleotide count output by this program. #define MAXIMUM_OUTPUT_LENGTH 4096 #include <stdint.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <khash.h> // Define a custom hash function to use instead of khash's default hash // function. This custom hash function uses a simpler bit shift and XOR which // results in several percent faster performance compared to when khash's // default hash function is used. #define CUSTOM_HASH_FUNCTION(key) ((key) ^ (key)>>7) KHASH_INIT(oligonucleotide, uint64_t, uint32_t, 1, CUSTOM_HASH_FUNCTION , kh_int64_hash_equal) // intptr_t should be the native integer type on most sane systems. typedef intptr_t intnative_t; typedef struct { uint64_t key; uint32_t value; } element; // Macro to convert a nucleotide character to a code. Note that upper and lower // case ASCII letters only differ in the fifth bit from the right and we only // need the three least significant bits to differentiate the letters 'A', 'C', // 'G', and 'T'. Spaces in this array/string will never be used as long as // characters other than 'A', 'C', 'G', and 'T' aren't used. Note the two // spaces before the '\2': 'G' & 0x7 is 7, so its code must sit at index 7. #define code_For_Nucleotide(nucleotide) (" \0 \1\3  \2"[nucleotide & 0x7]) // And one more macro to convert the codes back to nucleotide characters. #define nucleotide_For_Code(code) ("ACGT"[code & 0x3]) // Function to use when sorting elements with qsort() later. Elements with // larger values will come first and, in cases of identical values, elements // with smaller keys will come first. static int element_Compare(const element * const left_Element , const element * const right_Element){ // Sort based on element values. if(left_Element->value < right_Element->value) return 1; if(left_Element->value > right_Element->value) return -1; // If we got here then both items have the same value, so sort based on // key. return left_Element->key > right_Element->key ? 1 : -1; } // Generate frequencies for all oligonucleotides in polynucleotide that are of // desired_Length_For_Oligonucleotides and then save them to output. static void generate_Frequencies_For_Desired_Length_Oligonucleotides( const char * const polynucleotide, const intnative_t polynucleotide_Length , const intnative_t desired_Length_For_Oligonucleotides, char * const output){ khash_t(oligonucleotide) * hash_Table=kh_init(oligonucleotide); uint64_t key=0; const uint64_t mask=((uint64_t)1<<2*desired_Length_For_Oligonucleotides)-1; // For the first several nucleotides we only need to append them to key in // preparation for the insertion of complete oligonucleotides to hash_Table. for(intnative_t i=0; i<desired_Length_For_Oligonucleotides-1; i++) key=(key<<2 & mask) | polynucleotide[i]; // Add all the complete oligonucleotides of // desired_Length_For_Oligonucleotides to hash_Table and update the count // for each oligonucleotide. for(intnative_t i=desired_Length_For_Oligonucleotides-1 ; i<polynucleotide_Length; i++){ key=(key<<2 & mask) | polynucleotide[i]; int element_Was_Unused; const khiter_t k=kh_put(oligonucleotide, hash_Table, key , &element_Was_Unused); // If the element_Was_Unused, then initialize the count to 1, otherwise // increment the count. if(element_Was_Unused) kh_value(hash_Table, k)=1; else kh_value(hash_Table, k)++; } // Create an array of elements from hash_Table.
intnative_t elements_Array_Size=kh_size(hash_Table), i=0; element * elements_Array=malloc(elements_Array_Size*sizeof(element)); uint32_t value; kh_foreach(hash_Table, key, value , elements_Array[i++]=((element){key, value})); kh_destroy(oligonucleotide, hash_Table); // Sort elements_Array. qsort(elements_Array, elements_Array_Size, sizeof(element) , (int (*)(const void *, const void *)) element_Compare); // Print the frequencies for each oligonucleotide. for(intnative_t output_Position=0, i=0; i<elements_Array_Size; i++){ // Convert the key for the oligonucleotide to a string. char oligonucleotide[desired_Length_For_Oligonucleotides+1]; for(intnative_t j=desired_Length_For_Oligonucleotides-1; j>-1; j--){ oligonucleotide[j]=nucleotide_For_Code(elements_Array[i].key); elements_Array[i].key>>=2; } oligonucleotide[desired_Length_For_Oligonucleotides]='\0'; // Output the frequency for oligonucleotide to output. output_Position+=snprintf(output+output_Position , MAXIMUM_OUTPUT_LENGTH-output_Position, "%s %.3f\n", oligonucleotide , 100.0f*elements_Array[i].value /(polynucleotide_Length-desired_Length_For_Oligonucleotides+1)); } free(elements_Array); } // Generate a count for the number of times oligonucleotide appears in // polynucleotide and then save it to output. static void generate_Count_For_Oligonucleotide( const char * const polynucleotide, const intnative_t polynucleotide_Length , const char * const oligonucleotide, char * const output){ const intnative_t oligonucleotide_Length=strlen(oligonucleotide); khash_t(oligonucleotide) * const hash_Table=kh_init(oligonucleotide); uint64_t key=0; const uint64_t mask=((uint64_t)1<<2*oligonucleotide_Length)-1; // For the first several nucleotides we only need to append them to key in // preparation for the insertion of complete oligonucleotides to hash_Table. for(intnative_t i=0; i<oligonucleotide_Length-1; i++) key=(key<<2 & mask) | polynucleotide[i]; // Add all the complete oligonucleotides of oligonucleotide_Length to // hash_Table and update the count for each oligonucleotide. for(intnative_t i=oligonucleotide_Length-1; i<polynucleotide_Length; i++){ key=(key<<2 & mask) | polynucleotide[i]; int element_Was_Unused; const khiter_t k=kh_put(oligonucleotide, hash_Table, key , &element_Was_Unused); // If the element_Was_Unused, then initialize the count to 1, otherwise // increment the count. if(element_Was_Unused) kh_value(hash_Table, k)=1; else kh_value(hash_Table, k)++; } // Generate the key for oligonucleotide. key=0; for(intnative_t i=0; i<oligonucleotide_Length; i++) key=(key<<2) | code_For_Nucleotide(oligonucleotide[i]); // Output the count for oligonucleotide to output. khiter_t k=kh_get(oligonucleotide, hash_Table, key); uintmax_t count=k==kh_end(hash_Table) ? 0 : kh_value(hash_Table, k); snprintf(output, MAXIMUM_OUTPUT_LENGTH, "%ju\t%s", count, oligonucleotide); kh_destroy(oligonucleotide, hash_Table); } int main(){ char buffer[4096]; // Find the start of the third polynucleotide. while(fgets(buffer, sizeof(buffer), stdin) && memcmp(">THREE", buffer , sizeof(">THREE")-1)); // Start with 1 MB of storage for reading in the polynucleotide and grow // geometrically. intnative_t polynucleotide_Capacity=1048576; intnative_t polynucleotide_Length=0; char * polynucleotide=malloc(polynucleotide_Capacity); // Start reading and encoding the third polynucleotide. 
while(fgets(buffer, sizeof(buffer), stdin) && buffer[0]!='>'){ for(intnative_t i=0; buffer[i]!='\0'; i++) if(buffer[i]!='\n') polynucleotide[polynucleotide_Length++] =code_For_Nucleotide(buffer[i]); // Make sure we still have enough memory allocated for any potential // nucleotides in the next line. if(polynucleotide_Capacity-polynucleotide_Length<sizeof(buffer)) polynucleotide=realloc(polynucleotide, polynucleotide_Capacity*=2); } // Free up any leftover memory. polynucleotide=realloc(polynucleotide, polynucleotide_Length); char output_Buffer[7][MAXIMUM_OUTPUT_LENGTH]; // Do the following functions in parallel. #pragma omp parallel sections { #pragma omp section generate_Count_For_Oligonucleotide(polynucleotide , polynucleotide_Length, "GGTATTTTAATTTATAGT", output_Buffer[6]); #pragma omp section generate_Count_For_Oligonucleotide(polynucleotide , polynucleotide_Length, "GGTATTTTAATT", output_Buffer[5]); #pragma omp section generate_Count_For_Oligonucleotide(polynucleotide , polynucleotide_Length, "GGTATT", output_Buffer[4]); #pragma omp section generate_Count_For_Oligonucleotide(polynucleotide , polynucleotide_Length, "GGTA", output_Buffer[3]); #pragma omp section generate_Count_For_Oligonucleotide(polynucleotide , polynucleotide_Length, "GGT", output_Buffer[2]); #pragma omp section generate_Frequencies_For_Desired_Length_Oligonucleotides(polynucleotide , polynucleotide_Length, 2, output_Buffer[1]); #pragma omp section generate_Frequencies_For_Desired_Length_Oligonucleotides(polynucleotide , polynucleotide_Length, 1, output_Buffer[0]); } // Output the results to stdout. for(intnative_t i=0; i<7; printf("%s\n", output_Buffer[i++])); free(polynucleotide); return 0; } /* NOTES: 64-bit Ubuntu quad core gcc (Ubuntu 6.3.0-12ubuntu2) 6.3.0 20170406 Fri, 14 Apr 2017 17:26:27 GMT MAKE: /usr/bin/gcc -pipe -Wall -O3 -fomit-frame-pointer -march=native -fopenmp -std=c99 -IInclude knucleotide.c -o knucleotide.gcc_run rm knucleotide.c 0.41s to complete and log all make actions COMMAND LINE: ./knucleotide.gcc_run 0 < knucleotide-input25000000.txt PROGRAM OUTPUT: A 30.295 T 30.151 C 19.800 G 19.754 AA 9.177 TA 9.132 AT 9.131 TT 9.091 CA 6.002 AC 6.001 AG 5.987 GA 5.984 CT 5.971 TC 5.971 GT 5.957 TG 5.956 CC 3.917 GC 3.911 CG 3.909 GG 3.902 1471758 GGT 446535 GGTA 47336 GGTATT 893 GGTATTTTAATT 893 GGTATTTTAATTTATAGT */
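/* A minimal standalone sketch (not part of the original benchmark entry) of
   the 2-bit packing used above: code_For_Nucleotide() maps A/C/G/T to the
   codes 0-3, and a k-mer is held in the low 2*k bits of a 64-bit key by
   shifting and masking, exactly as in the hash-table loops. All names below
   are local to this sketch. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CODE_FOR_NUCLEOTIDE(nucleotide) (" \0 \1\3  \2"[(nucleotide) & 0x7])
#define NUCLEOTIDE_FOR_CODE(code) ("ACGT"[(code) & 0x3])

int main(void){
   const char * sequence="GGTATT";
   const int k=(int)strlen(sequence);   // assumes k <= 32 so the k-mer fits
   const uint64_t mask=((uint64_t)1<<2*k)-1;
   uint64_t key=0;

   // Pack: after the loop the first character sits in the most significant
   // bit pair of the low 2*k bits of key.
   for(int i=0; i<k; i++)
      key=(key<<2 & mask) | (uint64_t)CODE_FOR_NUCLEOTIDE(sequence[i]);

   // Unpack in reverse, reading the least significant bit pair first.
   char decoded[33];
   decoded[k]='\0';
   for(int j=k-1; j>=0; j--){
      decoded[j]=NUCLEOTIDE_FOR_CODE(key);
      key>>=2;
   }

   printf("%s -> %s\n", sequence, decoded);   // prints "GGTATT -> GGTATT"
   return 0;
}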
critical-unrelated.c
/* Copyright (c) 2015-2019, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Simone Atzeni (simone@cs.utah.edu), Joachim Protze (joachim.protze@tu-dresden.de), Jonas Hahnfeld (hahnfeld@itc.rwth-aachen.de), Ganesh Gopalakrishnan, Zvonimir Rakamaric, Dong H. Ahn, Gregory L. Lee, Ignacio Laguna, and Martin Schulz. LLNL-CODE-773957 All rights reserved. This file is part of Archer. For details, see https://pruners.github.io/archer. Please also read https://github.com/PRUNERS/archer/blob/master/LICENSE. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // RUN: %libarcher-compile-and-run-race | FileCheck %s #include <omp.h> #include <stdio.h> int main(int argc, char* argv[]) { int var = 0; #pragma omp parallel num_threads(2) shared(var) { #pragma omp critical { // Dummy region. } var++; } fprintf(stderr, "DONE\n"); } // CHECK: WARNING: ThreadSanitizer: data race // CHECK: Write of size 4 // CHECK: #0 .omp_outlined. // CHECK: Previous write of size 4 // CHECK: #0 .omp_outlined. // CHECK: DONE
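/* The test above is intentionally racy: the critical construct guards only an
   empty dummy region, so the var++ outside it is unsynchronized. For contrast,
   a minimal sketch (not part of the Archer test suite) of one way to make the
   increment race-free with an atomic update: */
#include <stdio.h>

int main(void) {
  int var = 0;
#pragma omp parallel num_threads(2) shared(var)
  {
#pragma omp atomic
    var++; /* each thread's increment is now performed atomically */
  }
  fprintf(stderr, "var=%d DONE\n", var); /* always prints var=2 DONE */
  return 0;
}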
banded_source.c
void X(destroy_banded)(X(banded) * A) { free(A->data); free(A); } void X(destroy_triangular_banded)(X(triangular_banded) * A) { free(A->data); free(A); } void X(destroy_tb_eigen_FMM)(X(tb_eigen_FMM) * F) { if (F->n < TB_EIGEN_BLOCKSIZE) { free(F->V); free(F->lambda); } else { X(destroy_hierarchicalmatrix)(F->F0); X(destroy_tb_eigen_FMM)(F->F1); X(destroy_tb_eigen_FMM)(F->F2); free(F->X); free(F->Y); free(F->t1); free(F->t2); free(F->lambda); } free(F); } size_t X(summary_size_tb_eigen_FMM)(X(tb_eigen_FMM) * F) { size_t S = 0; if (F->n < TB_EIGEN_BLOCKSIZE) S += sizeof(FLT)*F->n*(F->n+1); else { S += X(summary_size_hierarchicalmatrix)(F->F0); S += X(summary_size_tb_eigen_FMM)(F->F1); S += X(summary_size_tb_eigen_FMM)(F->F2); S += sizeof(FLT)*F->n*(2*F->b+1); } return S; } X(banded) * X(malloc_banded)(const int m, const int n, const int l, const int u) { FLT * data = malloc(n*(l+u+1)*sizeof(FLT)); X(banded) * A = malloc(sizeof(X(banded))); A->data = data; A->m = m; A->n = n; A->l = l; A->u = u; return A; } X(banded) * X(calloc_banded)(const int m, const int n, const int l, const int u) { FLT * data = calloc(n*(l+u+1), sizeof(FLT)); X(banded) * A = malloc(sizeof(X(banded))); A->data = data; A->m = m; A->n = n; A->l = l; A->u = u; return A; } X(triangular_banded) * X(malloc_triangular_banded)(const int n, const int b) { FLT * data = malloc(n*(b+1)*sizeof(FLT)); X(triangular_banded) * A = malloc(sizeof(X(triangular_banded))); A->data = data; A->n = n; A->b = b; return A; } X(triangular_banded) * X(calloc_triangular_banded)(const int n, const int b) { FLT * data = calloc(n*(b+1), sizeof(FLT)); X(triangular_banded) * A = malloc(sizeof(X(triangular_banded))); A->data = data; A->n = n; A->b = b; return A; } FLT X(get_banded_index)(const X(banded) * A, const int i, const int j) { FLT * data = A->data; int m = A->m, n = A->n, l = A->l, u = A->u; if (0 <= i && 0 <= j && -l <= j-i && j-i <= u && i < m && j < n) return data[u+i-j+j*(l+u+1)]; else return 0; } void X(set_banded_index)(const X(banded) * A, const FLT v, const int i, const int j) { FLT * data = A->data; int m = A->m, n = A->n, l = A->l, u = A->u; if (0 <= i && 0 <= j && -l <= j-i && j-i <= u && i < m && j < n) data[u+i-j+j*(l+u+1)] = v; } FLT X(get_triangular_banded_index)(const X(triangular_banded) * A, const int i, const int j) { FLT * data = A->data; int n = A->n, b = A->b; if (0 <= i && 0 <= j && 0 <= j-i && j-i <= b && i < n && j < n) return data[i+(j+1)*b]; else return 0; } void X(set_triangular_banded_index)(const X(triangular_banded) * A, const FLT v, const int i, const int j) { FLT * data = A->data; int n = A->n, b = A->b; if (0 <= i && 0 <= j && 0 <= j-i && j-i <= b && i < n && j < n) data[i+(j+1)*b] = v; } // y ← α*A*x + β*y void X(gbmv)(FLT alpha, X(banded) * A, FLT * x, FLT beta, FLT * y) { int m = A->m, n = A->n, l = A->l, u = A->u; for (int i = 0; i < m; i++) y[i] = beta*y[i]; for (int i = 0; i < m; i++) for (int j = MAX(0, i-l); j < MIN(n, i+u+1); j++) y[i] += alpha*X(get_banded_index)(A, i, j)*x[j]; } // C ← α*A*B + β*C void X(gbmm)(FLT alpha, X(banded) * A, X(banded) * B, FLT beta, X(banded) * C) { FLT ab, c; int m = A->m, n = A->n, p = B->n; int l = C->l, u = C->u, l1 = A->l, u1 = A->u, l2 = B->l, u2 = B->u; if (C->m != m || B->m != n || C->n != p) { printf(RED("FastTransforms: gbmm: sizes are off.")"\n"); exit(EXIT_FAILURE); } if (C->l < l1+l2 || C->u < u1+u2) { printf(RED("FastTransforms: gbmm: bandwidths are off.")"\n"); exit(EXIT_FAILURE); } for (int j = 0; j < p; j++) for (int i = MAX(0, j-u); i < MIN(m, j+l+1);
i++) { ab = 0; for (int k = MAX(MAX(0, i-l1), j-u2); k < MIN(MIN(n, i+u1+1), j+l2+1); k++) ab += X(get_banded_index)(A, i, k)*X(get_banded_index)(B, k, j); c = X(get_banded_index)(C, i, j); X(set_banded_index)(C, alpha*ab+beta*c, i, j); } } // C ← α*A+β*B void X(banded_add)(FLT alpha, X(banded) * A, FLT beta, X(banded) * B, X(banded) * C) { int m = C->m, n = C->n, l = C->l, u = C->u; if (A->m != m || B->m != m || A->n != n || B->n != n) { printf(RED("FastTransforms: banded_add: sizes are off.")"\n"); exit(EXIT_FAILURE); } if (l < MAX(A->l, B->l) || C->u < MAX(A->u, B->u)) { printf(RED("FastTransforms: banded_add: bandwidths are off.")"\n"); exit(EXIT_FAILURE); } for (int j = 0; j < n; j++) for (int i = MAX(0, j-u); i < MIN(m, j+l+1); i++) X(set_banded_index)(C, alpha*X(get_banded_index)(A, i, j) + beta*X(get_banded_index)(B, i, j), i, j); } // x ← A*x, x ← Aᵀ*x void X(tbmv)(char TRANS, X(triangular_banded) * A, FLT * x) { int n = A->n, b = A->b; FLT * data = A->data, t; if (TRANS == 'N') { for (int i = 0; i < n; i++) { t = 0; for (int k = i; k < MIN(i+b+1, n); k++) t += data[i+(k+1)*b]*x[k]; x[i] = t; } } else if (TRANS == 'T') { for (int i = n-1; i >= 0; i--) { t = 0; for (int k = MAX(i-b, 0); k <= i; k++) t += data[k+(i+1)*b]*x[k]; x[i] = t; } } } // x ← A⁻¹*x, x ← A⁻ᵀ*x void X(tbsv)(char TRANS, X(triangular_banded) * A, FLT * x) { int n = A->n, b = A->b; FLT * data = A->data, t; if (TRANS == 'N') { for (int i = n-1; i >= 0; i--) { t = 0; for (int k = i+1; k < MIN(i+b+1, n); k++) t += data[i+(k+1)*b]*x[k]; x[i] = (x[i] - t)/data[i+(i+1)*b]; } } else if (TRANS == 'T') { for (int i = 0; i < n; i++) { t = 0; for (int k = MAX(i-b, 0); k < i; k++) t += data[k+(i+1)*b]*x[k]; x[i] = (x[i] - t)/data[i+(i+1)*b]; } } } // AV = BVΛ void X(triangular_banded_eigenvalues)(X(triangular_banded) * A, X(triangular_banded) * B, FLT * lambda) { for (int j = 0; j < A->n; j++) lambda[j] = X(get_triangular_banded_index)(A, j, j)/X(get_triangular_banded_index)(B, j, j); } // Assumes eigenvectors are initialized by V[i,j] = 0 for i > j and V[j,j] ≠ 0. void X(triangular_banded_eigenvectors)(X(triangular_banded) * A, X(triangular_banded) * B, FLT * V) { int n = A->n, b1 = A->b, b2 = B->b; int b = MAX(b1, b2); FLT t, lam; for (int j = 1; j < n; j++) { lam = X(get_triangular_banded_index)(A, j, j)/X(get_triangular_banded_index)(B, j, j); for (int i = j-1; i >= 0; i--) { t = 0; for (int k = i+1; k < MIN(i+b+1, n); k++) t += (X(get_triangular_banded_index)(A, i, k) - lam*X(get_triangular_banded_index)(B, i, k))*V[k+j*n]; V[i+j*n] = t/(lam*X(get_triangular_banded_index)(B, i, i) - X(get_triangular_banded_index)(A, i, i)); } } } // AV + BVΛ = CVΩ void X(triangular_banded_eigenvalues_3arg)(X(triangular_banded) * A, X(triangular_banded) * B, FLT * lambda, X(triangular_banded) * C, FLT * omega) { for (int j = 0; j < A->n; j++) omega[j] = (X(get_triangular_banded_index)(A, j, j) + X(get_triangular_banded_index)(B, j, j)*lambda[j])/X(get_triangular_banded_index)(C, j, j); } // Assumes eigenvectors are initialized by V[i,j] = 0 for i > j and V[j,j] ≠ 0. 
void X(triangular_banded_eigenvectors_3arg)(X(triangular_banded) * A, X(triangular_banded) * B, FLT * lambda, X(triangular_banded) * C, FLT * V) { int n = A->n, b1 = A->b, b2 = B->b, b3 = C->b; int b = MAX(MAX(b1, b2), b3); FLT t, lam, omeg; for (int j = 1; j < n; j++) { lam = lambda[j]; omeg = (X(get_triangular_banded_index)(A, j, j) + X(get_triangular_banded_index)(B, j, j)*lambda[j])/X(get_triangular_banded_index)(C, j, j); for (int i = j-1; i >= 0; i--) { t = 0; for (int k = i+1; k < MIN(i+b+1, n); k++) t += (X(get_triangular_banded_index)(A, i, k) + lam*X(get_triangular_banded_index)(B, i, k) - omeg*X(get_triangular_banded_index)(C, i, k))*V[k+j*n]; V[i+j*n] = t/(omeg*X(get_triangular_banded_index)(C, i, i) - lam*X(get_triangular_banded_index)(B, i, i) - X(get_triangular_banded_index)(A, i, i)); } } } X(tb_eigen_FMM) * X(tb_eig_FMM)(X(triangular_banded) * A, X(triangular_banded) * B) { int n = A->n, b1 = A->b, b2 = B->b; int b = MAX(b1, b2); X(tb_eigen_FMM) * F = malloc(sizeof(X(tb_eigen_FMM))); if (n < TB_EIGEN_BLOCKSIZE) { FLT * V = calloc(n*n, sizeof(FLT)); for (int i = 0; i < n; i++) V[i+i*n] = 1; F->lambda = malloc(n*sizeof(FLT)); X(triangular_banded_eigenvalues)(A, B, F->lambda); X(triangular_banded_eigenvectors)(A, B, V); F->V = V; F->n = n; F->b = b; } else { F->lambda = malloc(n*sizeof(FLT)); X(triangular_banded_eigenvalues)(A, B, F->lambda); int s = n>>1; X(triangular_banded) * A1 = X(calloc_triangular_banded)(s, b1); X(triangular_banded) * B1 = X(calloc_triangular_banded)(s, b2); for (int j = 0; j < s; j++) for (int k = 0; k < b1+1; k++) A1->data[k+j*(b1+1)] = A->data[k+j*(b1+1)]; for (int j = 0; j < s; j++) for (int k = 0; k < b2+1; k++) B1->data[k+j*(b2+1)] = B->data[k+j*(b2+1)]; A1->n = B1->n = s; A1->b = b1; B1->b = b2; X(triangular_banded) * A2 = X(calloc_triangular_banded)(n-s, b1); X(triangular_banded) * B2 = X(calloc_triangular_banded)(n-s, b2); for (int j = 0; j < n-s; j++) for (int k = 0; k < b1+1; k++) A2->data[k+j*(b1+1)] = A->data[k+(j+s)*(b1+1)]; for (int j = 0; j < n-s; j++) for (int k = 0; k < b2+1; k++) B2->data[k+j*(b2+1)] = B->data[k+(j+s)*(b2+1)]; A2->n = B2->n = n-s; A2->b = b1; B2->b = b2; F->F1 = X(tb_eig_FMM)(A1, B1); F->F2 = X(tb_eig_FMM)(A2, B2); FLT * lambda1 = F->F1->lambda; FLT * lambda2 = F->F2->lambda; FLT * X = calloc(s*b, sizeof(FLT)); for (int j = 0; j < b; j++) { X[s-b+j+j*s] = 1; X(tbsv)('N', B1, X+j*s); X(bfsv)('N', F->F1, X+j*s); } FLT * Y = calloc((n-s)*b, sizeof(FLT)); for (int j = 0; j < b1; j++) for (int k = 0; k < b1-j; k++) Y[j+(k+j)*(n-s)] = A2->data[k+j*(b1+1)]; FLT * Y2 = calloc((n-s)*b2, sizeof(FLT)); for (int j = 0; j < b2; j++) for (int k = 0; k < b2-j; k++) Y2[j+(k+j)*(n-s)] = B2->data[k+j*(b2+1)]; for (int j = 0; j < b1; j++) X(bfmv)('T', F->F2, Y+j*(n-s)); for (int j = 0; j < b2; j++) X(bfmv)('T', F->F2, Y2+j*(n-s)); for (int j = 0; j < b2; j++) for (int i = 0; i < n-s; i++) Y2[i+j*(n-s)] *= lambda2[i]; for (int j = 0; j < b2; j++) for (int i = 0; i < n-s; i++) Y[i+j*(n-s)] = Y[i+j*(n-s)]-Y2[i+j*(n-s)]; F->F0 = X(sample_hierarchicalmatrix)(X(cauchykernel), lambda1, lambda2, (unitrange) {0, s}, (unitrange) {0, n-s}, 'G'); F->X = X; F->Y = Y; F->t1 = calloc(s*FT_GET_MAX_THREADS(), sizeof(FLT)); F->t2 = calloc((n-s)*FT_GET_MAX_THREADS(), sizeof(FLT)); F->n = n; F->b = b; X(destroy_triangular_banded)(A1); X(destroy_triangular_banded)(B1); X(destroy_triangular_banded)(A2); X(destroy_triangular_banded)(B2); free(Y2); } return F; } void X(scale_rows_tb_eigen_FMM)(FLT alpha, FLT * x, X(tb_eigen_FMM) * F) { int n = F->n; if (n < 
TB_EIGEN_BLOCKSIZE) { FLT * V = F->V; for (int j = 0; j < n; j++) for (int i = 0; i <= j; i++) V[i+j*n] *= alpha*x[i]; } else { int s = n>>1; X(scale_rows_tb_eigen_FMM)(alpha, x, F->F1); X(scale_rows_tb_eigen_FMM)(alpha, x+s, F->F2); } } void X(scale_columns_tb_eigen_FMM)(FLT alpha, FLT * x, X(tb_eigen_FMM) * F) { int n = F->n; if (n < TB_EIGEN_BLOCKSIZE) { FLT scl, * V = F->V; for (int j = 0; j < n; j++) { scl = alpha*x[j]; for (int i = 0; i <= j; i++) V[i+j*n] *= scl; } } else { int s = n>>1, b = F->b; for (int k = 0; k < b; k++) { for (int i = 0; i < s; i++) F->X[i+k*s] /= x[i]; for (int i = 0; i < n-s; i++) F->Y[i+k*(n-s)] *= x[i+s]; } X(scale_columns_tb_eigen_FMM)(alpha, x, F->F1); X(scale_columns_tb_eigen_FMM)(alpha, x+s, F->F2); } } // x ← A*x, x ← Aᵀ*x void X(trmv)(char TRANS, int n, FLT * A, int LDA, FLT * x) { if (TRANS == 'N') { for (int j = 0; j < n; j++) { for (int i = 0; i < j; i++) x[i] += A[i+j*LDA]*x[j]; x[j] *= A[j+j*LDA]; } } else if (TRANS == 'T') { for (int i = n-1; i >= 0; i--) { x[i] *= A[i+i*LDA]; for (int j = i-1; j >= 0; j--) x[i] += A[j+i*LDA]*x[j]; } } } // x ← A⁻¹*x, x ← A⁻ᵀ*x void X(trsv)(char TRANS, int n, FLT * A, int LDA, FLT * x) { if (TRANS == 'N') { for (int j = n-1; j >= 0; j--) { x[j] /= A[j+j*LDA]; for (int i = 0; i < j; i++) x[i] -= A[i+j*LDA]*x[j]; } } else if (TRANS == 'T') { for (int i = 0; i < n; i++) { for (int j = 0; j < i; j++) x[i] -= A[j+i*LDA]*x[j]; x[i] /= A[i+i*LDA]; } } } // B ← A*B, B ← Aᵀ*B #if defined(FT_USE_CBLAS_S) void X(trmm)(char TRANS, int n, FLT * A, int LDA, FLT * B, int LDB, int N) { if (TRANS == 'N') cblas_strmm(CblasColMajor, CblasLeft, CblasUpper, CblasNoTrans, CblasNonUnit, n, N, 1, A, LDA, B, LDB); else if (TRANS == 'T') cblas_strmm(CblasColMajor, CblasLeft, CblasUpper, CblasTrans, CblasNonUnit, n, N, 1, A, LDA, B, LDB); } #elif defined(FT_USE_CBLAS_D) void X(trmm)(char TRANS, int n, FLT * A, int LDA, FLT * B, int LDB, int N) { if (TRANS == 'N') cblas_dtrmm(CblasColMajor, CblasLeft, CblasUpper, CblasNoTrans, CblasNonUnit, n, N, 1, A, LDA, B, LDB); else if (TRANS == 'T') cblas_dtrmm(CblasColMajor, CblasLeft, CblasUpper, CblasTrans, CblasNonUnit, n, N, 1, A, LDA, B, LDB); } #else void X(trmm)(char TRANS, int n, FLT * A, int LDA, FLT * B, int LDB, int N) { #pragma omp parallel for for (int j = 0; j < N; j++) X(trmv)(TRANS, n, A, LDA, B+j*LDB); } #endif // B ← A⁻¹*B, B ← A⁻ᵀ*B #if defined(FT_USE_CBLAS_S) void X(trsm)(char TRANS, int n, FLT * A, int LDA, FLT * B, int LDB, int N) { if (TRANS == 'N') cblas_strsm(CblasColMajor, CblasLeft, CblasUpper, CblasNoTrans, CblasNonUnit, n, N, 1, A, LDA, B, LDB); else if (TRANS == 'T') cblas_strsm(CblasColMajor, CblasLeft, CblasUpper, CblasTrans, CblasNonUnit, n, N, 1, A, LDA, B, LDB); } #elif defined(FT_USE_CBLAS_D) void X(trsm)(char TRANS, int n, FLT * A, int LDA, FLT * B, int LDB, int N) { if (TRANS == 'N') cblas_dtrsm(CblasColMajor, CblasLeft, CblasUpper, CblasNoTrans, CblasNonUnit, n, N, 1, A, LDA, B, LDB); else if (TRANS == 'T') cblas_dtrsm(CblasColMajor, CblasLeft, CblasUpper, CblasTrans, CblasNonUnit, n, N, 1, A, LDA, B, LDB); } #else void X(trsm)(char TRANS, int n, FLT * A, int LDA, FLT * B, int LDB, int N) { #pragma omp parallel for for (int j = 0; j < N; j++) X(trsv)(TRANS, n, A, LDA, B+j*LDB); } #endif // x ← A*x, x ← Aᵀ*x void X(bfmv)(char TRANS, X(tb_eigen_FMM) * F, FLT * x) { int n = F->n; if (n < TB_EIGEN_BLOCKSIZE) X(trmv)(TRANS, n, F->V, n, x); else { int s = n>>1, b = F->b; FLT * t1 = F->t1+s*FT_GET_THREAD_NUM(), * t2 = F->t2+(n-s)*FT_GET_THREAD_NUM(); if (TRANS == 'N') {
// C(Λ₁, Λ₂) ∘ (-XYᵀ) for (int k = 0; k < b; k++) { for (int i = 0; i < n-s; i++) t2[i] = F->Y[i+k*(n-s)]*x[i+s]; X(ghmv)(TRANS, -1, F->F0, t2, 0, t1); for (int i = 0; i < s; i++) x[i] += t1[i]*F->X[i+k*s]; } X(bfmv)(TRANS, F->F1, x); X(bfmv)(TRANS, F->F2, x+s); } else if (TRANS == 'T') { X(bfmv)(TRANS, F->F1, x); X(bfmv)(TRANS, F->F2, x+s); // C(Λ₁, Λ₂) ∘ (-XYᵀ) for (int k = 0; k < b; k++) { for (int i = 0; i < s; i++) t1[i] = F->X[i+k*s]*x[i]; X(ghmv)(TRANS, -1, F->F0, t1, 0, t2); for (int i = 0; i < n-s; i++) x[i+s] += t2[i]*F->Y[i+k*(n-s)]; } } } } // x ← A⁻¹*x, x ← A⁻ᵀ*x void X(bfsv)(char TRANS, X(tb_eigen_FMM) * F, FLT * x) { int n = F->n; if (n < TB_EIGEN_BLOCKSIZE) X(trsv)(TRANS, n, F->V, n, x); else { int s = n>>1, b = F->b; FLT * t1 = F->t1+s*FT_GET_THREAD_NUM(), * t2 = F->t2+(n-s)*FT_GET_THREAD_NUM(); if (TRANS == 'N') { X(bfsv)(TRANS, F->F1, x); X(bfsv)(TRANS, F->F2, x+s); // C(Λ₁, Λ₂) ∘ (-XYᵀ) for (int k = 0; k < b; k++) { for (int i = 0; i < n-s; i++) t2[i] = F->Y[i+k*(n-s)]*x[i+s]; X(ghmv)(TRANS, 1, F->F0, t2, 0, t1); for (int i = 0; i < s; i++) x[i] += t1[i]*F->X[i+k*s]; } } else if (TRANS == 'T') { // C(Λ₁, Λ₂) ∘ (-XYᵀ) for (int k = 0; k < b; k++) { for (int i = 0; i < s; i++) t1[i] = F->X[i+k*s]*x[i]; X(ghmv)(TRANS, 1, F->F0, t1, 0, t2); for (int i = 0; i < n-s; i++) x[i+s] += t2[i]*F->Y[i+k*(n-s)]; } X(bfsv)(TRANS, F->F1, x); X(bfsv)(TRANS, F->F2, x+s); } } } void X(bfmm)(char TRANS, X(tb_eigen_FMM) * F, FLT * B, int LDB, int N) { #pragma omp parallel for for (int j = 0; j < N; j++) X(bfmv)(TRANS, F, B+j*LDB); } void X(bfsm)(char TRANS, X(tb_eigen_FMM) * F, FLT * B, int LDB, int N) { #pragma omp parallel for for (int j = 0; j < N; j++) X(bfsv)(TRANS, F, B+j*LDB); } #define delta(k) (((k)%2) ? 1 : 0) X(triangular_banded) * X(create_A_konoplev_to_jacobi)(const int n, const FLT alpha, const FLT beta) { X(triangular_banded) * A = X(calloc_triangular_banded)(n, 2); if (n > 0) X(set_triangular_banded_index)(A, 0, 0, 0); if (n > 1) { X(set_triangular_banded_index)(A, 3*(2*alpha+2*beta+3)/(2*alpha+5), 1, 1); } for (int i = 2; i < n; i++) { X(set_triangular_banded_index)(A, (i-2*beta-1)*(i+2*alpha+1)/(2*i+2*alpha-1)*(i+alpha-1)/(2*i+2*alpha+1)*(i+alpha), i-2, i); X(set_triangular_banded_index)(A, i*(i+2*alpha+2*beta+2)*(i+1)/(i+2-delta(i))*(i+2)/(i+2*alpha+2-delta(i))*(i+2*alpha+1)/(2*i+2*alpha+1)*(i+2*alpha+2)/(2*i+2*alpha+3), i, i); } return A; } X(triangular_banded) * X(create_B_konoplev_to_jacobi)(const int n, const FLT alpha) { X(triangular_banded) * B = X(calloc_triangular_banded)(n, 2); if (n > 0) X(set_triangular_banded_index)(B, 1/(2*alpha+3), 0, 0); if (n > 1) { X(set_triangular_banded_index)(B, 3/(2*alpha+5), 1, 1); } for (int i = 2; i < n; i++) { X(set_triangular_banded_index)(B, (i+alpha-1)/(2*i+2*alpha-1)*(i+alpha)/(2*i+2*alpha+1), i-2, i); X(set_triangular_banded_index)(B, (i+1+delta(i))/(2*i+2*alpha+1)*(i+2*alpha+1+delta(i))/(2*i+2*alpha+3), i, i); } return B; } #undef delta // Dᵏ P^{(α,β)} X(banded) * X(create_jacobi_derivative)(const int m, const int n, const int order, const FLT alpha, const FLT beta) { X(banded) * A = X(malloc_banded)(m, n, -order, order); FLT v; for (int j = order; j < n; j++) { v = 1; for (int k = 0; k < order; k++) v *= (j+alpha+beta+k+1)/2; X(set_banded_index)(A, v, j-order, j); } return A; } // x P^{(α,β)} X(banded) * X(create_jacobi_multiplication)(const int m, const int n, const FLT alpha, const FLT beta) { X(banded) * A = X(calloc_banded)(m, n, 1, 1); FLT v; for (int j = 0; j < n; j++) { v = 
2*(j+alpha)/(2*j+alpha+beta)*(j+beta)/(2*j+alpha+beta+1); X(set_banded_index)(A, v, j-1, j); if (j == 0) v = (beta-alpha)/(alpha+beta+2); else v = (beta-alpha)*(alpha+beta)/(2*j+alpha+beta)/(2*j+alpha+beta+2); X(set_banded_index)(A, v, j, j); if (j == 0) v = 2/(alpha+beta+2); else v = 2*(j+1)/(2*j+alpha+beta+1)*(j+alpha+beta+1)/(2*j+alpha+beta+2); X(set_banded_index)(A, v, j+1, j); } return A; } // P^{(α,β)} ↗ P^{(α+1,β+1)} X(banded) * X(create_jacobi_raising)(const int m, const int n, const FLT alpha, const FLT beta) { X(banded) * A = X(calloc_banded)(m, n, 0, 2); FLT v; for (int j = 0; j < n; j++) { v = -(j+alpha)/(2*j+alpha+beta)*(j+beta)/(2*j+alpha+beta+1); X(set_banded_index)(A, v, j-2, j); v = (alpha-beta)/(2*j+alpha+beta)*(j+alpha+beta+1)/(2*j+alpha+beta+2); X(set_banded_index)(A, v, j-1, j); if (j == 0) v = 1; else v = (j+alpha+beta+1)/(2*j+alpha+beta+1)*(j+alpha+beta+2)/(2*j+alpha+beta+2); X(set_banded_index)(A, v, j, j); } return A; } // (1-x²) P^{(α+1,β+1)} ↘ P^{(α,β)} X(banded) * X(create_jacobi_lowering)(const int m, const int n, const FLT alpha, const FLT beta) { X(banded) * A = X(calloc_banded)(m, n, 2, 0); FLT v; for (int j = 0; j < n; j++) { v = 4*(j+alpha+1)/(2*j+alpha+beta+2)*(j+beta+1)/(2*j+alpha+beta+3); X(set_banded_index)(A, v, j, j); v = 4*(alpha-beta)/(2*j+alpha+beta+2)*(j+1)/(2*j+alpha+beta+4); X(set_banded_index)(A, v, j+1, j); v = -4*(j+1)/(2*j+alpha+beta+3)*(j+2)/(2*j+alpha+beta+4); X(set_banded_index)(A, v, j+2, j); } return A; } X(triangular_banded) * X(create_A_associated_jacobi_to_jacobi)(const int n, const FLT alpha, const FLT beta, const FLT gamma, const FLT delta) { X(banded) * A = X(calloc_banded)(n, n, 0, 4); X(banded) * D1 = X(create_jacobi_derivative)(n, n, 1, gamma, delta); X(banded) * D2 = X(create_jacobi_derivative)(n, n, 2, gamma, delta); X(banded) * D3 = X(create_jacobi_derivative)(n, n, 3, gamma, delta); X(banded) * D4 = X(create_jacobi_derivative)(n, n, 4, gamma, delta); X(banded) * R1 = X(create_jacobi_raising)(n, n, gamma+1, delta+1); X(banded) * L1 = X(create_jacobi_lowering)(n, n, gamma+1, delta+1); X(banded) * L2 = X(create_jacobi_lowering)(n, n, gamma+2, delta+2); X(banded) * L3 = X(create_jacobi_lowering)(n, n, gamma+3, delta+3); X(banded) * M2 = X(create_jacobi_multiplication)(n, n, gamma+2, delta+2); // A4 = (1-x²)² D⁴ X(banded) * A4a = X(calloc_banded)(n, n, -2, 4); X(gbmm)(1, L3, D4, 0, A4a); X(banded) * A4 = X(calloc_banded)(n, n, 0, 4); X(gbmm)(1, L2, A4a, 0, A4); // A3 = -10 x (1-x²) D³ X(banded) * A3a = X(calloc_banded)(n, n, -1, 3); X(gbmm)(1, L2, D3, 0, A3a); X(banded) * A3 = X(calloc_banded)(n, n, 0, 4); X(gbmm)(-10, M2, A3a, 0, A3); // A2 = [ -20(1-x²)-2ν(1+x)-4β²+16 ] D² // = [ -20(1-x²)-2νx -2ν-4β²+16 ] D² X(banded) * A2a = X(calloc_banded)(n, n, 0, 2); X(gbmm)(1, L1, D2, 0, A2a); X(banded) * A2b = X(calloc_banded)(n, n, 0, 4); X(gbmm)(1, R1, A2a, 0, A2b); X(banded) * A2c = X(calloc_banded)(n, n, -1, 3); X(gbmm)(1, M2, D2, 0, A2c); X(banded) * A2 = X(calloc_banded)(n, n, 0, 4); X(banded_add)(-20, A2b, -2*(alpha-beta)*(alpha+beta), A2c, A2); X(banded_add)(1, A2, -2*(alpha-beta)*(alpha+beta)-4*beta*beta+16, D2, A2); // A1 = -3ν D X(banded) * A1 = X(calloc_banded)(n, n, -1, 3); X(gbmm)(-3*(alpha-beta)*(alpha+beta), R1, D1, 0, A1); // A = -(A1+A2+A3+A4) X(banded_add)(-1, A1, -1, A2, A); X(banded_add)(1, A, -1, A3, A); X(banded_add)(1, A, -1, A4, A); X(destroy_banded)(D1); X(destroy_banded)(D2); X(destroy_banded)(D3); X(destroy_banded)(D4); X(destroy_banded)(R1); X(destroy_banded)(L1); X(destroy_banded)(L2); 
X(destroy_banded)(L3); X(destroy_banded)(M2); X(destroy_banded)(A4a); X(destroy_banded)(A4); X(destroy_banded)(A3a); X(destroy_banded)(A3); X(destroy_banded)(A2a); X(destroy_banded)(A2b); X(destroy_banded)(A2c); X(destroy_banded)(A2); X(destroy_banded)(A1); X(triangular_banded) * TA = malloc(sizeof(X(triangular_banded))); TA->data = A->data; TA->n = n; TA->b = 4; free(A); return TA; } X(triangular_banded) * X(create_B_associated_jacobi_to_jacobi)(const int n, const FLT gamma, const FLT delta) { X(banded) * B = X(calloc_banded)(n, n, 0, 4); X(banded) * D1 = X(create_jacobi_derivative)(n, n, 1, gamma, delta); X(banded) * D2 = X(create_jacobi_derivative)(n, n, 2, gamma, delta); X(banded) * R1 = X(create_jacobi_raising)(n, n, gamma+1, delta+1); X(banded) * L1 = X(create_jacobi_lowering)(n, n, gamma+1, delta+1); X(banded) * M2 = X(create_jacobi_multiplication)(n, n, gamma+2, delta+2); // B2 = [ (μ+(n+3)(n-1))(1-x²) ] D² X(banded) * B2a = X(calloc_banded)(n, n, 0, 2); X(gbmm)(1, L1, D2, 0, B2a); X(banded) * B2 = X(calloc_banded)(n, n, 0, 4); X(gbmm)(1, R1, B2a, 0, B2); // B1 = [ -3*(μ+(n+3)(n-1))x ] D X(banded) * B1a = X(calloc_banded)(n, n, -1, 3); X(gbmm)(1, R1, D1, 0, B1a); X(banded) * B1 = X(calloc_banded)(n, n, 0, 4); X(gbmm)(-3, M2, B1a, 0, B1); // B = -(B1+B2) X(banded_add)(-1, B1, -1, B2, B); X(destroy_banded)(D1); X(destroy_banded)(D2); X(destroy_banded)(R1); X(destroy_banded)(L1); X(destroy_banded)(M2); X(destroy_banded)(B2a); X(destroy_banded)(B2); X(destroy_banded)(B1a); X(destroy_banded)(B1); X(triangular_banded) * TB = malloc(sizeof(X(triangular_banded))); TB->data = B->data; TB->n = n; TB->b = 4; free(B); return TB; } X(triangular_banded) * X(create_C_associated_jacobi_to_jacobi)(const int n, const FLT gamma, const FLT delta) { X(banded) * C = X(calloc_banded)(n, n, 0, 4); X(banded) * R0 = X(create_jacobi_raising)(n, n, gamma, delta); X(banded) * R1 = X(create_jacobi_raising)(n, n, gamma+1, delta+1); X(gbmm)(1, R1, R0, 0, C); X(destroy_banded)(R0); X(destroy_banded)(R1); X(triangular_banded) * TC = malloc(sizeof(X(triangular_banded))); TC->data = C->data; TC->n = n; TC->b = 4; free(C); return TC; }
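/* A minimal standalone sketch of the column-major band storage behind
   X(get_banded_index)/X(set_banded_index) above: a matrix with l sub- and u
   super-diagonals keeps entry (i,j) at data[u+i-j + j*(l+u+1)], so each
   column occupies a strip of height l+u+1. The struct and helper names are
   local to this sketch, not part of the library. */
#include <stdio.h>
#include <stdlib.h>

typedef struct { double * data; int m; int n; int l; int u; } banded_demo;

static double banded_demo_get(const banded_demo * A, const int i, const int j) {
    if (0 <= i && i < A->m && 0 <= j && j < A->n && -A->l <= j-i && j-i <= A->u)
        return A->data[A->u+i-j+j*(A->l+A->u+1)];
    return 0; /* entries outside the band are structurally zero */
}

int main(void) {
    /* A 4x4 tridiagonal matrix: one sub-diagonal (l = 1), one super-diagonal (u = 1). */
    banded_demo A = {calloc(4*(1+1+1), sizeof(double)), 4, 4, 1, 1};
    for (int j = 0; j < 4; j++)
        for (int i = 0; i < 4; i++)
            if (abs(i-j) <= 1)
                A.data[A.u+i-j+j*(A.l+A.u+1)] = 10*i+j; /* store within the band only */
    for (int i = 0; i < 4; i++) {
        for (int j = 0; j < 4; j++)
            printf("%5.1f ", banded_demo_get(&A, i, j)); /* off-band reads give 0 */
        printf("\n");
    }
    free(A.data);
    return 0;
}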
scheduler-clause.c
/* Shows whether the schedule is static, dynamic, or guided. The schedule is taken either from the run-sched-var control variable, from the OMP_SCHEDULE environment variable, or via omp_set_schedule(kind,modifier): 1.- run-sched-var 2.- OMP_SCHEDULE 3.- omp_set_schedule(kind,modifier) $ export OMP_SCHEDULE="static,2" $ ./bin/scheduler-clause 8 thread 0 adds a[0]=0 suma=0 thread 0 adds a[1]=1 suma=1 thread 0 adds a[6]=6 suma=7 thread 0 adds a[7]=7 suma=14 thread 2 adds a[4]=4 suma=4 thread 2 adds a[5]=5 suma=9 thread 1 adds a[2]=2 suma=2 thread 1 adds a[3]=3 suma=5 Outside 'parallel for' suma=14 $ export OMP_SCHEDULE="dynamic,2" $ ./bin/scheduler-clause 8 thread 1 adds a[0]=0 suma=0 thread 1 adds a[1]=1 suma=1 thread 1 adds a[6]=6 suma=7 thread 1 adds a[7]=7 suma=14 thread 0 adds a[4]=4 suma=4 thread 0 adds a[5]=5 suma=9 thread 2 adds a[2]=2 suma=2 thread 2 adds a[3]=3 suma=5 $ export OMP_SCHEDULE="guided,2" $ ./bin/scheduler-clause 8 thread 0 adds a[0]=0 suma=0 thread 0 adds a[1]=1 suma=1 thread 0 adds a[2]=2 suma=3 thread 0 adds a[7]=7 suma=10 thread 1 adds a[3]=3 suma=3 thread 1 adds a[4]=4 suma=7 thread 2 adds a[5]=5 suma=5 thread 2 adds a[6]=6 suma=11 Outside 'parallel for' suma=10 Illustrates the three schedule kinds defined in the examples default, scheduled-clause.c, scheduleg-clause.c */ #include <stdio.h> #include <stdlib.h> #ifdef _OPENMP #include <omp.h> #else #define omp_get_thread_num() 0 #endif int main(int argc, char **argv) { int i, n=20, suma=0; int a[20]; if(argc < 2) { fprintf(stderr,"\nMissing iteration count\n"); exit(-1); } n = atoi(argv[1]); if (n>20) n=20; for (i=0; i<n; i++) a[i] = i; #pragma omp parallel for firstprivate(suma) lastprivate(suma) schedule(runtime) for (i=0; i<n; i++){ suma = suma + a[i]; printf(" thread %d adds a[%d]=%d suma=%d \n", omp_get_thread_num(),i,a[i],suma); } printf("Outside 'parallel for' suma=%d\n",suma); return 0; }
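/* A minimal sketch of the third selection mechanism listed above:
   omp_set_schedule() sets run-sched-var programmatically, so a subsequent
   schedule(runtime) loop uses that schedule even if OMP_SCHEDULE is unset.
   omp_set_schedule() and the omp_sched_* kinds are standard OpenMP API since
   version 3.0. Unlike the firstprivate/lastprivate pattern above, the
   reduction clause makes the final sum independent of the schedule. */
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif

int main(void) {
#ifdef _OPENMP
    omp_set_schedule(omp_sched_dynamic, 2); /* equivalent to OMP_SCHEDULE="dynamic,2" */
#endif
    int suma = 0;
#pragma omp parallel for reduction(+:suma) schedule(runtime)
    for (int i = 0; i < 8; i++)
        suma += i;
    printf("suma=%d\n", suma); /* always 28 */
    return 0;
}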
effect.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE FFFFF FFFFF EEEEE CCCC TTTTT % % E F F E C T % % EEE FFF FFF EEE C T % % E F F E C T % % EEEEE F F EEEEE CCCC T % % % % % % MagickCore Image Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/blob.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/constitute.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/effect.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/matrix.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/montage.h" #include "MagickCore/morphology.h" #include "MagickCore/morphology-private.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/shear.h" #include "MagickCore/signature-private.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/threshold.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveBlurImage() adaptively blurs the image by blurring less % intensely near image edges and more intensely far from edges. We blur the % image with a Gaussian operator of the given radius and standard deviation % (sigma). For reasonable results, radius should be larger than sigma. Use a % radius of 0 and AdaptiveBlurImage() selects a suitable radius for you. 
% % The format of the AdaptiveBlurImage method is: % % Image *AdaptiveBlurImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Laplacian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AdaptiveBlurImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { #define AdaptiveBlurImageTag "Convolve/Image" #define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma) CacheView *blur_view, *edge_view, *image_view; double normalize, **kernel; Image *blur_image, *edge_image, *gaussian_image; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; size_t width; ssize_t j, k, u, v, y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) return((Image *) NULL); if (fabs(sigma) < MagickEpsilon) return(blur_image); if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { blur_image=DestroyImage(blur_image); return((Image *) NULL); } /* Edge detect the image brightness channel, level, blur, and level again. */ edge_image=EdgeImage(image,radius,exception); if (edge_image == (Image *) NULL) { blur_image=DestroyImage(blur_image); return((Image *) NULL); } (void) AutoLevelImage(edge_image,exception); gaussian_image=BlurImage(edge_image,radius,sigma,exception); if (gaussian_image != (Image *) NULL) { edge_image=DestroyImage(edge_image); edge_image=gaussian_image; } (void) AutoLevelImage(edge_image,exception); /* Create a set of kernels from maximum (radius,sigma) to minimum. */ width=GetOptimalKernelWidth2D(radius,sigma); kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width, sizeof(*kernel))); if (kernel == (double **) NULL) { edge_image=DestroyImage(edge_image); blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } (void) memset(kernel,0,(size_t) width*sizeof(*kernel)); for (i=0; i < (ssize_t) width; i+=2) { kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory( (size_t) (width-i),(width-i)*sizeof(**kernel))); if (kernel[i] == (double *) NULL) break; normalize=0.0; j=(ssize_t) (width-i-1)/2; k=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) { kernel[i][k]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma* MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma)); normalize+=kernel[i][k]; k++; } } kernel[i][(k-1)/2]+=(double) (1.0-normalize); if (sigma < MagickEpsilon) kernel[i][(k-1)/2]=1.0; } if (i < (ssize_t) width) { for (i-=2; i >= 0; i-=2) kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]); kernel=(double **) RelinquishAlignedMemory(kernel); edge_image=DestroyImage(edge_image); blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Adaptively blur image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); edge_view=AcquireVirtualCacheView(edge_image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,blur_image,blur_image->rows,1) #endif for (y=0; y < (ssize_t) blur_image->rows; y++) { register const Quantum *magick_restrict r; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) blur_image->columns; x++) { register const Quantum *magick_restrict p; register ssize_t i; ssize_t center, j; j=(ssize_t) ceil((double) width*(1.0-QuantumScale* GetPixelIntensity(edge_image,r))-0.5); if (j < 0) j=0; else if (j > (ssize_t) width) j=(ssize_t) width; if ((j & 0x01) != 0) j--; p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y- (ssize_t) ((width-j)/2L),width-j,width-j,exception); if (p == (const Quantum *) NULL) break; center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+ GetPixelChannels(image)*((width-j)/2); for (i=0; i < (ssize_t) GetPixelChannels(blur_image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait blur_traits, traits; register const double *magick_restrict k; register const Quantum *magick_restrict pixels; register ssize_t u; ssize_t v; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if ((blur_traits & CopyPixelTrait) != 0) { SetPixelChannel(blur_image,channel,p[center+i],q); continue; } k=kernel[j]; pixels=p; pixel=0.0; gamma=0.0; if ((blur_traits & BlendPixelTrait) == 0) { /* No alpha blending. */ for (v=0; v < (ssize_t) (width-j); v++) { for (u=0; u < (ssize_t) (width-j); u++) { pixel+=(*k)*pixels[i]; gamma+=(*k); k++; pixels+=GetPixelChannels(image); } } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); continue; } /* Alpha blending. 
*/ for (v=0; v < (ssize_t) (width-j); v++) { for (u=0; u < (ssize_t) (width-j); u++) { alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels)); pixel+=(*k)*alpha*pixels[i]; gamma+=(*k)*alpha; k++; pixels+=GetPixelChannels(image); } } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); } q+=GetPixelChannels(blur_image); r+=GetPixelChannels(edge_image); } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,AdaptiveBlurImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_image->type=image->type; blur_view=DestroyCacheView(blur_view); edge_view=DestroyCacheView(edge_view); image_view=DestroyCacheView(image_view); edge_image=DestroyImage(edge_image); for (i=0; i < (ssize_t) width; i+=2) kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]); kernel=(double **) RelinquishAlignedMemory(kernel); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e S h a r p e n I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveSharpenImage() adaptively sharpens the image by sharpening more % intensely near image edges and less intensely far from edges. We sharpen the % image with a Gaussian operator of the given radius and standard deviation % (sigma). For reasonable results, radius should be larger than sigma. Use a % radius of 0 and AdaptiveSharpenImage() selects a suitable radius for you. % % The format of the AdaptiveSharpenImage method is: % % Image *AdaptiveSharpenImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Laplacian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AdaptiveSharpenImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { #define AdaptiveSharpenImageTag "Convolve/Image" #define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma) CacheView *sharp_view, *edge_view, *image_view; double normalize, **kernel; Image *sharp_image, *edge_image, *gaussian_image; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; size_t width; ssize_t j, k, u, v, y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); sharp_image=CloneImage(image,0,0,MagickTrue,exception); if (sharp_image == (Image *) NULL) return((Image *) NULL); if (fabs(sigma) < MagickEpsilon) return(sharp_image); if (SetImageStorageClass(sharp_image,DirectClass,exception) == MagickFalse) { sharp_image=DestroyImage(sharp_image); return((Image *) NULL); } /* Edge detect the image brightness channel, level, sharp, and level again. 
*/ edge_image=EdgeImage(image,radius,exception); if (edge_image == (Image *) NULL) { sharp_image=DestroyImage(sharp_image); return((Image *) NULL); } (void) AutoLevelImage(edge_image,exception); gaussian_image=BlurImage(edge_image,radius,sigma,exception); if (gaussian_image != (Image *) NULL) { edge_image=DestroyImage(edge_image); edge_image=gaussian_image; } (void) AutoLevelImage(edge_image,exception); /* Create a set of kernels from maximum (radius,sigma) to minimum. */ width=GetOptimalKernelWidth2D(radius,sigma); kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,sizeof(*kernel))); if (kernel == (double **) NULL) { edge_image=DestroyImage(edge_image); sharp_image=DestroyImage(sharp_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } (void) memset(kernel,0,(size_t) width*sizeof(*kernel)); for (i=0; i < (ssize_t) width; i+=2) { kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t) (width-i),(width-i)*sizeof(**kernel))); if (kernel[i] == (double *) NULL) break; normalize=0.0; j=(ssize_t) (width-i-1)/2; k=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) { kernel[i][k]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma* MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma)); normalize+=kernel[i][k]; k++; } } kernel[i][(k-1)/2]=(double) ((-2.0)*normalize); if (sigma < MagickEpsilon) kernel[i][(k-1)/2]=1.0; } if (i < (ssize_t) width) { for (i-=2; i >= 0; i-=2) kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]); kernel=(double **) RelinquishAlignedMemory(kernel); edge_image=DestroyImage(edge_image); sharp_image=DestroyImage(sharp_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Adaptively sharpen image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); edge_view=AcquireVirtualCacheView(edge_image,exception); sharp_view=AcquireAuthenticCacheView(sharp_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,sharp_image,sharp_image->rows,1) #endif for (y=0; y < (ssize_t) sharp_image->rows; y++) { register const Quantum *magick_restrict r; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception); q=QueueCacheViewAuthenticPixels(sharp_view,0,y,sharp_image->columns,1, exception); if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) sharp_image->columns; x++) { register const Quantum *magick_restrict p; register ssize_t i; ssize_t center, j; j=(ssize_t) ceil((double) width*(1.0-QuantumScale* GetPixelIntensity(edge_image,r))-0.5); if (j < 0) j=0; else if (j > (ssize_t) width) j=(ssize_t) width; if ((j & 0x01) != 0) j--; p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y- (ssize_t) ((width-j)/2L),width-j,width-j,exception); if (p == (const Quantum *) NULL) break; center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+ GetPixelChannels(image)*((width-j)/2); for (i=0; i < (ssize_t) GetPixelChannels(sharp_image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait sharp_traits, traits; register const double *magick_restrict k; register const Quantum *magick_restrict pixels; register ssize_t u; ssize_t v; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); sharp_traits=GetPixelChannelTraits(sharp_image,channel); if 
((traits == UndefinedPixelTrait) || (sharp_traits == UndefinedPixelTrait)) continue; if ((sharp_traits & CopyPixelTrait) != 0) { SetPixelChannel(sharp_image,channel,p[center+i],q); continue; } k=kernel[j]; pixels=p; pixel=0.0; gamma=0.0; if ((sharp_traits & BlendPixelTrait) == 0) { /* No alpha blending. */ for (v=0; v < (ssize_t) (width-j); v++) { for (u=0; u < (ssize_t) (width-j); u++) { pixel+=(*k)*pixels[i]; gamma+=(*k); k++; pixels+=GetPixelChannels(image); } } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q); continue; } /* Alpha blending. */ for (v=0; v < (ssize_t) (width-j); v++) { for (u=0; u < (ssize_t) (width-j); u++) { alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels)); pixel+=(*k)*alpha*pixels[i]; gamma+=(*k)*alpha; k++; pixels+=GetPixelChannels(image); } } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q); } q+=GetPixelChannels(sharp_image); r+=GetPixelChannels(edge_image); } if (SyncCacheViewAuthenticPixels(sharp_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,AdaptiveSharpenImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } sharp_image->type=image->type; sharp_view=DestroyCacheView(sharp_view); edge_view=DestroyCacheView(edge_view); image_view=DestroyCacheView(image_view); edge_image=DestroyImage(edge_image); for (i=0; i < (ssize_t) width; i+=2) kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]); kernel=(double **) RelinquishAlignedMemory(kernel); if (status == MagickFalse) sharp_image=DestroyImage(sharp_image); return(sharp_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BlurImage() blurs an image. We convolve the image with a Gaussian operator % of the given radius and standard deviation (sigma). For reasonable results, % the radius should be larger than sigma. Use a radius of 0 and BlurImage() % selects a suitable radius for you. % % The format of the BlurImage method is: % % Image *BlurImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. 
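%
%  A minimal usage sketch (illustrative only; assumes `image' and `exception'
%  are valid and that the caller owns the returned image):
%
%    Image
%      *blurred;
%
%    blurred=BlurImage(image,0.0,1.5,exception);
%    if (blurred != (Image *) NULL)
%      blurred=DestroyImage(blurred);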
%
*/
MagickExport Image *BlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  char
    geometry[MagickPathExtent];

  KernelInfo
    *kernel_info;

  Image
    *blur_image;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  blur_image=AccelerateBlurImage(image,radius,sigma,exception);
  if (blur_image != (Image *) NULL)
    return(blur_image);
#endif
  (void) FormatLocaleString(geometry,MagickPathExtent,
    "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
  kernel_info=AcquireKernelInfo(geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  blur_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B i l a t e r a l B l u r I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BilateralBlurImage() is a non-linear, edge-preserving, and noise-reducing
%  smoothing filter for images.  It replaces the intensity of each pixel with
%  a weighted average of intensity values from nearby pixels.  This weight is
%  based on a Gaussian distribution.  The weights depend not only on the
%  Euclidean distance between pixels, but also on the radiometric differences
%  (e.g., range differences, such as color intensity, depth distance, etc.).
%  This preserves sharp edges.
%
%  The format of the BilateralBlurImage method is:
%
%      Image *BilateralBlurImage(const Image *image,const size_t width,
%        const size_t height,const double intensity_sigma,
%        const double spatial_sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o width: the width of the neighborhood in pixels.
%
%    o height: the height of the neighborhood in pixels.
%
%    o intensity_sigma: sigma in the intensity space.  A larger value means
%      that farther colors within the pixel neighborhood (see spatial_sigma)
%      will be mixed together, resulting in larger areas of semi-equal color.
%
%    o spatial_sigma: sigma in the coordinate space.  A larger value means
%      that farther pixels influence each other as long as their colors are
%      close enough (see intensity_sigma).  When the neighborhood diameter is
%      greater than zero, it specifies the neighborhood size regardless of
%      spatial_sigma.  Otherwise, the neighborhood diameter is proportional
%      to spatial_sigma.
%
%    o exception: return any errors or warnings in this structure.
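%
%  A hedged usage sketch (the neighborhood size and sigmas below are
%  illustrative assumptions, not tuned recommendations):
%
%    Image
%      *smoothed;
%
%    smoothed=BilateralBlurImage(image,5,5,30.0,3.0,exception);
%    if (smoothed != (Image *) NULL)
%      smoothed=DestroyImage(smoothed);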
%
*/

static inline double BlurDistance(const ssize_t x,const ssize_t y,
  const ssize_t u,const ssize_t v)
{
  return(sqrt(((double) x-u)*((double) x-u)+((double) y-v)*((double) y-v)));
}

static inline double BlurGaussian(const double x,const double sigma)
{
  return(exp(-((double) x*x)*PerceptibleReciprocal(2.0*sigma*sigma))*
    PerceptibleReciprocal(Magick2PI*sigma*sigma));
}

static double **DestroyBilateralThreadSet(const ssize_t number_threads,
  double **weights)
{
  register ssize_t
    i;

  assert(weights != (double **) NULL);
  for (i=0; i <= (ssize_t) number_threads; i++)
    if (weights[i] != (double *) NULL)
      weights[i]=(double *) RelinquishMagickMemory(weights[i]);
  weights=(double **) RelinquishMagickMemory(weights);
  return(weights);
}

static double **AcquireBilateralThreadSet(const size_t number_threads,
  const size_t width,const size_t height)
{
  double
    **weights;

  register ssize_t
    i;

  weights=(double **) AcquireQuantumMemory(number_threads+1,sizeof(*weights));
  if (weights == (double **) NULL)
    return((double **) NULL);
  /*
    Zero all number_threads+1 slots so DestroyBilateralThreadSet() can safely
    unwind a partial allocation.
  */
  (void) memset(weights,0,(number_threads+1)*sizeof(*weights));
  for (i=0; i <= (ssize_t) number_threads; i++)
  {
    weights[i]=(double *) AcquireQuantumMemory(width,height*sizeof(**weights));
    if (weights[i] == (double *) NULL)
      return(DestroyBilateralThreadSet((ssize_t) number_threads,weights));
  }
  return(weights);
}

MagickExport Image *BilateralBlurImage(const Image *image,const size_t width,
  const size_t height,const double intensity_sigma,const double spatial_sigma,
  ExceptionInfo *exception)
{
#define MaxIntensity  (255)
#define BilateralBlurImageTag  "Blur/Image"

  CacheView
    *blur_view,
    *image_view;

  double
    intensity_gaussian[2*(MaxIntensity+1)],
    *spatial_gaussian,
    **weights;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  OffsetInfo
    mid;

  register ssize_t
    u;

  ssize_t
    i,
    n,
    number_threads,
    v,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  weights=AcquireBilateralThreadSet((size_t) number_threads,width,height);
  if (weights == (double **) NULL)
    {
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Fill the lookup table through +MaxIntensity so a lookup at the extreme
    intensity difference never reads an uninitialized entry.
  */
  for (i=(-MaxIntensity); i <= MaxIntensity; i++)
    intensity_gaussian[i+MaxIntensity]=BlurGaussian((double) i,
      intensity_sigma);
  spatial_gaussian=weights[number_threads];
  n=0;
  mid.x=(ssize_t) (width/2L);
  mid.y=(ssize_t) (height/2L);
  for (v=0; v < (ssize_t) height; v++)
    for (u=0; u < (ssize_t) width; u++)
      spatial_gaussian[n++]=BlurGaussian(BlurDistance(0,0,u-mid.x,v-mid.y),
        spatial_sigma);
  /*
    Bilateral blur image.
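    (Each neighbor is weighted by the product of a spatial Gaussian of its
    distance from the center pixel and an intensity Gaussian of its
    char-scaled intensity difference; differences within
    [-MaxIntensity,MaxIntensity] are served from the intensity_gaussian[]
    lookup table, anything larger falls back to BlurGaussian().)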
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,blur_image,blur_image->rows,1) #endif for (y=0; y < (ssize_t) blur_image->rows; y++) { const int id = GetOpenMPThreadId(); register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) blur_image->columns; x++) { double gamma, pixel; register const Quantum *magick_restrict p, *magick_restrict r; register ssize_t i, u; ssize_t n, v; /* Tonal weighting preserves edges while smoothing in the flat regions. */ p=GetCacheViewVirtualPixels(image_view,x-mid.x,y-mid.y,width,height, exception); if (p == (const Quantum *) NULL) break; p+=(ssize_t) GetPixelChannels(image)*width*mid.y+GetPixelChannels(image)* mid.x; n=0; for (v=0; v < (ssize_t) height; v++) { for (u=0; u < (ssize_t) width; u++) { double intensity; r=p+(ssize_t) GetPixelChannels(image)*(ssize_t) width*(mid.y-v)+ GetPixelChannels(image)*(mid.x-u); intensity=ScaleQuantumToChar(GetPixelIntensity(image,r))- (double) ScaleQuantumToChar(GetPixelIntensity(image,p)); if ((intensity >= -MaxIntensity) && (intensity <= MaxIntensity)) weights[id][n]=intensity_gaussian[(ssize_t) intensity+MaxIntensity]* spatial_gaussian[n]; else weights[id][n]=BlurGaussian(intensity,intensity_sigma)* BlurGaussian(BlurDistance(x,y,x+u-mid.x,y+v-mid.y),spatial_sigma); n++; } } for (i=0; i < (ssize_t) GetPixelChannels(blur_image); i++) { PixelChannel channel; PixelTrait blur_traits, traits; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if ((blur_traits & CopyPixelTrait) != 0) { SetPixelChannel(blur_image,channel,p[i],q); continue; } pixel=0.0; gamma=0.0; n=0; if ((blur_traits & BlendPixelTrait) == 0) { /* No alpha blending. */ for (v=0; v < (ssize_t) height; v++) { for (u=0; u < (ssize_t) width; u++) { r=p+(ssize_t) GetPixelChannels(image)*width*(mid.y-v)+ GetPixelChannels(image)*(mid.x-u); pixel+=weights[id][n]*r[i]; gamma+=weights[id][n]; n++; } } SetPixelChannel(blur_image,channel,ClampToQuantum( PerceptibleReciprocal(gamma)*pixel),q); continue; } /* Alpha blending. 
        */
        for (v=0; v < (ssize_t) height; v++)
        {
          for (u=0; u < (ssize_t) width; u++)
          {
            double
              alpha,
              beta;

            r=p+(ssize_t) GetPixelChannels(image)*width*(mid.y-v)+
              GetPixelChannels(image)*(mid.x-u);
            alpha=(double) (QuantumScale*GetPixelAlpha(image,p));
            beta=(double) (QuantumScale*GetPixelAlpha(image,r));
            pixel+=weights[id][n]*r[i];
            gamma+=weights[id][n]*alpha*beta;
            n++;
          }
        }
        SetPixelChannel(blur_image,channel,ClampToQuantum(
          PerceptibleReciprocal(gamma)*pixel),q);
      }
      q+=GetPixelChannels(blur_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,BilateralBlurImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  image_view=DestroyCacheView(image_view);
  weights=DestroyBilateralThreadSet(number_threads,weights);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o n v o l v e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConvolveImage() applies a custom convolution kernel to the image.
%
%  The format of the ConvolveImage method is:
%
%      Image *ConvolveImage(const Image *image,const KernelInfo *kernel_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o kernel_info: the filtering kernel.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConvolveImage(const Image *image,
  const KernelInfo *kernel_info,ExceptionInfo *exception)
{
  Image
    *convolve_image;

#if defined(MAGICKCORE_OPENCL_SUPPORT)
  convolve_image=AccelerateConvolveImage(image,kernel_info,exception);
  if (convolve_image != (Image *) NULL)
    return(convolve_image);
#endif
  convolve_image=MorphologyImage(image,ConvolveMorphology,1,kernel_info,
    exception);
  return(convolve_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s p e c k l e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DespeckleImage() reduces the speckle noise in an image while preserving
%  the edges of the original image.  A speckle removing filter uses a
%  complementary hulling technique (raising pixels that are darker than their
%  surrounding neighbors, then complementarily lowering pixels that are
%  brighter than their surrounding neighbors) to reduce the speckle index of
%  that image (reference Crimmins speckle removal).
%
%  The format of the DespeckleImage method is:
%
%      Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
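%
%  A minimal usage sketch (illustrative only):
%
%    Image
%      *despeckled;
%
%    despeckled=DespeckleImage(image,exception);
%    if (despeckled != (Image *) NULL)
%      despeckled=DestroyImage(despeckled);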
% */ static void Hull(const Image *image,const ssize_t x_offset, const ssize_t y_offset,const size_t columns,const size_t rows, const int polarity,Quantum *magick_restrict f,Quantum *magick_restrict g) { register Quantum *p, *q, *r, *s; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(f != (Quantum *) NULL); assert(g != (Quantum *) NULL); p=f+(columns+2); q=g+(columns+2); r=p+(y_offset*((ssize_t) columns+2)+x_offset); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { MagickRealType v; register ssize_t i, x; i=(2*y+1)+y*columns; if (polarity > 0) for (x=0; x < (ssize_t) columns; x++) { v=(MagickRealType) p[i]; if ((MagickRealType) r[i] >= (v+ScaleCharToQuantum(2))) v+=ScaleCharToQuantum(1); q[i]=(Quantum) v; i++; } else for (x=0; x < (ssize_t) columns; x++) { v=(MagickRealType) p[i]; if ((MagickRealType) r[i] <= (v-ScaleCharToQuantum(2))) v-=ScaleCharToQuantum(1); q[i]=(Quantum) v; i++; } } p=f+(columns+2); q=g+(columns+2); r=q+(y_offset*((ssize_t) columns+2)+x_offset); s=q-(y_offset*((ssize_t) columns+2)+x_offset); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { register ssize_t i, x; MagickRealType v; i=(2*y+1)+y*columns; if (polarity > 0) for (x=0; x < (ssize_t) columns; x++) { v=(MagickRealType) q[i]; if (((MagickRealType) s[i] >= (v+ScaleCharToQuantum(2))) && ((MagickRealType) r[i] > v)) v+=ScaleCharToQuantum(1); p[i]=(Quantum) v; i++; } else for (x=0; x < (ssize_t) columns; x++) { v=(MagickRealType) q[i]; if (((MagickRealType) s[i] <= (v-ScaleCharToQuantum(2))) && ((MagickRealType) r[i] < v)) v-=ScaleCharToQuantum(1); p[i]=(Quantum) v; i++; } } } MagickExport Image *DespeckleImage(const Image *image,ExceptionInfo *exception) { #define DespeckleImageTag "Despeckle/Image" CacheView *despeckle_view, *image_view; Image *despeckle_image; MagickBooleanType status; MemoryInfo *buffer_info, *pixel_info; Quantum *magick_restrict buffer, *magick_restrict pixels; register ssize_t i; size_t length; static const ssize_t X[4] = {0, 1, 1,-1}, Y[4] = {1, 0, 1, 1}; /* Allocate despeckled image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) despeckle_image=AccelerateDespeckleImage(image,exception); if (despeckle_image != (Image *) NULL) return(despeckle_image); #endif despeckle_image=CloneImage(image,0,0,MagickTrue,exception); if (despeckle_image == (Image *) NULL) return((Image *) NULL); status=SetImageStorageClass(despeckle_image,DirectClass,exception); if (status == MagickFalse) { despeckle_image=DestroyImage(despeckle_image); return((Image *) NULL); } /* Allocate image buffer. 
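    (Both buffers hold (columns+2)*(rows+2) quantums: the one-pixel border on
    every side lets Hull() address all eight neighbor directions without
    bounds checks.)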
*/ length=(size_t) ((image->columns+2)*(image->rows+2)); pixel_info=AcquireVirtualMemory(length,sizeof(*pixels)); buffer_info=AcquireVirtualMemory(length,sizeof(*buffer)); if ((pixel_info == (MemoryInfo *) NULL) || (buffer_info == (MemoryInfo *) NULL)) { if (buffer_info != (MemoryInfo *) NULL) buffer_info=RelinquishVirtualMemory(buffer_info); if (pixel_info != (MemoryInfo *) NULL) pixel_info=RelinquishVirtualMemory(pixel_info); despeckle_image=DestroyImage(despeckle_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } pixels=(Quantum *) GetVirtualMemoryBlob(pixel_info); buffer=(Quantum *) GetVirtualMemoryBlob(buffer_info); /* Reduce speckle in the image. */ status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); despeckle_view=AcquireAuthenticCacheView(despeckle_image,exception); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel; PixelTrait despeckle_traits, traits; register ssize_t k, x; ssize_t j, y; if (status == MagickFalse) continue; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); despeckle_traits=GetPixelChannelTraits(despeckle_image,channel); if ((traits == UndefinedPixelTrait) || (despeckle_traits == UndefinedPixelTrait)) continue; if ((despeckle_traits & CopyPixelTrait) != 0) continue; (void) memset(pixels,0,length*sizeof(*pixels)); j=(ssize_t) image->columns+2; for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } j++; for (x=0; x < (ssize_t) image->columns; x++) { pixels[j++]=p[i]; p+=GetPixelChannels(image); } j++; } (void) memset(buffer,0,length*sizeof(*buffer)); for (k=0; k < 4; k++) { Hull(image,X[k],Y[k],image->columns,image->rows,1,pixels,buffer); Hull(image,-X[k],-Y[k],image->columns,image->rows,1,pixels,buffer); Hull(image,-X[k],-Y[k],image->columns,image->rows,-1,pixels,buffer); Hull(image,X[k],Y[k],image->columns,image->rows,-1,pixels,buffer); } j=(ssize_t) image->columns+2; for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(despeckle_view,0,y,despeckle_image->columns, 1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } j++; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelChannel(despeckle_image,channel,pixels[j++],q); q+=GetPixelChannels(despeckle_image); } sync=SyncCacheViewAuthenticPixels(despeckle_view,exception); if (sync == MagickFalse) status=MagickFalse; j++; } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,DespeckleImageTag,(MagickOffsetType) i, GetPixelChannels(image)); if (proceed == MagickFalse) status=MagickFalse; } } despeckle_view=DestroyCacheView(despeckle_view); image_view=DestroyCacheView(image_view); buffer_info=RelinquishVirtualMemory(buffer_info); pixel_info=RelinquishVirtualMemory(pixel_info); despeckle_image->type=image->type; if (status == MagickFalse) despeckle_image=DestroyImage(despeckle_image); return(despeckle_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E d g e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EdgeImage() finds edges in an image. Radius defines the radius of the % convolution filter. 
Use a radius of 0 and EdgeImage() selects a suitable % radius for you. % % The format of the EdgeImage method is: % % Image *EdgeImage(const Image *image,const double radius, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the pixel neighborhood. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *EdgeImage(const Image *image,const double radius, ExceptionInfo *exception) { Image *edge_image; KernelInfo *kernel_info; register ssize_t i; size_t width; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=GetOptimalKernelWidth1D(radius,0.5); kernel_info=AcquireKernelInfo((const char *) NULL,exception); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); (void) memset(kernel_info,0,sizeof(*kernel_info)); kernel_info->width=width; kernel_info->height=width; kernel_info->x=(ssize_t) (kernel_info->width-1)/2; kernel_info->y=(ssize_t) (kernel_info->height-1)/2; kernel_info->signature=MagickCoreSignature; kernel_info->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel_info->width,kernel_info->height* sizeof(*kernel_info->values))); if (kernel_info->values == (MagickRealType *) NULL) { kernel_info=DestroyKernelInfo(kernel_info); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++) kernel_info->values[i]=(-1.0); kernel_info->values[i/2]=(double) kernel_info->width*kernel_info->height-1.0; edge_image=ConvolveImage(image,kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); return(edge_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E m b o s s I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EmbossImage() returns a grayscale image with a three-dimensional effect. % We convolve the image with a Gaussian operator of the given radius and % standard deviation (sigma). For reasonable results, radius should be % larger than sigma. Use a radius of 0 and Emboss() selects a suitable % radius for you. % % The format of the EmbossImage method is: % % Image *EmbossImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the pixel neighborhood. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. 
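%
%  A minimal usage sketch (illustrative parameter values):
%
%    Image
%      *emboss;
%
%    emboss=EmbossImage(image,0.0,1.0,exception);
%    if (emboss != (Image *) NULL)
%      emboss=DestroyImage(emboss);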
%
*/
MagickExport Image *EmbossImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *emboss_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->width*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  j=(ssize_t) (kernel_info->width-1)/2;
  k=j;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(MagickRealType) (((u < 0) || (v < 0) ? -8.0 :
        8.0)*exp(-((double) u*u+v*v)/(2.0*MagickSigma*MagickSigma))/
        (2.0*MagickPI*MagickSigma*MagickSigma));
      if (u != k)
        kernel_info->values[i]=0.0;
      i++;
    }
    k--;
  }
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  emboss_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  if (emboss_image != (Image *) NULL)
    (void) EqualizeImage(emboss_image,exception);
  return(emboss_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G a u s s i a n B l u r I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GaussianBlurImage() blurs an image.  We convolve the image with a Gaussian
%  operator of the given radius and standard deviation (sigma).  For
%  reasonable results, the radius should be larger than sigma.  Use a radius
%  of 0 and GaussianBlurImage() selects a suitable radius for you.
%
%  The format of the GaussianBlurImage method is:
%
%      Image *GaussianBlurImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
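%
%  A minimal usage sketch (illustrative only):
%
%    Image
%      *blurred;
%
%    blurred=GaussianBlurImage(image,0.0,2.0,exception);
%    if (blurred != (Image *) NULL)
%      blurred=DestroyImage(blurred);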
% */ MagickExport Image *GaussianBlurImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { char geometry[MagickPathExtent]; KernelInfo *kernel_info; Image *blur_image; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); (void) FormatLocaleString(geometry,MagickPathExtent,"gaussian:%.20gx%.20g", radius,sigma); kernel_info=AcquireKernelInfo(geometry,exception); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); blur_image=ConvolveImage(image,kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % K u w a h a r a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % KuwaharaImage() is an edge preserving noise reduction filter. % % The format of the KuwaharaImage method is: % % Image *KuwaharaImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the square window radius. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. % */ static inline MagickRealType GetMeanLuma(const Image *magick_restrict image, const double *magick_restrict pixel) { return(0.212656f*pixel[image->channel_map[RedPixelChannel].offset]+ 0.715158f*pixel[image->channel_map[GreenPixelChannel].offset]+ 0.072186f*pixel[image->channel_map[BluePixelChannel].offset]); /* Rec709 */ } MagickExport Image *KuwaharaImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { #define KuwaharaImageTag "Kuwahara/Image" CacheView *image_view, *kuwahara_view; Image *gaussian_image, *kuwahara_image; MagickBooleanType status; MagickOffsetType progress; size_t width; ssize_t y; /* Initialize Kuwahara image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=(size_t) radius+1; gaussian_image=BlurImage(image,radius,sigma,exception); if (gaussian_image == (Image *) NULL) return((Image *) NULL); kuwahara_image=CloneImage(image,0,0,MagickTrue,exception); if (kuwahara_image == (Image *) NULL) { gaussian_image=DestroyImage(gaussian_image); return((Image *) NULL); } if (SetImageStorageClass(kuwahara_image,DirectClass,exception) == MagickFalse) { gaussian_image=DestroyImage(gaussian_image); kuwahara_image=DestroyImage(kuwahara_image); return((Image *) NULL); } /* Edge preserving noise reduction filter. 
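    (For each pixel, the four width*width quadrants that touch it are
    scanned; the quadrant whose luma variance is smallest wins, and its
    center is sampled via InterpolatePixelChannels() to produce the output
    pixel.)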
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(gaussian_image,exception); kuwahara_view=AcquireAuthenticCacheView(kuwahara_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,kuwahara_image,gaussian_image->rows,1) #endif for (y=0; y < (ssize_t) gaussian_image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(kuwahara_view,0,y,kuwahara_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) gaussian_image->columns; x++) { const Quantum *magick_restrict p; double min_variance; RectangleInfo quadrant, target; register size_t i; min_variance=MagickMaximumValue; SetGeometry(gaussian_image,&target); quadrant.width=width; quadrant.height=width; for (i=0; i < 4; i++) { const Quantum *magick_restrict k; double mean[MaxPixelChannels], variance; register ssize_t n; ssize_t j; quadrant.x=x; quadrant.y=y; switch (i) { case 0: { quadrant.x=x-(ssize_t) (width-1); quadrant.y=y-(ssize_t) (width-1); break; } case 1: { quadrant.y=y-(ssize_t) (width-1); break; } case 2: { quadrant.x=x-(ssize_t) (width-1); break; } case 3: default: break; } p=GetCacheViewVirtualPixels(image_view,quadrant.x,quadrant.y, quadrant.width,quadrant.height,exception); if (p == (const Quantum *) NULL) break; for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++) mean[j]=0.0; k=p; for (n=0; n < (ssize_t) (width*width); n++) { for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++) mean[j]+=(double) k[j]; k+=GetPixelChannels(gaussian_image); } for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++) mean[j]/=(double) (width*width); k=p; variance=0.0; for (n=0; n < (ssize_t) (width*width); n++) { double luma; luma=GetPixelLuma(gaussian_image,k); variance+=(luma-GetMeanLuma(gaussian_image,mean))* (luma-GetMeanLuma(gaussian_image,mean)); k+=GetPixelChannels(gaussian_image); } if (variance < min_variance) { min_variance=variance; target=quadrant; } } if (i < 4) { status=MagickFalse; break; } status=InterpolatePixelChannels(gaussian_image,image_view,kuwahara_image, UndefinedInterpolatePixel,(double) target.x+target.width/2.0,(double) target.y+target.height/2.0,q,exception); if (status == MagickFalse) break; q+=GetPixelChannels(kuwahara_image); } if (SyncCacheViewAuthenticPixels(kuwahara_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,KuwaharaImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } kuwahara_view=DestroyCacheView(kuwahara_view); image_view=DestroyCacheView(image_view); gaussian_image=DestroyImage(gaussian_image); if (status == MagickFalse) kuwahara_image=DestroyImage(kuwahara_image); return(kuwahara_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L o c a l C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LocalContrastImage() attempts to increase the appearance of large-scale % light-dark transitions. Local contrast enhancement works similarly to % sharpening with an unsharp mask, however the mask is instead created using % an image with a greater blur distance. 
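%
%  In outline (matching the implementation), each channel of a pixel is
%  scaled by
%
%    mult = (luma + (luma - blurred_luma)*strength/100) / luma
%
%  so pixels brighter than their large-radius blurred surround are pushed
%  further up and darker pixels further down.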
% % The format of the LocalContrastImage method is: % % Image *LocalContrastImage(const Image *image, const double radius, % const double strength,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian blur, in percentage with 100% % resulting in a blur radius of 20% of largest dimension. % % o strength: the strength of the blur mask in percentage. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *LocalContrastImage(const Image *image,const double radius, const double strength,ExceptionInfo *exception) { #define LocalContrastImageTag "LocalContrast/Image" CacheView *image_view, *contrast_view; float *interImage, *scanline, totalWeight; Image *contrast_image; MagickBooleanType status; MemoryInfo *scanline_info, *interImage_info; ssize_t scanLineSize, width; /* Initialize contrast image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) contrast_image=AccelerateLocalContrastImage(image,radius,strength,exception); if (contrast_image != (Image *) NULL) return(contrast_image); #endif contrast_image=CloneImage(image,0,0,MagickTrue,exception); if (contrast_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(contrast_image,DirectClass,exception) == MagickFalse) { contrast_image=DestroyImage(contrast_image); return((Image *) NULL); } image_view=AcquireVirtualCacheView(image,exception); contrast_view=AcquireAuthenticCacheView(contrast_image,exception); scanLineSize=(ssize_t) MagickMax(image->columns,image->rows); width=(ssize_t) scanLineSize*0.002f*fabs(radius); scanLineSize+=(2*width); scanline_info=AcquireVirtualMemory((size_t) GetOpenMPMaximumThreads()* scanLineSize,sizeof(*scanline)); if (scanline_info == (MemoryInfo *) NULL) { contrast_view=DestroyCacheView(contrast_view); image_view=DestroyCacheView(image_view); contrast_image=DestroyImage(contrast_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } scanline=(float *) GetVirtualMemoryBlob(scanline_info); /* Create intermediate buffer. */ interImage_info=AcquireVirtualMemory(image->rows*(image->columns+(2*width)), sizeof(*interImage)); if (interImage_info == (MemoryInfo *) NULL) { scanline_info=RelinquishVirtualMemory(scanline_info); contrast_view=DestroyCacheView(contrast_view); image_view=DestroyCacheView(image_view); contrast_image=DestroyImage(contrast_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } interImage=(float *) GetVirtualMemoryBlob(interImage_info); totalWeight=(float) ((width+1)*(width+1)); /* Vertical pass. 
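    (This pass runs down each column, applying an approximately triangular
    (tent) weighting to the luma: weights ramp up from 1 toward `width' and
    back down across 2*width-1 row samples, the sum is divided by
    totalWeight, and results near the borders are mirrored into the padding
    columns of the intermediate buffer.)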
*/ status=MagickTrue; { ssize_t x; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,image->columns,1) #endif for (x=0; x < (ssize_t) image->columns; x++) { const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; float *out, *pix, *pixels; register ssize_t y; ssize_t i; if (status == MagickFalse) continue; pixels=scanline; pixels+=id*scanLineSize; pix=pixels; p=GetCacheViewVirtualPixels(image_view,x,-width,1,image->rows+(2*width), exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (y=0; y < (ssize_t) image->rows+(2*width); y++) { *pix++=(float)GetPixelLuma(image,p); p+=image->number_channels; } out=interImage+x+width; for (y=0; y < (ssize_t) image->rows; y++) { float sum, weight; weight=1.0f; sum=0; pix=pixels+y; for (i=0; i < width; i++) { sum+=weight*(*pix++); weight+=1.0f; } for (i=width+1; i < (2*width); i++) { sum+=weight*(*pix++); weight-=1.0f; } /* write to output */ *out=sum/totalWeight; /* mirror into padding */ if (x <= width && x != 0) *(out-(x*2))=*out; if ((x > (ssize_t) image->columns-width-2) && (x != (ssize_t) image->columns-1)) *(out+((image->columns-x-1)*2))=*out; out+=image->columns+(width*2); } } } /* Horizontal pass. */ { ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; float *pix, *pixels; register Quantum *magick_restrict q; register ssize_t x; ssize_t i; if (status == MagickFalse) continue; pixels=scanline; pixels+=id*scanLineSize; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewAuthenticPixels(contrast_view,0,y,image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } memcpy(pixels,interImage+(y*(image->columns+(2*width))),(image->columns+ (2*width))*sizeof(float)); for (x=0; x < (ssize_t) image->columns; x++) { float mult, srcVal, sum, weight; PixelTrait traits; weight=1.0f; sum=0; pix=pixels+x; for (i=0; i < width; i++) { sum+=weight*(*pix++); weight+=1.0f; } for (i=width+1; i < (2*width); i++) { sum+=weight*(*pix++); weight-=1.0f; } /* Apply and write */ srcVal=(float) GetPixelLuma(image,p); mult=(srcVal-(sum/totalWeight))*(strength/100.0f); mult=(srcVal+mult)/srcVal; traits=GetPixelChannelTraits(image,RedPixelChannel); if ((traits & UpdatePixelTrait) != 0) SetPixelRed(contrast_image,ClampToQuantum((MagickRealType) GetPixelRed(image,p)*mult),q); traits=GetPixelChannelTraits(image,GreenPixelChannel); if ((traits & UpdatePixelTrait) != 0) SetPixelGreen(contrast_image,ClampToQuantum((MagickRealType) GetPixelGreen(image,p)*mult),q); traits=GetPixelChannelTraits(image,BluePixelChannel); if ((traits & UpdatePixelTrait) != 0) SetPixelBlue(contrast_image,ClampToQuantum((MagickRealType) GetPixelBlue(image,p)*mult),q); p+=image->number_channels; q+=contrast_image->number_channels; } if (SyncCacheViewAuthenticPixels(contrast_view,exception) == MagickFalse) status=MagickFalse; } } scanline_info=RelinquishVirtualMemory(scanline_info); interImage_info=RelinquishVirtualMemory(interImage_info); contrast_view=DestroyCacheView(contrast_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) contrast_image=DestroyImage(contrast_image); return(contrast_image); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o t i o n B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MotionBlurImage() simulates motion blur. We convolve the image with a % Gaussian operator of the given radius and standard deviation (sigma). % For reasonable results, radius should be larger than sigma. Use a % radius of 0 and MotionBlurImage() selects a suitable radius for you. % Angle gives the angle of the blurring motion. % % Andrew Protano contributed this effect. % % The format of the MotionBlurImage method is: % % Image *MotionBlurImage(const Image *image,const double radius, % const double sigma,const double angle,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting % the center pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o angle: Apply the effect along this angle. % % o exception: return any errors or warnings in this structure. % */ static MagickRealType *GetMotionBlurKernel(const size_t width, const double sigma) { MagickRealType *kernel, normalize; register ssize_t i; /* Generate a 1-D convolution kernel. */ (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,sizeof(*kernel))); if (kernel == (MagickRealType *) NULL) return(kernel); normalize=0.0; for (i=0; i < (ssize_t) width; i++) { kernel[i]=(MagickRealType) (exp((-((double) i*i)/(double) (2.0*MagickSigma* MagickSigma)))/(MagickSQ2PI*MagickSigma)); normalize+=kernel[i]; } for (i=0; i < (ssize_t) width; i++) kernel[i]/=normalize; return(kernel); } MagickExport Image *MotionBlurImage(const Image *image,const double radius, const double sigma,const double angle,ExceptionInfo *exception) { #define BlurImageTag "Blur/Image" CacheView *blur_view, *image_view, *motion_view; Image *blur_image; MagickBooleanType status; MagickOffsetType progress; MagickRealType *kernel; OffsetInfo *offset; PointInfo point; register ssize_t i; size_t width; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); width=GetOptimalKernelWidth1D(radius,sigma); kernel=GetMotionBlurKernel(width,sigma); if (kernel == (MagickRealType *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); offset=(OffsetInfo *) AcquireQuantumMemory(width,sizeof(*offset)); if (offset == (OffsetInfo *) NULL) { kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } point.x=(double) width*sin(DegreesToRadians(angle)); point.y=(double) width*cos(DegreesToRadians(angle)); for (i=0; i < (ssize_t) width; i++) { offset[i].x=(ssize_t) ceil((double) (i*point.y)/hypot(point.x,point.y)-0.5); offset[i].y=(ssize_t) ceil((double) (i*point.x)/hypot(point.x,point.y)-0.5); } /* Motion blur image. 
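    (The 1-D Gaussian kernel is swept along the offset line computed above:
    tap j reads the pixel at (x+offset[j].x,y+offset[j].y), so the blur
    trails in the direction given by `angle'.)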
*/ #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateMotionBlurImage(image,kernel,width,offset,exception); if (blur_image != (Image *) NULL) { kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); return(blur_image); } #endif blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) { kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); return((Image *) NULL); } if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); blur_image=DestroyImage(blur_image); return((Image *) NULL); } status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); motion_view=AcquireVirtualCacheView(image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,blur_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait blur_traits, traits; register const Quantum *magick_restrict r; register MagickRealType *magick_restrict k; register ssize_t j; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if ((blur_traits & CopyPixelTrait) != 0) { SetPixelChannel(blur_image,channel,p[i],q); continue; } k=kernel; pixel=0.0; if ((blur_traits & BlendPixelTrait) == 0) { for (j=0; j < (ssize_t) width; j++) { r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+ offset[j].y,1,1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } pixel+=(*k)*r[i]; k++; } SetPixelChannel(blur_image,channel,ClampToQuantum(pixel),q); continue; } alpha=0.0; gamma=0.0; for (j=0; j < (ssize_t) width; j++) { r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+offset[j].y,1, 1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } alpha=(double) (QuantumScale*GetPixelAlpha(image,r)); pixel+=(*k)*alpha*r[i]; gamma+=(*k)*alpha; k++; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(blur_image); } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,BlurImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_view=DestroyCacheView(blur_view); motion_view=DestroyCacheView(motion_view); image_view=DestroyCacheView(image_view); 
  kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
  offset=(OffsetInfo *) RelinquishMagickMemory(offset);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     P r e v i e w I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PreviewImage() tiles 9 thumbnails of the specified image with an image
%  processing operation applied with varying parameters.  This may be helpful
%  in pin-pointing an appropriate parameter for a particular image processing
%  operation.
%
%  The format of the PreviewImage method is:
%
%      Image *PreviewImage(const Image *image,const PreviewType preview,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o preview: the image processing operation.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PreviewImage(const Image *image,const PreviewType preview,
  ExceptionInfo *exception)
{
#define NumberTiles  9
#define PreviewImageTag  "Preview/Image"
#define DefaultPreviewGeometry  "204x204+10+10"

  char
    factor[MagickPathExtent],
    label[MagickPathExtent];

  double
    degrees,
    gamma,
    percentage,
    radius,
    sigma,
    threshold;

  Image
    *images,
    *montage_image,
    *preview_image,
    *thumbnail;

  ImageInfo
    *preview_info;

  MagickBooleanType
    proceed;

  MontageInfo
    *montage_info;

  QuantizeInfo
    quantize_info;

  RectangleInfo
    geometry;

  register ssize_t
    i,
    x;

  size_t
    colors;

  ssize_t
    y;

  /*
    Open output image file.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  colors=2;
  degrees=0.0;
  gamma=(-0.2f);
  preview_info=AcquireImageInfo();
  SetGeometry(image,&geometry);
  (void) ParseMetaGeometry(DefaultPreviewGeometry,&geometry.x,&geometry.y,
    &geometry.width,&geometry.height);
  images=NewImageList();
  percentage=12.5;
  GetQuantizeInfo(&quantize_info);
  radius=0.0;
  sigma=1.0;
  threshold=0.0;
  x=0;
  y=0;
  for (i=0; i < NumberTiles; i++)
  {
    thumbnail=ThumbnailImage(image,geometry.width,geometry.height,exception);
    if (thumbnail == (Image *) NULL)
      break;
    (void) SetImageProgressMonitor(thumbnail,(MagickProgressMonitor) NULL,
      (void *) NULL);
    (void) SetImageProperty(thumbnail,"label",DefaultTileLabel,exception);
    if (i == (NumberTiles/2))
      {
        (void) QueryColorCompliance("#dfdfdf",AllCompliance,
          &thumbnail->matte_color,exception);
        AppendImageToList(&images,thumbnail);
        continue;
      }
    switch (preview)
    {
      case RotatePreview:
      {
        degrees+=45.0;
        preview_image=RotateImage(thumbnail,degrees,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"rotate %g",degrees);
        break;
      }
      case ShearPreview:
      {
        degrees+=5.0;
        preview_image=ShearImage(thumbnail,degrees,degrees,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"shear %gx%g",
          degrees,2.0*degrees);
        break;
      }
      case RollPreview:
      {
        x=(ssize_t) ((i+1)*thumbnail->columns)/NumberTiles;
        y=(ssize_t) ((i+1)*thumbnail->rows)/NumberTiles;
        preview_image=RollImage(thumbnail,x,y,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"roll %+.20gx%+.20g",
          (double) x,(double) y);
        break;
      }
      case HuePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MagickPathExtent,"100,100,%g",2.0*
          percentage);
        (void) ModulateImage(preview_image,factor,exception);
        (void)
FormatLocaleString(label,MagickPathExtent,"modulate %s",factor); break; } case SaturationPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MagickPathExtent,"100,%g",2.0* percentage); (void) ModulateImage(preview_image,factor,exception); (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor); break; } case BrightnessPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MagickPathExtent,"%g",2.0*percentage); (void) ModulateImage(preview_image,factor,exception); (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor); break; } case GammaPreview: default: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; gamma+=0.4f; (void) GammaImage(preview_image,gamma,exception); (void) FormatLocaleString(label,MagickPathExtent,"gamma %g",gamma); break; } case SpiffPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image != (Image *) NULL) for (x=0; x < i; x++) (void) ContrastImage(preview_image,MagickTrue,exception); (void) FormatLocaleString(label,MagickPathExtent,"contrast (%.20g)", (double) i+1); break; } case DullPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; for (x=0; x < i; x++) (void) ContrastImage(preview_image,MagickFalse,exception); (void) FormatLocaleString(label,MagickPathExtent,"+contrast (%.20g)", (double) i+1); break; } case GrayscalePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; colors<<=1; quantize_info.number_colors=colors; quantize_info.colorspace=GRAYColorspace; (void) QuantizeImage(&quantize_info,preview_image,exception); (void) FormatLocaleString(label,MagickPathExtent, "-colorspace gray -colors %.20g",(double) colors); break; } case QuantizePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; colors<<=1; quantize_info.number_colors=colors; (void) QuantizeImage(&quantize_info,preview_image,exception); (void) FormatLocaleString(label,MagickPathExtent,"colors %.20g", (double) colors); break; } case DespecklePreview: { for (x=0; x < (i-1); x++) { preview_image=DespeckleImage(thumbnail,exception); if (preview_image == (Image *) NULL) break; thumbnail=DestroyImage(thumbnail); thumbnail=preview_image; } preview_image=DespeckleImage(thumbnail,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(label,MagickPathExtent,"despeckle (%.20g)", (double) i+1); break; } case ReduceNoisePreview: { preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) radius,(size_t) radius,exception); (void) FormatLocaleString(label,MagickPathExtent,"noise %g",radius); break; } case AddNoisePreview: { switch ((int) i) { case 0: { (void) CopyMagickString(factor,"uniform",MagickPathExtent); break; } case 1: { (void) CopyMagickString(factor,"gaussian",MagickPathExtent); break; } case 2: { (void) CopyMagickString(factor,"multiplicative",MagickPathExtent); break; } case 3: { (void) CopyMagickString(factor,"impulse",MagickPathExtent); break; } case 4: { (void) CopyMagickString(factor,"laplacian",MagickPathExtent); break; } case 5: { (void) CopyMagickString(factor,"Poisson",MagickPathExtent); break; } default: { (void)
CopyMagickString(thumbnail->magick,"NULL",MagickPathExtent); break; } } preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) i, (size_t) i,exception); (void) FormatLocaleString(label,MagickPathExtent,"+noise %s",factor); break; } case SharpenPreview: { preview_image=SharpenImage(thumbnail,radius,sigma,exception); (void) FormatLocaleString(label,MagickPathExtent,"sharpen %gx%g", radius,sigma); break; } case BlurPreview: { preview_image=BlurImage(thumbnail,radius,sigma,exception); (void) FormatLocaleString(label,MagickPathExtent,"blur %gx%g",radius, sigma); break; } case ThresholdPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) BilevelImage(preview_image,(double) (percentage*((double) QuantumRange+1.0))/100.0,exception); (void) FormatLocaleString(label,MagickPathExtent,"threshold %g", (double) (percentage*((double) QuantumRange+1.0))/100.0); break; } case EdgeDetectPreview: { preview_image=EdgeImage(thumbnail,radius,exception); (void) FormatLocaleString(label,MagickPathExtent,"edge %g",radius); break; } case SpreadPreview: { preview_image=SpreadImage(thumbnail,image->interpolate,radius, exception); (void) FormatLocaleString(label,MagickPathExtent,"spread %g", radius+0.5); break; } case SolarizePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) SolarizeImage(preview_image,(double) QuantumRange*percentage/ 100.0,exception); (void) FormatLocaleString(label,MagickPathExtent,"solarize %g", (QuantumRange*percentage)/100.0); break; } case ShadePreview: { degrees+=10.0; preview_image=ShadeImage(thumbnail,MagickTrue,degrees,degrees, exception); (void) FormatLocaleString(label,MagickPathExtent,"shade %gx%g",degrees, degrees); break; } case RaisePreview: { RectangleInfo raise; preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; raise.width=(size_t) (2*i+2); raise.height=(size_t) (2*i+2); raise.x=(i-1)/2; raise.y=(i-1)/2; (void) RaiseImage(preview_image,&raise,MagickTrue,exception); (void) FormatLocaleString(label,MagickPathExtent, "raise %.20gx%.20g%+.20g%+.20g",(double) raise.width,(double) raise.height,(double) raise.x,(double) raise.y); break; } case SegmentPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; threshold+=0.4f; (void) SegmentImage(preview_image,sRGBColorspace,MagickFalse,threshold, threshold,exception); (void) FormatLocaleString(label,MagickPathExtent,"segment %gx%g", threshold,threshold); break; } case SwirlPreview: { preview_image=SwirlImage(thumbnail,degrees,image->interpolate, exception); (void) FormatLocaleString(label,MagickPathExtent,"swirl %g",degrees); degrees+=45.0; break; } case ImplodePreview: { degrees+=0.1f; preview_image=ImplodeImage(thumbnail,degrees,image->interpolate, exception); (void) FormatLocaleString(label,MagickPathExtent,"implode %g",degrees); break; } case WavePreview: { degrees+=5.0f; preview_image=WaveImage(thumbnail,0.5*degrees,2.0*degrees, image->interpolate,exception); (void) FormatLocaleString(label,MagickPathExtent,"wave %gx%g",0.5* degrees,2.0*degrees); break; } case OilPaintPreview: { preview_image=OilPaintImage(thumbnail,(double) radius,(double) sigma, exception); (void) FormatLocaleString(label,MagickPathExtent,"paint %gx%g", radius,sigma); break; } case CharcoalDrawingPreview: { preview_image=CharcoalImage(thumbnail,(double) radius,(double) sigma, exception); (void)
FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g", radius,sigma); break; } case JPEGPreview: { char filename[MagickPathExtent]; int file; MagickBooleanType status; preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; preview_info->quality=(size_t) percentage; (void) FormatLocaleString(factor,MagickPathExtent,"%.20g",(double) preview_info->quality); file=AcquireUniqueFileResource(filename); if (file != -1) file=close(file)-1; (void) FormatLocaleString(preview_image->filename,MagickPathExtent, "jpeg:%s",filename); status=WriteImage(preview_info,preview_image,exception); if (status != MagickFalse) { Image *quality_image; (void) CopyMagickString(preview_info->filename, preview_image->filename,MagickPathExtent); quality_image=ReadImage(preview_info,exception); if (quality_image != (Image *) NULL) { preview_image=DestroyImage(preview_image); preview_image=quality_image; } } (void) RelinquishUniqueFileResource(preview_image->filename); if ((GetBlobSize(preview_image)/1024) >= 1024) (void) FormatLocaleString(label,MagickPathExtent,"quality %s\n%gmb ", factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/ 1024.0/1024.0); else if (GetBlobSize(preview_image) >= 1024) (void) FormatLocaleString(label,MagickPathExtent, "quality %s\n%gkb ",factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/1024.0); else (void) FormatLocaleString(label,MagickPathExtent, "quality %s\n%.20gb ",factor,(double) ((MagickOffsetType) GetBlobSize(thumbnail))); break; } } thumbnail=DestroyImage(thumbnail); percentage+=12.5; radius+=0.5; sigma+=0.25; if (preview_image == (Image *) NULL) break; preview_image->alpha_trait=UndefinedPixelTrait; (void) DeleteImageProperty(preview_image,"label"); (void) SetImageProperty(preview_image,"label",label,exception); AppendImageToList(&images,preview_image); proceed=SetImageProgress(image,PreviewImageTag,(MagickOffsetType) i, NumberTiles); if (proceed == MagickFalse) break; } if (images == (Image *) NULL) { preview_info=DestroyImageInfo(preview_info); return((Image *) NULL); } /* Create the montage. */ montage_info=CloneMontageInfo(preview_info,(MontageInfo *) NULL); (void) CopyMagickString(montage_info->filename,image->filename, MagickPathExtent); montage_info->shadow=MagickTrue; (void) CloneString(&montage_info->tile,"3x3"); (void) CloneString(&montage_info->geometry,DefaultPreviewGeometry); (void) CloneString(&montage_info->frame,DefaultTileFrame); montage_image=MontageImages(images,montage_info,exception); montage_info=DestroyMontageInfo(montage_info); images=DestroyImageList(images); if (montage_image == (Image *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); if (montage_image->montage != (char *) NULL) { /* Free image directory. */ montage_image->montage=(char *) RelinquishMagickMemory( montage_image->montage); if (image->directory != (char *) NULL) montage_image->directory=(char *) RelinquishMagickMemory( montage_image->directory); } preview_info=DestroyImageInfo(preview_info); return(montage_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R o t a t i o n a l B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RotationalBlurImage() applies a radial blur to the image. % % Andrew Protano contributed this effect. 
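%
% A minimal usage sketch (a hypothetical caller; the 10 degree angle is an
% illustrative value):
%
%   Image
%     *blurred;
%
%   blurred=RotationalBlurImage(image,10.0,exception);
%   if (blurred != (Image *) NULL)
%     blurred=DestroyImage(blurred);
%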
% % The format of the RotationalBlurImage method is: % % Image *RotationalBlurImage(const Image *image,const double angle, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o angle: the angle of the radial blur. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *RotationalBlurImage(const Image *image,const double angle, ExceptionInfo *exception) { CacheView *blur_view, *image_view, *radial_view; double blur_radius, *cos_theta, offset, *sin_theta, theta; Image *blur_image; MagickBooleanType status; MagickOffsetType progress; PointInfo blur_center; register ssize_t i; size_t n; ssize_t y; /* Allocate blur image. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateRotationalBlurImage(image,angle,exception); if (blur_image != (Image *) NULL) return(blur_image); #endif blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { blur_image=DestroyImage(blur_image); return((Image *) NULL); } blur_center.x=(double) (image->columns-1)/2.0; blur_center.y=(double) (image->rows-1)/2.0; blur_radius=hypot(blur_center.x,blur_center.y); n=(size_t) fabs(4.0*DegreesToRadians(angle)*sqrt((double) blur_radius)+2UL); theta=DegreesToRadians(angle)/(double) (n-1); cos_theta=(double *) AcquireQuantumMemory((size_t) n, sizeof(*cos_theta)); sin_theta=(double *) AcquireQuantumMemory((size_t) n, sizeof(*sin_theta)); if ((cos_theta == (double *) NULL) || (sin_theta == (double *) NULL)) { if (cos_theta != (double *) NULL) cos_theta=(double *) RelinquishMagickMemory(cos_theta); if (sin_theta != (double *) NULL) sin_theta=(double *) RelinquishMagickMemory(sin_theta); blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } offset=theta*(double) (n-1)/2.0; for (i=0; i < (ssize_t) n; i++) { cos_theta[i]=cos((double) (theta*i-offset)); sin_theta[i]=sin((double) (theta*i-offset)); } /* Radial blur image.
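
    Each destination pixel averages samples taken along a circular arc about
    the image center, using the cosine/sine tables precomputed above; the
    sampling step grows toward the center, where the arcs are short.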
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); radial_view=AcquireVirtualCacheView(image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,blur_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double radius; PointInfo center; register ssize_t i; size_t step; center.x=(double) x-blur_center.x; center.y=(double) y-blur_center.y; radius=hypot((double) center.x,center.y); if (radius == 0) step=1; else { step=(size_t) (blur_radius/radius); if (step == 0) step=1; else if (step >= n) step=n-1; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double gamma, pixel; PixelChannel channel; PixelTrait blur_traits, traits; register const Quantum *magick_restrict r; register ssize_t j; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if ((blur_traits & CopyPixelTrait) != 0) { SetPixelChannel(blur_image,channel,p[i],q); continue; } gamma=0.0; pixel=0.0; if ((GetPixelChannelTraits(image,AlphaPixelChannel) == UndefinedPixelTrait) || (channel == AlphaPixelChannel)) { for (j=0; j < (ssize_t) n; j+=(ssize_t) step) { r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+ center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t) (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5), 1,1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } pixel+=r[i]; gamma++; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); continue; } for (j=0; j < (ssize_t) n; j+=(ssize_t) step) { double alpha; r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+ center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t) (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5), 1,1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } alpha=(double) QuantumScale*GetPixelAlpha(image,r); pixel+=alpha*r[i]; gamma+=alpha; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(blur_image); } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,BlurImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_view=DestroyCacheView(blur_view); radial_view=DestroyCacheView(radial_view); image_view=DestroyCacheView(image_view); cos_theta=(double *) RelinquishMagickMemory(cos_theta); sin_theta=(double *) RelinquishMagickMemory(sin_theta); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e l e c t i v e B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SelectiveBlurImage() selectively blurs pixels within a contrast threshold. % It is similar to the unsharp mask, which sharpens everything with contrast % above a certain threshold. % % The format of the SelectiveBlurImage method is: % % Image *SelectiveBlurImage(const Image *image,const double radius, % const double sigma,const double threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o threshold: only pixels within this contrast threshold are included % in the blur operation. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SelectiveBlurImage(const Image *image,const double radius, const double sigma,const double threshold,ExceptionInfo *exception) { #define SelectiveBlurImageTag "SelectiveBlur/Image" CacheView *blur_view, *image_view, *luminance_view; Image *blur_image, *luminance_image; MagickBooleanType status; MagickOffsetType progress; MagickRealType *kernel; register ssize_t i; size_t width; ssize_t center, j, u, v, y; /* Initialize blur image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=GetOptimalKernelWidth1D(radius,sigma); kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,width*sizeof(*kernel))); if (kernel == (MagickRealType *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); j=(ssize_t) (width-1)/2; i=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) kernel[i++]=(MagickRealType) (exp(-((double) u*u+v*v)/(2.0*MagickSigma* MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma)); } if (image->debug != MagickFalse) { char format[MagickPathExtent], *message; register const MagickRealType *k; ssize_t u, v; (void) LogMagickEvent(TransformEvent,GetMagickModule(), " SelectiveBlurImage with %.20gx%.20g kernel:",(double) width,(double) width); message=AcquireString(""); k=kernel; for (v=0; v < (ssize_t) width; v++) { *message='\0'; (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v); (void) ConcatenateString(&message,format); for (u=0; u < (ssize_t) width; u++) { (void) FormatLocaleString(format,MagickPathExtent,"%+f ",(double) *k++); (void) ConcatenateString(&message,format); } (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message); } message=DestroyString(message); } blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { blur_image=DestroyImage(blur_image); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); return((Image *) NULL); } luminance_image=CloneImage(image,0,0,MagickTrue,exception); if (luminance_image == (Image *) NULL) { blur_image=DestroyImage(blur_image); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); return((Image *) NULL); } status=TransformImageColorspace(luminance_image,GRAYColorspace,exception); if
(status == MagickFalse) { luminance_image=DestroyImage(luminance_image); blur_image=DestroyImage(blur_image); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); return((Image *) NULL); } /* Threshold blur image. */ status=MagickTrue; progress=0; center=(ssize_t) (GetPixelChannels(image)*(image->columns+width)* ((width-1)/2L)+GetPixelChannels(image)*((width-1)/2L)); image_view=AcquireVirtualCacheView(image,exception); luminance_view=AcquireVirtualCacheView(luminance_image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,blur_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double contrast; MagickBooleanType sync; register const Quantum *magick_restrict l, *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (width-1)/2L),y-(ssize_t) ((width-1)/2L),image->columns+width,width,exception); l=GetCacheViewVirtualPixels(luminance_view,-((ssize_t) (width-1)/2L),y- (ssize_t) ((width-1)/2L),luminance_image->columns+width,width,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (l == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double intensity; register ssize_t i; intensity=GetPixelIntensity(image,p+center); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait blur_traits, traits; register const MagickRealType *magick_restrict k; register const Quantum *magick_restrict luminance_pixels, *magick_restrict pixels; register ssize_t u; ssize_t v; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if ((blur_traits & CopyPixelTrait) != 0) { SetPixelChannel(blur_image,channel,p[center+i],q); continue; } k=kernel; pixel=0.0; pixels=p; luminance_pixels=l; gamma=0.0; if ((blur_traits & BlendPixelTrait) == 0) { for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { contrast=GetPixelIntensity(luminance_image,luminance_pixels)- intensity; if (fabs(contrast) < threshold) { pixel+=(*k)*pixels[i]; gamma+=(*k); } k++; pixels+=GetPixelChannels(image); luminance_pixels+=GetPixelChannels(luminance_image); } pixels+=GetPixelChannels(image)*image->columns; luminance_pixels+=GetPixelChannels(luminance_image)* luminance_image->columns; } if (fabs((double) gamma) < MagickEpsilon) { SetPixelChannel(blur_image,channel,p[center+i],q); continue; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); continue; } for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { contrast=GetPixelIntensity(image,pixels)-intensity; if (fabs(contrast) < threshold) { alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels)); pixel+=(*k)*alpha*pixels[i]; gamma+=(*k)*alpha; } k++; pixels+=GetPixelChannels(image); luminance_pixels+=GetPixelChannels(luminance_image); } pixels+=GetPixelChannels(image)*image->columns; luminance_pixels+=GetPixelChannels(luminance_image)* luminance_image->columns; } if (fabs((double) gamma) < MagickEpsilon) { 
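            /*
              No neighbor fell within the contrast threshold, so the
              normalizer is ~0; pass the source pixel through unchanged.
            */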
SetPixelChannel(blur_image,channel,p[center+i],q); continue; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); } p+=GetPixelChannels(image); l+=GetPixelChannels(luminance_image); q+=GetPixelChannels(blur_image); } sync=SyncCacheViewAuthenticPixels(blur_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SelectiveBlurImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_image->type=image->type; blur_view=DestroyCacheView(blur_view); luminance_view=DestroyCacheView(luminance_view); image_view=DestroyCacheView(image_view); luminance_image=DestroyImage(luminance_image); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h a d e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShadeImage() shines a distant light on an image to create a % three-dimensional effect. You control the positioning of the light with % azimuth and elevation; azimuth is measured in degrees off the x axis % and elevation is measured in pixels above the Z axis. % % The format of the ShadeImage method is: % % Image *ShadeImage(const Image *image,const MagickBooleanType gray, % const double azimuth,const double elevation,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o gray: A value other than zero shades the intensity of each pixel. % % o azimuth, elevation: Define the light source direction. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ShadeImage(const Image *image,const MagickBooleanType gray, const double azimuth,const double elevation,ExceptionInfo *exception) { #define GetShadeIntensity(image,pixel) \ ClampPixel(GetPixelIntensity((image),(pixel))) #define ShadeImageTag "Shade/Image" CacheView *image_view, *shade_view; Image *linear_image, *shade_image; MagickBooleanType status; MagickOffsetType progress; PrimaryInfo light; ssize_t y; /* Initialize shaded image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); linear_image=CloneImage(image,0,0,MagickTrue,exception); shade_image=CloneImage(image,0,0,MagickTrue,exception); if ((linear_image == (Image *) NULL) || (shade_image == (Image *) NULL)) { if (linear_image != (Image *) NULL) linear_image=DestroyImage(linear_image); if (shade_image != (Image *) NULL) shade_image=DestroyImage(shade_image); return((Image *) NULL); } if (SetImageStorageClass(shade_image,DirectClass,exception) == MagickFalse) { linear_image=DestroyImage(linear_image); shade_image=DestroyImage(shade_image); return((Image *) NULL); } /* Compute the light vector. 
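
    This is the usual spherical-to-Cartesian conversion scaled to the
    quantum range:

      light=QuantumRange*(cos(azimuth)*cos(elevation),
        sin(azimuth)*cos(elevation),sin(elevation))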
*/ light.x=(double) QuantumRange*cos(DegreesToRadians(azimuth))* cos(DegreesToRadians(elevation)); light.y=(double) QuantumRange*sin(DegreesToRadians(azimuth))* cos(DegreesToRadians(elevation)); light.z=(double) QuantumRange*sin(DegreesToRadians(elevation)); /* Shade image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(linear_image,exception); shade_view=AcquireAuthenticCacheView(shade_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(linear_image,shade_image,linear_image->rows,1) #endif for (y=0; y < (ssize_t) linear_image->rows; y++) { double distance, normal_distance, shade; PrimaryInfo normal; register const Quantum *magick_restrict center, *magick_restrict p, *magick_restrict post, *magick_restrict pre; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-1,y-1,linear_image->columns+2,3, exception); q=QueueCacheViewAuthenticPixels(shade_view,0,y,shade_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } /* Shade this row of pixels. */ normal.z=2.0*(double) QuantumRange; /* constant Z of surface normal */ for (x=0; x < (ssize_t) linear_image->columns; x++) { register ssize_t i; /* Determine the surface normal and compute shading. */ pre=p+GetPixelChannels(linear_image); center=pre+(linear_image->columns+2)*GetPixelChannels(linear_image); post=center+(linear_image->columns+2)*GetPixelChannels(linear_image); normal.x=(double) ( GetShadeIntensity(linear_image,pre-GetPixelChannels(linear_image))+ GetShadeIntensity(linear_image,center-GetPixelChannels(linear_image))+ GetShadeIntensity(linear_image,post-GetPixelChannels(linear_image))- GetShadeIntensity(linear_image,pre+GetPixelChannels(linear_image))- GetShadeIntensity(linear_image,center+GetPixelChannels(linear_image))- GetShadeIntensity(linear_image,post+GetPixelChannels(linear_image))); normal.y=(double) ( GetShadeIntensity(linear_image,post-GetPixelChannels(linear_image))+ GetShadeIntensity(linear_image,post)+ GetShadeIntensity(linear_image,post+GetPixelChannels(linear_image))- GetShadeIntensity(linear_image,pre-GetPixelChannels(linear_image))- GetShadeIntensity(linear_image,pre)- GetShadeIntensity(linear_image,pre+GetPixelChannels(linear_image))); if ((fabs(normal.x) <= MagickEpsilon) && (fabs(normal.y) <= MagickEpsilon)) shade=light.z; else { shade=0.0; distance=normal.x*light.x+normal.y*light.y+normal.z*light.z; if (distance > MagickEpsilon) { normal_distance=normal.x*normal.x+normal.y*normal.y+ normal.z*normal.z; if (normal_distance > (MagickEpsilon*MagickEpsilon)) shade=distance/sqrt((double) normal_distance); } } for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++) { PixelChannel channel; PixelTrait shade_traits, traits; channel=GetPixelChannelChannel(linear_image,i); traits=GetPixelChannelTraits(linear_image,channel); shade_traits=GetPixelChannelTraits(shade_image,channel); if ((traits == UndefinedPixelTrait) || (shade_traits == UndefinedPixelTrait)) continue; if ((shade_traits & CopyPixelTrait) != 0) { SetPixelChannel(shade_image,channel,center[i],q); continue; } if ((traits & UpdatePixelTrait) == 0) { SetPixelChannel(shade_image,channel,center[i],q); continue; } if (gray != MagickFalse) { SetPixelChannel(shade_image,channel,ClampToQuantum(shade),q); continue; } SetPixelChannel(shade_image,channel,ClampToQuantum(QuantumScale*shade* center[i]),q); } 
p+=GetPixelChannels(linear_image); q+=GetPixelChannels(shade_image); } if (SyncCacheViewAuthenticPixels(shade_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ShadeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } shade_view=DestroyCacheView(shade_view); image_view=DestroyCacheView(image_view); linear_image=DestroyImage(linear_image); if (status == MagickFalse) shade_image=DestroyImage(shade_image); return(shade_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h a r p e n I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SharpenImage() sharpens the image. We convolve the image with a Gaussian % operator of the given radius and standard deviation (sigma). For % reasonable results, radius should be larger than sigma. Use a radius of 0 % and SharpenImage() selects a suitable radius for you. % % Using a separable kernel would be faster, but the negative weights cancel % out on the corners of the kernel producing often undesirable ringing in the % filtered result; this can be avoided by using a 2D gaussian shaped image % sharpening kernel instead. % % The format of the SharpenImage method is: % % Image *SharpenImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Laplacian, in pixels. % % o exception: return any errors or warnings in this structure. 
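%
% A minimal usage sketch (a hypothetical caller; radius 0 lets SharpenImage()
% choose a suitable kernel width for a sigma of 1.0):
%
%   Image
%     *sharp;
%
%   sharp=SharpenImage(image,0.0,1.0,exception);
%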
% */ MagickExport Image *SharpenImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { double gamma, normalize; Image *sharp_image; KernelInfo *kernel_info; register ssize_t i; size_t width; ssize_t j, u, v; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=GetOptimalKernelWidth2D(radius,sigma); kernel_info=AcquireKernelInfo((const char *) NULL,exception); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); (void) memset(kernel_info,0,sizeof(*kernel_info)); kernel_info->width=width; kernel_info->height=width; kernel_info->x=(ssize_t) (width-1)/2; kernel_info->y=(ssize_t) (width-1)/2; kernel_info->signature=MagickCoreSignature; kernel_info->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel_info->width,kernel_info->height* sizeof(*kernel_info->values))); if (kernel_info->values == (MagickRealType *) NULL) { kernel_info=DestroyKernelInfo(kernel_info); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } normalize=0.0; j=(ssize_t) (kernel_info->width-1)/2; i=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) { kernel_info->values[i]=(MagickRealType) (-exp(-((double) u*u+v*v)/(2.0* MagickSigma*MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma)); normalize+=kernel_info->values[i]; i++; } } kernel_info->values[i/2]=(double) ((-2.0)*normalize); normalize=0.0; for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++) normalize+=kernel_info->values[i]; gamma=PerceptibleReciprocal(normalize); for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++) kernel_info->values[i]*=gamma; sharp_image=ConvolveImage(image,kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); return(sharp_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S p r e a d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SpreadImage() is a special effects method that randomly displaces each % pixel in a square area defined by the radius parameter. % % The format of the SpreadImage method is: % % Image *SpreadImage(const Image *image, % const PixelInterpolateMethod method,const double radius, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o method: interpolation method. % % o radius: choose a random pixel in a neighborhood of this extent. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SpreadImage(const Image *image, const PixelInterpolateMethod method,const double radius, ExceptionInfo *exception) { #define SpreadImageTag "Spread/Image" CacheView *image_view, *spread_view; Image *spread_image; MagickBooleanType status; MagickOffsetType progress; RandomInfo **magick_restrict random_info; size_t width; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif /* Initialize spread image attributes.
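
    (Each pixel is later replaced by an interpolated sample taken at a
    pseudo-random offset of up to about half the kernel width in each
    direction, where the width is derived from the radius argument.)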
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); spread_image=CloneImage(image,0,0,MagickTrue,exception); if (spread_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(spread_image,DirectClass,exception) == MagickFalse) { spread_image=DestroyImage(spread_image); return((Image *) NULL); } /* Spread image. */ status=MagickTrue; progress=0; width=GetOptimalKernelWidth1D(radius,0.5); random_info=AcquireRandomInfoThreadSet(); image_view=AcquireVirtualCacheView(image,exception); spread_view=AcquireAuthenticCacheView(spread_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,spread_image,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(spread_view,0,y,spread_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { PointInfo point; point.x=GetPseudoRandomValue(random_info[id]); point.y=GetPseudoRandomValue(random_info[id]); status=InterpolatePixelChannels(image,image_view,spread_image,method, (double) x+width*(point.x-0.5),(double) y+width*(point.y-0.5),q, exception); if (status == MagickFalse) break; q+=GetPixelChannels(spread_image); } if (SyncCacheViewAuthenticPixels(spread_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SpreadImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } spread_view=DestroyCacheView(spread_view); image_view=DestroyCacheView(image_view); random_info=DestroyRandomInfoThreadSet(random_info); if (status == MagickFalse) spread_image=DestroyImage(spread_image); return(spread_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n s h a r p M a s k I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnsharpMaskImage() sharpens one or more image channels. We convolve the % image with a Gaussian operator of the given radius and standard deviation % (sigma). For reasonable results, radius should be larger than sigma. Use a % radius of 0 and UnsharpMaskImage() selects a suitable radius for you. % % The format of the UnsharpMaskImage method is: % % Image *UnsharpMaskImage(const Image *image,const double radius, % const double sigma,const double gain,const double threshold, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o gain: the percentage of the difference between the original and the % blur image that is added back into the original. % % o threshold: the threshold in pixels needed to apply the difference gain. % % o exception: return any errors or warnings in this structure.
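%
% A minimal usage sketch (a hypothetical caller; add 100% of the difference
% back, ignoring differences below about 5% of the quantum range):
%
%   Image
%     *sharp;
%
%   sharp=UnsharpMaskImage(image,0.0,1.0,1.0,0.05,exception);
%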
% */ MagickExport Image *UnsharpMaskImage(const Image *image,const double radius, const double sigma,const double gain,const double threshold, ExceptionInfo *exception) { #define SharpenImageTag "Sharpen/Image" CacheView *image_view, *unsharp_view; Image *unsharp_image; MagickBooleanType status; MagickOffsetType progress; double quantum_threshold; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); /* This kernel appears to be broken. #if defined(MAGICKCORE_OPENCL_SUPPORT) unsharp_image=AccelerateUnsharpMaskImage(image,radius,sigma,gain,threshold, exception); if (unsharp_image != (Image *) NULL) return(unsharp_image); #endif */ unsharp_image=BlurImage(image,radius,sigma,exception); if (unsharp_image == (Image *) NULL) return((Image *) NULL); quantum_threshold=(double) QuantumRange*threshold; /* Unsharp-mask image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); unsharp_view=AcquireAuthenticCacheView(unsharp_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,unsharp_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(unsharp_view,0,y,unsharp_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double pixel; PixelChannel channel; PixelTrait traits, unsharp_traits; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); unsharp_traits=GetPixelChannelTraits(unsharp_image,channel); if ((traits == UndefinedPixelTrait) || (unsharp_traits == UndefinedPixelTrait)) continue; if ((unsharp_traits & CopyPixelTrait) != 0) { SetPixelChannel(unsharp_image,channel,p[i],q); continue; } pixel=p[i]-(double) GetPixelChannel(unsharp_image,channel,q); if (fabs(2.0*pixel) < quantum_threshold) pixel=(double) p[i]; else pixel=(double) p[i]+gain*pixel; SetPixelChannel(unsharp_image,channel,ClampToQuantum(pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(unsharp_image); } if (SyncCacheViewAuthenticPixels(unsharp_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SharpenImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } unsharp_image->type=image->type; unsharp_view=DestroyCacheView(unsharp_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) unsharp_image=DestroyImage(unsharp_image); return(unsharp_image); }
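
/*
  A self-contained, hypothetical driver showing how the sharpening entry
  points above are typically called; "input.png", "output.png", and the
  parameter values are illustrative assumptions, not part of the library:

    #include "MagickCore/MagickCore.h"

    int main(int argc,char **argv)
    {
      ExceptionInfo
        *exception;

      Image
        *image,
        *sharp;

      ImageInfo
        *image_info;

      (void) argc;
      MagickCoreGenesis(*argv,MagickFalse);
      exception=AcquireExceptionInfo();
      image_info=CloneImageInfo((ImageInfo *) NULL);
      (void) CopyMagickString(image_info->filename,"input.png",
        MagickPathExtent);
      image=ReadImage(image_info,exception);
      if (image != (Image *) NULL)
        {
          sharp=UnsharpMaskImage(image,0.0,1.0,1.0,0.05,exception);
          if (sharp != (Image *) NULL)
            {
              (void) CopyMagickString(sharp->filename,"output.png",
                MagickPathExtent);
              (void) WriteImage(image_info,sharp,exception);
              sharp=DestroyImage(sharp);
            }
          image=DestroyImage(image);
        }
      image_info=DestroyImageInfo(image_info);
      exception=DestroyExceptionInfo(exception);
      MagickCoreTerminus();
      return(0);
    }
*/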
profile.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP RRRR OOO FFFFF IIIII L EEEEE % % P P R R O O F I L E % % PPPP RRRR O O FFF I L EEE % % P R R O O F I L E % % P R R OOO F IIIII LLLLL EEEEE % % % % % % MagickCore Image Profile Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/color.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/configure.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/linked-list.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/option-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/profile-private.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #if defined(MAGICKCORE_LCMS_DELEGATE) #if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H) #include <wchar.h> #include <lcms/lcms2.h> #else #include <wchar.h> #include "lcms2.h" #endif #endif #if defined(MAGICKCORE_XML_DELEGATE) # if defined(MAGICKCORE_WINDOWS_SUPPORT) # if !defined(__MINGW32__) # include <win32config.h> # endif # endif # include <libxml/parser.h> # include <libxml/tree.h> #endif /* Forward declarations */ static MagickBooleanType SetImageProfileInternal(Image *,const char *,const StringInfo *, const MagickBooleanType,ExceptionInfo *); static void WriteTo8BimProfile(Image *,const char*,const StringInfo *); /* Typedef declarations */ struct _ProfileInfo { char *name; size_t length; unsigned char *info; size_t signature; }; typedef struct _CMSExceptionInfo { Image *image; ExceptionInfo *exception; } CMSExceptionInfo; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e P r o f i l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImageProfiles() clones one or more image profiles. % % The format of the CloneImageProfiles method is: % % MagickBooleanType CloneImageProfiles(Image *image, % const Image *clone_image) % % A description of each parameter follows: % % o image: the image. % % o clone_image: the clone image. 
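%
% A minimal usage sketch (a hypothetical caller): after cloning an image's
% pixels, carry its ICC/IPTC/generic profiles along as well:
%
%   (void) CloneImageProfiles(target,source);
%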
% */ MagickExport MagickBooleanType CloneImageProfiles(Image *image, const Image *clone_image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(clone_image != (const Image *) NULL); assert(clone_image->signature == MagickCoreSignature); if (clone_image->profiles != (void *) NULL) { if (image->profiles != (void *) NULL) DestroyImageProfiles(image); image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles, (void *(*)(void *)) ConstantString,(void *(*)(void *)) CloneStringInfo); } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e l e t e I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DeleteImageProfile() deletes a profile from the image by its name. % % The format of the DeleteImageProfile method is: % % MagickBooleanType DeleteImageProfile(Image *image,const char *name) % % A description of each parameter follows: % % o image: the image. % % o name: the profile name. % */ MagickExport MagickBooleanType DeleteImageProfile(Image *image,const char *name) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return(MagickFalse); WriteTo8BimProfile(image,name,(StringInfo *) NULL); return(DeleteNodeFromSplayTree((SplayTreeInfo *) image->profiles,name)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y I m a g e P r o f i l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImageProfiles() releases memory associated with an image profile map. % % The format of the DestroyProfiles method is: % % void DestroyImageProfiles(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport void DestroyImageProfiles(Image *image) { if (image->profiles != (SplayTreeInfo *) NULL) image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageProfile() gets a profile associated with an image by name. % % The format of the GetImageProfile method is: % % const StringInfo *GetImageProfile(const Image *image,const char *name) % % A description of each parameter follows: % % o image: the image. % % o name: the profile name.
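%
% A minimal usage sketch (a hypothetical caller; look up the ICC profile):
%
%   const StringInfo
%     *icc;
%
%   icc=GetImageProfile(image,"icc");
%   if (icc != (const StringInfo *) NULL)
%     (void) printf("icc profile: %.20g bytes\n",(double)
%       GetStringInfoLength(icc));
%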
% */ MagickExport const StringInfo *GetImageProfile(const Image *image, const char *name) { const StringInfo *profile; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return((StringInfo *) NULL); profile=(const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *) image->profiles,name); return(profile); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t N e x t I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetNextImageProfile() gets the next profile name for an image. % % The format of the GetNextImageProfile method is: % % char *GetNextImageProfile(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport char *GetNextImageProfile(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return((char *) NULL); return((char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P r o f i l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ProfileImage() associates, applies, or removes an ICM, IPTC, or generic % profile with / to / from an image. If the profile is NULL, it is removed % from the image otherwise added or applied. Use a name of '*' and a profile % of NULL to remove all profiles from the image. % % ICC and ICM profiles are handled as follows: If the image does not have % an associated color profile, the one you provide is associated with the % image and the image pixels are not transformed. Otherwise, the colorspace % transform defined by the existing and new profiles is applied to the image % pixels and the new profile is associated with the image. % % The format of the ProfileImage method is: % % MagickBooleanType ProfileImage(Image *image,const char *name, % const void *datum,const size_t length,const MagickBooleanType clone) % % A description of each parameter follows: % % o image: the image. % % o name: Name of profile to add or remove: ICC, IPTC, or generic profile. % % o datum: the profile data. % % o length: the length of the profile. % % o clone: should be MagickFalse.
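%
% A minimal usage sketch (a hypothetical caller; icc_datum and icc_length
% hold a raw ICC profile obtained elsewhere):
%
%   if (ProfileImage(image,"ICC",icc_datum,icc_length,MagickFalse) == MagickFalse)
%     return(MagickFalse);
%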
% */ #if defined(MAGICKCORE_LCMS_DELEGATE) typedef struct _LCMSInfo { ColorspaceType colorspace; cmsUInt32Number type; size_t channels; cmsHPROFILE profile; int intent; double scale, translate; void **magick_restrict pixels; } LCMSInfo; #if LCMS_VERSION < 2060 static void* cmsGetContextUserData(cmsContext ContextID) { return(ContextID); } static cmsContext cmsCreateContext(void *magick_unused(Plugin),void *UserData) { magick_unreferenced(Plugin); return((cmsContext) UserData); } static void cmsSetLogErrorHandlerTHR(cmsContext magick_unused(ContextID), cmsLogErrorHandlerFunction Fn) { magick_unreferenced(ContextID); cmsSetLogErrorHandler(Fn); } static void cmsDeleteContext(cmsContext magick_unused(ContextID)) { magick_unreferenced(ContextID); } #endif static void **DestroyPixelThreadSet(void **pixels) { register ssize_t i; if (pixels == (void **) NULL) return((void **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (pixels[i] != (void *) NULL) pixels[i]=RelinquishMagickMemory(pixels[i]); pixels=(void **) RelinquishMagickMemory(pixels); return(pixels); } static void **AcquirePixelThreadSet(const size_t columns, const size_t channels,MagickBooleanType highres) { register ssize_t i; size_t number_threads; size_t size; void **pixels; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); pixels=(void **) AcquireQuantumMemory(number_threads,sizeof(*pixels)); if (pixels == (void **) NULL) return((void **) NULL); (void) memset(pixels,0,number_threads*sizeof(*pixels)); size=sizeof(double); if (highres == MagickFalse) size=sizeof(Quantum); for (i=0; i < (ssize_t) number_threads; i++) { pixels[i]=AcquireQuantumMemory(columns,channels*size); if (pixels[i] == (void *) NULL) return(DestroyPixelThreadSet(pixels)); } return(pixels); } static cmsHTRANSFORM *DestroyTransformThreadSet(cmsHTRANSFORM *transform) { register ssize_t i; assert(transform != (cmsHTRANSFORM *) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (transform[i] != (cmsHTRANSFORM) NULL) cmsDeleteTransform(transform[i]); transform=(cmsHTRANSFORM *) RelinquishMagickMemory(transform); return(transform); } static cmsHTRANSFORM *AcquireTransformThreadSet(const LCMSInfo *source_info, const LCMSInfo *target_info,const cmsUInt32Number flags, cmsContext cms_context) { cmsHTRANSFORM *transform; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); transform=(cmsHTRANSFORM *) AcquireQuantumMemory(number_threads, sizeof(*transform)); if (transform == (cmsHTRANSFORM *) NULL) return((cmsHTRANSFORM *) NULL); (void) memset(transform,0,number_threads*sizeof(*transform)); for (i=0; i < (ssize_t) number_threads; i++) { transform[i]=cmsCreateTransformTHR(cms_context,source_info->profile, source_info->type,target_info->profile,target_info->type, target_info->intent,flags); if (transform[i] == (cmsHTRANSFORM) NULL) return(DestroyTransformThreadSet(transform)); } return(transform); } static void CMSExceptionHandler(cmsContext context,cmsUInt32Number severity, const char *message) { CMSExceptionInfo *cms_exception; ExceptionInfo *exception; Image *image; cms_exception=(CMSExceptionInfo *) cmsGetContextUserData(context); if (cms_exception == (CMSExceptionInfo *) NULL) return; exception=cms_exception->exception; if (exception == (ExceptionInfo *) NULL) return; image=cms_exception->image; if (image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning, "UnableToTransformColorspace","`%s'","unknown context"); 
return; } if (image->debug != MagickFalse) (void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s", severity,message != (char *) NULL ? message : "no message"); (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning, "UnableToTransformColorspace","`%s', %s (#%u)",image->filename, message != (char *) NULL ? message : "no message",severity); } static void TransformDoublePixels(const int id,const Image* image, const LCMSInfo *source_info,const LCMSInfo *target_info, const cmsHTRANSFORM *transform,Quantum *q) { #define GetLCMSPixel(source_info,pixel) \ (source_info->scale*QuantumScale*(pixel)+source_info->translate) #define SetLCMSPixel(target_info,pixel) \ ClampToQuantum(target_info->scale*QuantumRange*(pixel)+target_info->translate) register double *p; register ssize_t x; p=source_info->pixels[id]; for (x=0; x < (ssize_t) image->columns; x++) { *p++=GetLCMSPixel(source_info,GetPixelRed(image,q)); if (source_info->channels > 1) { *p++=GetLCMSPixel(source_info,GetPixelGreen(image,q)); *p++=GetLCMSPixel(source_info,GetPixelBlue(image,q)); } if (source_info->channels > 3) *p++=GetLCMSPixel(source_info,GetPixelBlack(image,q)); q+=GetPixelChannels(image); } cmsDoTransform(transform[id],source_info->pixels[id], target_info->pixels[id],(unsigned int) image->columns); p=target_info->pixels[id]; q-=GetPixelChannels(image)*image->columns; for (x=0; x < (ssize_t) image->columns; x++) { if (target_info->channels == 1) SetPixelGray(image,SetLCMSPixel(target_info,*p),q); else SetPixelRed(image,SetLCMSPixel(target_info,*p),q); p++; if (target_info->channels > 1) { SetPixelGreen(image,SetLCMSPixel(target_info,*p),q); p++; SetPixelBlue(image,SetLCMSPixel(target_info,*p),q); p++; } if (target_info->channels > 3) { SetPixelBlack(image,SetLCMSPixel(target_info,*p),q); p++; } q+=GetPixelChannels(image); } } static void TransformQuantumPixels(const int id,const Image* image, const LCMSInfo *source_info,const LCMSInfo *target_info, const cmsHTRANSFORM *transform,Quantum *q) { register Quantum *p; register ssize_t x; p=source_info->pixels[id]; for (x=0; x < (ssize_t) image->columns; x++) { *p++=GetPixelRed(image,q); if (source_info->channels > 1) { *p++=GetPixelGreen(image,q); *p++=GetPixelBlue(image,q); } if (source_info->channels > 3) *p++=GetPixelBlack(image,q); q+=GetPixelChannels(image); } cmsDoTransform(transform[id],source_info->pixels[id], target_info->pixels[id],(unsigned int) image->columns); p=target_info->pixels[id]; q-=GetPixelChannels(image)*image->columns; for (x=0; x < (ssize_t) image->columns; x++) { if (target_info->channels == 1) SetPixelGray(image,*p++,q); else SetPixelRed(image,*p++,q); if (target_info->channels > 1) { SetPixelGreen(image,*p++,q); SetPixelBlue(image,*p++,q); } if (target_info->channels > 3) SetPixelBlack(image,*p++,q); q+=GetPixelChannels(image); } } #endif static MagickBooleanType SetsRGBImageProfile(Image *image, ExceptionInfo *exception) { static unsigned char sRGBProfile[] = { 0x00, 0x00, 0x0c, 0x8c, 0x61, 0x72, 0x67, 0x6c, 0x02, 0x20, 0x00, 0x00, 0x6d, 0x6e, 0x74, 0x72, 0x52, 0x47, 0x42, 0x20, 0x58, 0x59, 0x5a, 0x20, 0x07, 0xde, 0x00, 0x01, 0x00, 0x06, 0x00, 0x16, 0x00, 0x0f, 0x00, 0x3a, 0x61, 0x63, 0x73, 0x70, 0x4d, 0x53, 0x46, 0x54, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x43, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x61, 0x72, 0x67, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x01, 0x50, 0x00, 0x00, 0x00, 0x99, 0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0x01, 0xec, 0x00, 0x00, 0x00, 0x67, 0x64, 0x6d, 0x6e, 0x64, 0x00, 0x00, 0x02, 0x54, 0x00, 0x00, 0x00, 0x70, 0x64, 0x6d, 0x64, 0x64, 0x00, 0x00, 0x02, 0xc4, 0x00, 0x00, 0x00, 0x88, 0x74, 0x65, 0x63, 0x68, 0x00, 0x00, 0x03, 0x4c, 0x00, 0x00, 0x00, 0x0c, 0x76, 0x75, 0x65, 0x64, 0x00, 0x00, 0x03, 0x58, 0x00, 0x00, 0x00, 0x67, 0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x03, 0xc0, 0x00, 0x00, 0x00, 0x24, 0x6c, 0x75, 0x6d, 0x69, 0x00, 0x00, 0x03, 0xe4, 0x00, 0x00, 0x00, 0x14, 0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x03, 0xf8, 0x00, 0x00, 0x00, 0x24, 0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0x04, 0x1c, 0x00, 0x00, 0x00, 0x14, 0x62, 0x6b, 0x70, 0x74, 0x00, 0x00, 0x04, 0x30, 0x00, 0x00, 0x00, 0x14, 0x72, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x44, 0x00, 0x00, 0x00, 0x14, 0x67, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x58, 0x00, 0x00, 0x00, 0x14, 0x62, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x6c, 0x00, 0x00, 0x00, 0x14, 0x72, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x67, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x62, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x47, 0x72, 0x61, 0x65, 0x6d, 0x65, 0x20, 0x57, 0x2e, 0x20, 0x47, 0x69, 0x6c, 0x6c, 0x2e, 0x20, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x20, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x20, 0x4e, 0x6f, 0x20, 0x57, 0x61, 0x72, 0x72, 0x61, 0x6e, 0x74, 0x79, 0x2c, 0x20, 0x55, 0x73, 0x65, 0x20, 0x61, 0x74, 0x20, 0x79, 0x6f, 0x75, 0x72, 0x20, 0x6f, 0x77, 0x6e, 0x20, 0x72, 0x69, 0x73, 0x6b, 0x2e, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0x69, 0x67, 0x20, 0x00, 0x00, 0x00, 0x00, 0x43, 0x52, 0x54, 0x20, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xa4, 0x7c, 0x00, 0x14, 0x5f, 0x30, 0x00, 0x10, 0xce, 0x02, 0x00, 0x03, 0xed, 0xb2, 0x00, 0x04, 0x13, 0x0a, 0x00, 0x03, 0x5c, 0x67, 0x00, 0x00, 0x00, 0x01, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x0a, 0x3d, 0x00, 0x50, 0x00, 0x00, 0x00, 0x57, 0x1e, 0xb8, 0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x8f, 0x00, 0x00, 0x00, 0x02, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x51, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x16, 0xcc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6f, 0xa0, 0x00, 0x00, 0x38, 0xf5, 0x00, 0x00, 0x03, 0x90, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x97, 0x00, 0x00, 0xb7, 0x87, 0x00, 0x00, 0x18, 0xd9, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x9f, 0x00, 0x00, 0x0f, 0x84, 0x00, 0x00, 0xb6, 0xc4, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x0a, 0x00, 0x0f, 0x00, 0x14, 0x00, 0x19, 0x00, 0x1e, 0x00, 0x23, 0x00, 0x28, 0x00, 0x2d, 0x00, 0x32, 0x00, 0x37, 0x00, 0x3b, 0x00, 0x40, 0x00, 0x45, 0x00, 0x4a, 0x00, 0x4f, 0x00, 0x54, 0x00, 0x59, 0x00, 0x5e, 0x00, 0x63, 0x00, 0x68, 0x00, 0x6d, 0x00, 0x72, 0x00, 0x77, 0x00, 0x7c, 0x00, 0x81, 0x00, 0x86, 0x00, 0x8b, 0x00, 0x90, 0x00, 0x95, 0x00, 0x9a, 0x00, 0x9f, 0x00, 0xa4, 0x00, 0xa9, 0x00, 0xae, 0x00, 0xb2, 0x00, 0xb7, 0x00, 0xbc, 0x00, 0xc1, 0x00, 0xc6, 0x00, 0xcb, 0x00, 0xd0, 0x00, 0xd5, 0x00, 0xdb, 0x00, 0xe0, 0x00, 0xe5, 0x00, 0xeb, 0x00, 0xf0, 0x00, 0xf6, 0x00, 0xfb, 0x01, 0x01, 0x01, 0x07, 0x01, 0x0d, 0x01, 0x13, 0x01, 0x19, 
0x01, 0x1f, 0x01, 0x25, 0x01, 0x2b, 0x01, 0x32, 0x01, 0x38, 0x01, 0x3e, 0x01, 0x45, 0x01, 0x4c, 0x01, 0x52, 0x01, 0x59, 0x01, 0x60, 0x01, 0x67, 0x01, 0x6e, 0x01, 0x75, 0x01, 0x7c, 0x01, 0x83, 0x01, 0x8b, 0x01, 0x92, 0x01, 0x9a, 0x01, 0xa1, 0x01, 0xa9, 0x01, 0xb1, 0x01, 0xb9, 0x01, 0xc1, 0x01, 0xc9, 0x01, 0xd1, 0x01, 0xd9, 0x01, 0xe1, 0x01, 0xe9, 0x01, 0xf2, 0x01, 0xfa, 0x02, 0x03, 0x02, 0x0c, 0x02, 0x14, 0x02, 0x1d, 0x02, 0x26, 0x02, 0x2f, 0x02, 0x38, 0x02, 0x41, 0x02, 0x4b, 0x02, 0x54, 0x02, 0x5d, 0x02, 0x67, 0x02, 0x71, 0x02, 0x7a, 0x02, 0x84, 0x02, 0x8e, 0x02, 0x98, 0x02, 0xa2, 0x02, 0xac, 0x02, 0xb6, 0x02, 0xc1, 0x02, 0xcb, 0x02, 0xd5, 0x02, 0xe0, 0x02, 0xeb, 0x02, 0xf5, 0x03, 0x00, 0x03, 0x0b, 0x03, 0x16, 0x03, 0x21, 0x03, 0x2d, 0x03, 0x38, 0x03, 0x43, 0x03, 0x4f, 0x03, 0x5a, 0x03, 0x66, 0x03, 0x72, 0x03, 0x7e, 0x03, 0x8a, 0x03, 0x96, 0x03, 0xa2, 0x03, 0xae, 0x03, 0xba, 0x03, 0xc7, 0x03, 0xd3, 0x03, 0xe0, 0x03, 0xec, 0x03, 0xf9, 0x04, 0x06, 0x04, 0x13, 0x04, 0x20, 0x04, 0x2d, 0x04, 0x3b, 0x04, 0x48, 0x04, 0x55, 0x04, 0x63, 0x04, 0x71, 0x04, 0x7e, 0x04, 0x8c, 0x04, 0x9a, 0x04, 0xa8, 0x04, 0xb6, 0x04, 0xc4, 0x04, 0xd3, 0x04, 0xe1, 0x04, 0xf0, 0x04, 0xfe, 0x05, 0x0d, 0x05, 0x1c, 0x05, 0x2b, 0x05, 0x3a, 0x05, 0x49, 0x05, 0x58, 0x05, 0x67, 0x05, 0x77, 0x05, 0x86, 0x05, 0x96, 0x05, 0xa6, 0x05, 0xb5, 0x05, 0xc5, 0x05, 0xd5, 0x05, 0xe5, 0x05, 0xf6, 0x06, 0x06, 0x06, 0x16, 0x06, 0x27, 0x06, 0x37, 0x06, 0x48, 0x06, 0x59, 0x06, 0x6a, 0x06, 0x7b, 0x06, 0x8c, 0x06, 0x9d, 0x06, 0xaf, 0x06, 0xc0, 0x06, 0xd1, 0x06, 0xe3, 0x06, 0xf5, 0x07, 0x07, 0x07, 0x19, 0x07, 0x2b, 0x07, 0x3d, 0x07, 0x4f, 0x07, 0x61, 0x07, 0x74, 0x07, 0x86, 0x07, 0x99, 0x07, 0xac, 0x07, 0xbf, 0x07, 0xd2, 0x07, 0xe5, 0x07, 0xf8, 0x08, 0x0b, 0x08, 0x1f, 0x08, 0x32, 0x08, 0x46, 0x08, 0x5a, 0x08, 0x6e, 0x08, 0x82, 0x08, 0x96, 0x08, 0xaa, 0x08, 0xbe, 0x08, 0xd2, 0x08, 0xe7, 0x08, 0xfb, 0x09, 0x10, 0x09, 0x25, 0x09, 0x3a, 0x09, 0x4f, 0x09, 0x64, 0x09, 0x79, 0x09, 0x8f, 0x09, 0xa4, 0x09, 0xba, 0x09, 0xcf, 0x09, 0xe5, 0x09, 0xfb, 0x0a, 0x11, 0x0a, 0x27, 0x0a, 0x3d, 0x0a, 0x54, 0x0a, 0x6a, 0x0a, 0x81, 0x0a, 0x98, 0x0a, 0xae, 0x0a, 0xc5, 0x0a, 0xdc, 0x0a, 0xf3, 0x0b, 0x0b, 0x0b, 0x22, 0x0b, 0x39, 0x0b, 0x51, 0x0b, 0x69, 0x0b, 0x80, 0x0b, 0x98, 0x0b, 0xb0, 0x0b, 0xc8, 0x0b, 0xe1, 0x0b, 0xf9, 0x0c, 0x12, 0x0c, 0x2a, 0x0c, 0x43, 0x0c, 0x5c, 0x0c, 0x75, 0x0c, 0x8e, 0x0c, 0xa7, 0x0c, 0xc0, 0x0c, 0xd9, 0x0c, 0xf3, 0x0d, 0x0d, 0x0d, 0x26, 0x0d, 0x40, 0x0d, 0x5a, 0x0d, 0x74, 0x0d, 0x8e, 0x0d, 0xa9, 0x0d, 0xc3, 0x0d, 0xde, 0x0d, 0xf8, 0x0e, 0x13, 0x0e, 0x2e, 0x0e, 0x49, 0x0e, 0x64, 0x0e, 0x7f, 0x0e, 0x9b, 0x0e, 0xb6, 0x0e, 0xd2, 0x0e, 0xee, 0x0f, 0x09, 0x0f, 0x25, 0x0f, 0x41, 0x0f, 0x5e, 0x0f, 0x7a, 0x0f, 0x96, 0x0f, 0xb3, 0x0f, 0xcf, 0x0f, 0xec, 0x10, 0x09, 0x10, 0x26, 0x10, 0x43, 0x10, 0x61, 0x10, 0x7e, 0x10, 0x9b, 0x10, 0xb9, 0x10, 0xd7, 0x10, 0xf5, 0x11, 0x13, 0x11, 0x31, 0x11, 0x4f, 0x11, 0x6d, 0x11, 0x8c, 0x11, 0xaa, 0x11, 0xc9, 0x11, 0xe8, 0x12, 0x07, 0x12, 0x26, 0x12, 0x45, 0x12, 0x64, 0x12, 0x84, 0x12, 0xa3, 0x12, 0xc3, 0x12, 0xe3, 0x13, 0x03, 0x13, 0x23, 0x13, 0x43, 0x13, 0x63, 0x13, 0x83, 0x13, 0xa4, 0x13, 0xc5, 0x13, 0xe5, 0x14, 0x06, 0x14, 0x27, 0x14, 0x49, 0x14, 0x6a, 0x14, 0x8b, 0x14, 0xad, 0x14, 0xce, 0x14, 0xf0, 0x15, 0x12, 0x15, 0x34, 0x15, 0x56, 0x15, 0x78, 0x15, 0x9b, 0x15, 0xbd, 0x15, 0xe0, 0x16, 0x03, 0x16, 0x26, 0x16, 0x49, 0x16, 0x6c, 0x16, 0x8f, 0x16, 0xb2, 0x16, 0xd6, 0x16, 0xfa, 0x17, 0x1d, 0x17, 0x41, 0x17, 0x65, 0x17, 0x89, 0x17, 0xae, 0x17, 0xd2, 0x17, 0xf7, 0x18, 0x1b, 0x18, 0x40, 0x18, 0x65, 0x18, 0x8a, 0x18, 0xaf, 
0x18, 0xd5, 0x18, 0xfa, 0x19, 0x20, 0x19, 0x45, 0x19, 0x6b, 0x19, 0x91, 0x19, 0xb7, 0x19, 0xdd, 0x1a, 0x04, 0x1a, 0x2a, 0x1a, 0x51, 0x1a, 0x77, 0x1a, 0x9e, 0x1a, 0xc5, 0x1a, 0xec, 0x1b, 0x14, 0x1b, 0x3b, 0x1b, 0x63, 0x1b, 0x8a, 0x1b, 0xb2, 0x1b, 0xda, 0x1c, 0x02, 0x1c, 0x2a, 0x1c, 0x52, 0x1c, 0x7b, 0x1c, 0xa3, 0x1c, 0xcc, 0x1c, 0xf5, 0x1d, 0x1e, 0x1d, 0x47, 0x1d, 0x70, 0x1d, 0x99, 0x1d, 0xc3, 0x1d, 0xec, 0x1e, 0x16, 0x1e, 0x40, 0x1e, 0x6a, 0x1e, 0x94, 0x1e, 0xbe, 0x1e, 0xe9, 0x1f, 0x13, 0x1f, 0x3e, 0x1f, 0x69, 0x1f, 0x94, 0x1f, 0xbf, 0x1f, 0xea, 0x20, 0x15, 0x20, 0x41, 0x20, 0x6c, 0x20, 0x98, 0x20, 0xc4, 0x20, 0xf0, 0x21, 0x1c, 0x21, 0x48, 0x21, 0x75, 0x21, 0xa1, 0x21, 0xce, 0x21, 0xfb, 0x22, 0x27, 0x22, 0x55, 0x22, 0x82, 0x22, 0xaf, 0x22, 0xdd, 0x23, 0x0a, 0x23, 0x38, 0x23, 0x66, 0x23, 0x94, 0x23, 0xc2, 0x23, 0xf0, 0x24, 0x1f, 0x24, 0x4d, 0x24, 0x7c, 0x24, 0xab, 0x24, 0xda, 0x25, 0x09, 0x25, 0x38, 0x25, 0x68, 0x25, 0x97, 0x25, 0xc7, 0x25, 0xf7, 0x26, 0x27, 0x26, 0x57, 0x26, 0x87, 0x26, 0xb7, 0x26, 0xe8, 0x27, 0x18, 0x27, 0x49, 0x27, 0x7a, 0x27, 0xab, 0x27, 0xdc, 0x28, 0x0d, 0x28, 0x3f, 0x28, 0x71, 0x28, 0xa2, 0x28, 0xd4, 0x29, 0x06, 0x29, 0x38, 0x29, 0x6b, 0x29, 0x9d, 0x29, 0xd0, 0x2a, 0x02, 0x2a, 0x35, 0x2a, 0x68, 0x2a, 0x9b, 0x2a, 0xcf, 0x2b, 0x02, 0x2b, 0x36, 0x2b, 0x69, 0x2b, 0x9d, 0x2b, 0xd1, 0x2c, 0x05, 0x2c, 0x39, 0x2c, 0x6e, 0x2c, 0xa2, 0x2c, 0xd7, 0x2d, 0x0c, 0x2d, 0x41, 0x2d, 0x76, 0x2d, 0xab, 0x2d, 0xe1, 0x2e, 0x16, 0x2e, 0x4c, 0x2e, 0x82, 0x2e, 0xb7, 0x2e, 0xee, 0x2f, 0x24, 0x2f, 0x5a, 0x2f, 0x91, 0x2f, 0xc7, 0x2f, 0xfe, 0x30, 0x35, 0x30, 0x6c, 0x30, 0xa4, 0x30, 0xdb, 0x31, 0x12, 0x31, 0x4a, 0x31, 0x82, 0x31, 0xba, 0x31, 0xf2, 0x32, 0x2a, 0x32, 0x63, 0x32, 0x9b, 0x32, 0xd4, 0x33, 0x0d, 0x33, 0x46, 0x33, 0x7f, 0x33, 0xb8, 0x33, 0xf1, 0x34, 0x2b, 0x34, 0x65, 0x34, 0x9e, 0x34, 0xd8, 0x35, 0x13, 0x35, 0x4d, 0x35, 0x87, 0x35, 0xc2, 0x35, 0xfd, 0x36, 0x37, 0x36, 0x72, 0x36, 0xae, 0x36, 0xe9, 0x37, 0x24, 0x37, 0x60, 0x37, 0x9c, 0x37, 0xd7, 0x38, 0x14, 0x38, 0x50, 0x38, 0x8c, 0x38, 0xc8, 0x39, 0x05, 0x39, 0x42, 0x39, 0x7f, 0x39, 0xbc, 0x39, 0xf9, 0x3a, 0x36, 0x3a, 0x74, 0x3a, 0xb2, 0x3a, 0xef, 0x3b, 0x2d, 0x3b, 0x6b, 0x3b, 0xaa, 0x3b, 0xe8, 0x3c, 0x27, 0x3c, 0x65, 0x3c, 0xa4, 0x3c, 0xe3, 0x3d, 0x22, 0x3d, 0x61, 0x3d, 0xa1, 0x3d, 0xe0, 0x3e, 0x20, 0x3e, 0x60, 0x3e, 0xa0, 0x3e, 0xe0, 0x3f, 0x21, 0x3f, 0x61, 0x3f, 0xa2, 0x3f, 0xe2, 0x40, 0x23, 0x40, 0x64, 0x40, 0xa6, 0x40, 0xe7, 0x41, 0x29, 0x41, 0x6a, 0x41, 0xac, 0x41, 0xee, 0x42, 0x30, 0x42, 0x72, 0x42, 0xb5, 0x42, 0xf7, 0x43, 0x3a, 0x43, 0x7d, 0x43, 0xc0, 0x44, 0x03, 0x44, 0x47, 0x44, 0x8a, 0x44, 0xce, 0x45, 0x12, 0x45, 0x55, 0x45, 0x9a, 0x45, 0xde, 0x46, 0x22, 0x46, 0x67, 0x46, 0xab, 0x46, 0xf0, 0x47, 0x35, 0x47, 0x7b, 0x47, 0xc0, 0x48, 0x05, 0x48, 0x4b, 0x48, 0x91, 0x48, 0xd7, 0x49, 0x1d, 0x49, 0x63, 0x49, 0xa9, 0x49, 0xf0, 0x4a, 0x37, 0x4a, 0x7d, 0x4a, 0xc4, 0x4b, 0x0c, 0x4b, 0x53, 0x4b, 0x9a, 0x4b, 0xe2, 0x4c, 0x2a, 0x4c, 0x72, 0x4c, 0xba, 0x4d, 0x02, 0x4d, 0x4a, 0x4d, 0x93, 0x4d, 0xdc, 0x4e, 0x25, 0x4e, 0x6e, 0x4e, 0xb7, 0x4f, 0x00, 0x4f, 0x49, 0x4f, 0x93, 0x4f, 0xdd, 0x50, 0x27, 0x50, 0x71, 0x50, 0xbb, 0x51, 0x06, 0x51, 0x50, 0x51, 0x9b, 0x51, 0xe6, 0x52, 0x31, 0x52, 0x7c, 0x52, 0xc7, 0x53, 0x13, 0x53, 0x5f, 0x53, 0xaa, 0x53, 0xf6, 0x54, 0x42, 0x54, 0x8f, 0x54, 0xdb, 0x55, 0x28, 0x55, 0x75, 0x55, 0xc2, 0x56, 0x0f, 0x56, 0x5c, 0x56, 0xa9, 0x56, 0xf7, 0x57, 0x44, 0x57, 0x92, 0x57, 0xe0, 0x58, 0x2f, 0x58, 0x7d, 0x58, 0xcb, 0x59, 0x1a, 0x59, 0x69, 0x59, 0xb8, 0x5a, 0x07, 0x5a, 0x56, 0x5a, 0xa6, 0x5a, 0xf5, 0x5b, 0x45, 0x5b, 0x95, 
0x5b, 0xe5, 0x5c, 0x35, 0x5c, 0x86, 0x5c, 0xd6, 0x5d, 0x27, 0x5d, 0x78, 0x5d, 0xc9, 0x5e, 0x1a, 0x5e, 0x6c, 0x5e, 0xbd, 0x5f, 0x0f, 0x5f, 0x61, 0x5f, 0xb3, 0x60, 0x05, 0x60, 0x57, 0x60, 0xaa, 0x60, 0xfc, 0x61, 0x4f, 0x61, 0xa2, 0x61, 0xf5, 0x62, 0x49, 0x62, 0x9c, 0x62, 0xf0, 0x63, 0x43, 0x63, 0x97, 0x63, 0xeb, 0x64, 0x40, 0x64, 0x94, 0x64, 0xe9, 0x65, 0x3d, 0x65, 0x92, 0x65, 0xe7, 0x66, 0x3d, 0x66, 0x92, 0x66, 0xe8, 0x67, 0x3d, 0x67, 0x93, 0x67, 0xe9, 0x68, 0x3f, 0x68, 0x96, 0x68, 0xec, 0x69, 0x43, 0x69, 0x9a, 0x69, 0xf1, 0x6a, 0x48, 0x6a, 0x9f, 0x6a, 0xf7, 0x6b, 0x4f, 0x6b, 0xa7, 0x6b, 0xff, 0x6c, 0x57, 0x6c, 0xaf, 0x6d, 0x08, 0x6d, 0x60, 0x6d, 0xb9, 0x6e, 0x12, 0x6e, 0x6b, 0x6e, 0xc4, 0x6f, 0x1e, 0x6f, 0x78, 0x6f, 0xd1, 0x70, 0x2b, 0x70, 0x86, 0x70, 0xe0, 0x71, 0x3a, 0x71, 0x95, 0x71, 0xf0, 0x72, 0x4b, 0x72, 0xa6, 0x73, 0x01, 0x73, 0x5d, 0x73, 0xb8, 0x74, 0x14, 0x74, 0x70, 0x74, 0xcc, 0x75, 0x28, 0x75, 0x85, 0x75, 0xe1, 0x76, 0x3e, 0x76, 0x9b, 0x76, 0xf8, 0x77, 0x56, 0x77, 0xb3, 0x78, 0x11, 0x78, 0x6e, 0x78, 0xcc, 0x79, 0x2a, 0x79, 0x89, 0x79, 0xe7, 0x7a, 0x46, 0x7a, 0xa5, 0x7b, 0x04, 0x7b, 0x63, 0x7b, 0xc2, 0x7c, 0x21, 0x7c, 0x81, 0x7c, 0xe1, 0x7d, 0x41, 0x7d, 0xa1, 0x7e, 0x01, 0x7e, 0x62, 0x7e, 0xc2, 0x7f, 0x23, 0x7f, 0x84, 0x7f, 0xe5, 0x80, 0x47, 0x80, 0xa8, 0x81, 0x0a, 0x81, 0x6b, 0x81, 0xcd, 0x82, 0x30, 0x82, 0x92, 0x82, 0xf4, 0x83, 0x57, 0x83, 0xba, 0x84, 0x1d, 0x84, 0x80, 0x84, 0xe3, 0x85, 0x47, 0x85, 0xab, 0x86, 0x0e, 0x86, 0x72, 0x86, 0xd7, 0x87, 0x3b, 0x87, 0x9f, 0x88, 0x04, 0x88, 0x69, 0x88, 0xce, 0x89, 0x33, 0x89, 0x99, 0x89, 0xfe, 0x8a, 0x64, 0x8a, 0xca, 0x8b, 0x30, 0x8b, 0x96, 0x8b, 0xfc, 0x8c, 0x63, 0x8c, 0xca, 0x8d, 0x31, 0x8d, 0x98, 0x8d, 0xff, 0x8e, 0x66, 0x8e, 0xce, 0x8f, 0x36, 0x8f, 0x9e, 0x90, 0x06, 0x90, 0x6e, 0x90, 0xd6, 0x91, 0x3f, 0x91, 0xa8, 0x92, 0x11, 0x92, 0x7a, 0x92, 0xe3, 0x93, 0x4d, 0x93, 0xb6, 0x94, 0x20, 0x94, 0x8a, 0x94, 0xf4, 0x95, 0x5f, 0x95, 0xc9, 0x96, 0x34, 0x96, 0x9f, 0x97, 0x0a, 0x97, 0x75, 0x97, 0xe0, 0x98, 0x4c, 0x98, 0xb8, 0x99, 0x24, 0x99, 0x90, 0x99, 0xfc, 0x9a, 0x68, 0x9a, 0xd5, 0x9b, 0x42, 0x9b, 0xaf, 0x9c, 0x1c, 0x9c, 0x89, 0x9c, 0xf7, 0x9d, 0x64, 0x9d, 0xd2, 0x9e, 0x40, 0x9e, 0xae, 0x9f, 0x1d, 0x9f, 0x8b, 0x9f, 0xfa, 0xa0, 0x69, 0xa0, 0xd8, 0xa1, 0x47, 0xa1, 0xb6, 0xa2, 0x26, 0xa2, 0x96, 0xa3, 0x06, 0xa3, 0x76, 0xa3, 0xe6, 0xa4, 0x56, 0xa4, 0xc7, 0xa5, 0x38, 0xa5, 0xa9, 0xa6, 0x1a, 0xa6, 0x8b, 0xa6, 0xfd, 0xa7, 0x6e, 0xa7, 0xe0, 0xa8, 0x52, 0xa8, 0xc4, 0xa9, 0x37, 0xa9, 0xa9, 0xaa, 0x1c, 0xaa, 0x8f, 0xab, 0x02, 0xab, 0x75, 0xab, 0xe9, 0xac, 0x5c, 0xac, 0xd0, 0xad, 0x44, 0xad, 0xb8, 0xae, 0x2d, 0xae, 0xa1, 0xaf, 0x16, 0xaf, 0x8b, 0xb0, 0x00, 0xb0, 0x75, 0xb0, 0xea, 0xb1, 0x60, 0xb1, 0xd6, 0xb2, 0x4b, 0xb2, 0xc2, 0xb3, 0x38, 0xb3, 0xae, 0xb4, 0x25, 0xb4, 0x9c, 0xb5, 0x13, 0xb5, 0x8a, 0xb6, 0x01, 0xb6, 0x79, 0xb6, 0xf0, 0xb7, 0x68, 0xb7, 0xe0, 0xb8, 0x59, 0xb8, 0xd1, 0xb9, 0x4a, 0xb9, 0xc2, 0xba, 0x3b, 0xba, 0xb5, 0xbb, 0x2e, 0xbb, 0xa7, 0xbc, 0x21, 0xbc, 0x9b, 0xbd, 0x15, 0xbd, 0x8f, 0xbe, 0x0a, 0xbe, 0x84, 0xbe, 0xff, 0xbf, 0x7a, 0xbf, 0xf5, 0xc0, 0x70, 0xc0, 0xec, 0xc1, 0x67, 0xc1, 0xe3, 0xc2, 0x5f, 0xc2, 0xdb, 0xc3, 0x58, 0xc3, 0xd4, 0xc4, 0x51, 0xc4, 0xce, 0xc5, 0x4b, 0xc5, 0xc8, 0xc6, 0x46, 0xc6, 0xc3, 0xc7, 0x41, 0xc7, 0xbf, 0xc8, 0x3d, 0xc8, 0xbc, 0xc9, 0x3a, 0xc9, 0xb9, 0xca, 0x38, 0xca, 0xb7, 0xcb, 0x36, 0xcb, 0xb6, 0xcc, 0x35, 0xcc, 0xb5, 0xcd, 0x35, 0xcd, 0xb5, 0xce, 0x36, 0xce, 0xb6, 0xcf, 0x37, 0xcf, 0xb8, 0xd0, 0x39, 0xd0, 0xba, 0xd1, 0x3c, 0xd1, 0xbe, 0xd2, 0x3f, 0xd2, 0xc1, 0xd3, 0x44, 0xd3, 0xc6, 0xd4, 0x49, 0xd4, 0xcb, 
0xd5, 0x4e, 0xd5, 0xd1, 0xd6, 0x55, 0xd6, 0xd8, 0xd7, 0x5c, 0xd7, 0xe0, 0xd8, 0x64, 0xd8, 0xe8, 0xd9, 0x6c, 0xd9, 0xf1, 0xda, 0x76, 0xda, 0xfb, 0xdb, 0x80, 0xdc, 0x05, 0xdc, 0x8a, 0xdd, 0x10, 0xdd, 0x96, 0xde, 0x1c, 0xde, 0xa2, 0xdf, 0x29, 0xdf, 0xaf, 0xe0, 0x36, 0xe0, 0xbd, 0xe1, 0x44, 0xe1, 0xcc, 0xe2, 0x53, 0xe2, 0xdb, 0xe3, 0x63, 0xe3, 0xeb, 0xe4, 0x73, 0xe4, 0xfc, 0xe5, 0x84, 0xe6, 0x0d, 0xe6, 0x96, 0xe7, 0x1f, 0xe7, 0xa9, 0xe8, 0x32, 0xe8, 0xbc, 0xe9, 0x46, 0xe9, 0xd0, 0xea, 0x5b, 0xea, 0xe5, 0xeb, 0x70, 0xeb, 0xfb, 0xec, 0x86, 0xed, 0x11, 0xed, 0x9c, 0xee, 0x28, 0xee, 0xb4, 0xef, 0x40, 0xef, 0xcc, 0xf0, 0x58, 0xf0, 0xe5, 0xf1, 0x72, 0xf1, 0xff, 0xf2, 0x8c, 0xf3, 0x19, 0xf3, 0xa7, 0xf4, 0x34, 0xf4, 0xc2, 0xf5, 0x50, 0xf5, 0xde, 0xf6, 0x6d, 0xf6, 0xfb, 0xf7, 0x8a, 0xf8, 0x19, 0xf8, 0xa8, 0xf9, 0x38, 0xf9, 0xc7, 0xfa, 0x57, 0xfa, 0xe7, 0xfb, 0x77, 0xfc, 0x07, 0xfc, 0x98, 0xfd, 0x29, 0xfd, 0xba, 0xfe, 0x4b, 0xfe, 0xdc, 0xff, 0x6d, 0xff, 0xff }; StringInfo *profile; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (GetImageProfile(image,"icc") != (const StringInfo *) NULL) return(MagickFalse); profile=AcquireStringInfo(sizeof(sRGBProfile)); SetStringInfoDatum(profile,sRGBProfile); status=SetImageProfile(image,"icc",profile,exception); profile=DestroyStringInfo(profile); return(status); } MagickExport MagickBooleanType ProfileImage(Image *image,const char *name, const void *datum,const size_t length,ExceptionInfo *exception) { #define ProfileImageTag "Profile/Image" #ifndef TYPE_XYZ_8 #define TYPE_XYZ_8 (COLORSPACE_SH(PT_XYZ)|CHANNELS_SH(3)|BYTES_SH(1)) #endif #define ThrowProfileException(severity,tag,context) \ { \ if (cms_context != (cmsContext) NULL) \ cmsDeleteContext(cms_context); \ if (source_info.profile != (cmsHPROFILE) NULL) \ (void) cmsCloseProfile(source_info.profile); \ if (target_info.profile != (cmsHPROFILE) NULL) \ (void) cmsCloseProfile(target_info.profile); \ ThrowBinaryException(severity,tag,context); \ } MagickBooleanType status; StringInfo *profile; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(name != (const char *) NULL); if ((datum == (const void *) NULL) || (length == 0)) { char *next; /* Delete image profile(s). */ ResetImageProfileIterator(image); for (next=GetNextImageProfile(image); next != (const char *) NULL; ) { if (IsOptionMember(next,name) != MagickFalse) { (void) DeleteImageProfile(image,next); ResetImageProfileIterator(image); } next=GetNextImageProfile(image); } return(MagickTrue); } /* Add an ICC, IPTC, or generic profile to the image.
*/ status=MagickTrue; profile=AcquireStringInfo((size_t) length); SetStringInfoDatum(profile,(unsigned char *) datum); if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0)) status=SetImageProfile(image,name,profile,exception); else { const StringInfo *icc_profile; icc_profile=GetImageProfile(image,"icc"); if ((icc_profile != (const StringInfo *) NULL) && (CompareStringInfo(icc_profile,profile) == 0)) { const char *value; value=GetImageProperty(image,"exif:ColorSpace",exception); (void) value; if (LocaleCompare(value,"1") != 0) (void) SetsRGBImageProfile(image,exception); value=GetImageProperty(image,"exif:InteroperabilityIndex",exception); if (LocaleCompare(value,"R98.") != 0) (void) SetsRGBImageProfile(image,exception); icc_profile=GetImageProfile(image,"icc"); } if ((icc_profile != (const StringInfo *) NULL) && (CompareStringInfo(icc_profile,profile) == 0)) { profile=DestroyStringInfo(profile); return(MagickTrue); } #if !defined(MAGICKCORE_LCMS_DELEGATE) (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn", "'%s' (LCMS)",image->filename); #else { cmsContext cms_context; CMSExceptionInfo cms_exception; LCMSInfo source_info, target_info; /* Transform pixel colors as defined by the color profiles. */ cms_exception.image=image; cms_exception.exception=exception; cms_context=cmsCreateContext(NULL,&cms_exception); if (cms_context == (cmsContext) NULL) ThrowBinaryException(ResourceLimitError, "ColorspaceColorProfileMismatch",name); cmsSetLogErrorHandlerTHR(cms_context,CMSExceptionHandler); source_info.profile=cmsOpenProfileFromMemTHR(cms_context, GetStringInfoDatum(profile),(cmsUInt32Number) GetStringInfoLength(profile)); if (source_info.profile == (cmsHPROFILE) NULL) { cmsDeleteContext(cms_context); ThrowBinaryException(ResourceLimitError, "ColorspaceColorProfileMismatch",name); } if ((cmsGetDeviceClass(source_info.profile) != cmsSigLinkClass) && (icc_profile == (StringInfo *) NULL)) status=SetImageProfile(image,name,profile,exception); else { CacheView *image_view; cmsColorSpaceSignature signature; cmsHTRANSFORM *magick_restrict transform; cmsUInt32Number flags; #if !defined(MAGICKCORE_HDRI_SUPPORT) const char *artifact; #endif MagickBooleanType highres; MagickOffsetType progress; ssize_t y; target_info.profile=(cmsHPROFILE) NULL; if (icc_profile != (StringInfo *) NULL) { target_info.profile=source_info.profile; source_info.profile=cmsOpenProfileFromMemTHR(cms_context, GetStringInfoDatum(icc_profile), (cmsUInt32Number) GetStringInfoLength(icc_profile)); if (source_info.profile == (cmsHPROFILE) NULL) ThrowProfileException(ResourceLimitError, "ColorspaceColorProfileMismatch",name); } highres=MagickTrue; #if !defined(MAGICKCORE_HDRI_SUPPORT) artifact=GetImageArtifact(image,"profile:highres-transform"); if (IsStringFalse(artifact) != MagickFalse) highres=MagickFalse; #endif if (highres != MagickFalse) { source_info.scale=1.0; source_info.translate=0.0; } source_info.colorspace=sRGBColorspace; source_info.channels=3; switch (cmsGetColorSpace(source_info.profile)) { case cmsSigCmykData: { source_info.colorspace=CMYKColorspace; source_info.channels=4; #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (highres == MagickFalse) source_info.type=(cmsUInt32Number) TYPE_CMYK_8; else #elif (MAGICKCORE_QUANTUM_DEPTH == 16) if (highres == MagickFalse) source_info.type=(cmsUInt32Number) TYPE_CMYK_16; else #endif { source_info.type=(cmsUInt32Number) TYPE_CMYK_DBL; source_info.scale=100.0; } break; } case cmsSigGrayData: { 
source_info.colorspace=GRAYColorspace; source_info.channels=1; #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (highres == MagickFalse) source_info.type=(cmsUInt32Number) TYPE_GRAY_8; else #elif (MAGICKCORE_QUANTUM_DEPTH == 16) if (highres == MagickFalse) source_info.type=(cmsUInt32Number) TYPE_GRAY_16; else #endif source_info.type=(cmsUInt32Number) TYPE_GRAY_DBL; break; } case cmsSigLabData: { source_info.colorspace=LabColorspace; #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (highres == MagickFalse) source_info.type=(cmsUInt32Number) TYPE_Lab_8; else #elif (MAGICKCORE_QUANTUM_DEPTH == 16) if (highres == MagickFalse) source_info.type=(cmsUInt32Number) TYPE_Lab_16; else #endif { source_info.type=(cmsUInt32Number) TYPE_Lab_DBL; source_info.scale=100.0; source_info.translate=(-0.5); } break; } case cmsSigRgbData: { source_info.colorspace=sRGBColorspace; #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (highres == MagickFalse) source_info.type=(cmsUInt32Number) TYPE_RGB_8; else #elif (MAGICKCORE_QUANTUM_DEPTH == 16) if (highres == MagickFalse) source_info.type=(cmsUInt32Number) TYPE_RGB_16; else #endif source_info.type=(cmsUInt32Number) TYPE_RGB_DBL; break; } case cmsSigXYZData: { source_info.colorspace=XYZColorspace; #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (highres == MagickFalse) source_info.type=(cmsUInt32Number) TYPE_XYZ_8; else #elif (MAGICKCORE_QUANTUM_DEPTH == 16) if (highres == MagickFalse) source_info.type=(cmsUInt32Number) TYPE_XYZ_16; else #endif source_info.type=(cmsUInt32Number) TYPE_XYZ_DBL; break; } default: ThrowProfileException(ImageError, "ColorspaceColorProfileMismatch",name); } signature=cmsGetPCS(source_info.profile); if (target_info.profile != (cmsHPROFILE) NULL) signature=cmsGetColorSpace(target_info.profile); if (highres != MagickFalse) { target_info.scale=1.0; target_info.translate=0.0; } target_info.channels=3; switch (signature) { case cmsSigCmykData: { target_info.colorspace=CMYKColorspace; target_info.channels=4; #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (highres == MagickFalse) target_info.type=(cmsUInt32Number) TYPE_CMYK_8; else #elif (MAGICKCORE_QUANTUM_DEPTH == 16) if (highres == MagickFalse) target_info.type=(cmsUInt32Number) TYPE_CMYK_16; else #endif { target_info.type=(cmsUInt32Number) TYPE_CMYK_DBL; target_info.scale=0.01; } break; } case cmsSigGrayData: { target_info.colorspace=GRAYColorspace; target_info.channels=1; #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (highres == MagickFalse) target_info.type=(cmsUInt32Number) TYPE_GRAY_8; else #elif (MAGICKCORE_QUANTUM_DEPTH == 16) if (highres == MagickFalse) target_info.type=(cmsUInt32Number) TYPE_GRAY_16; else #endif target_info.type=(cmsUInt32Number) TYPE_GRAY_DBL; break; } case cmsSigLabData: { target_info.colorspace=LabColorspace; #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (highres == MagickFalse) target_info.type=(cmsUInt32Number) TYPE_Lab_8; else #elif (MAGICKCORE_QUANTUM_DEPTH == 16) if (highres == MagickFalse) target_info.type=(cmsUInt32Number) TYPE_Lab_16; else #endif { target_info.type=(cmsUInt32Number) TYPE_Lab_DBL; target_info.scale=0.01; target_info.translate=0.5; } break; } case cmsSigRgbData: { target_info.colorspace=sRGBColorspace; #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (highres == MagickFalse) target_info.type=(cmsUInt32Number) TYPE_RGB_8; else #elif (MAGICKCORE_QUANTUM_DEPTH == 16) if (highres == MagickFalse) target_info.type=(cmsUInt32Number) TYPE_RGB_16; else #endif target_info.type=(cmsUInt32Number) TYPE_RGB_DBL; break; } case cmsSigXYZData: { target_info.colorspace=XYZColorspace; #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (highres 
== MagickFalse) target_info.type=(cmsUInt32Number) TYPE_XYZ_8; else #elif (MAGICKCORE_QUANTUM_DEPTH == 16) if (highres == MagickFalse) target_info.type=(cmsUInt32Number) TYPE_XYZ_16; else #endif target_info.type=(cmsUInt32Number) TYPE_XYZ_DBL; break; } default: ThrowProfileException(ImageError, "ColorspaceColorProfileMismatch",name); } switch (image->rendering_intent) { case AbsoluteIntent: { target_info.intent=INTENT_ABSOLUTE_COLORIMETRIC; break; } case PerceptualIntent: { target_info.intent=INTENT_PERCEPTUAL; break; } case RelativeIntent: { target_info.intent=INTENT_RELATIVE_COLORIMETRIC; break; } case SaturationIntent: { target_info.intent=INTENT_SATURATION; break; } default: { target_info.intent=INTENT_PERCEPTUAL; break; } } flags=cmsFLAGS_HIGHRESPRECALC; #if defined(cmsFLAGS_BLACKPOINTCOMPENSATION) if (image->black_point_compensation != MagickFalse) flags|=cmsFLAGS_BLACKPOINTCOMPENSATION; #endif transform=AcquireTransformThreadSet(&source_info,&target_info, flags,cms_context); if (transform == (cmsHTRANSFORM *) NULL) ThrowProfileException(ImageError,"UnableToCreateColorTransform", name); /* Transform image as dictated by the source & target image profiles. */ source_info.pixels=AcquirePixelThreadSet(image->columns, source_info.channels,highres); target_info.pixels=AcquirePixelThreadSet(image->columns, target_info.channels,highres); if ((source_info.pixels == (void **) NULL) || (target_info.pixels == (void **) NULL)) { target_info.pixels=DestroyPixelThreadSet(target_info.pixels); source_info.pixels=DestroyPixelThreadSet(source_info.pixels); transform=DestroyTransformThreadSet(transform); ThrowProfileException(ResourceLimitError, "MemoryAllocationFailed",image->filename); } if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) { target_info.pixels=DestroyPixelThreadSet(target_info.pixels); source_info.pixels=DestroyPixelThreadSet(source_info.pixels); transform=DestroyTransformThreadSet(transform); if (source_info.profile != (cmsHPROFILE) NULL) (void) cmsCloseProfile(source_info.profile); if (target_info.profile != (cmsHPROFILE) NULL) (void) cmsCloseProfile(target_info.profile); return(MagickFalse); } if (target_info.colorspace == CMYKColorspace) (void) SetImageColorspace(image,target_info.colorspace,exception); progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } if (highres != MagickFalse) TransformDoublePixels(id,image,&source_info,&target_info,transform,q); else TransformQuantumPixels(id,image,&source_info,&target_info,transform,q); sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ProfileImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); (void) SetImageColorspace(image,target_info.colorspace,exception); switch (signature) { case cmsSigRgbData: { image->type=image->alpha_trait
== UndefinedPixelTrait ? TrueColorType : TrueColorAlphaType; break; } case cmsSigCmykData: { image->type=image->alpha_trait == UndefinedPixelTrait ? ColorSeparationType : ColorSeparationAlphaType; break; } case cmsSigGrayData: { image->type=image->alpha_trait == UndefinedPixelTrait ? GrayscaleType : GrayscaleAlphaType; break; } default: break; } target_info.pixels=DestroyPixelThreadSet(target_info.pixels); source_info.pixels=DestroyPixelThreadSet(source_info.pixels); transform=DestroyTransformThreadSet(transform); if ((status != MagickFalse) && (cmsGetDeviceClass(source_info.profile) != cmsSigLinkClass)) status=SetImageProfile(image,name,profile,exception); if (target_info.profile != (cmsHPROFILE) NULL) (void) cmsCloseProfile(target_info.profile); } (void) cmsCloseProfile(source_info.profile); cmsDeleteContext(cms_context); } #endif } profile=DestroyStringInfo(profile); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m o v e I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemoveImageProfile() removes a named profile from the image and returns its % value. % % The format of the RemoveImageProfile method is: % % void *RemoveImageProfile(Image *image,const char *name) % % A description of each parameter follows: % % o image: the image. % % o name: the profile name. % */ MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name) { StringInfo *profile; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return((StringInfo *) NULL); WriteTo8BimProfile(image,name,(StringInfo *) NULL); profile=(StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *) image->profiles,name); return(profile); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s e t P r o f i l e I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetImageProfileIterator() resets the image profile iterator. Use it in % conjunction with GetNextImageProfile() to iterate over all the profiles % associated with an image. % % The format of the ResetImageProfileIterator method is: % % ResetImageProfileIterator(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport void ResetImageProfileIterator(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return; ResetSplayTreeIterator((SplayTreeInfo *) image->profiles); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageProfile() adds a named profile to the image. If a profile with the % same name already exists, it is replaced. This method differs from the % ProfileImage() method in that it does not apply CMS color profiles. 
% % The format of the SetImageProfile method is: % % MagickBooleanType SetImageProfile(Image *image,const char *name, % const StringInfo *profile) % % A description of each parameter follows: % % o image: the image. % % o name: the profile name, for example icc, exif, and 8bim (8bim is the % Photoshop wrapper for iptc profiles). % % o profile: A StringInfo structure that contains the named profile. % */ static void *DestroyProfile(void *profile) { return((void *) DestroyStringInfo((StringInfo *) profile)); } static inline const unsigned char *ReadResourceByte(const unsigned char *p, unsigned char *quantum) { *quantum=(*p++); return(p); } static inline const unsigned char *ReadResourceLong(const unsigned char *p, unsigned int *quantum) { *quantum=(unsigned int) (*p++) << 24; *quantum|=(unsigned int) (*p++) << 16; *quantum|=(unsigned int) (*p++) << 8; *quantum|=(unsigned int) (*p++); return(p); } static inline const unsigned char *ReadResourceShort(const unsigned char *p, unsigned short *quantum) { *quantum=(unsigned short) (*p++) << 8; *quantum|=(unsigned short) (*p++); return(p); } static inline void WriteResourceLong(unsigned char *p, const unsigned int quantum) { unsigned char buffer[4]; buffer[0]=(unsigned char) (quantum >> 24); buffer[1]=(unsigned char) (quantum >> 16); buffer[2]=(unsigned char) (quantum >> 8); buffer[3]=(unsigned char) quantum; (void) memcpy(p,buffer,4); } static void WriteTo8BimProfile(Image *image,const char *name, const StringInfo *profile) { const unsigned char *datum, *q; register const unsigned char *p; size_t length; StringInfo *profile_8bim; ssize_t count; unsigned char length_byte; unsigned int value; unsigned short id, profile_id; if (LocaleCompare(name,"icc") == 0) profile_id=0x040f; else if (LocaleCompare(name,"iptc") == 0) profile_id=0x0404; else if (LocaleCompare(name,"xmp") == 0) profile_id=0x0424; else return; profile_8bim=(StringInfo *) GetValueFromSplayTree((SplayTreeInfo *) image->profiles,"8bim"); if (profile_8bim == (StringInfo *) NULL) return; datum=GetStringInfoDatum(profile_8bim); length=GetStringInfoLength(profile_8bim); for (p=datum; p < (datum+length-16); ) { q=p; if (LocaleNCompare((char *) p,"8BIM",4) != 0) break; p+=4; p=ReadResourceShort(p,&id); p=ReadResourceByte(p,&length_byte); p+=length_byte; if (((length_byte+1) & 0x01) != 0) p++; if (p > (datum+length-4)) break; p=ReadResourceLong(p,&value); count=(ssize_t) value; if ((count & 0x01) != 0) count++; if ((count < 0) || (p > (datum+length-count)) || (count > (ssize_t) length)) break; if (id != profile_id) p+=count; else { size_t extent, offset; ssize_t extract_extent; StringInfo *extract_profile; extract_extent=0; extent=(datum+length)-(p+count); if (profile == (StringInfo *) NULL) { offset=(q-datum); extract_profile=AcquireStringInfo(offset+extent); (void) memcpy(extract_profile->datum,datum,offset); } else { offset=(p-datum); extract_extent=profile->length; if ((extract_extent & 0x01) != 0) extract_extent++; extract_profile=AcquireStringInfo(offset+extract_extent+extent); (void) memcpy(extract_profile->datum,datum,offset-4); WriteResourceLong(extract_profile->datum+offset-4,(unsigned int) profile->length); (void) memcpy(extract_profile->datum+offset, profile->datum,profile->length); } (void) memcpy(extract_profile->datum+offset+extract_extent, p+count,extent); (void) AddValueToSplayTree((SplayTreeInfo *) image->profiles, ConstantString("8bim"),CloneStringInfo(extract_profile)); extract_profile=DestroyStringInfo(extract_profile); break; } } } static void 
GetProfilesFromResourceBlock(Image *image, const StringInfo *resource_block,ExceptionInfo *exception) { const unsigned char *datum; register const unsigned char *p; size_t length; ssize_t count; StringInfo *profile; unsigned char length_byte; unsigned int value; unsigned short id; datum=GetStringInfoDatum(resource_block); length=GetStringInfoLength(resource_block); for (p=datum; p < (datum+length-16); ) { if (LocaleNCompare((char *) p,"8BIM",4) != 0) break; p+=4; p=ReadResourceShort(p,&id); p=ReadResourceByte(p,&length_byte); p+=length_byte; if (((length_byte+1) & 0x01) != 0) p++; if (p > (datum+length-4)) break; p=ReadResourceLong(p,&value); count=(ssize_t) value; if ((p > (datum+length-count)) || (count > (ssize_t) length) || (count < 0)) break; switch (id) { case 0x03ed: { unsigned int resolution; unsigned short units; /* Resolution. */ if (count < 10) break; p=ReadResourceLong(p,&resolution); image->resolution.x=((double) resolution)/65536.0; p=ReadResourceShort(p,&units)+2; p=ReadResourceLong(p,&resolution)+4; image->resolution.y=((double) resolution)/65536.0; /* Values are always stored as pixels per inch. */ if ((ResolutionType) units != PixelsPerCentimeterResolution) image->units=PixelsPerInchResolution; else { image->units=PixelsPerCentimeterResolution; image->resolution.x/=2.54; image->resolution.y/=2.54; } break; } case 0x0404: { /* IPTC Profile */ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfileInternal(image,"iptc",profile,MagickTrue, exception); profile=DestroyStringInfo(profile); p+=count; break; } case 0x040c: { /* Thumbnail. */ p+=count; break; } case 0x040f: { /* ICC Profile. */ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfileInternal(image,"icc",profile,MagickTrue, exception); profile=DestroyStringInfo(profile); p+=count; break; } case 0x0422: { /* EXIF Profile. */ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfileInternal(image,"exif",profile,MagickTrue, exception); profile=DestroyStringInfo(profile); p+=count; break; } case 0x0424: { /* XMP Profile. */ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfileInternal(image,"xmp",profile,MagickTrue, exception); profile=DestroyStringInfo(profile); p+=count; break; } default: { p+=count; break; } } if ((count & 0x01) != 0) p++; } } #if defined(MAGICKCORE_XML_DELEGATE) static MagickBooleanType ValidateXMPProfile(const StringInfo *profile) { xmlDocPtr document; /* Parse XML profile. 
*/ document=xmlReadMemory((const char *) GetStringInfoDatum(profile),(int) GetStringInfoLength(profile),"xmp.xml",NULL,XML_PARSE_NOERROR | XML_PARSE_NOWARNING); if (document == (xmlDocPtr) NULL) return(MagickFalse); xmlFreeDoc(document); return(MagickTrue); } #else static MagickBooleanType ValidateXMPProfile(const StringInfo *profile) { return(MagickFalse); } #endif static MagickBooleanType SetImageProfileInternal(Image *image,const char *name, const StringInfo *profile,const MagickBooleanType recursive, ExceptionInfo *exception) { char key[MagickPathExtent]; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((LocaleCompare(name,"xmp") == 0) && (ValidateXMPProfile(profile) == MagickFalse)) { (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning, "CorruptImageProfile","`%s'",name); return(MagickTrue); } if (image->profiles == (SplayTreeInfo *) NULL) image->profiles=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory, DestroyProfile); (void) CopyMagickString(key,name,MagickPathExtent); LocaleLower(key); status=AddValueToSplayTree((SplayTreeInfo *) image->profiles, ConstantString(key),CloneStringInfo(profile)); if (status != MagickFalse) { if (LocaleCompare(name,"8bim") == 0) GetProfilesFromResourceBlock(image,profile,exception); else if (recursive == MagickFalse) WriteTo8BimProfile(image,name,profile); } return(status); } MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name, const StringInfo *profile,ExceptionInfo *exception) { return(SetImageProfileInternal(image,name,profile,MagickFalse,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S y n c I m a g e P r o f i l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncImageProfiles() synchronizes image properties with the image profiles. % Currently we only support updating the EXIF resolution and orientation. % % The format of the SyncImageProfiles method is: % % MagickBooleanType SyncImageProfiles(Image *image) % % A description of each parameter follows: % % o image: the image. 
% */ static inline int ReadProfileByte(unsigned char **p,size_t *length) { int c; if (*length < 1) return(EOF); c=(int) (*(*p)++); (*length)--; return(c); } static inline signed short ReadProfileShort(const EndianType endian, unsigned char *buffer) { union { unsigned int unsigned_value; signed int signed_value; } quantum; unsigned short value; if (endian == LSBEndian) { value=(unsigned short) buffer[1] << 8; value|=(unsigned short) buffer[0]; quantum.unsigned_value=value & 0xffff; return(quantum.signed_value); } value=(unsigned short) buffer[0] << 8; value|=(unsigned short) buffer[1]; quantum.unsigned_value=value & 0xffff; return(quantum.signed_value); } static inline signed int ReadProfileLong(const EndianType endian, unsigned char *buffer) { union { unsigned int unsigned_value; signed int signed_value; } quantum; unsigned int value; if (endian == LSBEndian) { value=(unsigned int) buffer[3] << 24; value|=(unsigned int) buffer[2] << 16; value|=(unsigned int) buffer[1] << 8; value|=(unsigned int) buffer[0]; quantum.unsigned_value=value & 0xffffffff; return(quantum.signed_value); } value=(unsigned int) buffer[0] << 24; value|=(unsigned int) buffer[1] << 16; value|=(unsigned int) buffer[2] << 8; value|=(unsigned int) buffer[3]; quantum.unsigned_value=value & 0xffffffff; return(quantum.signed_value); } static inline signed int ReadProfileMSBLong(unsigned char **p,size_t *length) { signed int value; if (*length < 4) return(0); value=ReadProfileLong(MSBEndian,*p); (*length)-=4; *p+=4; return(value); } static inline signed short ReadProfileMSBShort(unsigned char **p, size_t *length) { signed short value; if (*length < 2) return(0); value=ReadProfileShort(MSBEndian,*p); (*length)-=2; *p+=2; return(value); } static inline void WriteProfileLong(const EndianType endian, const size_t value,unsigned char *p) { unsigned char buffer[4]; if (endian == LSBEndian) { buffer[0]=(unsigned char) value; buffer[1]=(unsigned char) (value >> 8); buffer[2]=(unsigned char) (value >> 16); buffer[3]=(unsigned char) (value >> 24); (void) memcpy(p,buffer,4); return; } buffer[0]=(unsigned char) (value >> 24); buffer[1]=(unsigned char) (value >> 16); buffer[2]=(unsigned char) (value >> 8); buffer[3]=(unsigned char) value; (void) memcpy(p,buffer,4); } static void WriteProfileShort(const EndianType endian, const unsigned short value,unsigned char *p) { unsigned char buffer[2]; if (endian == LSBEndian) { buffer[0]=(unsigned char) value; buffer[1]=(unsigned char) (value >> 8); (void) memcpy(p,buffer,2); return; } buffer[0]=(unsigned char) (value >> 8); buffer[1]=(unsigned char) value; (void) memcpy(p,buffer,2); } static MagickBooleanType Sync8BimProfile(Image *image,StringInfo *profile) { size_t length; ssize_t count; unsigned char *p; unsigned short id; length=GetStringInfoLength(profile); p=GetStringInfoDatum(profile); while (length != 0) { if (ReadProfileByte(&p,&length) != 0x38) continue; if (ReadProfileByte(&p,&length) != 0x42) continue; if (ReadProfileByte(&p,&length) != 0x49) continue; if (ReadProfileByte(&p,&length) != 0x4D) continue; if (length < 7) return(MagickFalse); id=ReadProfileMSBShort(&p,&length); count=(ssize_t) ReadProfileByte(&p,&length); if ((count >= (ssize_t) length) || (count < 0)) return(MagickFalse); p+=count; length-=count; if ((*p & 0x01) == 0) (void) ReadProfileByte(&p,&length); count=(ssize_t) ReadProfileMSBLong(&p,&length); if ((count > (ssize_t) length) || (count < 0)) return(MagickFalse); if ((id == 0x3ED) && (count == 16)) { if (image->units == PixelsPerCentimeterResolution) 
WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.x*2.54* 65536.0),p); else WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.x* 65536.0),p); WriteProfileShort(MSBEndian,(unsigned short) image->units,p+4); if (image->units == PixelsPerCentimeterResolution) WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.y*2.54* 65536.0),p+8); else WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.y* 65536.0),p+8); WriteProfileShort(MSBEndian,(unsigned short) image->units,p+12); } p+=count; length-=count; } return(MagickTrue); } MagickBooleanType SyncExifProfile(Image *image,StringInfo *profile) { #define MaxDirectoryStack 16 #define EXIF_DELIMITER "\n" #define EXIF_NUM_FORMATS 12 #define TAG_EXIF_OFFSET 0x8769 #define TAG_INTEROP_OFFSET 0xa005 typedef struct _DirectoryInfo { unsigned char *directory; size_t entry; } DirectoryInfo; DirectoryInfo directory_stack[MaxDirectoryStack]; EndianType endian; size_t entry, length, number_entries; SplayTreeInfo *exif_resources; ssize_t id, level, offset; static int format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8}; unsigned char *directory, *exif; /* Set EXIF resolution tag. */ length=GetStringInfoLength(profile); exif=GetStringInfoDatum(profile); if (length < 16) return(MagickFalse); id=(ssize_t) ReadProfileShort(LSBEndian,exif); if ((id != 0x4949) && (id != 0x4D4D)) { while (length != 0) { if (ReadProfileByte(&exif,&length) != 0x45) continue; if (ReadProfileByte(&exif,&length) != 0x78) continue; if (ReadProfileByte(&exif,&length) != 0x69) continue; if (ReadProfileByte(&exif,&length) != 0x66) continue; if (ReadProfileByte(&exif,&length) != 0x00) continue; if (ReadProfileByte(&exif,&length) != 0x00) continue; break; } if (length < 16) return(MagickFalse); id=(ssize_t) ReadProfileShort(LSBEndian,exif); } endian=LSBEndian; if (id == 0x4949) endian=LSBEndian; else if (id == 0x4D4D) endian=MSBEndian; else return(MagickFalse); if (ReadProfileShort(endian,exif+2) != 0x002a) return(MagickFalse); /* This the offset to the first IFD. */ offset=(ssize_t) ReadProfileLong(endian,exif+4); if ((offset < 0) || ((size_t) offset >= length)) return(MagickFalse); directory=exif+offset; level=0; entry=0; exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL, (void *(*)(void *)) NULL,(void *(*)(void *)) NULL); do { if (level > 0) { level--; directory=directory_stack[level].directory; entry=directory_stack[level].entry; } if ((directory < exif) || (directory > (exif+length-2))) break; /* Determine how many entries there are in the current IFD. */ number_entries=ReadProfileShort(endian,directory); for ( ; entry < number_entries; entry++) { int components; register unsigned char *p, *q; size_t number_bytes; ssize_t format, tag_value; q=(unsigned char *) (directory+2+(12*entry)); if (q > (exif+length-12)) break; /* corrupt EXIF */ if (GetValueFromSplayTree(exif_resources,q) == q) break; (void) AddValueToSplayTree(exif_resources,q,q); tag_value=(ssize_t) ReadProfileShort(endian,q); format=(ssize_t) ReadProfileShort(endian,q+2); if ((format < 0) || ((format-1) >= EXIF_NUM_FORMATS)) break; components=(int) ReadProfileLong(endian,q+4); if (components < 0) break; /* corrupt EXIF */ number_bytes=(size_t) components*format_bytes[format]; if ((ssize_t) number_bytes < components) break; /* prevent overflow */ if (number_bytes <= 4) p=q+8; else { /* The directory entry contains an offset. 
*/ offset=(ssize_t) ReadProfileLong(endian,q+8); if ((offset < 0) || ((size_t) (offset+number_bytes) > length)) continue; if (~length < number_bytes) continue; /* prevent overflow */ p=(unsigned char *) (exif+offset); } switch (tag_value) { case 0x011a: { (void) WriteProfileLong(endian,(size_t) (image->resolution.x+0.5),p); if (number_bytes == 8) (void) WriteProfileLong(endian,1UL,p+4); break; } case 0x011b: { (void) WriteProfileLong(endian,(size_t) (image->resolution.y+0.5),p); if (number_bytes == 8) (void) WriteProfileLong(endian,1UL,p+4); break; } case 0x0112: { if (number_bytes == 4) { (void) WriteProfileLong(endian,(size_t) image->orientation,p); break; } (void) WriteProfileShort(endian,(unsigned short) image->orientation, p); break; } case 0x0128: { if (number_bytes == 4) { (void) WriteProfileLong(endian,(size_t) (image->units+1),p); break; } (void) WriteProfileShort(endian,(unsigned short) (image->units+1),p); break; } default: break; } if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET)) { offset=(ssize_t) ReadProfileLong(endian,p); if (((size_t) offset < length) && (level < (MaxDirectoryStack-2))) { directory_stack[level].directory=directory; entry++; directory_stack[level].entry=entry; level++; directory_stack[level].directory=exif+offset; directory_stack[level].entry=0; level++; if ((directory+2+(12*number_entries)) > (exif+length)) break; offset=(ssize_t) ReadProfileLong(endian,directory+2+(12* number_entries)); if ((offset != 0) && ((size_t) offset < length) && (level < (MaxDirectoryStack-2))) { directory_stack[level].directory=exif+offset; directory_stack[level].entry=0; level++; } } break; } } } while (level > 0); exif_resources=DestroySplayTree(exif_resources); return(MagickTrue); } MagickPrivate MagickBooleanType SyncImageProfiles(Image *image) { MagickBooleanType status; StringInfo *profile; status=MagickTrue; profile=(StringInfo *) GetImageProfile(image,"8BIM"); if (profile != (StringInfo *) NULL) if (Sync8BimProfile(image,profile) == MagickFalse) status=MagickFalse; profile=(StringInfo *) GetImageProfile(image,"EXIF"); if (profile != (StringInfo *) NULL) if (SyncExifProfile(image,profile) == MagickFalse) status=MagickFalse; return(status); }
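The caller-side protocol for ProfileImage() is easiest to see from outside the library: hand it the raw bytes of an ICC profile and it either attaches the profile outright or, when the image already carries an "icc" profile, builds the LCMS transform chain defined earlier in this file. The following standalone driver is a minimal sketch of that usage, not part of profile.c; the input, output, and profile file names are hypothetical placeholders, and error handling is pared down to the essentials.

/*
  Illustrative driver for ProfileImage(); a sketch, not part of MagickCore.
*/
#include <stdio.h>
#include <stdlib.h>
#include "MagickCore/MagickCore.h"

int main(int argc,char **argv)
{
  ExceptionInfo
    *exception;

  FILE
    *file;

  Image
    *image;

  ImageInfo
    *image_info;

  long
    length;

  unsigned char
    *datum;

  (void) argc;
  MagickCoreGenesis(argv[0],MagickFalse);
  exception=AcquireExceptionInfo();
  image_info=CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(image_info->filename,"input.png",MagickPathExtent);
  image=ReadImage(image_info,exception);
  if (image == (Image *) NULL)
    return(1);
  /* Slurp the target ICC profile from disk ("target.icc" is a placeholder). */
  file=fopen("target.icc","rb");
  if (file == (FILE *) NULL)
    return(1);
  (void) fseek(file,0L,SEEK_END);
  length=ftell(file);
  rewind(file);
  if (length <= 0)
    return(1);
  datum=(unsigned char *) malloc((size_t) length);
  if ((datum == (unsigned char *) NULL) ||
      (fread(datum,1,(size_t) length,file) != (size_t) length))
    return(1);
  (void) fclose(file);
  /*
    If the image already has an "icc" profile, this converts its pixels from
    the embedded (source) profile to the supplied (target) one; otherwise the
    profile is simply attached without transforming pixels.
  */
  if (ProfileImage(image,"icc",datum,(size_t) length,exception) == MagickFalse)
    return(1);
  (void) CopyMagickString(image->filename,"output.png",MagickPathExtent);
  (void) WriteImage(image_info,image,exception);
  free(datum);
  image=DestroyImage(image);
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}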
DRB074-flush-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* This benchmark is extracted from flush_nolist.1c of OpenMP Application Programming Interface Examples Version 4.5.0 . We added one critical section to make it a test with only one pair of data races. The data race will not generate wrong result though. So the assertion always passes. Data race pair: i@70:10 vs. i@71:11 */ #include "omprace.h" #include <omp.h> #include<stdio.h> #include<assert.h> void f1(int *q) { #pragma omp critical *q = 1; #pragma omp flush } int main() { omprace_init(); int i=0, sum=0; #pragma omp parallel reduction(+:sum) num_threads(10) { f1(&i); sum+=i; } assert (sum==10); printf("sum=%d\n", sum); omprace_fini(); return 0; }
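For contrast with the racy benchmark above, here is a minimal race-free variant (a sketch, not part of DataRaceBench, and without its omprace harness). The documented race is between the unsynchronized read of i in "sum+=i" and the critical-protected write inside f1(); performing the read under the same unnamed critical section makes the two accesses mutually exclusive. The explicit flush is dropped because entry to and exit from a critical region already imply one.

#include <omp.h>
#include <stdio.h>
#include <assert.h>

void f1(int *q)
{
  #pragma omp critical
  *q = 1;                  /* write protected by the unnamed critical */
}

int main(void)
{
  int i = 0, sum = 0;
  #pragma omp parallel reduction(+:sum) num_threads(10)
  {
    int local;
    f1(&i);
    #pragma omp critical
    local = i;             /* read now uses the same lock as the write */
    sum += local;          /* each thread still adds 1, so sum stays 10 */
  }
  assert(sum == 10);
  printf("sum=%d\n", sum);
  return 0;
}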
convolutiondepthwise_3x3_pack4.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convdw3x3s1_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; const int group = bottom_blob.c; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); v4f32 _bias0 = bias ? (v4f32)__msa_ld_w(bias + g * 4, 0) : (v4f32)__msa_fill_w(0); const float* k0 = kernel.row(g); float* outptr0 = out.row(0); float* outptr1 = out.row(1); const Mat img0 = bottom_blob.channel(g); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); const float* r3 = img0.row(3); v4f32 _k00 = (v4f32)__msa_ld_w(k0, 0); v4f32 _k01 = (v4f32)__msa_ld_w(k0 + 4, 0); v4f32 _k02 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0); v4f32 _k10 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0); v4f32 _k11 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0); v4f32 _k12 = (v4f32)__msa_ld_w(k0 + 4 * 5, 0); v4f32 _k20 = (v4f32)__msa_ld_w(k0 + 4 * 6, 0); v4f32 _k21 = (v4f32)__msa_ld_w(k0 + 4 * 7, 0); v4f32 _k22 = (v4f32)__msa_ld_w(k0 + 4 * 8, 0); int i = 0; for (; i + 1 < outh; i += 2) { int j = 0; for (; j + 1 < outw; j += 2) { __builtin_prefetch(r0 + 32); __builtin_prefetch(r1 + 32); __builtin_prefetch(r2 + 32); __builtin_prefetch(r3 + 32); v4f32 _sum00 = _bias0; v4f32 _sum01 = _bias0; v4f32 _sum10 = _bias0; v4f32 _sum11 = _bias0; v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0); v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0); v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0); v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0); _sum00 = __msa_fmadd_w(_sum00, _k00, _r00); _sum00 = __msa_fmadd_w(_sum00, _k01, _r01); _sum00 = __msa_fmadd_w(_sum00, _k02, _r02); _sum01 = __msa_fmadd_w(_sum01, _k00, _r01); _sum01 = __msa_fmadd_w(_sum01, _k01, _r02); _sum01 = __msa_fmadd_w(_sum01, _k02, _r03); v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0); v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0); v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0); v4f32 _r13 = (v4f32)__msa_ld_w(r1 + 4 * 3, 0); _sum00 = __msa_fmadd_w(_sum00, _k10, _r10); _sum00 = __msa_fmadd_w(_sum00, _k11, _r11); _sum00 = __msa_fmadd_w(_sum00, _k12, _r12); _sum01 = __msa_fmadd_w(_sum01, _k10, _r11); _sum01 = __msa_fmadd_w(_sum01, _k11, _r12); _sum01 = __msa_fmadd_w(_sum01, _k12, _r13); _sum10 = __msa_fmadd_w(_sum10, _k00, _r10); _sum10 = __msa_fmadd_w(_sum10, _k01, _r11); _sum10 = __msa_fmadd_w(_sum10, _k02, _r12); _sum11 = __msa_fmadd_w(_sum11, _k00, _r11); _sum11 = __msa_fmadd_w(_sum11, _k01, _r12); _sum11 = __msa_fmadd_w(_sum11, _k02, _r13); v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0); v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0); v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0); v4f32 _r23 = (v4f32)__msa_ld_w(r2 + 4 * 3, 0); _sum00 = __msa_fmadd_w(_sum00, _k20, _r20); _sum00 = __msa_fmadd_w(_sum00, _k21, _r21); _sum00 = 
__msa_fmadd_w(_sum00, _k22, _r22); _sum01 = __msa_fmadd_w(_sum01, _k20, _r21); _sum01 = __msa_fmadd_w(_sum01, _k21, _r22); _sum01 = __msa_fmadd_w(_sum01, _k22, _r23); _sum10 = __msa_fmadd_w(_sum10, _k10, _r20); _sum10 = __msa_fmadd_w(_sum10, _k11, _r21); _sum10 = __msa_fmadd_w(_sum10, _k12, _r22); _sum11 = __msa_fmadd_w(_sum11, _k10, _r21); _sum11 = __msa_fmadd_w(_sum11, _k11, _r22); _sum11 = __msa_fmadd_w(_sum11, _k12, _r23); v4f32 _r30 = (v4f32)__msa_ld_w(r3, 0); v4f32 _r31 = (v4f32)__msa_ld_w(r3 + 4, 0); v4f32 _r32 = (v4f32)__msa_ld_w(r3 + 4 * 2, 0); v4f32 _r33 = (v4f32)__msa_ld_w(r3 + 4 * 3, 0); _sum10 = __msa_fmadd_w(_sum10, _k20, _r30); _sum10 = __msa_fmadd_w(_sum10, _k21, _r31); _sum10 = __msa_fmadd_w(_sum10, _k22, _r32); _sum11 = __msa_fmadd_w(_sum11, _k20, _r31); _sum11 = __msa_fmadd_w(_sum11, _k21, _r32); _sum11 = __msa_fmadd_w(_sum11, _k22, _r33); __msa_st_w((v4i32)_sum00, outptr0, 0); __msa_st_w((v4i32)_sum01, outptr0 + 4, 0); __msa_st_w((v4i32)_sum10, outptr1, 0); __msa_st_w((v4i32)_sum11, outptr1 + 4, 0); outptr0 += 4 * 2; outptr1 += 4 * 2; r0 += 4 * 2; r1 += 4 * 2; r2 += 4 * 2; r3 += 4 * 2; } for (; j < outw; j++) { __builtin_prefetch(r0 + 16); __builtin_prefetch(r1 + 16); __builtin_prefetch(r2 + 16); __builtin_prefetch(r3 + 16); v4f32 _sum0 = _bias0; v4f32 _sum1 = _bias0; v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0); v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0); v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0); _sum0 = __msa_fmadd_w(_sum0, _k00, _r00); _sum0 = __msa_fmadd_w(_sum0, _k01, _r01); _sum0 = __msa_fmadd_w(_sum0, _k02, _r02); v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0); v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0); v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0); _sum0 = __msa_fmadd_w(_sum0, _k10, _r10); _sum0 = __msa_fmadd_w(_sum0, _k11, _r11); _sum0 = __msa_fmadd_w(_sum0, _k12, _r12); _sum1 = __msa_fmadd_w(_sum1, _k00, _r10); _sum1 = __msa_fmadd_w(_sum1, _k01, _r11); _sum1 = __msa_fmadd_w(_sum1, _k02, _r12); v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0); v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0); v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0); _sum0 = __msa_fmadd_w(_sum0, _k20, _r20); _sum0 = __msa_fmadd_w(_sum0, _k21, _r21); _sum0 = __msa_fmadd_w(_sum0, _k22, _r22); _sum1 = __msa_fmadd_w(_sum1, _k10, _r20); _sum1 = __msa_fmadd_w(_sum1, _k11, _r21); _sum1 = __msa_fmadd_w(_sum1, _k12, _r22); v4f32 _r30 = (v4f32)__msa_ld_w(r3, 0); v4f32 _r31 = (v4f32)__msa_ld_w(r3 + 4, 0); v4f32 _r32 = (v4f32)__msa_ld_w(r3 + 4 * 2, 0); _sum1 = __msa_fmadd_w(_sum1, _k20, _r30); _sum1 = __msa_fmadd_w(_sum1, _k21, _r31); _sum1 = __msa_fmadd_w(_sum1, _k22, _r32); __msa_st_w((v4i32)_sum0, outptr0, 0); __msa_st_w((v4i32)_sum1, outptr1, 0); outptr0 += 4; outptr1 += 4; r0 += 4; r1 += 4; r2 += 4; r3 += 4; } r0 += 2 * 4 + w * 4; r1 += 2 * 4 + w * 4; r2 += 2 * 4 + w * 4; r3 += 2 * 4 + w * 4; outptr0 += outw * 4; outptr1 += outw * 4; } for (; i < outh; i++) { int j = 0; for (; j + 1 < outw; j += 2) { __builtin_prefetch(r0 + 32); __builtin_prefetch(r1 + 32); __builtin_prefetch(r2 + 32); v4f32 _sum00 = _bias0; v4f32 _sum01 = _bias0; v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0); v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0); v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0); v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0); _sum00 = __msa_fmadd_w(_sum00, _k00, _r00); _sum00 = __msa_fmadd_w(_sum00, _k01, _r01); _sum00 = __msa_fmadd_w(_sum00, _k02, _r02); _sum01 = __msa_fmadd_w(_sum01, _k00, _r01); _sum01 = __msa_fmadd_w(_sum01, _k01, _r02); _sum01 = __msa_fmadd_w(_sum01, _k02, _r03); v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0); v4f32 _r11 = 
(v4f32)__msa_ld_w(r1 + 4, 0); v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0); v4f32 _r13 = (v4f32)__msa_ld_w(r1 + 4 * 3, 0); _sum00 = __msa_fmadd_w(_sum00, _k10, _r10); _sum00 = __msa_fmadd_w(_sum00, _k11, _r11); _sum00 = __msa_fmadd_w(_sum00, _k12, _r12); _sum01 = __msa_fmadd_w(_sum01, _k10, _r11); _sum01 = __msa_fmadd_w(_sum01, _k11, _r12); _sum01 = __msa_fmadd_w(_sum01, _k12, _r13); v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0); v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0); v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0); v4f32 _r23 = (v4f32)__msa_ld_w(r2 + 4 * 3, 0); _sum00 = __msa_fmadd_w(_sum00, _k20, _r20); _sum00 = __msa_fmadd_w(_sum00, _k21, _r21); _sum00 = __msa_fmadd_w(_sum00, _k22, _r22); _sum01 = __msa_fmadd_w(_sum01, _k20, _r21); _sum01 = __msa_fmadd_w(_sum01, _k21, _r22); _sum01 = __msa_fmadd_w(_sum01, _k22, _r23); __msa_st_w((v4i32)_sum00, outptr0, 0); __msa_st_w((v4i32)_sum01, outptr0 + 4, 0); outptr0 += 4 * 2; r0 += 4 * 2; r1 += 4 * 2; r2 += 4 * 2; } for (; j < outw; j++) { __builtin_prefetch(r0 + 16); __builtin_prefetch(r1 + 16); __builtin_prefetch(r2 + 16); v4f32 _sum0 = _bias0; v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0); v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0); v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0); _sum0 = __msa_fmadd_w(_sum0, _k00, _r00); _sum0 = __msa_fmadd_w(_sum0, _k01, _r01); _sum0 = __msa_fmadd_w(_sum0, _k02, _r02); v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0); v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0); v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0); _sum0 = __msa_fmadd_w(_sum0, _k10, _r10); _sum0 = __msa_fmadd_w(_sum0, _k11, _r11); _sum0 = __msa_fmadd_w(_sum0, _k12, _r12); v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0); v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0); v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0); _sum0 = __msa_fmadd_w(_sum0, _k20, _r20); _sum0 = __msa_fmadd_w(_sum0, _k21, _r21); _sum0 = __msa_fmadd_w(_sum0, _k22, _r22); __msa_st_w((v4i32)_sum0, outptr0, 0); outptr0 += 4; r0 += 4; r1 += 4; r2 += 4; } r0 += 2 * 4; r1 += 2 * 4; r2 += 2 * 4; } } } static void convdw3x3s2_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; const int group = bottom_blob.c; const int tailstep = (w - 2 * outw + w) * 4; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); v4f32 _bias0 = bias ? 
(v4f32)__msa_ld_w(bias + g * 4, 0) : (v4f32)__msa_fill_w(0); const float* k0 = kernel.row(g); float* outptr0 = out; const Mat img0 = bottom_blob.channel(g); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); v4f32 _k00 = (v4f32)__msa_ld_w(k0, 0); v4f32 _k01 = (v4f32)__msa_ld_w(k0 + 4, 0); v4f32 _k02 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0); v4f32 _k10 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0); v4f32 _k11 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0); v4f32 _k12 = (v4f32)__msa_ld_w(k0 + 4 * 5, 0); v4f32 _k20 = (v4f32)__msa_ld_w(k0 + 4 * 6, 0); v4f32 _k21 = (v4f32)__msa_ld_w(k0 + 4 * 7, 0); v4f32 _k22 = (v4f32)__msa_ld_w(k0 + 4 * 8, 0); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 1 < outw; j += 2) { __builtin_prefetch(r0 + 64); __builtin_prefetch(r1 + 64); __builtin_prefetch(r2 + 64); v4f32 _sum00 = _bias0; v4f32 _sum01 = _bias0; v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0); v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0); v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0); v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0); v4f32 _r04 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0); _sum00 = __msa_fmadd_w(_sum00, _k00, _r00); _sum00 = __msa_fmadd_w(_sum00, _k01, _r01); _sum00 = __msa_fmadd_w(_sum00, _k02, _r02); _sum01 = __msa_fmadd_w(_sum01, _k00, _r02); _sum01 = __msa_fmadd_w(_sum01, _k01, _r03); _sum01 = __msa_fmadd_w(_sum01, _k02, _r04); v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0); v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0); v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0); v4f32 _r13 = (v4f32)__msa_ld_w(r1 + 4 * 3, 0); v4f32 _r14 = (v4f32)__msa_ld_w(r1 + 4 * 4, 0); _sum00 = __msa_fmadd_w(_sum00, _k10, _r10); _sum00 = __msa_fmadd_w(_sum00, _k11, _r11); _sum00 = __msa_fmadd_w(_sum00, _k12, _r12); _sum01 = __msa_fmadd_w(_sum01, _k10, _r12); _sum01 = __msa_fmadd_w(_sum01, _k11, _r13); _sum01 = __msa_fmadd_w(_sum01, _k12, _r14); v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0); v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0); v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0); v4f32 _r23 = (v4f32)__msa_ld_w(r2 + 4 * 3, 0); v4f32 _r24 = (v4f32)__msa_ld_w(r2 + 4 * 4, 0); _sum00 = __msa_fmadd_w(_sum00, _k20, _r20); _sum00 = __msa_fmadd_w(_sum00, _k21, _r21); _sum00 = __msa_fmadd_w(_sum00, _k22, _r22); _sum01 = __msa_fmadd_w(_sum01, _k20, _r22); _sum01 = __msa_fmadd_w(_sum01, _k21, _r23); _sum01 = __msa_fmadd_w(_sum01, _k22, _r24); __msa_st_w((v4i32)_sum00, outptr0, 0); __msa_st_w((v4i32)_sum01, outptr0 + 4, 0); outptr0 += 4 * 2; r0 += 4 * 4; r1 += 4 * 4; r2 += 4 * 4; } for (; j < outw; j++) { __builtin_prefetch(r0 + 32); __builtin_prefetch(r1 + 32); __builtin_prefetch(r2 + 32); v4f32 _sum0 = _bias0; v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0); v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0); v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0); _sum0 = __msa_fmadd_w(_sum0, _k00, _r00); _sum0 = __msa_fmadd_w(_sum0, _k01, _r01); _sum0 = __msa_fmadd_w(_sum0, _k02, _r02); v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0); v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0); v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0); _sum0 = __msa_fmadd_w(_sum0, _k10, _r10); _sum0 = __msa_fmadd_w(_sum0, _k11, _r11); _sum0 = __msa_fmadd_w(_sum0, _k12, _r12); v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0); v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0); v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0); _sum0 = __msa_fmadd_w(_sum0, _k20, _r20); _sum0 = __msa_fmadd_w(_sum0, _k21, _r21); _sum0 = __msa_fmadd_w(_sum0, _k22, _r22); __msa_st_w((v4i32)_sum0, outptr0, 0); outptr0 += 4; r0 += 4 * 2; r1 += 4 * 2; r2 += 4 * 2; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } }
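The two MSA kernels above are easier to audit against a scalar model. The sketch below is a hypothetical helper (not part of ncnn) spelling out what the stride-1 variant computes for one group: in the pack4 layout every 4 consecutive floats are one pixel's 4 packed channels, so each __msa_fmadd_w in the kernel is the c-loop body below applied to 4 channels at once.

// Scalar reference for convdw3x3s1_pack4_msa, one group (a sketch, assuming
// every 3x3 window is in bounds; all names here are hypothetical).
static void convdw3x3s1_pack4_ref(const float* bottom, int w, // input, width in pixels
                                  const float* k9,           // 9 taps * 4 channels
                                  const float* bias4,        // 4 floats, may be NULL
                                  float* top, int outw, int outh)
{
    for (int i = 0; i < outh; i++)
    {
        for (int j = 0; j < outw; j++)
        {
            for (int c = 0; c < 4; c++) // the 4 packed channels
            {
                float sum = bias4 ? bias4[c] : 0.f;
                for (int ky = 0; ky < 3; ky++)
                    for (int kx = 0; kx < 3; kx++)
                        sum += k9[(ky * 3 + kx) * 4 + c]
                             * bottom[((i + ky) * w + (j + kx)) * 4 + c];
                top[(i * outw + j) * 4 + c] = sum;
            }
        }
    }
}

The 2x2 output tiling in the MSA code is this loop nest unrolled so the input rows loaded for one output row are reused for the next.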
mvt.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "mvt.h" /* Array initialization. */ static void init_array(int n, DATA_TYPE POLYBENCH_1D(x1,N,n), DATA_TYPE POLYBENCH_1D(x2,N,n), DATA_TYPE POLYBENCH_1D(y_1,N,n), DATA_TYPE POLYBENCH_1D(y_2,N,n), DATA_TYPE POLYBENCH_2D(A,N,N,n,n)) { int i, j; for (i = 0; i < n; i++) { x1[i] = ((DATA_TYPE) i) / n; x2[i] = ((DATA_TYPE) i + 1) / n; y_1[i] = ((DATA_TYPE) i + 3) / n; y_2[i] = ((DATA_TYPE) i + 4) / n; for (j = 0; j < n; j++) A[i][j] = ((DATA_TYPE) i*j) / N; } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int n, DATA_TYPE POLYBENCH_1D(x1,N,n), DATA_TYPE POLYBENCH_1D(x2,N,n)) { int i; for (i = 0; i < n; i++) { fprintf (stderr, DATA_PRINTF_MODIFIER, x1[i]); fprintf (stderr, DATA_PRINTF_MODIFIER, x2[i]); if (i % 20 == 0) fprintf (stderr, "\n"); } } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_mvt(int n, DATA_TYPE POLYBENCH_1D(x1,N,n), DATA_TYPE POLYBENCH_1D(x2,N,n), DATA_TYPE POLYBENCH_1D(y_1,N,n), DATA_TYPE POLYBENCH_1D(y_2,N,n), DATA_TYPE POLYBENCH_2D(A,N,N,n,n)) { int i, j; #pragma scop #pragma omp parallel private (j) { #pragma omp for for (i = 0; i < _PB_N; i++) for (j = 0; j < _PB_N; j++) x1[i] = x1[i] + A[i][j] * y_1[j]; #pragma omp for for (i = 0; i < _PB_N; i++) for (j = 0; j < _PB_N; j++) x2[i] = x2[i] + A[j][i] * y_2[j]; } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, N, N, n, n); POLYBENCH_1D_ARRAY_DECL(x1, DATA_TYPE, N, n); POLYBENCH_1D_ARRAY_DECL(x2, DATA_TYPE, N, n); POLYBENCH_1D_ARRAY_DECL(y_1, DATA_TYPE, N, n); POLYBENCH_1D_ARRAY_DECL(y_2, DATA_TYPE, N, n); /* Initialize array(s). */ init_array (n, POLYBENCH_ARRAY(x1), POLYBENCH_ARRAY(x2), POLYBENCH_ARRAY(y_1), POLYBENCH_ARRAY(y_2), POLYBENCH_ARRAY(A)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_mvt (n, POLYBENCH_ARRAY(x1), POLYBENCH_ARRAY(x2), POLYBENCH_ARRAY(y_1), POLYBENCH_ARRAY(y_2), POLYBENCH_ARRAY(A)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(x1), POLYBENCH_ARRAY(x2))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(x1); POLYBENCH_FREE_ARRAY(x2); POLYBENCH_FREE_ARRAY(y_1); POLYBENCH_FREE_ARRAY(y_2); return 0; }
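For reference, kernel_mvt computes the two standard MVT matrix-vector updates

\[ x_1 \leftarrow x_1 + A\,y_1, \qquad x_2 \leftarrow x_2 + A^{T} y_2, \]

the second of which walks A by columns (A[j][i]); keeping the two loop nests in separate omp for constructs lets each be distributed over rows of x1 and x2 independently, with no dependence between them.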
flexProxDualL2Inf.h
#ifndef flexProxDualL2Inf_H
#define flexProxDualL2Inf_H

#include "flexProx.h"

//! represents prox for an L2,inf non-data term
/*! \f$ \frac{\alpha}{2} \|\cdot\|_{2,\inf} \f$ */
template<typename T>
class flexProxDualL2Inf : public flexProx<T>
{
#ifdef __CUDACC__
    typedef thrust::device_vector<T> Tdata;
#else
    typedef std::vector<T> Tdata;
#endif

public:
    flexProxDualL2Inf() : flexProx<T>(dualL2Prox) {}

    ~flexProxDualL2Inf()
    {
        if (VERBOSE > 0)
            printf("Destructor prox!\n");
    }

#ifdef __CUDACC__
    struct AbsFunctor
    {
        __host__ __device__ T operator()(const T& x) const
        {
            return (T)abs(x);
        }
    };

    struct L21NormDim2
    {
        template <typename Tuple>
        __host__ __device__ void operator()(Tuple t)
        {
            thrust::get<0>(t) = sqrt(pow(thrust::get<1>(t), 2) + pow(thrust::get<2>(t), 2));
        }
    };

    struct L21NormDim3
    {
        template <typename Tuple>
        __host__ __device__ void operator()(Tuple t)
        {
            thrust::get<0>(t) = sqrt(pow(thrust::get<1>(t), 2) + pow(thrust::get<2>(t), 2) + pow(thrust::get<3>(t), 2));
        }
    };

    struct FindTransform
    {
        FindTransform(T aAlpha) : alpha(aAlpha) {}

        template <typename Tuple>
        __host__ __device__ void operator()(Tuple t)
        {
            thrust::get<0>(t) = thrust::get<1>(t) - thrust::get<2>(t) * thrust::get<3>(t) - alpha;
        }

        T alpha;
    };

    struct GreaterEqualZero
    {
        __host__ __device__ bool operator()(T val)
        {
            return (val >= (T)0);
        }
    };

    struct ResultFunctor
    {
        ResultFunctor(T aLambda) : lambda(aLambda) {}

        template <typename Tuple>
        __host__ __device__ void operator()(Tuple t)
        {
            thrust::get<0>(t) = (thrust::get<1>(t) > lambda) ? ((T)1 - (lambda / thrust::get<1>(t))) * thrust::get<2>(t) : 0;
        }

        T lambda;
    };
#endif

    void applyProx(T alpha, flexBoxData<T>* data, const std::vector<int> &dualNumbers, const std::vector<int> &primalNumbers)
    {
        int numElements = (int)data->yTilde[dualNumbers[0]].size();
#ifdef __CUDACC__
        Tdata yTildeNorm(numElements);

        if (dualNumbers.size() == 1)
        {
            auto startIterator = data->yTilde[dualNumbers[0]].begin();
            auto endIterator = data->yTilde[dualNumbers[0]].end();
            thrust::transform(startIterator, endIterator, yTildeNorm.begin(), AbsFunctor());
        }
        else if (dualNumbers.size() == 2)
        {
            auto startIterator = thrust::make_zip_iterator(thrust::make_tuple(yTildeNorm.begin(), data->yTilde[dualNumbers[0]].begin(), data->yTilde[dualNumbers[1]].begin()));
            auto endIterator = thrust::make_zip_iterator(thrust::make_tuple(yTildeNorm.end(), data->yTilde[dualNumbers[0]].end(), data->yTilde[dualNumbers[1]].end()));
            thrust::for_each(startIterator, endIterator, L21NormDim2());
        }
        else if (dualNumbers.size() == 3)
        {
            auto startIterator = thrust::make_zip_iterator(thrust::make_tuple(yTildeNorm.begin(), data->yTilde[dualNumbers[0]].begin(), data->yTilde[dualNumbers[1]].begin(), data->yTilde[dualNumbers[2]].begin()));
            auto endIterator = thrust::make_zip_iterator(thrust::make_tuple(yTildeNorm.end(), data->yTilde[dualNumbers[0]].end(), data->yTilde[dualNumbers[1]].end(), data->yTilde[dualNumbers[2]].end()));
            thrust::for_each(startIterator, endIterator, L21NormDim3());
        }
        else
        {
            printf("Alert! L2,inf prox not implemented in CUDA for dim>3\n");
        }

        Tdata sortyTildeNorm(yTildeNorm);
        thrust::sort(sortyTildeNorm.begin(), sortyTildeNorm.end(), thrust::greater<T>());

        Tdata yTildeSum(yTildeNorm.size());
        Tdata g(yTildeNorm.size());
        thrust::exclusive_scan(sortyTildeNorm.begin(), sortyTildeNorm.end(), yTildeSum.begin());

        thrust::counting_iterator<int> first(0);
        thrust::counting_iterator<int> last((int)yTildeNorm.size());

        auto startIterator = thrust::make_zip_iterator(thrust::make_tuple(g.begin(), yTildeSum.begin(), first, sortyTildeNorm.begin()));
        auto endIterator = thrust::make_zip_iterator(thrust::make_tuple(g.end(), yTildeSum.end(), last, sortyTildeNorm.end()));
        thrust::for_each(startIterator, endIterator, FindTransform(alpha));

        auto findIter = thrust::find_if(g.begin(), g.end(), GreaterEqualZero());

        T lambda = 0;
        int index = -1;
        if (findIter != g.end() && findIter != g.begin())
        {
            index = thrust::distance(g.begin(), findIter);
            lambda = (yTildeSum[index] - alpha) / index;
        }

        for (int k = 0; k < (int)dualNumbers.size(); k++)
        {
            auto startIteratorResult = thrust::make_zip_iterator(thrust::make_tuple(data->y[dualNumbers[k]].begin(), yTildeNorm.begin(), data->yTilde[dualNumbers[k]].begin()));
            auto endIteratorResult = thrust::make_zip_iterator(thrust::make_tuple(data->y[dualNumbers[k]].end(), yTildeNorm.end(), data->yTilde[dualNumbers[k]].end()));
            thrust::for_each(startIteratorResult, endIteratorResult, ResultFunctor(lambda));
        }
#else
        std::vector<T> yTildeNorm(numElements);
        T* ptrYTildeNorm = yTildeNorm.data();

        if (dualNumbers.size() == 1)
        {
            T* ptrYtilde0 = data->yTilde[dualNumbers[0]].data();
            #pragma omp parallel for
            for (int i = 0; i < numElements; i++)
            {
                ptrYTildeNorm[i] = fabs(ptrYtilde0[i]);
            }
        }
        else if (dualNumbers.size() == 2)
        {
            T* ptrYtilde0 = data->yTilde[dualNumbers[0]].data();
            T* ptrYtilde1 = data->yTilde[dualNumbers[1]].data();
            #pragma omp parallel for
            for (int i = 0; i < numElements; i++)
            {
                ptrYTildeNorm[i] = std::sqrt(ptrYtilde0[i] * ptrYtilde0[i] + ptrYtilde1[i] * ptrYtilde1[i]);
            }
        }
        else if (dualNumbers.size() == 3)
        {
            T* ptrYtilde0 = data->yTilde[dualNumbers[0]].data();
            T* ptrYtilde1 = data->yTilde[dualNumbers[1]].data();
            T* ptrYtilde2 = data->yTilde[dualNumbers[2]].data();
            #pragma omp parallel for
            for (int i = 0; i < numElements; i++)
            {
                ptrYTildeNorm[i] = std::sqrt(pow2(ptrYtilde0[i]) + pow2(ptrYtilde1[i]) + pow2(ptrYtilde2[i]));
            }
        }
        else
            printf("Alert! L2,inf prox not implemented for dim>3\n");

        std::vector<T> sortyTildeNorm(yTildeNorm);
        std::sort(std::begin(sortyTildeNorm), std::end(sortyTildeNorm), std::greater<T>());

        T yTildeSum = 0;
        T g = -alpha;
        T lambda = 0;
        int index;
        for (index = 1; index < numElements; index++)
        {
            T threshold = sortyTildeNorm[index]; // candidate shrinkage level
            yTildeSum += sortyTildeNorm[index - 1];
            g = yTildeSum - index * threshold - alpha;
            if (g >= 0)
                break;
        }

        if (g < 0)
            lambda = 0;
        else
            lambda = (yTildeSum - alpha) / index;

        // k runs over at most 3 dual components; parallelize only the element loop
        for (int k = 0; k < (int)dualNumbers.size(); k++)
        {
            T* ptrY = data->y[dualNumbers[k]].data();
            T* ptrYtilde = data->yTilde[dualNumbers[k]].data();
            #pragma omp parallel for
            for (int i = 0; i < numElements; i++)
            {
                ptrY[i] = (ptrYTildeNorm[i] > lambda) ? ((T)1 - (lambda / ptrYTildeNorm[i])) * ptrYtilde[i] : 0;
            }
        }
#endif
    }

    void applyProx(T alpha, flexBoxData<T>* data, const std::vector<int> &dualNumbers, const std::vector<int> &primalNumbers, std::vector<Tdata> &fList)
    {
    }
};

#endif
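The threshold search in applyProx is the standard sort-and-scan projection of the vector of per-pixel norms onto an l1-ball of radius alpha. A standalone double-precision sketch of just that step (a hypothetical helper mirroring the OpenMP branch above, not part of flexBox):

#include <stdlib.h>
#include <string.h>

static int cmp_desc(const void* a, const void* b)
{
    double x = *(const double*)a, y = *(const double*)b;
    return (x < y) - (x > y); /* sort descending */
}

/* Returns the shrinkage level lambda: 0 if the norms already lie inside
   the l1-ball of radius alpha, else the water-filling threshold
   (sum of the k largest norms - alpha) / k. */
static double find_lambda(const double* norms, int n, double alpha)
{
    double* s = (double*)malloc((size_t)n * sizeof(double));
    memcpy(s, norms, (size_t)n * sizeof(double));
    qsort(s, (size_t)n, sizeof(double), cmp_desc);

    double sum = 0.0, lambda = 0.0;
    for (int k = 1; k < n; k++)
    {
        sum += s[k - 1];                    /* sum of the k largest norms  */
        if (sum - k * s[k] - alpha >= 0.0)  /* threshold falls below s[k]  */
        {
            lambda = (sum - alpha) / k;
            break;
        }
    }
    free(s);
    return lambda;
}

Each dual component is then scaled by (1 - lambda/norm) wherever norm > lambda, which is exactly what ResultFunctor and the final OpenMP loop do.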
GB_binop__minus_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__minus_uint8) // A.*B function (eWiseMult): GB (_AemultB_08__minus_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__minus_uint8) // A.*B function (eWiseMult): GB (_AemultB_04__minus_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_uint8) // A*D function (colscale): GB (_AxD__minus_uint8) // D*A function (rowscale): GB (_DxB__minus_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__minus_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__minus_uint8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_uint8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_uint8) // C=scalar+B GB (_bind1st__minus_uint8) // C=scalar+B' GB (_bind1st_tran__minus_uint8) // C=A+scalar GB (_bind2nd__minus_uint8) // C=A'+scalar GB (_bind2nd_tran__minus_uint8) // C type: uint8_t // A type: uint8_t // A pattern? 0 // B type: uint8_t // B pattern? 0 // BinaryOp: cij = (aij - bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x - y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINUS || GxB_NO_UINT8 || GxB_NO_MINUS_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__minus_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__minus_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__minus_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__minus_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__minus_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__minus_uint8) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__minus_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint8_t alpha_scalar ; uint8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint8_t *) 
alpha_scalar_in)) ; beta_scalar = (*((uint8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif }
//------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__minus_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif }
//------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__minus_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__minus_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__minus_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__minus_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = GBX (Bx, p, false) ; Cx [p] = (x - bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__minus_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = GBX (Ax, p, false) ; Cx [p] = (aij - y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x - aij) ; \ } GrB_Info GB (_bind1st_tran__minus_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int 
nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij - y) ; \ } GrB_Info GB (_bind2nd_tran__minus_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
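None of the GB(...) workers above are called directly; they are dispatched from the public GraphBLAS API. A minimal sketch of a call that lands in GB(_AaddB__minus_uint8) when both inputs hold GrB_UINT8 (assuming a C API 1.3+ build, GrB_init already called, and the operator not compiled out via GxB_NO_MINUS_UINT8; the wrapper name is hypothetical):

#include <GraphBLAS.h>

/* C = A - B over the union of the two patterns (eWiseAdd semantics:
   entries present in only one input are copied through unmodified). */
GrB_Info ewise_minus_uint8(GrB_Matrix* C, GrB_Matrix A, GrB_Matrix B)
{
    GrB_Index nrows, ncols;
    GrB_Matrix_nrows(&nrows, A);
    GrB_Matrix_ncols(&ncols, A);
    GrB_Matrix_new(C, GrB_UINT8, nrows, ncols);
    /* no mask, no accumulator, default descriptor */
    return GrB_Matrix_eWiseAdd_BinaryOp(*C, NULL, NULL, GrB_MINUS_UINT8, A, B, NULL);
}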
diagsm_x_csr_n_col.c
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" #include <memory.h> alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_CSR *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy) { ALPHA_Number diag[A->rows]; memset(diag, '\0', A->rows * sizeof(ALPHA_Number)); int num_thread = alpha_get_thread_num(); #ifdef _OPENMP #pragma omp parallel for num_threads(num_thread) #endif for (ALPHA_INT r = 0; r < A->rows; r++) { for (ALPHA_INT ai = A->rows_start[r]; ai < A->rows_end[r]; ai++) { ALPHA_INT ac = A->col_indx[ai]; if (ac == r) { diag[r] = A->values[ai]; } } } #ifdef _OPENMP #pragma omp parallel for num_threads(num_thread) #endif for (ALPHA_INT c = 0; c < columns; ++c) { for (ALPHA_INT r = 0; r < A->rows; ++r) { ALPHA_Number t; alpha_setzero(t); alpha_mul(t, alpha, x[index2(c, r, ldx)]); alpha_div(y[index2(c, r, ldy)], t, diag[r]); } } return ALPHA_SPARSE_STATUS_SUCCESS; }
trmm_x_csr_u_hi_row.c
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" #include <memory.h> alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_CSR *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy) { ALPHA_INT num_threads = alpha_get_thread_num(); ALPHA_INT *acc_nnz = alpha_malloc(sizeof(ALPHA_INT) * mat->rows); memset(acc_nnz, '\0', mat->rows * sizeof(ALPHA_INT)); #ifdef _OPENMP #pragma omp parallel for num_threads(num_threads) #endif for (ALPHA_INT ar = 0; ar < mat->rows; ar++) { for (ALPHA_INT ai = mat->rows_start[ar]; ai < mat->rows_end[ar]; ai++) { if (mat->col_indx[ai] > ar) { acc_nnz[ar] += 1; } } } for (ALPHA_INT i = 1; i < mat->rows; i++) { acc_nnz[i] += acc_nnz[i - 1]; } ALPHA_INT *partition = alpha_malloc((num_threads + 1) * sizeof(ALPHA_INT)); balanced_partition_row_by_nnz(acc_nnz,mat->rows, num_threads, partition); #ifdef _OPENMP #pragma omp parallel num_threads(num_threads) #endif { ALPHA_INT tid = alpha_get_thread_id(); ALPHA_INT local_m_s = partition[tid]; ALPHA_INT local_m_e = partition[tid + 1]; for (ALPHA_INT r = local_m_s; r < local_m_e; ++r) { ALPHA_Number *Y = &y[index2(r, 0, ldy)]; for (ALPHA_INT c = 0; c <columns; c++) { ALPHA_Number ctmp; alpha_mul(Y[c], Y[c], beta); alpha_mul(ctmp, x[index2(r, c, ldx)], alpha); alpha_add(Y[c], ctmp, Y[c]); } for (ALPHA_INT ai = mat->rows_start[r]; ai < mat->rows_end[r]; ai++) { ALPHA_INT ac = mat->col_indx[ai]; if (ac > r) { ALPHA_Number val; alpha_mul(val, alpha, mat->values[ai]); const ALPHA_Number *X = &x[index2(ac, 0, ldx)]; for (ALPHA_INT c = 0; c <columns; ++c) alpha_madde(Y[c], X[c], val); } } } } alpha_free(partition); alpha_free(acc_nnz); return ALPHA_SPARSE_STATUS_SUCCESS; }
GB_binop__lor_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__lor_int8) // A.*B function (eWiseMult): GB (_AemultB_08__lor_int8) // A.*B function (eWiseMult): GB (_AemultB_02__lor_int8) // A.*B function (eWiseMult): GB (_AemultB_04__lor_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__lor_int8) // A*D function (colscale): GB (_AxD__lor_int8) // D*A function (rowscale): GB (_DxB__lor_int8) // C+=B function (dense accum): GB (_Cdense_accumB__lor_int8) // C+=b function (dense accum): GB (_Cdense_accumb__lor_int8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lor_int8) // C=scalar+B GB (_bind1st__lor_int8) // C=scalar+B' GB (_bind1st_tran__lor_int8) // C=A+scalar GB (_bind2nd__lor_int8) // C=A'+scalar GB (_bind2nd_tran__lor_int8) // C type: int8_t // A type: int8_t // A pattern? 0 // B type: int8_t // B pattern? 0 // BinaryOp: cij = ((aij != 0) || (bij != 0)) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = ((x != 0) || (y != 0)) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LOR || GxB_NO_INT8 || GxB_NO_LOR_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__lor_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__lor_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__lor_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__lor_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__lor_int8) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__lor_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int8_t alpha_scalar ; int8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int8_t *) alpha_scalar_in)) ; beta_scalar = (*((int8_t *) 
beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif }
//------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__lor_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif }
//------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__lor_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__lor_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__lor_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__lor_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = GBX (Bx, p, false) ; Cx [p] = ((x != 0) || (bij != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__lor_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = GBX (Ax, p, false) ; Cx [p] = ((aij != 0) || (y != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((x != 0) || (aij != 0)) ; \ } GrB_Info GB (_bind1st_tran__lor_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int 
nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((aij != 0) || (y != 0)) ; \ } GrB_Info GB (_bind2nd_tran__lor_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
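The bind1st worker above backs GrB_apply with the scalar bound as the first operand. A minimal sketch (assuming a C API 1.3+ SuiteSparse build, where logical-or over int8 is the GxB_LOR_INT8 extension operator; the wrapper name is hypothetical):

#include <GraphBLAS.h>

/* C = (x || A) elementwise: dispatches into GB(_bind1st__lor_int8)
   unless the operator was compiled out via GxB_NO_LOR_INT8. */
GrB_Info lor_bind1st_int8(GrB_Matrix C, GrB_Matrix A, int8_t x)
{
    return GrB_Matrix_apply_BinaryOp1st_INT8(C, NULL, NULL, GxB_LOR_INT8, x, A, NULL);
}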
profile.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP RRRR OOO FFFFF IIIII L EEEEE % % P P R R O O F I L E % % PPPP RRRR O O FFF I L EEE % % P R R O O F I L E % % P R R OOO F IIIII LLLLL EEEEE % % % % % % MagickCore Image Profile Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/color.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/configure.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/linked-list.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/option-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/profile-private.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #if defined(MAGICKCORE_LCMS_DELEGATE) #if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H) #include <wchar.h> #include <lcms/lcms2.h> #else #include <wchar.h> #include "lcms2.h" #endif #endif #if defined(MAGICKCORE_XML_DELEGATE) # if defined(MAGICKCORE_WINDOWS_SUPPORT) # if !defined(__MINGW32__) # include <win32config.h> # endif # endif # include <libxml/parser.h> # include <libxml/tree.h> #endif /* Forward declarations */ static MagickBooleanType SetImageProfileInternal(Image *,const char *,const StringInfo *, const MagickBooleanType,ExceptionInfo *); static void WriteTo8BimProfile(Image *,const char*,const StringInfo *); /* Typedef declarations */ struct _ProfileInfo { char *name; size_t length; unsigned char *info; size_t signature; }; typedef struct _CMSExceptionInfo { Image *image; ExceptionInfo *exception; } CMSExceptionInfo; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e P r o f i l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImageProfiles() clones one or more image profiles. % % The format of the CloneImageProfiles method is: % % MagickBooleanType CloneImageProfiles(Image *image, % const Image *clone_image) % % A description of each parameter follows: % % o image: the image. % % o clone_image: the clone image. 
% */ MagickExport MagickBooleanType CloneImageProfiles(Image *image, const Image *clone_image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(clone_image != (const Image *) NULL); assert(clone_image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (clone_image->profiles != (void *) NULL) { if (image->profiles != (void *) NULL) DestroyImageProfiles(image); image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles, (void *(*)(void *)) ConstantString,(void *(*)(void *)) CloneStringInfo); } return(MagickTrue); }
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e l e t e I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DeleteImageProfile() deletes a profile from the image by its name. % % The format of the DeleteImageProfile method is: % % MagickBooleanType DeleteImageProfile(Image *image,const char *name) % % A description of each parameter follows: % % o image: the image. % % o name: the profile name. % */ MagickExport MagickBooleanType DeleteImageProfile(Image *image,const char *name) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return(MagickFalse); WriteTo8BimProfile(image,name,(StringInfo *) NULL); return(DeleteNodeFromSplayTree((SplayTreeInfo *) image->profiles,name)); }
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y I m a g e P r o f i l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImageProfiles() releases memory associated with an image profile map. % % The format of the DestroyImageProfiles method is: % % void DestroyImageProfiles(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport void DestroyImageProfiles(Image *image) { if (image->profiles != (SplayTreeInfo *) NULL) image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles); }
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageProfile() gets a profile associated with an image by name. % % The format of the GetImageProfile method is: % % const StringInfo *GetImageProfile(const Image *image,const char *name) % % A description of each parameter follows: % % o image: the image. % % o name: the profile name. % */ MagickExport const StringInfo *GetImageProfile(const Image *image, const char *name) { const StringInfo *profile; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return((StringInfo *) NULL); profile=(const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *) image->profiles,name); return(profile); }
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t N e x t I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetNextImageProfile() gets the next profile name for an image. % % The format of the GetNextImageProfile method is: % % char *GetNextImageProfile(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport char *GetNextImageProfile(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return((char *) NULL); return((char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles)); }
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P r o f i l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ProfileImage() associates, applies, or removes an ICM, IPTC, or generic % profile with / to / from an image. If the profile is NULL, it is removed % from the image; otherwise, it is added or applied. Use a name of '*' and a % profile of NULL to remove all profiles from the image. % % ICC and ICM profiles are handled as follows: If the image does not have % an associated color profile, the one you provide is associated with the % image and the image pixels are not transformed. Otherwise, the colorspace % transform defined by the existing and new profiles is applied to the image % pixels and the new profile is associated with the image. % % The format of the ProfileImage method is: % % MagickBooleanType ProfileImage(Image *image,const char *name, % const void *datum,const size_t length,const MagickBooleanType clone) % % A description of each parameter follows: % % o image: the image. % % o name: Name of profile to add or remove: ICC, IPTC, or generic profile. % % o datum: the profile data. % % o length: the length of the profile. % % o clone: should be MagickFalse. 
% */ #if defined(MAGICKCORE_LCMS_DELEGATE) typedef struct _LCMSInfo { ColorspaceType colorspace; cmsUInt32Number type; size_t channels; cmsHPROFILE profile; int intent; double scale[4], translate[4]; void **magick_restrict pixels; } LCMSInfo; #if LCMS_VERSION < 2060 static void* cmsGetContextUserData(cmsContext ContextID) { return(ContextID); } static cmsContext cmsCreateContext(void *magick_unused(Plugin),void *UserData) { magick_unreferenced(Plugin); return((cmsContext) UserData); } static void cmsSetLogErrorHandlerTHR(cmsContext magick_unused(ContextID), cmsLogErrorHandlerFunction Fn) { magick_unreferenced(ContextID); cmsSetLogErrorHandler(Fn); } static void cmsDeleteContext(cmsContext magick_unused(ContextID)) { magick_unreferenced(ContextID); } #endif static void **DestroyPixelTLS(void **pixels) { ssize_t i; if (pixels == (void **) NULL) return((void **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (pixels[i] != (void *) NULL) pixels[i]=RelinquishMagickMemory(pixels[i]); pixels=(void **) RelinquishMagickMemory(pixels); return(pixels); } static void **AcquirePixelTLS(const size_t columns,const size_t channels, MagickBooleanType highres) { ssize_t i; size_t number_threads; size_t size; void **pixels; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); pixels=(void **) AcquireQuantumMemory(number_threads,sizeof(*pixels)); if (pixels == (void **) NULL) return((void **) NULL); (void) memset(pixels,0,number_threads*sizeof(*pixels)); size=sizeof(double); if (highres == MagickFalse) size=sizeof(Quantum); for (i=0; i < (ssize_t) number_threads; i++) { pixels[i]=AcquireQuantumMemory(columns,channels*size); if (pixels[i] == (void *) NULL) return(DestroyPixelTLS(pixels)); } return(pixels); } static cmsHTRANSFORM *DestroyTransformTLS(cmsHTRANSFORM *transform) { ssize_t i; assert(transform != (cmsHTRANSFORM *) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (transform[i] != (cmsHTRANSFORM) NULL) cmsDeleteTransform(transform[i]); transform=(cmsHTRANSFORM *) RelinquishMagickMemory(transform); return(transform); } static cmsHTRANSFORM *AcquireTransformTLS(const LCMSInfo *source_info, const LCMSInfo *target_info,const cmsUInt32Number flags, cmsContext cms_context) { cmsHTRANSFORM *transform; size_t number_threads; ssize_t i; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); transform=(cmsHTRANSFORM *) AcquireQuantumMemory(number_threads, sizeof(*transform)); if (transform == (cmsHTRANSFORM *) NULL) return((cmsHTRANSFORM *) NULL); (void) memset(transform,0,number_threads*sizeof(*transform)); for (i=0; i < (ssize_t) number_threads; i++) { transform[i]=cmsCreateTransformTHR(cms_context,source_info->profile, source_info->type,target_info->profile,target_info->type, target_info->intent,flags); if (transform[i] == (cmsHTRANSFORM) NULL) return(DestroyTransformTLS(transform)); } return(transform); } static void CMSExceptionHandler(cmsContext context,cmsUInt32Number severity, const char *message) { CMSExceptionInfo *cms_exception; ExceptionInfo *exception; Image *image; cms_exception=(CMSExceptionInfo *) cmsGetContextUserData(context); if (cms_exception == (CMSExceptionInfo *) NULL) return; exception=cms_exception->exception; if (exception == (ExceptionInfo *) NULL) return; image=cms_exception->image; if (image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning, "UnableToTransformColorspace","`%s'","unknown context"); return; } if (image->debug != MagickFalse) (void) 
LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s", severity,message != (char *) NULL ? message : "no message"); (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning, "UnableToTransformColorspace","`%s', %s (#%u)",image->filename, message != (char *) NULL ? message : "no message",severity); } static void TransformDoublePixels(const int id,const Image* image, const LCMSInfo *source_info,const LCMSInfo *target_info, const cmsHTRANSFORM *transform,Quantum *q) { #define GetLCMSPixel(source_info,pixel,index) \ (source_info->scale[index]*((QuantumScale*pixel)+source_info->translate[index])) #define SetLCMSPixel(target_info,pixel,index) \ ClampToQuantum(target_info->scale[index]*((QuantumRange*pixel)+target_info->translate[index])) double *p; ssize_t x; p=(double *) source_info->pixels[id]; for (x=0; x < (ssize_t) image->columns; x++) { *p++=GetLCMSPixel(source_info,GetPixelRed(image,q),0); if (source_info->channels > 1) { *p++=GetLCMSPixel(source_info,GetPixelGreen(image,q),1); *p++=GetLCMSPixel(source_info,GetPixelBlue(image,q),2); } if (source_info->channels > 3) *p++=GetLCMSPixel(source_info,GetPixelBlack(image,q),3); q+=GetPixelChannels(image); } cmsDoTransform(transform[id],source_info->pixels[id],target_info->pixels[id], (unsigned int) image->columns); p=(double *) target_info->pixels[id]; q-=GetPixelChannels(image)*image->columns; for (x=0; x < (ssize_t) image->columns; x++) { if (target_info->channels == 1) SetPixelGray(image,SetLCMSPixel(target_info,*p,0),q); else SetPixelRed(image,SetLCMSPixel(target_info,*p,0),q); p++; if (target_info->channels > 1) { SetPixelGreen(image,SetLCMSPixel(target_info,*p,1),q); p++; SetPixelBlue(image,SetLCMSPixel(target_info,*p,2),q); p++; } if (target_info->channels > 3) { SetPixelBlack(image,SetLCMSPixel(target_info,*p,3),q); p++; } q+=GetPixelChannels(image); } } static void TransformQuantumPixels(const int id,const Image* image, const LCMSInfo *source_info,const LCMSInfo *target_info, const cmsHTRANSFORM *transform,Quantum *q) { Quantum *p; ssize_t x; p=(Quantum *) source_info->pixels[id]; for (x=0; x < (ssize_t) image->columns; x++) { *p++=GetPixelRed(image,q); if (source_info->channels > 1) { *p++=GetPixelGreen(image,q); *p++=GetPixelBlue(image,q); } if (source_info->channels > 3) *p++=GetPixelBlack(image,q); q+=GetPixelChannels(image); } cmsDoTransform(transform[id],source_info->pixels[id],target_info->pixels[id], (unsigned int) image->columns); p=(Quantum *) target_info->pixels[id]; q-=GetPixelChannels(image)*image->columns; for (x=0; x < (ssize_t) image->columns; x++) { if (target_info->channels == 1) SetPixelGray(image,*p++,q); else SetPixelRed(image,*p++,q); if (target_info->channels > 1) { SetPixelGreen(image,*p++,q); SetPixelBlue(image,*p++,q); } if (target_info->channels > 3) SetPixelBlack(image,*p++,q); q+=GetPixelChannels(image); } } static inline void SetLCMSInfoTranslate(LCMSInfo *info,const double translate) { info->translate[0]=translate; info->translate[1]=translate; info->translate[2]=translate; info->translate[3]=translate; } static inline void SetLCMSInfoScale(LCMSInfo *info,const double scale) { info->scale[0]=scale; info->scale[1]=scale; info->scale[2]=scale; info->scale[3]=scale; } #endif static MagickBooleanType SetsRGBImageProfile(Image *image, ExceptionInfo *exception) { static unsigned char sRGBProfile[] = { 0x00, 0x00, 0x0c, 0x8c, 0x61, 0x72, 0x67, 0x6c, 0x02, 0x20, 0x00, 0x00, 0x6d, 0x6e, 0x74, 0x72, 0x52, 0x47, 0x42, 0x20, 0x58, 0x59, 0x5a, 0x20, 0x07, 0xde, 0x00, 0x01, 0x00, 0x06, 0x00, 0x16, 
0x00, 0x0f, 0x00, 0x3a, 0x61, 0x63, 0x73, 0x70, 0x4d, 0x53, 0x46, 0x54, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x43, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x61, 0x72, 0x67, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x01, 0x50, 0x00, 0x00, 0x00, 0x99, 0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0x01, 0xec, 0x00, 0x00, 0x00, 0x67, 0x64, 0x6d, 0x6e, 0x64, 0x00, 0x00, 0x02, 0x54, 0x00, 0x00, 0x00, 0x70, 0x64, 0x6d, 0x64, 0x64, 0x00, 0x00, 0x02, 0xc4, 0x00, 0x00, 0x00, 0x88, 0x74, 0x65, 0x63, 0x68, 0x00, 0x00, 0x03, 0x4c, 0x00, 0x00, 0x00, 0x0c, 0x76, 0x75, 0x65, 0x64, 0x00, 0x00, 0x03, 0x58, 0x00, 0x00, 0x00, 0x67, 0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x03, 0xc0, 0x00, 0x00, 0x00, 0x24, 0x6c, 0x75, 0x6d, 0x69, 0x00, 0x00, 0x03, 0xe4, 0x00, 0x00, 0x00, 0x14, 0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x03, 0xf8, 0x00, 0x00, 0x00, 0x24, 0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0x04, 0x1c, 0x00, 0x00, 0x00, 0x14, 0x62, 0x6b, 0x70, 0x74, 0x00, 0x00, 0x04, 0x30, 0x00, 0x00, 0x00, 0x14, 0x72, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x44, 0x00, 0x00, 0x00, 0x14, 0x67, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x58, 0x00, 0x00, 0x00, 0x14, 0x62, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x6c, 0x00, 0x00, 0x00, 0x14, 0x72, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x67, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x62, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x47, 0x72, 0x61, 0x65, 0x6d, 0x65, 0x20, 0x57, 0x2e, 0x20, 0x47, 0x69, 0x6c, 0x6c, 0x2e, 0x20, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x20, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x20, 0x4e, 0x6f, 0x20, 0x57, 0x61, 0x72, 0x72, 0x61, 0x6e, 0x74, 0x79, 0x2c, 0x20, 0x55, 0x73, 0x65, 0x20, 0x61, 0x74, 0x20, 0x79, 0x6f, 0x75, 0x72, 0x20, 0x6f, 0x77, 0x6e, 0x20, 0x72, 0x69, 0x73, 0x6b, 0x2e, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 
0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0x69, 0x67, 0x20, 0x00, 0x00, 0x00, 0x00, 0x43, 0x52, 0x54, 0x20, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xa4, 0x7c, 0x00, 0x14, 0x5f, 0x30, 0x00, 0x10, 0xce, 0x02, 0x00, 0x03, 0xed, 0xb2, 0x00, 0x04, 0x13, 0x0a, 0x00, 0x03, 0x5c, 0x67, 0x00, 0x00, 0x00, 0x01, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x0a, 0x3d, 0x00, 0x50, 0x00, 0x00, 0x00, 0x57, 0x1e, 0xb8, 0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x8f, 0x00, 0x00, 0x00, 0x02, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x51, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x16, 0xcc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6f, 0xa0, 0x00, 0x00, 0x38, 0xf5, 0x00, 0x00, 0x03, 0x90, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x97, 0x00, 0x00, 0xb7, 0x87, 0x00, 0x00, 0x18, 0xd9, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x9f, 0x00, 0x00, 0x0f, 0x84, 0x00, 0x00, 0xb6, 0xc4, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x0a, 0x00, 0x0f, 0x00, 0x14, 0x00, 0x19, 0x00, 0x1e, 0x00, 0x23, 0x00, 0x28, 0x00, 0x2d, 0x00, 0x32, 0x00, 0x37, 0x00, 0x3b, 0x00, 0x40, 0x00, 0x45, 0x00, 0x4a, 0x00, 0x4f, 0x00, 0x54, 0x00, 0x59, 0x00, 0x5e, 0x00, 0x63, 0x00, 0x68, 0x00, 0x6d, 0x00, 0x72, 0x00, 0x77, 0x00, 0x7c, 
0x00, 0x81, 0x00, 0x86, 0x00, 0x8b, 0x00, 0x90, 0x00, 0x95, 0x00, 0x9a, 0x00, 0x9f, 0x00, 0xa4, 0x00, 0xa9, 0x00, 0xae, 0x00, 0xb2, 0x00, 0xb7, 0x00, 0xbc, 0x00, 0xc1, 0x00, 0xc6, 0x00, 0xcb, 0x00, 0xd0, 0x00, 0xd5, 0x00, 0xdb, 0x00, 0xe0, 0x00, 0xe5, 0x00, 0xeb, 0x00, 0xf0, 0x00, 0xf6, 0x00, 0xfb, 0x01, 0x01, 0x01, 0x07, 0x01, 0x0d, 0x01, 0x13, 0x01, 0x19, 0x01, 0x1f, 0x01, 0x25, 0x01, 0x2b, 0x01, 0x32, 0x01, 0x38, 0x01, 0x3e, 0x01, 0x45, 0x01, 0x4c, 0x01, 0x52, 0x01, 0x59, 0x01, 0x60, 0x01, 0x67, 0x01, 0x6e, 0x01, 0x75, 0x01, 0x7c, 0x01, 0x83, 0x01, 0x8b, 0x01, 0x92, 0x01, 0x9a, 0x01, 0xa1, 0x01, 0xa9, 0x01, 0xb1, 0x01, 0xb9, 0x01, 0xc1, 0x01, 0xc9, 0x01, 0xd1, 0x01, 0xd9, 0x01, 0xe1, 0x01, 0xe9, 0x01, 0xf2, 0x01, 0xfa, 0x02, 0x03, 0x02, 0x0c, 0x02, 0x14, 0x02, 0x1d, 0x02, 0x26, 0x02, 0x2f, 0x02, 0x38, 0x02, 0x41, 0x02, 0x4b, 0x02, 0x54, 0x02, 0x5d, 0x02, 0x67, 0x02, 0x71, 0x02, 0x7a, 0x02, 0x84, 0x02, 0x8e, 0x02, 0x98, 0x02, 0xa2, 0x02, 0xac, 0x02, 0xb6, 0x02, 0xc1, 0x02, 0xcb, 0x02, 0xd5, 0x02, 0xe0, 0x02, 0xeb, 0x02, 0xf5, 0x03, 0x00, 0x03, 0x0b, 0x03, 0x16, 0x03, 0x21, 0x03, 0x2d, 0x03, 0x38, 0x03, 0x43, 0x03, 0x4f, 0x03, 0x5a, 0x03, 0x66, 0x03, 0x72, 0x03, 0x7e, 0x03, 0x8a, 0x03, 0x96, 0x03, 0xa2, 0x03, 0xae, 0x03, 0xba, 0x03, 0xc7, 0x03, 0xd3, 0x03, 0xe0, 0x03, 0xec, 0x03, 0xf9, 0x04, 0x06, 0x04, 0x13, 0x04, 0x20, 0x04, 0x2d, 0x04, 0x3b, 0x04, 0x48, 0x04, 0x55, 0x04, 0x63, 0x04, 0x71, 0x04, 0x7e, 0x04, 0x8c, 0x04, 0x9a, 0x04, 0xa8, 0x04, 0xb6, 0x04, 0xc4, 0x04, 0xd3, 0x04, 0xe1, 0x04, 0xf0, 0x04, 0xfe, 0x05, 0x0d, 0x05, 0x1c, 0x05, 0x2b, 0x05, 0x3a, 0x05, 0x49, 0x05, 0x58, 0x05, 0x67, 0x05, 0x77, 0x05, 0x86, 0x05, 0x96, 0x05, 0xa6, 0x05, 0xb5, 0x05, 0xc5, 0x05, 0xd5, 0x05, 0xe5, 0x05, 0xf6, 0x06, 0x06, 0x06, 0x16, 0x06, 0x27, 0x06, 0x37, 0x06, 0x48, 0x06, 0x59, 0x06, 0x6a, 0x06, 0x7b, 0x06, 0x8c, 0x06, 0x9d, 0x06, 0xaf, 0x06, 0xc0, 0x06, 0xd1, 0x06, 0xe3, 0x06, 0xf5, 0x07, 0x07, 0x07, 0x19, 0x07, 0x2b, 0x07, 0x3d, 0x07, 0x4f, 0x07, 0x61, 0x07, 0x74, 0x07, 0x86, 0x07, 0x99, 0x07, 0xac, 0x07, 0xbf, 0x07, 0xd2, 0x07, 0xe5, 0x07, 0xf8, 0x08, 0x0b, 0x08, 0x1f, 0x08, 0x32, 0x08, 0x46, 0x08, 0x5a, 0x08, 0x6e, 0x08, 0x82, 0x08, 0x96, 0x08, 0xaa, 0x08, 0xbe, 0x08, 0xd2, 0x08, 0xe7, 0x08, 0xfb, 0x09, 0x10, 0x09, 0x25, 0x09, 0x3a, 0x09, 0x4f, 0x09, 0x64, 0x09, 0x79, 0x09, 0x8f, 0x09, 0xa4, 0x09, 0xba, 0x09, 0xcf, 0x09, 0xe5, 0x09, 0xfb, 0x0a, 0x11, 0x0a, 0x27, 0x0a, 0x3d, 0x0a, 0x54, 0x0a, 0x6a, 0x0a, 0x81, 0x0a, 0x98, 0x0a, 0xae, 0x0a, 0xc5, 0x0a, 0xdc, 0x0a, 0xf3, 0x0b, 0x0b, 0x0b, 0x22, 0x0b, 0x39, 0x0b, 0x51, 0x0b, 0x69, 0x0b, 0x80, 0x0b, 0x98, 0x0b, 0xb0, 0x0b, 0xc8, 0x0b, 0xe1, 0x0b, 0xf9, 0x0c, 0x12, 0x0c, 0x2a, 0x0c, 0x43, 0x0c, 0x5c, 0x0c, 0x75, 0x0c, 0x8e, 0x0c, 0xa7, 0x0c, 0xc0, 0x0c, 0xd9, 0x0c, 0xf3, 0x0d, 0x0d, 0x0d, 0x26, 0x0d, 0x40, 0x0d, 0x5a, 0x0d, 0x74, 0x0d, 0x8e, 0x0d, 0xa9, 0x0d, 0xc3, 0x0d, 0xde, 0x0d, 0xf8, 0x0e, 0x13, 0x0e, 0x2e, 0x0e, 0x49, 0x0e, 0x64, 0x0e, 0x7f, 0x0e, 0x9b, 0x0e, 0xb6, 0x0e, 0xd2, 0x0e, 0xee, 0x0f, 0x09, 0x0f, 0x25, 0x0f, 0x41, 0x0f, 0x5e, 0x0f, 0x7a, 0x0f, 0x96, 0x0f, 0xb3, 0x0f, 0xcf, 0x0f, 0xec, 0x10, 0x09, 0x10, 0x26, 0x10, 0x43, 0x10, 0x61, 0x10, 0x7e, 0x10, 0x9b, 0x10, 0xb9, 0x10, 0xd7, 0x10, 0xf5, 0x11, 0x13, 0x11, 0x31, 0x11, 0x4f, 0x11, 0x6d, 0x11, 0x8c, 0x11, 0xaa, 0x11, 0xc9, 0x11, 0xe8, 0x12, 0x07, 0x12, 0x26, 0x12, 0x45, 0x12, 0x64, 0x12, 0x84, 0x12, 0xa3, 0x12, 0xc3, 0x12, 0xe3, 0x13, 0x03, 0x13, 0x23, 0x13, 0x43, 0x13, 0x63, 0x13, 0x83, 0x13, 0xa4, 0x13, 0xc5, 0x13, 0xe5, 0x14, 0x06, 0x14, 0x27, 0x14, 0x49, 0x14, 0x6a, 0x14, 0x8b, 
0x14, 0xad, 0x14, 0xce, 0x14, 0xf0, 0x15, 0x12, 0x15, 0x34, 0x15, 0x56, 0x15, 0x78, 0x15, 0x9b, 0x15, 0xbd, 0x15, 0xe0, 0x16, 0x03, 0x16, 0x26, 0x16, 0x49, 0x16, 0x6c, 0x16, 0x8f, 0x16, 0xb2, 0x16, 0xd6, 0x16, 0xfa, 0x17, 0x1d, 0x17, 0x41, 0x17, 0x65, 0x17, 0x89, 0x17, 0xae, 0x17, 0xd2, 0x17, 0xf7, 0x18, 0x1b, 0x18, 0x40, 0x18, 0x65, 0x18, 0x8a, 0x18, 0xaf, 0x18, 0xd5, 0x18, 0xfa, 0x19, 0x20, 0x19, 0x45, 0x19, 0x6b, 0x19, 0x91, 0x19, 0xb7, 0x19, 0xdd, 0x1a, 0x04, 0x1a, 0x2a, 0x1a, 0x51, 0x1a, 0x77, 0x1a, 0x9e, 0x1a, 0xc5, 0x1a, 0xec, 0x1b, 0x14, 0x1b, 0x3b, 0x1b, 0x63, 0x1b, 0x8a, 0x1b, 0xb2, 0x1b, 0xda, 0x1c, 0x02, 0x1c, 0x2a, 0x1c, 0x52, 0x1c, 0x7b, 0x1c, 0xa3, 0x1c, 0xcc, 0x1c, 0xf5, 0x1d, 0x1e, 0x1d, 0x47, 0x1d, 0x70, 0x1d, 0x99, 0x1d, 0xc3, 0x1d, 0xec, 0x1e, 0x16, 0x1e, 0x40, 0x1e, 0x6a, 0x1e, 0x94, 0x1e, 0xbe, 0x1e, 0xe9, 0x1f, 0x13, 0x1f, 0x3e, 0x1f, 0x69, 0x1f, 0x94, 0x1f, 0xbf, 0x1f, 0xea, 0x20, 0x15, 0x20, 0x41, 0x20, 0x6c, 0x20, 0x98, 0x20, 0xc4, 0x20, 0xf0, 0x21, 0x1c, 0x21, 0x48, 0x21, 0x75, 0x21, 0xa1, 0x21, 0xce, 0x21, 0xfb, 0x22, 0x27, 0x22, 0x55, 0x22, 0x82, 0x22, 0xaf, 0x22, 0xdd, 0x23, 0x0a, 0x23, 0x38, 0x23, 0x66, 0x23, 0x94, 0x23, 0xc2, 0x23, 0xf0, 0x24, 0x1f, 0x24, 0x4d, 0x24, 0x7c, 0x24, 0xab, 0x24, 0xda, 0x25, 0x09, 0x25, 0x38, 0x25, 0x68, 0x25, 0x97, 0x25, 0xc7, 0x25, 0xf7, 0x26, 0x27, 0x26, 0x57, 0x26, 0x87, 0x26, 0xb7, 0x26, 0xe8, 0x27, 0x18, 0x27, 0x49, 0x27, 0x7a, 0x27, 0xab, 0x27, 0xdc, 0x28, 0x0d, 0x28, 0x3f, 0x28, 0x71, 0x28, 0xa2, 0x28, 0xd4, 0x29, 0x06, 0x29, 0x38, 0x29, 0x6b, 0x29, 0x9d, 0x29, 0xd0, 0x2a, 0x02, 0x2a, 0x35, 0x2a, 0x68, 0x2a, 0x9b, 0x2a, 0xcf, 0x2b, 0x02, 0x2b, 0x36, 0x2b, 0x69, 0x2b, 0x9d, 0x2b, 0xd1, 0x2c, 0x05, 0x2c, 0x39, 0x2c, 0x6e, 0x2c, 0xa2, 0x2c, 0xd7, 0x2d, 0x0c, 0x2d, 0x41, 0x2d, 0x76, 0x2d, 0xab, 0x2d, 0xe1, 0x2e, 0x16, 0x2e, 0x4c, 0x2e, 0x82, 0x2e, 0xb7, 0x2e, 0xee, 0x2f, 0x24, 0x2f, 0x5a, 0x2f, 0x91, 0x2f, 0xc7, 0x2f, 0xfe, 0x30, 0x35, 0x30, 0x6c, 0x30, 0xa4, 0x30, 0xdb, 0x31, 0x12, 0x31, 0x4a, 0x31, 0x82, 0x31, 0xba, 0x31, 0xf2, 0x32, 0x2a, 0x32, 0x63, 0x32, 0x9b, 0x32, 0xd4, 0x33, 0x0d, 0x33, 0x46, 0x33, 0x7f, 0x33, 0xb8, 0x33, 0xf1, 0x34, 0x2b, 0x34, 0x65, 0x34, 0x9e, 0x34, 0xd8, 0x35, 0x13, 0x35, 0x4d, 0x35, 0x87, 0x35, 0xc2, 0x35, 0xfd, 0x36, 0x37, 0x36, 0x72, 0x36, 0xae, 0x36, 0xe9, 0x37, 0x24, 0x37, 0x60, 0x37, 0x9c, 0x37, 0xd7, 0x38, 0x14, 0x38, 0x50, 0x38, 0x8c, 0x38, 0xc8, 0x39, 0x05, 0x39, 0x42, 0x39, 0x7f, 0x39, 0xbc, 0x39, 0xf9, 0x3a, 0x36, 0x3a, 0x74, 0x3a, 0xb2, 0x3a, 0xef, 0x3b, 0x2d, 0x3b, 0x6b, 0x3b, 0xaa, 0x3b, 0xe8, 0x3c, 0x27, 0x3c, 0x65, 0x3c, 0xa4, 0x3c, 0xe3, 0x3d, 0x22, 0x3d, 0x61, 0x3d, 0xa1, 0x3d, 0xe0, 0x3e, 0x20, 0x3e, 0x60, 0x3e, 0xa0, 0x3e, 0xe0, 0x3f, 0x21, 0x3f, 0x61, 0x3f, 0xa2, 0x3f, 0xe2, 0x40, 0x23, 0x40, 0x64, 0x40, 0xa6, 0x40, 0xe7, 0x41, 0x29, 0x41, 0x6a, 0x41, 0xac, 0x41, 0xee, 0x42, 0x30, 0x42, 0x72, 0x42, 0xb5, 0x42, 0xf7, 0x43, 0x3a, 0x43, 0x7d, 0x43, 0xc0, 0x44, 0x03, 0x44, 0x47, 0x44, 0x8a, 0x44, 0xce, 0x45, 0x12, 0x45, 0x55, 0x45, 0x9a, 0x45, 0xde, 0x46, 0x22, 0x46, 0x67, 0x46, 0xab, 0x46, 0xf0, 0x47, 0x35, 0x47, 0x7b, 0x47, 0xc0, 0x48, 0x05, 0x48, 0x4b, 0x48, 0x91, 0x48, 0xd7, 0x49, 0x1d, 0x49, 0x63, 0x49, 0xa9, 0x49, 0xf0, 0x4a, 0x37, 0x4a, 0x7d, 0x4a, 0xc4, 0x4b, 0x0c, 0x4b, 0x53, 0x4b, 0x9a, 0x4b, 0xe2, 0x4c, 0x2a, 0x4c, 0x72, 0x4c, 0xba, 0x4d, 0x02, 0x4d, 0x4a, 0x4d, 0x93, 0x4d, 0xdc, 0x4e, 0x25, 0x4e, 0x6e, 0x4e, 0xb7, 0x4f, 0x00, 0x4f, 0x49, 0x4f, 0x93, 0x4f, 0xdd, 0x50, 0x27, 0x50, 0x71, 0x50, 0xbb, 0x51, 0x06, 0x51, 0x50, 0x51, 0x9b, 0x51, 0xe6, 0x52, 0x31, 0x52, 0x7c, 
0x52, 0xc7, 0x53, 0x13, 0x53, 0x5f, 0x53, 0xaa, 0x53, 0xf6, 0x54, 0x42, 0x54, 0x8f, 0x54, 0xdb, 0x55, 0x28, 0x55, 0x75, 0x55, 0xc2, 0x56, 0x0f, 0x56, 0x5c, 0x56, 0xa9, 0x56, 0xf7, 0x57, 0x44, 0x57, 0x92, 0x57, 0xe0, 0x58, 0x2f, 0x58, 0x7d, 0x58, 0xcb, 0x59, 0x1a, 0x59, 0x69, 0x59, 0xb8, 0x5a, 0x07, 0x5a, 0x56, 0x5a, 0xa6, 0x5a, 0xf5, 0x5b, 0x45, 0x5b, 0x95, 0x5b, 0xe5, 0x5c, 0x35, 0x5c, 0x86, 0x5c, 0xd6, 0x5d, 0x27, 0x5d, 0x78, 0x5d, 0xc9, 0x5e, 0x1a, 0x5e, 0x6c, 0x5e, 0xbd, 0x5f, 0x0f, 0x5f, 0x61, 0x5f, 0xb3, 0x60, 0x05, 0x60, 0x57, 0x60, 0xaa, 0x60, 0xfc, 0x61, 0x4f, 0x61, 0xa2, 0x61, 0xf5, 0x62, 0x49, 0x62, 0x9c, 0x62, 0xf0, 0x63, 0x43, 0x63, 0x97, 0x63, 0xeb, 0x64, 0x40, 0x64, 0x94, 0x64, 0xe9, 0x65, 0x3d, 0x65, 0x92, 0x65, 0xe7, 0x66, 0x3d, 0x66, 0x92, 0x66, 0xe8, 0x67, 0x3d, 0x67, 0x93, 0x67, 0xe9, 0x68, 0x3f, 0x68, 0x96, 0x68, 0xec, 0x69, 0x43, 0x69, 0x9a, 0x69, 0xf1, 0x6a, 0x48, 0x6a, 0x9f, 0x6a, 0xf7, 0x6b, 0x4f, 0x6b, 0xa7, 0x6b, 0xff, 0x6c, 0x57, 0x6c, 0xaf, 0x6d, 0x08, 0x6d, 0x60, 0x6d, 0xb9, 0x6e, 0x12, 0x6e, 0x6b, 0x6e, 0xc4, 0x6f, 0x1e, 0x6f, 0x78, 0x6f, 0xd1, 0x70, 0x2b, 0x70, 0x86, 0x70, 0xe0, 0x71, 0x3a, 0x71, 0x95, 0x71, 0xf0, 0x72, 0x4b, 0x72, 0xa6, 0x73, 0x01, 0x73, 0x5d, 0x73, 0xb8, 0x74, 0x14, 0x74, 0x70, 0x74, 0xcc, 0x75, 0x28, 0x75, 0x85, 0x75, 0xe1, 0x76, 0x3e, 0x76, 0x9b, 0x76, 0xf8, 0x77, 0x56, 0x77, 0xb3, 0x78, 0x11, 0x78, 0x6e, 0x78, 0xcc, 0x79, 0x2a, 0x79, 0x89, 0x79, 0xe7, 0x7a, 0x46, 0x7a, 0xa5, 0x7b, 0x04, 0x7b, 0x63, 0x7b, 0xc2, 0x7c, 0x21, 0x7c, 0x81, 0x7c, 0xe1, 0x7d, 0x41, 0x7d, 0xa1, 0x7e, 0x01, 0x7e, 0x62, 0x7e, 0xc2, 0x7f, 0x23, 0x7f, 0x84, 0x7f, 0xe5, 0x80, 0x47, 0x80, 0xa8, 0x81, 0x0a, 0x81, 0x6b, 0x81, 0xcd, 0x82, 0x30, 0x82, 0x92, 0x82, 0xf4, 0x83, 0x57, 0x83, 0xba, 0x84, 0x1d, 0x84, 0x80, 0x84, 0xe3, 0x85, 0x47, 0x85, 0xab, 0x86, 0x0e, 0x86, 0x72, 0x86, 0xd7, 0x87, 0x3b, 0x87, 0x9f, 0x88, 0x04, 0x88, 0x69, 0x88, 0xce, 0x89, 0x33, 0x89, 0x99, 0x89, 0xfe, 0x8a, 0x64, 0x8a, 0xca, 0x8b, 0x30, 0x8b, 0x96, 0x8b, 0xfc, 0x8c, 0x63, 0x8c, 0xca, 0x8d, 0x31, 0x8d, 0x98, 0x8d, 0xff, 0x8e, 0x66, 0x8e, 0xce, 0x8f, 0x36, 0x8f, 0x9e, 0x90, 0x06, 0x90, 0x6e, 0x90, 0xd6, 0x91, 0x3f, 0x91, 0xa8, 0x92, 0x11, 0x92, 0x7a, 0x92, 0xe3, 0x93, 0x4d, 0x93, 0xb6, 0x94, 0x20, 0x94, 0x8a, 0x94, 0xf4, 0x95, 0x5f, 0x95, 0xc9, 0x96, 0x34, 0x96, 0x9f, 0x97, 0x0a, 0x97, 0x75, 0x97, 0xe0, 0x98, 0x4c, 0x98, 0xb8, 0x99, 0x24, 0x99, 0x90, 0x99, 0xfc, 0x9a, 0x68, 0x9a, 0xd5, 0x9b, 0x42, 0x9b, 0xaf, 0x9c, 0x1c, 0x9c, 0x89, 0x9c, 0xf7, 0x9d, 0x64, 0x9d, 0xd2, 0x9e, 0x40, 0x9e, 0xae, 0x9f, 0x1d, 0x9f, 0x8b, 0x9f, 0xfa, 0xa0, 0x69, 0xa0, 0xd8, 0xa1, 0x47, 0xa1, 0xb6, 0xa2, 0x26, 0xa2, 0x96, 0xa3, 0x06, 0xa3, 0x76, 0xa3, 0xe6, 0xa4, 0x56, 0xa4, 0xc7, 0xa5, 0x38, 0xa5, 0xa9, 0xa6, 0x1a, 0xa6, 0x8b, 0xa6, 0xfd, 0xa7, 0x6e, 0xa7, 0xe0, 0xa8, 0x52, 0xa8, 0xc4, 0xa9, 0x37, 0xa9, 0xa9, 0xaa, 0x1c, 0xaa, 0x8f, 0xab, 0x02, 0xab, 0x75, 0xab, 0xe9, 0xac, 0x5c, 0xac, 0xd0, 0xad, 0x44, 0xad, 0xb8, 0xae, 0x2d, 0xae, 0xa1, 0xaf, 0x16, 0xaf, 0x8b, 0xb0, 0x00, 0xb0, 0x75, 0xb0, 0xea, 0xb1, 0x60, 0xb1, 0xd6, 0xb2, 0x4b, 0xb2, 0xc2, 0xb3, 0x38, 0xb3, 0xae, 0xb4, 0x25, 0xb4, 0x9c, 0xb5, 0x13, 0xb5, 0x8a, 0xb6, 0x01, 0xb6, 0x79, 0xb6, 0xf0, 0xb7, 0x68, 0xb7, 0xe0, 0xb8, 0x59, 0xb8, 0xd1, 0xb9, 0x4a, 0xb9, 0xc2, 0xba, 0x3b, 0xba, 0xb5, 0xbb, 0x2e, 0xbb, 0xa7, 0xbc, 0x21, 0xbc, 0x9b, 0xbd, 0x15, 0xbd, 0x8f, 0xbe, 0x0a, 0xbe, 0x84, 0xbe, 0xff, 0xbf, 0x7a, 0xbf, 0xf5, 0xc0, 0x70, 0xc0, 0xec, 0xc1, 0x67, 0xc1, 0xe3, 0xc2, 0x5f, 0xc2, 0xdb, 0xc3, 0x58, 0xc3, 0xd4, 0xc4, 0x51, 0xc4, 0xce, 0xc5, 0x4b, 0xc5, 0xc8, 
0xc6, 0x46, 0xc6, 0xc3, 0xc7, 0x41, 0xc7, 0xbf, 0xc8, 0x3d, 0xc8, 0xbc, 0xc9, 0x3a, 0xc9, 0xb9, 0xca, 0x38, 0xca, 0xb7, 0xcb, 0x36, 0xcb, 0xb6, 0xcc, 0x35, 0xcc, 0xb5, 0xcd, 0x35, 0xcd, 0xb5, 0xce, 0x36, 0xce, 0xb6, 0xcf, 0x37, 0xcf, 0xb8, 0xd0, 0x39, 0xd0, 0xba, 0xd1, 0x3c, 0xd1, 0xbe, 0xd2, 0x3f, 0xd2, 0xc1, 0xd3, 0x44, 0xd3, 0xc6, 0xd4, 0x49, 0xd4, 0xcb, 0xd5, 0x4e, 0xd5, 0xd1, 0xd6, 0x55, 0xd6, 0xd8, 0xd7, 0x5c, 0xd7, 0xe0, 0xd8, 0x64, 0xd8, 0xe8, 0xd9, 0x6c, 0xd9, 0xf1, 0xda, 0x76, 0xda, 0xfb, 0xdb, 0x80, 0xdc, 0x05, 0xdc, 0x8a, 0xdd, 0x10, 0xdd, 0x96, 0xde, 0x1c, 0xde, 0xa2, 0xdf, 0x29, 0xdf, 0xaf, 0xe0, 0x36, 0xe0, 0xbd, 0xe1, 0x44, 0xe1, 0xcc, 0xe2, 0x53, 0xe2, 0xdb, 0xe3, 0x63, 0xe3, 0xeb, 0xe4, 0x73, 0xe4, 0xfc, 0xe5, 0x84, 0xe6, 0x0d, 0xe6, 0x96, 0xe7, 0x1f, 0xe7, 0xa9, 0xe8, 0x32, 0xe8, 0xbc, 0xe9, 0x46, 0xe9, 0xd0, 0xea, 0x5b, 0xea, 0xe5, 0xeb, 0x70, 0xeb, 0xfb, 0xec, 0x86, 0xed, 0x11, 0xed, 0x9c, 0xee, 0x28, 0xee, 0xb4, 0xef, 0x40, 0xef, 0xcc, 0xf0, 0x58, 0xf0, 0xe5, 0xf1, 0x72, 0xf1, 0xff, 0xf2, 0x8c, 0xf3, 0x19, 0xf3, 0xa7, 0xf4, 0x34, 0xf4, 0xc2, 0xf5, 0x50, 0xf5, 0xde, 0xf6, 0x6d, 0xf6, 0xfb, 0xf7, 0x8a, 0xf8, 0x19, 0xf8, 0xa8, 0xf9, 0x38, 0xf9, 0xc7, 0xfa, 0x57, 0xfa, 0xe7, 0xfb, 0x77, 0xfc, 0x07, 0xfc, 0x98, 0xfd, 0x29, 0xfd, 0xba, 0xfe, 0x4b, 0xfe, 0xdc, 0xff, 0x6d, 0xff, 0xff }; StringInfo *profile; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (GetImageProfile(image,"icc") != (const StringInfo *) NULL) return(MagickFalse); profile=AcquireStringInfo(sizeof(sRGBProfile)); SetStringInfoDatum(profile,sRGBProfile); status=SetImageProfile(image,"icc",profile,exception); profile=DestroyStringInfo(profile); return(status); } MagickExport MagickBooleanType ProfileImage(Image *image,const char *name, const void *datum,const size_t length,ExceptionInfo *exception) { #define ProfileImageTag "Profile/Image" #ifndef TYPE_XYZ_8 #define TYPE_XYZ_8 (COLORSPACE_SH(PT_XYZ)|CHANNELS_SH(3)|BYTES_SH(1)) #endif #define ThrowProfileException(severity,tag,context) \ { \ if (profile != (StringInfo *) NULL) \ profile=DestroyStringInfo(profile); \ if (cms_context != (cmsContext) NULL) \ cmsDeleteContext(cms_context); \ if (source_info.profile != (cmsHPROFILE) NULL) \ (void) cmsCloseProfile(source_info.profile); \ if (target_info.profile != (cmsHPROFILE) NULL) \ (void) cmsCloseProfile(target_info.profile); \ ThrowBinaryException(severity,tag,context); \ } MagickBooleanType status; StringInfo *profile; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(name != (const char *) NULL); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((datum == (const void *) NULL) || (length == 0)) { char *next; /* Delete image profile(s). */ ResetImageProfileIterator(image); for (next=GetNextImageProfile(image); next != (const char *) NULL; ) { if (IsOptionMember(next,name) != MagickFalse) { (void) DeleteImageProfile(image,next); ResetImageProfileIterator(image); } next=GetNextImageProfile(image); } return(MagickTrue); } /* Add a ICC, IPTC, or generic profile to the image. 
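    For "icc"/"icm" profiles the LCMS code path below may also transform the
    pixels: when the image already carries an ICC profile, the existing
    profile becomes the transform source and the profile supplied here the
    target; otherwise the supplied profile is simply attached unchanged.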
*/ status=MagickTrue; profile=AcquireStringInfo((size_t) length); SetStringInfoDatum(profile,(unsigned char *) datum); if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0)) status=SetImageProfile(image,name,profile,exception); else { const StringInfo *icc_profile; icc_profile=GetImageProfile(image,"icc"); if ((icc_profile != (const StringInfo *) NULL) && (CompareStringInfo(icc_profile,profile) == 0)) { const char *value; value=GetImageProperty(image,"exif:ColorSpace",exception); (void) value; if (LocaleCompare(value,"1") != 0) (void) SetsRGBImageProfile(image,exception); value=GetImageProperty(image,"exif:InteroperabilityIndex",exception); if (LocaleCompare(value,"R98.") != 0) (void) SetsRGBImageProfile(image,exception); icc_profile=GetImageProfile(image,"icc"); } if ((icc_profile != (const StringInfo *) NULL) && (CompareStringInfo(icc_profile,profile) == 0)) { profile=DestroyStringInfo(profile); return(MagickTrue); } #if !defined(MAGICKCORE_LCMS_DELEGATE) (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn", "'%s' (LCMS)",image->filename); #else { cmsContext cms_context; CMSExceptionInfo cms_exception; LCMSInfo source_info, target_info; /* Transform pixel colors as defined by the color profiles. */ cms_exception.image=image; cms_exception.exception=exception; cms_context=cmsCreateContext(NULL,&cms_exception); if (cms_context == (cmsContext) NULL) { profile=DestroyStringInfo(profile); ThrowBinaryException(ResourceLimitError, "ColorspaceColorProfileMismatch",name); } cmsSetLogErrorHandlerTHR(cms_context,CMSExceptionHandler); source_info.profile=cmsOpenProfileFromMemTHR(cms_context, GetStringInfoDatum(profile),(cmsUInt32Number) GetStringInfoLength(profile)); if (source_info.profile == (cmsHPROFILE) NULL) { profile=DestroyStringInfo(profile); cmsDeleteContext(cms_context); ThrowBinaryException(ResourceLimitError, "ColorspaceColorProfileMismatch",name); } if ((cmsGetDeviceClass(source_info.profile) != cmsSigLinkClass) && (icc_profile == (StringInfo *) NULL)) status=SetImageProfile(image,name,profile,exception); else { CacheView *image_view; cmsColorSpaceSignature signature; cmsHTRANSFORM *magick_restrict transform; cmsUInt32Number flags; MagickBooleanType highres; MagickOffsetType progress; ssize_t y; target_info.profile=(cmsHPROFILE) NULL; if (icc_profile != (StringInfo *) NULL) { target_info.profile=source_info.profile; source_info.profile=cmsOpenProfileFromMemTHR(cms_context, GetStringInfoDatum(icc_profile),(cmsUInt32Number) GetStringInfoLength(icc_profile)); if (source_info.profile == (cmsHPROFILE) NULL) ThrowProfileException(ResourceLimitError, "ColorspaceColorProfileMismatch",name); } highres=MagickTrue; #if !defined(MAGICKCORE_HDRI_SUPPORT) || (MAGICKCORE_QUANTUM_DEPTH > 16) { const char *artifact; artifact=GetImageArtifact(image,"profile:highres-transform"); if (IsStringFalse(artifact) != MagickFalse) highres=MagickFalse; } #endif SetLCMSInfoScale(&source_info,1.0); SetLCMSInfoTranslate(&source_info,0.0); source_info.colorspace=sRGBColorspace; source_info.channels=3; switch (cmsGetColorSpace(source_info.profile)) { case cmsSigCmykData: { source_info.colorspace=CMYKColorspace; source_info.channels=4; if (highres != MagickFalse) { source_info.type=(cmsUInt32Number) TYPE_CMYK_DBL; SetLCMSInfoScale(&source_info,100.0); } #if (MAGICKCORE_QUANTUM_DEPTH == 8) else source_info.type=(cmsUInt32Number) TYPE_CMYK_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else source_info.type=(cmsUInt32Number) TYPE_CMYK_16; #endif 
break; } case cmsSigGrayData: { source_info.colorspace=GRAYColorspace; source_info.channels=1; if (highres != MagickFalse) source_info.type=(cmsUInt32Number) TYPE_GRAY_DBL; #if (MAGICKCORE_QUANTUM_DEPTH == 8) else source_info.type=(cmsUInt32Number) TYPE_GRAY_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else source_info.type=(cmsUInt32Number) TYPE_GRAY_16; #endif break; } case cmsSigLabData: { source_info.colorspace=LabColorspace; if (highres != MagickFalse) { source_info.type=(cmsUInt32Number) TYPE_Lab_DBL; source_info.scale[0]=100.0; source_info.scale[1]=255.0; source_info.scale[2]=255.0; source_info.translate[1]=(-0.5); source_info.translate[2]=(-0.5); } #if (MAGICKCORE_QUANTUM_DEPTH == 8) else source_info.type=(cmsUInt32Number) TYPE_Lab_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else source_info.type=(cmsUInt32Number) TYPE_Lab_16; #endif break; } case cmsSigRgbData: { source_info.colorspace=sRGBColorspace; if (highres != MagickFalse) source_info.type=(cmsUInt32Number) TYPE_RGB_DBL; #if (MAGICKCORE_QUANTUM_DEPTH == 8) else source_info.type=(cmsUInt32Number) TYPE_RGB_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else source_info.type=(cmsUInt32Number) TYPE_RGB_16; #endif break; } case cmsSigXYZData: { source_info.colorspace=XYZColorspace; if (highres != MagickFalse) source_info.type=(cmsUInt32Number) TYPE_XYZ_DBL; #if (MAGICKCORE_QUANTUM_DEPTH == 8) else source_info.type=(cmsUInt32Number) TYPE_XYZ_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else source_info.type=(cmsUInt32Number) TYPE_XYZ_16; #endif break; } default: ThrowProfileException(ImageError, "ColorspaceColorProfileMismatch",name); } signature=cmsGetPCS(source_info.profile); if (target_info.profile != (cmsHPROFILE) NULL) signature=cmsGetColorSpace(target_info.profile); SetLCMSInfoScale(&target_info,1.0); SetLCMSInfoTranslate(&target_info,0.0); target_info.channels=3; switch (signature) { case cmsSigCmykData: { target_info.colorspace=CMYKColorspace; target_info.channels=4; if (highres != MagickFalse) { target_info.type=(cmsUInt32Number) TYPE_CMYK_DBL; SetLCMSInfoScale(&target_info,0.01); } #if (MAGICKCORE_QUANTUM_DEPTH == 8) else target_info.type=(cmsUInt32Number) TYPE_CMYK_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else target_info.type=(cmsUInt32Number) TYPE_CMYK_16; #endif break; } case cmsSigGrayData: { target_info.colorspace=GRAYColorspace; target_info.channels=1; if (highres != MagickFalse) target_info.type=(cmsUInt32Number) TYPE_GRAY_DBL; #if (MAGICKCORE_QUANTUM_DEPTH == 8) else target_info.type=(cmsUInt32Number) TYPE_GRAY_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else target_info.type=(cmsUInt32Number) TYPE_GRAY_16; #endif break; } case cmsSigLabData: { target_info.colorspace=LabColorspace; if (highres != MagickFalse) { target_info.type=(cmsUInt32Number) TYPE_Lab_DBL; target_info.scale[0]=0.01; target_info.scale[1]=1/255.0; target_info.scale[2]=1/255.0; target_info.translate[1]=0.5; target_info.translate[2]=0.5; } #if (MAGICKCORE_QUANTUM_DEPTH == 8) else target_info.type=(cmsUInt32Number) TYPE_Lab_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else target_info.type=(cmsUInt32Number) TYPE_Lab_16; #endif break; } case cmsSigRgbData: { target_info.colorspace=sRGBColorspace; if (highres != MagickFalse) target_info.type=(cmsUInt32Number) TYPE_RGB_DBL; #if (MAGICKCORE_QUANTUM_DEPTH == 8) else target_info.type=(cmsUInt32Number) TYPE_RGB_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else target_info.type=(cmsUInt32Number) TYPE_RGB_16; #endif break; } case cmsSigXYZData: { target_info.colorspace=XYZColorspace; if (highres != MagickFalse) 
            target_info.type=(cmsUInt32Number) TYPE_XYZ_DBL;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
          else
            target_info.type=(cmsUInt32Number) TYPE_XYZ_8;
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
          else
            target_info.type=(cmsUInt32Number) TYPE_XYZ_16;
#endif
          break;
        }
        default:
          ThrowProfileException(ImageError,
            "ColorspaceColorProfileMismatch",name);
      }
      switch (image->rendering_intent)
      {
        case AbsoluteIntent:
        {
          target_info.intent=INTENT_ABSOLUTE_COLORIMETRIC;
          break;
        }
        case PerceptualIntent:
        {
          target_info.intent=INTENT_PERCEPTUAL;
          break;
        }
        case RelativeIntent:
        {
          target_info.intent=INTENT_RELATIVE_COLORIMETRIC;
          break;
        }
        case SaturationIntent:
        {
          target_info.intent=INTENT_SATURATION;
          break;
        }
        default:
        {
          target_info.intent=INTENT_PERCEPTUAL;
          break;
        }
      }
      flags=cmsFLAGS_HIGHRESPRECALC;
#if defined(cmsFLAGS_BLACKPOINTCOMPENSATION)
      if (image->black_point_compensation != MagickFalse)
        flags|=cmsFLAGS_BLACKPOINTCOMPENSATION;
#endif
      transform=AcquireTransformTLS(&source_info,&target_info,flags,
        cms_context);
      if (transform == (cmsHTRANSFORM *) NULL)
        ThrowProfileException(ImageError,"UnableToCreateColorTransform",
          name);
      /*
        Transform image as dictated by the source & target image profiles.
      */
      source_info.pixels=AcquirePixelTLS(image->columns,
        source_info.channels,highres);
      target_info.pixels=AcquirePixelTLS(image->columns,
        target_info.channels,highres);
      if ((source_info.pixels == (void **) NULL) ||
          (target_info.pixels == (void **) NULL))
        {
          target_info.pixels=DestroyPixelTLS(target_info.pixels);
          source_info.pixels=DestroyPixelTLS(source_info.pixels);
          transform=DestroyTransformTLS(transform);
          ThrowProfileException(ResourceLimitError,
            "MemoryAllocationFailed",image->filename);
        }
      if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
        {
          target_info.pixels=DestroyPixelTLS(target_info.pixels);
          source_info.pixels=DestroyPixelTLS(source_info.pixels);
          transform=DestroyTransformTLS(transform);
          if (source_info.profile != (cmsHPROFILE) NULL)
            (void) cmsCloseProfile(source_info.profile);
          if (target_info.profile != (cmsHPROFILE) NULL)
            (void) cmsCloseProfile(target_info.profile);
          return(MagickFalse);
        }
      if (target_info.colorspace == CMYKColorspace)
        (void) SetImageColorspace(image,target_info.colorspace,exception);
      progress=0;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        MagickBooleanType
          sync;

        Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        if (highres != MagickFalse)
          TransformDoublePixels(id,image,&source_info,&target_info,
            transform,q);
        else
          TransformQuantumPixels(id,image,&source_info,&target_info,
            transform,q);
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(image,ProfileImageTag,progress,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      (void) SetImageColorspace(image,target_info.colorspace,exception);
      switch (signature)
      {
        case cmsSigRgbData:
        {
          image->type=image->alpha_trait == UndefinedPixelTrait ?
            TrueColorType : TrueColorAlphaType;
          break;
        }
        case cmsSigCmykData:
        {
          image->type=image->alpha_trait == UndefinedPixelTrait ?
            ColorSeparationType : ColorSeparationAlphaType;
          break;
        }
        case cmsSigGrayData:
        {
          image->type=image->alpha_trait == UndefinedPixelTrait ?
            GrayscaleType : GrayscaleAlphaType;
          break;
        }
        default:
          break;
      }
      target_info.pixels=DestroyPixelTLS(target_info.pixels);
      source_info.pixels=DestroyPixelTLS(source_info.pixels);
      transform=DestroyTransformTLS(transform);
      if ((status != MagickFalse) &&
          (cmsGetDeviceClass(source_info.profile) != cmsSigLinkClass))
        status=SetImageProfile(image,name,profile,exception);
      if (target_info.profile != (cmsHPROFILE) NULL)
        (void) cmsCloseProfile(target_info.profile);
    }
  (void) cmsCloseProfile(source_info.profile);
  cmsDeleteContext(cms_context);
}
#endif
    }
  profile=DestroyStringInfo(profile);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e m o v e I m a g e P r o f i l e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RemoveImageProfile() removes a named profile from the image and returns its
%  value.
%
%  The format of the RemoveImageProfile method is:
%
%      StringInfo *RemoveImageProfile(Image *image,const char *name)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name.
%
*/
MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name)
{
  StringInfo
    *profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  WriteTo8BimProfile(image,name,(StringInfo *) NULL);
  profile=(StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *)
    image->profiles,name);
  return(profile);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s e t I m a g e P r o f i l e I t e r a t o r                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetImageProfileIterator() resets the image profile iterator.  Use it in
%  conjunction with GetNextImageProfile() to iterate over all the profiles
%  associated with an image.
%
%  The format of the ResetImageProfileIterator method is:
%
%      ResetImageProfileIterator(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void ResetImageProfileIterator(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return;
  ResetSplayTreeIterator((SplayTreeInfo *) image->profiles);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e P r o f i l e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageProfile() adds a named profile to the image.  If a profile with the
%  same name already exists, it is replaced.  This method differs from the
%  ProfileImage() method in that it does not apply CMS color profiles.
%
%  The format of the SetImageProfile method is:
%
%      MagickBooleanType SetImageProfile(Image *image,const char *name,
%        const StringInfo *profile,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
% % o name: the profile name, for example icc, exif, and 8bim (8bim is the % Photoshop wrapper for iptc profiles). % % o profile: A StringInfo structure that contains the named profile. % */ static void *DestroyProfile(void *profile) { return((void *) DestroyStringInfo((StringInfo *) profile)); } static inline const unsigned char *ReadResourceByte(const unsigned char *p, unsigned char *quantum) { *quantum=(*p++); return(p); } static inline const unsigned char *ReadResourceLong(const unsigned char *p, unsigned int *quantum) { *quantum=(unsigned int) (*p++) << 24; *quantum|=(unsigned int) (*p++) << 16; *quantum|=(unsigned int) (*p++) << 8; *quantum|=(unsigned int) (*p++); return(p); } static inline const unsigned char *ReadResourceShort(const unsigned char *p, unsigned short *quantum) { *quantum=(unsigned short) (*p++) << 8; *quantum|=(unsigned short) (*p++); return(p); } static inline void WriteResourceLong(unsigned char *p, const unsigned int quantum) { unsigned char buffer[4]; buffer[0]=(unsigned char) (quantum >> 24); buffer[1]=(unsigned char) (quantum >> 16); buffer[2]=(unsigned char) (quantum >> 8); buffer[3]=(unsigned char) quantum; (void) memcpy(p,buffer,4); } static void WriteTo8BimProfile(Image *image,const char *name, const StringInfo *profile) { const unsigned char *datum, *q; const unsigned char *p; size_t length; StringInfo *profile_8bim; ssize_t count; unsigned char length_byte; unsigned int value; unsigned short id, profile_id; if (LocaleCompare(name,"icc") == 0) profile_id=0x040f; else if (LocaleCompare(name,"iptc") == 0) profile_id=0x0404; else if (LocaleCompare(name,"xmp") == 0) profile_id=0x0424; else return; profile_8bim=(StringInfo *) GetValueFromSplayTree((SplayTreeInfo *) image->profiles,"8bim"); if (profile_8bim == (StringInfo *) NULL) return; datum=GetStringInfoDatum(profile_8bim); length=GetStringInfoLength(profile_8bim); for (p=datum; p < (datum+length-16); ) { q=p; if (LocaleNCompare((char *) p,"8BIM",4) != 0) break; p+=4; p=ReadResourceShort(p,&id); p=ReadResourceByte(p,&length_byte); p+=length_byte; if (((length_byte+1) & 0x01) != 0) p++; if (p > (datum+length-4)) break; p=ReadResourceLong(p,&value); count=(ssize_t) value; if ((count & 0x01) != 0) count++; if ((count < 0) || (p > (datum+length-count)) || (count > (ssize_t) length)) break; if (id != profile_id) p+=count; else { size_t extent, offset; ssize_t extract_extent; StringInfo *extract_profile; extract_extent=0; extent=(datum+length)-(p+count); if (profile == (StringInfo *) NULL) { offset=(q-datum); extract_profile=AcquireStringInfo(offset+extent); (void) memcpy(extract_profile->datum,datum,offset); } else { offset=(p-datum); extract_extent=profile->length; if ((extract_extent & 0x01) != 0) extract_extent++; extract_profile=AcquireStringInfo(offset+extract_extent+extent); (void) memcpy(extract_profile->datum,datum,offset-4); WriteResourceLong(extract_profile->datum+offset-4,(unsigned int) profile->length); (void) memcpy(extract_profile->datum+offset, profile->datum,profile->length); } (void) memcpy(extract_profile->datum+offset+extract_extent, p+count,extent); (void) AddValueToSplayTree((SplayTreeInfo *) image->profiles, ConstantString("8bim"),CloneStringInfo(extract_profile)); extract_profile=DestroyStringInfo(extract_profile); break; } } } static void GetProfilesFromResourceBlock(Image *image, const StringInfo *resource_block,ExceptionInfo *exception) { const unsigned char *datum; const unsigned char *p; size_t length; ssize_t count; StringInfo *profile; unsigned char length_byte; unsigned int value; 
unsigned short id; datum=GetStringInfoDatum(resource_block); length=GetStringInfoLength(resource_block); for (p=datum; p < (datum+length-16); ) { if (LocaleNCompare((char *) p,"8BIM",4) != 0) break; p+=4; p=ReadResourceShort(p,&id); p=ReadResourceByte(p,&length_byte); p+=length_byte; if (((length_byte+1) & 0x01) != 0) p++; if (p > (datum+length-4)) break; p=ReadResourceLong(p,&value); count=(ssize_t) value; if ((p > (datum+length-count)) || (count > (ssize_t) length) || (count < 0)) break; switch (id) { case 0x03ed: { unsigned int resolution; unsigned short units; /* Resolution. */ if (count < 10) break; p=ReadResourceLong(p,&resolution); image->resolution.x=((double) resolution)/65536.0; p=ReadResourceShort(p,&units)+2; p=ReadResourceLong(p,&resolution)+4; image->resolution.y=((double) resolution)/65536.0; /* Values are always stored as pixels per inch. */ if ((ResolutionType) units != PixelsPerCentimeterResolution) image->units=PixelsPerInchResolution; else { image->units=PixelsPerCentimeterResolution; image->resolution.x/=2.54; image->resolution.y/=2.54; } break; } case 0x0404: { /* IPTC Profile */ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfileInternal(image,"iptc",profile,MagickTrue, exception); profile=DestroyStringInfo(profile); p+=count; break; } case 0x040c: { /* Thumbnail. */ p+=count; break; } case 0x040f: { /* ICC Profile. */ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfileInternal(image,"icc",profile,MagickTrue, exception); profile=DestroyStringInfo(profile); p+=count; break; } case 0x0422: { /* EXIF Profile. */ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfileInternal(image,"exif",profile,MagickTrue, exception); profile=DestroyStringInfo(profile); p+=count; break; } case 0x0424: { /* XMP Profile. */ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfileInternal(image,"xmp",profile,MagickTrue, exception); profile=DestroyStringInfo(profile); p+=count; break; } default: { p+=count; break; } } if ((count & 0x01) != 0) p++; } } static void PatchCorruptProfile(const char *name,StringInfo *profile) { unsigned char *p; size_t length; /* Detect corrupt profiles and if discovered, repair. */ if (LocaleCompare(name,"xmp") == 0) { /* Remove garbage after xpacket end. */ p=GetStringInfoDatum(profile); p=(unsigned char *) strstr((const char *) p,"<?xpacket end=\"w\"?>"); if (p != (unsigned char *) NULL) { p+=19; length=p-GetStringInfoDatum(profile); if (length != GetStringInfoLength(profile)) { *p='\0'; SetStringInfoLength(profile,length); } } return; } if (LocaleCompare(name,"exif") == 0) { /* Check if profile starts with byte order marker instead of Exif. */ p=GetStringInfoDatum(profile); if ((LocaleNCompare((const char *) p,"MM",2) == 0) || (LocaleNCompare((const char *) p,"II",2) == 0)) { const unsigned char profile_start[] = "Exif\0\0"; StringInfo *exif_profile; exif_profile=AcquireStringInfo(6); if (exif_profile != (StringInfo *) NULL) { SetStringInfoDatum(exif_profile,profile_start); ConcatenateStringInfo(exif_profile,profile); SetStringInfoLength(profile,GetStringInfoLength(exif_profile)); SetStringInfo(profile,exif_profile); exif_profile=DestroyStringInfo(exif_profile); } } } } #if defined(MAGICKCORE_XML_DELEGATE) static MagickBooleanType ValidateXMPProfile(Image *image, const StringInfo *profile,ExceptionInfo *exception) { xmlDocPtr document; /* Parse XML profile. 
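    xmlReadMemory() is used here only to verify that the blob is well-formed
    XML; the parsed tree is freed immediately and parser diagnostics are
    suppressed with XML_PARSE_NOERROR | XML_PARSE_NOWARNING.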
*/ document=xmlReadMemory((const char *) GetStringInfoDatum(profile),(int) GetStringInfoLength(profile),"xmp.xml",NULL,XML_PARSE_NOERROR | XML_PARSE_NOWARNING); if (document == (xmlDocPtr) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning, "CorruptImageProfile","`%s' (XMP)",image->filename); return(MagickFalse); } xmlFreeDoc(document); return(MagickTrue); } #else static MagickBooleanType ValidateXMPProfile(Image *image, const StringInfo *profile,ExceptionInfo *exception) { (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","'%s' (XML)", image->filename); return(MagickFalse); } #endif static MagickBooleanType SetImageProfileInternal(Image *image,const char *name, const StringInfo *profile,const MagickBooleanType recursive, ExceptionInfo *exception) { char key[MagickPathExtent]; MagickBooleanType status; StringInfo *clone_profile; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); clone_profile=CloneStringInfo(profile); PatchCorruptProfile(name,clone_profile); if ((LocaleCompare(name,"xmp") == 0) && (ValidateXMPProfile(image,clone_profile,exception) == MagickFalse)) { clone_profile=DestroyStringInfo(clone_profile); return(MagickTrue); } if (image->profiles == (SplayTreeInfo *) NULL) image->profiles=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory, DestroyProfile); (void) CopyMagickString(key,name,MagickPathExtent); LocaleLower(key); status=AddValueToSplayTree((SplayTreeInfo *) image->profiles, ConstantString(key),clone_profile); if (status != MagickFalse) { if (LocaleCompare(name,"8bim") == 0) GetProfilesFromResourceBlock(image,clone_profile,exception); else if (recursive == MagickFalse) WriteTo8BimProfile(image,name,clone_profile); } return(status); } MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name, const StringInfo *profile,ExceptionInfo *exception) { return(SetImageProfileInternal(image,name,profile,MagickFalse,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S y n c I m a g e P r o f i l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncImageProfiles() synchronizes image properties with the image profiles. % Currently we only support updating the EXIF resolution and orientation. % % The format of the SyncImageProfiles method is: % % MagickBooleanType SyncImageProfiles(Image *image) % % A description of each parameter follows: % % o image: the image. 
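%
%  Note that both the "8BIM" and "EXIF" profiles are visited: an 8BIM
%  resource block is scanned for its resolution record (0x03ED) and for an
%  embedded EXIF payload (0x0422), which is synchronized in place as well.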
% */ static inline int ReadProfileByte(unsigned char **p,size_t *length) { int c; if (*length < 1) return(EOF); c=(int) (*(*p)++); (*length)--; return(c); } static inline signed short ReadProfileShort(const EndianType endian, unsigned char *buffer) { union { unsigned int unsigned_value; signed int signed_value; } quantum; unsigned short value; if (endian == LSBEndian) { value=(unsigned short) buffer[1] << 8; value|=(unsigned short) buffer[0]; quantum.unsigned_value=value & 0xffff; return(quantum.signed_value); } value=(unsigned short) buffer[0] << 8; value|=(unsigned short) buffer[1]; quantum.unsigned_value=value & 0xffff; return(quantum.signed_value); } static inline signed int ReadProfileLong(const EndianType endian, unsigned char *buffer) { union { unsigned int unsigned_value; signed int signed_value; } quantum; unsigned int value; if (endian == LSBEndian) { value=(unsigned int) buffer[3] << 24; value|=(unsigned int) buffer[2] << 16; value|=(unsigned int) buffer[1] << 8; value|=(unsigned int) buffer[0]; quantum.unsigned_value=value & 0xffffffff; return(quantum.signed_value); } value=(unsigned int) buffer[0] << 24; value|=(unsigned int) buffer[1] << 16; value|=(unsigned int) buffer[2] << 8; value|=(unsigned int) buffer[3]; quantum.unsigned_value=value & 0xffffffff; return(quantum.signed_value); } static inline signed int ReadProfileMSBLong(unsigned char **p,size_t *length) { signed int value; if (*length < 4) return(0); value=ReadProfileLong(MSBEndian,*p); (*length)-=4; *p+=4; return(value); } static inline signed short ReadProfileMSBShort(unsigned char **p, size_t *length) { signed short value; if (*length < 2) return(0); value=ReadProfileShort(MSBEndian,*p); (*length)-=2; *p+=2; return(value); } static inline void WriteProfileLong(const EndianType endian, const size_t value,unsigned char *p) { unsigned char buffer[4]; if (endian == LSBEndian) { buffer[0]=(unsigned char) value; buffer[1]=(unsigned char) (value >> 8); buffer[2]=(unsigned char) (value >> 16); buffer[3]=(unsigned char) (value >> 24); (void) memcpy(p,buffer,4); return; } buffer[0]=(unsigned char) (value >> 24); buffer[1]=(unsigned char) (value >> 16); buffer[2]=(unsigned char) (value >> 8); buffer[3]=(unsigned char) value; (void) memcpy(p,buffer,4); } static void WriteProfileShort(const EndianType endian, const unsigned short value,unsigned char *p) { unsigned char buffer[2]; if (endian == LSBEndian) { buffer[0]=(unsigned char) value; buffer[1]=(unsigned char) (value >> 8); (void) memcpy(p,buffer,2); return; } buffer[0]=(unsigned char) (value >> 8); buffer[1]=(unsigned char) value; (void) memcpy(p,buffer,2); } static MagickBooleanType SyncExifProfile(const Image *image,unsigned char *exif, size_t length) { #define MaxDirectoryStack 16 #define EXIF_DELIMITER "\n" #define EXIF_NUM_FORMATS 12 #define TAG_EXIF_OFFSET 0x8769 #define TAG_INTEROP_OFFSET 0xa005 typedef struct _DirectoryInfo { unsigned char *directory; size_t entry; } DirectoryInfo; DirectoryInfo directory_stack[MaxDirectoryStack] = { 0 }; EndianType endian; size_t entry, number_entries; SplayTreeInfo *exif_resources; ssize_t id, level, offset; static int format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8}; unsigned char *directory; if (length < 16) return(MagickFalse); id=(ssize_t) ReadProfileShort(LSBEndian,exif); if ((id != 0x4949) && (id != 0x4D4D)) { while (length != 0) { if (ReadProfileByte(&exif,&length) != 0x45) continue; if (ReadProfileByte(&exif,&length) != 0x78) continue; if (ReadProfileByte(&exif,&length) != 0x69) continue; if 
(ReadProfileByte(&exif,&length) != 0x66)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x00)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x00)
          continue;
        break;
      }
      if (length < 16)
        return(MagickFalse);
      id=(ssize_t) ReadProfileShort(LSBEndian,exif);
    }
  endian=LSBEndian;
  if (id == 0x4949)
    endian=LSBEndian;
  else
    if (id == 0x4D4D)
      endian=MSBEndian;
    else
      return(MagickFalse);
  if (ReadProfileShort(endian,exif+2) != 0x002a)
    return(MagickFalse);
  /*
    This is the offset to the first IFD.
  */
  offset=(ssize_t) ReadProfileLong(endian,exif+4);
  if ((offset < 0) || ((size_t) offset >= length))
    return(MagickFalse);
  directory=exif+offset;
  level=0;
  entry=0;
  exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL,
    (void *(*)(void *)) NULL,(void *(*)(void *)) NULL);
  do
  {
    if (level > 0)
      {
        level--;
        directory=directory_stack[level].directory;
        entry=directory_stack[level].entry;
      }
    if ((directory < exif) || (directory > (exif+length-2)))
      break;
    /*
      Determine how many entries there are in the current IFD.
    */
    number_entries=(size_t) ReadProfileShort(endian,directory);
    for ( ; entry < number_entries; entry++)
    {
      int
        components;

      unsigned char
        *p,
        *q;

      size_t
        number_bytes;

      ssize_t
        format,
        tag_value;

      q=(unsigned char *) (directory+2+(12*entry));
      if (q > (exif+length-12))
        break;  /* corrupt EXIF */
      if (GetValueFromSplayTree(exif_resources,q) == q)
        break;
      (void) AddValueToSplayTree(exif_resources,q,q);
      tag_value=(ssize_t) ReadProfileShort(endian,q);
      format=(ssize_t) ReadProfileShort(endian,q+2);
      if ((format < 0) || ((format-1) >= EXIF_NUM_FORMATS))
        break;
      components=(int) ReadProfileLong(endian,q+4);
      if (components < 0)
        break;  /* corrupt EXIF */
      number_bytes=(size_t) components*format_bytes[format];
      if ((ssize_t) number_bytes < components)
        break;  /* prevent overflow */
      if (number_bytes <= 4)
        p=q+8;
      else
        {
          /*
            The directory entry contains an offset.
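            Values wider than 4 bytes do not fit in the entry itself; the
            entry instead stores an offset, relative to the start of the
            TIFF header, where the value lives.  The bounds checks below
            reject offsets that would fall outside the profile buffer.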
*/ offset=(ssize_t) ReadProfileLong(endian,q+8); if ((offset < 0) || ((size_t) (offset+number_bytes) > length)) continue; if (~length < number_bytes) continue; /* prevent overflow */ p=(unsigned char *) (exif+offset); } switch (tag_value) { case 0x011a: { (void) WriteProfileLong(endian,(size_t) (image->resolution.x+0.5),p); if (number_bytes == 8) (void) WriteProfileLong(endian,1UL,p+4); break; } case 0x011b: { (void) WriteProfileLong(endian,(size_t) (image->resolution.y+0.5),p); if (number_bytes == 8) (void) WriteProfileLong(endian,1UL,p+4); break; } case 0x0112: { if (number_bytes == 4) { (void) WriteProfileLong(endian,(size_t) image->orientation,p); break; } (void) WriteProfileShort(endian,(unsigned short) image->orientation, p); break; } case 0x0128: { if (number_bytes == 4) { (void) WriteProfileLong(endian,((size_t) image->units)+1,p); break; } (void) WriteProfileShort(endian,(unsigned short) (image->units+1),p); break; } default: break; } if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET)) { offset=(ssize_t) ReadProfileLong(endian,p); if (((size_t) offset < length) && (level < (MaxDirectoryStack-2))) { directory_stack[level].directory=directory; entry++; directory_stack[level].entry=entry; level++; directory_stack[level].directory=exif+offset; directory_stack[level].entry=0; level++; if ((directory+2+(12*number_entries)) > (exif+length)) break; offset=(ssize_t) ReadProfileLong(endian,directory+2+(12* number_entries)); if ((offset != 0) && ((size_t) offset < length) && (level < (MaxDirectoryStack-2))) { directory_stack[level].directory=exif+offset; directory_stack[level].entry=0; level++; } } break; } } } while (level > 0); exif_resources=DestroySplayTree(exif_resources); return(MagickTrue); } static MagickBooleanType Sync8BimProfile(const Image *image, const StringInfo *profile) { size_t length; ssize_t count; unsigned char *p; unsigned short id; length=GetStringInfoLength(profile); p=GetStringInfoDatum(profile); while (length != 0) { if (ReadProfileByte(&p,&length) != 0x38) continue; if (ReadProfileByte(&p,&length) != 0x42) continue; if (ReadProfileByte(&p,&length) != 0x49) continue; if (ReadProfileByte(&p,&length) != 0x4D) continue; if (length < 7) return(MagickFalse); id=ReadProfileMSBShort(&p,&length); count=(ssize_t) ReadProfileByte(&p,&length); if ((count >= (ssize_t) length) || (count < 0)) return(MagickFalse); p+=count; length-=count; if ((*p & 0x01) == 0) (void) ReadProfileByte(&p,&length); count=(ssize_t) ReadProfileMSBLong(&p,&length); if ((count > (ssize_t) length) || (count < 0)) return(MagickFalse); if ((id == 0x3ED) && (count == 16)) { if (image->units == PixelsPerCentimeterResolution) WriteProfileLong(MSBEndian,(unsigned int) CastDoubleToLong( image->resolution.x*2.54*65536.0),p); else WriteProfileLong(MSBEndian,(unsigned int) CastDoubleToLong( image->resolution.x*65536.0),p); WriteProfileShort(MSBEndian,(unsigned short) image->units,p+4); if (image->units == PixelsPerCentimeterResolution) WriteProfileLong(MSBEndian,(unsigned int) CastDoubleToLong( image->resolution.y*2.54*65536.0),p+8); else WriteProfileLong(MSBEndian,(unsigned int) CastDoubleToLong( image->resolution.y*65536.0),p+8); WriteProfileShort(MSBEndian,(unsigned short) image->units,p+12); } if (id == 0x0422) (void) SyncExifProfile(image,p,count); p+=count; length-=count; } return(MagickTrue); } MagickPrivate MagickBooleanType SyncImageProfiles(Image *image) { MagickBooleanType status; StringInfo *profile; status=MagickTrue; profile=(StringInfo *) GetImageProfile(image,"8BIM"); if 
(profile != (StringInfo *) NULL) if (Sync8BimProfile(image,profile) == MagickFalse) status=MagickFalse; profile=(StringInfo *) GetImageProfile(image,"EXIF"); if (profile != (StringInfo *) NULL) if (SyncExifProfile(image,GetStringInfoDatum(profile), GetStringInfoLength(profile)) == MagickFalse) status=MagickFalse; return(status); } static void UpdateClipPath(unsigned char *blob,size_t length, const size_t old_columns,const size_t old_rows, const RectangleInfo *new_geometry) { ssize_t i; ssize_t knot_count, selector; knot_count=0; while (length != 0) { selector=(ssize_t) ReadProfileMSBShort(&blob,&length); switch (selector) { case 0: case 3: { if (knot_count != 0) { blob+=24; length-=MagickMin(24,(ssize_t) length); break; } /* Expected subpath length record. */ knot_count=(ssize_t) ReadProfileMSBShort(&blob,&length); blob+=22; length-=MagickMin(22,(ssize_t) length); break; } case 1: case 2: case 4: case 5: { if (knot_count == 0) { /* Unexpected subpath knot. */ blob+=24; length-=MagickMin(24,(ssize_t) length); break; } /* Add sub-path knot */ for (i=0; i < 3; i++) { double x, y; signed int xx, yy; y=(double) ReadProfileMSBLong(&blob,&length); y=y*old_rows/4096.0/4096.0; y-=new_geometry->y; yy=(signed int) ((y*4096*4096)/new_geometry->height); WriteProfileLong(MSBEndian,(size_t) yy,blob-4); x=(double) ReadProfileMSBLong(&blob,&length); x=x*old_columns/4096.0/4096.0; x-=new_geometry->x; xx=(signed int) ((x*4096*4096)/new_geometry->width); WriteProfileLong(MSBEndian,(size_t) xx,blob-4); } knot_count--; break; } case 6: case 7: case 8: default: { blob+=24; length-=MagickMin(24,(ssize_t) length); break; } } } } MagickPrivate void Update8BIMClipPath(const Image *image, const size_t old_columns,const size_t old_rows, const RectangleInfo *new_geometry) { const StringInfo *profile; size_t length; ssize_t count, id; unsigned char *info; assert(image != (Image *) NULL); assert(new_geometry != (RectangleInfo *) NULL); profile=GetImageProfile(image,"8bim"); if (profile == (StringInfo *) NULL) return; length=GetStringInfoLength(profile); info=GetStringInfoDatum(profile); while (length > 0) { if (ReadProfileByte(&info,&length) != (unsigned char) '8') continue; if (ReadProfileByte(&info,&length) != (unsigned char) 'B') continue; if (ReadProfileByte(&info,&length) != (unsigned char) 'I') continue; if (ReadProfileByte(&info,&length) != (unsigned char) 'M') continue; id=(ssize_t) ReadProfileMSBShort(&info,&length); count=(ssize_t) ReadProfileByte(&info,&length); if ((count != 0) && ((size_t) count <= length)) { info+=count; length-=count; } if ((count & 0x01) == 0) (void) ReadProfileByte(&info,&length); count=(ssize_t) ReadProfileMSBLong(&info,&length); if ((count < 0) || ((size_t) count > length)) { length=0; continue; } if ((id > 1999) && (id < 2999)) UpdateClipPath(info,(size_t) count,old_columns,old_rows,new_geometry); info+=count; length-=MagickMin(count,(ssize_t) length); } }
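/*
  A minimal illustrative sketch, not part of the original profile.c: how the
  endian-aware helpers above round-trip a 16-bit value in either byte order.
  The function name and the guard macro are hypothetical; compile with
  -DMAGICKCORE_PROFILE_EXAMPLE to include it.
*/
#if defined(MAGICKCORE_PROFILE_EXAMPLE)
static void ExampleProfileShortRoundTrip(void)
{
  unsigned char
    buffer[2];

  signed short
    value;

  /* Big-endian: 0x12 lands in buffer[0], 0x34 in buffer[1]. */
  WriteProfileShort(MSBEndian,(unsigned short) 0x1234,buffer);
  value=ReadProfileShort(MSBEndian,buffer);  /* value == 0x1234 */
  /* Little-endian: the byte order in the buffer is reversed. */
  WriteProfileShort(LSBEndian,(unsigned short) 0x1234,buffer);
  value=ReadProfileShort(LSBEndian,buffer);  /* value == 0x1234 again */
  (void) value;
}
#endif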
attribute.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE % % A A T T R R I B B U U T E % % AAAAA T T RRRR I BBBB U U T EEE % % A A T T R R I B B U U T E % % A A T T R R IIIII BBBB UUU T EEEEE % % % % % % MagickCore Get / Set Image Attributes % % % % Software Design % % Cristy % % October 2002 % % % % % % Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/client.h" #include "magick/channel.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colormap-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/constitute.h" #include "magick/deprecate.h" #include "magick/draw.h" #include "magick/draw-private.h" #include "magick/effect.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/geometry.h" #include "magick/histogram.h" #include "magick/identify.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/memory_.h" #include "magick/magick.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/paint.h" #include "magick/pixel.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantize.h" #include "magick/random_.h" #include "magick/resource_.h" #include "magick/semaphore.h" #include "magick/segment.h" #include "magick/splay-tree.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/transform.h" #include "magick/utility.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e B o u n d i n g B o x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageBoundingBox() returns the bounding box of an image canvas. % % The format of the GetImageBoundingBox method is: % % RectangleInfo GetImageBoundingBox(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o bounds: Method GetImageBoundingBox returns the bounding box of an % image canvas. % % o image: the image. % % o exception: return any errors or warnings in this structure. 
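%
%  For example (an illustrative sketch, not from the original text), the
%  bounding box is typically handed straight to CropImage() to trim a
%  constant-color border:
%
%      RectangleInfo bounds = GetImageBoundingBox(image,&image->exception);
%      Image *trimmed = CropImage(image,&bounds,&image->exception);
%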
% */ MagickExport RectangleInfo GetImageBoundingBox(const Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; MagickPixelPacket target[3], zero; RectangleInfo bounds; register const PixelPacket *p; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); bounds.width=0; bounds.height=0; bounds.x=(ssize_t) image->columns; bounds.y=(ssize_t) image->rows; GetMagickPixelPacket(image,&target[0]); image_view=AcquireVirtualCacheView(image,exception); p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); if (p == (const PixelPacket *) NULL) { image_view=DestroyCacheView(image_view); return(bounds); } SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view), &target[0]); GetMagickPixelPacket(image,&target[1]); p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); if (p != (const PixelPacket *) NULL) SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view), &target[1]); GetMagickPixelPacket(image,&target[2]); p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); if (p != (const PixelPacket *) NULL) SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view), &target[2]); status=MagickTrue; GetMagickPixelPacket(image,&zero); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickPixelPacket pixel; RectangleInfo bounding_box; register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register ssize_t x; if (status == MagickFalse) continue; #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_GetImageBoundingBox) #endif bounding_box=bounds; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,p,indexes+x,&pixel); if ((x < bounding_box.x) && (IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse)) bounding_box.x=x; if ((x > (ssize_t) bounding_box.width) && (IsMagickColorSimilar(&pixel,&target[1]) == MagickFalse)) bounding_box.width=(size_t) x; if ((y < bounding_box.y) && (IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse)) bounding_box.y=y; if ((y > (ssize_t) bounding_box.height) && (IsMagickColorSimilar(&pixel,&target[2]) == MagickFalse)) bounding_box.height=(size_t) y; p++; } #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_GetImageBoundingBox) #endif { if (bounding_box.x < bounds.x) bounds.x=bounding_box.x; if (bounding_box.y < bounds.y) bounds.y=bounding_box.y; if (bounding_box.width > bounds.width) bounds.width=bounding_box.width; if (bounding_box.height > bounds.height) bounds.height=bounding_box.height; } } image_view=DestroyCacheView(image_view); if ((bounds.width == 0) || (bounds.height == 0)) (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); else { bounds.width-=(bounds.x-1); bounds.height-=(bounds.y-1); } return(bounds); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l D e p t h % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannelDepth() returns the depth of a particular image channel. % % The format of the GetImageChannelDepth method is: % % size_t GetImageDepth(const Image *image,ExceptionInfo *exception) % size_t GetImageChannelDepth(const Image *image, % const ChannelType channel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o exception: return any errors or warnings in this structure. % */ MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception) { return(GetImageChannelDepth(image,CompositeChannels,exception)); } MagickExport size_t GetImageChannelDepth(const Image *image, const ChannelType channel,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; register ssize_t i; size_t *current_depth, depth, number_threads; ssize_t y; /* Compute image depth. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); number_threads=(size_t) GetMagickResourceLimit(ThreadResource); current_depth=(size_t *) AcquireQuantumMemory(number_threads, sizeof(*current_depth)); if (current_depth == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); status=MagickTrue; for (i=0; i < (ssize_t) number_threads; i++) current_depth[i]=1; if ((image->storage_class == PseudoClass) && (image->matte == MagickFalse)) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,1,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { const int id = GetOpenMPThreadId(); while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH) { MagickBooleanType atDepth; QuantumAny range; atDepth=MagickTrue; range=GetQuantumRange(current_depth[id]); if ((atDepth != MagickFalse) && ((channel & RedChannel) != 0)) if (IsPixelAtDepth(image->colormap[i].red,range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && ((channel & GreenChannel) != 0)) if (IsPixelAtDepth(image->colormap[i].green,range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && ((channel & BlueChannel) != 0)) if (IsPixelAtDepth(image->colormap[i].blue,range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse)) break; current_depth[id]++; } } depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } image_view=AcquireVirtualCacheView(image,exception); #if !defined(MAGICKCORE_HDRI_SUPPORT) DisableMSCWarning(4127) if (QuantumRange <= MaxMap) RestoreMSCWarning { size_t *depth_map; /* Scale pixels to desired (optimized with depth map). 
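    More precisely: depth_map[i] caches, for every possible quantum value i,
    the smallest bit depth that represents it exactly, so the per-pixel scan
    below is a single table lookup instead of a per-depth loop.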
*/ depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map)); if (depth_map == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); for (i=0; i <= (ssize_t) MaxMap; i++) { unsigned int depth; for (depth=1; depth < MAGICKCORE_QUANTUM_DEPTH; depth++) { Quantum pixel; QuantumAny range; range=GetQuantumRange(depth); pixel=(Quantum) i; if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range)) break; } depth_map[i]=depth; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { Quantum pixel; if ((channel & RedChannel) != 0) { pixel=GetPixelRed(p); if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id]) current_depth[id]=depth_map[ScaleQuantumToMap(pixel)]; } if ((channel & GreenChannel) != 0) { pixel=GetPixelGreen(p); if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id]) current_depth[id]=depth_map[ScaleQuantumToMap(pixel)]; } if ((channel & BlueChannel) != 0) { pixel=GetPixelBlue(p); if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id]) current_depth[id]=depth_map[ScaleQuantumToMap(pixel)]; } if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) { pixel=GetPixelOpacity(p); if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id]) current_depth[id]=depth_map[ScaleQuantumToMap(pixel)]; } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { pixel=GetPixelIndex(indexes+x); if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id]) current_depth[id]=depth_map[ScaleQuantumToMap(pixel)]; } p++; } if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH) status=MagickFalse; } image_view=DestroyCacheView(image_view); depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; depth_map=(size_t *) RelinquishMagickMemory(depth_map); current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } #endif #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH) { MagickBooleanType atDepth; QuantumAny range; atDepth=MagickTrue; range=GetQuantumRange(current_depth[id]); if ((atDepth != MagickFalse) && ((channel & RedChannel) != 0)) if (IsPixelAtDepth(GetPixelRed(p),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && ((channel & GreenChannel) != 0)) if (IsPixelAtDepth(GetPixelGreen(p),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && ((channel & BlueChannel) != 0)) 
if (IsPixelAtDepth(GetPixelBlue(p),range) == MagickFalse)
            atDepth=MagickFalse;
        if ((atDepth != MagickFalse) && ((channel & OpacityChannel) != 0) &&
            (image->matte != MagickFalse))
          if (IsPixelAtDepth(GetPixelOpacity(p),range) == MagickFalse)
            atDepth=MagickFalse;
        if ((atDepth != MagickFalse) && ((channel & IndexChannel) != 0) &&
            (image->colorspace == CMYKColorspace))
          if (IsPixelAtDepth(GetPixelIndex(indexes+x),range) == MagickFalse)
            atDepth=MagickFalse;
        if (atDepth != MagickFalse)
          break;
        current_depth[id]++;
      }
      p++;
    }
    if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  depth=current_depth[0];
  for (i=1; i < (ssize_t) number_threads; i++)
    if (depth < current_depth[i])
      depth=current_depth[i];
  current_depth=(size_t *) RelinquishMagickMemory(current_depth);
  return(depth);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e Q u a n t u m D e p t h                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageQuantumDepth() returns the depth of the image rounded to a legal
%  quantum depth: 8, 16, 32, or 64.
%
%  The format of the GetImageQuantumDepth method is:
%
%      size_t GetImageQuantumDepth(const Image *image,
%        const MagickBooleanType constrain)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o constrain: a value other than MagickFalse constrains the depth to a
%      maximum of MAGICKCORE_QUANTUM_DEPTH.
%
*/
static inline double MagickMin(const double x,const double y)
{
  if (x < y)
    return(x);
  return(y);
}

MagickExport size_t GetImageQuantumDepth(const Image *image,
  const MagickBooleanType constrain)
{
  size_t
    depth;

  depth=image->depth;
  if (depth <= 8)
    depth=8;
  else
    if (depth <= 16)
      depth=16;
    else
      if (depth <= 32)
        depth=32;
      else
        if (depth <= 64)
          depth=64;
  if (constrain != MagickFalse)
    depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH);
  return(depth);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e T y p e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageType() returns the potential type of image:
%
%        Bilevel         Grayscale        GrayscaleMatte
%        Palette         PaletteMatte     TrueColor
%        TrueColorMatte  ColorSeparation  ColorSeparationMatte
%
%  To ensure the image type matches its potential, use SetImageType():
%
%      (void) SetImageType(image,GetImageType(image));
%
%  The format of the GetImageType method is:
%
%      ImageType GetImageType(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport ImageType GetImageType(const Image *image,ExceptionInfo *exception) { assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->colorspace == CMYKColorspace) { if (image->matte == MagickFalse) return(ColorSeparationType); return(ColorSeparationMatteType); } if (IsMonochromeImage(image,exception) != MagickFalse) return(BilevelType); if (IsGrayImage(image,exception) != MagickFalse) { if (image->matte != MagickFalse) return(GrayscaleMatteType); return(GrayscaleType); } if (IsPaletteImage(image,exception) != MagickFalse) { if (image->matte != MagickFalse) return(PaletteMatteType); return(PaletteType); } if (image->matte != MagickFalse) return(TrueColorMatteType); return(TrueColorType); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s G r a y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsGrayImage() returns MagickTrue if all the pixels in the image have the % same red, green, and blue intensities. % % The format of the IsGrayImage method is: % % MagickBooleanType IsGrayImage(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType IsGrayImage(const Image *image, ExceptionInfo *exception) { CacheView *image_view; ImageType type; register const PixelPacket *p; register ssize_t x; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((image->type == BilevelType) || (image->type == GrayscaleType) || (image->type == GrayscaleMatteType)) return(MagickTrue); if ((IsGrayColorspace(image->colorspace) == MagickFalse) && (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)) return(MagickFalse); type=BilevelType; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (IsGrayPixel(p) == MagickFalse) { type=UndefinedType; break; } if ((type == BilevelType) && (IsMonochromePixel(p) == MagickFalse)) type=GrayscaleType; p++; } if (type == UndefinedType) break; } image_view=DestroyCacheView(image_view); if (type == UndefinedType) return(MagickFalse); ((Image *) image)->colorspace=GRAYColorspace; if (SyncImagePixelCache((Image *) image,exception) == MagickFalse) return(MagickFalse); ((Image *) image)->type=type; if ((type == GrayscaleType) && (image->matte != MagickFalse)) ((Image *) image)->type=GrayscaleMatteType; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s M o n o c h r o m e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsMonochromeImage() returns MagickTrue if all the pixels in the image have % the same red, green, and blue intensities and the intensity is either % 0 or QuantumRange. 
%
%  The format of the IsMonochromeImage method is:
%
%      MagickBooleanType IsMonochromeImage(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsMonochromeImage(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  ImageType
    type;

  register ssize_t
    x;

  register const PixelPacket
    *p;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->type == BilevelType)
    return(MagickTrue);
  if ((IsGrayColorspace(image->colorspace) == MagickFalse) &&
      (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse))
    return(MagickFalse);
  type=BilevelType;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsMonochromePixel(p) == MagickFalse)
        {
          type=UndefinedType;
          break;
        }
      p++;
    }
    if (type == UndefinedType)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if (type == UndefinedType)
    return(MagickFalse);
  ((Image *) image)->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  ((Image *) image)->type=type;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s O p a q u e I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsOpaqueImage() returns MagickTrue if none of the pixels in the image have
%  an opacity value other than opaque (0).
%
%  The format of the IsOpaqueImage method is:
%
%      MagickBooleanType IsOpaqueImage(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsOpaqueImage(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  register const PixelPacket
    *p;

  register ssize_t
    x;

  ssize_t
    y;

  /*
    Determine if image is opaque.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->matte == MagickFalse)
    return(MagickTrue);
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelOpacity(p) != OpaqueOpacity)
        break;
      p++;
    }
    if (x < (ssize_t) image->columns)
      break;
  }
  image_view=DestroyCacheView(image_view);
  return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e C h a n n e l D e p t h                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageChannelDepth() sets the depth of the image.
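%  For example, setting the depth to 1 requantizes every channel sample to
%  either 0 or QuantumRange, while a depth of MAGICKCORE_QUANTUM_DEPTH or
%  more leaves the pixels untouched and only records the requested depth.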
% % The format of the SetImageChannelDepth method is: % % MagickBooleanType SetImageDepth(Image *image,const size_t depth) % MagickBooleanType SetImageChannelDepth(Image *image, % const ChannelType channel,const size_t depth) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o depth: the image depth. % */ static inline Quantum ClampPixel(const MagickRealType value) { #if !defined(MAGICKCORE_HDRI_SUPPORT) return((Quantum) value); #else if (value < 0.0f) return(0.0f); if (value >= (MagickRealType) QuantumRange) return((Quantum) QuantumRange); return(value); #endif } MagickExport MagickBooleanType SetImageDepth(Image *image, const size_t depth) { return(SetImageChannelDepth(image,CompositeChannels,depth)); } MagickExport MagickBooleanType SetImageChannelDepth(Image *image, const ChannelType channel,const size_t depth) { CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; QuantumAny range; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); if (depth >= MAGICKCORE_QUANTUM_DEPTH) { image->depth=depth; return(MagickTrue); } range=GetQuantumRange(depth); if (image->storage_class == PseudoClass) { register ssize_t i; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,1,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { if ((channel & RedChannel) != 0) image->colormap[i].red=ScaleAnyToQuantum(ScaleQuantumToAny( ClampPixel(image->colormap[i].red),range),range); if ((channel & GreenChannel) != 0) image->colormap[i].green=ScaleAnyToQuantum(ScaleQuantumToAny( ClampPixel(image->colormap[i].green),range),range); if ((channel & BlueChannel) != 0) image->colormap[i].blue=ScaleAnyToQuantum(ScaleQuantumToAny( ClampPixel(image->colormap[i].blue),range),range); if ((channel & OpacityChannel) != 0) image->colormap[i].opacity=ScaleAnyToQuantum(ScaleQuantumToAny( ClampPixel(image->colormap[i].opacity),range),range); } } status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if !defined(MAGICKCORE_HDRI_SUPPORT) DisableMSCWarning(4127) if (QuantumRange <= MaxMap) RestoreMSCWarning { Quantum *depth_map; register ssize_t i; /* Scale pixels to desired (optimized with depth map). 
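    Here depth_map[i] is quantum value i already rounded to the target depth,
    so each channel store below is a single table lookup.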
*/ depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map)); if (depth_map == (Quantum *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); for (i=0; i <= (ssize_t) MaxMap; i++) depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range), range); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,depth_map[ScaleQuantumToMap(GetPixelRed(q))]); if ((channel & GreenChannel) != 0) SetPixelGreen(q,depth_map[ScaleQuantumToMap(GetPixelGreen(q))]); if ((channel & BlueChannel) != 0) SetPixelBlue(q,depth_map[ScaleQuantumToMap(GetPixelBlue(q))]); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) SetPixelOpacity(q,depth_map[ScaleQuantumToMap(GetPixelOpacity(q))]); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) { status=MagickFalse; continue; } } image_view=DestroyCacheView(image_view); depth_map=(Quantum *) RelinquishMagickMemory(depth_map); if (status != MagickFalse) image->depth=depth; return(status); } #endif /* Scale pixels to desired depth. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel( GetPixelRed(q)),range),range)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel( GetPixelGreen(q)),range),range)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel( GetPixelBlue(q)),range),range)); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) SetPixelOpacity(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel( GetPixelOpacity(q)),range),range)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) { status=MagickFalse; continue; } } image_view=DestroyCacheView(image_view); if (status != MagickFalse) image->depth=depth; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageType() sets the type of image. Choose from these types: % % BilevelType, GrayscaleType, GrayscaleMatteType, PaletteType, % PaletteMatteType, TrueColorType, TrueColorMatteType, % ColorSeparationType, ColorSeparationMatteType, OptimizeType % % The format of the SetImageType method is: % % MagickBooleanType SetImageType(Image *image,const ImageType type) % % A description of each parameter follows: % % o image: the image. % % o type: Image type. 
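%
%  For example (an illustrative sketch):
%
%      (void) SetImageType(image,PaletteType);
%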
% */ MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type) { const char *artifact; ImageInfo *image_info; MagickBooleanType status; QuantizeInfo *quantize_info; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); status=MagickTrue; image_info=AcquireImageInfo(); image_info->dither=image->dither; artifact=GetImageArtifact(image,"dither"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"dither",artifact); switch (type) { case BilevelType: { if (IsGrayImage(image,&image->exception) == MagickFalse) status=TransformImageColorspace(image,GRAYColorspace); (void) NormalizeImage(image); if (IsMonochromeImage(image,&image->exception) == MagickFalse) { quantize_info=AcquireQuantizeInfo(image_info); quantize_info->number_colors=2; quantize_info->colorspace=GRAYColorspace; status=QuantizeImage(quantize_info,image); quantize_info=DestroyQuantizeInfo(quantize_info); } image->colors=2; image->matte=MagickFalse; break; } case GrayscaleType: { if (IsGrayImage(image,&image->exception) == MagickFalse) status=TransformImageColorspace(image,GRAYColorspace); image->matte=MagickFalse; break; } case GrayscaleMatteType: { if (IsGrayImage(image,&image->exception) == MagickFalse) status=TransformImageColorspace(image,GRAYColorspace); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); break; } case PaletteType: { if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) status=TransformImageColorspace(image,sRGBColorspace); if ((image->storage_class == DirectClass) || (image->colors > 256)) { quantize_info=AcquireQuantizeInfo(image_info); quantize_info->number_colors=256; status=QuantizeImage(quantize_info,image); quantize_info=DestroyQuantizeInfo(quantize_info); } image->matte=MagickFalse; break; } case PaletteBilevelMatteType: { if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) status=TransformImageColorspace(image,sRGBColorspace); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); (void) BilevelImageChannel(image,AlphaChannel,(double) QuantumRange/2.0); quantize_info=AcquireQuantizeInfo(image_info); status=QuantizeImage(quantize_info,image); quantize_info=DestroyQuantizeInfo(quantize_info); break; } case PaletteMatteType: { if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) status=TransformImageColorspace(image,sRGBColorspace); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); quantize_info=AcquireQuantizeInfo(image_info); quantize_info->colorspace=TransparentColorspace; status=QuantizeImage(quantize_info,image); quantize_info=DestroyQuantizeInfo(quantize_info); break; } case TrueColorType: { if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) status=TransformImageColorspace(image,sRGBColorspace); if (image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass); image->matte=MagickFalse; break; } case TrueColorMatteType: { if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) status=TransformImageColorspace(image,sRGBColorspace); if (image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); break; } case ColorSeparationType: { if (image->colorspace != CMYKColorspace) { if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) 
status=TransformImageColorspace(image,sRGBColorspace); status=TransformImageColorspace(image,CMYKColorspace); } if (image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass); image->matte=MagickFalse; break; } case ColorSeparationMatteType: { if (image->colorspace != CMYKColorspace) { if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) status=TransformImageColorspace(image,sRGBColorspace); status=TransformImageColorspace(image,CMYKColorspace); } if (image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); break; } case OptimizeType: case UndefinedType: break; } image_info=DestroyImageInfo(image_info); if (status == MagickFalse) return(MagickFalse); image->type=type; return(MagickTrue); }
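/*
  A minimal illustrative sketch, not part of the original attribute.c: the
  depth helpers above are commonly paired to requantize an image to the
  smallest depth that already represents it losslessly.  The function name
  and the guard macro are hypothetical; compile with
  -DMAGICKCORE_DEPTH_EXAMPLE to include it.
*/
#if defined(MAGICKCORE_DEPTH_EXAMPLE)
static MagickBooleanType ExampleNormalizeImageDepth(Image *image)
{
  size_t
    depth;

  /* Smallest per-channel depth that loses no pixel information. */
  depth=GetImageDepth(image,&image->exception);
  /* Requantize; a no-op when depth >= MAGICKCORE_QUANTUM_DEPTH. */
  return(SetImageDepth(image,depth));
}
#endif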
declare_variant_messages.c
// RUN: %clang_cc1 -triple=x86_64-pc-win32 -verify -fopenmp -x c -std=c99 -fms-extensions -Wno-pragma-pack %s // RUN: %clang_cc1 -triple=x86_64-pc-win32 -verify -fopenmp-simd -x c -std=c99 -fms-extensions -Wno-pragma-pack %s #pragma omp declare // expected-error {{expected an OpenMP directive}} int foo(void); #pragma omp declare variant // expected-error {{expected '(' after 'declare variant'}} #pragma omp declare variant( // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}} #pragma omp declare variant(foo // expected-error {{expected ')'}} expected-error {{expected 'match' clause on 'omp declare variant' directive}} expected-note {{to match this '('}} #pragma omp declare variant(x) // expected-error {{use of undeclared identifier 'x'}} #pragma omp declare variant(foo) // expected-error {{expected 'match' clause on 'omp declare variant' directive}} #pragma omp declare variant(foo) // expected-error {{expected 'match' clause on 'omp declare variant' directive}} #pragma omp declare variant(foo) xxx // expected-error {{expected 'match' clause on 'omp declare variant' directive}} #pragma omp declare variant(foo) match // expected-error {{expected '(' after 'match'}} #pragma omp declare variant(foo) match( // expected-error {{expected ')'}} expected-warning {{expected identifier or string literal describing a context set; set skipped}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} expected-note {{to match this '('}} #pragma omp declare variant(foo) match() // expected-warning {{expected identifier or string literal describing a context set; set skipped}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} #pragma omp declare variant(foo) match(xxx) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} #pragma omp declare variant(foo) match(xxx=) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} #pragma omp declare variant(foo) match(xxx=yyy) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} #pragma omp declare variant(foo) match(xxx=yyy}) // expected-error {{expected ')'}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} expected-note {{to match this '('}} #pragma omp declare variant(foo) match(xxx={) // expected-error {{expected ')'}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} expected-note {{to match this '('}} #pragma omp declare variant(foo) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note 
{{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} #pragma omp declare variant(foo) match(xxx={vvv, vvv}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} #pragma omp declare variant(foo) match(xxx={vvv} xxx) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} #pragma omp declare variant(foo) match(xxx={vvv}) xxx // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} #pragma omp declare variant(foo) match(implementation={xxx}) // expected-warning {{'xxx' is not a valid context selector for the context set 'implementation'; selector ignored}} expected-note {{context selector options are: 'vendor' 'extension' 'unified_address' 'unified_shared_memory' 'reverse_offload' 'dynamic_allocators' 'atomic_default_mem_order'}} expected-note {{the ignored selector spans until here}} #pragma omp declare variant(foo) match(implementation={vendor}) // expected-warning {{the context selector 'vendor' in context set 'implementation' requires a context property defined in parentheses; selector ignored}} expected-note {{the ignored selector spans until here}} #pragma omp declare variant(foo) match(implementation={vendor(}) // expected-error {{expected ')'}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'amd' 'arm' 'bsc' 'cray' 'fujitsu' 'gnu' 'ibm' 'intel' 'llvm' 'pgi' 'ti' 'unknown'}} expected-note {{to match this '('}} #pragma omp declare variant(foo) match(implementation={vendor()}) // expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'amd' 'arm' 'bsc' 'cray' 'fujitsu' 'gnu' 'ibm' 'intel' 'llvm' 'pgi' 'ti' 'unknown'}} #pragma omp declare variant(foo) match(implementation={vendor(score ibm)}) // expected-error {{expected '(' after 'score'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} #pragma omp declare variant(foo) match(implementation={vendor(score( ibm)}) // expected-error {{use of undeclared identifier 'ibm'}} expected-error {{expected ')'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'amd' 'arm' 'bsc' 'cray' 'fujitsu' 'gnu' 'ibm' 'intel' 'llvm' 'pgi' 'ti' 'unknown'}} expected-note {{to match this '('}} #pragma omp declare variant(foo) match(implementation={vendor(score(2 ibm)}) // expected-error {{expected ')'}} expected-error {{expected ')'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{to match this '('}} expected-note {{context property options are: 'amd' 'arm' 'bsc' 'cray' 'fujitsu' 'gnu' 'ibm' 'intel' 'llvm' 'pgi' 'ti' 
'unknown'}} expected-note {{to match this '('}} #pragma omp declare variant(foo) match(implementation={vendor(score(foo()) ibm)}) // expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{score expressions in the OpenMP context selector need to be constant; foo() is not and will be ignored}} #pragma omp declare variant(foo) match(implementation={vendor(score(5): ibm), vendor(llvm)}) // expected-warning {{the context selector 'vendor' was used already in the same 'omp declare variant' directive; selector ignored}} expected-note {{the previous context selector 'vendor' used here}} expected-note {{the ignored selector spans until here}} #pragma omp declare variant(foo) match(implementation={vendor(score(5): ibm), kind(cpu)}) // expected-warning {{the context selector 'kind' is not valid for the context set 'implementation'; selector ignored}} expected-note {{the context selector 'kind' can be nested in the context set 'device'; try 'match(device={kind(property)})'}} expected-note {{the ignored selector spans until here}} #pragma omp declare variant(foo) match(device={xxx}) // expected-warning {{'xxx' is not a valid context selector for the context set 'device'; selector ignored}} expected-note {{context selector options are: 'kind' 'isa' 'arch'}} expected-note {{the ignored selector spans until here}} #pragma omp declare variant(foo) match(device={kind}) // expected-warning {{the context selector 'kind' in context set 'device' requires a context property defined in parentheses; selector ignored}} expected-note {{the ignored selector spans until here}} #pragma omp declare variant(foo) match(device={kind(}) // expected-error {{expected ')'}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'host' 'nohost' 'cpu' 'gpu' 'fpga' 'any'}} expected-note {{to match this '('}} #pragma omp declare variant(foo) match(device={kind()}) // expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'host' 'nohost' 'cpu' 'gpu' 'fpga' 'any'}} #pragma omp declare variant(foo) match(device={kind(score cpu)}) // expected-error {{expected '(' after 'score'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} #pragma omp declare variant(foo) match(device={kind(score( ibm)}) // expected-error {{use of undeclared identifier 'ibm'}} expected-error {{expected ')'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'host' 'nohost' 'cpu' 'gpu' 'fpga' 'any'}} expected-note {{to match this '('}} #pragma omp declare variant(foo) match(device={kind(score(2 gpu)}) // expected-error {{expected ')'}} expected-error {{expected ')'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('2'); score ignored}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{to match this '('}} expected-note {{context property options are: 'host' 'nohost' 'cpu' 'gpu' 'fpga' 'any'}} expected-note {{to match this '('}} #pragma omp declare variant(foo) match(device={kind(score(foo()) ibm)}) // expected-warning {{expected 
'':'' after the score expression; '':'' assumed}} expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('foo()'); score ignored}} expected-warning {{'ibm' is not a valid context property for the context selector 'kind' and the context set 'device'; property ignored}} expected-note {{try 'match(implementation={vendor(ibm)})'}} expected-note {{the ignored property spans until here}} #pragma omp declare variant(foo) match(device={kind(score(5): host), kind(llvm)}) // expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('5'); score ignored}} expected-warning {{the context selector 'kind' was used already in the same 'omp declare variant' directive; selector ignored}} expected-note {{the previous context selector 'kind' used here}} expected-note {{the ignored selector spans until here}} #pragma omp declare variant(foo) match(device={kind(score(5): nohost), vendor(llvm)}) // expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('5'); score ignored}} expected-warning {{the context selector 'vendor' is not valid for the context set 'device'; selector ignored}} expected-note {{the context selector 'vendor' can be nested in the context set 'implementation'; try 'match(implementation={vendor(property)})'}} expected-note {{the ignored selector spans until here}} int bar(void); #pragma omp declare variant(foo) match(implementation = {vendor(score(foo) :llvm)}) // expected-warning {{score expressions in the OpenMP context selector need to be constant; foo is not and will be ignored}} #pragma omp declare variant(foo) match(implementation = {vendor(score(foo()) :llvm)}) // expected-warning {{score expressions in the OpenMP context selector need to be constant; foo() is not and will be ignored}} #pragma omp declare variant(foo) match(implementation = {vendor(score(<expr>) :llvm)}) // expected-error {{expected expression}} expected-error {{use of undeclared identifier 'expr'}} expected-error {{expected expression}} #pragma omp declare variant(foo) match(user = {condition(foo)}) // expected-error {{the user condition in the OpenMP context selector needs to be constant; foo is not}} #pragma omp declare variant(foo) match(user = {condition(foo())}) // expected-error {{the user condition in the OpenMP context selector needs to be constant; foo() is not}} #pragma omp declare variant(foo) match(user = {condition(<expr>)}) // expected-error {{expected expression}} expected-error {{use of undeclared identifier 'expr'}} expected-error {{expected expression}} expected-note {{the ignored selector spans until here}} int score_and_cond_non_const(); #pragma omp declare variant(foo) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} int a; // expected-error {{'#pragma omp declare variant' can only be applied to functions}} #pragma omp declare variant(foo) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} #pragma omp threadprivate(a) // expected-error {{'#pragma omp declare variant' can only be applied to functions}} int var; #pragma omp threadprivate(var) #pragma omp declare variant(foo) match(xxx={}) // 
expected-error {{function declaration is expected after 'declare variant' directive}} #pragma omp declare // expected-error {{expected an OpenMP directive}} #pragma omp declare variant(foo) match(xxx={}) // expected-error {{function declaration is expected after 'declare variant' directive}} #pragma omp declare variant(foo) match(xxx={}) // expected-error {{function declaration is expected after 'declare variant' directive}} #pragma options align=packed int main(); #pragma omp declare variant(foo) match(xxx={}) // expected-error {{function declaration is expected after 'declare variant' directive}} #pragma omp declare variant(foo) match(xxx={}) // expected-error {{function declaration is expected after 'declare variant' directive}} #pragma init_seg(compiler) int main(); #pragma omp declare variant(foo) match(xxx={}) // expected-error {{single declaration is expected after 'declare variant' directive}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} int b, c; int no_proto(); #pragma omp declare variant(no_proto) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} int no_proto_too(); int proto1(int); #pragma omp declare variant(proto1) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} int diff_proto(); // expected-note {{previous declaration is here}} int diff_proto(double); // expected-error {{conflicting types for 'diff_proto'}} #pragma omp declare variant(no_proto) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} int diff_proto1(double); int after_use_variant(void); int after_use(); int bar() { return after_use(); } #pragma omp declare variant(after_use_variant) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-warning {{'#pragma omp declare variant' cannot be applied for function after first usage; the original function might be used}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} int after_use(void); #pragma omp declare variant(after_use_variant) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} int defined(void) { return 0; } int defined1(void) { return 0; } #pragma omp declare variant(after_use_variant) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-warning {{'#pragma omp declare variant' cannot be applied to the function that was defined already; the original function might be used}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until 
here}} int defined1(void); int diff_cc_variant(void); #pragma omp declare variant(diff_cc_variant) match(xxx={}) // expected-error {{variant in '#pragma omp declare variant' with type 'int (void)' is incompatible with type 'int (void) __attribute__((vectorcall))'}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} __vectorcall int diff_cc(void); int diff_ret_variant(void); #pragma omp declare variant(diff_ret_variant) match(xxx={}) // expected-error {{variant in '#pragma omp declare variant' with type 'int (void)' is incompatible with type 'void (void)'}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} void diff_ret(void); void marked(void); void not_marked(void); #pragma omp declare variant(not_marked) match(implementation={vendor(unknown)}, device={kind(cpu)}) // expected-note {{marked as 'declare variant' here}} void marked_variant(void); #pragma omp declare variant(marked_variant) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-warning {{variant function in '#pragma omp declare variant' is itself marked as '#pragma omp declare variant'}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} void marked(void); #pragma omp declare variant // expected-error {{function declaration is expected after 'declare variant' directive}} #pragma omp declare variant // expected-error {{function declaration is expected after 'declare variant' directive}}
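For contrast with the malformed selectors the test above exercises, a minimal well-formed use of the directive looks like the sketch below (base, base_cpu, and base_gpu are hypothetical names, not part of the test file):

/* The compiler substitutes base_cpu() or base_gpu() for calls to base()
 * whenever the OpenMP context matches the corresponding 'device' selector;
 * several 'declare variant' directives may precede one base declaration. */
int base_cpu(void);
int base_gpu(void);
#pragma omp declare variant(base_cpu) match(device = {kind(cpu)})
#pragma omp declare variant(base_gpu) match(device = {kind(gpu)})
int base(void);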
nw.c
#define LIMIT -999 #include <stdlib.h> #include <stdio.h> #include <omp.h> #include <string.h> #include <math.h> #include <time.h> #include <sys/time.h> void runTestParallel( int dim, int pen); void runTestNonParallel( int dim, int pen); int test_seq[1024]; int test_par[1024]; int cnt; int maximum( int a, int b, int c){ int k; if( a <= b ) k = b; else k = a; if( k <=c ) return(c); else return(k); } int blosum62[24][24] = { { 4, -1, -2, -2, 0, -1, -1, 0, -2, -1, -1, -1, -1, -2, -1, 1, 0, -3, -2, 0, -2, -1, 0, -4}, {-1, 5, 0, -2, -3, 1, 0, -2, 0, -3, -2, 2, -1, -3, -2, -1, -1, -3, -2, -3, -1, 0, -1, -4}, {-2, 0, 6, 1, -3, 0, 0, 0, 1, -3, -3, 0, -2, -3, -2, 1, 0, -4, -2, -3, 3, 0, -1, -4}, {-2, -2, 1, 6, -3, 0, 2, -1, -1, -3, -4, -1, -3, -3, -1, 0, -1, -4, -3, -3, 4, 1, -1, -4}, { 0, -3, -3, -3, 9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1, -3, -3, -2, -4}, {-1, 1, 0, 0, -3, 5, 2, -2, 0, -3, -2, 1, 0, -3, -1, 0, -1, -2, -1, -2, 0, 3, -1, -4}, {-1, 0, 0, 2, -4, 2, 5, -2, 0, -3, -3, 1, -2, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4}, { 0, -2, 0, -1, -3, -2, -2, 6, -2, -4, -4, -2, -3, -3, -2, 0, -2, -2, -3, -3, -1, -2, -1, -4}, {-2, 0, 1, -1, -3, 0, 0, -2, 8, -3, -3, -1, -2, -1, -2, -1, -2, -2, 2, -3, 0, 0, -1, -4}, {-1, -3, -3, -3, -1, -3, -3, -4, -3, 4, 2, -3, 1, 0, -3, -2, -1, -3, -1, 3, -3, -3, -1, -4}, {-1, -2, -3, -4, -1, -2, -3, -4, -3, 2, 4, -2, 2, 0, -3, -2, -1, -2, -1, 1, -4, -3, -1, -4}, {-1, 2, 0, -1, -3, 1, 1, -2, -1, -3, -2, 5, -1, -3, -1, 0, -1, -3, -2, -2, 0, 1, -1, -4}, {-1, -1, -2, -3, -1, 0, -2, -3, -2, 1, 2, -1, 5, 0, -2, -1, -1, -1, -1, 1, -3, -1, -1, -4}, {-2, -3, -3, -3, -2, -3, -3, -3, -1, 0, 0, -3, 0, 6, -4, -2, -2, 1, 3, -1, -3, -3, -1, -4}, {-1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4, 7, -1, -1, -4, -3, -2, -2, -1, -2, -4}, { 1, -1, 1, 0, -1, 0, 0, 0, -1, -2, -2, 0, -1, -2, -1, 4, 1, -3, -2, -2, 0, 0, 0, -4}, { 0, -1, 0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1, 1, 5, -2, -2, 0, -1, -1, 0, -4}, {-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1, 1, -4, -3, -2, 11, 2, -3, -4, -3, -2, -4}, {-2, -2, -2, -3, -2, -1, -2, -3, 2, -1, -1, -2, -1, 3, -3, -2, -2, 2, 7, -1, -3, -2, -1, -4}, { 0, -3, -3, -3, -1, -2, -2, -3, -3, 3, 1, -2, 1, -1, -2, -2, 0, -3, -1, 4, -3, -2, -1, -4}, {-2, -1, 3, 4, -3, 0, 1, -1, 0, -3, -4, 0, -3, -3, -2, 0, -1, -4, -3, -3, 4, 1, -1, -4}, {-1, 0, 0, 1, -3, 3, 4, -2, 0, -3, -3, 1, -1, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4}, { 0, -1, -1, -1, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, 0, 0, -2, -1, -1, -1, -1, -1, -4}, {-4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, 1} }; double gettime() { struct timeval t; gettimeofday(&t,NULL); return t.tv_sec+t.tv_usec*1e-6; } void usage(int argc, char **argv) { fprintf(stderr, "Usage: %s <max_rows/max_cols> <penalty>\n", argv[0]); fprintf(stderr, "\t<dimension> - x and y dimensions\n"); fprintf(stderr, "\t<penalty> - penalty(positive integer)\n"); exit(1); } int main(){ srand(1234); printf("2048, 10"); printf("\n \n STARTING SEQUENTIAL\n"); runTestNonParallel(2048, 10); srand(1234); printf("\n \n STARTING PARALLEL\n"); runTestParallel(2048, 10); if(memcmp(test_seq,test_par,cnt*sizeof(int)) == 0) printf("\nTest PASSED \n\n"); else printf("\nTest FAILED \n\n"); srand(1234); printf("6144, 10"); printf("\n \n STARTING SEQUENTIAL\n"); runTestNonParallel(6144, 10); srand(1234); printf("\n \n STARTING PARALLEL\n"); runTestParallel(6144, 10); if(memcmp(test_seq,test_par,cnt*sizeof(int)) == 0) printf("\nTest PASSED \n\n"); else printf("\nTest 
FAILED \n\n"); srand(1234); printf("16384, 10"); printf("\n \n STARTING SEQUENTIAL\n"); runTestNonParallel(16384, 10); srand(1234); printf("\n \n STARTING PARALLEL\n"); runTestParallel(16384, 10); if(memcmp(test_seq,test_par,cnt*sizeof(int)) == 0) printf("\nTest PASSED \n\n"); else printf("\nTest FAILED \n\n"); srand(1234); printf("22528, 10"); printf("\n \n STARTING SEQUENTIAL\n"); runTestNonParallel(22528, 10); srand(1234); printf("\n \n STARTING PARALLEL\n"); runTestParallel(22528, 10); if(memcmp(test_seq,test_par,cnt*sizeof(int)) == 0) printf("\nTest PASSED \n\n"); else printf("\nTest FAILED \n\n"); return EXIT_SUCCESS; } void runTestNonParallel( int dim, int pen) { int max_rows, max_cols, penalty,idx, index; int *input_itemsets, *output_itemsets, *referrence; int *matrix_cuda, *matrix_cuda_out, *referrence_cuda; int size; int omp_num_threads; max_cols = max_rows = dim; penalty = pen; max_rows = max_rows + 1; max_cols = max_cols + 1; referrence = (int *)malloc( max_rows * max_cols * sizeof(int) ); input_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) ); output_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) ); if (!input_itemsets) fprintf(stderr, "error: can not allocate memory"); double timeStart = omp_get_wtime(); for (int i = 0 ; i < max_cols; i++){ for (int j = 0 ; j < max_rows; j++){ input_itemsets[i*max_cols+j] = 0; } } printf("Start Needleman-Wunsch\n"); for( int i=1; i< max_rows ; i++){ input_itemsets[i*max_cols] = rand() % 10 + 1; referrence[i*max_cols] = 0; } for( int j=1; j< max_cols ; j++){ input_itemsets[j] = rand() % 10 + 1; referrence[j] = 0; } for (int i = 1 ; i < max_cols; i++){ for (int j = 1 ; j < max_rows; j++){ referrence[i*max_cols+j] = blosum62[input_itemsets[i*max_cols]][input_itemsets[j]]; } } for( int i = 1; i< max_rows ; i++) input_itemsets[i*max_cols] = -i * penalty; for( int j = 1; j< max_cols ; j++) input_itemsets[j] = -j * penalty; printf("Processing top-left matrix\n"); for( int i = 0 ; i < max_cols-2 ; i++){ for( idx = 0 ; idx <= i ; idx++){ index = (idx + 1) * max_cols + (i + 1 - idx); input_itemsets[index]= maximum( input_itemsets[index-1-max_cols]+ referrence[index], input_itemsets[index-1] - penalty, input_itemsets[index-max_cols] - penalty); } } printf("Processing bottom-right matrix\n"); for( int i = max_cols - 4 ; i >= 0 ; i--){ for( idx = 0 ; idx <= i ; idx++){ index = ( max_cols - idx - 2 ) * max_cols + idx + max_cols - i - 2 ; input_itemsets[index]= maximum( input_itemsets[index-1-max_cols]+ referrence[index], input_itemsets[index-1] - penalty, input_itemsets[index-max_cols] - penalty); } } double timeStop = omp_get_wtime(); printf("Elapsed time (sequetial): %f \n", timeStop-timeStart); #define TRACEBACK #ifdef TRACEBACK FILE *fpo = fopen("result.txt","w"); cnt = 0; fprintf(fpo, "print traceback value:\n"); for (int i = max_rows - 2, j = max_rows - 2; i>=0, j>=0;){ int nw, n, w, traceback; if ( i == max_rows - 2 && j == max_rows - 2 ){ fprintf(fpo, "%d ", input_itemsets[ i * max_cols + j]); test_seq[cnt] = input_itemsets[ i * max_cols + j]; cnt++; } if ( i == 0 && j == 0 ) break; if ( i > 0 && j > 0 ){ nw = input_itemsets[(i - 1) * max_cols + j - 1]; w = input_itemsets[ i * max_cols + j - 1 ]; n = input_itemsets[(i - 1) * max_cols + j]; } else if ( i == 0 ){ nw = n = LIMIT; w = input_itemsets[ i * max_cols + j - 1 ]; } else if ( j == 0 ){ nw = w = LIMIT; n = input_itemsets[(i - 1) * max_cols + j]; } else{ } //traceback = maximum(nw, w, n); int new_nw, new_w, new_n; new_nw = nw + referrence[i * max_cols + j]; new_w = w - 
penalty; new_n = n - penalty; traceback = maximum(new_nw, new_w, new_n); if(traceback == new_nw) traceback = nw; if(traceback == new_w) traceback = w; if(traceback == new_n) traceback = n; fprintf(fpo, "%d ", traceback); if(traceback == nw ) {i--; j--; continue;} else if(traceback == w ) {j--; continue;} else if(traceback == n ) {i--; continue;} else ; } fclose(fpo); #endif free(referrence); free(input_itemsets); free(output_itemsets); } void runTestParallel( int dim, int pen) { int max_rows, max_cols, penalty,idx, index; int *input_itemsets, *output_itemsets, *referrence; int *matrix_cuda, *matrix_cuda_out, *referrence_cuda; int size; int omp_num_threads; max_cols = max_rows = dim; penalty = pen; max_rows = max_rows + 1; max_cols = max_cols + 1; referrence = (int *)malloc( max_rows * max_cols * sizeof(int) ); input_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) ); output_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) ); if (!input_itemsets) fprintf(stderr, "error: cannot allocate memory\n"); double timeStart = omp_get_wtime(); #pragma omp parallel for collapse(2) for (int i = 0 ; i < max_cols; i++){ for (int j = 0 ; j < max_rows; j++){ input_itemsets[i*max_cols+j] = 0; } } printf("Start Needleman-Wunsch\n"); for( int i=1; i< max_rows ; i++){ input_itemsets[i*max_cols] = rand() % 10 + 1; referrence[i*max_cols] = 0; } for( int j=1; j< max_cols ; j++){ input_itemsets[j] = rand() % 10 + 1; referrence[j] = 0; } #pragma omp parallel for collapse(2) for (int i = 1 ; i < max_cols; i++){ for (int j = 1 ; j < max_rows; j++){ referrence[i*max_cols+j] = blosum62[input_itemsets[i*max_cols]][input_itemsets[j]]; } } #pragma omp parallel for for( int i = 1; i< max_rows ; i++) input_itemsets[i*max_cols] = -i * penalty; #pragma omp parallel for for( int j = 1; j< max_cols ; j++) input_itemsets[j] = -j * penalty; printf("Processing top-left matrix\n"); for( int i = 0 ; i < max_cols-2 ; i++){ #pragma omp parallel for private(idx, index) shared(i) for( idx = 0 ; idx <= i ; idx++){ index = (idx + 1) * max_cols + (i + 1 - idx); input_itemsets[index]= maximum( input_itemsets[index-1-max_cols]+ referrence[index], input_itemsets[index-1] - penalty, input_itemsets[index-max_cols] - penalty); } } printf("Processing bottom-right matrix\n"); for( int i = max_cols - 4 ; i >= 0 ; i--){ #pragma omp parallel for private(idx, index) shared(i) for( idx = 0 ; idx <= i ; idx++){ index = ( max_cols - idx - 2 ) * max_cols + idx + max_cols - i - 2 ; input_itemsets[index]= maximum( input_itemsets[index-1-max_cols]+ referrence[index], input_itemsets[index-1] - penalty, input_itemsets[index-max_cols] - penalty); } } double timeStop = omp_get_wtime(); printf("Elapsed time (parallel): %f \n", timeStop - timeStart); #define TRACEBACK #ifdef TRACEBACK FILE *fpo = fopen("result.txt","w"); cnt = 0; fprintf(fpo, "print traceback value:\n"); for (int i = max_rows - 2, j = max_rows - 2; i >= 0 && j >= 0;){ int nw, n, w, traceback; if ( i == max_rows - 2 && j == max_rows - 2 ){ fprintf(fpo, "%d ", input_itemsets[ i * max_cols + j]); test_par[cnt] = input_itemsets[ i * max_cols + j]; cnt++; } if ( i == 0 && j == 0 ) break; if ( i > 0 && j > 0 ){ nw = input_itemsets[(i - 1) * max_cols + j - 1]; w = input_itemsets[ i * max_cols + j - 1 ]; n = input_itemsets[(i - 1) * max_cols + j]; } else if ( i == 0 ){ nw = n = LIMIT; w = input_itemsets[ i * max_cols + j - 1 ]; } else if ( j == 0 ){ nw = w = LIMIT; n = input_itemsets[(i - 1) * max_cols + j]; } else{ } //traceback = maximum(nw, w, n); int new_nw, new_w, new_n; new_nw = 
nw + referrence[i * max_cols + j]; new_w = w - penalty; new_n = n - penalty; traceback = maximum(new_nw, new_w, new_n); if(traceback == new_nw) traceback = nw; if(traceback == new_w) traceback = w; if(traceback == new_n) traceback = n; fprintf(fpo, "%d ", traceback); if(traceback == nw ) {i--; j--; continue;} else if(traceback == w ) {j--; continue;} else if(traceback == n ) {i--; continue;} else ; } fclose(fpo); #endif free(referrence); free(input_itemsets); free(output_itemsets); }
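The "top-left"/"bottom-right" loops in nw.c are a wavefront traversal: each cell of the score matrix depends only on its north, west, and north-west neighbours, which all lie on earlier anti-diagonals, so every cell of one anti-diagonal can be filled concurrently while the diagonals themselves stay ordered. A stripped-down sketch of that pattern (hypothetical names; score and sub stand in for input_itemsets and referrence, and row 0 and column 0 are assumed already initialized):

/* Cells (r, c) with r + c == d form anti-diagonal d; they are mutually
 * independent, so each diagonal is one parallel loop over an n x n matrix. */
void wavefront(int n, int *score, const int *sub, int penalty)
{
    for (int d = 2; d <= 2 * (n - 1); d++) {      /* diagonals, in order */
        int r_lo = (d - (n - 1) > 1) ? d - (n - 1) : 1;
        int r_hi = (d - 1 < n - 1) ? d - 1 : n - 1;
        #pragma omp parallel for
        for (int r = r_lo; r <= r_hi; r++) {      /* cells of diagonal d */
            int c  = d - r;
            int nw = score[(r - 1) * n + (c - 1)] + sub[r * n + c];
            int w  = score[r * n + (c - 1)] - penalty;
            int no = score[(r - 1) * n + c] - penalty;
            int best = (nw > w) ? nw : w;
            score[r * n + c] = (best > no) ? best : no;
        }
    }
}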
pi-v20.c
/* * Compute pi by approximating the area under the curve f(x) = 4 / (1 + x*x) * between 0 and 1. * * parallel version using OpenMP */ #include <stdio.h> #include <stdlib.h> #include <omp.h> /* OpenMP */ #if _DEBUG_ #define _DEBUG_ 1 #else #define _DEBUG_ 0 #include "extrae_user_events.h" #define PROGRAM 1000 #define PI_COMPUTATION 1 #define FINAL_PI 2 #define END 0 #endif int main(int argc, char *argv[]) { double x, sum=0.0, pi=0.0; #if _DEBUG_ double start,end; #endif int i; const char Usage[] = "Usage: pi <num_steps> (try 1000000000)\n"; if (argc < 2) { fprintf(stderr, Usage); exit(1); } int num_steps = atoi(argv[1]); double step = 1.0/(double) num_steps; #if _DEBUG_ start= omp_get_wtime(); #endif /* do computation -- using just two threads */ // WARNING : correct code #pragma omp parallel #pragma omp single { #if _DEBUG_ int id = omp_get_thread_num(); #endif #pragma omp task private(i,x) shared(sum) #if !_DEBUG_ { Extrae_event (PROGRAM, PI_COMPUTATION); #endif for (i=0; i < num_steps/2; i++) { x = (i+0.5)*step; #pragma omp atomic sum += 4.0/(1.0+x*x); #if _DEBUG_ printf("thread id:%d it:%d\n",id,i); #endif } #if !_DEBUG_ Extrae_event (PROGRAM, END); } #endif #pragma omp task private(i,x) shared(sum) #if !_DEBUG_ { Extrae_event (PROGRAM, PI_COMPUTATION); #endif for (i=num_steps/2; i < num_steps; i++) { x = (i+0.5)*step; #pragma omp atomic sum += 4.0/(1.0+x*x); #if _DEBUG_ printf("thread id:%d it:%d\n",id,i); #endif } #if !_DEBUG_ Extrae_event (PROGRAM, END); } #endif #pragma omp taskwait #pragma omp task #if !_DEBUG_ { Extrae_event (PROGRAM, FINAL_PI); #endif pi = step * sum; #if !_DEBUG_ Extrae_event (PROGRAM, END); } #endif } #if _DEBUG_ end = omp_get_wtime(); printf("Wall clock execution time = %.9f seconds\n", end-start); #endif /* print results */ printf("Value of pi = %12.10f\n", pi); return EXIT_SUCCESS; }
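The quadrature in pi-v20.c works because the integral of 4/(1+x*x) from 0 to 1 is 4*atan(1) = pi, and the midpoint rule samples the integrand at x = (i+0.5)*step. The two-task version above pays for an atomic update on every iteration; a reduction keeps each thread's partial sum private and combines the partials once at the end. A minimal sketch (pi_reduction is a hypothetical helper, not part of the file above):

/* Midpoint-rule pi with a reduction instead of per-iteration atomics. */
double pi_reduction(long num_steps)
{
    double step = 1.0 / (double)num_steps;
    double sum = 0.0;
    #pragma omp parallel for reduction(+ : sum)
    for (long i = 0; i < num_steps; i++) {
        double x = (i + 0.5) * step;
        sum += 4.0 / (1.0 + x * x);
    }
    return step * sum;
}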
GB_unaryop__minv_int64_uint64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_int64_uint64 // op(A') function: GB_tran__minv_int64_uint64 // C type: int64_t // A type: uint64_t // cast: int64_t cij = (int64_t) aij // unaryop: cij = GB_IMINV_SIGNED (aij, 64) #define GB_ATYPE \ uint64_t #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_SIGNED (x, 64) ; // casting #define GB_CASTING(z, x) \ int64_t z = (int64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_INT64 || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_int64_uint64 ( int64_t *restrict Cx, const uint64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_int64_uint64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
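Substituting the macro definitions above (GB_GETA, GB_CASTING, GB_OP) shows what the generated hot loop of GB_unop__minv_int64_uint64 reduces to, per the 'cast' and 'unaryop' lines in the header comment. This is a hand expansion reusing the kernel's own names, not part of the generated file:

/* Expansion of GB_CAST_OP (p, p): read, typecast, apply the operator. */
for (int64_t p = 0 ; p < anz ; p++)
{
    uint64_t aij = Ax [p] ;                 /* GB_GETA    */
    int64_t  x   = (int64_t) aij ;          /* GB_CASTING */
    Cx [p] = GB_IMINV_SIGNED (x, 64) ;      /* GB_OP      */
}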
frame.c
/***************************************************************************** * frame.c: h264 encoder library ***************************************************************************** * Copyright (C) 2003-2008 x264 project * * Authors: Laurent Aimar <fenrir@via.ecp.fr> * Loren Merritt <lorenm@u.washington.edu> * Jason Garrett-Glaser <darkshikari@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA. *****************************************************************************/ #include "common.h" #include <omp.h> #define ALIGN(x,a) (((x)+((a)-1))&~((a)-1)) x264_frame_t *x264_frame_new( x264_t *h ) { x264_frame_t *frame = x264_malloc( sizeof(x264_frame_t) ); int i, j; int i_mb_count = h->mb.i_mb_count; int i_stride, i_width, i_lines; int i_padv = PADV << h->param.b_interlaced; int luma_plane_size; int align = h->param.cpu&X264_CPU_CACHELINE_64 ? 64 : h->param.cpu&X264_CPU_CACHELINE_32 ? 32 : 16; if( !frame ) return NULL; memset( frame, 0, sizeof(x264_frame_t) ); /* allocate frame data (+64 for extra data for me) */ i_width = ALIGN( h->param.i_width, 16 ); i_stride = ALIGN( i_width + 2*PADH, align ); i_lines = ALIGN( h->param.i_height, 16<<h->param.b_interlaced ); frame->i_plane = 3; for( i = 0; i < 3; i++ ) { frame->i_stride[i] = i_stride >> !!i; frame->i_width[i] = i_width >> !!i; frame->i_lines[i] = i_lines >> !!i; } luma_plane_size = (frame->i_stride[0] * ( frame->i_lines[0] + 2*i_padv )); for( i = 1; i < 3; i++ ) { CHECKED_MALLOC( frame->buffer[i], luma_plane_size/4 ); frame->plane[i] = frame->buffer[i] + (frame->i_stride[i] * i_padv + PADH)/2; } /* all 4 luma planes allocated together, since the cacheline split code * requires them to be in-phase wrt cacheline alignment. 
*/ if( h->param.analyse.i_subpel_refine ) { CHECKED_MALLOC( frame->buffer[0], 4*luma_plane_size); for( i = 0; i < 4; i++ ) frame->filtered[i] = frame->buffer[0] + i*luma_plane_size + frame->i_stride[0] * i_padv + PADH; frame->plane[0] = frame->filtered[0]; } else { CHECKED_MALLOC( frame->buffer[0], luma_plane_size); frame->plane[0] = frame->buffer[0] + frame->i_stride[0] * i_padv + PADH; } if( h->frames.b_have_lowres ) { frame->i_width_lowres = frame->i_width[0]/2; frame->i_stride_lowres = ALIGN( frame->i_width_lowres + 2*PADH, align ); frame->i_lines_lowres = frame->i_lines[0]/2; luma_plane_size = frame->i_stride_lowres * ( frame->i_lines[0]/2 + 2*i_padv ); CHECKED_MALLOC( frame->buffer_lowres[0], 4 * luma_plane_size ); for( i = 0; i < 4; i++ ) frame->lowres[i] = frame->buffer_lowres[0] + (frame->i_stride_lowres * i_padv + PADH) + i * luma_plane_size; for( j = 0; j <= !!h->param.i_bframe; j++ ) for( i = 0; i <= h->param.i_bframe; i++ ) { CHECKED_MALLOC( frame->lowres_mvs[j][i], 2*h->mb.i_mb_count*sizeof(int16_t) ); memset( frame->lowres_mvs[j][i], 0, 2*h->mb.i_mb_count*sizeof(int16_t) ); CHECKED_MALLOC( frame->lowres_mv_costs[j][i], h->mb.i_mb_count*sizeof(int) ); } } if( h->param.analyse.i_me_method >= X264_ME_ESA ) { CHECKED_MALLOC( frame->buffer[3], 2 * frame->i_stride[0] * (frame->i_lines[0] + 2*i_padv) * sizeof(uint16_t) ); frame->integral = (uint16_t*)frame->buffer[3] + frame->i_stride[0] * i_padv + PADH; } frame->i_poc = -1; frame->i_type = X264_TYPE_AUTO; frame->i_qpplus1 = 0; frame->i_pts = -1; frame->i_frame = -1; frame->i_frame_num = -1; frame->i_lines_completed = -1; CHECKED_MALLOC( frame->mb_type, i_mb_count * sizeof(int8_t)); CHECKED_MALLOC( frame->mv[0], 2*16 * i_mb_count * sizeof(int16_t) ); CHECKED_MALLOC( frame->ref[0], 4 * i_mb_count * sizeof(int8_t) ); CHECKED_MALLOC( frame->i_intra_cost, i_mb_count * sizeof(uint16_t) ); if( h->param.i_bframe ) { CHECKED_MALLOC( frame->mv[1], 2*16 * i_mb_count * sizeof(int16_t) ); CHECKED_MALLOC( frame->ref[1], 4 * i_mb_count * sizeof(int8_t) ); } else { frame->mv[1] = NULL; frame->ref[1] = NULL; } CHECKED_MALLOC( frame->i_row_bits, i_lines/16 * sizeof(int) ); CHECKED_MALLOC( frame->i_row_qp, i_lines/16 * sizeof(int) ); for( i = 0; i < h->param.i_bframe + 2; i++ ) for( j = 0; j < h->param.i_bframe + 2; j++ ) CHECKED_MALLOC( frame->i_row_satds[i][j], i_lines/16 * sizeof(int) ); if( h->param.rc.i_aq_mode ) { CHECKED_MALLOC( frame->f_qp_offset, h->mb.i_mb_count * sizeof(float) ); if( h->frames.b_have_lowres ) CHECKED_MALLOC( frame->i_inv_qscale_factor, h->mb.i_mb_count * sizeof(uint16_t) ); } x264_pthread_mutex_init( &frame->mutex, NULL ); x264_pthread_cond_init( &frame->cv, NULL ); return frame; fail: x264_frame_delete( frame ); return NULL; } void x264_frame_delete( x264_frame_t *frame ) { int i, j; for( i = 0; i < 4; i++ ) x264_free( frame->buffer[i] ); for( i = 0; i < 4; i++ ) x264_free( frame->buffer_lowres[i] ); for( i = 0; i < X264_BFRAME_MAX+2; i++ ) for( j = 0; j < X264_BFRAME_MAX+2; j++ ) x264_free( frame->i_row_satds[i][j] ); for( j = 0; j < 2; j++ ) for( i = 0; i <= X264_BFRAME_MAX; i++ ) { x264_free( frame->lowres_mvs[j][i] ); x264_free( frame->lowres_mv_costs[j][i] ); } x264_free( frame->f_qp_offset ); x264_free( frame->i_inv_qscale_factor ); x264_free( frame->i_intra_cost ); x264_free( frame->i_row_bits ); x264_free( frame->i_row_qp ); x264_free( frame->mb_type ); x264_free( frame->mv[0] ); x264_free( frame->mv[1] ); x264_free( frame->ref[0] ); x264_free( frame->ref[1] ); x264_pthread_mutex_destroy( &frame->mutex ); 
x264_pthread_cond_destroy( &frame->cv ); x264_free( frame ); } int x264_frame_copy_picture( x264_t *h, x264_frame_t *dst, x264_picture_t *src ) { int i_csp = src->img.i_csp & X264_CSP_MASK; int i; if( i_csp != X264_CSP_I420 && i_csp != X264_CSP_YV12 ) { x264_log( h, X264_LOG_ERROR, "Arg invalid CSP\n" ); return -1; } dst->i_type = src->i_type; dst->i_qpplus1 = src->i_qpplus1; dst->i_pts = src->i_pts; for( i=0; i<3; i++ ) { int s = (i_csp == X264_CSP_YV12 && i) ? i^3 : i; uint8_t *plane = src->img.plane[s]; int stride = src->img.i_stride[s]; int width = h->param.i_width >> !!i; int height = h->param.i_height >> !!i; if( src->img.i_csp & X264_CSP_VFLIP ) { plane += (height-1)*stride; stride = -stride; } h->mc.plane_copy( dst->plane[i], dst->i_stride[i], plane, stride, width, height ); } return 0; } static void plane_expand_border( uint8_t *pix, int i_stride, int i_width, int i_height, int i_padh, int i_padv, int b_pad_top, int b_pad_bottom ) { #define PPIXEL(x, y) ( pix + (x) + (y)*i_stride ) int y; for( y = 0; y < i_height; y++ ) { /* left band */ memset( PPIXEL(-i_padh, y), PPIXEL(0, y)[0], i_padh ); /* right band */ memset( PPIXEL(i_width, y), PPIXEL(i_width-1, y)[0], i_padh ); } /* upper band */ if( b_pad_top ) for( y = 0; y < i_padv; y++ ) memcpy( PPIXEL(-i_padh, -y-1), PPIXEL(-i_padh, 0), i_width+2*i_padh ); /* lower band */ if( b_pad_bottom ) for( y = 0; y < i_padv; y++ ) memcpy( PPIXEL(-i_padh, i_height+y), PPIXEL(-i_padh, i_height-1), i_width+2*i_padh ); #undef PPIXEL } void x264_frame_expand_border( x264_t *h, x264_frame_t *frame, int mb_y, int b_end ) { int i; int b_start = !mb_y; if( mb_y & h->sh.b_mbaff ) return; for( i = 0; i < frame->i_plane; i++ ) { int stride = frame->i_stride[i]; int width = 16*h->sps->i_mb_width >> !!i; int height = (b_end ? 16*(h->sps->i_mb_height - mb_y) >> h->sh.b_mbaff : 16) >> !!i; int padh = PADH >> !!i; int padv = PADV >> !!i; // buffer: 2 chroma, 3 luma (rounded to 4) because deblocking goes beyond the top of the mb uint8_t *pix = frame->plane[i] + X264_MAX(0, (16*mb_y-4)*stride >> !!i); if( b_end && !b_start ) height += 4 >> (!!i + h->sh.b_mbaff); if( h->sh.b_mbaff ) { plane_expand_border( pix, stride*2, width, height, padh, padv, b_start, b_end ); plane_expand_border( pix+stride, stride*2, width, height, padh, padv, b_start, b_end ); } else { plane_expand_border( pix, stride, width, height, padh, padv, b_start, b_end ); } } } void x264_frame_expand_border_filtered( x264_t *h, x264_frame_t *frame, int mb_y, int b_end ) { /* during filtering, 8 extra pixels were filtered on each edge, * but up to 3 of the horizontal ones may be wrong. we want to expand border from the last filtered pixel */ int b_start = !mb_y; int stride = frame->i_stride[0]; int width = 16*h->sps->i_mb_width + 8; int height = b_end ? 
(16*(h->sps->i_mb_height - mb_y) >> h->sh.b_mbaff) + 16 : 16; int padh = PADH - 4; int padv = PADV - 8; int i; for( i = 1; i < 4; i++ ) { // buffer: 8 luma, to match the hpel filter uint8_t *pix = frame->filtered[i] + (16*mb_y - (8 << h->sh.b_mbaff)) * stride - 4; if( h->sh.b_mbaff ) { plane_expand_border( pix, stride*2, width, height, padh, padv, b_start, b_end ); plane_expand_border( pix+stride, stride*2, width, height, padh, padv, b_start, b_end ); } else { plane_expand_border( pix, stride, width, height, padh, padv, b_start, b_end ); } } } void x264_frame_expand_border_lowres( x264_frame_t *frame ) { int i; for( i = 0; i < 4; i++ ) plane_expand_border( frame->lowres[i], frame->i_stride_lowres, frame->i_stride_lowres - 2*PADH, frame->i_lines_lowres, PADH, PADV, 1, 1 ); } void x264_frame_expand_border_mod16( x264_t *h, x264_frame_t *frame ) { int i, y; for( i = 0; i < frame->i_plane; i++ ) { int i_subsample = i ? 1 : 0; int i_width = h->param.i_width >> i_subsample; int i_height = h->param.i_height >> i_subsample; int i_padx = ( h->sps->i_mb_width * 16 - h->param.i_width ) >> i_subsample; int i_pady = ( h->sps->i_mb_height * 16 - h->param.i_height ) >> i_subsample; if( i_padx ) { for( y = 0; y < i_height; y++ ) memset( &frame->plane[i][y*frame->i_stride[i] + i_width], frame->plane[i][y*frame->i_stride[i] + i_width - 1], i_padx ); } if( i_pady ) { //FIXME interlace? or just let it pad using the wrong field for( y = i_height; y < i_height + i_pady; y++ ) memcpy( &frame->plane[i][y*frame->i_stride[i]], &frame->plane[i][(i_height-1)*frame->i_stride[i]], i_width + i_padx ); } } } /* cavlc + 8x8 transform stores nnz per 16 coeffs for the purpose of * entropy coding, but per 64 coeffs for the purpose of deblocking */ static void munge_cavlc_nnz_row( x264_t *h, int mb_y, uint8_t (*buf)[16] ) { uint32_t (*src)[6] = (uint32_t(*)[6])h->mb.non_zero_count + mb_y * h->sps->i_mb_width; int8_t *transform = h->mb.mb_transform_size + mb_y * h->sps->i_mb_width; int x, nnz; for( x=0; x<h->sps->i_mb_width; x++ ) { memcpy( buf+x, src+x, 16 ); if( transform[x] ) { nnz = src[x][0] | src[x][1]; src[x][0] = src[x][1] = ((uint16_t)nnz ? 0x0101 : 0) + (nnz>>16 ? 0x01010000 : 0); nnz = src[x][2] | src[x][3]; src[x][2] = src[x][3] = ((uint16_t)nnz ? 0x0101 : 0) + (nnz>>16 ? 
0x01010000 : 0); } } } static void restore_cavlc_nnz_row( x264_t *h, int mb_y, uint8_t (*buf)[16] ) { uint8_t (*dst)[24] = h->mb.non_zero_count + mb_y * h->sps->i_mb_width; int x; for( x=0; x<h->sps->i_mb_width; x++ ) memcpy( dst+x, buf+x, 16 ); } static void munge_cavlc_nnz( x264_t *h, int mb_y, uint8_t (*buf)[16], void (*func)(x264_t*, int, uint8_t (*)[16]) ) { func( h, mb_y, buf ); if( mb_y > 0 ) func( h, mb_y-1, buf + h->sps->i_mb_width ); if( h->sh.b_mbaff ) { func( h, mb_y+1, buf + h->sps->i_mb_width * 2 ); if( mb_y > 0 ) func( h, mb_y-2, buf + h->sps->i_mb_width * 3 ); } } /* Deblocking filter */ static const uint8_t i_alpha_table[52+12*2] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 5, 6, 7, 8, 9, 10, 12, 13, 15, 17, 20, 22, 25, 28, 32, 36, 40, 45, 50, 56, 63, 71, 80, 90,101,113,127,144,162,182,203,226, 255,255, 255,255,255,255,255,255,255,255,255,255,255,255, }; static const uint8_t i_beta_table[52+12*2] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, }; static const int8_t i_tc0_table[52+12*2][4] = { {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 1 }, {-1, 0, 0, 1 }, {-1, 0, 0, 1 }, {-1, 0, 0, 1 }, {-1, 0, 1, 1 }, {-1, 0, 1, 1 }, {-1, 1, 1, 1 }, {-1, 1, 1, 1 }, {-1, 1, 1, 1 }, {-1, 1, 1, 1 }, {-1, 1, 1, 2 }, {-1, 1, 1, 2 }, {-1, 1, 1, 2 }, {-1, 1, 1, 2 }, {-1, 1, 2, 3 }, {-1, 1, 2, 3 }, {-1, 2, 2, 3 }, {-1, 2, 2, 4 }, {-1, 2, 3, 4 }, {-1, 2, 3, 4 }, {-1, 3, 3, 5 }, {-1, 3, 4, 6 }, {-1, 3, 4, 6 }, {-1, 4, 5, 7 }, {-1, 4, 5, 8 }, {-1, 4, 6, 9 }, {-1, 5, 7,10 }, {-1, 6, 8,11 }, {-1, 6, 8,13 }, {-1, 7,10,14 }, {-1, 8,11,16 }, {-1, 9,12,18 }, {-1,10,13,20 }, {-1,11,15,23 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, }; #define alpha_table(x) i_alpha_table[(x)+12] #define beta_table(x) i_beta_table[(x)+12] #define tc0_table(x) i_tc0_table[(x)+12] /* From ffmpeg */ static inline void deblock_luma_c( uint8_t *pix, int xstride, int ystride, int alpha, int beta, int8_t *tc0 ) { int i, d; for( i = 0; i < 4; i++ ) { if( tc0[i] < 0 ) { pix += 4*ystride; continue; } for( d = 0; d < 4; d++ ) { const int p2 = pix[-3*xstride]; const int p1 = pix[-2*xstride]; const int p0 = pix[-1*xstride]; const int q0 = pix[ 0*xstride]; const int q1 = pix[ 1*xstride]; const int q2 = pix[ 2*xstride]; if( abs( p0 - q0 ) < alpha && abs( p1 - p0 ) < beta && abs( q1 - q0 ) < beta ) { int tc = tc0[i]; int delta; if( abs( p2 - p0 ) < beta ) { pix[-2*xstride] = p1 + x264_clip3( (( p2 + ((p0 + q0 + 1) >> 1)) >> 1) - p1, -tc0[i], tc0[i] ); tc++; } if( abs( q2 - q0 ) < beta ) { pix[ 1*xstride] = q1 + x264_clip3( (( q2 + ((p0 + q0 + 1) >> 1)) >> 1) - q1, -tc0[i], tc0[i] ); tc++; } delta = x264_clip3( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc ); pix[-1*xstride] = 
x264_clip_uint8( p0 + delta ); /* p0' */ pix[ 0*xstride] = x264_clip_uint8( q0 - delta ); /* q0' */ } pix += ystride; } } } static void deblock_v_luma_c( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 ) { deblock_luma_c( pix, stride, 1, alpha, beta, tc0 ); } static void deblock_h_luma_c( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 ) { deblock_luma_c( pix, 1, stride, alpha, beta, tc0 ); } static inline void deblock_chroma_c( uint8_t *pix, int xstride, int ystride, int alpha, int beta, int8_t *tc0 ) { int i, d; for( i = 0; i < 4; i++ ) { const int tc = tc0[i]; if( tc <= 0 ) { pix += 2*ystride; continue; } for( d = 0; d < 2; d++ ) { const int p1 = pix[-2*xstride]; const int p0 = pix[-1*xstride]; const int q0 = pix[ 0*xstride]; const int q1 = pix[ 1*xstride]; if( abs( p0 - q0 ) < alpha && abs( p1 - p0 ) < beta && abs( q1 - q0 ) < beta ) { int delta = x264_clip3( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc ); pix[-1*xstride] = x264_clip_uint8( p0 + delta ); /* p0' */ pix[ 0*xstride] = x264_clip_uint8( q0 - delta ); /* q0' */ } pix += ystride; } } } static void deblock_v_chroma_c( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 ) { deblock_chroma_c( pix, stride, 1, alpha, beta, tc0 ); } static void deblock_h_chroma_c( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 ) { deblock_chroma_c( pix, 1, stride, alpha, beta, tc0 ); } static inline void deblock_luma_intra_c( uint8_t *pix, int xstride, int ystride, int alpha, int beta ) { int d; for( d = 0; d < 16; d++ ) { const int p2 = pix[-3*xstride]; const int p1 = pix[-2*xstride]; const int p0 = pix[-1*xstride]; const int q0 = pix[ 0*xstride]; const int q1 = pix[ 1*xstride]; const int q2 = pix[ 2*xstride]; if( abs( p0 - q0 ) < alpha && abs( p1 - p0 ) < beta && abs( q1 - q0 ) < beta ) { if(abs( p0 - q0 ) < ((alpha >> 2) + 2) ) { if( abs( p2 - p0 ) < beta ) /* p0', p1', p2' */ { const int p3 = pix[-4*xstride]; pix[-1*xstride] = ( p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4 ) >> 3; pix[-2*xstride] = ( p2 + p1 + p0 + q0 + 2 ) >> 2; pix[-3*xstride] = ( 2*p3 + 3*p2 + p1 + p0 + q0 + 4 ) >> 3; } else /* p0' */ pix[-1*xstride] = ( 2*p1 + p0 + q1 + 2 ) >> 2; if( abs( q2 - q0 ) < beta ) /* q0', q1', q2' */ { const int q3 = pix[3*xstride]; pix[0*xstride] = ( p1 + 2*p0 + 2*q0 + 2*q1 + q2 + 4 ) >> 3; pix[1*xstride] = ( p0 + q0 + q1 + q2 + 2 ) >> 2; pix[2*xstride] = ( 2*q3 + 3*q2 + q1 + q0 + p0 + 4 ) >> 3; } else /* q0' */ pix[0*xstride] = ( 2*q1 + q0 + p1 + 2 ) >> 2; } else /* p0', q0' */ { pix[-1*xstride] = ( 2*p1 + p0 + q1 + 2 ) >> 2; pix[ 0*xstride] = ( 2*q1 + q0 + p1 + 2 ) >> 2; } } pix += ystride; } } static void deblock_v_luma_intra_c( uint8_t *pix, int stride, int alpha, int beta ) { deblock_luma_intra_c( pix, stride, 1, alpha, beta ); } static void deblock_h_luma_intra_c( uint8_t *pix, int stride, int alpha, int beta ) { deblock_luma_intra_c( pix, 1, stride, alpha, beta ); } static inline void deblock_chroma_intra_c( uint8_t *pix, int xstride, int ystride, int alpha, int beta ) { int d; for( d = 0; d < 8; d++ ) { const int p1 = pix[-2*xstride]; const int p0 = pix[-1*xstride]; const int q0 = pix[ 0*xstride]; const int q1 = pix[ 1*xstride]; if( abs( p0 - q0 ) < alpha && abs( p1 - p0 ) < beta && abs( q1 - q0 ) < beta ) { pix[-1*xstride] = (2*p1 + p0 + q1 + 2) >> 2; /* p0' */ pix[ 0*xstride] = (2*q1 + q0 + p1 + 2) >> 2; /* q0' */ } pix += ystride; } } static void deblock_v_chroma_intra_c( uint8_t *pix, int stride, int alpha, int beta ) { deblock_chroma_intra_c( pix, stride, 1, alpha, beta ); } static void 
deblock_h_chroma_intra_c( uint8_t *pix, int stride, int alpha, int beta ) { deblock_chroma_intra_c( pix, 1, stride, alpha, beta ); } static inline void deblock_edge( x264_t *h, uint8_t *pix1, uint8_t *pix2, int i_stride, uint8_t bS[4], int i_qp, int b_chroma, x264_deblock_inter_t pf_inter ) { const int index_a = i_qp + h->sh.i_alpha_c0_offset; const int alpha = alpha_table(index_a); const int beta = beta_table(i_qp + h->sh.i_beta_offset); int8_t tc[4]; if( !alpha || !beta ) return; tc[0] = tc0_table(index_a)[bS[0]] + b_chroma; tc[1] = tc0_table(index_a)[bS[1]] + b_chroma; tc[2] = tc0_table(index_a)[bS[2]] + b_chroma; tc[3] = tc0_table(index_a)[bS[3]] + b_chroma; pf_inter( pix1, i_stride, alpha, beta, tc ); if( b_chroma ) pf_inter( pix2, i_stride, alpha, beta, tc ); } static inline void deblock_edge_intra( x264_t *h, uint8_t *pix1, uint8_t *pix2, int i_stride, uint8_t bS[4], int i_qp, int b_chroma, x264_deblock_intra_t pf_intra ) { const int alpha = alpha_table(i_qp + h->sh.i_alpha_c0_offset); const int beta = beta_table(i_qp + h->sh.i_beta_offset); if( !alpha || !beta ) return; pf_intra( pix1, i_stride, alpha, beta ); if( b_chroma ) pf_intra( pix2, i_stride, alpha, beta ); } void x264_frame_deblock_row( x264_t *h, int mb_y ) { const int s8x8 = 2 * h->mb.i_mb_stride; const int s4x4 = 4 * h->mb.i_mb_stride; const int b_interlaced = h->sh.b_mbaff; const int mvy_limit = 4 >> b_interlaced; const int qp_thresh = 15 - X264_MIN(h->sh.i_alpha_c0_offset, h->sh.i_beta_offset) - X264_MAX(0, h->param.analyse.i_chroma_qp_offset); const int no_sub8x8 = !(h->param.analyse.inter & X264_ANALYSE_PSUB8x8); int mb_x; int stridey = h->fdec->i_stride[0]; int stride2y = stridey << b_interlaced; int strideuv = h->fdec->i_stride[1]; int stride2uv = strideuv << b_interlaced; if( !h->pps->b_cabac && h->pps->b_transform_8x8_mode ) munge_cavlc_nnz( h, mb_y, h->mb.nnz_backup, munge_cavlc_nnz_row ); for( mb_x = 0; mb_x < h->sps->i_mb_width; mb_x += (~b_interlaced | mb_y)&1, mb_y ^= b_interlaced ) { const int mb_xy = mb_y * h->mb.i_mb_stride + mb_x; const int mb_8x8 = 2 * s8x8 * mb_y + 2 * mb_x; const int mb_4x4 = 4 * s4x4 * mb_y + 4 * mb_x; const int b_8x8_transform = h->mb.mb_transform_size[mb_xy]; const int i_qp = h->mb.qp[mb_xy]; int i_edge_end = (h->mb.type[mb_xy] == P_SKIP) ? 
1 : 4; uint8_t *pixy = h->fdec->plane[0] + 16*mb_y*stridey + 16*mb_x; uint8_t *pixu = h->fdec->plane[1] + 8*mb_y*strideuv + 8*mb_x; uint8_t *pixv = h->fdec->plane[2] + 8*mb_y*strideuv + 8*mb_x; if( b_interlaced && (mb_y&1) ) { pixy -= 15*stridey; pixu -= 7*strideuv; pixv -= 7*strideuv; } x264_prefetch_fenc( h, h->fdec, mb_x, mb_y ); if( i_qp <= qp_thresh ) i_edge_end = 1; #define FILTER_DIR(intra, i_dir)\ {\ /* Y plane */\ i_qpn= h->mb.qp[mbn_xy];\ if( i_dir == 0 )\ {\ /* vertical edge */\ deblock_edge##intra( h, pixy + 4*i_edge, NULL,\ stride2y, bS, (i_qp+i_qpn+1) >> 1, 0,\ h->loopf.deblock_h_luma##intra );\ if( !(i_edge & 1) )\ {\ /* U/V planes */\ int i_qpc = (h->chroma_qp_table[i_qp] + h->chroma_qp_table[i_qpn] + 1) >> 1;\ deblock_edge##intra( h, pixu + 2*i_edge, pixv + 2*i_edge,\ stride2uv, bS, i_qpc, 1,\ h->loopf.deblock_h_chroma##intra );\ }\ }\ else\ {\ /* horizontal edge */\ deblock_edge##intra( h, pixy + 4*i_edge*stride2y, NULL,\ stride2y, bS, (i_qp+i_qpn+1) >> 1, 0,\ h->loopf.deblock_v_luma##intra );\ /* U/V planes */\ if( !(i_edge & 1) )\ {\ int i_qpc = (h->chroma_qp_table[i_qp] + h->chroma_qp_table[i_qpn] + 1) >> 1;\ deblock_edge##intra( h, pixu + 2*i_edge*stride2uv, pixv + 2*i_edge*stride2uv,\ stride2uv, bS, i_qpc, 1,\ h->loopf.deblock_v_chroma##intra );\ }\ }\ } #define DEBLOCK_STRENGTH(i_dir)\ {\ /* *** Get bS for each 4px for the current edge *** */\ if( IS_INTRA( h->mb.type[mb_xy] ) || IS_INTRA( h->mb.type[mbn_xy]) )\ *(uint32_t*)bS = 0x03030303;\ else\ {\ *(uint32_t*)bS = 0x00000000;\ for( i = 0; i < 4; i++ )\ {\ int x = i_dir == 0 ? i_edge : i;\ int y = i_dir == 0 ? i : i_edge;\ int xn = i_dir == 0 ? (x - 1)&0x03 : x;\ int yn = i_dir == 0 ? y : (y - 1)&0x03;\ if( h->mb.non_zero_count[mb_xy][x+y*4] != 0 ||\ h->mb.non_zero_count[mbn_xy][xn+yn*4] != 0 )\ bS[i] = 2;\ else if(!(i_edge&no_sub8x8))\ {\ if((i&no_sub8x8) && bS[i-1] != 2)\ bS[i] = bS[i-1];\ else\ {\ /* FIXME: A given frame may occupy more than one position in\ * the reference list. So we should compare the frame numbers,\ * not the indices in the ref list.\ * No harm yet, as we don't generate that case.*/\ int i8p= mb_8x8+(x>>1)+(y>>1)*s8x8;\ int i8q= mbn_8x8+(xn>>1)+(yn>>1)*s8x8;\ int i4p= mb_4x4+x+y*s4x4;\ int i4q= mbn_4x4+xn+yn*s4x4;\ if((h->mb.ref[0][i8p] != h->mb.ref[0][i8q] ||\ abs( h->mb.mv[0][i4p][0] - h->mb.mv[0][i4q][0] ) >= 4 ||\ abs( h->mb.mv[0][i4p][1] - h->mb.mv[0][i4q][1] ) >= mvy_limit ) ||\ (h->sh.i_type == SLICE_TYPE_B &&\ (h->mb.ref[1][i8p] != h->mb.ref[1][i8q] ||\ abs( h->mb.mv[1][i4p][0] - h->mb.mv[1][i4q][0] ) >= 4 ||\ abs( h->mb.mv[1][i4p][1] - h->mb.mv[1][i4q][1] ) >= mvy_limit )))\ {\ bS[i] = 1;\ }\ }\ }\ }\ }\ } /* i_dir == 0 -> vertical edge * i_dir == 1 -> horizontal edge */ #define DEBLOCK_DIR(i_dir)\ {\ int i_edge = (i_dir ? (mb_y <= b_interlaced) : (mb_x == 0));\ int i_qpn, i, mbn_xy, mbn_8x8, mbn_4x4;\ DECLARE_ALIGNED_4( uint8_t bS[4] ); /* filtering strength */\ if( i_edge )\ i_edge+= b_8x8_transform;\ else\ {\ mbn_xy = i_dir == 0 ? mb_xy - 1 : mb_xy - h->mb.i_mb_stride;\ mbn_8x8 = i_dir == 0 ? mb_8x8 - 2 : mb_8x8 - 2 * s8x8;\ mbn_4x4 = i_dir == 0 ? 
mb_4x4 - 4 : mb_4x4 - 4 * s4x4;\ if( b_interlaced && i_dir == 1 )\ {\ mbn_xy -= h->mb.i_mb_stride;\ mbn_8x8 -= 2 * s8x8;\ mbn_4x4 -= 4 * s4x4;\ }\ else if( IS_INTRA( h->mb.type[mb_xy] ) || IS_INTRA( h->mb.type[mbn_xy]) )\ {\ FILTER_DIR( _intra, i_dir );\ goto end##i_dir;\ }\ DEBLOCK_STRENGTH(i_dir);\ if( *(uint32_t*)bS )\ FILTER_DIR( , i_dir);\ end##i_dir:\ i_edge += b_8x8_transform+1;\ }\ mbn_xy = mb_xy;\ mbn_8x8 = mb_8x8;\ mbn_4x4 = mb_4x4;\ for( ; i_edge < i_edge_end; i_edge+=b_8x8_transform+1 )\ {\ DEBLOCK_STRENGTH(i_dir);\ if( *(uint32_t*)bS )\ FILTER_DIR( , i_dir);\ }\ } DEBLOCK_DIR(0); DEBLOCK_DIR(1); } if( !h->pps->b_cabac && h->pps->b_transform_8x8_mode ) munge_cavlc_nnz( h, mb_y, h->mb.nnz_backup, restore_cavlc_nnz_row ); } void x264_frame_deblock( x264_t *h ) { int mb_y; for( mb_y = 0; mb_y < h->sps->i_mb_height; mb_y += 1 + h->sh.b_mbaff ) x264_frame_deblock_row( h, mb_y ); } #ifdef HAVE_MMX void x264_deblock_v_chroma_mmxext( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 ); void x264_deblock_h_chroma_mmxext( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 ); void x264_deblock_v_chroma_intra_mmxext( uint8_t *pix, int stride, int alpha, int beta ); void x264_deblock_h_chroma_intra_mmxext( uint8_t *pix, int stride, int alpha, int beta ); void x264_deblock_v_luma_sse2( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 ); void x264_deblock_h_luma_sse2( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 ); void x264_deblock_v_luma_intra_sse2( uint8_t *pix, int stride, int alpha, int beta ); void x264_deblock_h_luma_intra_sse2( uint8_t *pix, int stride, int alpha, int beta ); #ifdef ARCH_X86 void x264_deblock_h_luma_mmxext( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 ); void x264_deblock_v8_luma_mmxext( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 ); void x264_deblock_h_luma_intra_mmxext( uint8_t *pix, int stride, int alpha, int beta ); void x264_deblock_v8_luma_intra_mmxext( uint8_t *pix, int stride, int alpha, int beta ); static void x264_deblock_v_luma_mmxext( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 ) { x264_deblock_v8_luma_mmxext( pix, stride, alpha, beta, tc0 ); x264_deblock_v8_luma_mmxext( pix+8, stride, alpha, beta, tc0+2 ); } static void x264_deblock_v_luma_intra_mmxext( uint8_t *pix, int stride, int alpha, int beta ) { x264_deblock_v8_luma_intra_mmxext( pix, stride, alpha, beta ); x264_deblock_v8_luma_intra_mmxext( pix+8, stride, alpha, beta ); } #endif #endif #ifdef ARCH_PPC void x264_deblock_v_luma_altivec( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 ); void x264_deblock_h_luma_altivec( uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0 ); #endif // ARCH_PPC void x264_deblock_init( int cpu, x264_deblock_function_t *pf ) { pf->deblock_v_luma = deblock_v_luma_c; pf->deblock_h_luma = deblock_h_luma_c; pf->deblock_v_chroma = deblock_v_chroma_c; pf->deblock_h_chroma = deblock_h_chroma_c; pf->deblock_v_luma_intra = deblock_v_luma_intra_c; pf->deblock_h_luma_intra = deblock_h_luma_intra_c; pf->deblock_v_chroma_intra = deblock_v_chroma_intra_c; pf->deblock_h_chroma_intra = deblock_h_chroma_intra_c; #ifdef HAVE_MMX if( cpu&X264_CPU_MMXEXT ) { pf->deblock_v_chroma = x264_deblock_v_chroma_mmxext; pf->deblock_h_chroma = x264_deblock_h_chroma_mmxext; pf->deblock_v_chroma_intra = x264_deblock_v_chroma_intra_mmxext; pf->deblock_h_chroma_intra = x264_deblock_h_chroma_intra_mmxext; #ifdef ARCH_X86 pf->deblock_v_luma = x264_deblock_v_luma_mmxext; pf->deblock_h_luma = 
x264_deblock_h_luma_mmxext; pf->deblock_v_luma_intra = x264_deblock_v_luma_intra_mmxext; pf->deblock_h_luma_intra = x264_deblock_h_luma_intra_mmxext; #endif if( (cpu&X264_CPU_SSE2) && !(cpu&X264_CPU_STACK_MOD4) ) { pf->deblock_v_luma = x264_deblock_v_luma_sse2; pf->deblock_h_luma = x264_deblock_h_luma_sse2; pf->deblock_v_luma_intra = x264_deblock_v_luma_intra_sse2; pf->deblock_h_luma_intra = x264_deblock_h_luma_intra_sse2; } } #endif #ifdef ARCH_PPC if( cpu&X264_CPU_ALTIVEC ) { pf->deblock_v_luma = x264_deblock_v_luma_altivec; pf->deblock_h_luma = x264_deblock_h_luma_altivec; } #endif // ARCH_PPC } /* threading */ void x264_frame_cond_broadcast( x264_frame_t *frame, int i_lines_completed ) { x264_pthread_mutex_lock( &frame->mutex ); frame->i_lines_completed = i_lines_completed; x264_pthread_cond_broadcast( &frame->cv ); x264_pthread_mutex_unlock( &frame->mutex ); } void x264_frame_cond_wait( x264_frame_t *frame, int i_lines_completed ) { /* the condition wait must hold the mutex guarding i_lines_completed */ x264_pthread_mutex_lock( &frame->mutex ); while( frame->i_lines_completed < i_lines_completed ) x264_pthread_cond_wait( &frame->cv, &frame->mutex ); x264_pthread_mutex_unlock( &frame->mutex ); } /* list operators */ void x264_frame_push( x264_frame_t **list, x264_frame_t *frame ) { int i = 0; while( list[i] ) i++; list[i] = frame; } x264_frame_t *x264_frame_pop( x264_frame_t **list ) { x264_frame_t *frame; int i = 0; assert( list[0] ); while( list[i+1] ) i++; frame = list[i]; list[i] = NULL; return frame; } void x264_frame_unshift( x264_frame_t **list, x264_frame_t *frame ) { int i = 0; while( list[i] ) i++; while( i-- ) list[i+1] = list[i]; list[0] = frame; } x264_frame_t *x264_frame_shift( x264_frame_t **list ) { x264_frame_t *frame = list[0]; int i; for( i = 0; list[i]; i++ ) list[i] = list[i+1]; assert(frame); return frame; } void x264_frame_push_unused( x264_t *h, x264_frame_t *frame ) { assert( frame->i_reference_count > 0 ); frame->i_reference_count--; if( frame->i_reference_count == 0 ) x264_frame_push( h->frames.unused, frame ); assert( h->frames.unused[ sizeof(h->frames.unused) / sizeof(*h->frames.unused) - 1 ] == NULL ); } x264_frame_t *x264_frame_pop_unused( x264_t *h ) { x264_frame_t *frame; if( h->frames.unused[0] ) frame = x264_frame_pop( h->frames.unused ); else frame = x264_frame_new( h ); assert( frame->i_reference_count == 0 ); frame->i_reference_count = 1; frame->b_intra_calculated = 0; return frame; } void x264_frame_sort( x264_frame_t **list, int b_dts ) { int i, b_ok; do { b_ok = 1; for( i = 0; list[i+1]; i++ ) { int dtype = list[i]->i_type - list[i+1]->i_type; int dtime = list[i]->i_frame - list[i+1]->i_frame; int swap = b_dts ? dtype > 0 || ( dtype == 0 && dtime > 0 ) : dtime > 0; if( swap ) { XCHG( x264_frame_t*, list[i], list[i+1] ); b_ok = 0; } } } while( !b_ok ); }
parallel_matrix.c
/* Parallel implementation of matrix multiplication using OpenMP
   compile using: gcc -fopenmp parallel_matrix.c */
#include <stdio.h>
#include <omp.h>

// prints the matrix
void matrix_print(int p, int q, int m[][q]) {
    int i, j;
    for(i = 0; i < p; i++) {
        for(j = 0; j < q; j++) {
            printf("%d\t", m[i][j]);
        }
        printf("\n");
    }
}

int main() {
    int m, n, p, q;
    int i, j, k, sum;

    // Manual user input
    printf("Enter the rows and columns of the first matrix: ");
    scanf("%d%d", &m, &n);
    printf("Enter the rows and columns of the second matrix: ");
    scanf("%d%d", &p, &q);
    if(n != p) {
        printf("Product is not possible\n");
        return 1;
    }

    int first[m][n], second[p][q], product[m][q];

    printf("Enter elements of the first matrix: ");
    for(i = 0; i < m; i++)
        for(j = 0; j < n; j++)
            scanf("%d", &first[i][j]);
    printf("Enter elements of the second matrix: ");
    for(i = 0; i < p; i++)
        for(j = 0; j < q; j++)
            scanf("%d", &second[i][j]);

    // test values
    /*
    m = n = p = q = 3;
    int first[][3] = { 1,2,3, 4,5,6, 7,8,9 };
    int second[][3] = { 3,2,3, 4,4,1, 1,1,2 };
    // product is
    // 14 13 11
    // 38 34 29
    // 62 55 47
    */

    // Since no element of the product depends on any other, the (i, j)
    // iterations are distributed evenly among the threads
    #pragma omp parallel for private(i,j,k,sum) shared(first,second,product)
    for(i = 0; i < m; i++) {
        for(j = 0; j < q; j++) {
            sum = 0;
            for(k = 0; k < p; k++)
                sum += first[i][k] * second[k][j];
            product[i][j] = sum;
        }
    }

    printf("Product is: \n");
    matrix_print(m, q, product);
    return 0;
}
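Written with C99 loop-local variables, the same kernel needs no explicit private clause (variables declared inside the construct are private automatically), and collapse(2) spreads the whole (i, j) iteration space across the team. A sketch of that variant (matmul is a hypothetical wrapper over the same arrays):

/* Same multiplication; loop-local i, j, k, sum are implicitly private. */
void matmul(int m, int n, int q,
            int first[m][n], int second[n][q], int product[m][q])
{
    #pragma omp parallel for collapse(2)
    for (int i = 0; i < m; i++) {
        for (int j = 0; j < q; j++) {
            int sum = 0;
            for (int k = 0; k < n; k++)
                sum += first[i][k] * second[k][j];
            product[i][j] = sum;
        }
    }
}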
sparselu.c
/**********************************************************************************************/ /* This program is part of the Barcelona OpenMP Tasks Suite */ /* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */ /* Copyright (C) 2009 Universitat Politecnica de Catalunya */ /* */ /* This program is free software; you can redistribute it and/or modify */ /* it under the terms of the GNU General Public License as published by */ /* the Free Software Foundation; either version 2 of the License, or */ /* (at your option) any later version. */ /* */ /* This program is distributed in the hope that it will be useful, */ /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ /* GNU General Public License for more details. */ /* */ /* You should have received a copy of the GNU General Public License */ /* along with this program; if not, write to the Free Software */ /* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /**********************************************************************************************/ #include <stdio.h> #include <stdint.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <libgen.h> #include "bots.h" #include "sparselu.h" /*********************************************************************** * checkmat: **********************************************************************/ int checkmat (float *M, float *N) { int i, j; float r_err; for (i = 0; i < bots_arg_size_1; i++) { for (j = 0; j < bots_arg_size_1; j++) { r_err = M[i*bots_arg_size_1+j] - N[i*bots_arg_size_1+j]; if ( r_err == 0.0 ) continue; if (r_err < 0.0 ) r_err = -r_err; if ( M[i*bots_arg_size_1+j] == 0 ) { bots_message("Checking failure: A[%d][%d]=%f B[%d][%d]=%f; \n", i,j, M[i*bots_arg_size_1+j], i,j, N[i*bots_arg_size_1+j]); return FALSE; } r_err = r_err / M[i*bots_arg_size_1+j]; if(r_err > EPSILON) { bots_message("Checking failure: A[%d][%d]=%f B[%d][%d]=%f; Relative Error=%f\n", i,j, M[i*bots_arg_size_1+j], i,j, N[i*bots_arg_size_1+j], r_err); return FALSE; } } } return TRUE; } /*********************************************************************** * genmat: **********************************************************************/ void genmat (float *M[]) { int null_entry, init_val, i, j, ii, jj; float *p; init_val = 1325; /* generating the structure */ for (ii=0; ii < bots_arg_size; ii++) { for (jj=0; jj < bots_arg_size; jj++) { /* computing null entries */ null_entry=FALSE; if ((ii<jj) && (ii%3 !=0)) null_entry = TRUE; if ((ii>jj) && (jj%3 !=0)) null_entry = TRUE; if (ii%2==1) null_entry = TRUE; if (jj%2==1) null_entry = TRUE; if (ii==jj) null_entry = FALSE; if (ii==jj-1) null_entry = FALSE; if (ii-1 == jj) null_entry = FALSE; /* allocating matrix */ if (null_entry == FALSE){ M[ii*bots_arg_size+jj] = (float *) malloc(bots_arg_size_1*bots_arg_size_1*sizeof(float)); if ((M[ii*bots_arg_size+jj] == NULL)) { bots_message("Error: Out of memory\n"); exit(101); } /* initializing matrix */ p = M[ii*bots_arg_size+jj]; for (i = 0; i < bots_arg_size_1; i++) { for (j = 0; j < bots_arg_size_1; j++) { init_val = (3125 * init_val) % 65536; (*p) = (float)((init_val - 32768.0) / 16384.0); p++; } } } else { M[ii*bots_arg_size+jj] = NULL; } } } } /*********************************************************************** * print_structure: **********************************************************************/ void print_structure(char *name, float *M[]) { 
int ii, jj; bots_message("Structure for matrix %s @ 0x%p\n",name, M); for (ii = 0; ii < bots_arg_size; ii++) { for (jj = 0; jj < bots_arg_size; jj++) { if (M[ii*bots_arg_size+jj]!=NULL) {bots_message("x");} else bots_message(" "); } bots_message("\n"); } bots_message("\n"); } /*********************************************************************** * allocate_clean_block: **********************************************************************/ float * allocate_clean_block() { int i,j; float *p, *q; p = (float *) malloc(bots_arg_size_1*bots_arg_size_1*sizeof(float)); q=p; if (p!=NULL){ for (i = 0; i < bots_arg_size_1; i++) for (j = 0; j < bots_arg_size_1; j++){(*p)=0.0; p++;} } else { bots_message("Error: Out of memory\n"); exit (101); } return (q); } /*********************************************************************** * lu0: **********************************************************************/ void lu0(float *diag) { int i, j, k; for (k=0; k<bots_arg_size_1; k++) for (i=k+1; i<bots_arg_size_1; i++) { diag[i*bots_arg_size_1+k] = diag[i*bots_arg_size_1+k] / diag[k*bots_arg_size_1+k]; for (j=k+1; j<bots_arg_size_1; j++) diag[i*bots_arg_size_1+j] = diag[i*bots_arg_size_1+j] - diag[i*bots_arg_size_1+k] * diag[k*bots_arg_size_1+j]; } } /*********************************************************************** * bdiv: **********************************************************************/ void bdiv(float *diag, float *row) { int i, j, k; for (i=0; i<bots_arg_size_1; i++) for (k=0; k<bots_arg_size_1; k++) { row[i*bots_arg_size_1+k] = row[i*bots_arg_size_1+k] / diag[k*bots_arg_size_1+k]; for (j=k+1; j<bots_arg_size_1; j++) row[i*bots_arg_size_1+j] = row[i*bots_arg_size_1+j] - row[i*bots_arg_size_1+k]*diag[k*bots_arg_size_1+j]; } } /*********************************************************************** * bmod: **********************************************************************/ void bmod(float *row, float *col, float *inner) { int i, j, k; for (i=0; i<bots_arg_size_1; i++) for (j=0; j<bots_arg_size_1; j++) for (k=0; k<bots_arg_size_1; k++) inner[i*bots_arg_size_1+j] = inner[i*bots_arg_size_1+j] - row[i*bots_arg_size_1+k]*col[k*bots_arg_size_1+j]; } /*********************************************************************** * fwd: **********************************************************************/ void fwd(float *diag, float *col) { int i, j, k; for (j=0; j<bots_arg_size_1; j++) for (k=0; k<bots_arg_size_1; k++) for (i=k+1; i<bots_arg_size_1; i++) col[i*bots_arg_size_1+j] = col[i*bots_arg_size_1+j] - diag[i*bots_arg_size_1+k]*col[k*bots_arg_size_1+j]; } void sparselu_init (float ***pBENCH, char *pass) { *pBENCH = (float **) malloc(bots_arg_size*bots_arg_size*sizeof(float *)); genmat(*pBENCH); print_structure(pass, *pBENCH); } void sparselu_par_call(float **BENCH) { int ii, jj, kk; bots_message("Computing SparseLU Factorization (%dx%d matrix with %dx%d blocks) ", bots_arg_size,bots_arg_size,bots_arg_size_1,bots_arg_size_1); #pragma omp parallel #pragma omp single #pragma omp task for (kk=0; kk<bots_arg_size; kk++) { lu0(BENCH[kk*bots_arg_size+kk]); for (jj=kk+1; jj<bots_arg_size; jj++) if (BENCH[kk*bots_arg_size+jj] != NULL) #pragma omp task firstprivate(kk, jj) shared(BENCH) { fwd(BENCH[kk*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj]); } for (ii=kk+1; ii<bots_arg_size; ii++) if (BENCH[ii*bots_arg_size+kk] != NULL) #pragma omp task firstprivate(kk, ii) shared(BENCH) { bdiv (BENCH[kk*bots_arg_size+kk], BENCH[ii*bots_arg_size+kk]); } #pragma omp taskwait for (ii=kk+1; ii<bots_arg_size; ii++) if 
(BENCH[ii*bots_arg_size+kk] != NULL) for (jj=kk+1; jj<bots_arg_size; jj++) if (BENCH[kk*bots_arg_size+jj] != NULL) #pragma omp task firstprivate(kk, jj, ii) shared(BENCH) { if (BENCH[ii*bots_arg_size+jj]==NULL) BENCH[ii*bots_arg_size+jj] = allocate_clean_block(); bmod(BENCH[ii*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]); } #pragma omp taskwait } bots_message(" completed!\n"); } void sparselu_seq_call(float **BENCH) { int ii, jj, kk; for (kk=0; kk<bots_arg_size; kk++) { lu0(BENCH[kk*bots_arg_size+kk]); for (jj=kk+1; jj<bots_arg_size; jj++) if (BENCH[kk*bots_arg_size+jj] != NULL) { fwd(BENCH[kk*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj]); } for (ii=kk+1; ii<bots_arg_size; ii++) if (BENCH[ii*bots_arg_size+kk] != NULL) { bdiv (BENCH[kk*bots_arg_size+kk], BENCH[ii*bots_arg_size+kk]); } for (ii=kk+1; ii<bots_arg_size; ii++) if (BENCH[ii*bots_arg_size+kk] != NULL) for (jj=kk+1; jj<bots_arg_size; jj++) if (BENCH[kk*bots_arg_size+jj] != NULL) { if (BENCH[ii*bots_arg_size+jj]==NULL) BENCH[ii*bots_arg_size+jj] = allocate_clean_block(); bmod(BENCH[ii*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]); } } } void sparselu_fini (float **BENCH, char *pass) { print_structure(pass, BENCH); } int sparselu_check(float **SEQ, float **BENCH) { int ii,jj,ok=1; for (ii=0; ((ii<bots_arg_size) && ok); ii++) { for (jj=0; ((jj<bots_arg_size) && ok); jj++) { if ((SEQ[ii*bots_arg_size+jj] == NULL) && (BENCH[ii*bots_arg_size+jj] != NULL)) ok = FALSE; if ((SEQ[ii*bots_arg_size+jj] != NULL) && (BENCH[ii*bots_arg_size+jj] == NULL)) ok = FALSE; if ((SEQ[ii*bots_arg_size+jj] != NULL) && (BENCH[ii*bots_arg_size+jj] != NULL)) ok = checkmat(SEQ[ii*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]); } } if (ok) return BOTS_RESULT_SUCCESSFUL; else return BOTS_RESULT_UNSUCCESSFUL; }
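The taskwait barriers in sparselu_par_call close each phase of a kk step before the next phase starts. With OpenMP 4.0 task dependences the same ordering can be stated per block, so independent fwd/bdiv/bmod tasks from different phases may overlap. Below is a sketch in the shape of the BOTS dependency variant, not the code above; it assumes the same helpers (lu0, fwd, bdiv, bmod, allocate_clean_block) and uses the pointer cells BENCH[...] as dependence proxies for the blocks they point to:

#pragma omp parallel
#pragma omp single
for (int kk = 0; kk < bots_arg_size; kk++)
{
    /* lu0 writes the diagonal block; fwd/bdiv read it and update their own
     * block; bmod reads a column and a row block and updates the inner one. */
    #pragma omp task depend(inout: BENCH[kk*bots_arg_size+kk])
    lu0(BENCH[kk*bots_arg_size+kk]);
    for (int jj = kk+1; jj < bots_arg_size; jj++)
        if (BENCH[kk*bots_arg_size+jj] != NULL)
        {
            #pragma omp task depend(in: BENCH[kk*bots_arg_size+kk]) depend(inout: BENCH[kk*bots_arg_size+jj])
            fwd(BENCH[kk*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj]);
        }
    for (int ii = kk+1; ii < bots_arg_size; ii++)
        if (BENCH[ii*bots_arg_size+kk] != NULL)
        {
            #pragma omp task depend(in: BENCH[kk*bots_arg_size+kk]) depend(inout: BENCH[ii*bots_arg_size+kk])
            bdiv(BENCH[kk*bots_arg_size+kk], BENCH[ii*bots_arg_size+kk]);
        }
    for (int ii = kk+1; ii < bots_arg_size; ii++)
        if (BENCH[ii*bots_arg_size+kk] != NULL)
            for (int jj = kk+1; jj < bots_arg_size; jj++)
                if (BENCH[kk*bots_arg_size+jj] != NULL)
                {
                    /* allocate at task-creation time, on the single thread,
                     * so the BENCH[] cell itself is never written in a race */
                    if (BENCH[ii*bots_arg_size+jj] == NULL)
                        BENCH[ii*bots_arg_size+jj] = allocate_clean_block();
                    #pragma omp task depend(in: BENCH[ii*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj]) depend(inout: BENCH[ii*bots_arg_size+jj])
                    bmod(BENCH[ii*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]);
                }
}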
c-omp.c
/* This file contains routines to construct OpenACC and OpenMP constructs,
   called from parsing in the C and C++ front ends.

   Copyright (C) 2005-2020 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>,
                  Diego Novillo <dnovillo@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "options.h"
#include "c-common.h"
#include "gimple-expr.h"
#include "c-pragma.h"
#include "stringpool.h"
#include "omp-general.h"
#include "gomp-constants.h"
#include "memmodel.h"
#include "attribs.h"
#include "gimplify.h"
#include "langhooks.h"


/* Complete a #pragma oacc wait construct.  LOC is the location of
   the #pragma.  */

tree
c_finish_oacc_wait (location_t loc, tree parms, tree clauses)
{
  const int nparms = list_length (parms);
  tree stmt, t;
  vec<tree, va_gc> *args;

  vec_alloc (args, nparms + 2);
  stmt = builtin_decl_explicit (BUILT_IN_GOACC_WAIT);

  if (omp_find_clause (clauses, OMP_CLAUSE_ASYNC))
    t = OMP_CLAUSE_ASYNC_EXPR (clauses);
  else
    t = build_int_cst (integer_type_node, GOMP_ASYNC_SYNC);
  args->quick_push (t);

  args->quick_push (build_int_cst (integer_type_node, nparms));

  for (t = parms; t; t = TREE_CHAIN (t))
    {
      if (TREE_CODE (OMP_CLAUSE_WAIT_EXPR (t)) == INTEGER_CST)
        args->quick_push (build_int_cst (integer_type_node,
                          TREE_INT_CST_LOW (OMP_CLAUSE_WAIT_EXPR (t))));
      else
        args->quick_push (OMP_CLAUSE_WAIT_EXPR (t));
    }

  stmt = build_call_expr_loc_vec (loc, stmt, args);

  vec_free (args);

  return stmt;
}

/* Complete a #pragma omp master construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_master (location_t loc, tree stmt)
{
  tree t = add_stmt (build1 (OMP_MASTER, void_type_node, stmt));
  SET_EXPR_LOCATION (t, loc);
  return t;
}

/* Complete a #pragma omp taskgroup construct.  BODY is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_taskgroup (location_t loc, tree body, tree clauses)
{
  tree stmt = make_node (OMP_TASKGROUP);
  TREE_TYPE (stmt) = void_type_node;
  OMP_TASKGROUP_BODY (stmt) = body;
  OMP_TASKGROUP_CLAUSES (stmt) = clauses;
  SET_EXPR_LOCATION (stmt, loc);
  return add_stmt (stmt);
}

/* Complete a #pragma omp critical construct.  BODY is the structured-block
   that follows the pragma, NAME is the identifier in the pragma, or null
   if it was omitted.  LOC is the location of the #pragma.
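   For example (illustrative), a named construct such as
       #pragma omp critical (lck) hint (omp_sync_hint_contended)
   arrives here with NAME set to the identifier lck, while a plain
   unnamed "#pragma omp critical" arrives with NAME == NULL_TREE.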
 */

tree
c_finish_omp_critical (location_t loc, tree body, tree name, tree clauses)
{
  gcc_assert (!clauses || OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_HINT);
  if (name == NULL_TREE
      && clauses != NULL_TREE
      && integer_nonzerop (OMP_CLAUSE_HINT_EXPR (clauses)))
    {
      error_at (OMP_CLAUSE_LOCATION (clauses),
                "%<#pragma omp critical%> with %<hint%> clause requires "
                "a name, except when %<omp_sync_hint_none%> is used");
      return error_mark_node;
    }

  tree stmt = make_node (OMP_CRITICAL);
  TREE_TYPE (stmt) = void_type_node;
  OMP_CRITICAL_BODY (stmt) = body;
  OMP_CRITICAL_NAME (stmt) = name;
  OMP_CRITICAL_CLAUSES (stmt) = clauses;
  SET_EXPR_LOCATION (stmt, loc);
  return add_stmt (stmt);
}

/* Complete a #pragma omp ordered construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_ordered (location_t loc, tree clauses, tree stmt)
{
  tree t = make_node (OMP_ORDERED);
  TREE_TYPE (t) = void_type_node;
  OMP_ORDERED_BODY (t) = stmt;
  if (!flag_openmp  /* flag_openmp_simd  */
      && (OMP_CLAUSE_CODE (clauses) != OMP_CLAUSE_SIMD
          || OMP_CLAUSE_CHAIN (clauses)))
    clauses = build_omp_clause (loc, OMP_CLAUSE_SIMD);
  OMP_ORDERED_CLAUSES (t) = clauses;
  SET_EXPR_LOCATION (t, loc);
  return add_stmt (t);
}

/* Complete a #pragma omp barrier construct.  LOC is the location of
   the #pragma.  */

void
c_finish_omp_barrier (location_t loc)
{
  tree x;

  x = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER);
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}

/* Complete a #pragma omp taskwait construct.  LOC is the location of the
   pragma.  */

void
c_finish_omp_taskwait (location_t loc)
{
  tree x;

  x = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT);
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}

/* Complete a #pragma omp taskyield construct.  LOC is the location of the
   pragma.  */

void
c_finish_omp_taskyield (location_t loc)
{
  tree x;

  x = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD);
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}

/* Complete a #pragma omp atomic construct.  For CODE OMP_ATOMIC
   the expression to be implemented atomically is LHS opcode= RHS.
   For OMP_ATOMIC_READ V = LHS, for OMP_ATOMIC_CAPTURE_{NEW,OLD} LHS
   opcode= RHS with the new or old content of LHS returned.
   LOC is the location of the atomic statement.  The value returned
   is either error_mark_node (if the construct was erroneous) or an
   OMP_ATOMIC* node which should be added to the current statement
   tree with add_stmt.  If TEST is set, avoid calling save_expr
   or create_tmp_var*.  */

tree
c_finish_omp_atomic (location_t loc, enum tree_code code,
                     enum tree_code opcode, tree lhs, tree rhs,
                     tree v, tree lhs1, tree rhs1, bool swapped,
                     enum omp_memory_order memory_order, bool test)
{
  tree x, type, addr, pre = NULL_TREE;
  HOST_WIDE_INT bitpos = 0, bitsize = 0;

  if (lhs == error_mark_node || rhs == error_mark_node
      || v == error_mark_node || lhs1 == error_mark_node
      || rhs1 == error_mark_node)
    return error_mark_node;

  /* ??? According to one reading of the OpenMP spec, complex type are
     supported, but there are no atomic stores for any architecture.
     But at least icc 9.0 doesn't support complex types here either.
     And lets not even talk about vector types...  */
  type = TREE_TYPE (lhs);
  if (!INTEGRAL_TYPE_P (type)
      && !POINTER_TYPE_P (type)
      && !SCALAR_FLOAT_TYPE_P (type))
    {
      error_at (loc, "invalid expression type for %<#pragma omp atomic%>");
      return error_mark_node;
    }
  if (TYPE_ATOMIC (type))
    {
      error_at (loc, "%<_Atomic%> expression in %<#pragma omp atomic%>");
      return error_mark_node;
    }

  if (opcode == RDIV_EXPR)
    opcode = TRUNC_DIV_EXPR;

  /* ???
Validate that rhs does not overlap lhs. */ tree blhs = NULL; if (TREE_CODE (lhs) == COMPONENT_REF && TREE_CODE (TREE_OPERAND (lhs, 1)) == FIELD_DECL && DECL_C_BIT_FIELD (TREE_OPERAND (lhs, 1)) && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (lhs, 1))) { tree field = TREE_OPERAND (lhs, 1); tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field); if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field)) && tree_fits_uhwi_p (DECL_FIELD_OFFSET (repr))) bitpos = (tree_to_uhwi (DECL_FIELD_OFFSET (field)) - tree_to_uhwi (DECL_FIELD_OFFSET (repr))) * BITS_PER_UNIT; else bitpos = 0; bitpos += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field)) - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr))); gcc_assert (tree_fits_shwi_p (DECL_SIZE (field))); bitsize = tree_to_shwi (DECL_SIZE (field)); blhs = lhs; type = TREE_TYPE (repr); lhs = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (lhs, 0), repr, TREE_OPERAND (lhs, 2)); } /* Take and save the address of the lhs. From then on we'll reference it via indirection. */ addr = build_unary_op (loc, ADDR_EXPR, lhs, false); if (addr == error_mark_node) return error_mark_node; if (!test) addr = save_expr (addr); if (!test && TREE_CODE (addr) != SAVE_EXPR && (TREE_CODE (addr) != ADDR_EXPR || !VAR_P (TREE_OPERAND (addr, 0)))) { /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize it even after unsharing function body. */ tree var = create_tmp_var_raw (TREE_TYPE (addr)); DECL_CONTEXT (var) = current_function_decl; addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL); } tree orig_lhs = lhs; lhs = build_indirect_ref (loc, addr, RO_NULL); tree new_lhs = lhs; if (code == OMP_ATOMIC_READ) { x = build1 (OMP_ATOMIC_READ, type, addr); SET_EXPR_LOCATION (x, loc); OMP_ATOMIC_MEMORY_ORDER (x) = memory_order; if (blhs) x = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), x, bitsize_int (bitsize), bitsize_int (bitpos)); return build_modify_expr (loc, v, NULL_TREE, NOP_EXPR, loc, x, NULL_TREE); } /* There are lots of warnings, errors, and conversions that need to happen in the course of interpreting a statement. Use the normal mechanisms to do this, and then take it apart again. */ if (blhs) { lhs = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), lhs, bitsize_int (bitsize), bitsize_int (bitpos)); if (swapped) rhs = build_binary_op (loc, opcode, rhs, lhs, true); else if (opcode != NOP_EXPR) rhs = build_binary_op (loc, opcode, lhs, rhs, true); opcode = NOP_EXPR; } else if (swapped) { rhs = build_binary_op (loc, opcode, rhs, lhs, true); opcode = NOP_EXPR; } bool save = in_late_binary_op; in_late_binary_op = true; x = build_modify_expr (loc, blhs ? blhs : lhs, NULL_TREE, opcode, loc, rhs, NULL_TREE); in_late_binary_op = save; if (x == error_mark_node) return error_mark_node; if (TREE_CODE (x) == COMPOUND_EXPR) { pre = TREE_OPERAND (x, 0); gcc_assert (TREE_CODE (pre) == SAVE_EXPR || tree_invariant_p (pre)); x = TREE_OPERAND (x, 1); } gcc_assert (TREE_CODE (x) == MODIFY_EXPR); rhs = TREE_OPERAND (x, 1); if (blhs) rhs = build3_loc (loc, BIT_INSERT_EXPR, type, new_lhs, rhs, bitsize_int (bitpos)); /* Punt the actual generation of atomic operations to common code. */ if (code == OMP_ATOMIC) type = void_type_node; x = build2 (code, type, addr, rhs); SET_EXPR_LOCATION (x, loc); OMP_ATOMIC_MEMORY_ORDER (x) = memory_order; /* Generally it is hard to prove lhs1 and lhs are the same memory location, just diagnose different variables. 
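   (In an illustrative update such as "#pragma omp atomic" x = y + 1;
   the variable read and the variable updated must name the same
   storage; when both are plain VAR_DECLs the mismatch is reported
   below.)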
*/ if (rhs1 && VAR_P (rhs1) && VAR_P (orig_lhs) && rhs1 != orig_lhs && !test) { if (code == OMP_ATOMIC) error_at (loc, "%<#pragma omp atomic update%> uses two different " "variables for memory"); else error_at (loc, "%<#pragma omp atomic capture%> uses two different " "variables for memory"); return error_mark_node; } if (lhs1 && lhs1 != orig_lhs && TREE_CODE (lhs1) == COMPONENT_REF && TREE_CODE (TREE_OPERAND (lhs1, 1)) == FIELD_DECL && DECL_C_BIT_FIELD (TREE_OPERAND (lhs1, 1)) && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (lhs1, 1))) { tree field = TREE_OPERAND (lhs1, 1); tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field); lhs1 = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (lhs1, 0), repr, TREE_OPERAND (lhs1, 2)); } if (rhs1 && rhs1 != orig_lhs && TREE_CODE (rhs1) == COMPONENT_REF && TREE_CODE (TREE_OPERAND (rhs1, 1)) == FIELD_DECL && DECL_C_BIT_FIELD (TREE_OPERAND (rhs1, 1)) && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (rhs1, 1))) { tree field = TREE_OPERAND (rhs1, 1); tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field); rhs1 = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (rhs1, 0), repr, TREE_OPERAND (rhs1, 2)); } if (code != OMP_ATOMIC) { /* Generally it is hard to prove lhs1 and lhs are the same memory location, just diagnose different variables. */ if (lhs1 && VAR_P (lhs1) && VAR_P (orig_lhs)) { if (lhs1 != orig_lhs && !test) { error_at (loc, "%<#pragma omp atomic capture%> uses two " "different variables for memory"); return error_mark_node; } } if (blhs) { x = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), x, bitsize_int (bitsize), bitsize_int (bitpos)); type = TREE_TYPE (blhs); } x = build_modify_expr (loc, v, NULL_TREE, NOP_EXPR, loc, x, NULL_TREE); if (rhs1 && rhs1 != orig_lhs) { tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, false); if (rhs1addr == error_mark_node) return error_mark_node; x = omit_one_operand_loc (loc, type, x, rhs1addr); } if (lhs1 && lhs1 != orig_lhs) { tree lhs1addr = build_unary_op (loc, ADDR_EXPR, lhs1, false); if (lhs1addr == error_mark_node) return error_mark_node; if (code == OMP_ATOMIC_CAPTURE_OLD) x = omit_one_operand_loc (loc, type, x, lhs1addr); else { if (!test) x = save_expr (x); x = omit_two_operands_loc (loc, type, x, x, lhs1addr); } } } else if (rhs1 && rhs1 != orig_lhs) { tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, false); if (rhs1addr == error_mark_node) return error_mark_node; x = omit_one_operand_loc (loc, type, x, rhs1addr); } if (pre) x = omit_one_operand_loc (loc, type, x, pre); return x; } /* Return true if TYPE is the implementation's omp_depend_t. */ bool c_omp_depend_t_p (tree type) { type = TYPE_MAIN_VARIANT (type); return (TREE_CODE (type) == RECORD_TYPE && TYPE_NAME (type) && ((TREE_CODE (TYPE_NAME (type)) == TYPE_DECL ? DECL_NAME (TYPE_NAME (type)) : TYPE_NAME (type)) == get_identifier ("omp_depend_t")) && (!TYPE_CONTEXT (type) || TREE_CODE (TYPE_CONTEXT (type)) == TRANSLATION_UNIT_DECL) && COMPLETE_TYPE_P (type) && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST && !compare_tree_int (TYPE_SIZE (type), 2 * tree_to_uhwi (TYPE_SIZE (ptr_type_node)))); } /* Complete a #pragma omp depobj construct. LOC is the location of the #pragma. 
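   An illustrative use is
       omp_depend_t obj;
       #pragma omp depobj (obj) depend (inout: x)
   which initializes OBJ so that later tasks may refer to the
   dependence via depend (depobj: obj).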
*/ void c_finish_omp_depobj (location_t loc, tree depobj, enum omp_clause_depend_kind kind, tree clause) { tree t = NULL_TREE; if (!error_operand_p (depobj)) { if (!c_omp_depend_t_p (TREE_TYPE (depobj))) { error_at (EXPR_LOC_OR_LOC (depobj, loc), "type of %<depobj%> expression is not %<omp_depend_t%>"); depobj = error_mark_node; } else if (TYPE_READONLY (TREE_TYPE (depobj))) { error_at (EXPR_LOC_OR_LOC (depobj, loc), "%<const%> qualified %<depobj%> expression"); depobj = error_mark_node; } } else depobj = error_mark_node; if (clause == error_mark_node) return; if (clause) { gcc_assert (TREE_CODE (clause) == OMP_CLAUSE && OMP_CLAUSE_CODE (clause) == OMP_CLAUSE_DEPEND); if (OMP_CLAUSE_CHAIN (clause)) error_at (OMP_CLAUSE_LOCATION (clause), "more than one locator in %<depend%> clause on %<depobj%> " "construct"); switch (OMP_CLAUSE_DEPEND_KIND (clause)) { case OMP_CLAUSE_DEPEND_DEPOBJ: error_at (OMP_CLAUSE_LOCATION (clause), "%<depobj%> dependence type specified in %<depend%> " "clause on %<depobj%> construct"); return; case OMP_CLAUSE_DEPEND_SOURCE: case OMP_CLAUSE_DEPEND_SINK: error_at (OMP_CLAUSE_LOCATION (clause), "%<depend(%s)%> is only allowed in %<omp ordered%>", OMP_CLAUSE_DEPEND_KIND (clause) == OMP_CLAUSE_DEPEND_SOURCE ? "source" : "sink"); return; case OMP_CLAUSE_DEPEND_IN: case OMP_CLAUSE_DEPEND_OUT: case OMP_CLAUSE_DEPEND_INOUT: case OMP_CLAUSE_DEPEND_MUTEXINOUTSET: kind = OMP_CLAUSE_DEPEND_KIND (clause); t = OMP_CLAUSE_DECL (clause); gcc_assert (t); if (TREE_CODE (t) == TREE_LIST && TREE_PURPOSE (t) && TREE_CODE (TREE_PURPOSE (t)) == TREE_VEC) { error_at (OMP_CLAUSE_LOCATION (clause), "%<iterator%> modifier may not be specified on " "%<depobj%> construct"); return; } if (TREE_CODE (t) == COMPOUND_EXPR) { tree t1 = build_fold_addr_expr (TREE_OPERAND (t, 1)); t = build2 (COMPOUND_EXPR, TREE_TYPE (t1), TREE_OPERAND (t, 0), t1); } else t = build_fold_addr_expr (t); break; default: gcc_unreachable (); } } else gcc_assert (kind != OMP_CLAUSE_DEPEND_SOURCE); if (depobj == error_mark_node) return; depobj = build_fold_addr_expr_loc (EXPR_LOC_OR_LOC (depobj, loc), depobj); tree dtype = build_pointer_type_for_mode (ptr_type_node, TYPE_MODE (ptr_type_node), true); depobj = fold_convert (dtype, depobj); tree r; if (clause) { depobj = save_expr (depobj); r = build_indirect_ref (loc, depobj, RO_UNARY_STAR); add_stmt (build2 (MODIFY_EXPR, void_type_node, r, t)); } int k; switch (kind) { case OMP_CLAUSE_DEPEND_IN: k = GOMP_DEPEND_IN; break; case OMP_CLAUSE_DEPEND_OUT: k = GOMP_DEPEND_OUT; break; case OMP_CLAUSE_DEPEND_INOUT: k = GOMP_DEPEND_INOUT; break; case OMP_CLAUSE_DEPEND_MUTEXINOUTSET: k = GOMP_DEPEND_MUTEXINOUTSET; break; case OMP_CLAUSE_DEPEND_LAST: k = -1; break; default: gcc_unreachable (); } t = build_int_cst (ptr_type_node, k); depobj = build2_loc (loc, POINTER_PLUS_EXPR, TREE_TYPE (depobj), depobj, TYPE_SIZE_UNIT (ptr_type_node)); r = build_indirect_ref (loc, depobj, RO_UNARY_STAR); add_stmt (build2 (MODIFY_EXPR, void_type_node, r, t)); } /* Complete a #pragma omp flush construct. We don't do anything with the variable list that the syntax allows. LOC is the location of the #pragma. */ void c_finish_omp_flush (location_t loc, int mo) { tree x; if (mo == MEMMODEL_LAST) { x = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE); x = build_call_expr_loc (loc, x, 0); } else { x = builtin_decl_explicit (BUILT_IN_ATOMIC_THREAD_FENCE); x = build_call_expr_loc (loc, x, 1, build_int_cst (integer_type_node, mo)); } add_stmt (x); } /* Check and canonicalize OMP_FOR increment expression. 
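   E.g. increments spelled i = i + 1, i = 1 + i or i = i - 1 are all
   reduced to a step expression that the caller turns back into the
   canonical decl = decl + step form.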
Helper function for c_finish_omp_for. */ static tree check_omp_for_incr_expr (location_t loc, tree exp, tree decl) { tree t; if (!INTEGRAL_TYPE_P (TREE_TYPE (exp)) || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl))) return error_mark_node; if (exp == decl) return build_int_cst (TREE_TYPE (exp), 0); switch (TREE_CODE (exp)) { CASE_CONVERT: t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl); if (t != error_mark_node) return fold_convert_loc (loc, TREE_TYPE (exp), t); break; case MINUS_EXPR: t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl); if (t != error_mark_node) return fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (exp), t, TREE_OPERAND (exp, 1)); break; case PLUS_EXPR: t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl); if (t != error_mark_node) return fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (exp), t, TREE_OPERAND (exp, 1)); t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 1), decl); if (t != error_mark_node) return fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (exp), TREE_OPERAND (exp, 0), t); break; case COMPOUND_EXPR: { /* cp_build_modify_expr forces preevaluation of the RHS to make sure that it is evaluated before the lvalue-rvalue conversion is applied to the LHS. Reconstruct the original expression. */ tree op0 = TREE_OPERAND (exp, 0); if (TREE_CODE (op0) == TARGET_EXPR && !VOID_TYPE_P (TREE_TYPE (op0))) { tree op1 = TREE_OPERAND (exp, 1); tree temp = TARGET_EXPR_SLOT (op0); if (BINARY_CLASS_P (op1) && TREE_OPERAND (op1, 1) == temp) { op1 = copy_node (op1); TREE_OPERAND (op1, 1) = TARGET_EXPR_INITIAL (op0); return check_omp_for_incr_expr (loc, op1, decl); } } break; } default: break; } return error_mark_node; } /* If the OMP_FOR increment expression in INCR is of pointer type, canonicalize it into an expression handled by gimplify_omp_for() and return it. DECL is the iteration variable. */ static tree c_omp_for_incr_canonicalize_ptr (location_t loc, tree decl, tree incr) { if (POINTER_TYPE_P (TREE_TYPE (decl)) && TREE_OPERAND (incr, 1)) { tree t = fold_convert_loc (loc, sizetype, TREE_OPERAND (incr, 1)); if (TREE_CODE (incr) == POSTDECREMENT_EXPR || TREE_CODE (incr) == PREDECREMENT_EXPR) t = fold_build1_loc (loc, NEGATE_EXPR, sizetype, t); t = fold_build_pointer_plus (decl, t); incr = build2 (MODIFY_EXPR, void_type_node, decl, t); } return incr; } /* Validate and generate OMP_FOR. DECLV is a vector of iteration variables, for each collapsed loop. ORIG_DECLV, if non-NULL, is a vector with the original iteration variables (prior to any transformations, by say, C++ iterators). INITV, CONDV and INCRV are vectors containing initialization expressions, controlling predicates and increment expressions. BODY is the body of the loop and PRE_BODY statements that go before the loop. */ tree c_finish_omp_for (location_t locus, enum tree_code code, tree declv, tree orig_declv, tree initv, tree condv, tree incrv, tree body, tree pre_body, bool final_p) { location_t elocus; bool fail = false; int i; gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv)); gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv)); gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv)); for (i = 0; i < TREE_VEC_LENGTH (declv); i++) { tree decl = TREE_VEC_ELT (declv, i); tree init = TREE_VEC_ELT (initv, i); tree cond = TREE_VEC_ELT (condv, i); tree incr = TREE_VEC_ELT (incrv, i); elocus = locus; if (EXPR_HAS_LOCATION (init)) elocus = EXPR_LOCATION (init); /* Validate the iteration variable. 
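   Only integral and pointer types are accepted, and _Atomic qualified
   iterators are rejected outright, as the checks right below implement.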
*/ if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)) && TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE) { error_at (elocus, "invalid type for iteration variable %qE", decl); fail = true; } else if (TYPE_ATOMIC (TREE_TYPE (decl))) { error_at (elocus, "%<_Atomic%> iteration variable %qE", decl); fail = true; /* _Atomic iterator confuses stuff too much, so we risk ICE trying to diagnose it further. */ continue; } /* In the case of "for (int i = 0...)", init will be a decl. It should have a DECL_INITIAL that we can turn into an assignment. */ if (init == decl) { elocus = DECL_SOURCE_LOCATION (decl); init = DECL_INITIAL (decl); if (init == NULL) { error_at (elocus, "%qE is not initialized", decl); init = integer_zero_node; fail = true; } DECL_INITIAL (decl) = NULL_TREE; init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR, /* FIXME diagnostics: This should be the location of the INIT. */ elocus, init, NULL_TREE); } if (init != error_mark_node) { gcc_assert (TREE_CODE (init) == MODIFY_EXPR); gcc_assert (TREE_OPERAND (init, 0) == decl); } if (cond == NULL_TREE) { error_at (elocus, "missing controlling predicate"); fail = true; } else { bool cond_ok = false; /* E.g. C sizeof (vla) could add COMPOUND_EXPRs with evaluation of the vla VAR_DECL. We need to readd them to the non-decl operand. See PR45784. */ while (TREE_CODE (cond) == COMPOUND_EXPR) cond = TREE_OPERAND (cond, 1); if (EXPR_HAS_LOCATION (cond)) elocus = EXPR_LOCATION (cond); if (TREE_CODE (cond) == LT_EXPR || TREE_CODE (cond) == LE_EXPR || TREE_CODE (cond) == GT_EXPR || TREE_CODE (cond) == GE_EXPR || TREE_CODE (cond) == NE_EXPR || TREE_CODE (cond) == EQ_EXPR) { tree op0 = TREE_OPERAND (cond, 0); tree op1 = TREE_OPERAND (cond, 1); /* 2.5.1. The comparison in the condition is computed in the type of DECL, otherwise the behavior is undefined. For example: long n; int i; i < n; according to ISO will be evaluated as: (long)i < n; We want to force: i < (int)n; */ if (TREE_CODE (op0) == NOP_EXPR && decl == TREE_OPERAND (op0, 0)) { TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0); TREE_OPERAND (cond, 1) = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl), TREE_OPERAND (cond, 1)); } else if (TREE_CODE (op1) == NOP_EXPR && decl == TREE_OPERAND (op1, 0)) { TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0); TREE_OPERAND (cond, 0) = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl), TREE_OPERAND (cond, 0)); } if (decl == TREE_OPERAND (cond, 0)) cond_ok = true; else if (decl == TREE_OPERAND (cond, 1)) { TREE_SET_CODE (cond, swap_tree_comparison (TREE_CODE (cond))); TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0); TREE_OPERAND (cond, 0) = decl; cond_ok = true; } if (TREE_CODE (cond) == NE_EXPR || TREE_CODE (cond) == EQ_EXPR) { if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))) { if (code == OACC_LOOP || TREE_CODE (cond) == EQ_EXPR) cond_ok = false; } else if (operand_equal_p (TREE_OPERAND (cond, 1), TYPE_MIN_VALUE (TREE_TYPE (decl)), 0)) TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR ? GT_EXPR : LE_EXPR); else if (operand_equal_p (TREE_OPERAND (cond, 1), TYPE_MAX_VALUE (TREE_TYPE (decl)), 0)) TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR ? 
LT_EXPR : GE_EXPR); else if (code == OACC_LOOP || TREE_CODE (cond) == EQ_EXPR) cond_ok = false; } if (cond_ok && TREE_VEC_ELT (condv, i) != cond) { tree ce = NULL_TREE, *pce = &ce; tree type = TREE_TYPE (TREE_OPERAND (cond, 1)); for (tree c = TREE_VEC_ELT (condv, i); c != cond; c = TREE_OPERAND (c, 1)) { *pce = build2 (COMPOUND_EXPR, type, TREE_OPERAND (c, 0), TREE_OPERAND (cond, 1)); pce = &TREE_OPERAND (*pce, 1); } TREE_OPERAND (cond, 1) = ce; TREE_VEC_ELT (condv, i) = cond; } } if (!cond_ok) { error_at (elocus, "invalid controlling predicate"); fail = true; } } if (incr == NULL_TREE) { error_at (elocus, "missing increment expression"); fail = true; } else { bool incr_ok = false; if (EXPR_HAS_LOCATION (incr)) elocus = EXPR_LOCATION (incr); /* Check all the valid increment expressions: v++, v--, ++v, --v, v = v + incr, v = incr + v and v = v - incr. */ switch (TREE_CODE (incr)) { case POSTINCREMENT_EXPR: case PREINCREMENT_EXPR: case POSTDECREMENT_EXPR: case PREDECREMENT_EXPR: if (TREE_OPERAND (incr, 0) != decl) break; incr_ok = true; if (!fail && TREE_CODE (cond) == NE_EXPR && TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE && TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl))) && (TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl)))) != INTEGER_CST)) { /* For pointer to VLA, transform != into < or > depending on whether incr is increment or decrement. */ if (TREE_CODE (incr) == PREINCREMENT_EXPR || TREE_CODE (incr) == POSTINCREMENT_EXPR) TREE_SET_CODE (cond, LT_EXPR); else TREE_SET_CODE (cond, GT_EXPR); } incr = c_omp_for_incr_canonicalize_ptr (elocus, decl, incr); break; case COMPOUND_EXPR: if (TREE_CODE (TREE_OPERAND (incr, 0)) != SAVE_EXPR || TREE_CODE (TREE_OPERAND (incr, 1)) != MODIFY_EXPR) break; incr = TREE_OPERAND (incr, 1); /* FALLTHRU */ case MODIFY_EXPR: if (TREE_OPERAND (incr, 0) != decl) break; if (TREE_OPERAND (incr, 1) == decl) break; if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl)) incr_ok = true; else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR || (TREE_CODE (TREE_OPERAND (incr, 1)) == POINTER_PLUS_EXPR)) && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl) incr_ok = true; else { tree t = check_omp_for_incr_expr (elocus, TREE_OPERAND (incr, 1), decl); if (t != error_mark_node) { incr_ok = true; t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t); incr = build2 (MODIFY_EXPR, void_type_node, decl, t); } } if (!fail && incr_ok && TREE_CODE (cond) == NE_EXPR) { tree i = TREE_OPERAND (incr, 1); i = TREE_OPERAND (i, TREE_OPERAND (i, 0) == decl); i = c_fully_fold (i, false, NULL); if (!final_p && TREE_CODE (i) != INTEGER_CST) ; else if (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE) { tree unit = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl))); if (unit) { enum tree_code ccode = GT_EXPR; unit = c_fully_fold (unit, false, NULL); i = fold_convert (TREE_TYPE (unit), i); if (operand_equal_p (unit, i, 0)) ccode = LT_EXPR; if (ccode == GT_EXPR) { i = fold_unary (NEGATE_EXPR, TREE_TYPE (i), i); if (i == NULL_TREE || !operand_equal_p (unit, i, 0)) { error_at (elocus, "increment is not constant 1 or " "-1 for %<!=%> condition"); fail = true; } } if (TREE_CODE (unit) != INTEGER_CST) /* For pointer to VLA, transform != into < or > depending on whether the pointer is incremented or decremented in each iteration. 
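   E.g. an illustrative condition p != q where the pointed-to element
   size is not a compile-time constant is rewritten to p < q or p > q.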
*/ TREE_SET_CODE (cond, ccode); } } else { if (!integer_onep (i) && !integer_minus_onep (i)) { error_at (elocus, "increment is not constant 1 or -1 for" " %<!=%> condition"); fail = true; } } } break; default: break; } if (!incr_ok) { error_at (elocus, "invalid increment expression"); fail = true; } } TREE_VEC_ELT (initv, i) = init; TREE_VEC_ELT (incrv, i) = incr; } if (fail) return NULL; else { tree t = make_node (code); TREE_TYPE (t) = void_type_node; OMP_FOR_INIT (t) = initv; OMP_FOR_COND (t) = condv; OMP_FOR_INCR (t) = incrv; OMP_FOR_BODY (t) = body; OMP_FOR_PRE_BODY (t) = pre_body; OMP_FOR_ORIG_DECLS (t) = orig_declv; SET_EXPR_LOCATION (t, locus); return t; } } /* Type for passing data in between c_omp_check_loop_iv and c_omp_check_loop_iv_r. */ struct c_omp_check_loop_iv_data { tree declv; bool fail; bool maybe_nonrect; location_t stmt_loc; location_t expr_loc; int kind; int idx; walk_tree_lh lh; hash_set<tree> *ppset; }; /* Return -1 if DECL is not a loop iterator in loop nest D, otherwise return the index of the loop in which it is an iterator. Return TREE_VEC_LENGTH (d->declv) if it is a C++ range for iterator. */ static int c_omp_is_loop_iterator (tree decl, struct c_omp_check_loop_iv_data *d) { for (int i = 0; i < TREE_VEC_LENGTH (d->declv); i++) if (decl == TREE_VEC_ELT (d->declv, i) || (TREE_CODE (TREE_VEC_ELT (d->declv, i)) == TREE_LIST && decl == TREE_PURPOSE (TREE_VEC_ELT (d->declv, i)))) return i; else if (TREE_CODE (TREE_VEC_ELT (d->declv, i)) == TREE_LIST && TREE_CHAIN (TREE_VEC_ELT (d->declv, i)) && (TREE_CODE (TREE_CHAIN (TREE_VEC_ELT (d->declv, i))) == TREE_VEC) && decl == TREE_VEC_ELT (TREE_CHAIN (TREE_VEC_ELT (d->declv, i)), 2)) return TREE_VEC_LENGTH (d->declv); return -1; } /* Helper function called via walk_tree, to diagnose uses of associated loop IVs inside of lb, b and incr expressions of OpenMP loops. */ static tree c_omp_check_loop_iv_r (tree *tp, int *walk_subtrees, void *data) { struct c_omp_check_loop_iv_data *d = (struct c_omp_check_loop_iv_data *) data; if (DECL_P (*tp)) { int idx = c_omp_is_loop_iterator (*tp, d); if (idx == -1) return NULL_TREE; if ((d->kind & 4) && idx < d->idx) { d->maybe_nonrect = true; return NULL_TREE; } if (d->ppset->add (*tp)) return NULL_TREE; location_t loc = d->expr_loc; if (loc == UNKNOWN_LOCATION) loc = d->stmt_loc; switch (d->kind & 3) { case 0: error_at (loc, "initializer expression refers to " "iteration variable %qD", *tp); break; case 1: error_at (loc, "condition expression refers to " "iteration variable %qD", *tp); break; case 2: error_at (loc, "increment expression refers to " "iteration variable %qD", *tp); break; } d->fail = true; } else if (d->ppset->add (*tp)) *walk_subtrees = 0; /* Don't walk dtors added by C++ wrap_cleanups_r. */ else if (TREE_CODE (*tp) == TRY_CATCH_EXPR && TRY_CATCH_IS_CLEANUP (*tp)) { *walk_subtrees = 0; return walk_tree_1 (&TREE_OPERAND (*tp, 0), c_omp_check_loop_iv_r, data, NULL, d->lh); } return NULL_TREE; } /* Check the allowed expressions for non-rectangular loop nest lb and b expressions. Return the outer var decl referenced in the expression. 
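   The accepted shapes are (modulo integral conversions) var-outer,
   var-outer + a2, a2 + var-outer, var-outer - a2, a2 - var-outer,
   a1 * var-outer and a1 * var-outer +/- a2, where var-outer is the
   iterator of an outer associated loop and a1/a2 themselves contain
   no loop iterators.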
 */

static tree
c_omp_check_nonrect_loop_iv (tree *tp, struct c_omp_check_loop_iv_data *d,
                             walk_tree_lh lh)
{
  d->maybe_nonrect = false;
  if (d->fail)
    return NULL_TREE;
  hash_set<tree> pset;
  hash_set<tree> *ppset = d->ppset;
  d->ppset = &pset;
  tree t = *tp;
  if (TREE_CODE (t) == TREE_VEC
      && TREE_VEC_LENGTH (t) == 3
      && DECL_P (TREE_VEC_ELT (t, 0))
      && c_omp_is_loop_iterator (TREE_VEC_ELT (t, 0), d) >= 0)
    {
      d->kind &= 3;
      /* Check both the multiplier and the addend (elements 1 and 2).  */
      walk_tree_1 (&TREE_VEC_ELT (t, 1), c_omp_check_loop_iv_r, d, NULL, lh);
      walk_tree_1 (&TREE_VEC_ELT (t, 2), c_omp_check_loop_iv_r, d, NULL, lh);
      d->ppset = ppset;
      return d->fail ? NULL_TREE : TREE_VEC_ELT (t, 0);
    }
  while (CONVERT_EXPR_P (t))
    t = TREE_OPERAND (t, 0);
  tree a1 = t, a2 = integer_zero_node;
  bool neg_a1 = false, neg_a2 = false;
  switch (TREE_CODE (t))
    {
    case PLUS_EXPR:
    case MINUS_EXPR:
      a1 = TREE_OPERAND (t, 0);
      a2 = TREE_OPERAND (t, 1);
      while (CONVERT_EXPR_P (a1))
        a1 = TREE_OPERAND (a1, 0);
      while (CONVERT_EXPR_P (a2))
        a2 = TREE_OPERAND (a2, 0);
      if (DECL_P (a1) && c_omp_is_loop_iterator (a1, d) >= 0)
        {
          a2 = TREE_OPERAND (t, 1);
          if (TREE_CODE (t) == MINUS_EXPR)
            neg_a2 = true;
          t = a1;
          break;
        }
      if (DECL_P (a2) && c_omp_is_loop_iterator (a2, d) >= 0)
        {
          a1 = TREE_OPERAND (t, 0);
          if (TREE_CODE (t) == MINUS_EXPR)
            neg_a1 = true;
          t = a2;
          a2 = a1;
          break;
        }
      if (TREE_CODE (a1) == MULT_EXPR && TREE_CODE (a2) == MULT_EXPR)
        {
          tree o1 = TREE_OPERAND (a1, 0);
          tree o2 = TREE_OPERAND (a1, 1);
          while (CONVERT_EXPR_P (o1))
            o1 = TREE_OPERAND (o1, 0);
          while (CONVERT_EXPR_P (o2))
            o2 = TREE_OPERAND (o2, 0);
          if ((DECL_P (o1) && c_omp_is_loop_iterator (o1, d) >= 0)
              || (DECL_P (o2) && c_omp_is_loop_iterator (o2, d) >= 0))
            {
              a2 = TREE_OPERAND (t, 1);
              if (TREE_CODE (t) == MINUS_EXPR)
                neg_a2 = true;
              t = a1;
              break;
            }
        }
      if (TREE_CODE (a2) == MULT_EXPR)
        {
          a1 = TREE_OPERAND (t, 0);
          if (TREE_CODE (t) == MINUS_EXPR)
            neg_a1 = true;
          t = a2;
          a2 = a1;
          break;
        }
      if (TREE_CODE (a1) == MULT_EXPR)
        {
          a2 = TREE_OPERAND (t, 1);
          if (TREE_CODE (t) == MINUS_EXPR)
            neg_a2 = true;
          t = a1;
          break;
        }
      a2 = integer_zero_node;
      break;
    default:
      break;
    }
  a1 = integer_one_node;
  if (TREE_CODE (t) == MULT_EXPR)
    {
      tree o1 = TREE_OPERAND (t, 0);
      tree o2 = TREE_OPERAND (t, 1);
      while (CONVERT_EXPR_P (o1))
        o1 = TREE_OPERAND (o1, 0);
      while (CONVERT_EXPR_P (o2))
        o2 = TREE_OPERAND (o2, 0);
      if (DECL_P (o1) && c_omp_is_loop_iterator (o1, d) >= 0)
        {
          a1 = TREE_OPERAND (t, 1);
          t = o1;
        }
      else if (DECL_P (o2) && c_omp_is_loop_iterator (o2, d) >= 0)
        {
          a1 = TREE_OPERAND (t, 0);
          t = o2;
        }
    }
  d->kind &= 3;
  tree ret = NULL_TREE;
  if (DECL_P (t) && c_omp_is_loop_iterator (t, d) >= 0)
    {
      location_t loc = d->expr_loc;
      if (loc == UNKNOWN_LOCATION)
        loc = d->stmt_loc;
      if (!lang_hooks.types_compatible_p (TREE_TYPE (*tp), TREE_TYPE (t)))
        {
          if (d->kind == 0)
            error_at (loc, "outer iteration variable %qD used in initializer"
                           " expression has type other than %qT",
                      t, TREE_TYPE (*tp));
          else
            error_at (loc, "outer iteration variable %qD used in condition"
                           " expression has type other than %qT",
                      t, TREE_TYPE (*tp));
          d->fail = true;
        }
      else if (!INTEGRAL_TYPE_P (TREE_TYPE (a1)))
        {
          error_at (loc, "outer iteration variable %qD multiplier expression"
                         " %qE is not integral", t, a1);
          d->fail = true;
        }
      else if (!INTEGRAL_TYPE_P (TREE_TYPE (a2)))
        {
          error_at (loc, "outer iteration variable %qD addend expression"
                         " %qE is not integral", t, a2);
          d->fail = true;
        }
      else
        {
          walk_tree_1 (&a1, c_omp_check_loop_iv_r, d, NULL, lh);
          walk_tree_1 (&a2, c_omp_check_loop_iv_r, d, NULL, lh);
        }
      if (!d->fail)
        {
          a1 = fold_convert (TREE_TYPE (*tp), a1);
          a2 = fold_convert (TREE_TYPE (*tp), a2);
          if (neg_a1)
            a1
= fold_build1 (NEGATE_EXPR, TREE_TYPE (a1), a1); if (neg_a2) a2 = fold_build1 (NEGATE_EXPR, TREE_TYPE (a2), a2); ret = t; *tp = make_tree_vec (3); TREE_VEC_ELT (*tp, 0) = t; TREE_VEC_ELT (*tp, 1) = a1; TREE_VEC_ELT (*tp, 2) = a2; } } else walk_tree_1 (&t, c_omp_check_loop_iv_r, d, NULL, lh); d->ppset = ppset; return ret; } /* Diagnose invalid references to loop iterators in lb, b and incr expressions. */ bool c_omp_check_loop_iv (tree stmt, tree declv, walk_tree_lh lh) { hash_set<tree> pset; struct c_omp_check_loop_iv_data data; int i; data.declv = declv; data.fail = false; data.maybe_nonrect = false; data.stmt_loc = EXPR_LOCATION (stmt); data.lh = lh; data.ppset = &pset; for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (stmt)); i++) { tree init = TREE_VEC_ELT (OMP_FOR_INIT (stmt), i); gcc_assert (TREE_CODE (init) == MODIFY_EXPR); tree decl = TREE_OPERAND (init, 0); tree cond = TREE_VEC_ELT (OMP_FOR_COND (stmt), i); gcc_assert (COMPARISON_CLASS_P (cond)); gcc_assert (TREE_OPERAND (cond, 0) == decl); tree incr = TREE_VEC_ELT (OMP_FOR_INCR (stmt), i); data.expr_loc = EXPR_LOCATION (TREE_OPERAND (init, 1)); tree vec_outer1 = NULL_TREE, vec_outer2 = NULL_TREE; int kind = 0; if (i > 0 && (unsigned) c_omp_is_loop_iterator (decl, &data) < (unsigned) i) { location_t loc = data.expr_loc; if (loc == UNKNOWN_LOCATION) loc = data.stmt_loc; error_at (loc, "the same loop iteration variables %qD used in " "multiple associated loops", decl); data.fail = true; } /* Handle non-rectangular loop nests. */ if (TREE_CODE (stmt) != OACC_LOOP && (TREE_CODE (TREE_OPERAND (init, 1)) == TREE_VEC || INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (init, 1)))) && i > 0) kind = 4; data.kind = kind; data.idx = i; walk_tree_1 (&TREE_OPERAND (init, 1), c_omp_check_loop_iv_r, &data, NULL, lh); if (data.maybe_nonrect) vec_outer1 = c_omp_check_nonrect_loop_iv (&TREE_OPERAND (init, 1), &data, lh); /* Don't warn for C++ random access iterators here, the expression then involves the subtraction and always refers to the original value. The C++ FE needs to warn on those earlier. */ if (decl == TREE_VEC_ELT (declv, i) || (TREE_CODE (TREE_VEC_ELT (declv, i)) == TREE_LIST && decl == TREE_PURPOSE (TREE_VEC_ELT (declv, i)))) { data.expr_loc = EXPR_LOCATION (cond); data.kind = kind | 1; walk_tree_1 (&TREE_OPERAND (cond, 1), c_omp_check_loop_iv_r, &data, NULL, lh); if (data.maybe_nonrect) vec_outer2 = c_omp_check_nonrect_loop_iv (&TREE_OPERAND (cond, 1), &data, lh); } if (vec_outer1 && vec_outer2 && vec_outer1 != vec_outer2) { location_t loc = data.expr_loc; if (loc == UNKNOWN_LOCATION) loc = data.stmt_loc; error_at (loc, "two different outer iteration variables %qD and %qD" " used in a single loop", vec_outer1, vec_outer2); data.fail = true; } if (vec_outer1 || vec_outer2) OMP_FOR_NON_RECTANGULAR (stmt) = 1; if (TREE_CODE (incr) == MODIFY_EXPR) { gcc_assert (TREE_OPERAND (incr, 0) == decl); incr = TREE_OPERAND (incr, 1); data.kind = 2; if (TREE_CODE (incr) == PLUS_EXPR && TREE_OPERAND (incr, 1) == decl) { data.expr_loc = EXPR_LOCATION (TREE_OPERAND (incr, 0)); walk_tree_1 (&TREE_OPERAND (incr, 0), c_omp_check_loop_iv_r, &data, NULL, lh); } else { data.expr_loc = EXPR_LOCATION (TREE_OPERAND (incr, 1)); walk_tree_1 (&TREE_OPERAND (incr, 1), c_omp_check_loop_iv_r, &data, NULL, lh); } } } return !data.fail; } /* Similar, but allows to check the init or cond expressions individually. 
*/ bool c_omp_check_loop_iv_exprs (location_t stmt_loc, tree declv, int i, tree decl, tree init, tree cond, walk_tree_lh lh) { hash_set<tree> pset; struct c_omp_check_loop_iv_data data; data.declv = declv; data.fail = false; data.maybe_nonrect = false; data.stmt_loc = stmt_loc; data.lh = lh; data.ppset = &pset; data.idx = i; if (i > 0 && (unsigned) c_omp_is_loop_iterator (decl, &data) < (unsigned) i) { error_at (stmt_loc, "the same loop iteration variables %qD used in " "multiple associated loops", decl); data.fail = true; } if (init) { data.expr_loc = EXPR_LOCATION (init); data.kind = 0; walk_tree_1 (&init, c_omp_check_loop_iv_r, &data, NULL, lh); } if (cond) { gcc_assert (COMPARISON_CLASS_P (cond)); data.expr_loc = EXPR_LOCATION (init); data.kind = 1; if (TREE_OPERAND (cond, 0) == decl) walk_tree_1 (&TREE_OPERAND (cond, 1), c_omp_check_loop_iv_r, &data, NULL, lh); else walk_tree_1 (&TREE_OPERAND (cond, 0), c_omp_check_loop_iv_r, &data, NULL, lh); } return !data.fail; } /* This function splits clauses for OpenACC combined loop constructs. OpenACC combined loop constructs are: #pragma acc kernels loop #pragma acc parallel loop */ tree c_oacc_split_loop_clauses (tree clauses, tree *not_loop_clauses, bool is_parallel) { tree next, loop_clauses, nc; loop_clauses = *not_loop_clauses = NULL_TREE; for (; clauses ; clauses = next) { next = OMP_CLAUSE_CHAIN (clauses); switch (OMP_CLAUSE_CODE (clauses)) { /* Loop clauses. */ case OMP_CLAUSE_COLLAPSE: case OMP_CLAUSE_TILE: case OMP_CLAUSE_GANG: case OMP_CLAUSE_WORKER: case OMP_CLAUSE_VECTOR: case OMP_CLAUSE_AUTO: case OMP_CLAUSE_SEQ: case OMP_CLAUSE_INDEPENDENT: case OMP_CLAUSE_PRIVATE: OMP_CLAUSE_CHAIN (clauses) = loop_clauses; loop_clauses = clauses; break; /* Reductions must be duplicated on both constructs. */ case OMP_CLAUSE_REDUCTION: if (is_parallel) { nc = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_REDUCTION); OMP_CLAUSE_DECL (nc) = OMP_CLAUSE_DECL (clauses); OMP_CLAUSE_REDUCTION_CODE (nc) = OMP_CLAUSE_REDUCTION_CODE (clauses); OMP_CLAUSE_CHAIN (nc) = *not_loop_clauses; *not_loop_clauses = nc; } OMP_CLAUSE_CHAIN (clauses) = loop_clauses; loop_clauses = clauses; break; /* Parallel/kernels clauses. */ default: OMP_CLAUSE_CHAIN (clauses) = *not_loop_clauses; *not_loop_clauses = clauses; break; } } return loop_clauses; } /* This function attempts to split or duplicate clauses for OpenMP combined/composite constructs. Right now there are 30 different constructs. CODE is the innermost construct in the combined construct, and MASK allows to determine which constructs are combined together, as every construct has at least one clause that no other construct has (except for OMP_SECTIONS, but that can be only combined with parallel, and OMP_MASTER, which doesn't have any clauses at all). 
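   For instance, in an illustrative
   "#pragma omp target teams distribute parallel for simd" a map clause
   can only come from target, num_teams from teams, dist_schedule from
   distribute, num_threads from parallel and schedule from the for
   construct, which is exactly how MASK is interrogated below.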
OpenMP combined/composite constructs are: #pragma omp distribute parallel for #pragma omp distribute parallel for simd #pragma omp distribute simd #pragma omp for simd #pragma omp master taskloop #pragma omp master taskloop simd #pragma omp parallel for #pragma omp parallel for simd #pragma omp parallel loop #pragma omp parallel master #pragma omp parallel master taskloop #pragma omp parallel master taskloop simd #pragma omp parallel sections #pragma omp target parallel #pragma omp target parallel for #pragma omp target parallel for simd #pragma omp target parallel loop #pragma omp target teams #pragma omp target teams distribute #pragma omp target teams distribute parallel for #pragma omp target teams distribute parallel for simd #pragma omp target teams distribute simd #pragma omp target teams loop #pragma omp target simd #pragma omp taskloop simd #pragma omp teams distribute #pragma omp teams distribute parallel for #pragma omp teams distribute parallel for simd #pragma omp teams distribute simd #pragma omp teams loop */ void c_omp_split_clauses (location_t loc, enum tree_code code, omp_clause_mask mask, tree clauses, tree *cclauses) { tree next, c; enum c_omp_clause_split s; int i; for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++) cclauses[i] = NULL; /* Add implicit nowait clause on #pragma omp parallel {for,for simd,sections}. */ if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0) switch (code) { case OMP_FOR: case OMP_SIMD: if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0) cclauses[C_OMP_CLAUSE_SPLIT_FOR] = build_omp_clause (loc, OMP_CLAUSE_NOWAIT); break; case OMP_SECTIONS: cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS] = build_omp_clause (loc, OMP_CLAUSE_NOWAIT); break; default: break; } for (; clauses ; clauses = next) { next = OMP_CLAUSE_CHAIN (clauses); switch (OMP_CLAUSE_CODE (clauses)) { /* First the clauses that are unique to some constructs. */ case OMP_CLAUSE_DEVICE: case OMP_CLAUSE_MAP: case OMP_CLAUSE_IS_DEVICE_PTR: case OMP_CLAUSE_DEFAULTMAP: case OMP_CLAUSE_DEPEND: s = C_OMP_CLAUSE_SPLIT_TARGET; break; case OMP_CLAUSE_NUM_TEAMS: case OMP_CLAUSE_THREAD_LIMIT: s = C_OMP_CLAUSE_SPLIT_TEAMS; break; case OMP_CLAUSE_DIST_SCHEDULE: s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break; case OMP_CLAUSE_COPYIN: case OMP_CLAUSE_NUM_THREADS: case OMP_CLAUSE_PROC_BIND: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break; case OMP_CLAUSE_ORDERED: s = C_OMP_CLAUSE_SPLIT_FOR; break; case OMP_CLAUSE_SCHEDULE: s = C_OMP_CLAUSE_SPLIT_FOR; if (code != OMP_SIMD) OMP_CLAUSE_SCHEDULE_SIMD (clauses) = 0; break; case OMP_CLAUSE_SAFELEN: case OMP_CLAUSE_SIMDLEN: case OMP_CLAUSE_ALIGNED: case OMP_CLAUSE_NONTEMPORAL: s = C_OMP_CLAUSE_SPLIT_SIMD; break; case OMP_CLAUSE_GRAINSIZE: case OMP_CLAUSE_NUM_TASKS: case OMP_CLAUSE_FINAL: case OMP_CLAUSE_UNTIED: case OMP_CLAUSE_MERGEABLE: case OMP_CLAUSE_NOGROUP: case OMP_CLAUSE_PRIORITY: s = C_OMP_CLAUSE_SPLIT_TASKLOOP; break; case OMP_CLAUSE_BIND: s = C_OMP_CLAUSE_SPLIT_LOOP; break; /* Duplicate this to all of taskloop, distribute, for, simd and loop. 
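   (For an illustrative "#pragma omp distribute parallel for simd
   collapse(2)", copies of the collapse clause are chained onto the
   distribute, the for and the simd sub-constructs below.)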
*/ case OMP_CLAUSE_COLLAPSE: if (code == OMP_SIMD) { if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE) | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE) | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))) != 0) { c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_COLLAPSE); OMP_CLAUSE_COLLAPSE_EXPR (c) = OMP_CLAUSE_COLLAPSE_EXPR (clauses); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD]; cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c; } else { /* This must be #pragma omp target simd */ s = C_OMP_CLAUSE_SPLIT_SIMD; break; } } if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0) { if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0) { c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_COLLAPSE); OMP_CLAUSE_COLLAPSE_EXPR (c) = OMP_CLAUSE_COLLAPSE_EXPR (clauses); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR]; cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c; s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; } else s = C_OMP_CLAUSE_SPLIT_FOR; } else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0) s = C_OMP_CLAUSE_SPLIT_TASKLOOP; else if (code == OMP_LOOP) s = C_OMP_CLAUSE_SPLIT_LOOP; else s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break; /* Private clause is supported on all constructs but master, it is enough to put it on the innermost one other than master. For #pragma omp {for,sections} put it on parallel though, as that's what we did for OpenMP 3.1. */ case OMP_CLAUSE_PRIVATE: switch (code) { case OMP_SIMD: s = C_OMP_CLAUSE_SPLIT_SIMD; break; case OMP_FOR: case OMP_SECTIONS: case OMP_PARALLEL: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break; case OMP_DISTRIBUTE: s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break; case OMP_TEAMS: s = C_OMP_CLAUSE_SPLIT_TEAMS; break; case OMP_MASTER: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break; case OMP_TASKLOOP: s = C_OMP_CLAUSE_SPLIT_TASKLOOP; break; case OMP_LOOP: s = C_OMP_CLAUSE_SPLIT_LOOP; break; default: gcc_unreachable (); } break; /* Firstprivate clause is supported on all constructs but simd, master and loop. Put it on the outermost of those and duplicate on teams and parallel. */ case OMP_CLAUSE_FIRSTPRIVATE: if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) != 0) { if (code == OMP_SIMD && (mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS) | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))) == 0) { /* This must be #pragma omp target simd. */ s = C_OMP_CLAUSE_SPLIT_TARGET; break; } c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET]; cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c; } if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0) { if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS) | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE))) != 0) { c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL]; cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c; if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0) s = C_OMP_CLAUSE_SPLIT_TEAMS; else s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; } else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0) /* This must be #pragma omp parallel master taskloop{, simd}. */ s = C_OMP_CLAUSE_SPLIT_TASKLOOP; else /* This must be #pragma omp parallel{, for{, simd}, sections,loop} or #pragma omp target parallel. 
*/ s = C_OMP_CLAUSE_SPLIT_PARALLEL; } else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0) { /* This must be one of #pragma omp {,target }teams {distribute,loop} #pragma omp target teams #pragma omp {,target }teams distribute simd. */ gcc_assert (code == OMP_DISTRIBUTE || code == OMP_LOOP || code == OMP_TEAMS || code == OMP_SIMD); s = C_OMP_CLAUSE_SPLIT_TEAMS; } else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0) { /* This must be #pragma omp distribute simd. */ gcc_assert (code == OMP_SIMD); s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; } else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0) { /* This must be #pragma omp {,{,parallel }master }taskloop simd or #pragma omp {,parallel }master taskloop. */ gcc_assert (code == OMP_SIMD || code == OMP_TASKLOOP); s = C_OMP_CLAUSE_SPLIT_TASKLOOP; } else { /* This must be #pragma omp for simd. */ gcc_assert (code == OMP_SIMD); s = C_OMP_CLAUSE_SPLIT_FOR; } break; /* Lastprivate is allowed on distribute, for, sections, taskloop, loop and simd. In parallel {for{, simd},sections} we actually want to put it on parallel rather than for or sections. */ case OMP_CLAUSE_LASTPRIVATE: if (code == OMP_DISTRIBUTE) { s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break; } if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0) { c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_LASTPRIVATE); OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE]; OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c) = OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (clauses); cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] = c; } if (code == OMP_FOR || code == OMP_SECTIONS) { if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0) s = C_OMP_CLAUSE_SPLIT_PARALLEL; else s = C_OMP_CLAUSE_SPLIT_FOR; break; } if (code == OMP_TASKLOOP) { s = C_OMP_CLAUSE_SPLIT_TASKLOOP; break; } if (code == OMP_LOOP) { s = C_OMP_CLAUSE_SPLIT_LOOP; break; } gcc_assert (code == OMP_SIMD); if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0) { c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_LASTPRIVATE); OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c) = OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (clauses); if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0) s = C_OMP_CLAUSE_SPLIT_PARALLEL; else s = C_OMP_CLAUSE_SPLIT_FOR; OMP_CLAUSE_CHAIN (c) = cclauses[s]; cclauses[s] = c; } if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0) { c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_LASTPRIVATE); OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c) = OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (clauses); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP]; cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP] = c; } s = C_OMP_CLAUSE_SPLIT_SIMD; break; /* Shared and default clauses are allowed on parallel, teams and taskloop. 
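   When several of those constructs are combined, the clause is
   duplicated so that each combined construct that accepts it receives
   a copy, as the mask tests below implement.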
*/ case OMP_CLAUSE_SHARED: case OMP_CLAUSE_DEFAULT: if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0) { if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0) { c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_CODE (clauses)); if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED) OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); else OMP_CLAUSE_DEFAULT_KIND (c) = OMP_CLAUSE_DEFAULT_KIND (clauses); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL]; cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c; } s = C_OMP_CLAUSE_SPLIT_TASKLOOP; break; } if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0) { if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0) { s = C_OMP_CLAUSE_SPLIT_TEAMS; break; } c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_CODE (clauses)); if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED) OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); else OMP_CLAUSE_DEFAULT_KIND (c) = OMP_CLAUSE_DEFAULT_KIND (clauses); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS]; cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c; } s = C_OMP_CLAUSE_SPLIT_PARALLEL; break; /* order clauses are allowed on for, simd and loop. */ case OMP_CLAUSE_ORDER: if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0) { if (code == OMP_SIMD) { c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_ORDER); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR]; cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c; s = C_OMP_CLAUSE_SPLIT_SIMD; } else s = C_OMP_CLAUSE_SPLIT_FOR; } else if (code == OMP_LOOP) s = C_OMP_CLAUSE_SPLIT_LOOP; else s = C_OMP_CLAUSE_SPLIT_SIMD; break; /* Reduction is allowed on simd, for, parallel, sections, taskloop, teams and loop. Duplicate it on all of them, but omit on for or sections if parallel is present (unless inscan, in that case omit on parallel). If taskloop or loop is combined with parallel, omit it on parallel. 
*/ case OMP_CLAUSE_REDUCTION: if (OMP_CLAUSE_REDUCTION_TASK (clauses)) { if (code == OMP_SIMD || code == OMP_LOOP) { error_at (OMP_CLAUSE_LOCATION (clauses), "invalid %<task%> reduction modifier on construct " "combined with %<simd%> or %<loop%>"); OMP_CLAUSE_REDUCTION_TASK (clauses) = 0; } else if (code != OMP_SECTIONS && (mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0 && (mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) == 0) { error_at (OMP_CLAUSE_LOCATION (clauses), "invalid %<task%> reduction modifier on construct " "not combined with %<parallel%>, %<for%> or " "%<sections%>"); OMP_CLAUSE_REDUCTION_TASK (clauses) = 0; } } if (OMP_CLAUSE_REDUCTION_INSCAN (clauses) && ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE) | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))) != 0)) { error_at (OMP_CLAUSE_LOCATION (clauses), "%<inscan%> %<reduction%> clause on construct other " "than %<for%>, %<simd%>, %<for simd%>, " "%<parallel for%>, %<parallel for simd%>"); OMP_CLAUSE_REDUCTION_INSCAN (clauses) = 0; } if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0) { if (code == OMP_SIMD) { c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_REDUCTION); OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); OMP_CLAUSE_REDUCTION_CODE (c) = OMP_CLAUSE_REDUCTION_CODE (clauses); OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses); OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c) = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses); OMP_CLAUSE_REDUCTION_INSCAN (c) = OMP_CLAUSE_REDUCTION_INSCAN (clauses); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD]; cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c; } if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0) { c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_REDUCTION); OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); OMP_CLAUSE_REDUCTION_CODE (c) = OMP_CLAUSE_REDUCTION_CODE (clauses); OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses); OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c) = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses); OMP_CLAUSE_REDUCTION_INSCAN (c) = OMP_CLAUSE_REDUCTION_INSCAN (clauses); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS]; cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c; s = C_OMP_CLAUSE_SPLIT_PARALLEL; } else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0 && !OMP_CLAUSE_REDUCTION_INSCAN (clauses)) s = C_OMP_CLAUSE_SPLIT_PARALLEL; else s = C_OMP_CLAUSE_SPLIT_FOR; } else if (code == OMP_SECTIONS || code == OMP_PARALLEL || code == OMP_MASTER) s = C_OMP_CLAUSE_SPLIT_PARALLEL; else if (code == OMP_TASKLOOP) s = C_OMP_CLAUSE_SPLIT_TASKLOOP; else if (code == OMP_LOOP) s = C_OMP_CLAUSE_SPLIT_LOOP; else if (code == OMP_SIMD) { if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0) { c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_REDUCTION); OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); OMP_CLAUSE_REDUCTION_CODE (c) = OMP_CLAUSE_REDUCTION_CODE (clauses); OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses); OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c) = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses); OMP_CLAUSE_REDUCTION_INSCAN (c) = OMP_CLAUSE_REDUCTION_INSCAN (clauses); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP]; cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP] = c; } s = C_OMP_CLAUSE_SPLIT_SIMD; } else s = C_OMP_CLAUSE_SPLIT_TEAMS; break; case OMP_CLAUSE_IN_REDUCTION: /* in_reduction on taskloop simd becomes 
reduction on the simd and keeps being in_reduction on taskloop. */ if (code == OMP_SIMD) { c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_REDUCTION); OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); OMP_CLAUSE_REDUCTION_CODE (c) = OMP_CLAUSE_REDUCTION_CODE (clauses); OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses); OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c) = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD]; cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c; } s = C_OMP_CLAUSE_SPLIT_TASKLOOP; break; case OMP_CLAUSE_IF: if (OMP_CLAUSE_IF_MODIFIER (clauses) != ERROR_MARK) { s = C_OMP_CLAUSE_SPLIT_COUNT; switch (OMP_CLAUSE_IF_MODIFIER (clauses)) { case OMP_PARALLEL: if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0) s = C_OMP_CLAUSE_SPLIT_PARALLEL; break; case OMP_SIMD: if (code == OMP_SIMD) s = C_OMP_CLAUSE_SPLIT_SIMD; break; case OMP_TASKLOOP: if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0) s = C_OMP_CLAUSE_SPLIT_TASKLOOP; break; case OMP_TARGET: if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) != 0) s = C_OMP_CLAUSE_SPLIT_TARGET; break; default: break; } if (s != C_OMP_CLAUSE_SPLIT_COUNT) break; /* Error-recovery here, invalid if-modifier specified, add the clause to just one construct. */ if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) != 0) s = C_OMP_CLAUSE_SPLIT_TARGET; else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0) s = C_OMP_CLAUSE_SPLIT_PARALLEL; else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0) s = C_OMP_CLAUSE_SPLIT_TASKLOOP; else if (code == OMP_SIMD) s = C_OMP_CLAUSE_SPLIT_SIMD; else gcc_unreachable (); break; } /* Otherwise, duplicate if clause to all constructs. */ if (code == OMP_SIMD) { if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP) | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS) | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))) != 0) { c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_IF); OMP_CLAUSE_IF_MODIFIER (c) = OMP_CLAUSE_IF_MODIFIER (clauses); OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD]; cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c; } else { s = C_OMP_CLAUSE_SPLIT_SIMD; break; } } if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0) { if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0) { c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_IF); OMP_CLAUSE_IF_MODIFIER (c) = OMP_CLAUSE_IF_MODIFIER (clauses); OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP]; cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP] = c; s = C_OMP_CLAUSE_SPLIT_PARALLEL; } else s = C_OMP_CLAUSE_SPLIT_TASKLOOP; } else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0) { if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) != 0) { c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_IF); OMP_CLAUSE_IF_MODIFIER (c) = OMP_CLAUSE_IF_MODIFIER (clauses); OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET]; cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c; s = C_OMP_CLAUSE_SPLIT_PARALLEL; } else s = C_OMP_CLAUSE_SPLIT_PARALLEL; } else s = C_OMP_CLAUSE_SPLIT_TARGET; break; case OMP_CLAUSE_LINEAR: /* Linear clause is allowed on simd and for. Put it on the innermost construct. 
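   (So for an illustrative "#pragma omp parallel for simd linear(i:1)"
   the clause ends up on just the simd construct.)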
*/ if (code == OMP_SIMD) s = C_OMP_CLAUSE_SPLIT_SIMD; else s = C_OMP_CLAUSE_SPLIT_FOR; break; case OMP_CLAUSE_NOWAIT: /* Nowait clause is allowed on target, for and sections, but is not allowed on parallel for or parallel sections. Therefore, put it on target construct if present, because that can only be combined with parallel for{, simd} and not with for{, simd}, otherwise to the worksharing construct. */ if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) != 0) s = C_OMP_CLAUSE_SPLIT_TARGET; else s = C_OMP_CLAUSE_SPLIT_FOR; break; default: gcc_unreachable (); } OMP_CLAUSE_CHAIN (clauses) = cclauses[s]; cclauses[s] = clauses; } if (!flag_checking) return; if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) == 0) gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TARGET] == NULL_TREE); if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) == 0) gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] == NULL_TREE); if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) == 0) gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] == NULL_TREE); if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0) gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] == NULL_TREE); if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE) | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))) == 0 && code != OMP_SECTIONS && code != OMP_LOOP) gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_FOR] == NULL_TREE); if (code != OMP_SIMD) gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_SIMD] == NULL_TREE); } /* qsort callback to compare #pragma omp declare simd clauses. */ static int c_omp_declare_simd_clause_cmp (const void *p, const void *q) { tree a = *(const tree *) p; tree b = *(const tree *) q; if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_CODE (b)) { if (OMP_CLAUSE_CODE (a) > OMP_CLAUSE_CODE (b)) return -1; return 1; } if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_SIMDLEN && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_INBRANCH && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_NOTINBRANCH) { int c = tree_to_shwi (OMP_CLAUSE_DECL (a)); int d = tree_to_shwi (OMP_CLAUSE_DECL (b)); if (c < d) return 1; if (c > d) return -1; } return 0; } /* Change PARM_DECLs in OMP_CLAUSE_DECL of #pragma omp declare simd CLAUSES on FNDECL into argument indexes and sort them. */ tree c_omp_declare_simd_clauses_to_numbers (tree parms, tree clauses) { tree c; vec<tree> clvec = vNULL; for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) { if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH) { tree decl = OMP_CLAUSE_DECL (c); tree arg; int idx; for (arg = parms, idx = 0; arg; arg = TREE_CHAIN (arg), idx++) if (arg == decl) break; if (arg == NULL_TREE) { error_at (OMP_CLAUSE_LOCATION (c), "%qD is not a function argument", decl); continue; } OMP_CLAUSE_DECL (c) = build_int_cst (integer_type_node, idx); if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c)) { decl = OMP_CLAUSE_LINEAR_STEP (c); for (arg = parms, idx = 0; arg; arg = TREE_CHAIN (arg), idx++) if (arg == decl) break; if (arg == NULL_TREE) { error_at (OMP_CLAUSE_LOCATION (c), "%qD is not a function argument", decl); continue; } OMP_CLAUSE_LINEAR_STEP (c) = build_int_cst (integer_type_node, idx); } } clvec.safe_push (c); } if (!clvec.is_empty ()) { unsigned int len = clvec.length (), i; clvec.qsort (c_omp_declare_simd_clause_cmp); clauses = clvec[0]; for (i = 0; i < len; i++) OMP_CLAUSE_CHAIN (clvec[i]) = (i < len - 1) ? 
clvec[i + 1] : NULL_TREE; } else clauses = NULL_TREE; clvec.release (); return clauses; } /* Change argument indexes in CLAUSES of FNDECL back to PARM_DECLs. */ void c_omp_declare_simd_clauses_to_decls (tree fndecl, tree clauses) { tree c; for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH) { int idx = tree_to_shwi (OMP_CLAUSE_DECL (c)), i; tree arg; for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg; arg = TREE_CHAIN (arg), i++) if (i == idx) break; gcc_assert (arg); OMP_CLAUSE_DECL (c) = arg; if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c)) { idx = tree_to_shwi (OMP_CLAUSE_LINEAR_STEP (c)); for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg; arg = TREE_CHAIN (arg), i++) if (i == idx) break; gcc_assert (arg); OMP_CLAUSE_LINEAR_STEP (c) = arg; } } } /* Return true for __func__ and similar function-local predefined variables (which are in OpenMP predetermined shared, allowed in shared/firstprivate clauses). */ bool c_omp_predefined_variable (tree decl) { if (VAR_P (decl) && DECL_ARTIFICIAL (decl) && TREE_READONLY (decl) && TREE_STATIC (decl) && DECL_NAME (decl) && (DECL_NAME (decl) == ridpointers[RID_C99_FUNCTION_NAME] || DECL_NAME (decl) == ridpointers[RID_FUNCTION_NAME] || DECL_NAME (decl) == ridpointers[RID_PRETTY_FUNCTION_NAME])) return true; return false; } /* OMP_CLAUSE_DEFAULT_UNSPECIFIED unless OpenMP sharing attribute of DECL is predetermined. */ enum omp_clause_default_kind c_omp_predetermined_sharing (tree decl) { /* Predetermine artificial variables holding integral values, those are usually result of gimplify_one_sizepos or SAVE_EXPR gimplification. */ if (VAR_P (decl) && DECL_ARTIFICIAL (decl) && INTEGRAL_TYPE_P (TREE_TYPE (decl))) return OMP_CLAUSE_DEFAULT_SHARED; if (c_omp_predefined_variable (decl)) return OMP_CLAUSE_DEFAULT_SHARED; return OMP_CLAUSE_DEFAULT_UNSPECIFIED; } /* OMP_CLAUSE_DEFAULTMAP_CATEGORY_UNSPECIFIED unless OpenMP mapping attribute of DECL is predetermined. */ enum omp_clause_defaultmap_kind c_omp_predetermined_mapping (tree decl) { /* Predetermine artificial variables holding integral values, those are usually result of gimplify_one_sizepos or SAVE_EXPR gimplification. */ if (VAR_P (decl) && DECL_ARTIFICIAL (decl) && INTEGRAL_TYPE_P (TREE_TYPE (decl))) return OMP_CLAUSE_DEFAULTMAP_FIRSTPRIVATE; if (c_omp_predefined_variable (decl)) return OMP_CLAUSE_DEFAULTMAP_TO; return OMP_CLAUSE_DEFAULTMAP_CATEGORY_UNSPECIFIED; } /* Diagnose errors in an OpenMP context selector, return CTX if it is correct or error_mark_node otherwise. */ tree c_omp_check_context_selector (location_t loc, tree ctx) { /* Each trait-set-selector-name can only be specified once. There are just 4 set names. */ for (tree t1 = ctx; t1; t1 = TREE_CHAIN (t1)) for (tree t2 = TREE_CHAIN (t1); t2; t2 = TREE_CHAIN (t2)) if (TREE_PURPOSE (t1) == TREE_PURPOSE (t2)) { error_at (loc, "selector set %qs specified more than once", IDENTIFIER_POINTER (TREE_PURPOSE (t1))); return error_mark_node; } for (tree t = ctx; t; t = TREE_CHAIN (t)) { /* Each trait-selector-name can only be specified once. 
*/ if (list_length (TREE_VALUE (t)) < 5) { for (tree t1 = TREE_VALUE (t); t1; t1 = TREE_CHAIN (t1)) for (tree t2 = TREE_CHAIN (t1); t2; t2 = TREE_CHAIN (t2)) if (TREE_PURPOSE (t1) == TREE_PURPOSE (t2)) { error_at (loc, "selector %qs specified more than once in set %qs", IDENTIFIER_POINTER (TREE_PURPOSE (t1)), IDENTIFIER_POINTER (TREE_PURPOSE (t))); return error_mark_node; } } else { hash_set<tree> pset; for (tree t1 = TREE_VALUE (t); t1; t1 = TREE_CHAIN (t1)) if (pset.add (TREE_PURPOSE (t1))) { error_at (loc, "selector %qs specified more than once in set %qs", IDENTIFIER_POINTER (TREE_PURPOSE (t1)), IDENTIFIER_POINTER (TREE_PURPOSE (t))); return error_mark_node; } } static const char *const kind[] = { "host", "nohost", "cpu", "gpu", "fpga", "any", NULL }; static const char *const vendor[] = { "amd", "arm", "bsc", "cray", "fujitsu", "gnu", "ibm", "intel", "llvm", "nvidia", "pgi", "ti", "unknown", NULL }; static const char *const extension[] = { NULL }; static const char *const atomic_default_mem_order[] = { "seq_cst", "relaxed", "acq_rel", NULL }; struct known_properties { const char *set; const char *selector; const char *const *props; }; known_properties props[] = { { "device", "kind", kind }, { "implementation", "vendor", vendor }, { "implementation", "extension", extension }, { "implementation", "atomic_default_mem_order", atomic_default_mem_order } }; for (tree t1 = TREE_VALUE (t); t1; t1 = TREE_CHAIN (t1)) for (unsigned i = 0; i < ARRAY_SIZE (props); i++) if (!strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t1)), props[i].selector) && !strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t)), props[i].set)) for (tree t2 = TREE_VALUE (t1); t2; t2 = TREE_CHAIN (t2)) for (unsigned j = 0; ; j++) { if (props[i].props[j] == NULL) { if (TREE_PURPOSE (t2) && !strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t2)), " score")) break; if (props[i].props == atomic_default_mem_order) { error_at (loc, "incorrect property %qs of %qs selector", IDENTIFIER_POINTER (TREE_PURPOSE (t2)), "atomic_default_mem_order"); return error_mark_node; } else if (TREE_PURPOSE (t2)) warning_at (loc, 0, "unknown property %qs of %qs selector", IDENTIFIER_POINTER (TREE_PURPOSE (t2)), props[i].selector); else warning_at (loc, 0, "unknown property %qE of %qs selector", TREE_VALUE (t2), props[i].selector); break; } else if (TREE_PURPOSE (t2) == NULL_TREE) { const char *str = TREE_STRING_POINTER (TREE_VALUE (t2)); if (!strcmp (str, props[i].props[j]) && ((size_t) TREE_STRING_LENGTH (TREE_VALUE (t2)) == strlen (str) + 1)) break; } else if (!strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t2)), props[i].props[j])) break; } } return ctx; } /* Register VARIANT as variant of some base function marked with #pragma omp declare variant. CONSTRUCT is corresponding construct selector set. */ void c_omp_mark_declare_variant (location_t loc, tree variant, tree construct) { tree attr = lookup_attribute ("omp declare variant variant", DECL_ATTRIBUTES (variant)); if (attr == NULL_TREE) { attr = tree_cons (get_identifier ("omp declare variant variant"), unshare_expr (construct), DECL_ATTRIBUTES (variant)); DECL_ATTRIBUTES (variant) = attr; return; } if ((TREE_VALUE (attr) != NULL_TREE) != (construct != NULL_TREE) || (construct != NULL_TREE && omp_context_selector_set_compare ("construct", TREE_VALUE (attr), construct))) error_at (loc, "%qD used as a variant with incompatible %<construct%> " "selector sets", variant); } /* For OpenACC, the OMP_CLAUSE_MAP_KIND of an OMP_CLAUSE_MAP is used internally to distinguish clauses as seen by the user. 
Return the "friendly" clause name for error messages etc., where possible. See also c/c-parser.c:c_parser_oacc_data_clause and cp/parser.c:cp_parser_oacc_data_clause. */ const char * c_omp_map_clause_name (tree clause, bool oacc) { if (oacc && OMP_CLAUSE_CODE (clause) == OMP_CLAUSE_MAP) switch (OMP_CLAUSE_MAP_KIND (clause)) { case GOMP_MAP_FORCE_ALLOC: case GOMP_MAP_ALLOC: return "create"; case GOMP_MAP_FORCE_TO: case GOMP_MAP_TO: return "copyin"; case GOMP_MAP_FORCE_FROM: case GOMP_MAP_FROM: return "copyout"; case GOMP_MAP_FORCE_TOFROM: case GOMP_MAP_TOFROM: return "copy"; case GOMP_MAP_RELEASE: return "delete"; case GOMP_MAP_FORCE_PRESENT: return "present"; case GOMP_MAP_ATTACH: return "attach"; case GOMP_MAP_FORCE_DETACH: case GOMP_MAP_DETACH: return "detach"; case GOMP_MAP_DEVICE_RESIDENT: return "device_resident"; case GOMP_MAP_LINK: return "link"; case GOMP_MAP_FORCE_DEVICEPTR: return "deviceptr"; default: break; } return omp_clause_code_name[OMP_CLAUSE_CODE (clause)]; }
trmm_x_sky_u_lo_row.c
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" #include <memory.h> alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_SKY *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy) { ALPHA_INT num_threads = alpha_get_thread_num(); #ifdef _OPENMP #pragma omp parallel for num_threads(num_threads) #endif for (ALPHA_INT i = 0; i < mat->rows; i++) for(ALPHA_INT j = 0; j < columns; j++) alpha_mul(y[index2(i, j, ldy)], y[index2(i, j, ldy)], beta); #ifdef _OPENMP #pragma omp parallel for num_threads(num_threads) #endif for (ALPHA_INT cc = 0; cc < columns; ++cc) { for (ALPHA_INT cr = 0; cr < mat->rows; ++cr) { ALPHA_INT start = mat->pointers[cr]; ALPHA_INT end = mat->pointers[cr + 1]; ALPHA_INT idx = 1; ALPHA_INT eles_num = end - start; for (ALPHA_INT ai = start; ai < end; ++ai) { ALPHA_INT ac = cr - eles_num + idx; if (ac < cr) { ALPHA_Number t; alpha_mul(t, alpha, mat->values[ai]); alpha_madde(y[index2(cr, cc, ldy)], t, x[index2(ac, cc, ldx)]); } else if(ac == cr) alpha_madde(y[index2(cr, cc, ldy)], alpha, x[index2(ac, cc, ldx)]); idx++; } } } return ALPHA_SPARSE_STATUS_SUCCESS; }
mkl_util.h
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
#define TENSORFLOW_CORE_UTIL_MKL_UTIL_H_

#ifdef INTEL_MKL

#include <string>
#include <memory>
#include <unordered_map>
#include <utility>
#include <vector>

#if defined(INTEL_MKL_ML_ONLY) || defined(INTEL_MKL_DNN_ONLY)
#ifndef INTEL_MKL
#error "INTEL_MKL_{ML,DNN}_ONLY require INTEL_MKL"
#endif
#endif

#if defined(INTEL_MKL_ML_ONLY) && defined(INTEL_MKL_DNN_ONLY)
#error "at most one of INTEL_MKL_ML_ONLY and INTEL_MKL_DNN_ONLY may be defined"
#endif

#ifdef INTEL_MKL_ML_ONLY
#error \
    "Compiling for INTEL MKL ML only is no longer supported. Please use MKL DNN (the default option for --config=mkl)"
#endif

#ifdef INTEL_MKL_ML_ONLY
#include "mkl_dnn.h"
#include "mkl_dnn_types.h"
#include "mkl_service.h"
#include "mkl_trans.h"
#endif

#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/graph/mkl_graph_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/util/padding.h"
#include "tensorflow/core/util/tensor_format.h"
#include "tensorflow/core/util/env_var.h"

#ifndef INTEL_MKL_ML_ONLY
#include "mkldnn.hpp"
#include "tensorflow/core/lib/core/stringpiece.h"

using mkldnn::engine;
using mkldnn::memory;
using mkldnn::padding_kind;
using mkldnn::primitive;
using mkldnn::reorder;
#endif

#ifdef _WIN32
typedef unsigned int uint;
#endif

namespace tensorflow {

// This file contains a number of utility classes and functions used by
// MKL-enabled kernels.

// This class encapsulates all the metadata that is associated with an MKL
// tensor. A tensor is an MKL tensor if it was created as the result of an
// MKL operation, and did not go through a conversion to a standard
// TensorFlow tensor.
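// Illustrative sketch (not upstream code): kernels in this scheme carry a
// *pair* of tensors per logical input/output -- the data tensor plus a small
// uint8 tensor holding the serialized shape/layout metadata.  A typical
// kernel therefore reads and writes both halves, roughly like the
// hypothetical fragment below (GetMklShape, MklGetInput and
// AllocateOutputSetMklShape are the real helpers defined later in this file):
//
//   MklDnnShape in_shape;
//   GetMklShape(ctx, 0, &in_shape);           // metadata half of input 0
//   TensorShape tf_shape = in_shape.IsMklTensor()
//                              ? in_shape.GetTfShape()
//                              : MklGetInput(ctx, 0).shape();
//   MklDnnShape out_shape;
//   out_shape.SetMklTensor(false);            // output in plain TF layout
//   Tensor* out = nullptr;
//   AllocateOutputSetMklShape(ctx, 0, &out, tf_shape, out_shape);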
typedef enum { W = 0, H = 1, C = 2, N = 3 } MklDims; typedef enum { Dim_N = 0, Dim_C = 1, Dim_H = 2, Dim_W = 3, Dim_O = 0, Dim_I = 1 } MklDnnDims; typedef enum { Dim3d_N = 0, Dim3d_C = 1, Dim3d_D = 2, Dim3d_H = 3, Dim3d_W = 4, Dim3d_O = 0, Dim3d_I = 1 } MklDnnDims3D; static const int kSmallBatchSize = 32; #ifdef INTEL_MKL_ML_ONLY class MklShape { public: MklShape() {} TF_DISALLOW_COPY_AND_ASSIGN(MklShape); // Cannot copy ~MklShape() { if (sizes_) delete[] sizes_; if (strides_) delete[] strides_; if (mklLayout_) CHECK_EQ(dnnLayoutDelete_F32(mklLayout_), E_SUCCESS); if (tfLayout_) CHECK_EQ(dnnLayoutDelete_F32(tfLayout_), E_SUCCESS); if (tf_to_mkl_dim_map_) delete[] tf_to_mkl_dim_map_; } const bool IsMklTensor() const { return isMklTensor_; } void SetMklTensor(const bool isMklTensor) { isMklTensor_ = isMklTensor; } void SetDimensions(const size_t dimension) { dimension_ = dimension; } void SetMklLayout(dnnLayout_t mklLayout) { mklLayout_ = mklLayout; } void SetMklLayout(const void* primitive, size_t resourceType) { CHECK_EQ( dnnLayoutCreateFromPrimitive_F32(&mklLayout_, (dnnPrimitive_t)primitive, (dnnResourceType_t)resourceType), E_SUCCESS); } void SetTfLayout(const size_t dimension, const size_t* sizes, const size_t* strides) { dimension_ = dimension; if (dimension > 0) { // MKl doesn't support zero dimension tensors sizes_ = new size_t[dimension]; strides_ = new size_t[dimension]; for (int ii = 0; ii < dimension; ii++) { sizes_[ii] = sizes[ii]; strides_[ii] = strides[ii]; } CHECK_EQ(dnnLayoutCreate_F32(&tfLayout_, dimension, sizes, strides), E_SUCCESS); } } // Default case - MKL dim ordering is opposite of TF dim ordering // MKL -> (DIMS-1)...0 where (DIMS-1) is outermost dim and 0 is innermost dim // TF -> 0...(DIMS-1) where 0 is outermost dim and (DIMS-1) is innermost dim // For layers that rely on data_format semantics (conv, pooling etc.) // or operate only on certain dimensions (relu, concat, split etc.), // Mkl APIs might require us to reorder these dimensions. In such cases, // kernels should explicitly set this map void SetTfDimOrder(const size_t dimension) { CHECK(dimension == dimension_); if (tf_to_mkl_dim_map_ == nullptr) { tf_to_mkl_dim_map_ = new size_t[dimension]; } for (size_t ii = 0; ii < dimension; ii++) { tf_to_mkl_dim_map_[ii] = dimension - (ii + 1); } } void SetTfDimOrder(const size_t dimension, const size_t* tf_to_mkl_dim_map) { CHECK(dimension == dimension_); if (tf_to_mkl_dim_map_ == nullptr) { tf_to_mkl_dim_map_ = new size_t[dimension]; } for (size_t ii = 0; ii < dimension; ii++) { tf_to_mkl_dim_map_[ii] = tf_to_mkl_dim_map[ii]; } } void SetTfDimOrder(const size_t dimension, TensorFormat data_format) { CHECK_EQ(dimension, 4); CHECK(dimension == dimension_); if (tf_to_mkl_dim_map_ == nullptr) { tf_to_mkl_dim_map_ = new size_t[dimension]; } tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'W')] = MklDims::W; tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'H')] = MklDims::H; tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'C')] = MklDims::C; tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'N')] = MklDims::N; } const dnnLayout_t GetMklLayout() const { return mklLayout_; } const dnnLayout_t GetTfLayout() const { return tfLayout_; } const dnnLayout_t GetCurLayout() const { return isMklTensor_ ? 
mklLayout_ : tfLayout_; } size_t GetDimension() const { return dimension_; } const size_t* GetSizes() const { return sizes_; } int64 dim_size(int index) const { return sizes_[index]; } int64 tf_dim_size(int index) const { return sizes_[tf_to_mkl_dim_map_[index]]; } const size_t* GetStrides() const { return strides_; } const size_t* GetTfToMklDimMap() const { return tf_to_mkl_dim_map_; } size_t tf_dim_idx(int index) const { return tf_to_mkl_dim_map_[index]; } // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd' // corresponds to MKL's Channel dimension. bool IsMklChannelDim(int d) const { return tf_dim_idx(d) == MklDims::C; } // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd' // corresponds to MKL's Batch dimension. bool IsMklBatchDim(int d) const { return tf_dim_idx(d) == MklDims::N; } // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd' // corresponds to MKL's Width dimension. bool IsMklWidthDim(int d) const { return tf_dim_idx(d) == MklDims::W; } // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd' // corresponds to MKL's Height dimension. bool IsMklHeightDim(int d) const { return tf_dim_idx(d) == MklDims::H; } // Check if the TF-Mkl dimension ordering map specifies if the input // tensor is in NCHW format. bool IsTensorInNCHWFormat() const { TensorFormat data_format = FORMAT_NCHW; return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) && IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) && IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) && IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W'))); } // Check if the TF-Mkl dimension ordering map specifies if the input // tensor is in NHWC format. bool IsTensorInNHWCFormat() const { TensorFormat data_format = FORMAT_NHWC; return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) && IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) && IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) && IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W'))); } void GetConvertedFlatData(dnnLayout_t targetLayout, void* input, void* output) const { dnnLayout_t curLayout; if (isMklTensor_) curLayout = mklLayout_; else curLayout = tfLayout_; dnnPrimitive_t convert; CHECK_EQ(dnnConversionCreate_F32(&convert, curLayout, targetLayout), E_SUCCESS); CHECK_EQ(dnnConversionExecute_F32(convert, input, output), E_SUCCESS); CHECK_EQ(dnnDelete_F32(convert), E_SUCCESS); } // The following methods are used for serializing and de-serializing the // contents of the mklshape object. // The data is serialized in this order // isMklTensor_ // dimension_ // sizes_ // strides_ // mklLayout_ // tfLayout_ // tf_to_mkl_dim_map_ #define SIZE_OF_MKL_DNN_BUF \ (dnnLayoutSerializationBufferSize_F32()) // Size of buffer needed to // serialize dnn_layout pointer // Size of buffer to hold the serialized object, the size is computed as // follows sizeof(isMklTensor_) + sizeof(dimension_) + sizeof(sizes_) + // sizeof(strides_) // + sizeof(mklLayout_ buffer) + sizeof(tfLayout_ buffer) // + sizeof(tf_to_mkl_dim_map_) #define SIZE_OF_MKL_SERIAL_DATA(dims) \ (2 * sizeof(size_t) + 3 * dims * sizeof(size_t) + 2 * SIZE_OF_MKL_DNN_BUF) // First we need to define some macro for offsets into the serial buffer where // different elements of Mklshape is written/read from #define IS_MKL_TENSOR_OFFSET 0 // Location from start of buffer where isMklTensor_ is serialized #define DIMS_OFFSET \ (IS_MKL_TENSOR_OFFSET + sizeof(size_t)) // Location of dimension_ // Location of sizes. 
Note dim is not used here, left here // to make macros consistent. #define SIZES_OFFSET(dims) (DIMS_OFFSET + sizeof(size_t)) #define STRIDES_OFFSET(dims) \ (SIZES_OFFSET(dims) + dims * sizeof(size_t)) // Location of strides #define MKL_LAYOUT_OFFSET(dims) \ (STRIDES_OFFSET(dims) + dims * sizeof(size_t)) // Location of mklLayout_ #define TF_LAYOUT_OFFSET(dims) \ (MKL_LAYOUT_OFFSET(dims) + SIZE_OF_MKL_DNN_BUF) // Location of tfLayout_ // Location of tf_to_mkl_dim_map_ #define TF_TO_MKL_DIM_MAP_OFFSET(dims) \ (TF_LAYOUT_OFFSET(dims) + SIZE_OF_MKL_DNN_BUF) // TODO(agramesh1) make sure to create a const to share with rewrite pass // for min size of MKL metadata tensor. void DeSerializeMklShape(const unsigned char* buf, size_t buf_size) { CHECK(buf_size >= sizeof(size_t)) << "Bufsize too small in DeSerialize"; // Make sure buffer holds at least isMklTensor_ isMklTensor_ = *reinterpret_cast<const size_t*>(buf + IS_MKL_TENSOR_OFFSET) != 0; if (isMklTensor_) { // If it is an MKL Tensor then read the rest dimension_ = *(reinterpret_cast<const size_t*>(buf + DIMS_OFFSET)); CHECK(buf_size >= SIZE_OF_MKL_SERIAL_DATA(dimension_)) << "Bufsize too small in DeSerialize"; sizes_ = new size_t[dimension_]; strides_ = new size_t[dimension_]; tf_to_mkl_dim_map_ = new size_t[dimension_]; for (int i = 0; i < dimension_; i++) { sizes_[i] = reinterpret_cast<const size_t*>(buf + SIZES_OFFSET(dimension_))[i]; strides_[i] = reinterpret_cast<const size_t*>( buf + STRIDES_OFFSET(dimension_))[i]; tf_to_mkl_dim_map_[i] = reinterpret_cast<const size_t*>( buf + TF_TO_MKL_DIM_MAP_OFFSET(dimension_))[i]; } CHECK_EQ(dnnLayoutDeserialize_F32(&mklLayout_, buf + MKL_LAYOUT_OFFSET(dimension_)), E_SUCCESS); CHECK_EQ(dnnLayoutDeserialize_F32(&tfLayout_, buf + TF_LAYOUT_OFFSET(dimension_)), E_SUCCESS); } } void SerializeMklShape(unsigned char* buf, size_t buf_size) const { CHECK(buf_size >= SIZE_OF_MKL_SERIAL_DATA(dimension_)) << "Bufsize too small to Serialize"; *reinterpret_cast<size_t*>(buf + IS_MKL_TENSOR_OFFSET) = isMklTensor_ ? 
1 : 0; if (isMklTensor_) { *(reinterpret_cast<size_t*>(buf + DIMS_OFFSET)) = dimension_; for (int i = 0; i < dimension_; i++) { reinterpret_cast<size_t*>(buf + SIZES_OFFSET(dimension_))[i] = sizes_[i]; reinterpret_cast<size_t*>(buf + STRIDES_OFFSET(dimension_))[i] = strides_[i]; reinterpret_cast<size_t*>(buf + TF_TO_MKL_DIM_MAP_OFFSET(dimension_))[i] = tf_to_mkl_dim_map_[i]; } CHECK_EQ(dnnLayoutSerialize_F32(mklLayout_, buf + MKL_LAYOUT_OFFSET(dimension_)), E_SUCCESS); CHECK_EQ( dnnLayoutSerialize_F32(tfLayout_, buf + TF_LAYOUT_OFFSET(dimension_)), E_SUCCESS); } } private: bool isMklTensor_ = false; // Flag to indicate if the tensor is an MKL tensor or not dnnLayout_t mklLayout_ = nullptr; // Pointer to the MKL layout dnnLayout_t tfLayout_ = nullptr; // Pointer to layout of corresponding // Tensorflow tensor, used when conversion from MKL to standard tensor size_t dimension_ = 0; size_t* sizes_ = nullptr; // Required by MKL for conversions size_t* strides_ = nullptr; // Required by MKL for conversions size_t* tf_to_mkl_dim_map_ = nullptr; // TF dimension corresponding to this MKL dimension }; #else // Forward decl TensorFormat MklDnn3DDataFormatToTFDataFormat(memory::format format); TensorFormat MklDnnDataFormatToTFDataFormat(memory::format format); memory::dims CalculateTFStrides(const memory::dims& dims_tf_order); memory::desc CreateBlockedMemDescHelper(const memory::dims& dim, const memory::dims& strides, memory::data_type dtype); class MklDnnShape { private: typedef struct { /// Flag to indicate if the tensor is an MKL tensor or not bool is_mkl_tensor_ = false; /// Number of dimensions in Tensorflow format size_t dimension_ = 0; /// Required by MKLDNN for conversions mkldnn_dims_t sizes_; // Required by MKL for conversions memory::format tf_data_format_ = memory::format::format_undef; memory::data_type T_ = memory::data_type::data_undef; // MKL layout mkldnn_memory_desc_t mkl_md_; /// TF dimension corresponding to this MKL dimension mkldnn_dims_t map_; } MklShapeData; MklShapeData data_; typedef std::remove_extent<mkldnn_dims_t>::type mkldnn_dim_t; #define INVALID_DIM_SIZE -1 public: MklDnnShape() { for (size_t i = 0; i < sizeof(data_.sizes_) / sizeof(data_.sizes_[0]); ++i) { data_.sizes_[i] = -1; } for (size_t i = 0; i < sizeof(data_.map_) / sizeof(data_.map_[0]); ++i) { data_.map_[i] = -1; } } ~MklDnnShape() {} TF_DISALLOW_COPY_AND_ASSIGN(MklDnnShape); // Cannot copy /// Helper function to compare memory::desc objects for MklDnn. /// May be this should go into MklDnn directly. inline bool CompareMklDnnLayouts(const memory::desc& md1, const memory::desc& md2) const { mkldnn_memory_desc_t mdd1 = md1.data; mkldnn_memory_desc_t mdd2 = md2.data; const char* d1 = reinterpret_cast<const char*>(&mdd1); const char* d2 = reinterpret_cast<const char*>(&mdd2); size_t md_size = sizeof(mdd1); for (size_t i = 0; i < md_size; i++) { if (*d1++ != *d2++) { return false; } } return true; } /// Equality function for MklDnnShape objects /// @return true if both are equal; false otherwise. inline bool operator==(const MklDnnShape& input_shape) const { if (this->IsMklTensor() != input_shape.IsMklTensor()) { return false; } // If input tensors are in Mkl layout, then we check for dimensions and // sizes. if (this->IsMklTensor()) { return this->GetTfShape() == input_shape.GetTfShape() && CompareMklDnnLayouts(this->GetMklLayout(), input_shape.GetMklLayout()); } return true; } /// Equality operator for MklDnnShape and TFShape. 
/// Returns: true if TF shapes for both are the same, false otherwise inline bool operator==(const TensorShape& input_shape) const { if (!this->IsMklTensor()) { return false; } return this->GetTfShape() == input_shape; } inline const bool IsMklTensor() const { return data_.is_mkl_tensor_; } inline void SetMklTensor(bool is_mkl_tensor) { data_.is_mkl_tensor_ = is_mkl_tensor; } inline void SetDimensions(const size_t dimension) { data_.dimension_ = dimension; } inline size_t GetDimension(char dimension) const { int index = GetMklDnnTensorDimIndex(dimension); CHECK(index >= 0 && index < this->GetDimension()) << "Invalid index from the dimension: " << index << ", " << dimension; return this->DimSize(index); } inline size_t GetDimension3D(char dimension) const { int index = GetMklDnnTensor3DDimIndex(dimension); CHECK(index >= 0 && index < this->GetDimension()) << "Invalid index from the dimension: " << index << ", " << dimension; return this->DimSize(index); } inline int32 GetMklDnnTensorDimIndex(char dimension) const { switch (dimension) { case 'N': return MklDnnDims::Dim_N; case 'C': return MklDnnDims::Dim_C; case 'H': return MklDnnDims::Dim_H; case 'W': return MklDnnDims::Dim_W; default: LOG(FATAL) << "Invalid dimension: " << dimension; return -1; // Avoid compiler warning about missing return value } } inline int32 GetMklDnnTensor3DDimIndex(char dimension) const { switch (dimension) { case 'N': return MklDnnDims3D::Dim3d_N; case 'C': return MklDnnDims3D::Dim3d_C; case 'D': return MklDnnDims3D::Dim3d_D; case 'H': return MklDnnDims3D::Dim3d_H; case 'W': return MklDnnDims3D::Dim3d_W; default: LOG(FATAL) << "Invalid dimension: " << dimension; return -1; // Avoid compiler warning about missing return value } } inline size_t GetDimension() const { return data_.dimension_; } inline const int* GetSizes() const { return reinterpret_cast<const int*>(&data_.sizes_[0]); } // Returns an mkldnn::memory::dims object that contains the sizes of this // MklDnnShape object. inline memory::dims GetSizesAsMklDnnDims() const { memory::dims retVal; if (data_.is_mkl_tensor_) { size_t dimensions = sizeof(data_.sizes_) / sizeof(data_.sizes_[0]); for (size_t i = 0; i < dimensions; i++) { if (data_.sizes_[i] != INVALID_DIM_SIZE) retVal.push_back(data_.sizes_[i]); } } else { CHECK_EQ(data_.is_mkl_tensor_, true); } return retVal; } inline int64 DimSize(int index) const { CHECK_LT(index, sizeof(data_.sizes_) / sizeof(data_.sizes_[0])); return data_.sizes_[index]; } /// Return TensorShape that describes the Tensorflow shape of the tensor /// represented by this MklShape. inline TensorShape GetTfShape() const { CHECK_EQ(data_.is_mkl_tensor_, true); std::vector<int32> shape(data_.dimension_, -1); if (data_.tf_data_format_ != memory::format::blocked) { for (size_t idx = 0; idx < data_.dimension_; ++idx) { shape[idx] = data_.sizes_[TfDimIdx(idx)]; } } else { // If Tensorflow shape is in Blocked format, then we don't have dimension // map for it. So we just create Tensorflow shape from sizes in the // specified order. 
for (size_t idx = 0; idx < data_.dimension_; ++idx) { shape[idx] = data_.sizes_[idx]; } } TensorShape ts; bool ret = TensorShapeUtils::MakeShape(shape, &ts).ok(); CHECK_EQ(ret, true); return ts; } inline void SetElemType(memory::data_type dt) { data_.T_ = dt; } inline const memory::data_type GetElemType() { return data_.T_; } inline void SetMklLayout(memory::primitive_desc* pd) { CHECK_NOTNULL(pd); data_.mkl_md_ = pd->desc().data; } inline void SetMklLayout(memory::desc* md) { CHECK_NOTNULL(md); data_.mkl_md_ = md->data; } inline const memory::desc GetMklLayout() const { return memory::desc(data_.mkl_md_); } inline memory::format GetTfDataFormat() const { return data_.tf_data_format_; } /// We don't create primitive_descriptor for TensorFlow layout now. /// We use lazy evaluation and create it only when needed. Input format can /// also be Blocked format. inline void SetTfLayout(size_t dims, const memory::dims& sizes, memory::format format) { CHECK_EQ(dims, sizes.size()); data_.dimension_ = dims; for (size_t ii = 0; ii < dims; ii++) { data_.sizes_[ii] = sizes[ii]; } data_.tf_data_format_ = format; if (format != memory::format::blocked) { SetTfDimOrder(dims, format); } } inline const memory::desc GetTfLayout() const { memory::dims dims; for (size_t ii = 0; ii < data_.dimension_; ii++) { dims.push_back(data_.sizes_[ii]); } // Create Blocked memory desc if input TF format was set like that. if (data_.tf_data_format_ == memory::format::blocked) { auto strides = CalculateTFStrides(dims); return CreateBlockedMemDescHelper(dims, strides, data_.T_); } else { return memory::desc(dims, data_.T_, data_.tf_data_format_); } } inline const memory::desc GetCurLayout() const { return IsMklTensor() ? GetMklLayout() : GetTfLayout(); } // nhasabni - I've removed SetTfDimOrder that was setting default order in // case of MKL-ML. We don't need a case of default dimension order because // when an operator that does not get data_format attribute gets all inputs // in Tensorflow format, it will produce output in Tensorflow format. 
inline void SetTfDimOrder(const size_t dimension, const mkldnn_dims_t map) { CHECK(dimension == data_.dimension_); for (size_t ii = 0; ii < dimension; ii++) { data_.map_[ii] = map[ii]; } } inline void SetTfDimOrder(const size_t dimension, TensorFormat data_format) { if (dimension == 5) { CHECK(dimension == data_.dimension_); data_.map_[GetTensorDimIndex<3>(data_format, '0')] = MklDnnDims3D::Dim3d_D; data_.map_[GetTensorDimIndex<3>(data_format, '1')] = MklDnnDims3D::Dim3d_H; data_.map_[GetTensorDimIndex<3>(data_format, '2')] = MklDnnDims3D::Dim3d_W; data_.map_[GetTensorDimIndex<3>(data_format, 'C')] = MklDnnDims3D::Dim3d_C; data_.map_[GetTensorDimIndex<3>(data_format, 'N')] = MklDnnDims3D::Dim3d_N; } else { CHECK_EQ(dimension, 4); CHECK(dimension == data_.dimension_); data_.map_[GetTensorDimIndex<2>(data_format, 'W')] = MklDnnDims::Dim_W; data_.map_[GetTensorDimIndex<2>(data_format, 'H')] = MklDnnDims::Dim_H; data_.map_[GetTensorDimIndex<2>(data_format, 'C')] = MklDnnDims::Dim_C; data_.map_[GetTensorDimIndex<2>(data_format, 'N')] = MklDnnDims::Dim_N; } } inline void SetTfDimOrder(const size_t dimension, memory::format format) { TensorFormat data_format = MklDnnDataFormatToTFDataFormat(format); SetTfDimOrder(dimension, data_format); } inline const mkldnn_dim_t* GetTfToMklDimMap() const { return &data_.map_[0]; } inline size_t TfDimIdx(int index) const { return data_.map_[index]; } inline int64 TfDimSize(int index) const { return data_.sizes_[TfDimIdx(index)]; } /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd' /// corresponds to MKL's Channel dimension. inline bool IsMklChannelDim(int d) const { return TfDimIdx(d) == MklDnnDims::Dim_C; } /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd' /// corresponds to MKL's Batch dimension. inline bool IsMklBatchDim(int d) const { return TfDimIdx(d) == MklDnnDims::Dim_N; } /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd' /// corresponds to MKL's Width dimension. inline bool IsMklWidthDim(int d) const { return TfDimIdx(d) == MklDnnDims::Dim_W; } /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd' /// corresponds to MKL's Height dimension. inline bool IsMklHeightDim(int d) const { return TfDimIdx(d) == MklDnnDims::Dim_H; } /// Check if the TF-Mkl dimension ordering map specifies if the input /// tensor is in NCHW format. inline bool IsTensorInNCHWFormat() const { TensorFormat data_format = FORMAT_NCHW; return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) && IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) && IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) && IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W'))); } /// Check if the TF-Mkl dimension ordering map specifies if the input /// tensor is in NHWC format. inline bool IsTensorInNHWCFormat() const { TensorFormat data_format = FORMAT_NHWC; return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) && IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) && IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) && IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W'))); } /// The following methods are used for serializing and de-serializing the /// contents of the mklshape object. 
/// The data is serialized in this order /// is_mkl_tensor_ : dimension_ : sizes_ : map_: format_ : T_ : mkl_pd_; /// Size of buffer to hold the serialized object, the size is computed by /// following above mentioned order inline size_t GetSerializeBufferSize() const { return sizeof(MklShapeData); } void SerializeMklDnnShape(unsigned char* buf, size_t buf_size) const { CHECK(buf_size >= GetSerializeBufferSize()) << "Buffer size is too small to SerializeMklDnnShape"; *reinterpret_cast<MklShapeData*>(buf) = data_; } void DeSerializeMklDnnShape(const unsigned char* buf, size_t buf_size) { // Make sure buffer holds at least is_mkl_tensor_. CHECK(buf_size >= sizeof(data_.is_mkl_tensor_)) << "Buffer size is too small in DeSerializeMklDnnShape"; const bool is_mkl_tensor = *reinterpret_cast<const bool*>(buf); if (is_mkl_tensor) { // If it is an MKL Tensor then read the rest CHECK(buf_size >= GetSerializeBufferSize()) << "Buffer size is too small in DeSerializeMklDnnShape"; data_ = *reinterpret_cast<const MklShapeData*>(buf); } } }; #endif // List of MklShape objects. Used in Concat/Split layers. #ifndef INTEL_MKL_ML_ONLY typedef std::vector<MklDnnShape> MklDnnShapeList; #else typedef std::vector<MklShape> MklShapeList; #endif #ifdef INTEL_MKL_ML_ONLY // Check if all tensors specified by MklShapes are MKL tensors. inline bool AreAllMklTensors(const MklShapeList& shapes) { for (auto& s : shapes) { if (!s.IsMklTensor()) { return false; } } return true; } template <typename T> inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor, const MklShape& mkl_shape) { Tensor output_tensor; TensorShape output_shape; for (size_t j = 0; j < mkl_shape.GetDimension(); j++) { // Outermost to innermost dimension output_shape.AddDim(mkl_shape.GetSizes()[mkl_shape.tf_dim_idx(j)]); } // Allocate output tensor. context->allocate_temp(DataTypeToEnum<T>::v(), output_shape, &output_tensor); dnnLayout_t output_layout = static_cast<dnnLayout_t>(mkl_shape.GetTfLayout()); void* input_buffer = const_cast<T*>(mkl_tensor.flat<T>().data()); void* output_buffer = const_cast<T*>(output_tensor.flat<T>().data()); if (mkl_tensor.NumElements() != 0) { mkl_shape.GetConvertedFlatData(output_layout, input_buffer, output_buffer); } return output_tensor; } #else using mkldnn::stream; template <typename T> class MklDnnData; template <typename T> inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor, const MklDnnShape& mkl_shape) { Tensor output_tensor; try { if (!mkl_shape.IsMklTensor()) return mkl_tensor; // return input since it is already TF tensor TensorShape output_shape = mkl_shape.GetTfShape();; // Allocate output tensor. context->allocate_temp(DataTypeToEnum<T>::v(), output_shape, &output_tensor); auto cpu_engine = engine(engine::cpu, 0); MklDnnData<T> input(&cpu_engine); // Get Mkl layout of input tensor. auto input_mkl_md = mkl_shape.GetMklLayout(); auto output_tf_md = mkl_shape.GetTfLayout(); auto output_tf_pd = memory::primitive_desc(output_tf_md, cpu_engine); input.SetUsrMem(input_mkl_md, &mkl_tensor); // reorder if (input.IsReorderNeeded(output_tf_pd)) { std::vector<primitive> net; CHECK_EQ(input.CheckReorderToOpMem(output_tf_pd, &output_tensor, &net), true); stream(stream::kind::eager).submit(net).wait(); } else { // If not, just forward input tensor to output tensor. 
CHECK(output_tensor.CopyFrom(mkl_tensor, output_shape)); } } catch (mkldnn::error& e) { string error_msg = "Status: " + std::to_string(e.status) + ", message: " + string(e.message) + ", in file " + string(__FILE__) + ":" + std::to_string(__LINE__); LOG(FATAL) << "Operation received an exception: " << error_msg; } return output_tensor; } #endif // Get the MKL shape from the second string tensor #ifdef INTEL_MKL_ML_ONLY inline void GetMklShape(OpKernelContext* ctext, int n, MklShape* mklshape) { mklshape->DeSerializeMklShape( ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs())) .flat<uint8>() .data(), ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs())) .flat<uint8>() .size() * sizeof(uint8)); } #else inline void GetMklShape(OpKernelContext* ctext, int n, MklDnnShape* mklshape) { mklshape->DeSerializeMklDnnShape( ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs())) .flat<uint8>() .data(), ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs())) .flat<uint8>() .size() * sizeof(uint8)); } #endif // Gets the actual input inline const Tensor& MklGetInput(OpKernelContext* ctext, int n) { return ctext->input(GetTensorDataIndex(n, ctext->num_inputs())); } inline void GetMklInputList(OpKernelContext* ctext, StringPiece name, OpInputList* input_tensors) { CHECK_NOTNULL(input_tensors); ctext->input_list(name, input_tensors); } #ifdef INTEL_MKL_ML_ONLY inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name, MklShapeList* mkl_shapes) { OpInputList input_mkl_tensors; GetMklInputList(ctext, strings::StrCat("mkl_", name), &input_mkl_tensors); for (int i = 0; i < input_mkl_tensors.size(); i++) { (*mkl_shapes)[i].DeSerializeMklShape( input_mkl_tensors[i].flat<uint8>().data(), input_mkl_tensors[i].flat<uint8>().size() * sizeof(uint8)); } } #else inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name, MklDnnShapeList* mkl_shapes) { OpInputList input_mkl_tensors; GetMklInputList(ctext, strings::StrCat("mkl_", name), &input_mkl_tensors); for (int i = 0; i < input_mkl_tensors.size(); i++) { (*mkl_shapes)[i].DeSerializeMklDnnShape( input_mkl_tensors[i].flat<uint8>().data(), input_mkl_tensors[i].flat<uint8>().size() * sizeof(uint8)); } } #endif #ifndef INTEL_MKL_ML_ONLY /// Get shape of input tensor pointed by 'input_idx' in TensorShape format. /// If the input tensor is in MKL layout, then obtains TensorShape from /// MklShape. inline TensorShape GetTfShape(OpKernelContext* context, size_t input_idx) { // Sanity check. 
CHECK_NOTNULL(context); CHECK_LT(input_idx, context->num_inputs()); MklDnnShape input_mkl_shape; GetMklShape(context, input_idx, &input_mkl_shape); if (input_mkl_shape.IsMklTensor()) { return input_mkl_shape.GetTfShape(); } else { const Tensor& t = MklGetInput(context, input_idx); return t.shape(); } } #endif #ifdef INTEL_MKL_ML_ONLY // Allocate the second output tensor that will contain // the MKL shape serialized inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n, const MklShape& mkl_shape) { Tensor* second_tensor = nullptr; TensorShape second_shape; second_shape.AddDim(SIZE_OF_MKL_SERIAL_DATA(mkl_shape.GetDimension())); OP_REQUIRES_OK(ctext, ctext->allocate_output( GetTensorMetaDataIndex(n, ctext->num_outputs()), second_shape, &second_tensor)); mkl_shape.SerializeMklShape( second_tensor->flat<uint8>().data(), second_tensor->flat<uint8>().size() * sizeof(uint8)); } #else // Allocate the second output tensor that will contain // the MKL shape serialized inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n, const MklDnnShape& mkl_shape) { Tensor* second_tensor = nullptr; TensorShape second_shape; second_shape.AddDim(mkl_shape.GetSerializeBufferSize()); OP_REQUIRES_OK(ctext, ctext->allocate_output( GetTensorMetaDataIndex(n, ctext->num_outputs()), second_shape, &second_tensor)); mkl_shape.SerializeMklDnnShape( second_tensor->flat<uint8>().data(), second_tensor->flat<uint8>().size() * sizeof(uint8)); } #endif #ifdef INTEL_MKL_ML_ONLY // Allocate the output tensor, create a second output tensor that will contain // the MKL shape serialized inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n, Tensor** output, const TensorShape& tf_shape, const MklShape& mkl_shape) { Tensor* second_tensor = nullptr; TensorShape second_shape; second_shape.AddDim(SIZE_OF_MKL_SERIAL_DATA(mkl_shape.GetDimension())); OP_REQUIRES_OK( ctext, ctext->allocate_output(GetTensorDataIndex(n, ctext->num_outputs()), tf_shape, output)); OP_REQUIRES_OK(ctext, ctext->allocate_output( GetTensorMetaDataIndex(n, ctext->num_outputs()), second_shape, &second_tensor)); mkl_shape.SerializeMklShape( second_tensor->flat<uint8>().data(), second_tensor->flat<uint8>().size() * sizeof(uint8)); } #else // Allocate the output tensor, create a second output tensor that will contain // the MKL shape serialized inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n, Tensor** output, const TensorShape& tf_shape, const MklDnnShape& mkl_shape) { Tensor* second_tensor = nullptr; TensorShape second_shape; second_shape.AddDim(mkl_shape.GetSerializeBufferSize()); OP_REQUIRES_OK( ctext, ctext->allocate_output(GetTensorDataIndex(n, ctext->num_outputs()), tf_shape, output)); OP_REQUIRES_OK(ctext, ctext->allocate_output( GetTensorMetaDataIndex(n, ctext->num_outputs()), second_shape, &second_tensor)); mkl_shape.SerializeMklDnnShape( second_tensor->flat<uint8>().data(), second_tensor->flat<uint8>().size() * sizeof(uint8)); } #endif // Allocates a temp tensor and returns the data buffer for temporary storage. 
// Currently #ifndef INTEL_MKL_ML_ONLY template <typename T> inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out, const memory::primitive_desc& pd, void** buf_out) { TensorShape tf_shape; tf_shape.AddDim(pd.get_size() / sizeof(T) + 1); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(), tf_shape, tensor_out)); *buf_out = static_cast<void*>(tensor_out->flat<T>().data()); } #else inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out, dnnLayout_t lt_buff, void** buf_out) { TensorShape tf_shape; tf_shape.AddDim( dnnLayoutGetMemorySize_F32(static_cast<dnnLayout_t>(lt_buff)) / sizeof(float) + 1); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<float>::v(), tf_shape, tensor_out)); *buf_out = static_cast<void*>(tensor_out->flat<float>().data()); } #endif template <typename T> inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out, TensorShape tf_shape) { OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(), tf_shape, tensor_out)); } inline void GetStridesFromSizes(TensorFormat data_format, size_t* strides, const size_t* sizes) { // MKL requires strides in NCHW if (data_format == FORMAT_NHWC) { strides[0] = sizes[2]; strides[1] = sizes[0] * sizes[2]; strides[2] = 1; strides[3] = sizes[0] * sizes[1] * sizes[2]; } else { strides[0] = 1; strides[1] = sizes[0]; strides[2] = sizes[0] * sizes[1]; strides[3] = sizes[0] * sizes[1] * sizes[2]; } } #ifdef INTEL_MKL_ML_ONLY inline void MklSizesToTFSizes(OpKernelContext* context, TensorFormat data_format_, const MklShape& mkl_shape, TensorShape* tf_shape) { size_t tf_dim = mkl_shape.GetDimension(); const size_t* tf_sizes = mkl_shape.GetSizes(); OP_REQUIRES(context, tf_dim == 4, errors::InvalidArgument("MKLSizesToTFSizes: size must be 4-dim")); std::vector<int32> sizes; sizes.push_back(tf_sizes[3]); if (data_format_ == FORMAT_NHWC) { sizes.push_back(tf_sizes[1]); sizes.push_back(tf_sizes[0]); sizes.push_back(tf_sizes[2]); } else { sizes.push_back(tf_sizes[2]); sizes.push_back(tf_sizes[1]); sizes.push_back(tf_sizes[0]); } OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape(sizes, tf_shape)); } #endif inline int32 GetMklTensorDimIndex(char dimension) { switch (dimension) { case 'N': return MklDims::N; case 'C': return MklDims::C; case 'H': return MklDims::H; case 'W': return MklDims::W; default: LOG(FATAL) << "Invalid dimension: " << dimension; return -1; // Avoid compiler warning about missing return value } } #ifdef INTEL_MKL_ML_ONLY inline int64 GetMklTensorDim(const MklShape& mkl_shape, char dimension) { int index = GetMklTensorDimIndex(dimension); CHECK(index >= 0 && index < mkl_shape.GetDimension()) << "Invalid index from the dimension: " << index << ", " << dimension; return mkl_shape.dim_size(index); } #endif inline void CopyMklTensorInToOut(OpKernelContext* context, int idx_in, int idx_out) { int num_inputs = context->num_inputs(); int num_outputs = context->num_outputs(); int idx_data_in = GetTensorDataIndex(idx_in, num_inputs); int idx_meta_in = GetTensorMetaDataIndex(idx_in, num_inputs); int idx_data_out = GetTensorDataIndex(idx_out, num_outputs); int idx_meta_out = GetTensorMetaDataIndex(idx_out, num_outputs); const Tensor& data = context->input(idx_data_in); const Tensor& meta = context->input(idx_meta_in); Tensor output(data.dtype()); Tensor meta_output(meta.dtype()); // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...) 
CHECK(output.CopyFrom(data, data.shape())); CHECK(meta_output.CopyFrom(meta, meta.shape())); context->set_output(idx_data_out, output); context->set_output(idx_meta_out, meta_output); } #ifdef INTEL_MKL_ML_ONLY inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in, int idx_out, const TensorShape& shape) { int num_inputs = context->num_inputs(); int num_outputs = context->num_outputs(); int idx_data_in = GetTensorDataIndex(idx_in, num_inputs); int idx_data_out = GetTensorDataIndex(idx_out, num_outputs); const Tensor& data = context->input(idx_data_in); MklShape mkl_shape_output; mkl_shape_output.SetMklTensor(false); AllocateOutputSetMklShape(context, idx_out, mkl_shape_output); Tensor output(data.dtype()); // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...) CHECK(output.CopyFrom(data, shape)); context->set_output(idx_data_out, output); } #else inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in, int idx_out, const TensorShape& shape) { int num_inputs = context->num_inputs(); int num_outputs = context->num_outputs(); int idx_data_in = GetTensorDataIndex(idx_in, num_inputs); int idx_data_out = GetTensorDataIndex(idx_out, num_outputs); const Tensor& data = context->input(idx_data_in); MklDnnShape mkl_shape_output; mkl_shape_output.SetMklTensor(false); AllocateOutputSetMklShape(context, idx_out, mkl_shape_output); Tensor output(data.dtype()); // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...) CHECK(output.CopyFrom(data, shape)); context->set_output(idx_data_out, output); } #endif #ifdef INTEL_MKL_ML_ONLY inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in, int idx_out) { int num_inputs = context->num_inputs(); int num_outputs = context->num_outputs(); int idx_data_in = GetTensorDataIndex(idx_in, num_inputs); int idx_data_out = GetTensorDataIndex(idx_out, num_outputs); MklShape mkl_shape_output; mkl_shape_output.SetMklTensor(false); AllocateOutputSetMklShape(context, idx_out, mkl_shape_output); if (IsRefType(context->input_dtype(idx_data_in))) { context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out); } else { context->set_output(idx_data_out, context->input(idx_data_in)); } } #else inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in, int idx_out) { int num_inputs = context->num_inputs(); int num_outputs = context->num_outputs(); int idx_data_in = GetTensorDataIndex(idx_in, num_inputs); int idx_data_out = GetTensorDataIndex(idx_out, num_outputs); MklDnnShape dnn_shape_output; dnn_shape_output.SetMklTensor(false); AllocateOutputSetMklShape(context, idx_out, dnn_shape_output); if (IsRefType(context->input_dtype(idx_data_in))) { context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out); } else { context->set_output(idx_data_out, context->input(idx_data_in)); } } #endif inline void ForwardMklTensorInToOut(OpKernelContext* context, int idx_in, int idx_out) { int num_inputs = context->num_inputs(); int num_outputs = context->num_outputs(); int idx_data_in = GetTensorDataIndex(idx_in, num_inputs); int idx_meta_in = GetTensorMetaDataIndex(idx_in, num_inputs); int idx_data_out = GetTensorDataIndex(idx_out, num_outputs); int idx_meta_out = GetTensorMetaDataIndex(idx_out, num_outputs); if (IsRefType(context->input_dtype(idx_data_in))) { context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out); context->forward_ref_input_to_ref_output(idx_meta_in, idx_meta_out); } else { context->set_output(idx_data_out, 
context->input(idx_data_in)); context->set_output(idx_meta_out, context->input(idx_meta_in)); } } #ifndef INTEL_MKL_ML_ONLY // Set a dummy MKLDNN shape (called when the output is in TF format) inline void SetDummyMklDnnShapeOutput(OpKernelContext* context, uint32 idx_data_out) { MklDnnShape mkl_shape_output; mkl_shape_output.SetMklTensor(false); AllocateOutputSetMklShape(context, idx_data_out, mkl_shape_output); } inline void ForwardMklTensorInToOutWithMklShape(OpKernelContext* context, int idx_in, int idx_out, const MklDnnShape& mkl_shape) { int num_inputs = context->num_inputs(); int num_outputs = context->num_outputs(); int idx_data_in = GetTensorDataIndex(idx_in, num_inputs); int idx_data_out = GetTensorDataIndex(idx_out, num_outputs); AllocateOutputSetMklShape(context, idx_out, mkl_shape); if (IsRefType(context->input_dtype(idx_data_in))) { context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out); } else { context->set_output(idx_data_out, context->input(idx_data_in)); } } #endif // Forward the MKL shape ONLY (used in elementwise and other ops where // we call the eigen implementation and MKL shape is not used) inline void ForwardMklMetaDataInToOut(OpKernelContext* context, uint32 idx_data_in, uint32_t idx_data_out) { uint32 idx_meta_in = GetTensorMetaDataIndex(idx_data_in, context->num_inputs()); uint32 idx_meta_out = GetTensorMetaDataIndex(idx_data_out, context->num_outputs()); if (IsRefType(context->input_dtype(idx_data_in))) { context->forward_ref_input_to_ref_output(idx_meta_in, idx_meta_out); } else { context->set_output(idx_meta_out, context->input(idx_meta_in)); } } #ifdef INTEL_MKL_ML_ONLY // Set a dummy MKL shape (called when the output is in TF format) inline void SetDummyMklShapeOutput(OpKernelContext* context, uint32 idx_data_out) { MklShape mkl_shape_output; mkl_shape_output.SetMklTensor(false); AllocateOutputSetMklShape(context, idx_data_out, mkl_shape_output); } // We don't need these functions in MKLDNN. We have defined equality operator // on MklDnnShape class directly. 
// Checks if the TF shape for both MKL tensors is the same or not // Returns: true if both TF shapes are the same, false otherwise inline bool MklCompareShapes(const MklShape* input_shape_0, const MklShape* input_shape_1) { // Check for number of dimensions if (input_shape_0->GetDimension() != input_shape_1->GetDimension()) { return false; } // Check size of each dimension size_t ndims = input_shape_0->GetDimension(); for (size_t i = 0; i < ndims; i++) { if (input_shape_0->dim_size(i) != input_shape_1->dim_size(i)) { return false; } } return true; } // Checks if the TF shape for both tensors is the same or not // Returns: true if TF shapes for both are the same, false otherwise inline bool MklCompareShapes(const MklShape* input_shape_0, const TensorShape* input_shape_1) { // Check for number of dimensions if (input_shape_0->GetDimension() != input_shape_1->dims()) { return false; } // Check size of each dimension size_t ndims = input_shape_0->GetDimension(); for (size_t i = 0; i < ndims; i++) { if (input_shape_0->tf_dim_size(i) != input_shape_1->dim_size(i)) { return false; } } return true; } // Checks if the TF shape for both tensors is the same or not // Returns: true if TF shapes for both are the same, false otherwise inline bool MklCompareShapes(const TensorShape* input_shape_0, const MklShape* input_shape_1) { return MklCompareShapes(input_shape_1, input_shape_0); } // Checks if the TF shape for both tensors is the same or not // Returns: true if TF shapes for both are the same, false otherwise inline bool MklCompareShapes(const TensorShape* input_shape_0, const TensorShape* input_shape_1) { // Check for number of dimensions if (input_shape_0->dims() != input_shape_1->dims()) { return false; } // Check size of each dimension size_t ndims = input_shape_0->dims(); for (size_t i = 0; i < ndims; i++) { if (input_shape_0->dim_size(i) != input_shape_1->dim_size(i)) { return false; } } return true; } // These functions do not compile with MKL-DNN since mkl.h is missing. // We may need to remove them later. // TODO(intel_tf): Remove this routine when faster MKL layout conversion is // out. inline void MklNHWCToNCHW(const Tensor& input, Tensor** output) { const float* buf_in = input.flat<float>().data(); float* buf_out = (*output)->flat<float>().data(); int64 N = input.dim_size(0); int64 H = input.dim_size(1); int64 W = input.dim_size(2); int64 C = input.dim_size(3); int64 stride_n = H * W * C; #pragma omp parallel for num_threads(16) for (int64 n = 0; n < N; ++n) { mkl_somatcopy('R', 'T', H * W, C, 1, buf_in + n * stride_n, C, buf_out + n * stride_n, H * W); } } inline void MklNCHWToNHWC(const Tensor& input, Tensor** output) { const float* buf_in = input.flat<float>().data(); float* buf_out = (*output)->flat<float>().data(); int64 N = (*output)->dim_size(0); int64 H = (*output)->dim_size(1); int64 W = (*output)->dim_size(2); int64 C = (*output)->dim_size(3); int64 stride_n = H * W * C; #pragma omp parallel for num_threads(16) for (int64 n = 0; n < N; ++n) { mkl_somatcopy('R', 'T', C, H * W, 1, buf_in + n * stride_n, H * W, buf_out + n * stride_n, C); } } #endif // ------------------------------------------------------------------- #ifndef INTEL_MKL_ML_ONLY /// Return MKL-DNN data type (memory::data_type) for input type T /// /// @input None /// @return memory::data_type corresponding to type T template <typename T> static memory::data_type MklDnnType(); /// Instantiation for float type. Add similar instantiations for other /// type if needed. 
template <> memory::data_type MklDnnType<float>() { return memory::data_type::f32; }

/// Map TensorFlow's data format into MKL-DNN 3D data format
///
/// @input: TensorFlow data format
/// @return: memory::format corresponding to TensorFlow data format;
///          Fails with an error if invalid data format.
inline memory::format TFDataFormatToMklDnn3DDataFormat(TensorFormat format) {
  if (format == FORMAT_NHWC) return memory::format::ndhwc;
  else if (format == FORMAT_NCHW) return memory::format::ncdhw;
  TF_CHECK_OK(Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));
  return memory::format::format_undef;
}

/// Map TensorFlow's data format into MKL-DNN data format
///
/// @input: TensorFlow data format
/// @return: memory::format corresponding to TensorFlow data format;
///          Fails with an error if invalid data format.
inline memory::format TFDataFormatToMklDnnDataFormat(TensorFormat format) {
  if (format == FORMAT_NHWC) return memory::format::nhwc;
  else if (format == FORMAT_NCHW) return memory::format::nchw;
  TF_CHECK_OK(Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));
  return memory::format::format_undef;
}

/// Map MKL-DNN data format to TensorFlow's data format
///
/// @input: memory::format
/// @return: Tensorflow data format corresponding to memory::format
///          Fails with an error if invalid data format.
inline TensorFormat MklDnnDataFormatToTFDataFormat(memory::format format) {
  if (format == memory::format::nhwc || format == memory::format::ndhwc)
    return FORMAT_NHWC;
  else if (format == memory::format::nchw || format == memory::format::ncdhw)
    return FORMAT_NCHW;
  TF_CHECK_OK(Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));

  // Return to prevent compiler warnings, otherwise TF_CHECK_OK will ensure
  // that we don't come here.
  return FORMAT_NHWC;
}

/// Map TensorShape object into memory::dims required by MKL-DNN
///
/// This function will simply map input TensorShape into MKL-DNN dims
/// naively. So it will preserve the order of dimensions. E.g., if
/// input tensor is in NHWC format, then dims will be in NHWC format
/// also.
///
/// @input TensorShape object in shape
/// @return memory::dims corresponding to TensorShape
inline memory::dims TFShapeToMklDnnDims(const TensorShape& shape) {
  memory::dims dims(shape.dims());
  for (int d = 0; d < shape.dims(); ++d) {
    dims[d] = shape.dim_size(d);
  }
  return dims;
}

/// Map TensorShape object into memory::dims in NCHW format required by MKL-DNN
///
/// This function is a more specific version of the function above. It maps
/// input TensorShape into MKL-DNN dims in NCHW format, so it may not preserve
/// the order of dimensions. E.g., if the input tensor is in NHWC format, then
/// dims will be in NCHW format, and not in NHWC format.
///
/// @input TensorShape object in shape
/// @return memory::dims in MKL-DNN required NCHW format
inline memory::dims TFShapeToMklDnnDimsInNCHW(const TensorShape& shape,
                                              TensorFormat format) {
  // Check validity of format.
  CHECK_NE(TFDataFormatToMklDnnDataFormat(format),
           memory::format::format_undef);

  int n = shape.dim_size(GetTensorDimIndex(format, 'N'));
  int c = shape.dim_size(GetTensorDimIndex(format, 'C'));
  int h = shape.dim_size(GetTensorDimIndex(format, 'H'));
  int w = shape.dim_size(GetTensorDimIndex(format, 'W'));

  // MKL-DNN requires dimensions in NCHW format.
  return memory::dims({n, c, h, w});
}

inline memory::dims TFShapeToMklDnnDimsInNCDHW(const TensorShape& shape,
                                               TensorFormat format) {
  // Check validity of format.
  CHECK_NE(TFDataFormatToMklDnn3DDataFormat(format),
           memory::format::format_undef);

  int n = shape.dim_size(GetTensorDimIndex<3>(format, 'N'));
  int c = shape.dim_size(GetTensorDimIndex<3>(format, 'C'));
  int d = shape.dim_size(GetTensorDimIndex<3>(format, '0'));
  int h = shape.dim_size(GetTensorDimIndex<3>(format, '1'));
  int w = shape.dim_size(GetTensorDimIndex<3>(format, '2'));

  // MKL-DNN requires dimensions in NCDHW format.
  return memory::dims({n, c, d, h, w});
}

/// Overloaded version of function above. Input parameters are
/// self-explanatory.
inline memory::dims MklDnnDimsInNCHW(const memory::dims& in_dims,
                                     TensorFormat format) {
  // Check validity of format.
  CHECK_NE(TFDataFormatToMklDnnDataFormat(format),
           memory::format::format_undef);

  int n = in_dims[GetTensorDimIndex(format, 'N')];
  int c = in_dims[GetTensorDimIndex(format, 'C')];
  int h = in_dims[GetTensorDimIndex(format, 'H')];
  int w = in_dims[GetTensorDimIndex(format, 'W')];

  // MKL-DNN requires dimensions in NCHW format.
  return memory::dims({n, c, h, w});
}

/// Map MklDnn memory::dims object into TensorShape object.
///
/// This function will simply map input shape in MKL-DNN memory::dims format
/// in Tensorflow's TensorShape object by preserving dimension order.
///
/// @input MKL-DNN memory::dims object
/// @output TensorShape corresponding to memory::dims
inline TensorShape MklDnnDimsToTFShape(const memory::dims& dims) {
  std::vector<int32> shape(dims.size(), -1);
  for (int d = 0; d < dims.size(); d++) {
    shape[d] = dims[d];
  }

  TensorShape ret;
  CHECK_EQ(TensorShapeUtils::MakeShape(shape, &ret).ok(), true);
  return ret;
}

/// Function to calculate strides given tensor shape in Tensorflow order
/// E.g., if dims_tf_order is {1, 2, 3, 4}, then as per Tensorflow convention,
/// the dimension with size 1 is the outermost dimension, while the dimension
/// with size 4 is the innermost dimension. So strides for this tensor would
/// be {4 * 3 * 2, 4 * 3, 4, 1}, i.e., {24, 12, 4, 1}.
///
/// @input Tensorflow shape in memory::dims type
/// @return memory::dims containing strides for the tensor.
inline memory::dims CalculateTFStrides(const memory::dims& dims_tf_order) {
  CHECK_GT(dims_tf_order.size(), 0);
  memory::dims strides(dims_tf_order.size());
  int last_dim_idx = dims_tf_order.size() - 1;
  strides[last_dim_idx] = 1;
  for (int d = last_dim_idx - 1; d >= 0; d--) {
    strides[d] = strides[d + 1] * dims_tf_order[d + 1];
  }
  return strides;
}

inline padding_kind TFPaddingToMklDnnPadding(Padding pad) {
  // MKL-DNN only supports zero padding.
  return padding_kind::zero;
}

/// Helper function to create memory descriptor in Blocked format
///
/// @input: Tensor dimensions
/// @input: strides corresponding to dimensions. One can use utility
///         function such as CalculateTFStrides to compute strides
///         for given dimensions.
/// @return: memory::desc object corresponding to blocked memory format
///          for given dimensions and strides.
inline memory::desc CreateBlockedMemDescHelper(const memory::dims& dim,
                                               const memory::dims& strides,
                                               memory::data_type dtype) {
  CHECK_EQ(dim.size(), strides.size());

  // We have to construct the memory descriptor in a C style. This is not at
  // all ideal, but MKLDNN does not offer any API to construct a descriptor in
  // blocked format except a copy constructor that accepts
  // mkldnn_memory_desc_t.
  mkldnn_memory_desc_t md;
  md.primitive_kind = mkldnn_memory;
  md.ndims = dim.size();
  md.format = mkldnn_blocked;
  md.data_type = memory::convert_to_c(dtype);

  for (size_t i = 0; i < dim.size(); i++) {
    md.layout_desc.blocking.block_dims[i] = 1;
    md.layout_desc.blocking.strides[1][i] = 1;
    md.layout_desc.blocking.strides[0][i] = strides[i];
    md.layout_desc.blocking.padding_dims[i] = dim[i];
    md.layout_desc.blocking.offset_padding_to_data[i] = 0;
    md.dims[i] = dim[i];
  }
  md.layout_desc.blocking.offset_padding = 0;

  return memory::desc(md);
}

template <typename T>
inline primitive FindOrCreateReorder(const memory* from, const memory* to);

/*
 * Class to represent all the resources corresponding to a tensor in TensorFlow
 * that are required to execute an operation (such as Convolution).
 */
template <typename T>
class MklDnnData {
 private:
  /// MKL-DNN memory primitive for input user memory
  memory* user_memory_;

  /// MKL-DNN memory primitive in case input or output reorder is needed.
  memory* reorder_memory_;

  /// Operations memory descriptor
  memory::desc* op_md_;
  // Flag to indicate whether the data is 3D or not.
  bool bIs3D;
  /// Operations temp buffer
  void* allocated_buffer_;
  /// CPU engine on which operation will be executed
  const engine* cpu_engine_;

 public:
  explicit MklDnnData(const engine* e)
      : user_memory_(nullptr),
        reorder_memory_(nullptr),
        op_md_(nullptr),
        bIs3D(false),  // default to 2D; SetIs3DData() overrides this.
        allocated_buffer_(nullptr),
        cpu_engine_(e) {}

  ~MklDnnData() {
    cpu_engine_ = nullptr;  // We don't own this.
    delete (user_memory_);
    delete (reorder_memory_);
    delete (op_md_);
  }

  inline void* GetTensorBuffer(const Tensor* tensor) const {
    CHECK_NOTNULL(tensor);
    return const_cast<void*>(
        static_cast<const void*>(tensor->flat<T>().data()));
  }

  void SetIs3DData(bool bIs3D_) { bIs3D = bIs3D_; }

  bool GetIs3D() { return bIs3D; }

  /// Set user memory primitive using specified dimensions, memory format and
  /// data_buffer. Function automatically uses element data type by using
  /// input type T used for creating call object.
  ///
  /// In a nutshell, function allows user to describe the input tensor to
  /// an operation. E.g., filter of Conv2D is of shape {1, 2, 3, 4}, and
  /// memory format HWIO, and the buffer that contains actual values is
  /// pointed by data_buffer.
  inline void SetUsrMem(const memory::dims& dim, memory::format fm,
                        void* data_buffer = nullptr) {
    auto md = memory::desc(dim, MklDnnType<T>(), fm);
    SetUsrMem(md, data_buffer);
  }

  inline void SetUsrMem(const memory::dims& dim, memory::format fm,
                        const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(dim, fm, GetTensorBuffer(tensor));
  }

  /// Helper function to create memory descriptor in Blocked format
  ///
  /// @input: Tensor dimensions
  /// @input: strides corresponding to dimensions. One can use utility
  ///         function such as CalculateTFStrides to compute strides
  ///         for given dimensions.
  /// @return: memory::desc object corresponding to blocked memory format
  ///          for given dimensions and strides.
  static inline memory::desc CreateBlockedMemDesc(
      const memory::dims& dim, const memory::dims& strides) {
    return CreateBlockedMemDescHelper(dim, strides, MklDnnType<T>());
  }

  /// A version of SetUsrMem call that allows user to create memory in blocked
  /// format. So in addition to accepting dimensions, it also accepts strides.
  /// This allows the user to create memory for a tensor in a format that is
  /// not supported by MKLDNN. E.g., MKLDNN does not support a native format
  /// for 6-dimensional tensors, but by using blocked format a user can create
  /// memory for a 6D tensor.
  inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides,
                        void* data_buffer = nullptr) {
    CHECK_EQ(dim.size(), strides.size());
    auto blocked_md = MklDnnData<T>::CreateBlockedMemDesc(dim, strides);
    SetUsrMem(blocked_md, data_buffer);
  }

  inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides,
                        const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(dim, strides, GetTensorBuffer(tensor));
  }

  /// A version of function to set user memory primitive that accepts memory
  /// descriptor directly, instead of accepting dimensions and format. This
  /// function is more generic than the one above, but the function above is
  /// sufficient in most cases.
  inline void SetUsrMem(const memory::desc& md, void* data_buffer = nullptr) {
    auto pd = memory::primitive_desc(md, *cpu_engine_);
    SetUsrMem(pd, data_buffer);
  }

  /// A version of SetUsrMem with memory descriptor and tensor
  inline void SetUsrMem(const memory::desc& md, const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(md, GetTensorBuffer(tensor));
  }

  /// A version of function to set user memory primitive that accepts primitive
  /// descriptor directly, instead of accepting dimensions and format. This
  /// function is more generic than the one above, but the function above is
  /// sufficient in most cases.
  inline void SetUsrMem(const memory::primitive_desc& pd,
                        void* data_buffer = nullptr) {
    CHECK_NOTNULL(cpu_engine_);
    // TODO(nhasabni): can we remove dynamic memory allocation?
    if (data_buffer) {
      user_memory_ = new memory(pd, data_buffer);
    } else {
      user_memory_ = new memory(pd);
    }
  }

  /// A version of SetUsrMem with primitive descriptor and tensor
  inline void SetUsrMem(const memory::primitive_desc& pd,
                        const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(pd, GetTensorBuffer(tensor));
  }

  /// Get function for user memory primitive.
  inline const memory* GetUsrMem() const { return user_memory_; }

  /// Get function for primitive descriptor of user memory primitive.
  inline const memory::primitive_desc GetUsrMemPrimDesc() const {
    CHECK_NOTNULL(user_memory_);
    return user_memory_->get_primitive_desc();
  }

  /// Get function for descriptor of user memory.
  inline memory::desc GetUsrMemDesc() {
    // This is ugly. Why does MKL-DNN not provide a const desc() method?
    const memory::primitive_desc pd = GetUsrMemPrimDesc();
    return const_cast<memory::primitive_desc*>(&pd)->desc();
  }

  /// Get function for data buffer of user memory primitive.
  inline void* GetUsrMemDataHandle() const {
    CHECK_NOTNULL(user_memory_);
    return user_memory_->get_data_handle();
  }

  /// Set function for data buffer of user memory primitive.
  inline void SetUsrMemDataHandle(void* data_buffer) {
    CHECK_NOTNULL(user_memory_);
    CHECK_NOTNULL(data_buffer);
    user_memory_->set_data_handle(data_buffer);
  }

  /// Set function for data buffer of user memory primitive.
  inline void SetUsrMemDataHandle(const Tensor* tensor) {
    CHECK_NOTNULL(user_memory_);
    CHECK_NOTNULL(tensor);
    user_memory_->set_data_handle(GetTensorBuffer(tensor));
  }

  /// Allocate function for data buffer.
  inline void AllocateBuffer(size_t size) {
    const int64 kMemoryAlignment = 64;  // For AVX512 memory alignment.
    allocated_buffer_ = cpu_allocator()->AllocateRaw(kMemoryAlignment, size);
  }

  inline void* GetAllocatedBuffer() { return allocated_buffer_; }

  /// Get the memory primitive for input and output of an op. If inputs
  /// to an op require reorders, then this function returns memory primitive
  /// for reorder. Otherwise, it will return memory primitive for user memory.
  ///
  /// E.g., Conv2D(I, F) is a primitive with I and F being inputs. Then to
  /// execute Conv2D, we need memory primitives for I and F. But if a reorder
  /// is required for I and F (say I_r is the reorder primitive for I, and F_r
  /// is the reorder primitive for F), then we need I_r and F_r to perform
  /// Conv2D.
  inline const memory& GetOpMem() const {
    return reorder_memory_ ? *reorder_memory_ : *user_memory_;
  }

  /// Set memory descriptor of an operation in terms of dimensions and memory
  /// format. E.g., For Conv2D, the dimensions would be same as user dimensions
  /// but memory::format would be mkldnn::any because we want MKL-DNN to choose
  /// best layout/format for given input dimensions.
  inline void SetOpMemDesc(const memory::dims& dim, memory::format fm) {
    // TODO(nhasabni): can we remove dynamic memory allocation?
    op_md_ = new memory::desc(dim, MklDnnType<T>(), fm);
  }

  /// Get function for memory descriptor for an operation
  inline const memory::desc& GetOpMemDesc() const { return *op_md_; }

  /// Predicate that checks if we need to reorder user's memory into memory
  /// pointed by op_pd.
  ///
  /// @input: op_pd - memory primitive descriptor of the given input of an
  ///         operation
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool IsReorderNeeded(const memory::primitive_desc& op_pd) const {
    CHECK_NOTNULL(user_memory_);
    return op_pd != user_memory_->get_primitive_desc();
  }

  /// Predicate that checks if we need to reorder user's memory into memory
  /// based on the provided format.
  ///
  /// @input: target_format - memory format of the given input of an
  ///         operation
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool IsReorderNeeded(const memory::format& target_format) const {
    CHECK_NOTNULL(user_memory_);
    return target_format !=
           user_memory_->get_primitive_desc().desc().data.format;
  }

  /// Function to create a reorder from memory pointed to by from to memory
  /// pointed to by to. Returns the created primitive.
  inline primitive CreateReorder(const memory* from, const memory* to) const {
    CHECK_NOTNULL(from);
    CHECK_NOTNULL(to);
    return reorder(*from, *to);
  }

  /// Function to handle input reordering
  ///
  /// Check if we need to reorder this input of an operation.
  /// Return true and allocate reorder memory primitive if reorder is needed.
  /// Otherwise, return false and do not allocate reorder memory primitive.
  ///
  /// To check if reorder is needed, this function compares memory primitive
  /// descriptor of an operation (op_pd) for the given input with the
  /// user-specified memory primitive descriptor.
  ///
  /// @input: op_pd - memory primitive descriptor of the given input of an
  ///         operation
  /// @input: net - net to which to add reorder primitive in case it is needed.
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                  std::vector<primitive>* net) {
    CHECK_NOTNULL(net);
    CHECK_NOTNULL(user_memory_);
    if (IsReorderNeeded(op_pd)) {
      // TODO(nhasabni): can we remove dynamic memory allocation?
      reorder_memory_ = new memory(op_pd);
      net->push_back(CreateReorder(user_memory_, reorder_memory_));
      return true;
    }
    return false;
  }

  /// TODO: this is a faster path with reorder primitive cache compared with
  /// CheckReorderToOpMem(..., std::vector<primitive>* net); the slow path
  /// will be removed in the future.
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd) {
    CHECK_NOTNULL(user_memory_);
    if (IsReorderNeeded(op_pd)) {
      // TODO(nhasabni): can we remove dynamic memory allocation?
      // Primitive reuse does not allow two identical reorder primitives in
      // one stream, so submit it immediately.
      reorder_memory_ = new memory(op_pd);
      std::vector<primitive> net;
      net.push_back(FindOrCreateReorder<T>(user_memory_, reorder_memory_));
      stream(stream::kind::eager).submit(net).wait();
      return true;
    }
    return false;
  }

  /// Overloaded version of above function that accepts memory buffer
  /// where output of reorder needs to be stored.
  ///
  /// @input: op_pd - memory primitive descriptor of the given input of an
  ///         operation
  /// @reorder_data_handle - memory buffer where output of reorder needs to be
  ///                        stored. The primitive does not check whether the
  ///                        buffer is large enough.
  /// @input: net - net to which to add reorder primitive in case it is needed.
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                  void* reorder_data_handle,
                                  std::vector<primitive>* net) {
    CHECK_NOTNULL(net);
    CHECK_NOTNULL(reorder_data_handle);
    CHECK_NOTNULL(user_memory_);
    if (IsReorderNeeded(op_pd)) {
      // TODO(nhasabni): can we remove dynamic memory allocation?
      reorder_memory_ = new memory(op_pd, reorder_data_handle);
      net->push_back(CreateReorder(user_memory_, reorder_memory_));
      return true;
    }
    return false;
  }

  /// TODO: this is a faster path with reorder primitive cache compared with
  /// CheckReorderToOpMem(..., std::vector<primitive>* net); the slow path
  /// will be removed in the future.
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                  void* reorder_data_handle) {
    CHECK_NOTNULL(reorder_data_handle);
    CHECK_NOTNULL(user_memory_);
    if (IsReorderNeeded(op_pd)) {
      // TODO(nhasabni): can we remove dynamic memory allocation?
      // Primitive reuse does not allow two identical reorder primitives in
      // one stream, so submit it immediately.
      std::vector<primitive> net;
      reorder_memory_ = new memory(op_pd, reorder_data_handle);
      net.push_back(FindOrCreateReorder<T>(user_memory_, reorder_memory_));
      stream(stream::kind::eager).submit(net).wait();
      return true;
    }
    return false;
  }

  /// Another overloaded version of CheckReorderToOpMem that accepts Tensor
  /// where output of reorder needs to be stored.
  ///
  /// @input: op_pd - memory primitive descriptor of the given input of an
  ///         operation
  /// @reorder_tensor - Tensor whose buffer is to be used to store output of
  ///                   reorder. The primitive does not check whether the
  ///                   buffer is large enough.
  /// @input: net - net to which to add reorder primitive in case it is needed.
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                  Tensor* reorder_tensor,
                                  std::vector<primitive>* net) {
    CHECK_NOTNULL(net);
    CHECK_NOTNULL(reorder_tensor);
    return CheckReorderToOpMem(op_pd, GetTensorBuffer(reorder_tensor), net);
  }

  /// TODO: this is a faster path with reorder primitive cache compared with
  /// CheckReorderToOpMem(..., std::vector<primitive>* net); the slow path
  /// will be removed in the future.
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                  Tensor* reorder_tensor) {
    CHECK_NOTNULL(reorder_tensor);
    return CheckReorderToOpMem(op_pd, GetTensorBuffer(reorder_tensor));
  }

  /// Function to handle output reorder
  ///
  /// This function performs very similar functionality as the input reordering
  /// function above. The only difference is that this function does not add
  /// the reorder primitive to the net. The reason for this is: the reorder
  /// primitive for output needs to be added to the list only after the
  /// operation has executed.
  /// But we need to prepare a temporary buffer in case output reorder is
  /// needed. This temporary buffer will hold the output of an operation
  /// before it is fed to the reorder primitive.
  ///
  /// @input memory primitive descriptor for the given output of an operation
  /// @return: true in case reorder of output is needed; false, otherwise.
  inline bool PrepareReorderToUserMemIfReq(
      const memory::primitive_desc& op_pd) {
    CHECK_NOTNULL(user_memory_);
    if (IsReorderNeeded(op_pd)) {
      // TODO(nhasabni): can we remove dynamic memory allocation?
      reorder_memory_ = new memory(op_pd);
      return true;
    }
    return false;
  }

  /// Function to actually insert reorder primitive in the net
  ///
  /// This function completes remaining part of output reordering. It inserts
  /// a reordering primitive from the temporary buffer that holds the output
  /// to the user-specified output buffer.
  ///
  /// @input: net - net to which to add reorder primitive
  inline void InsertReorderToUserMem(std::vector<primitive>* net) {
    CHECK_NOTNULL(net);
    CHECK_NOTNULL(user_memory_);
    CHECK_NOTNULL(reorder_memory_);
    net->push_back(CreateReorder(reorder_memory_, user_memory_));
  }

  /// TODO: this is a faster path with reorder primitive cache compared with
  /// InsertReorderToUserMem(std::vector<primitive>* net); the slow path
  /// will be removed in the future.
  inline void InsertReorderToUserMem() {
    CHECK_NOTNULL(user_memory_);
    CHECK_NOTNULL(reorder_memory_);
    // Primitive reuse does not allow two identical reorder primitives in
    // one stream, so submit it immediately.
    std::vector<primitive> net;
    net.push_back(FindOrCreateReorder<T>(reorder_memory_, user_memory_));
    stream(stream::kind::eager).submit(net).wait();
  }
};

/// Base class for operations with reuse of primitives
///
class MklPrimitive {
 public:
  virtual ~MklPrimitive() {}

  // Dummy data which MKL-DNN never operates on.
  unsigned char* DummyData = nullptr;
};

const mkldnn::memory::dims NONE_DIMS = {};

template <typename T>
class MklPrimitiveFactory {
 public:
  MklPrimitiveFactory() {}

  ~MklPrimitiveFactory() {}

  MklPrimitive* GetOp(const string& key) {
    auto& map = MklPrimitiveFactory<T>::GetHashMap();
    auto stream_iter = map.find(key);
    if (stream_iter == map.end()) {
      return nullptr;
    } else {
      CHECK(stream_iter->second != nullptr) << "nullptr present in map";
      return stream_iter->second;
    }
  }

  void SetOp(const string& key, MklPrimitive* op) {
    auto& map = MklPrimitiveFactory<T>::GetHashMap();
    auto stream_iter = map.find(key);

    CHECK(stream_iter == map.end());

    map[key] = op;
  }

  /// Function to decide whether the HW has AVX512 or AVX2.
  /// For legacy devices (without AVX512 and AVX2),
  /// MKL-DNN GEMM will be used.
  static inline bool IsLegacyPlatform() {
    return (!port::TestCPUFeature(port::CPUFeature::AVX512F) &&
            !port::TestCPUFeature(port::CPUFeature::AVX2));
  }

  /// Function to check whether primitive memory optimization is enabled.
  static inline bool IsPrimitiveMemOptEnabled() {
    bool is_primitive_mem_opt_enabled = true;
    TF_CHECK_OK(ReadBoolFromEnvVar("TF_MKL_OPTIMIZE_PRIMITIVE_MEMUSE", true,
                                   &is_primitive_mem_opt_enabled));
    return is_primitive_mem_opt_enabled;
  }

 private:
  static inline std::unordered_map<string, MklPrimitive*>& GetHashMap() {
    static thread_local std::unordered_map<string, MklPrimitive*> map_;
    return map_;
  }
};

// Utility class for creating keys of the MKL primitive pool.
class FactoryKeyCreator { public: FactoryKeyCreator() { key_.reserve(kMaxKeyLength); } ~FactoryKeyCreator() {} void AddAsKey(const string& str) { Append(str); } void AddAsKey(const mkldnn::memory::dims &dims) { for (unsigned int i = 0; i < dims.size(); i++) { AddAsKey<int>(dims[i]); } } template <typename T> void AddAsKey(const T data) { auto buffer = reinterpret_cast<const char *>(&data); Append(StringPiece(buffer, sizeof(T))); } string GetKey() { return key_; } private: string key_; const char delimiter = 'x'; const int kMaxKeyLength = 256; void Append(StringPiece s) { key_.append(string(s)); key_.append(1, delimiter); } }; static inline memory::format get_desired_format(int channel, bool is_2d = true) { memory::format fmt_desired = memory::format::any; if (port::TestCPUFeature(port::CPUFeature::AVX512F)) { fmt_desired = is_2d ? memory::format::nChw16c : memory::format::nCdhw16c; } else if (port::TestCPUFeature(port::CPUFeature::AVX2) && (channel % 8) == 0) { fmt_desired = is_2d ? memory::format::nChw8c : memory::format::ncdhw; // no avx2 support for 3d yet. } else { fmt_desired = is_2d ? memory::format::nchw : memory::format::ncdhw; } return fmt_desired; } class MklReorderPrimitive : public MklPrimitive { public: explicit MklReorderPrimitive(const memory* from, const memory* to) { Setup(from, to); } ~MklReorderPrimitive() {} std::shared_ptr<primitive> GetPrimitive() { return context_.reorder_prim; } void SetMemory(const memory* from, const memory* to) { context_.src_mem->set_data_handle(from->get_data_handle()); context_.dst_mem->set_data_handle(to->get_data_handle()); } private: struct ReorderContext { std::shared_ptr<mkldnn::memory> src_mem; std::shared_ptr<mkldnn::memory> dst_mem; std::shared_ptr<primitive> reorder_prim; ReorderContext(): src_mem(nullptr), dst_mem(nullptr), reorder_prim(nullptr) { } } context_; engine cpu_engine_ = engine(engine::cpu, 0); void Setup(const memory* from, const memory* to) { context_.src_mem.reset(new memory( {from->get_primitive_desc().desc(), cpu_engine_}, DummyData)); context_.dst_mem.reset(new memory( {to->get_primitive_desc().desc(), cpu_engine_}, DummyData)); context_.reorder_prim = std::make_shared<mkldnn::reorder>( reorder(*context_.src_mem, *context_.dst_mem)); } }; template <typename T> class MklReorderPrimitiveFactory : public MklPrimitiveFactory<T> { public: static MklReorderPrimitive* Get(const memory* from, const memory* to) { auto reorderPrim = static_cast<MklReorderPrimitive*>( MklReorderPrimitiveFactory<T>::GetInstance().GetReorder(from, to)); if (reorderPrim == nullptr) { reorderPrim = new MklReorderPrimitive(from, to); MklReorderPrimitiveFactory<T>::GetInstance().SetReorder(from, to, reorderPrim); } reorderPrim->SetMemory(from, to); return reorderPrim; } static MklReorderPrimitiveFactory & GetInstance() { static MklReorderPrimitiveFactory instance_; return instance_; } private: MklReorderPrimitiveFactory() {} ~MklReorderPrimitiveFactory() {} static string CreateKey(const memory* from, const memory* to) { string prefix = "reorder"; FactoryKeyCreator key_creator; auto const &from_desc = from->get_primitive_desc().desc().data; auto const &to_desc = to->get_primitive_desc().desc().data; memory::dims from_dims(from_desc.dims, &from_desc.dims[from_desc.ndims]); memory::dims to_dims(to_desc.dims, &to_desc.dims[to_desc.ndims]); key_creator.AddAsKey(prefix); key_creator.AddAsKey(static_cast<int>(from_desc.format)); key_creator.AddAsKey(static_cast<int>(from_desc.data_type)); key_creator.AddAsKey(from_dims); 
    key_creator.AddAsKey(static_cast<int>(to_desc.format));
    key_creator.AddAsKey(static_cast<int>(to_desc.data_type));
    key_creator.AddAsKey(to_dims);

    return key_creator.GetKey();
  }

  MklPrimitive* GetReorder(const memory* from, const memory* to) {
    string key = CreateKey(from, to);
    return this->GetOp(key);
  }

  void SetReorder(const memory* from, const memory* to, MklPrimitive* op) {
    string key = CreateKey(from, to);
    this->SetOp(key, op);
  }
};

/// Function to find (or create) a reorder from the memory pointed to by
/// `from` to the memory pointed to by `to`; it creates the primitive or
/// fetches it from the pool if it is already cached.
/// Returns the primitive.
template <typename T>
inline primitive FindOrCreateReorder(const memory* from, const memory* to) {
  CHECK_NOTNULL(from);
  CHECK_NOTNULL(to);
  MklReorderPrimitive* reorder_prim =
      MklReorderPrimitiveFactory<T>::Get(from, to);
  return *reorder_prim->GetPrimitive();
}

// Utility function to determine if it is a 1x1 convolution with stride != 1,
// for the purpose of temporarily disabling primitive reuse.
inline bool IsConv1x1StrideNot1(memory::dims filter_dims,
                                memory::dims strides) {
  if (filter_dims.size() != 4 || strides.size() != 2) return false;
  return ((filter_dims[2] == 1) && (filter_dims[3] == 1) &&
          ((strides[0] != 1) || (strides[1] != 1)));
}

#endif  // !INTEL_MKL_ML_ONLY

}  // namespace tensorflow

#endif  // INTEL_MKL
#endif  // TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
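/* Illustration (not part of mkl_util.h): the stride convention that
 * CalculateTFStrides above documents is easy to get wrong, so here is a
 * minimal standalone C sketch of the same computation. For dimension sizes
 * given in Tensorflow (outermost-to-innermost) order, the innermost
 * dimension has stride 1 and each outer stride is the product of all inner
 * dimension sizes. The function name here is hypothetical. */
#include <stdio.h>

/* Computes row-major strides for `ndims` dimension sizes, mirroring the
 * loop in CalculateTFStrides. */
static void calculate_tf_strides(const int *dims, int *strides, int ndims) {
  strides[ndims - 1] = 1;
  for (int d = ndims - 2; d >= 0; d--) {
    strides[d] = strides[d + 1] * dims[d + 1];
  }
}

int main(void) {
  int dims[4] = {1, 2, 3, 4};  /* the example from the doc comment */
  int strides[4];
  calculate_tf_strides(dims, strides, 4);
  for (int d = 0; d < 4; d++) printf("%d ", strides[d]);  /* 24 12 4 1 */
  printf("\n");
  return 0;
}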
pngquant.c
/* pngquant.c - quantize the colors in an alphamap down to a specified number ** ** © 2009-2019 by Kornel Lesiński. ** © 1989, 1991 by Jef Poskanzer. ** © 1997-2002 by Greg Roelofs; based on an idea by Stefan Schneider. ** ** See COPYRIGHT file for license. */ char *PNGQUANT_USAGE = "\ usage: pngquant [options] [ncolors] -- pngfile [pngfile ...]\n\ pngquant [options] [ncolors] - >stdout <stdin\n\n\ options:\n\ --force overwrite existing output files (synonym: -f)\n\ --skip-if-larger only save converted files if they're smaller than original\n\ --output file destination file path to use instead of --ext (synonym: -o)\n\ --ext new.png set custom suffix/extension for output filenames\n\ --quality min-max don't save below min, use fewer colors below max (0-100)\n\ --speed N speed/quality trade-off. 1=slow, 4=default, 11=fast & rough\n\ --nofs disable Floyd-Steinberg dithering\n\ --posterize N output lower-precision color (e.g. for ARGB4444 output)\n\ --strip remove optional metadata (default on Mac)\n\ --verbose print status messages (synonym: -v)\n\ \n\ Quantizes one or more 32-bit RGBA PNGs to 8-bit (or smaller) RGBA-palette.\n\ The output filename is the same as the input name except that\n\ it ends in \"-fs8.png\", \"-or8.png\" or your custom extension (unless the\n\ input is stdin, in which case the quantized image will go to stdout).\n\ If you pass the special output path \"-\" and a single input file, that file\n\ will be processed and the quantized image will go to stdout.\n\ The default behavior if the output file exists is to skip the conversion;\n\ use --force to overwrite. See man page for full list of options.\n"; #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdarg.h> #include <stdbool.h> #include <math.h> #if defined(_WIN32) || defined(WIN32) || defined(__WIN32__) # include <fcntl.h> /* O_BINARY */ # include <io.h> /* setmode() */ #else # include <unistd.h> #endif #ifdef _OPENMP #include <omp.h> #else #define omp_get_max_threads() 1 #define omp_get_thread_num() 0 #endif #include "rwpng.h" /* typedefs, common macros, public prototypes */ #include "libimagequant.h" /* if it fails here, run: git submodule update; ./configure; or add -Ilib to compiler flags */ #include "pngquant_opts.h" char *PNGQUANT_VERSION = LIQ_VERSION_STRING " (July 2019)"; static pngquant_error prepare_output_image(liq_result *result, liq_image *input_image, rwpng_color_transform tag, png8_image *output_image); static void set_palette(liq_result *result, png8_image *output_image); static pngquant_error read_image(liq_attr *options, const char *filename, int using_stdin, png24_image *input_image_p, liq_image **liq_image_p, bool keep_input_pixels, bool strip, bool verbose); static pngquant_error write_image(png8_image *output_image, png24_image *output_image24, const char *outname, struct pngquant_options *options, liq_attr *liq); static char *add_filename_extension(const char *filename, const char *newext); static bool file_exists(const char *outname); static void verbose_printf(liq_attr *liq, struct pngquant_options *context, const char *fmt, ...) 
{ if (context->log_callback) { va_list va; va_start(va, fmt); int required_space = vsnprintf(NULL, 0, fmt, va)+1; // +\0 va_end(va); #if defined(_MSC_VER) char *buf = malloc(required_space); #else char buf[required_space]; #endif va_start(va, fmt); vsnprintf(buf, required_space, fmt, va); va_end(va); context->log_callback(liq, buf, context->log_callback_user_info); #if defined(_MSC_VER) free(buf); #endif } } static void log_callback(const liq_attr *attr, const char *msg, void* user_info) { fprintf(stderr, "%s\n", msg); } #ifdef _OPENMP #define LOG_BUFFER_SIZE 1300 struct buffered_log { int buf_used; char buf[LOG_BUFFER_SIZE]; }; static void log_callback_buferred_flush(const liq_attr *attr, void *context) { struct buffered_log *log = context; if (log->buf_used) { fwrite(log->buf, 1, log->buf_used, stderr); fflush(stderr); log->buf_used = 0; } } static void log_callback_buferred(const liq_attr *attr, const char *msg, void* context) { struct buffered_log *log = context; int len = strlen(msg); if (len > LOG_BUFFER_SIZE-2) len = LOG_BUFFER_SIZE-2; if (len > LOG_BUFFER_SIZE - log->buf_used - 2) log_callback_buferred_flush(attr, log); memcpy(&log->buf[log->buf_used], msg, len); log->buf_used += len+1; log->buf[log->buf_used-1] = '\n'; log->buf[log->buf_used] = '\0'; } #endif void pngquant_internal_print_config(FILE *fd) { fputs("" #ifndef NDEBUG " WARNING: this is a DEBUG (slow) version.\n" /* NDEBUG disables assert() */ #endif #if !USE_SSE && (defined(__SSE__) || defined(__amd64__) || defined(__X86_64__) || defined(__i386__)) " SSE acceleration disabled.\n" #endif #if _OPENMP " Compiled with OpenMP (multicore support).\n" #endif , fd); fflush(fd); } FILE *pngquant_c_stderr() { return stderr; } FILE *pngquant_c_stdout() { return stdout; } static void print_full_version(FILE *fd) { fprintf(fd, "pngquant, %s, by Kornel Lesinski, Greg Roelofs.\n", PNGQUANT_VERSION); pngquant_internal_print_config(fd); rwpng_version_info(fd); fputs("\n", fd); } static void print_usage(FILE *fd) { fputs(PNGQUANT_USAGE, fd); } /** * N = automatic quality, uses limit unless force is set (N-N or 0-N) * -N = no better than N (same as 0-N) * N-M = no worse than N, no better than M * N- = no worse than N, perfect if possible (same as N-100) * * where N,M are numbers between 0 (lousy) and 100 (perfect) */ static bool parse_quality(const char *quality, liq_attr *options, bool *min_quality_limit) { long limit, target; const char *str = quality; char *end; long t1 = strtol(str, &end, 10); if (str == end) return false; str = end; if ('\0' == end[0] && t1 < 0) { // quality="-%d" target = -t1; limit = 0; } else if ('\0' == end[0]) { // quality="%d" target = t1; limit = t1*9/10; } else if ('-' == end[0] && '\0' == end[1]) { // quality="%d-" target = 100; limit = t1; } else { // quality="%d-%d" long t2 = strtol(str, &end, 10); if (str == end || t2 > 0) return false; target = -t2; limit = t1; } *min_quality_limit = (limit > 0); return LIQ_OK == liq_set_quality(options, limit, target); } pngquant_error pngquant_main_internal(struct pngquant_options *options, liq_attr *liq); static pngquant_error pngquant_file_internal(const char *filename, const char *outname, struct pngquant_options *options, liq_attr *liq); #ifndef PNGQUANT_NO_MAIN int main(int argc, char *argv[]) { struct pngquant_options options = { .floyd = 1.f, // floyd-steinberg dithering .strip = false, }; pngquant_error retval = pngquant_parse_options(argc, argv, &options); if (retval != SUCCESS) { return retval; } if (options.print_version) { puts(PNGQUANT_VERSION); return 
SUCCESS; } if (options.missing_arguments) { print_full_version(stderr); print_usage(stderr); return MISSING_ARGUMENT; } if (options.print_help) { print_full_version(stdout); print_usage(stdout); return SUCCESS; } liq_attr *liq = liq_attr_create(); if (!liq) { fputs("SSE-capable CPU is required for this build.\n", stderr); return WRONG_ARCHITECTURE; } if (options.quality && !parse_quality(options.quality, liq, &options.min_quality_limit)) { fputs("Quality should be in format min-max where min and max are numbers in range 0-100.\n", stderr); return INVALID_ARGUMENT; } if (options.iebug) { // opacities above 238 will be rounded up to 255, because IE6 truncates <255 to 0. liq_set_min_opacity(liq, 238); fputs(" warning: the workaround for IE6 is deprecated\n", stderr); } if (options.verbose) { liq_set_log_callback(liq, log_callback, NULL); options.log_callback = log_callback; } if (options.last_index_transparent) { liq_set_last_index_transparent(liq, true); } if (options.speed >= 10) { options.fast_compression = true; if (options.speed == 11) { options.floyd = 0; options.speed = 10; } } if (options.speed && LIQ_OK != liq_set_speed(liq, options.speed)) { fputs("Speed should be between 1 (slow) and 11 (fast).\n", stderr); return INVALID_ARGUMENT; } if (options.colors && LIQ_OK != liq_set_max_colors(liq, options.colors)) { fputs("Number of colors must be between 2 and 256.\n", stderr); return INVALID_ARGUMENT; } if (options.posterize && LIQ_OK != liq_set_min_posterization(liq, options.posterize)) { fputs("Posterization should be number of bits in range 0-4.\n", stderr); return INVALID_ARGUMENT; } if (options.extension && options.output_file_path) { fputs("--ext and --output options can't be used at the same time\n", stderr); return INVALID_ARGUMENT; } // new filename extension depends on options used. Typically basename-fs8.png if (options.extension == NULL) { options.extension = options.floyd > 0 ? "-fs8.png" : "-or8.png"; } if (options.output_file_path && options.num_files != 1) { fputs(" error: Only one input file is allowed when --output is used. This error also happens when filenames with spaces are not in quotes.\n", stderr); return INVALID_ARGUMENT; } if (options.using_stdout && !options.using_stdin && options.num_files != 1) { fputs(" error: Only one input file is allowed when using the special output path \"-\" to write to stdout. This error also happens when filenames with spaces are not in quotes.\n", stderr); return INVALID_ARGUMENT; } if (!options.num_files && !options.using_stdin) { fputs("No input files specified.\n", stderr); if (options.verbose) { print_full_version(stderr); } print_usage(stderr); return MISSING_ARGUMENT; } retval = pngquant_main_internal(&options, liq); liq_attr_destroy(liq); return retval; } #endif // Don't use this. This is not a public API. 
pngquant_error pngquant_main_internal(struct pngquant_options *options, liq_attr *liq) { if (options->map_file) { png24_image tmp = {.width=0}; if (SUCCESS != read_image(liq, options->map_file, false, &tmp, &options->fixed_palette_image, true, true, false)) { fprintf(stderr, " error: unable to load %s", options->map_file); return INVALID_ARGUMENT; } liq_result *tmp_quantize = liq_quantize_image(liq, options->fixed_palette_image); const liq_palette *pal = liq_get_palette(tmp_quantize); if (!pal) { fprintf(stderr, " error: unable to read colors from %s", options->map_file); return INVALID_ARGUMENT; } for(unsigned int i=0; i < pal->count; i++) { liq_image_add_fixed_color(options->fixed_palette_image, pal->entries[i]); } liq_result_destroy(tmp_quantize); } #ifdef _OPENMP // if there's a lot of files, coarse parallelism can be used if (options->num_files > 2*omp_get_max_threads()) { omp_set_nested(0); omp_set_dynamic(1); } else { omp_set_nested(1); } #endif unsigned int error_count=0, skipped_count=0, file_count=0; pngquant_error latest_error=SUCCESS; #pragma omp parallel for \ schedule(static, 1) reduction(+:skipped_count) reduction(+:error_count) reduction(+:file_count) shared(latest_error) for(int i=0; i < options->num_files; i++) { const char *filename = options->using_stdin ? "stdin" : options->files[i]; struct pngquant_options opts = *options; liq_attr *local_liq = liq_attr_copy(liq); #ifdef _OPENMP struct buffered_log buf = {0}; if (opts.log_callback && omp_get_num_threads() > 1 && opts.num_files > 1) { liq_set_log_callback(local_liq, log_callback_buferred, &buf); liq_set_log_flush_callback(local_liq, log_callback_buferred_flush, &buf); opts.log_callback = log_callback_buferred; opts.log_callback_user_info = &buf; } #endif pngquant_error retval = SUCCESS; const char *outname = opts.output_file_path; char *outname_free = NULL; if (!opts.using_stdout) { if (!outname) { outname = outname_free = add_filename_extension(filename, opts.extension); } if (!opts.force && file_exists(outname)) { fprintf(stderr, " error: '%s' exists; not overwriting\n", outname); retval = NOT_OVERWRITING_ERROR; } } if (SUCCESS == retval) { retval = pngquant_file_internal(filename, outname, &opts, local_liq); } free(outname_free); liq_attr_destroy(local_liq); if (retval) { #pragma omp critical { latest_error = retval; } if (retval == TOO_LOW_QUALITY || retval == TOO_LARGE_FILE) { skipped_count++; } else { error_count++; } } ++file_count; } if (error_count) { verbose_printf(liq, options, "There were errors quantizing %d file%s out of a total of %d file%s.", error_count, (error_count == 1)? "" : "s", file_count, (file_count == 1)? "" : "s"); } if (skipped_count) { verbose_printf(liq, options, "Skipped %d file%s out of a total of %d file%s.", skipped_count, (skipped_count == 1)? "" : "s", file_count, (file_count == 1)? "" : "s"); } if (!skipped_count && !error_count) { verbose_printf(liq, options, "Quantized %d image%s.", file_count, (file_count == 1)? "" : "s"); } if (options->fixed_palette_image) liq_image_destroy(options->fixed_palette_image); return latest_error; } /// Don't hack this. 
Instead use https://github.com/ImageOptim/libimagequant/blob/f54d2f1a3e1cf728e17326f4db0d45811c63f063/example.c
static pngquant_error pngquant_file_internal(const char *filename, const char *outname, struct pngquant_options *options, liq_attr *liq)
{
    pngquant_error retval = SUCCESS;

    verbose_printf(liq, options, "%s:", filename);

    liq_image *input_image = NULL;
    png24_image input_image_rwpng = {.width=0};
    bool keep_input_pixels = options->skip_if_larger || (options->using_stdout && options->min_quality_limit); // original may need to be output to stdout
    if (SUCCESS == retval) {
        retval = read_image(liq, filename, options->using_stdin, &input_image_rwpng, &input_image, keep_input_pixels, options->strip, options->verbose);
    }

    int quality_percent = 90; // quality on 0-100 scale, updated upon successful remap
    png8_image output_image = {.width=0};
    if (SUCCESS == retval) {
        verbose_printf(liq, options, " read %luKB file", (input_image_rwpng.file_size+1023UL)/1024UL);

        if (RWPNG_ICCP == input_image_rwpng.input_color) {
            verbose_printf(liq, options, " used embedded ICC profile to transform image to sRGB colorspace");
        } else if (RWPNG_GAMA_CHRM == input_image_rwpng.input_color) {
            verbose_printf(liq, options, " used gAMA and cHRM chunks to transform image to sRGB colorspace");
        } else if (RWPNG_ICCP_WARN_GRAY == input_image_rwpng.input_color) {
            verbose_printf(liq, options, " warning: ignored ICC profile in GRAY colorspace");
        } else if (RWPNG_COCOA == input_image_rwpng.input_color) {
            // No comment
        } else if (RWPNG_SRGB == input_image_rwpng.input_color) {
            verbose_printf(liq, options, " passing sRGB tag from the input");
        } else if (input_image_rwpng.gamma != 0.45455) {
            verbose_printf(liq, options, " converted image from gamma %2.1f to gamma 2.2", 1.0/input_image_rwpng.gamma);
        }

        // when using an image as the source of a fixed palette, the palette is extracted using regular quantization
        liq_result *remap;
        liq_error remap_error = liq_image_quantize(options->fixed_palette_image ? options->fixed_palette_image : input_image, liq, &remap);

        if (LIQ_OK == remap_error) {
            // fixed gamma ~2.2 for the web. PNG can't store exact 1/2.2
            // NB: can't change gamma here, because output_color is allowed to be an sRGB tag
            liq_set_output_gamma(remap, 0.45455);
            liq_set_dithering_level(remap, options->floyd);

            retval = prepare_output_image(remap, input_image, input_image_rwpng.output_color, &output_image);
            if (SUCCESS == retval) {
                if (LIQ_OK != liq_write_remapped_image_rows(remap, input_image, output_image.row_pointers)) {
                    retval = OUT_OF_MEMORY_ERROR;
                }

                set_palette(remap, &output_image);

                double palette_error = liq_get_quantization_error(remap);
                if (palette_error >= 0) {
                    quality_percent = liq_get_quantization_quality(remap);
                    verbose_printf(liq, options, " mapped image to new colors...MSE=%.3f (Q=%d)", palette_error, quality_percent);
                }
            }
            liq_result_destroy(remap);
        } else if (LIQ_QUALITY_TOO_LOW == remap_error) {
            retval = TOO_LOW_QUALITY;
        } else {
            retval = INVALID_ARGUMENT; // unexpected error code
        }
    }

    if (SUCCESS == retval) {

        if (options->skip_if_larger) {
            // this is a very rough approximation, but it generally avoids losing more quality than is gained in file size.
            // Quality is raised to the power 1.5, because even greater savings are needed to justify a big quality loss,
            // but >50% savings are always considered worthwhile in order to allow low-quality conversions to work at all
            const double quality = quality_percent/100.0;
            const double expected_reduced_size = pow(quality, 1.5);
            output_image.maximum_file_size = (input_image_rwpng.file_size-1) * (expected_reduced_size < 0.5 ?
0.5 : expected_reduced_size); } output_image.fast_compression = options->fast_compression; output_image.chunks = input_image_rwpng.chunks; input_image_rwpng.chunks = NULL; retval = write_image(&output_image, NULL, outname, options, liq); if (TOO_LARGE_FILE == retval) { verbose_printf(liq, options, " file exceeded expected size of %luKB", (unsigned long)output_image.maximum_file_size/1024UL); } if (SUCCESS == retval && output_image.metadata_size > 0) { verbose_printf(liq, options, " copied %dKB of additional PNG metadata", (int)(output_image.metadata_size+999)/1000); } } if (options->using_stdout && keep_input_pixels && (TOO_LARGE_FILE == retval || TOO_LOW_QUALITY == retval)) { // when outputting to stdout it'd be nasty to create 0-byte file // so if quality is too low, output 24-bit original pngquant_error write_retval = write_image(NULL, &input_image_rwpng, outname, options, liq); if (write_retval) { retval = write_retval; } } if (input_image) liq_image_destroy(input_image); rwpng_free_image24(&input_image_rwpng); rwpng_free_image8(&output_image); return retval; } static void set_palette(liq_result *result, png8_image *output_image) { const liq_palette *palette = liq_get_palette(result); output_image->num_palette = palette->count; for(unsigned int i=0; i < palette->count; i++) { const liq_color px = palette->entries[i]; output_image->palette[i] = (rwpng_rgba){.r=px.r, .g=px.g, .b=px.b, .a=px.a}; } } static bool file_exists(const char *outname) { FILE *outfile = fopen(outname, "rb"); if ((outfile ) != NULL) { fclose(outfile); return true; } return false; } /* build the output filename from the input name by inserting "-fs8" or * "-or8" before the ".png" extension (or by appending that plus ".png" if * there isn't any extension), then make sure it doesn't exist already */ static char *add_filename_extension(const char *filename, const char *newext) { size_t x = strlen(filename); char* outname = malloc(x+4+strlen(newext)+1); if (!outname) return NULL; strcpy(outname, filename); if (x > 4 && (strncmp(outname+x-4, ".png", 4) == 0 || strncmp(outname+x-4, ".PNG", 4) == 0)) { strcpy(outname+x-4, newext); } else { strcpy(outname+x, newext); } return outname; } static char *temp_filename(const char *basename) { size_t x = strlen(basename); char *outname = malloc(x+1+4); if (!outname) return NULL; strcpy(outname, basename); strcpy(outname+x, ".tmp"); return outname; } static void set_binary_mode(FILE *fp) { #if defined(_WIN32) || defined(WIN32) || defined(__WIN32__) setmode(fp == stdout ? 
1 : 0, O_BINARY); #endif } static const char *filename_part(const char *path) { const char *outfilename = strrchr(path, '/'); if (outfilename) { return outfilename+1; } else { return path; } } static bool replace_file(const char *from, const char *to, const bool force) { #if defined(_WIN32) || defined(WIN32) || defined(__WIN32__) if (force) { // On Windows rename doesn't replace unlink(to); } #endif return (0 == rename(from, to)); } static pngquant_error write_image(png8_image *output_image, png24_image *output_image24, const char *outname, struct pngquant_options *options, liq_attr *liq) { FILE *outfile; char *tempname = NULL; if (options->using_stdout) { set_binary_mode(stdout); outfile = stdout; if (output_image) { verbose_printf(liq, options, " writing %d-color image to stdout", output_image->num_palette); } else { verbose_printf(liq, options, " writing truecolor image to stdout"); } } else { tempname = temp_filename(outname); if (!tempname) return OUT_OF_MEMORY_ERROR; if ((outfile = fopen(tempname, "wb")) == NULL) { fprintf(stderr, " error: cannot open '%s' for writing\n", tempname); free(tempname); return CANT_WRITE_ERROR; } if (output_image) { verbose_printf(liq, options, " writing %d-color image as %s", output_image->num_palette, filename_part(outname)); } else { verbose_printf(liq, options, " writing truecolor image as %s", filename_part(outname)); } } pngquant_error retval; #pragma omp critical (libpng) { if (output_image) { retval = rwpng_write_image8(outfile, output_image); } else { retval = rwpng_write_image24(outfile, output_image24); } } if (!options->using_stdout) { fclose(outfile); if (SUCCESS == retval) { // Image has been written to a temporary file and then moved over destination. // This makes replacement atomic and avoids damaging destination file on write error. if (!replace_file(tempname, outname, options->force)) { retval = CANT_WRITE_ERROR; } } if (retval) { unlink(tempname); } } free(tempname); if (retval && retval != TOO_LARGE_FILE) { fprintf(stderr, " error: failed writing image to %s (%d)\n", options->using_stdout ? "stdout" : outname, retval); } return retval; } static pngquant_error read_image(liq_attr *options, const char *filename, int using_stdin, png24_image *input_image_p, liq_image **liq_image_p, bool keep_input_pixels, bool strip, bool verbose) { FILE *infile; if (using_stdin) { set_binary_mode(stdin); infile = stdin; } else if ((infile = fopen(filename, "rb")) == NULL) { fprintf(stderr, " error: cannot open %s for reading\n", filename); return READ_ERROR; } pngquant_error retval; #pragma omp critical (libpng) { retval = rwpng_read_image24(infile, input_image_p, strip, verbose); } if (!using_stdin) { fclose(infile); } if (retval) { fprintf(stderr, " error: cannot decode image %s\n", using_stdin ? 
"from stdin" : filename_part(filename)); return retval; } *liq_image_p = liq_image_create_rgba_rows(options, (void**)input_image_p->row_pointers, input_image_p->width, input_image_p->height, input_image_p->gamma); if (!*liq_image_p) { return OUT_OF_MEMORY_ERROR; } if (!keep_input_pixels) { if (LIQ_OK != liq_image_set_memory_ownership(*liq_image_p, LIQ_OWN_ROWS | LIQ_OWN_PIXELS)) { return OUT_OF_MEMORY_ERROR; } input_image_p->row_pointers = NULL; input_image_p->rgba_data = NULL; } return SUCCESS; } static pngquant_error prepare_output_image(liq_result *result, liq_image *input_image, rwpng_color_transform output_color, png8_image *output_image) { output_image->width = liq_image_get_width(input_image); output_image->height = liq_image_get_height(input_image); output_image->gamma = liq_get_output_gamma(result); output_image->output_color = output_color; /* ** Step 3.7 [GRR]: allocate memory for the entire indexed image */ output_image->indexed_data = malloc(output_image->height * output_image->width); output_image->row_pointers = malloc(output_image->height * sizeof(output_image->row_pointers[0])); if (!output_image->indexed_data || !output_image->row_pointers) { return OUT_OF_MEMORY_ERROR; } for(size_t row = 0; row < output_image->height; row++) { output_image->row_pointers[row] = output_image->indexed_data + row * output_image->width; } const liq_palette *palette = liq_get_palette(result); // tRNS, etc. output_image->num_palette = palette->count; return SUCCESS; }
DRB010-lastprivatemissing-var-yes.c
/* Copyright (C) 1991-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>. */

/* This header is separate from features.h so that the compiler can
   include it implicitly at the start of every compilation. It must
   not itself include <features.h> or any other header that includes
   <features.h> because the implicit include comes before any feature
   test macros that may be defined in a source file before it first
   explicitly includes a system header. GCC knows the name of this
   header in order to preinclude it. */

/* glibc's intent is to support the IEC 559 math functionality, real
   and complex. If the GCC (4.9 and later) predefined macros
   specifying compiler intent are available, use them to determine
   whether the overall intent is to support these features; otherwise,
   presume an older compiler has intent to support these features and
   define these macros by default. */

/* wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
   synchronized with ISO/IEC 10646:2017, fifth edition, plus the following
   additions from Amendment 1 to the fifth edition:
   - 56 emoji characters
   - 285 hentaigana
   - 3 additional Zanabazar Square characters */

/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)

LLNL-CODE-732144
All rights reserved.

This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
  notice, this list of conditions and the disclaimer below.

* Redistributions in binary form must reproduce the above copyright
  notice, this list of conditions and the disclaimer (as noted below)
  in the documentation and/or other materials provided with the
  distribution.

* Neither the name of the LLNS/LLNL nor the names of its contributors
  may be used to endorse or promote products derived from this
  software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE
LIVERMORE NATIONAL SECURITY, LLC, THE U.S.
DEPARTMENT OF ENERGY OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

/*
This loop has a loop-carried output dependence due to x=... at line 63.
The problem can be solved by using lastprivate(x).
Data race pair: x@63:5 vs. x@63:5
*/

#include <stdio.h>
#include <stdlib.h>

int main(int argc, char * argv[])
{
  int i, x;
  int len = 10000;
  int _ret_val_0;
  if (argc>1)
  {
    len=atoi(argv[1]);
  }
  #pragma cetus private(i)
  #pragma cetus lastprivate(x)
  #pragma loop name main#0
  #pragma cetus parallel
  #pragma omp parallel for private(i) lastprivate(x)
  for (i=0; i<len; i ++ )
  {
    x=i;
  }
  printf("x=%d", x);
  _ret_val_0=0;
  return _ret_val_0;
}
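/* Illustration (not part of the benchmark file): a minimal sketch of the
 * racy form the header comment above describes. Without lastprivate(x),
 * every thread writes the shared x, so the value printed after the loop is
 * whichever thread's assignment happened to land last, and the program is
 * nondeterministic. lastprivate(x) fixes this by copying out the value from
 * the logically last iteration (i == len-1). */
#include <stdio.h>

int main(void)
{
  int i, x = 0;
  int len = 10000;
  /* Racy: x is shared, so concurrent x=i assignments form an
     output-dependence race; x may not end up as len-1. */
  #pragma omp parallel for private(i)
  for (i = 0; i < len; i++) {
    x = i;
  }
  printf("x=%d\n", x);
  return 0;
}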
omp_counters.h
/*
 * Copyright (c) 2020, Arm Limited and Contributors.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* Auxiliary functions for PAPI performance counters in Barrierpoint */

#ifndef __BP_PERFCNTRS_H__
#define __BP_PERFCNTRS_H__

#ifdef __cplusplus
#include <iostream> /* needed for the std::cout/std::cerr diagnostics below;
                       must stay outside the extern "C" block */
extern "C" {
#endif

#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include <limits.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <omp.h>
#include <papi.h>

int papi_max_num_threads = 0;
int papi_event_set;
#define MAX_STR_LEN 128
#pragma omp threadprivate(papi_event_set)

#define MAX_CNTRS 6
int BP_AVAILABLE_CNTRS = 0;
int _BP_PERFCNTRS = 0;
int _BP_PERFCNTRS_VERBOSE = 0;
int _BP_PERFCNTRS_OMPPARALLEL = 0;
int _BP_PERFCNTRS_SAMPLING = 0;
int BP_PERFCNTRS_INITIALIZED = 0;
int PERF_CNTRS_STARTED = 0;
int t_events = 0;
char event_output_filename[1024];
int event_codes[MAX_CNTRS];
long long event_values[MAX_CNTRS + 1]; /* +1 is the cycle counter */
int perfc_thread_started = 1;

/* Only when instrumenting OMP parallel regions */
#define MAX_PARALLEL_PHASES 25800
unsigned long long event_values_pbarrier[MAX_PARALLEL_PHASES][MAX_CNTRS + 1]; /* +1 is the cycle counter */
int perfc_omp_parallel_region_count = 0;

/* Only when instrumenting samplings */
unsigned long long event_values_sample[MAX_CNTRS + 1]; /* +1 is the cycle counter */

#pragma omp threadprivate(perfc_thread_started, perfc_omp_parallel_region_count, \
                          event_values, event_values_pbarrier, event_values_sample)

/****************************** AUX FUNCTIONS ******************************/

/* Read an integer configuration value from the environment; 0 if unset. */
inline __attribute__((always_inline)) int env2int(const char *name){
  const char *val = getenv(name);
  if(val) return strtol(val, NULL, 0);
  return 0;
}

inline __attribute__((always_inline)) void _papi_error(char *error_str){
  PAPI_perror(error_str);
  exit(2);
}
/**************************** END AUX FUNCTIONS ****************************/

/* Set performance counters.
   This function must be called before starting any instrumentation. */
inline __attribute__((always_inline)) void initPerformanceCounters(){
  #pragma omp master
  {
    if (BP_PERFCNTRS_INITIALIZED == 0) {
      BP_PERFCNTRS_INITIALIZED = 1;
      _BP_PERFCNTRS             = env2int("BP_PERFCNTRS");
      _BP_PERFCNTRS_VERBOSE     = env2int("BP_PERFCNTRS_VERBOSE");
      _BP_PERFCNTRS_OMPPARALLEL = env2int("BP_PERFCNTRS_OMPPARALLEL");
      _BP_PERFCNTRS_SAMPLING    = env2int("BP_PERFCNTRS_SAMPLING");
      /* Ensure the maximum number of counters is respected */
      if (_BP_PERFCNTRS) {
        BP_AVAILABLE_CNTRS = PAPI_num_counters();
        if (_BP_PERFCNTRS_VERBOSE)
          std::cout << "[OMP PerfCntrs] Initializing\n[OMP PerfCntrs] Number of available counters: "
                    << BP_AVAILABLE_CNTRS << std::endl;
        if (BP_AVAILABLE_CNTRS > MAX_CNTRS) {
          BP_AVAILABLE_CNTRS = MAX_CNTRS;
          std::cout << "[OMP PerfCntrs] This module only supports up to "
                    << MAX_CNTRS << " counters\n";
        }
        if (_BP_PERFCNTRS_SAMPLING && _BP_PERFCNTRS_OMPPARALLEL) {
          std::cout << "[OMP PerfCntrs] Only Sampling-based or Parallel Region-based "
                       "can be enabled, switching to Parallel Region-based\n";
          _BP_PERFCNTRS_SAMPLING = 0;
        }
        /* Output file to store the results */
        char *_output_filename = getenv("BP_PERFCNTRS_OUTPUT_FILE");
        if (_output_filename)
          strcpy(event_output_filename, _output_filename);
        else
          strcpy(event_output_filename, "/tmp/perfcntrs_events.out");
        if (_BP_PERFCNTRS_VERBOSE)
          std::cout << "[OMP PerfCntrs] Dumping results in: "
                    << event_output_filename << std::endl;
        if (PAPI_library_init(PAPI_VER_CURRENT) != PAPI_VER_CURRENT)
          _papi_error((char *)"[OMP PerfCntrs] PAPI_library_init error");
        papi_max_num_threads = omp_get_max_threads();
        if (PAPI_thread_init((long unsigned int (*)()) omp_get_thread_num) != PAPI_OK)
          _papi_error((char *)"[OMP PerfCntrs] PAPI_thread_init error");
        /* Get the events; specify events separated by commas */
        char *events_str_ptr = getenv("BP_PERFCNTRS_EVENTS");
        char events_str[1024];
        if (events_str_ptr)
          strcpy(events_str, events_str_ptr);
        else{
          strcpy(events_str, "PAPI_TOT_INS,PAPI_TOT_CYC"); /* Default counters */
          std::cout << "[OMP PerfCntrs] No PAPI counters set. Using the default counters: "
                       "PAPI_TOT_INS and PAPI_TOT_CYC\n";
        }
        char *event = strtok(events_str, ",");
        while (event) {
          if (t_events == BP_AVAILABLE_CNTRS) {
            std::cerr << "[OMP PerfCntrs] Too many events!\n";
            exit(1);
          }
          if (PAPI_event_name_to_code(event, &event_codes[t_events]) != PAPI_OK){
            std::cerr << "[OMP PerfCntrs] Invalid PAPI event: " << event << std::endl;
            exit(1);
          }
          t_events++;
          event = strtok(NULL, ",");
        }
        if (_BP_PERFCNTRS_VERBOSE) {
          std::cout << "[OMP PerfCntrs] Registering " << t_events << " events\n";
          char event_names[MAX_CNTRS + 1][MAX_STR_LEN];
          int i;
          for (i = 0; i < t_events; i++) {
            PAPI_event_code_to_name(event_codes[i], event_names[i]);
            std::cout << " " << event_names[i] << ",";
          }
          std::cout << "\n";
        }
      }
    } /* end if BP_PERFCNTRS_INITIALIZED */
  } /* end of pragma omp master */
  #pragma omp barrier
  if (_BP_PERFCNTRS) {
    /* Init all threads */
    #pragma omp parallel
    {
      int i;
      if (_BP_PERFCNTRS_VERBOSE)
        std::cout << "[OMP PerfCntrs] Thread " << omp_get_thread_num()
                  << " in its init phase\n";
      papi_event_set = PAPI_NULL;
      if (PAPI_create_eventset(&papi_event_set) != PAPI_OK)
        _papi_error((char *)"[OMP PerfCntrs] PAPI_create_eventset");
      for (i = 0; i < t_events; i++)
        if (PAPI_add_event(papi_event_set, event_codes[i]) != PAPI_OK)
          _papi_error((char *)"[OMP PerfCntrs] PAPI_add_event");
    }
  }
}

/* Initialize and reset performance counters */
inline __attribute__((always_inline)) void startPerformanceCounters(){
  if (_BP_PERFCNTRS) {
    #pragma omp parallel
    {
      if (_BP_PERFCNTRS_VERBOSE)
        std::cout << "[OMP PerfCntrs] Thread " << omp_get_thread_num()
                  << " in its start phase\n";
      perfc_thread_started = 1;
      perfc_omp_parallel_region_count = 0;
      int rc = PAPI_start(papi_event_set);
      if (rc == PAPI_EISRUN) {
        /* Counters already running: stop, reset and restart them. */
        if (PAPI_stop(papi_event_set, event_values) != PAPI_OK)
          _papi_error((char *)"[OMP PerfCntrs] PAPI_stop in startPerformanceCounters");
        if (PAPI_reset(papi_event_set) != PAPI_OK)
          _papi_error((char *)"[OMP PerfCntrs] PAPI_reset in startPerformanceCounters");
        rc = PAPI_start(papi_event_set);
      }
      if (rc != PAPI_OK)
        _papi_error((char *)"[OMP PerfCntrs] PAPI_start in startPerformanceCounters");
    }
    PERF_CNTRS_STARTED = 1;
  }
}

/* Stop instrumentation.
   Store the values in the output file. */
inline __attribute__((always_inline)) void stopPerformanceCounters(){
  if (_BP_PERFCNTRS) {
    if (!PERF_CNTRS_STARTED)
      std::cout << "[OMP PerfCntrs] Trying to stop non-started counters\n";
    #pragma omp parallel
    {
      if (!perfc_thread_started) {
        std::cout << "[OMP PerfCntrs] Trying to stop non-started counters (thread "
                  << omp_get_thread_num() << ")\n";
      }
      /* Get the counters values */
      int i, j;
      if (PAPI_stop(papi_event_set, event_values) != PAPI_OK)
        _papi_error((char *)"[OMP PerfCntrs] PAPI_stop in stopPerformanceCounters");
      int tid = omp_get_thread_num();
      int pid = getpid();
      if (_BP_PERFCNTRS_VERBOSE)
        std::cout << "[OMP PerfCntrs] Thread " << tid << " in its stop phase\n";
      FILE *outfile = fopen(event_output_filename, "a");
      if (!outfile){
        std::cerr << "[OMP PerfCntrs] Could not create output file\n";
        exit(1);
      }
      char event_names[MAX_CNTRS + 1][MAX_STR_LEN];
      for (i = 0; i < t_events; i++)
        if (PAPI_event_code_to_name(event_codes[i], event_names[i]) != PAPI_OK)
          _papi_error((char *)"[OMP PerfCntrs] PAPI_event_code_to_name in stopPerformanceCounters");
      #pragma omp critical
      {
        if (_BP_PERFCNTRS_OMPPARALLEL) {
          for (j = 0; j < perfc_omp_parallel_region_count; j++)
            for (i = 0; i < t_events; i++)
              fprintf(outfile, "%s[%d][%d][%d]=%lld\n", event_names[i], tid,
                      pid, j, event_values_pbarrier[j][i]);
        }
        /* When BP_PERFCNTRS_OMPPARALLEL is set, the last phase goes from
         * the last "omp parallel" to the end of the region of interest.
         * Otherwise perfc_omp_parallel_region_count will be 0. */
        for (i = 0; i < t_events; i++)
          fprintf(outfile, "%s[%d][%d][%d]=%lld\n", event_names[i], tid, pid,
                  perfc_omp_parallel_region_count, event_values[i]);
      }
      fclose(outfile);
      perfc_thread_started = 0;
    }
    PERF_CNTRS_STARTED = 0;
  }
}

/* Store the performance counters values of the parallel region and do a reset */
inline __attribute__((always_inline)) void parallelRegionPerformanceCounters(){
  if (_BP_PERFCNTRS && _BP_PERFCNTRS_OMPPARALLEL && PERF_CNTRS_STARTED) {
    /* We might have parallel regions BEFORE the region of interest, so
     * it is easier to check if the instrumentation has started here */
    /* Check for nested parallelism */
    if(omp_in_parallel() == 1){
      if (!perfc_thread_started)
        std::cout << "[OMP PerfCntrs] Trying to restart non-started counters (thread "
                  << omp_get_thread_num() << ")\n";
      /* Get the counters values */
      int i;
      if (PAPI_read(papi_event_set, event_values) != PAPI_OK)
        _papi_error((char *)"[OMP PerfCntrs] PAPI_read in parallelRegionPerformanceCounters");
      int tid = omp_get_thread_num();
      if (_BP_PERFCNTRS_VERBOSE)
        std::cout << "[OMP PerfCntrs] Thread " << tid << " saw an OMP parallel region\n";
      if (perfc_omp_parallel_region_count < MAX_PARALLEL_PHASES)
        for (i = 0; i < t_events; i++)
          event_values_pbarrier[perfc_omp_parallel_region_count][i] = event_values[i];
      else
        std::cout << "[OMP PerfCntrs] Ran out of space for storing intermediate values (thread "
                  << tid << ", region " << perfc_omp_parallel_region_count << ")\n";
      perfc_omp_parallel_region_count++;
      if (PAPI_reset(papi_event_set) != PAPI_OK)
        _papi_error((char *)"[OMP PerfCntrs] PAPI_reset in parallelRegionPerformanceCounters");
    }else{
      /* No nested parallelism */
      #pragma omp parallel
      {
        if (!perfc_thread_started) {
          std::cout << "[OMP PerfCntrs] Trying to restart non-started counters (thread "
                    << omp_get_thread_num() << ")\n";
        }
        /* Get the counters values */
        int i;
        if (PAPI_read(papi_event_set, event_values) != PAPI_OK)
          _papi_error((char *)"[OMP PerfCntrs] PAPI_read in parallelRegionPerformanceCounters");
        int tid = omp_get_thread_num();
        if (_BP_PERFCNTRS_VERBOSE)
          std::cout << "[OMP PerfCntrs] Thread " << tid << " saw an OMP parallel region\n";
        if (perfc_omp_parallel_region_count < MAX_PARALLEL_PHASES)
          for (i = 0; i < t_events; i++)
            event_values_pbarrier[perfc_omp_parallel_region_count][i] = event_values[i];
        else
          std::cout << "[OMP PerfCntrs] Ran out of space for storing intermediate values (thread "
                    << tid << ", region " << perfc_omp_parallel_region_count << ")\n";
        perfc_omp_parallel_region_count++;
        if (PAPI_reset(papi_event_set) != PAPI_OK)
          _papi_error((char *)"[OMP PerfCntrs] PAPI_reset in parallelRegionPerformanceCounters");
      }
    }
  }
}

/* Store the performance counters values of the barrier region and do a reset */
inline __attribute__((always_inline)) void barrierRegionPerformanceCounters(){
  if (_BP_PERFCNTRS && _BP_PERFCNTRS_OMPPARALLEL && PERF_CNTRS_STARTED) {
    /* We might have parallel regions BEFORE the region of interest, so
     * it is easier to check if the instrumentation has started here */
    if (!perfc_thread_started) {
      std::cout << "[OMP PerfCntrs] Trying to restart non-started counters (thread "
                << omp_get_thread_num() << ")\n";
    }
    /* Get the counters values */
    int i;
    if (PAPI_read(papi_event_set, event_values) != PAPI_OK)
      _papi_error((char *)"[OMP PerfCntrs] PAPI_read in barrierRegionPerformanceCounters");
    int tid = omp_get_thread_num();
    if (_BP_PERFCNTRS_VERBOSE)
      std::cout << "[OMP PerfCntrs] Thread " << tid << " saw an OMP parallel region\n";
    if (perfc_omp_parallel_region_count < MAX_PARALLEL_PHASES)
      for (i = 0; i < t_events; i++)
        event_values_pbarrier[perfc_omp_parallel_region_count][i] = event_values[i];
    else
      std::cout << "[OMP PerfCntrs] Ran out of space for storing intermediate values (thread "
                << tid << ", region " << perfc_omp_parallel_region_count << ")\n";
    perfc_omp_parallel_region_count++;
    if (PAPI_reset(papi_event_set) != PAPI_OK)
      _papi_error((char *)"[OMP PerfCntrs] PAPI_reset in barrierRegionPerformanceCounters");
  }
}

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* __BP_PERFCNTRS_H__ */
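/* Hedged usage sketch (not part of the header above): one plausible way to
   drive this API, based on the call-order constraints documented in its
   comments -- initPerformanceCounters() first, then start/stop around the
   region of interest, with parallelRegionPerformanceCounters() at each
   parallel phase boundary when BP_PERFCNTRS_OMPPARALLEL=1. The environment
   variables are the ones the header reads; compute_phase() is a hypothetical
   workload. Compile as C++ with -fopenmp and link against -lpapi, e.g.:

     $ BP_PERFCNTRS=1 BP_PERFCNTRS_OMPPARALLEL=1 \
       BP_PERFCNTRS_EVENTS=PAPI_TOT_INS,PAPI_TOT_CYC ./app
*/
#include "omp_counters.h"

void compute_phase(int phase); /* hypothetical workload */

int main(void)
{
  initPerformanceCounters();   /* parse env vars, create per-thread event sets */
  startPerformanceCounters();  /* PAPI_start on every thread */
  for (int p = 0; p < 4; p++) {
    compute_phase(p);          /* region of interest */
    parallelRegionPerformanceCounters(); /* snapshot + reset per parallel phase */
  }
  stopPerformanceCounters();   /* PAPI_stop; append results to the output file */
  return 0;
}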
GibbsSampler.h
//
//  GibbsSampler.h
//  elly
//
//  Created by Ce Zhang on 7/28/12.
//  Copyright (c) 2012 University of Wisconsin-Madison. All rights reserved.
//

/**
 * \file This file contains functions that sample assignments
 * for a single variable.
 **/

#ifndef elly_GibbsSampler_h
#define elly_GibbsSampler_h

#include "../utils/Common.h"
#include "../factors/factor_inits.h"

/**
 * \brief Just an umbrella namespace for elementary/storage manager etc.
 **/
namespace mia {

  /**
   * \brief Namespace for Elementary.
   **/
  namespace elly{

    /**
     * \brief Namespace for sampling algorithms.
     */
    namespace alg{

      /**
       * Randomly shuffle the assignment of a mia::elly::SampleInput object.
       *
       * \param sampleInput reference to a sample task
       *
       * \return assigned value to mia::elly::SampleInput.
       *
       * \sa mia::elly::SampleInput
       **/
      int Shuffle(mia::elly::SampleInput & sampleInput){
        //mia::elly::utils::log() << ">>> Shuffle variable ID=" << sampleInput.vid << std::endl;
        int targetValue = rand() % sampleInput.vdomain;
        //mia::elly::utils::log() << "   | V" << sampleInput.vid << " <~~ " << targetValue << " from " << sampleInput.vvalue << std::endl;
        return targetValue;
      }

      /**
       * Sample a new assignment of a mia::elly::SampleInput object.
       * Also, mia::elly::SampleInput::log_improve_ratio will be filled
       * in with the improvement of the probability of the new assignment
       * compared with the original assignment. If mia::elly::SampleInput
       * contains training data (mia::elly::SampleInput::vtrain >= 0), also
       * update weights by calling the gradient function for each factor.
       *
       * \param sampleInput reference to a sample task
       * \param thread_id a number in 0~#threads
       * \param vector_pool object pool to avoid re-allocation of vectors; vector_pool[thread_id] is a vector.
       * \param is_log_system true if all factors return in log scale, otherwise linear scale (weight vs. exp(weight)).
       *
       * \return assigned value to mia::elly::SampleInput. Also the
       * log_improve_ratio field is updated as described above.
       *
       * \sa mia::elly::SampleInput
       **/
      int GibbsSampling(mia::elly::SampleInput & sampleInput, int thread_id,
                        std::vector<double>* vector_pool, bool is_log_system = true){
        //mia::elly::utils::log() << ">>> Sample variable ID=" << sampleInput.vid << std::endl;

        // for each factor
        int crid, fid, aux, vpos, funcid, aux2;
        void* mb;
        std::vector<double>* weights;

        std::vector<double>* potentials = &vector_pool[thread_id*3];
        //std::vector<double>* upper_ratios = &vector_pool[thread_id*3+1];
        //std::vector<double>* lower_ratios = &vector_pool[thread_id*3+2];

        //std::vector<double> potentials;
        if(sampleInput.vdomain > potentials->size()){
          for(int i=potentials->size()-1; i<sampleInput.vdomain; i++ ){
            potentials->push_back(0);
            //if(has_linear_upper_bound){
            //  upper_ratios->push_back(0);
            //  lower_ratios->push_back(0);
            //}
          }
        }

        for(int i=0;i<sampleInput.vdomain;i++){
          if(is_log_system){
            potentials->at(i) = 0;
          }else{
            potentials->at(i) = 1;
          }
          //if(has_linear_upper_bound){
          //  upper_ratios->at(i) = 1;
          //  lower_ratios->at(i) = 1;
          //}
        }

        for(int nf=0; nf<sampleInput.fids.size(); nf++){
          crid = sampleInput.crids[nf];
          fid = sampleInput.fids[nf];
          aux = sampleInput.auxs[nf];
          aux2 = sampleInput.aux2s[nf];
          mb = sampleInput.mbs[nf];
          vpos = sampleInput.pos_of_sample_variable[nf];
          funcid = sampleInput.funcids[nf];
          weights = sampleInput.weights[nf];

          //#pragma omp parallel for
          for(int value=0; value < sampleInput.vdomain; value++){
            double potential = funcs_potential[funcid](mb, aux, aux2, vpos, value, weights);
            //std::cout << "funcid = " << funcid << ", vpos = " << vpos << "; value = " << value << ": " << potential << std::endl;
            if(is_log_system){
              potentials->at(value) += potential;
            }else{
              potentials->at(value) *= potential;
              //if(nf == 2){ // todo: add in #threads
              //  if(funcs_upper[funcid] != NULL){
              //    upper_ratios->at(value) *= funcs_upper[funcid](mb, aux, aux2, vpos, value, weights);
              //    lower_ratios->at(value) *= potential;
              //  }else{
              //    upper_ratios->at(value) *= funcs_upper[funcid](mb, aux, aux2, vpos, value, weights);
              //    lower_ratios->at(value) *= potential;
              //  }
              //}
              //std::cout << "~~~" << potential << std::endl;
            }
          }
        }

        //for(int value=0; value < sampleInput.vdomain; value++){
        //  std::cout << lower_ratios->at(value) << ", " << potentials->at(value) << ", " << upper_ratios->at(value) << std::endl;
        //}

        double pfunc;
        if(is_log_system){
          pfunc = -10000000;
        }else{
          pfunc = 0;
        }
        for(int value=0; value < sampleInput.vdomain; value++){
          if(is_log_system){
            pfunc = mia::elly::utils::logadd(potentials->at(value), pfunc);
          }else{
            pfunc += potentials->at(value);
          }
        }
        //mia::elly::utils::log() << "   | log potential func = " << pfunc << std::endl;

        double random = drand48();
        //mia::elly::utils::log() << "   | random = " << random << std::endl;

        double accum = 0;
        double pmeta = 0;
        int targetValue = -1;
        for(int value=0; value < sampleInput.vdomain; value++){
          if(is_log_system){
            pmeta = exp( potentials->at(value) - pfunc );
          }else{
            pmeta = potentials->at(value)/pfunc;
          }
          //mia::elly::utils::log() << "     + Pr[V" << sampleInput.vid << " = " << value << "] = " << pmeta << std::endl;
          accum += pmeta;
          if(random < accum){
            targetValue = value;
            break;
          }
        }

        //if(targetValue == -1){
        //  std::cout << "accum = " << accum << std::endl;
        //}
        assert(targetValue != -1);

        if(is_log_system){
          sampleInput.log_improve_ratio = potentials->at(targetValue) - potentials->at(sampleInput.vvalue);
        }else{
          sampleInput.log_improve_ratio = potentials->at(targetValue) / potentials->at(sampleInput.vvalue);
        }

        double gradient;
        if(sampleInput.vtrain >= 0){
          //std::cout << "sample = " << targetValue << "; training = " << sampleInput.vtrain << std::endl;
          for(int nf=0; nf<sampleInput.fids.size(); nf++){
            crid = sampleInput.crids[nf];
            fid = sampleInput.fids[nf];
            aux = sampleInput.auxs[nf];
            aux2 = sampleInput.aux2s[nf];
            mb = sampleInput.mbs[nf];
            vpos = sampleInput.pos_of_sample_variable[nf];
            funcid = sampleInput.funcids[nf];
            weights = sampleInput.weights[nf];
            gradient = funcs_gradient[funcid](mb, aux, aux2, vpos, sampleInput.vvalue,
                                              sampleInput.vtrain, weights, sampleInput.stepSize);
          }
        }

        //mia::elly::utils::log() << "   | V" << sampleInput.vid << " <~~ " << targetValue << " from " << sampleInput.vvalue << std::endl;
        return targetValue;
      }

    }
  }
}

#endif
SpatialFullConvolution.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialFullConvolution.c"
#else

static int nn_(SpatialFullConvolution_updateOutput)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  int dH = luaT_getfieldcheckint(L, 1, "dH");
  THTensor *weight = luaT_getfieldcheckudata(L, 1, "weight", torch_Tensor);
  THTensor *bias = luaT_getfieldcheckudata(L, 1, "bias", torch_Tensor);
  THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor);
  int dimw = 2;
  int dimh = 1;

  luaL_argcheck(L, input->nDimension == 3 || input->nDimension == 4, 2,
                "3D or 4D(batch mode) tensor expected");

  if (input->nDimension == 4)
  {
    dimw++;
    dimh++;
  }

  {
    long nOutputPlane = weight->size[1];
    long kW           = weight->size[3];
    long kH           = weight->size[2];
    long inputWidth   = input->size[dimw];
    long inputHeight  = input->size[dimh];
    long outputWidth  = (inputWidth - 1) * dW + kW;
    long outputHeight = (inputHeight - 1) * dH + kH;

    if (input->nDimension == 3)
    {
      long i;
      real* bias_data;
      real* output_data;

      THTensor_(resize3d)(output, nOutputPlane, outputHeight, outputWidth);

      /* add bias */
      bias_data = THTensor_(data)(bias);
      output_data = THTensor_(data)(output);

#pragma omp parallel for private(i)
      for (i=0; i<bias->size[0]; i++)
      {
        real *ptr_output = output_data + i*outputWidth*outputHeight;
        long j;
        for(j = 0; j < outputWidth*outputHeight; j++)
          ptr_output[j] = bias_data[i];
      }

      /* do convolutions */
      {
        THTensor *tweight = THTensor_(newTranspose)(weight,0,1);
        THTensor_(conv2Dmv)(output, 1.0, 1.0, input, tweight, dH, dW, "F", "C");
        THTensor_(free)(tweight);
      }
    }
    else
    {
      real* bias_data;
      real* output_data;
      long p;

      THTensor_(resize4d)(output, input->size[0], nOutputPlane, outputHeight, outputWidth);

      bias_data = THTensor_(data)(bias);
      output_data = THTensor_(data)(output);

#pragma omp parallel for private(p)
      for (p=0; p<input->size[0]; p++)
      {
        /* BIAS */
        long i;
        for (i=0; i<bias->size[0]; i++)
        {
          real *ptr_output = output_data + p*nOutputPlane*outputWidth*outputHeight + i*outputWidth*outputHeight;
          long j;
          for(j = 0; j < outputWidth*outputHeight; j++)
            ptr_output[j] = bias_data[i];
        }
      }

      /* do convolutions */
      {
        THTensor *tweight = THTensor_(newTranspose)(weight,0,1);
        THTensor_(conv2Dmm)(output, 1.0, 1.0, input, tweight, dH, dW, "F", "C");
        THTensor_(free)(tweight);
      }
    }
  }
  return 1;
}

static int nn_(SpatialFullConvolution_updateGradInput)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
  THTensor *gradOutput = luaT_checkudata(L, 3, torch_Tensor);
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  int dH = luaT_getfieldcheckint(L, 1, "dH");
  THTensor *weight = luaT_getfieldcheckudata(L, 1, "weight", torch_Tensor);
  THTensor *gradInput = luaT_getfieldcheckudata(L, 1, "gradInput", torch_Tensor);

  long nOutputPlane = weight->size[1];

  THArgCheck( nOutputPlane == gradOutput->size[input->nDimension == 4 ? 1 : 0], 1,
              "Number of output features is not equal to nOutputPlane" );

  if (input->nDimension == 3)
  {
    /* gradient to input */
    THTensor_(conv2Dmv)(gradInput, 0.0, 1.0, gradOutput, weight, dH, dW, "V", "X");
  }
  else
  {
    /* gradient to input */
    THTensor_(conv2Dmm)(gradInput, 0.0, 1.0, gradOutput, weight, dH, dW, "V", "X");
  }

  return 1;
}

static int nn_(SpatialFullConvolution_accGradParameters)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
  THTensor *gradOutput = luaT_checkudata(L, 3, torch_Tensor);
  real scale = luaL_optnumber(L, 4, 1);
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  int dH = luaT_getfieldcheckint(L, 1, "dH");
  THTensor *weight = luaT_getfieldcheckudata(L, 1, "weight", torch_Tensor);
  THTensor *gradWeight = luaT_getfieldcheckudata(L, 1, "gradWeight", torch_Tensor);
  THTensor *gradBias = luaT_getfieldcheckudata(L, 1, "gradBias", torch_Tensor);

  long nOutputPlane = weight->size[1];
  int dimw = 2;
  int dimh = 1;
  real *gradBias_data;
  real *gradOutput_data;
  long noutSlice;

  THArgCheck( nOutputPlane == gradOutput->size[input->nDimension == 4 ? 1 : 0], 1,
              "Number of output features is not equal to nOutputPlane" );

  if (input->nDimension == 4)
  {
    dimw++;
    dimh++;
  }

  /* gradient to bias */
  gradBias_data = THTensor_(data)(gradBias);
  gradOutput_data = THTensor_(data)(gradOutput);
  noutSlice = gradOutput->size[dimh]*gradOutput->size[dimw];
  /*THTensor* gradOutSlice = THTensor_(new)();*/

  if (input->nDimension == 3)
  {
    long k;
#pragma omp parallel for private(k)
    for(k = 0; k < nOutputPlane; k++)
    {
      /*THTensor_(select)(gradOutSlice, gradOutput, 0, k);*/
      real *ptr_gradOutput = gradOutput_data + k*noutSlice;
      long l;
      for(l = 0; l < noutSlice; l++)
        gradBias_data[k] += scale*ptr_gradOutput[l];
    }

    /* gradient to kernels */
    THTensor_(conv2DRevger)(gradWeight, 1.0, scale, gradOutput, input, dH, dW);
  }
  else
  {
    long k;
#pragma omp parallel for private(k)
    for(k = 0; k < nOutputPlane; k++)
    {
      long p;
      for(p = 0; p < input->size[0]; p++)
      {
        /* BIAS */
        real *ptr_gradOutput = gradOutput_data + p*nOutputPlane*noutSlice + k*noutSlice;
        long l;
        for(l = 0; l < noutSlice; l++)
          gradBias_data[k] += scale*ptr_gradOutput[l];
      }
    }
    /* gradient to kernels */
    THTensor_(conv2DRevgerm)(gradWeight, 1.0, scale, gradOutput, input, dH, dW);
  }
  return 0;
}

static const struct luaL_Reg nn_(SpatialFullConvolution__) [] = {
  {"SpatialFullConvolution_updateOutput", nn_(SpatialFullConvolution_updateOutput)},
  {"SpatialFullConvolution_updateGradInput", nn_(SpatialFullConvolution_updateGradInput)},
  {"SpatialFullConvolution_accGradParameters", nn_(SpatialFullConvolution_accGradParameters)},
  {NULL, NULL}
};

static void nn_(SpatialFullConvolution_init)(lua_State *L)
{
  luaT_pushmetatable(L, torch_Tensor);
  luaT_registeratname(L, nn_(SpatialFullConvolution__), "nn");
  lua_pop(L,1);
}

#endif
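/* Side note (illustrative, not part of the module above): updateOutput sizes
   the result with the full ("transposed") convolution rule used in the code,
   out = (in - 1) * stride + kernel, which grows the spatial extent instead of
   shrinking it. A tiny check of that arithmetic: */
#include <stdio.h>

static long full_conv_out(long in, long stride, long kernel) {
  return (in - 1) * stride + kernel; /* same formula as outputWidth/outputHeight above */
}

int main(void) {
  /* a 7x7 input, 3x3 kernel, stride 2 -> 15x15 output */
  printf("out = %ld\n", full_conv_out(7, 2, 3)); /* prints: out = 15 */
  return 0;
}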
matmult_initialize.c
#include "matmult_initialize.h" void initialize(double **matrix, int rows, int cols) { int i,j; #pragma omp parallel private(i,j) shared(matrix) { //set_num_threads(); /*** Initialize matrices ***/ #pragma omp for nowait for (i=0; i<rows; i++) { for (j=0; j<cols; j++) { matrix[i][j]= i+j; } } } }
pyfr_gemm_cm.c
/******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved.                      *
* This file is part of the LIBXSMM library.                                   *
*                                                                             *
* For information on the license, see the LICENSE file.                       *
* Further information: https://github.com/hfp/libxsmm/                        *
* SPDX-License-Identifier: BSD-3-Clause                                       *
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <math.h> /* for fabs() */
#include <sys/time.h>

#include <mkl.h>
#include <libxsmm.h>

static double sec(struct timeval start, struct timeval end) {
  return ((double)(((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)))) / 1.0e6;
}

int main(int argc, char *argv[]) {
  int n, m, k;
  int lda, ldb, ldc;
  double* a;
  double* b;
  double* c1;
  double* c2;
  struct timeval l_start, l_end;
  double l_total = 0.0;
  int reps, i, j;
  const int nblock = 16;
  double alpha = 1.0, beta = 1.0;
  char transa = 'N', transb = 'N';
  libxsmm_gemm_prefetch_type l_prefetch_op = LIBXSMM_PREFETCH_NONE;
  libxsmm_dmmfunction kernel = NULL;

  if (argc != 5) {
    fprintf(stderr, "Invalid ./a.out M N K reps\n");
    exit(-1);
  }

  m = atoi(argv[1]);
  n = atoi(argv[2]);
  k = atoi(argv[3]);
  reps = atoi(argv[4]);

  /* this is col-major what you want to use for the sizes in question */
  lda = m;
  ldb = k;
  ldc = m;

  if (n % nblock != 0) {
    fprintf(stderr, "N needs to be divisible by %i\n", nblock);
    exit(-1);
  }

  a  = (double*)_mm_malloc(lda*k*sizeof(double), 64);
  b  = (double*)_mm_malloc(ldb*n*sizeof(double), 64);
  c1 = (double*)_mm_malloc(ldc*n*sizeof(double), 64);
  c2 = (double*)_mm_malloc(ldc*n*sizeof(double), 64);

  #pragma omp parallel for
  for (i = 0; i < lda*k; i++) {
    a[i] = libxsmm_rng_f64();
  }
  #pragma omp parallel for
  for (i = 0; i < ldb*n; i++) {
    b[i] = libxsmm_rng_f64();
  }
  #pragma omp parallel for
  for (i = 0; i < ldc*n; i++) {
    c1[i] = 0;
    c2[i] = 0;
  }

  /* JIT kernel */
  kernel = libxsmm_dmmdispatch(m, nblock, k, NULL, NULL, NULL, NULL, NULL, NULL, &l_prefetch_op);

  /* init MKL */
  dgemm(&transa, &transb, &m, &n, &k, &alpha, a, &lda, b, &ldb, &beta, c1, &ldc);
  #pragma omp parallel for
  for (i = 0; i < ldc*n; i++) {
    c1[i] = 0;
    c2[i] = 0;
  }

  gettimeofday(&l_start, NULL);
  for (j = 0; j < reps; j++) {
    dgemm(&transa, &transb, &m, &n, &k, &alpha, a, &lda, b, &ldb, &beta, c1, &ldc);
  }
  gettimeofday(&l_end, NULL);
  l_total = sec(l_start, l_end);
  fprintf(stdout, "time[s] MKL (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, l_total/(double)reps);
  fprintf(stdout, "GFLOPS MKL (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, (2.0 * (double)m * (double)n * (double)k * (double)reps * 1.0e-9) / l_total);
  fprintf(stdout, "GB/s MKL (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, ((double)sizeof(double) * (((double)m * (double)n) + ((double)k * (double)n)) * (double)reps * 1.0e-9) / l_total);

  gettimeofday(&l_start, NULL);
  for (j = 0; j < reps; j++) {
    #pragma omp parallel for private(i)
    for (i = 0; i < n; i += nblock) {
      kernel(a, b + (ldb*i), c2 + (ldc*i), NULL, NULL, NULL);
    }
  }
  gettimeofday(&l_end, NULL);
  l_total = sec(l_start, l_end);
  fprintf(stdout, "time[s] libxsmm (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, l_total/(double)reps);
  fprintf(stdout, "GFLOPS libxsmm (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, (2.0 * (double)m * (double)n * (double)k * (double)reps * 1.0e-9) / l_total);
  fprintf(stdout, "GB/s libxsmm (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, ((double)sizeof(double) * (((double)m * (double)n) + ((double)k * (double)n)) * (double)reps * 1.0e-9) / l_total);

  /* test result */
  double max_error = 0.0;
  for (i = 0; i < ldc*n; i++) {
    if (max_error < fabs(c1[i] - c2[i])) {
      max_error = fabs(c1[i] - c2[i]);
    }
  }
  printf("max error: %f\n\n", max_error);

  return 0;
}
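/* Side note (illustrative): the GFLOPS figure printed above counts
   2*M*N*K floating-point operations per GEMM (one multiply plus one add
   per inner-product term). A tiny helper making that arithmetic explicit: */
#include <stdio.h>

static double gemm_gflops(int m, int n, int k, int reps, double seconds) {
  return (2.0 * m * n * k * reps * 1.0e-9) / seconds; /* same formula as the fprintf above */
}

int main(void) {
  /* e.g. M=N=K=1024, 10 reps in 1.5 s -> about 14.3 GFLOPS */
  printf("%.1f GFLOPS\n", gemm_gflops(1024, 1024, 1024, 10, 1.5));
  return 0;
}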
GxB_BinaryOp_ztype_name.c
//------------------------------------------------------------------------------
// GxB_BinaryOp_ztype_name: return the type_name of z for z=f(x,y)
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

#include "GB.h"

GrB_Info GxB_BinaryOp_ztype_name    // return the name of the type of z
(
    char *type_name,        // name of the type (char array of size at least
                            // GxB_MAX_NAME_LEN, owned by the user application).
    const GrB_BinaryOp binaryop
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GB_WHERE1 ("GxB_BinaryOp_ztype_name (type_name, op)") ;
    GB_RETURN_IF_NULL (type_name) ;
    GB_RETURN_IF_NULL_OR_FAULTY (binaryop) ;
    ASSERT_BINARYOP_OK (binaryop, "binaryop for ztype_name", GB0) ;

    //--------------------------------------------------------------------------
    // get the type_name
    //--------------------------------------------------------------------------

    memcpy (type_name, binaryop->ztype->name, GxB_MAX_NAME_LEN) ;
    #pragma omp flush
    return (GrB_SUCCESS) ;
}
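/* Hedged usage sketch (not part of the file above): querying the output type
   of a built-in operator. GrB_PLUS_FP64 maps (double, double) -> double, so
   the reported z-type name should be "double". Error handling omitted for
   brevity. */
#include <stdio.h>
#include <GraphBLAS.h>

int main(void)
{
    char type_name [GxB_MAX_NAME_LEN] ;
    GrB_init (GrB_NONBLOCKING) ;
    GxB_BinaryOp_ztype_name (type_name, GrB_PLUS_FP64) ;
    printf ("ztype of GrB_PLUS_FP64: %s\n", type_name) ;  /* expected: double */
    GrB_finalize ( ) ;
    return 0 ;
}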
CGOpenMPRuntime.h
//===----- CGOpenMPRuntime.h - Interface to OpenMP Runtimes -----*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This provides a class for OpenMP runtime code generation. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H #define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H #include "CGValue.h" #include "clang/AST/DeclOpenMP.h" #include "clang/AST/GlobalDecl.h" #include "clang/AST/Type.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/StringMap.h" #include "llvm/ADT/StringSet.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include "llvm/Frontend/OpenMP/OMPIRBuilder.h" #include "llvm/IR/Function.h" #include "llvm/IR/ValueHandle.h" #include "llvm/Support/AtomicOrdering.h" namespace llvm { class ArrayType; class Constant; class FunctionType; class GlobalVariable; class StructType; class Type; class Value; class OpenMPIRBuilder; } // namespace llvm namespace clang { class Expr; class OMPDependClause; class OMPExecutableDirective; class OMPLoopDirective; class VarDecl; class OMPDeclareReductionDecl; class IdentifierInfo; namespace CodeGen { class Address; class CodeGenFunction; class CodeGenModule; /// A basic class for pre|post-action for advanced codegen sequence for OpenMP /// region. class PrePostActionTy { public: explicit PrePostActionTy() {} virtual void Enter(CodeGenFunction &CGF) {} virtual void Exit(CodeGenFunction &CGF) {} virtual ~PrePostActionTy() {} }; /// Class provides a way to call simple version of codegen for OpenMP region, or /// an advanced with possible pre|post-actions in codegen. 
class RegionCodeGenTy final { intptr_t CodeGen; typedef void (*CodeGenTy)(intptr_t, CodeGenFunction &, PrePostActionTy &); CodeGenTy Callback; mutable PrePostActionTy *PrePostAction; RegionCodeGenTy() = delete; RegionCodeGenTy &operator=(const RegionCodeGenTy &) = delete; template <typename Callable> static void CallbackFn(intptr_t CodeGen, CodeGenFunction &CGF, PrePostActionTy &Action) { return (*reinterpret_cast<Callable *>(CodeGen))(CGF, Action); } public: template <typename Callable> RegionCodeGenTy( Callable &&CodeGen, std::enable_if_t<!std::is_same<std::remove_reference_t<Callable>, RegionCodeGenTy>::value> * = nullptr) : CodeGen(reinterpret_cast<intptr_t>(&CodeGen)), Callback(CallbackFn<std::remove_reference_t<Callable>>), PrePostAction(nullptr) {} void setAction(PrePostActionTy &Action) const { PrePostAction = &Action; } void operator()(CodeGenFunction &CGF) const; }; struct OMPTaskDataTy final { SmallVector<const Expr *, 4> PrivateVars; SmallVector<const Expr *, 4> PrivateCopies; SmallVector<const Expr *, 4> FirstprivateVars; SmallVector<const Expr *, 4> FirstprivateCopies; SmallVector<const Expr *, 4> FirstprivateInits; SmallVector<const Expr *, 4> LastprivateVars; SmallVector<const Expr *, 4> LastprivateCopies; SmallVector<const Expr *, 4> ReductionVars; SmallVector<const Expr *, 4> ReductionOrigs; SmallVector<const Expr *, 4> ReductionCopies; SmallVector<const Expr *, 4> ReductionOps; SmallVector<CanonicalDeclPtr<const VarDecl>, 4> PrivateLocals; struct DependData { OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown; const Expr *IteratorExpr = nullptr; SmallVector<const Expr *, 4> DepExprs; explicit DependData() = default; DependData(OpenMPDependClauseKind DepKind, const Expr *IteratorExpr) : DepKind(DepKind), IteratorExpr(IteratorExpr) {} }; SmallVector<DependData, 4> Dependences; llvm::PointerIntPair<llvm::Value *, 1, bool> Final; llvm::PointerIntPair<llvm::Value *, 1, bool> Schedule; llvm::PointerIntPair<llvm::Value *, 1, bool> Priority; llvm::Value *Reductions = nullptr; unsigned NumberOfParts = 0; bool Tied = true; bool Nogroup = false; bool IsReductionWithTaskMod = false; bool IsWorksharingReduction = false; }; /// Class intended to support codegen of all kind of the reduction clauses. class ReductionCodeGen { private: /// Data required for codegen of reduction clauses. struct ReductionData { /// Reference to the item shared between tasks to reduce into. const Expr *Shared = nullptr; /// Reference to the original item. const Expr *Ref = nullptr; /// Helper expression for generation of private copy. const Expr *Private = nullptr; /// Helper expression for generation reduction operation. const Expr *ReductionOp = nullptr; ReductionData(const Expr *Shared, const Expr *Ref, const Expr *Private, const Expr *ReductionOp) : Shared(Shared), Ref(Ref), Private(Private), ReductionOp(ReductionOp) { } }; /// List of reduction-based clauses. SmallVector<ReductionData, 4> ClausesData; /// List of addresses of shared variables/expressions. SmallVector<std::pair<LValue, LValue>, 4> SharedAddresses; /// List of addresses of original variables/expressions. SmallVector<std::pair<LValue, LValue>, 4> OrigAddresses; /// Sizes of the reduction items in chars. SmallVector<std::pair<llvm::Value *, llvm::Value *>, 4> Sizes; /// Base declarations for the reduction items. SmallVector<const VarDecl *, 4> BaseDecls; /// Emits lvalue for shared expression. LValue emitSharedLValue(CodeGenFunction &CGF, const Expr *E); /// Emits upper bound for shared expression (if array section). 
LValue emitSharedLValueUB(CodeGenFunction &CGF, const Expr *E); /// Performs aggregate initialization. /// \param N Number of reduction item in the common list. /// \param PrivateAddr Address of the corresponding private item. /// \param SharedLVal Address of the original shared variable. /// \param DRD Declare reduction construct used for reduction item. void emitAggregateInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal, const OMPDeclareReductionDecl *DRD); public: ReductionCodeGen(ArrayRef<const Expr *> Shareds, ArrayRef<const Expr *> Origs, ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> ReductionOps); /// Emits lvalue for the shared and original reduction item. /// \param N Number of the reduction item. void emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N); /// Emits the code for the variable-modified type, if required. /// \param N Number of the reduction item. void emitAggregateType(CodeGenFunction &CGF, unsigned N); /// Emits the code for the variable-modified type, if required. /// \param N Number of the reduction item. /// \param Size Size of the type in chars. void emitAggregateType(CodeGenFunction &CGF, unsigned N, llvm::Value *Size); /// Performs initialization of the private copy for the reduction item. /// \param N Number of the reduction item. /// \param PrivateAddr Address of the corresponding private item. /// \param DefaultInit Default initialization sequence that should be /// performed if no reduction specific initialization is found. /// \param SharedLVal Address of the original shared variable. void emitInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal, llvm::function_ref<bool(CodeGenFunction &)> DefaultInit); /// Returns true if the private copy requires cleanups. bool needCleanups(unsigned N); /// Emits cleanup code for the reduction item. /// \param N Number of the reduction item. /// \param PrivateAddr Address of the corresponding private item. void emitCleanups(CodeGenFunction &CGF, unsigned N, Address PrivateAddr); /// Adjusts \p PrivatedAddr for using instead of the original variable /// address in normal operations. /// \param N Number of the reduction item. /// \param PrivateAddr Address of the corresponding private item. Address adjustPrivateAddress(CodeGenFunction &CGF, unsigned N, Address PrivateAddr); /// Returns LValue for the reduction item. LValue getSharedLValue(unsigned N) const { return SharedAddresses[N].first; } /// Returns LValue for the original reduction item. LValue getOrigLValue(unsigned N) const { return OrigAddresses[N].first; } /// Returns the size of the reduction item (in chars and total number of /// elements in the item), or nullptr, if the size is a constant. std::pair<llvm::Value *, llvm::Value *> getSizes(unsigned N) const { return Sizes[N]; } /// Returns the base declaration of the reduction item. const VarDecl *getBaseDecl(unsigned N) const { return BaseDecls[N]; } /// Returns the base declaration of the reduction item. const Expr *getRefExpr(unsigned N) const { return ClausesData[N].Ref; } /// Returns true if the initialization of the reduction item uses initializer /// from declare reduction construct. bool usesReductionInitializer(unsigned N) const; }; class CGOpenMPRuntime { public: /// Allows to disable automatic handling of functions used in target regions /// as those marked as `omp declare target`. 
class DisableAutoDeclareTargetRAII { CodeGenModule &CGM; bool SavedShouldMarkAsGlobal; public: DisableAutoDeclareTargetRAII(CodeGenModule &CGM); ~DisableAutoDeclareTargetRAII(); }; /// Manages list of nontemporal decls for the specified directive. class NontemporalDeclsRAII { CodeGenModule &CGM; const bool NeedToPush; public: NontemporalDeclsRAII(CodeGenModule &CGM, const OMPLoopDirective &S); ~NontemporalDeclsRAII(); }; /// Manages list of nontemporal decls for the specified directive. class UntiedTaskLocalDeclsRAII { CodeGenModule &CGM; const bool NeedToPush; public: UntiedTaskLocalDeclsRAII( CodeGenFunction &CGF, const llvm::DenseMap<CanonicalDeclPtr<const VarDecl>, std::pair<Address, Address>> &LocalVars); ~UntiedTaskLocalDeclsRAII(); }; /// Maps the expression for the lastprivate variable to the global copy used /// to store new value because original variables are not mapped in inner /// parallel regions. Only private copies are captured but we need also to /// store private copy in shared address. /// Also, stores the expression for the private loop counter and it /// threaprivate name. struct LastprivateConditionalData { llvm::MapVector<CanonicalDeclPtr<const Decl>, SmallString<16>> DeclToUniqueName; LValue IVLVal; llvm::Function *Fn = nullptr; bool Disabled = false; }; /// Manages list of lastprivate conditional decls for the specified directive. class LastprivateConditionalRAII { enum class ActionToDo { DoNotPush, PushAsLastprivateConditional, DisableLastprivateConditional, }; CodeGenModule &CGM; ActionToDo Action = ActionToDo::DoNotPush; /// Check and try to disable analysis of inner regions for changes in /// lastprivate conditional. void tryToDisableInnerAnalysis(const OMPExecutableDirective &S, llvm::DenseSet<CanonicalDeclPtr<const Decl>> &NeedToAddForLPCsAsDisabled) const; LastprivateConditionalRAII(CodeGenFunction &CGF, const OMPExecutableDirective &S); public: explicit LastprivateConditionalRAII(CodeGenFunction &CGF, const OMPExecutableDirective &S, LValue IVLVal); static LastprivateConditionalRAII disable(CodeGenFunction &CGF, const OMPExecutableDirective &S); ~LastprivateConditionalRAII(); }; llvm::OpenMPIRBuilder &getOMPBuilder() { return OMPBuilder; } protected: CodeGenModule &CGM; StringRef FirstSeparator, Separator; /// Constructor allowing to redefine the name separator for the variables. explicit CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator, StringRef Separator); /// Creates offloading entry for the provided entry ID \a ID, /// address \a Addr, size \a Size, and flags \a Flags. virtual void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr, uint64_t Size, int32_t Flags, llvm::GlobalValue::LinkageTypes Linkage); /// Helper to emit outlined function for 'target' directive. /// \param D Directive to emit. /// \param ParentName Name of the function that encloses the target region. /// \param OutlinedFn Outlined function value to be defined by this call. /// \param OutlinedFnID Outlined function ID value to be defined by this call. /// \param IsOffloadEntry True if the outlined function is an offload entry. /// \param CodeGen Lambda codegen specific to an accelerator device. /// An outlined function may not be an entry if, e.g. the if clause always /// evaluates to false. 
virtual void emitTargetOutlinedFunctionHelper(const OMPExecutableDirective &D, StringRef ParentName, llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID, bool IsOffloadEntry, const RegionCodeGenTy &CodeGen); /// Emits object of ident_t type with info for source location. /// \param Flags Flags for OpenMP location. /// llvm::Value *emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc, unsigned Flags = 0); /// Returns pointer to ident_t type. llvm::Type *getIdentTyPointerTy(); /// Gets thread id value for the current thread. /// llvm::Value *getThreadID(CodeGenFunction &CGF, SourceLocation Loc); /// Get the function name of an outlined region. // The name can be customized depending on the target. // virtual StringRef getOutlinedHelperName() const { return ".omp_outlined."; } /// Emits \p Callee function call with arguments \p Args with location \p Loc. void emitCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee Callee, ArrayRef<llvm::Value *> Args = llvm::None) const; /// Emits address of the word in a memory where current thread id is /// stored. virtual Address emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc); void setLocThreadIdInsertPt(CodeGenFunction &CGF, bool AtCurrentPoint = false); void clearLocThreadIdInsertPt(CodeGenFunction &CGF); /// Check if the default location must be constant. /// Default is false to support OMPT/OMPD. virtual bool isDefaultLocationConstant() const { return false; } /// Returns additional flags that can be stored in reserved_2 field of the /// default location. virtual unsigned getDefaultLocationReserved2Flags() const { return 0; } /// Returns default flags for the barriers depending on the directive, for /// which this barier is going to be emitted. static unsigned getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind); /// Get the LLVM type for the critical name. llvm::ArrayType *getKmpCriticalNameTy() const {return KmpCriticalNameTy;} /// Returns corresponding lock object for the specified critical region /// name. If the lock object does not exist it is created, otherwise the /// reference to the existing copy is returned. /// \param CriticalName Name of the critical region. /// llvm::Value *getCriticalRegionLock(StringRef CriticalName); private: /// An OpenMP-IR-Builder instance. llvm::OpenMPIRBuilder OMPBuilder; /// Map for SourceLocation and OpenMP runtime library debug locations. typedef llvm::DenseMap<unsigned, llvm::Value *> OpenMPDebugLocMapTy; OpenMPDebugLocMapTy OpenMPDebugLocMap; /// The type for a microtask which gets passed to __kmpc_fork_call(). /// Original representation is: /// typedef void (kmpc_micro)(kmp_int32 global_tid, kmp_int32 bound_tid,...); llvm::FunctionType *Kmpc_MicroTy = nullptr; /// Stores debug location and ThreadID for the function. struct DebugLocThreadIdTy { llvm::Value *DebugLoc; llvm::Value *ThreadID; /// Insert point for the service instructions. llvm::AssertingVH<llvm::Instruction> ServiceInsertPt = nullptr; }; /// Map of local debug location, ThreadId and functions. typedef llvm::DenseMap<llvm::Function *, DebugLocThreadIdTy> OpenMPLocThreadIDMapTy; OpenMPLocThreadIDMapTy OpenMPLocThreadIDMap; /// Map of UDRs and corresponding combiner/initializer. typedef llvm::DenseMap<const OMPDeclareReductionDecl *, std::pair<llvm::Function *, llvm::Function *>> UDRMapTy; UDRMapTy UDRMap; /// Map of functions and locally defined UDRs. 
typedef llvm::DenseMap<llvm::Function *, SmallVector<const OMPDeclareReductionDecl *, 4>> FunctionUDRMapTy; FunctionUDRMapTy FunctionUDRMap; /// Map from the user-defined mapper declaration to its corresponding /// functions. llvm::DenseMap<const OMPDeclareMapperDecl *, llvm::Function *> UDMMap; /// Map of functions and their local user-defined mappers. using FunctionUDMMapTy = llvm::DenseMap<llvm::Function *, SmallVector<const OMPDeclareMapperDecl *, 4>>; FunctionUDMMapTy FunctionUDMMap; /// Maps local variables marked as lastprivate conditional to their internal /// types. llvm::DenseMap<llvm::Function *, llvm::DenseMap<CanonicalDeclPtr<const Decl>, std::tuple<QualType, const FieldDecl *, const FieldDecl *, LValue>>> LastprivateConditionalToTypes; /// Maps function to the position of the untied task locals stack. llvm::DenseMap<llvm::Function *, unsigned> FunctionToUntiedTaskStackMap; /// Type kmp_critical_name, originally defined as typedef kmp_int32 /// kmp_critical_name[8]; llvm::ArrayType *KmpCriticalNameTy; /// An ordered map of auto-generated variables to their unique names. /// It stores variables with the following names: 1) ".gomp_critical_user_" + /// <critical_section_name> + ".var" for "omp critical" directives; 2) /// <mangled_name_for_global_var> + ".cache." for cache for threadprivate /// variables. llvm::StringMap<llvm::AssertingVH<llvm::Constant>, llvm::BumpPtrAllocator> InternalVars; /// Type typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *); llvm::Type *KmpRoutineEntryPtrTy = nullptr; QualType KmpRoutineEntryPtrQTy; /// Type typedef struct kmp_task { /// void * shareds; /**< pointer to block of pointers to /// shared vars */ /// kmp_routine_entry_t routine; /**< pointer to routine to call for /// executing task */ /// kmp_int32 part_id; /**< part id for the task */ /// kmp_routine_entry_t destructors; /* pointer to function to invoke /// deconstructors of firstprivate C++ objects */ /// } kmp_task_t; QualType KmpTaskTQTy; /// Saved kmp_task_t for task directive. QualType SavedKmpTaskTQTy; /// Saved kmp_task_t for taskloop-based directive. QualType SavedKmpTaskloopTQTy; /// Type typedef struct kmp_depend_info { /// kmp_intptr_t base_addr; /// size_t len; /// struct { /// bool in:1; /// bool out:1; /// } flags; /// } kmp_depend_info_t; QualType KmpDependInfoTy; /// Type typedef struct kmp_task_affinity_info { /// kmp_intptr_t base_addr; /// size_t len; /// struct { /// bool flag1 : 1; /// bool flag2 : 1; /// kmp_int32 reserved : 30; /// } flags; /// } kmp_task_affinity_info_t; QualType KmpTaskAffinityInfoTy; /// struct kmp_dim { // loop bounds info casted to kmp_int64 /// kmp_int64 lo; // lower /// kmp_int64 up; // upper /// kmp_int64 st; // stride /// }; QualType KmpDimTy; /// Type struct __tgt_offload_entry{ /// void *addr; // Pointer to the offload entry info. /// // (function or global) /// char *name; // Name of the function or global. /// size_t size; // Size of the entry info (0 if it a function). /// int32_t flags; /// int32_t reserved; /// }; QualType TgtOffloadEntryQTy; /// Entity that registers the offloading constants that were emitted so /// far. class OffloadEntriesInfoManagerTy { CodeGenModule &CGM; /// Number of entries registered so far. unsigned OffloadingEntriesNum = 0; public: /// Base class of the entries info. class OffloadEntryInfo { public: /// Kind of a given entry. enum OffloadingEntryInfoKinds : unsigned { /// Entry is a target region. OffloadingEntryInfoTargetRegion = 0, /// Entry is a declare target variable. 
OffloadingEntryInfoDeviceGlobalVar = 1, /// Invalid entry info. OffloadingEntryInfoInvalid = ~0u }; protected: OffloadEntryInfo() = delete; explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind) : Kind(Kind) {} explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order, uint32_t Flags) : Flags(Flags), Order(Order), Kind(Kind) {} ~OffloadEntryInfo() = default; public: bool isValid() const { return Order != ~0u; } unsigned getOrder() const { return Order; } OffloadingEntryInfoKinds getKind() const { return Kind; } uint32_t getFlags() const { return Flags; } void setFlags(uint32_t NewFlags) { Flags = NewFlags; } llvm::Constant *getAddress() const { return cast_or_null<llvm::Constant>(Addr); } void setAddress(llvm::Constant *V) { assert(!Addr.pointsToAliveValue() && "Address has been set before!"); Addr = V; } static bool classof(const OffloadEntryInfo *Info) { return true; } private: /// Address of the entity that has to be mapped for offloading. llvm::WeakTrackingVH Addr; /// Flags associated with the device global. uint32_t Flags = 0u; /// Order this entry was emitted. unsigned Order = ~0u; OffloadingEntryInfoKinds Kind = OffloadingEntryInfoInvalid; }; /// Return true if a there are no entries defined. bool empty() const; /// Return number of entries defined so far. unsigned size() const { return OffloadingEntriesNum; } OffloadEntriesInfoManagerTy(CodeGenModule &CGM) : CGM(CGM) {} // // Target region entries related. // /// Kind of the target registry entry. enum OMPTargetRegionEntryKind : uint32_t { /// Mark the entry as target region. OMPTargetRegionEntryTargetRegion = 0x0, /// Mark the entry as a global constructor. OMPTargetRegionEntryCtor = 0x02, /// Mark the entry as a global destructor. OMPTargetRegionEntryDtor = 0x04, }; /// Target region entries info. class OffloadEntryInfoTargetRegion final : public OffloadEntryInfo { /// Address that can be used as the ID of the entry. llvm::Constant *ID = nullptr; public: OffloadEntryInfoTargetRegion() : OffloadEntryInfo(OffloadingEntryInfoTargetRegion) {} explicit OffloadEntryInfoTargetRegion(unsigned Order, llvm::Constant *Addr, llvm::Constant *ID, OMPTargetRegionEntryKind Flags) : OffloadEntryInfo(OffloadingEntryInfoTargetRegion, Order, Flags), ID(ID) { setAddress(Addr); } llvm::Constant *getID() const { return ID; } void setID(llvm::Constant *V) { assert(!ID && "ID has been set before!"); ID = V; } static bool classof(const OffloadEntryInfo *Info) { return Info->getKind() == OffloadingEntryInfoTargetRegion; } }; /// Initialize target region entry. void initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned LineNum, unsigned Order); /// Register target region entry. void registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned LineNum, llvm::Constant *Addr, llvm::Constant *ID, OMPTargetRegionEntryKind Flags); /// Return true if a target region entry with the provided information /// exists. bool hasTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned LineNum) const; /// brief Applies action \a Action on all registered entries. typedef llvm::function_ref<void(unsigned, unsigned, StringRef, unsigned, const OffloadEntryInfoTargetRegion &)> OffloadTargetRegionEntryInfoActTy; void actOnTargetRegionEntriesInfo( const OffloadTargetRegionEntryInfoActTy &Action); // // Device global variable entries related. // /// Kind of the global variable entry.. 
enum OMPTargetGlobalVarEntryKind : uint32_t { /// Mark the entry as a to declare target. OMPTargetGlobalVarEntryTo = 0x0, /// Mark the entry as a to declare target link. OMPTargetGlobalVarEntryLink = 0x1, }; /// Device global variable entries info. class OffloadEntryInfoDeviceGlobalVar final : public OffloadEntryInfo { /// Type of the global variable. CharUnits VarSize; llvm::GlobalValue::LinkageTypes Linkage; public: OffloadEntryInfoDeviceGlobalVar() : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar) {} explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order, OMPTargetGlobalVarEntryKind Flags) : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags) {} explicit OffloadEntryInfoDeviceGlobalVar( unsigned Order, llvm::Constant *Addr, CharUnits VarSize, OMPTargetGlobalVarEntryKind Flags, llvm::GlobalValue::LinkageTypes Linkage) : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags), VarSize(VarSize), Linkage(Linkage) { setAddress(Addr); } CharUnits getVarSize() const { return VarSize; } void setVarSize(CharUnits Size) { VarSize = Size; } llvm::GlobalValue::LinkageTypes getLinkage() const { return Linkage; } void setLinkage(llvm::GlobalValue::LinkageTypes LT) { Linkage = LT; } static bool classof(const OffloadEntryInfo *Info) { return Info->getKind() == OffloadingEntryInfoDeviceGlobalVar; } }; /// Initialize device global variable entry. void initializeDeviceGlobalVarEntryInfo(StringRef Name, OMPTargetGlobalVarEntryKind Flags, unsigned Order); /// Register device global variable entry. void registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr, CharUnits VarSize, OMPTargetGlobalVarEntryKind Flags, llvm::GlobalValue::LinkageTypes Linkage); /// Checks if the variable with the given name has been registered already. bool hasDeviceGlobalVarEntryInfo(StringRef VarName) const { return OffloadEntriesDeviceGlobalVar.count(VarName) > 0; } /// Applies action \a Action on all registered entries. typedef llvm::function_ref<void(StringRef, const OffloadEntryInfoDeviceGlobalVar &)> OffloadDeviceGlobalVarEntryInfoActTy; void actOnDeviceGlobalVarEntriesInfo( const OffloadDeviceGlobalVarEntryInfoActTy &Action); private: // Storage for target region entries kind. The storage is to be indexed by // file ID, device ID, parent function name and line number. typedef llvm::DenseMap<unsigned, OffloadEntryInfoTargetRegion> OffloadEntriesTargetRegionPerLine; typedef llvm::StringMap<OffloadEntriesTargetRegionPerLine> OffloadEntriesTargetRegionPerParentName; typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerParentName> OffloadEntriesTargetRegionPerFile; typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerFile> OffloadEntriesTargetRegionPerDevice; typedef OffloadEntriesTargetRegionPerDevice OffloadEntriesTargetRegionTy; OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion; /// Storage for device global variable entries kind. The storage is to be /// indexed by mangled name. typedef llvm::StringMap<OffloadEntryInfoDeviceGlobalVar> OffloadEntriesDeviceGlobalVarTy; OffloadEntriesDeviceGlobalVarTy OffloadEntriesDeviceGlobalVar; }; OffloadEntriesInfoManagerTy OffloadEntriesInfoManager; bool ShouldMarkAsGlobal = true; /// List of the emitted declarations. llvm::DenseSet<CanonicalDeclPtr<const Decl>> AlreadyEmittedTargetDecls; /// List of the global variables with their addresses that should not be /// emitted for the target. 
llvm::StringMap<llvm::WeakTrackingVH> EmittedNonTargetVariables; /// List of variables that can become declare target implicitly and, thus, /// must be emitted. llvm::SmallDenseSet<const VarDecl *> DeferredGlobalVariables; using NontemporalDeclsSet = llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>>; /// Stack for list of declarations in current context marked as nontemporal. /// The set is the union of all current stack elements. llvm::SmallVector<NontemporalDeclsSet, 4> NontemporalDeclsStack; using UntiedLocalVarsAddressesMap = llvm::DenseMap<CanonicalDeclPtr<const VarDecl>, std::pair<Address, Address>>; llvm::SmallVector<UntiedLocalVarsAddressesMap, 4> UntiedLocalVarsStack; /// Stack for list of addresses of declarations in current context marked as /// lastprivate conditional. The set is the union of all current stack /// elements. llvm::SmallVector<LastprivateConditionalData, 4> LastprivateConditionalStack; /// Flag for keeping track of weather a requires unified_shared_memory /// directive is present. bool HasRequiresUnifiedSharedMemory = false; /// Atomic ordering from the omp requires directive. llvm::AtomicOrdering RequiresAtomicOrdering = llvm::AtomicOrdering::Monotonic; /// Flag for keeping track of weather a target region has been emitted. bool HasEmittedTargetRegion = false; /// Flag for keeping track of weather a device routine has been emitted. /// Device routines are specific to the bool HasEmittedDeclareTargetRegion = false; /// Loads all the offload entries information from the host IR /// metadata. void loadOffloadInfoMetadata(); /// Returns __tgt_offload_entry type. QualType getTgtOffloadEntryQTy(); /// Start scanning from statement \a S and and emit all target regions /// found along the way. /// \param S Starting statement. /// \param ParentName Name of the function declaration that is being scanned. void scanForTargetRegionsFunctions(const Stmt *S, StringRef ParentName); /// Build type kmp_routine_entry_t (if not built yet). void emitKmpRoutineEntryT(QualType KmpInt32Ty); /// Returns pointer to kmpc_micro type. llvm::Type *getKmpc_MicroPointerTy(); /// Returns __kmpc_for_static_init_* runtime function for the specified /// size \a IVSize and sign \a IVSigned. llvm::FunctionCallee createForStaticInitFunction(unsigned IVSize, bool IVSigned); /// Returns __kmpc_dispatch_init_* runtime function for the specified /// size \a IVSize and sign \a IVSigned. llvm::FunctionCallee createDispatchInitFunction(unsigned IVSize, bool IVSigned); /// Returns __kmpc_dispatch_next_* runtime function for the specified /// size \a IVSize and sign \a IVSigned. llvm::FunctionCallee createDispatchNextFunction(unsigned IVSize, bool IVSigned); /// Returns __kmpc_dispatch_fini_* runtime function for the specified /// size \a IVSize and sign \a IVSigned. llvm::FunctionCallee createDispatchFiniFunction(unsigned IVSize, bool IVSigned); /// If the specified mangled name is not in the module, create and /// return threadprivate cache object. This object is a pointer's worth of /// storage that's reserved for use by the OpenMP runtime. /// \param VD Threadprivate variable. /// \return Cache variable for the specified threadprivate. llvm::Constant *getOrCreateThreadPrivateCache(const VarDecl *VD); /// Gets (if variable with the given name already exist) or creates /// internal global variable with the specified Name. The created variable has /// linkage CommonLinkage by default and is initialized by null value. /// \param Ty Type of the global variable. 
If it is exist already the type /// must be the same. /// \param Name Name of the variable. llvm::Constant *getOrCreateInternalVariable(llvm::Type *Ty, const llvm::Twine &Name, unsigned AddressSpace = 0); /// Set of threadprivate variables with the generated initializer. llvm::StringSet<> ThreadPrivateWithDefinition; /// Set of declare target variables with the generated initializer. llvm::StringSet<> DeclareTargetWithDefinition; /// Emits initialization code for the threadprivate variables. /// \param VDAddr Address of the global variable \a VD. /// \param Ctor Pointer to a global init function for \a VD. /// \param CopyCtor Pointer to a global copy function for \a VD. /// \param Dtor Pointer to a global destructor function for \a VD. /// \param Loc Location of threadprivate declaration. void emitThreadPrivateVarInit(CodeGenFunction &CGF, Address VDAddr, llvm::Value *Ctor, llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc); /// Emit the array initialization or deletion portion for user-defined mapper /// code generation. void emitUDMapperArrayInitOrDel(CodeGenFunction &MapperCGF, llvm::Value *Handle, llvm::Value *BasePtr, llvm::Value *Ptr, llvm::Value *Size, llvm::Value *MapType, CharUnits ElementSize, llvm::BasicBlock *ExitBB, bool IsInit); struct TaskResultTy { llvm::Value *NewTask = nullptr; llvm::Function *TaskEntry = nullptr; llvm::Value *NewTaskNewTaskTTy = nullptr; LValue TDBase; const RecordDecl *KmpTaskTQTyRD = nullptr; llvm::Value *TaskDupFn = nullptr; }; /// Emit task region for the task directive. The task region is emitted in /// several steps: /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the /// function: /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) { /// TaskFunction(gtid, tt->part_id, tt->shareds); /// return 0; /// } /// 2. Copy a list of shared variables to field shareds of the resulting /// structure kmp_task_t returned by the previous call (if any). /// 3. Copy a pointer to destructions function to field destructions of the /// resulting structure kmp_task_t. /// \param D Current task directive. /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32 /// /*part_id*/, captured_struct */*__context*/); /// \param SharedsTy A type which contains references the shared variables. /// \param Shareds Context with the list of shared variables from the \p /// TaskFunction. /// \param Data Additional data for task generation like tiednsee, final /// state, list of privates etc. TaskResultTy emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc, const OMPExecutableDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const OMPTaskDataTy &Data); /// Returns default address space for the constant firstprivates, 0 by /// default. virtual unsigned getDefaultFirstprivateAddressSpace() const { return 0; } /// Emit code that pushes the trip count of loops associated with constructs /// 'target teams distribute' and 'teams distribute parallel for'. /// \param SizeEmitter Emits the int64 value for the number of iterations of /// the associated loop. void emitTargetNumIterationsCall( CodeGenFunction &CGF, const OMPExecutableDirective &D, llvm::Value *DeviceID, llvm::function_ref<llvm::Value *(CodeGenFunction &CGF, const OMPLoopDirective &D)> SizeEmitter); /// Emit update for lastprivate conditional data. 
  void emitLastprivateConditionalUpdate(CodeGenFunction &CGF, LValue IVLVal,
                                        StringRef UniqueDeclName, LValue LVal,
                                        SourceLocation Loc);

  /// Returns the number of the elements and the address of the depobj
  /// dependency array.
  /// \return Number of elements in depobj array and the pointer to the array
  /// of dependencies.
  std::pair<llvm::Value *, LValue> getDepobjElements(CodeGenFunction &CGF,
                                                     LValue DepobjLVal,
                                                     SourceLocation Loc);

public:
  explicit CGOpenMPRuntime(CodeGenModule &CGM)
      : CGOpenMPRuntime(CGM, ".", ".") {}
  virtual ~CGOpenMPRuntime() {}
  virtual void clear();

  /// Emits code for OpenMP 'if' clause using specified \a CodeGen
  /// function. Here is the logic:
  /// if (Cond) {
  ///   ThenGen();
  /// } else {
  ///   ElseGen();
  /// }
  void emitIfClause(CodeGenFunction &CGF, const Expr *Cond,
                    const RegionCodeGenTy &ThenGen,
                    const RegionCodeGenTy &ElseGen);

  /// Checks if the \p Body is the \a CompoundStmt and returns its child
  /// statement iff there is only one that is not evaluatable at compile time.
  static const Stmt *getSingleCompoundChild(ASTContext &Ctx, const Stmt *Body);

  /// Get the platform-specific name separator.
  std::string getName(ArrayRef<StringRef> Parts) const;

  /// Emit code for the specified user defined reduction construct.
  virtual void emitUserDefinedReduction(CodeGenFunction *CGF,
                                        const OMPDeclareReductionDecl *D);

  /// Get combiner/initializer for the specified user-defined reduction, if
  /// any.
  virtual std::pair<llvm::Function *, llvm::Function *>
  getUserDefinedReduction(const OMPDeclareReductionDecl *D);

  /// Emit the function for the user defined mapper construct.
  void emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
                             CodeGenFunction *CGF = nullptr);

  /// Get the function for the specified user-defined mapper. If it does not
  /// exist, create one.
  llvm::Function *
  getOrCreateUserDefinedMapperFunc(const OMPDeclareMapperDecl *D);

  /// Emits outlined function for the specified OpenMP parallel directive
  /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
  /// kmp_int32 BoundID, struct context_vars*).
  /// \param D OpenMP directive.
  /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
  /// \param InnermostKind Kind of innermost directive (for simple directives
  /// it is a directive itself, for combined - its innermost directive).
  /// \param CodeGen Code generation sequence for the \a D directive.
  virtual llvm::Function *emitParallelOutlinedFunction(
      const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
      OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);

  /// Emits outlined function for the specified OpenMP teams directive
  /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
  /// kmp_int32 BoundID, struct context_vars*).
  /// \param D OpenMP directive.
  /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
  /// \param InnermostKind Kind of innermost directive (for simple directives
  /// it is a directive itself, for combined - its innermost directive).
  /// \param CodeGen Code generation sequence for the \a D directive.
  virtual llvm::Function *emitTeamsOutlinedFunction(
      const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
      OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);

  /// Emits outlined function for the OpenMP task directive \a D. This
  /// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
  /// TaskT).
  /// \param D OpenMP directive.
  /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
  /// \param PartIDVar Variable for partition id in the current OpenMP untied
  /// task region.
  /// \param TaskTVar Variable for task_t argument.
  /// \param InnermostKind Kind of innermost directive (for simple directives
  /// it is a directive itself, for combined - its innermost directive).
  /// \param CodeGen Code generation sequence for the \a D directive.
  /// \param Tied true if task is generated for tied task, false otherwise.
  /// \param NumberOfParts Number of parts in untied task. Ignored for tied
  /// tasks.
  ///
  virtual llvm::Function *emitTaskOutlinedFunction(
      const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
      const VarDecl *PartIDVar, const VarDecl *TaskTVar,
      OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
      bool Tied, unsigned &NumberOfParts);

  /// Cleans up references to the objects in the finished function.
  ///
  virtual void functionFinished(CodeGenFunction &CGF);

  /// Emits code for parallel or serial call of the \a OutlinedFn with
  /// variables captured in a record whose address is stored in \a
  /// CapturedStruct.
  /// \param OutlinedFn Outlined function to be run in parallel threads. Type
  /// of this function is void(*)(kmp_int32 *, kmp_int32, struct
  /// context_vars*).
  /// \param CapturedVars A pointer to the record with the references to
  /// variables used in \a OutlinedFn function.
  /// \param IfCond Condition in the associated 'if' clause, if it was
  /// specified, nullptr otherwise.
  ///
  virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
                                llvm::Function *OutlinedFn,
                                ArrayRef<llvm::Value *> CapturedVars,
                                const Expr *IfCond);

  /// Emits a critical region.
  /// \param CriticalName Name of the critical region.
  /// \param CriticalOpGen Generator for the statement associated with the
  /// given critical region.
  /// \param Hint Value of the 'hint' clause (optional).
  virtual void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
                                  const RegionCodeGenTy &CriticalOpGen,
                                  SourceLocation Loc,
                                  const Expr *Hint = nullptr);

  /// Emits a master region.
  /// \param MasterOpGen Generator for the statement associated with the given
  /// master region.
  virtual void emitMasterRegion(CodeGenFunction &CGF,
                                const RegionCodeGenTy &MasterOpGen,
                                SourceLocation Loc);

  /// Emits code for a taskyield directive.
  virtual void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc);

  /// Emit a taskgroup region.
  /// \param TaskgroupOpGen Generator for the statement associated with the
  /// given taskgroup region.
  virtual void emitTaskgroupRegion(CodeGenFunction &CGF,
                                   const RegionCodeGenTy &TaskgroupOpGen,
                                   SourceLocation Loc);

  /// Emits a single region.
  /// \param SingleOpGen Generator for the statement associated with the given
  /// single region.
  virtual void emitSingleRegion(CodeGenFunction &CGF,
                                const RegionCodeGenTy &SingleOpGen,
                                SourceLocation Loc,
                                ArrayRef<const Expr *> CopyprivateVars,
                                ArrayRef<const Expr *> DestExprs,
                                ArrayRef<const Expr *> SrcExprs,
                                ArrayRef<const Expr *> AssignmentOps);

  /// Emit an ordered region.
  /// \param OrderedOpGen Generator for the statement associated with the
  /// given ordered region.
  virtual void emitOrderedRegion(CodeGenFunction &CGF,
                                 const RegionCodeGenTy &OrderedOpGen,
                                 SourceLocation Loc, bool IsThreads);

  /// Emit an implicit/explicit barrier for OpenMP threads.
  /// \param Kind Directive for which this implicit barrier call must be
  /// generated. Must be OMPD_barrier for explicit barrier generation.
  /// \param EmitChecks true if checks for cancellation barriers need to be
  /// emitted.
  /// \param ForceSimpleCall true if a simple barrier call must be emitted,
  /// false if the runtime class decides which one to emit (simple or with
  /// cancellation checks).
  ///
  virtual void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
                               OpenMPDirectiveKind Kind,
                               bool EmitChecks = true,
                               bool ForceSimpleCall = false);

  /// Check if the specified \a ScheduleKind is static non-chunked.
  /// This kind of worksharing directive is emitted without outer loop.
  /// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
  /// \param Chunked True if chunk is specified in the clause.
  ///
  virtual bool isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
                                  bool Chunked) const;

  /// Check if the specified \a ScheduleKind is static non-chunked.
  /// This kind of distribute directive is emitted without outer loop.
  /// \param ScheduleKind Schedule kind specified in the 'dist_schedule'
  /// clause.
  /// \param Chunked True if chunk is specified in the clause.
  ///
  virtual bool isStaticNonchunked(OpenMPDistScheduleClauseKind ScheduleKind,
                                  bool Chunked) const;

  /// Check if the specified \a ScheduleKind is static chunked.
  /// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
  /// \param Chunked True if chunk is specified in the clause.
  ///
  virtual bool isStaticChunked(OpenMPScheduleClauseKind ScheduleKind,
                               bool Chunked) const;

  /// Check if the specified \a ScheduleKind is static chunked.
  /// \param ScheduleKind Schedule kind specified in the 'dist_schedule'
  /// clause.
  /// \param Chunked True if chunk is specified in the clause.
  ///
  virtual bool isStaticChunked(OpenMPDistScheduleClauseKind ScheduleKind,
                               bool Chunked) const;

  /// Check if the specified \a ScheduleKind is dynamic.
  /// This kind of worksharing directive is emitted without outer loop.
  /// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
  ///
  virtual bool isDynamic(OpenMPScheduleClauseKind ScheduleKind) const;

  /// Struct with the values to be passed to the dispatch runtime function.
  struct DispatchRTInput {
    /// Loop lower bound.
    llvm::Value *LB = nullptr;
    /// Loop upper bound.
    llvm::Value *UB = nullptr;
    /// Chunk size specified using 'schedule' clause (nullptr if chunk
    /// was not specified).
    llvm::Value *Chunk = nullptr;
    DispatchRTInput() = default;
    DispatchRTInput(llvm::Value *LB, llvm::Value *UB, llvm::Value *Chunk)
        : LB(LB), UB(UB), Chunk(Chunk) {}
  };

  /// Call the appropriate runtime routine to initialize it before start
  /// of loop.
  /// This is used for non-static scheduled types and when the ordered
  /// clause is present on the loop construct.
  /// Depending on the loop schedule, it is necessary to call some runtime
  /// routine before start of the OpenMP loop to get the loop upper / lower
  /// bounds \a LB and \a UB and stride \a ST.
  ///
  /// \param CGF Reference to current CodeGenFunction.
  /// \param Loc Clang source location.
  /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
  /// \param IVSize Size of the iteration variable in bits.
  /// \param IVSigned Sign of the iteration variable.
  /// \param Ordered true if loop is ordered, false otherwise.
  /// \param DispatchValues struct containing llvm values for lower bound,
  /// upper bound, and chunk expression.
  /// For the default (nullptr) value, a chunk of 1 will be used.
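  ///
  /// A sketch of the dynamic-dispatch loop shape this call initializes,
  /// using the 32-bit signed __kmpc_dispatch_init_4/__kmpc_dispatch_next_4
  /// variants of the runtime entries created above (the loop body
  /// placeholder is illustrative):
  /// \code
  /// __kmpc_dispatch_init_4(loc, tid, schedule, lb, ub, st, chunk);
  /// while (__kmpc_dispatch_next_4(loc, tid, &last, &lb, &ub, &st)) {
  ///   for (i = lb; i <= ub; i += st)
  ///     <loop body>;
  /// }
  /// \endcode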
  ///
  virtual void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc,
                                   const OpenMPScheduleTy &ScheduleKind,
                                   unsigned IVSize, bool IVSigned,
                                   bool Ordered,
                                   const DispatchRTInput &DispatchValues);

  /// Struct with the values to be passed to the static runtime function.
  struct StaticRTInput {
    /// Size of the iteration variable in bits.
    unsigned IVSize = 0;
    /// Sign of the iteration variable.
    bool IVSigned = false;
    /// true if loop is ordered, false otherwise.
    bool Ordered = false;
    /// Address of the output variable in which the flag of the last iteration
    /// is returned.
    Address IL = Address::invalid();
    /// Address of the output variable in which the lower iteration number is
    /// returned.
    Address LB = Address::invalid();
    /// Address of the output variable in which the upper iteration number is
    /// returned.
    Address UB = Address::invalid();
    /// Address of the output variable in which the stride value is returned,
    /// necessary to generate the static_chunked scheduled loop.
    Address ST = Address::invalid();
    /// Value of the chunk for the static_chunked scheduled loop. For the
    /// default (nullptr) value, a chunk of 1 will be used.
    llvm::Value *Chunk = nullptr;
    StaticRTInput(unsigned IVSize, bool IVSigned, bool Ordered, Address IL,
                  Address LB, Address UB, Address ST,
                  llvm::Value *Chunk = nullptr)
        : IVSize(IVSize), IVSigned(IVSigned), Ordered(Ordered), IL(IL), LB(LB),
          UB(UB), ST(ST), Chunk(Chunk) {}
  };

  /// Call the appropriate runtime routine to initialize it before start
  /// of loop.
  ///
  /// This is used only in case of static schedule, when the user did not
  /// specify an ordered clause on the loop construct.
  /// Depending on the loop schedule, it is necessary to call some runtime
  /// routine before start of the OpenMP loop to get the loop upper / lower
  /// bounds LB and UB and stride ST.
  ///
  /// \param CGF Reference to current CodeGenFunction.
  /// \param Loc Clang source location.
  /// \param DKind Kind of the directive.
  /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
  /// \param Values Input arguments for the construct.
  ///
  virtual void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
                                 OpenMPDirectiveKind DKind,
                                 const OpenMPScheduleTy &ScheduleKind,
                                 const StaticRTInput &Values);

  /// Call the appropriate runtime routine to initialize the distribute loop
  /// before its start.
  ///
  /// \param CGF Reference to current CodeGenFunction.
  /// \param Loc Clang source location.
  /// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.
  /// \param Values Input arguments for the construct.
  ///
  virtual void emitDistributeStaticInit(CodeGenFunction &CGF,
                                        SourceLocation Loc,
                                        OpenMPDistScheduleClauseKind SchedKind,
                                        const StaticRTInput &Values);

  /// Call the appropriate runtime routine to notify that we finished
  /// iteration of the ordered loop with the dynamic scheduling.
  ///
  /// \param CGF Reference to current CodeGenFunction.
  /// \param Loc Clang source location.
  /// \param IVSize Size of the iteration variable in bits.
  /// \param IVSigned Sign of the iteration variable.
  ///
  virtual void emitForOrderedIterationEnd(CodeGenFunction &CGF,
                                          SourceLocation Loc, unsigned IVSize,
                                          bool IVSigned);

  /// Call the appropriate runtime routine to notify that we finished
  /// all the work with current loop.
  ///
  /// \param CGF Reference to current CodeGenFunction.
  /// \param Loc Clang source location.
  /// \param DKind Kind of the directive for which the static finish is
  /// emitted.
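  ///
  /// A sketch of the static-schedule bracket this call closes, for the
  /// simple static (non-chunked) case and the 32-bit signed runtime entries
  /// (the loop body placeholder is illustrative):
  /// \code
  /// __kmpc_for_static_init_4(loc, tid, schedtype, &last, &lb, &ub, &st,
  ///                          incr, chunk);
  /// for (i = lb; i <= ub; i += st)
  ///   <loop body>;
  /// __kmpc_for_static_fini(loc, tid);
  /// \endcode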
  ///
  virtual void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc,
                                   OpenMPDirectiveKind DKind);

  /// Call __kmpc_dispatch_next(
  ///          ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
  ///          kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
  ///          kmp_int[32|64] *p_stride);
  /// \param IVSize Size of the iteration variable in bits.
  /// \param IVSigned Sign of the iteration variable.
  /// \param IL Address of the output variable in which the flag of the
  /// last iteration is returned.
  /// \param LB Address of the output variable in which the lower iteration
  /// number is returned.
  /// \param UB Address of the output variable in which the upper iteration
  /// number is returned.
  /// \param ST Address of the output variable in which the stride value is
  /// returned.
  virtual llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
                                   unsigned IVSize, bool IVSigned, Address IL,
                                   Address LB, Address UB, Address ST);

  /// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
  /// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
  /// clause.
  /// \param NumThreads An integer value of threads.
  virtual void emitNumThreadsClause(CodeGenFunction &CGF,
                                    llvm::Value *NumThreads,
                                    SourceLocation Loc);

  /// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
  /// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
  virtual void emitProcBindClause(CodeGenFunction &CGF,
                                  llvm::omp::ProcBindKind ProcBind,
                                  SourceLocation Loc);

  /// Returns address of the threadprivate variable for the current
  /// thread.
  /// \param VD Threadprivate variable.
  /// \param VDAddr Address of the global variable \a VD.
  /// \param Loc Location of the reference to threadprivate var.
  /// \return Address of the threadprivate variable for the current thread.
  virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF,
                                         const VarDecl *VD, Address VDAddr,
                                         SourceLocation Loc);

  /// Returns the address of the variable marked as declare target with link
  /// clause OR as declare target with to clause and unified memory.
  virtual Address getAddrOfDeclareTargetVar(const VarDecl *VD);

  /// Emit code for initialization of a threadprivate variable. It emits
  /// a call to the runtime library which adds the initial value to the newly
  /// created threadprivate variable (if it is not constant) and registers a
  /// destructor for the variable (if any).
  /// \param VD Threadprivate variable.
  /// \param VDAddr Address of the global variable \a VD.
  /// \param Loc Location of threadprivate declaration.
  /// \param PerformInit true if initialization expression is not constant.
  virtual llvm::Function *
  emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr,
                                 SourceLocation Loc, bool PerformInit,
                                 CodeGenFunction *CGF = nullptr);

  /// Emit code for initialization of a declare target variable.
  /// \param VD Declare target variable.
  /// \param Addr Address of the global variable \a VD.
  /// \param PerformInit true if initialization expression is not constant.
  virtual bool emitDeclareTargetVarDefinition(const VarDecl *VD,
                                              llvm::GlobalVariable *Addr,
                                              bool PerformInit);

  /// Creates artificial threadprivate variable with name \p Name and type \p
  /// VarType.
  /// \param VarType Type of the artificial threadprivate variable.
  /// \param Name Name of the artificial threadprivate variable.
  virtual Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
                                                   QualType VarType,
                                                   StringRef Name);

  /// Emit flush of the variables specified in 'omp flush' directive.
  /// \param Vars List of variables to flush.
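  ///
  /// A sketch of the runtime call this typically lowers to (the variable
  /// list is not forwarded to the entry point, which flushes the thread's
  /// whole view of memory):
  /// \code
  /// __kmpc_flush(loc);
  /// \endcode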
  virtual void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
                         SourceLocation Loc, llvm::AtomicOrdering AO);

  /// Emit task region for the task directive. The task region is
  /// emitted in several steps:
  /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
  /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
  /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
  /// function:
  /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
  ///   TaskFunction(gtid, tt->part_id, tt->shareds);
  ///   return 0;
  /// }
  /// 2. Copy a list of shared variables to field shareds of the resulting
  /// structure kmp_task_t returned by the previous call (if any).
  /// 3. Copy a pointer to destructions function to field destructions of the
  /// resulting structure kmp_task_t.
  /// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
  /// kmp_task_t *new_task), where new_task is a resulting structure from
  /// previous items.
  /// \param D Current task directive.
  /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/,
  /// i32 /*part_id*/, captured_struct */*__context*/);
  /// \param SharedsTy A type which contains references to the shared
  /// variables.
  /// \param Shareds Context with the list of shared variables from the \p
  /// TaskFunction.
  /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
  /// otherwise.
  /// \param Data Additional data for task generation like tiedness, final
  /// state, list of privates etc.
  virtual void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
                            const OMPExecutableDirective &D,
                            llvm::Function *TaskFunction, QualType SharedsTy,
                            Address Shareds, const Expr *IfCond,
                            const OMPTaskDataTy &Data);

  /// Emit task region for the taskloop directive. The taskloop region is
  /// emitted in several steps:
  /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
  /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
  /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
  /// function:
  /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
  ///   TaskFunction(gtid, tt->part_id, tt->shareds);
  ///   return 0;
  /// }
  /// 2. Copy a list of shared variables to field shareds of the resulting
  /// structure kmp_task_t returned by the previous call (if any).
  /// 3. Copy a pointer to destructions function to field destructions of the
  /// resulting structure kmp_task_t.
  /// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
  /// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
  /// nogroup, int sched, kmp_uint64 grainsize, void *task_dup), where
  /// new_task is a resulting structure from previous items.
  /// \param D Current task directive.
  /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/,
  /// i32 /*part_id*/, captured_struct */*__context*/);
  /// \param SharedsTy A type which contains references to the shared
  /// variables.
  /// \param Shareds Context with the list of shared variables from the \p
  /// TaskFunction.
  /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
  /// otherwise.
  /// \param Data Additional data for task generation like tiedness, final
  /// state, list of privates etc.
  virtual void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
                                const OMPLoopDirective &D,
                                llvm::Function *TaskFunction,
                                QualType SharedsTy, Address Shareds,
                                const Expr *IfCond, const OMPTaskDataTy &Data);

  /// Emit code for the directive that does not require outlining.
  ///
  /// \param InnermostKind Kind of innermost directive (for simple directives
  /// it is a directive itself, for combined - its innermost directive).
  /// \param CodeGen Code generation sequence for the \a D directive.
  /// \param HasCancel true if region has inner cancel directive, false
  /// otherwise.
  virtual void emitInlinedDirective(CodeGenFunction &CGF,
                                    OpenMPDirectiveKind InnermostKind,
                                    const RegionCodeGenTy &CodeGen,
                                    bool HasCancel = false);

  /// Emits reduction function.
  /// \param ArgsType Array type containing pointers to reduction variables.
  /// \param Privates List of private copies for original reduction arguments.
  /// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
  /// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
  /// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
  /// or 'operator binop(LHS, RHS)'.
  llvm::Function *emitReductionFunction(SourceLocation Loc,
                                        llvm::Type *ArgsType,
                                        ArrayRef<const Expr *> Privates,
                                        ArrayRef<const Expr *> LHSExprs,
                                        ArrayRef<const Expr *> RHSExprs,
                                        ArrayRef<const Expr *> ReductionOps);

  /// Emits single reduction combiner.
  void emitSingleReductionCombiner(CodeGenFunction &CGF,
                                   const Expr *ReductionOp,
                                   const Expr *PrivateRef,
                                   const DeclRefExpr *LHS,
                                   const DeclRefExpr *RHS);

  struct ReductionOptionsTy {
    bool WithNowait;
    bool SimpleReduction;
    OpenMPDirectiveKind ReductionKind;
  };

  /// Emit code for the reduction clause. The following code should be emitted
  /// for the reduction:
  /// \code
  ///
  /// static kmp_critical_name lock = { 0 };
  ///
  /// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
  ///   ...
  ///   *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
  ///   ...
  /// }
  ///
  /// ...
  /// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
  /// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
  /// RedList, reduce_func, &<lock>)) {
  /// case 1:
  ///   ...
  ///   <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
  ///   ...
  ///   __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
  ///   break;
  /// case 2:
  ///   ...
  ///   Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
  ///   ...
  ///   break;
  /// default:;
  /// }
  /// \endcode
  ///
  /// \param Privates List of private copies for original reduction arguments.
  /// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
  /// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
  /// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
  /// or 'operator binop(LHS, RHS)'.
  /// \param Options List of options for reduction codegen:
  ///     WithNowait true if parent directive has also nowait clause, false
  ///     otherwise.
  ///     SimpleReduction Emit reduction operation only. Used for omp simd
  ///     directive on the host.
  ///     ReductionKind The kind of reduction to perform.
  virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
                             ArrayRef<const Expr *> Privates,
                             ArrayRef<const Expr *> LHSExprs,
                             ArrayRef<const Expr *> RHSExprs,
                             ArrayRef<const Expr *> ReductionOps,
                             ReductionOptionsTy Options);

  /// Emit code for initialization of the task reduction clause. The following
  /// code should be emitted for the reduction:
  /// \code
  ///
  /// _taskred_item_t red_data[n];
  /// ...
  /// red_data[i].shar = &shareds[i];
  /// red_data[i].orig = &origs[i];
  /// red_data[i].size = sizeof(origs[i]);
  /// red_data[i].f_init = (void*)RedInit<i>;
  /// red_data[i].f_fini = (void*)RedDest<i>;
  /// red_data[i].f_comb = (void*)RedOp<i>;
  /// red_data[i].flags = <Flag_i>;
  /// ...
  /// void* tg1 = __kmpc_taskred_init(gtid, n, red_data);
  /// \endcode
  /// For reduction clause with task modifier it emits the next call:
  /// \code
  ///
  /// _taskred_item_t red_data[n];
  /// ...
  /// red_data[i].shar = &shareds[i];
  /// red_data[i].orig = &origs[i];
  /// red_data[i].size = sizeof(origs[i]);
  /// red_data[i].f_init = (void*)RedInit<i>;
  /// red_data[i].f_fini = (void*)RedDest<i>;
  /// red_data[i].f_comb = (void*)RedOp<i>;
  /// red_data[i].flags = <Flag_i>;
  /// ...
  /// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n,
  /// red_data);
  /// \endcode
  /// \param LHSExprs List of LHS in \a Data.ReductionOps reduction
  /// operations.
  /// \param RHSExprs List of RHS in \a Data.ReductionOps reduction
  /// operations.
  /// \param Data Additional data for task generation like tiedness, final
  /// state, list of privates, reductions etc.
  virtual llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF,
                                             SourceLocation Loc,
                                             ArrayRef<const Expr *> LHSExprs,
                                             ArrayRef<const Expr *> RHSExprs,
                                             const OMPTaskDataTy &Data);

  /// Emits the following code for reduction clause with task modifier:
  /// \code
  /// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing);
  /// \endcode
  virtual void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc,
                                     bool IsWorksharingReduction);

  /// Required to resolve existing problems in the runtime. Emits
  /// threadprivate variables to store the size of the VLAs/array sections for
  /// initializer/combiner/finalizer functions.
  /// \param RCG Allows reusing existing data for the reductions.
  /// \param N Reduction item for which fixups must be emitted.
  virtual void emitTaskReductionFixups(CodeGenFunction &CGF,
                                       SourceLocation Loc,
                                       ReductionCodeGen &RCG, unsigned N);

  /// Get the address of `void *` type of the private copy of the reduction
  /// item specified by the \p SharedLVal.
  /// \param ReductionsPtr Pointer to the reduction data returned by the
  /// emitTaskReductionInit function.
  /// \param SharedLVal Address of the original reduction item.
  virtual Address getTaskReductionItem(CodeGenFunction &CGF,
                                       SourceLocation Loc,
                                       llvm::Value *ReductionsPtr,
                                       LValue SharedLVal);

  /// Emit code for 'taskwait' directive.
  virtual void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc);

  /// Emit code for 'cancellation point' construct.
  /// \param CancelRegion Region kind for which the cancellation point must be
  /// emitted.
  ///
  virtual void emitCancellationPointCall(CodeGenFunction &CGF,
                                         SourceLocation Loc,
                                         OpenMPDirectiveKind CancelRegion);

  /// Emit code for 'cancel' construct.
  /// \param IfCond Condition in the associated 'if' clause, if it was
  /// specified, nullptr otherwise.
  /// \param CancelRegion Region kind for which the cancel must be emitted.
  ///
  virtual void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
                              const Expr *IfCond,
                              OpenMPDirectiveKind CancelRegion);

  /// Emit outlined function for 'target' directive.
  /// \param D Directive to emit.
  /// \param ParentName Name of the function that encloses the target region.
  /// \param OutlinedFn Outlined function value to be defined by this call.
  /// \param OutlinedFnID Outlined function ID value to be defined by this
  /// call.
  /// \param IsOffloadEntry True if the outlined function is an offload entry.
  /// \param CodeGen Code generation sequence for the \a D directive.
  /// An outlined function may not be an entry if, e.g. the if clause always
  /// evaluates to false.
  virtual void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
                                          StringRef ParentName,
                                          llvm::Function *&OutlinedFn,
                                          llvm::Constant *&OutlinedFnID,
                                          bool IsOffloadEntry,
                                          const RegionCodeGenTy &CodeGen);

  /// Emit the target offloading code associated with \a D. The emitted
  /// code attempts to offload the execution to the device; in the event of
  /// a failure it executes the host version outlined in \a OutlinedFn.
  /// \param D Directive to emit.
  /// \param OutlinedFn Host version of the code to be offloaded.
  /// \param OutlinedFnID ID of host version of the code to be offloaded.
  /// \param IfCond Expression evaluated in if clause associated with the
  /// target directive, or null if no if clause is used.
  /// \param Device Expression evaluated in device clause associated with the
  /// target directive, or null if no device clause is used, together with
  /// the device clause modifier.
  /// \param SizeEmitter Callback to emit number of iterations for loop-based
  /// directives.
  virtual void emitTargetCall(
      CodeGenFunction &CGF, const OMPExecutableDirective &D,
      llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID,
      const Expr *IfCond,
      llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
      llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
                                       const OMPLoopDirective &D)>
          SizeEmitter);

  /// Emit the target regions enclosed in \a GD function definition or
  /// the function itself in case it is a valid device function. Returns true
  /// if \a GD was dealt with successfully.
  /// \param GD Function to scan.
  virtual bool emitTargetFunctions(GlobalDecl GD);

  /// Emit the global variable if it is a valid device global variable.
  /// Returns true if \a GD was dealt with successfully.
  /// \param GD Variable declaration to emit.
  virtual bool emitTargetGlobalVariable(GlobalDecl GD);

  /// Checks if the provided global decl \a GD is a declare target variable
  /// and registers it when emitting code for the host.
  virtual void registerTargetGlobalVariable(const VarDecl *VD,
                                            llvm::Constant *Addr);

  /// Registers provided target firstprivate variable as global on the
  /// target.
  llvm::Constant *registerTargetFirstprivateCopy(CodeGenFunction &CGF,
                                                 const VarDecl *VD);

  /// Emit the global \a GD if it is meaningful for the target. Returns
  /// if it was emitted successfully.
  /// \param GD Global to scan.
  virtual bool emitTargetGlobal(GlobalDecl GD);

  /// Creates and returns a registration function for when at least one
  /// 'requires' directive was used in the current module.
  llvm::Function *emitRequiresDirectiveRegFun();

  /// Creates all the offload entries in the current compilation unit
  /// along with the associated metadata.
  void createOffloadEntriesAndInfoMetadata();

  /// Emits code for teams call of the \a OutlinedFn with
  /// variables captured in a record whose address is stored in \a
  /// CapturedStruct.
  /// \param OutlinedFn Outlined function to be run by team masters. Type of
  /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
  /// \param CapturedVars A pointer to the record with the references to
  /// variables used in \a OutlinedFn function.
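  ///
  /// A sketch of the expected lowering (the __kmpc_fork_teams runtime entry
  /// takes the outlined function as a kmpc_micro thunk; the forwarded
  /// captured variables are illustrative):
  /// \code
  /// __kmpc_fork_teams(loc, <num captured vars>, (kmpc_micro)OutlinedFn,
  ///                   <captured vars>...);
  /// \endcode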
  ///
  virtual void emitTeamsCall(CodeGenFunction &CGF,
                             const OMPExecutableDirective &D,
                             SourceLocation Loc, llvm::Function *OutlinedFn,
                             ArrayRef<llvm::Value *> CapturedVars);

  /// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
  /// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate
  /// code for num_teams clause.
  /// \param NumTeams An integer expression of teams.
  /// \param ThreadLimit An integer expression of threads.
  virtual void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
                                  const Expr *ThreadLimit, SourceLocation Loc);

  /// Struct that keeps all the relevant information that should be kept
  /// throughout a 'target data' region.
  class TargetDataInfo {
    /// Set to true if device pointer information has to be obtained.
    bool RequiresDevicePointerInfo = false;
    /// Set to true if Clang emits separate runtime calls for the beginning
    /// and end of the region. These calls might have separate map type
    /// arrays.
    bool SeparateBeginEndCalls = false;

  public:
    /// The array of base pointers passed to the runtime library.
    llvm::Value *BasePointersArray = nullptr;
    /// The array of section pointers passed to the runtime library.
    llvm::Value *PointersArray = nullptr;
    /// The array of sizes passed to the runtime library.
    llvm::Value *SizesArray = nullptr;
    /// The array of map types passed to the runtime library for the beginning
    /// of the region or for the entire region if there are no separate map
    /// types for the region end.
    llvm::Value *MapTypesArray = nullptr;
    /// The array of map types passed to the runtime library for the end of
    /// the region, or nullptr if there are no separate map types for the
    /// region end.
    llvm::Value *MapTypesArrayEnd = nullptr;
    /// The array of user-defined mappers passed to the runtime library.
    llvm::Value *MappersArray = nullptr;
    /// Indicate whether any user-defined mapper exists.
    bool HasMapper = false;
    /// The total number of pointers passed to the runtime library.
    unsigned NumberOfPtrs = 0u;
    /// Map between a declaration of a capture and the corresponding base
    /// pointer address where the runtime returns the device pointers.
    llvm::DenseMap<const ValueDecl *, Address> CaptureDeviceAddrMap;

    explicit TargetDataInfo() {}
    explicit TargetDataInfo(bool RequiresDevicePointerInfo,
                            bool SeparateBeginEndCalls)
        : RequiresDevicePointerInfo(RequiresDevicePointerInfo),
          SeparateBeginEndCalls(SeparateBeginEndCalls) {}
    /// Clear information about the data arrays.
    void clearArrayInfo() {
      BasePointersArray = nullptr;
      PointersArray = nullptr;
      SizesArray = nullptr;
      MapTypesArray = nullptr;
      MapTypesArrayEnd = nullptr;
      MappersArray = nullptr;
      HasMapper = false;
      NumberOfPtrs = 0u;
    }
    /// Return true if the current target data information has valid arrays.
    bool isValid() {
      return BasePointersArray && PointersArray && SizesArray &&
             MapTypesArray && (!HasMapper || MappersArray) && NumberOfPtrs;
    }
    bool requiresDevicePointerInfo() { return RequiresDevicePointerInfo; }
    bool separateBeginEndCalls() { return SeparateBeginEndCalls; }
  };

  /// Emit the target data mapping code associated with \a D.
  /// \param D Directive to emit.
  /// \param IfCond Expression evaluated in if clause associated with the
  /// target directive, or null if no if clause is used.
  /// \param Device Expression evaluated in device clause associated with the
  /// target directive, or null if no device clause is used.
  /// \param Info A record used to store information that needs to be
  /// preserved until the region is closed.
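  ///
  /// A sketch of the begin/end bracketing this emits, assuming the
  /// non-mapper libomptarget entry points (the mapper-aware *_mapper
  /// variants take an additional array of user-defined mappers; the argument
  /// arrays correspond to the TargetDataInfo fields above):
  /// \code
  /// __tgt_target_data_begin(device_id, num_args, base_ptrs, ptrs, sizes,
  ///                         map_types);
  /// <region body>;
  /// __tgt_target_data_end(device_id, num_args, base_ptrs, ptrs, sizes,
  ///                       map_types);
  /// \endcode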
  virtual void emitTargetDataCalls(CodeGenFunction &CGF,
                                   const OMPExecutableDirective &D,
                                   const Expr *IfCond, const Expr *Device,
                                   const RegionCodeGenTy &CodeGen,
                                   TargetDataInfo &Info);

  /// Emit the data mapping/movement code associated with the directive
  /// \a D that should be of the form 'target [{enter|exit} data | update]'.
  /// \param D Directive to emit.
  /// \param IfCond Expression evaluated in if clause associated with the
  /// target directive, or null if no if clause is used.
  /// \param Device Expression evaluated in device clause associated with the
  /// target directive, or null if no device clause is used.
  virtual void emitTargetDataStandAloneCall(CodeGenFunction &CGF,
                                            const OMPExecutableDirective &D,
                                            const Expr *IfCond,
                                            const Expr *Device);

  /// Marks function \a Fn with properly mangled versions of vector functions.
  /// \param FD Function marked as 'declare simd'.
  /// \param Fn LLVM function that must be marked with 'declare simd'
  /// attributes.
  virtual void emitDeclareSimdFunction(const FunctionDecl *FD,
                                       llvm::Function *Fn);

  /// Emit initialization for doacross loop nesting support.
  /// \param D Loop-based construct used in doacross nesting construct.
  virtual void emitDoacrossInit(CodeGenFunction &CGF,
                                const OMPLoopDirective &D,
                                ArrayRef<Expr *> NumIterations);

  /// Emit code for doacross ordered directive with 'depend' clause.
  /// \param C 'depend' clause with 'sink|source' dependency kind.
  virtual void emitDoacrossOrdered(CodeGenFunction &CGF,
                                   const OMPDependClause *C);

  /// Translates the native parameter of outlined function if this is required
  /// for target.
  /// \param FD Field decl from captured record for the parameter.
  /// \param NativeParam Parameter itself.
  virtual const VarDecl *translateParameter(const FieldDecl *FD,
                                            const VarDecl *NativeParam) const {
    return NativeParam;
  }

  /// Gets the address of the native argument based on the address of the
  /// target-specific parameter.
  /// \param NativeParam Parameter itself.
  /// \param TargetParam Corresponding target-specific parameter.
  virtual Address getParameterAddress(CodeGenFunction &CGF,
                                      const VarDecl *NativeParam,
                                      const VarDecl *TargetParam) const;

  /// Choose default schedule type and chunk value for the
  /// dist_schedule clause.
  virtual void getDefaultDistScheduleAndChunk(
      CodeGenFunction &CGF, const OMPLoopDirective &S,
      OpenMPDistScheduleClauseKind &ScheduleKind, llvm::Value *&Chunk) const {}

  /// Choose default schedule type and chunk value for the
  /// schedule clause.
  virtual void
  getDefaultScheduleAndChunk(CodeGenFunction &CGF, const OMPLoopDirective &S,
                             OpenMPScheduleClauseKind &ScheduleKind,
                             const Expr *&ChunkExpr) const;

  /// Emits call of the outlined function with the provided arguments,
  /// translating these arguments to correct target-specific arguments.
  virtual void
  emitOutlinedFunctionCall(CodeGenFunction &CGF, SourceLocation Loc,
                           llvm::FunctionCallee OutlinedFn,
                           ArrayRef<llvm::Value *> Args = llvm::None) const;

  /// Emits OpenMP-specific function prolog.
  /// Required for device constructs.
  virtual void emitFunctionProlog(CodeGenFunction &CGF, const Decl *D);

  /// Gets the OpenMP-specific address of the local variable.
  virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF,
                                            const VarDecl *VD);

  /// Marks the declaration as already emitted for the device code and returns
  /// true, if it was marked already, and false, otherwise.
  bool markAsGlobalTarget(GlobalDecl GD);

  /// Emit deferred declare target variables marked for deferred emission.
  void emitDeferredTargetDecls() const;

  /// Adjust some parameters for the target-based directives, like addresses
  /// of the variables captured by reference in lambdas.
  virtual void
  adjustTargetSpecificDataForLambdas(CodeGenFunction &CGF,
                                     const OMPExecutableDirective &D) const;

  /// Perform check on requires decl to ensure that target architecture
  /// supports unified addressing.
  virtual void processRequiresDirective(const OMPRequiresDecl *D);

  /// Gets default memory ordering as specified in requires directive.
  llvm::AtomicOrdering getDefaultMemoryOrdering() const;

  /// Checks if the variable has associated OMPAllocateDeclAttr attribute with
  /// the predefined allocator and translates it into the corresponding
  /// address space.
  virtual bool hasAllocateAttributeForGlobalVar(const VarDecl *VD, LangAS &AS);

  /// Return whether the unified_shared_memory has been specified.
  bool hasRequiresUnifiedSharedMemory() const;

  /// Checks if the \p VD variable is marked as nontemporal declaration in
  /// current context.
  bool isNontemporalDecl(const ValueDecl *VD) const;

  /// Create specialized alloca to handle lastprivate conditionals.
  Address emitLastprivateConditionalInit(CodeGenFunction &CGF,
                                         const VarDecl *VD);

  /// Checks if the provided \p LVal is lastprivate conditional and emits the
  /// code to update the value of the original variable.
  /// \code
  /// lastprivate(conditional: a)
  /// ...
  /// <type> a;
  /// lp_a = ...;
  /// #pragma omp critical(a)
  /// if (last_iv_a <= iv) {
  ///   last_iv_a = iv;
  ///   global_a = lp_a;
  /// }
  /// \endcode
  virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
                                                  const Expr *LHS);

  /// Checks if the lastprivate conditional was updated in inner region and
  /// writes the value.
  /// \code
  /// lastprivate(conditional: a)
  /// ...
  /// <type> a;
  /// bool Fired = false;
  /// #pragma omp ... shared(a)
  /// {
  ///   lp_a = ...;
  ///   Fired = true;
  /// }
  /// if (Fired) {
  ///   #pragma omp critical(a)
  ///   if (last_iv_a <= iv) {
  ///     last_iv_a = iv;
  ///     global_a = lp_a;
  ///   }
  ///   Fired = false;
  /// }
  /// \endcode
  virtual void checkAndEmitSharedLastprivateConditional(
      CodeGenFunction &CGF, const OMPExecutableDirective &D,
      const llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> &IgnoredDecls);

  /// Emits the final update of the original variable from the global copy
  /// used for the lastprivate conditional update, if any.
  /// \param PrivLVal LValue for the private copy.
  /// \param VD Original lastprivate declaration.
  virtual void emitLastprivateConditionalFinalUpdate(CodeGenFunction &CGF,
                                                     LValue PrivLVal,
                                                     const VarDecl *VD,
                                                     SourceLocation Loc);

  /// Emits list of dependencies based on the provided data (array of
  /// dependence/expression pairs).
  /// \returns Pointer to the first element of the array casted to VoidPtr
  /// type.
  std::pair<llvm::Value *, Address>
  emitDependClause(CodeGenFunction &CGF,
                   ArrayRef<OMPTaskDataTy::DependData> Dependencies,
                   SourceLocation Loc);

  /// Emits list of dependencies based on the provided data (array of
  /// dependence/expression pairs) for depobj construct. In this case, the
  /// variable is allocated dynamically. \returns Pointer to the first
  /// element of the array casted to VoidPtr type.
  Address emitDepobjDependClause(CodeGenFunction &CGF,
                                 const OMPTaskDataTy::DependData &Dependencies,
                                 SourceLocation Loc);

  /// Emits the code to destroy the dependency object provided in depobj
  /// directive.
  void emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
                         SourceLocation Loc);

  /// Updates the dependency kind in the specified depobj object.
  /// \param DepobjLVal LValue for the main depobj object.
  /// \param NewDepKind New dependency kind.
  void emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
                        OpenMPDependClauseKind NewDepKind, SourceLocation Loc);

  /// Initializes user defined allocators specified in the uses_allocators
  /// clauses.
  void emitUsesAllocatorsInit(CodeGenFunction &CGF, const Expr *Allocator,
                              const Expr *AllocatorTraits);

  /// Destroys user defined allocators specified in the uses_allocators
  /// clause.
  void emitUsesAllocatorsFini(CodeGenFunction &CGF, const Expr *Allocator);

  /// Returns true if the variable is a local variable in untied task.
  bool isLocalVarInUntiedTask(CodeGenFunction &CGF, const VarDecl *VD) const;
};

/// Class that supports the emission of SIMD-only code.
class CGOpenMPSIMDRuntime final : public CGOpenMPRuntime {
public:
  explicit CGOpenMPSIMDRuntime(CodeGenModule &CGM) : CGOpenMPRuntime(CGM) {}
  ~CGOpenMPSIMDRuntime() override {}

  /// Emits outlined function for the specified OpenMP parallel directive
  /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
  /// kmp_int32 BoundID, struct context_vars*).
  /// \param D OpenMP directive.
  /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
  /// \param InnermostKind Kind of innermost directive (for simple directives
  /// it is a directive itself, for combined - its innermost directive).
  /// \param CodeGen Code generation sequence for the \a D directive.
  llvm::Function *
  emitParallelOutlinedFunction(const OMPExecutableDirective &D,
                               const VarDecl *ThreadIDVar,
                               OpenMPDirectiveKind InnermostKind,
                               const RegionCodeGenTy &CodeGen) override;

  /// Emits outlined function for the specified OpenMP teams directive
  /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
  /// kmp_int32 BoundID, struct context_vars*).
  /// \param D OpenMP directive.
  /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
  /// \param InnermostKind Kind of innermost directive (for simple directives
  /// it is a directive itself, for combined - its innermost directive).
  /// \param CodeGen Code generation sequence for the \a D directive.
  llvm::Function *
  emitTeamsOutlinedFunction(const OMPExecutableDirective &D,
                            const VarDecl *ThreadIDVar,
                            OpenMPDirectiveKind InnermostKind,
                            const RegionCodeGenTy &CodeGen) override;

  /// Emits outlined function for the OpenMP task directive \a D. This
  /// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
  /// TaskT).
  /// \param D OpenMP directive.
  /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
  /// \param PartIDVar Variable for partition id in the current OpenMP untied
  /// task region.
  /// \param TaskTVar Variable for task_t argument.
  /// \param InnermostKind Kind of innermost directive (for simple directives
  /// it is a directive itself, for combined - its innermost directive).
  /// \param CodeGen Code generation sequence for the \a D directive.
  /// \param Tied true if task is generated for tied task, false otherwise.
  /// \param NumberOfParts Number of parts in untied task. Ignored for tied
  /// tasks.
  ///
  llvm::Function *emitTaskOutlinedFunction(
      const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
      const VarDecl *PartIDVar, const VarDecl *TaskTVar,
      OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
      bool Tied, unsigned &NumberOfParts) override;

  /// Emits code for parallel or serial call of the \a OutlinedFn with
  /// variables captured in a record whose address is stored in \a
  /// CapturedStruct.
  /// \param OutlinedFn Outlined function to be run in parallel threads.
  /// Type of this function is void(*)(kmp_int32 *, kmp_int32, struct
  /// context_vars*).
  /// \param CapturedVars A pointer to the record with the references to
  /// variables used in \a OutlinedFn function.
  /// \param IfCond Condition in the associated 'if' clause, if it was
  /// specified, nullptr otherwise.
  ///
  void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
                        llvm::Function *OutlinedFn,
                        ArrayRef<llvm::Value *> CapturedVars,
                        const Expr *IfCond) override;

  /// Emits a critical region.
  /// \param CriticalName Name of the critical region.
  /// \param CriticalOpGen Generator for the statement associated with the
  /// given critical region.
  /// \param Hint Value of the 'hint' clause (optional).
  void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
                          const RegionCodeGenTy &CriticalOpGen,
                          SourceLocation Loc,
                          const Expr *Hint = nullptr) override;

  /// Emits a master region.
  /// \param MasterOpGen Generator for the statement associated with the given
  /// master region.
  void emitMasterRegion(CodeGenFunction &CGF,
                        const RegionCodeGenTy &MasterOpGen,
                        SourceLocation Loc) override;

  /// Emits code for a taskyield directive.
  void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc) override;

  /// Emit a taskgroup region.
  /// \param TaskgroupOpGen Generator for the statement associated with the
  /// given taskgroup region.
  void emitTaskgroupRegion(CodeGenFunction &CGF,
                           const RegionCodeGenTy &TaskgroupOpGen,
                           SourceLocation Loc) override;

  /// Emits a single region.
  /// \param SingleOpGen Generator for the statement associated with the given
  /// single region.
  void emitSingleRegion(CodeGenFunction &CGF,
                        const RegionCodeGenTy &SingleOpGen, SourceLocation Loc,
                        ArrayRef<const Expr *> CopyprivateVars,
                        ArrayRef<const Expr *> DestExprs,
                        ArrayRef<const Expr *> SrcExprs,
                        ArrayRef<const Expr *> AssignmentOps) override;

  /// Emit an ordered region.
  /// \param OrderedOpGen Generator for the statement associated with the
  /// given ordered region.
  void emitOrderedRegion(CodeGenFunction &CGF,
                         const RegionCodeGenTy &OrderedOpGen,
                         SourceLocation Loc, bool IsThreads) override;

  /// Emit an implicit/explicit barrier for OpenMP threads.
  /// \param Kind Directive for which this implicit barrier call must be
  /// generated. Must be OMPD_barrier for explicit barrier generation.
  /// \param EmitChecks true if checks for cancellation barriers need to be
  /// emitted.
  /// \param ForceSimpleCall true if a simple barrier call must be emitted,
  /// false if the runtime class decides which one to emit (simple or with
  /// cancellation checks).
  ///
  void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
                       OpenMPDirectiveKind Kind, bool EmitChecks = true,
                       bool ForceSimpleCall = false) override;

  /// Call the appropriate runtime routine to initialize it before start
  /// of loop.
  /// This is used for non-static scheduled types and when the ordered
  /// clause is present on the loop construct.
  /// Depending on the loop schedule, it is necessary to call some runtime
  /// routine before start of the OpenMP loop to get the loop upper / lower
  /// bounds \a LB and \a UB and stride \a ST.
  ///
  /// \param CGF Reference to current CodeGenFunction.
  /// \param Loc Clang source location.
  /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
  /// \param IVSize Size of the iteration variable in bits.
  /// \param IVSigned Sign of the iteration variable.
  /// \param Ordered true if loop is ordered, false otherwise.
  /// \param DispatchValues struct containing llvm values for lower bound,
  /// upper bound, and chunk expression.
  /// For the default (nullptr) value, a chunk of 1 will be used.
  ///
  void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc,
                           const OpenMPScheduleTy &ScheduleKind,
                           unsigned IVSize, bool IVSigned, bool Ordered,
                           const DispatchRTInput &DispatchValues) override;

  /// Call the appropriate runtime routine to initialize it before start
  /// of loop.
  ///
  /// This is used only in case of static schedule, when the user did not
  /// specify an ordered clause on the loop construct.
  /// Depending on the loop schedule, it is necessary to call some runtime
  /// routine before start of the OpenMP loop to get the loop upper / lower
  /// bounds LB and UB and stride ST.
  ///
  /// \param CGF Reference to current CodeGenFunction.
  /// \param Loc Clang source location.
  /// \param DKind Kind of the directive.
  /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
  /// \param Values Input arguments for the construct.
  ///
  void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
                         OpenMPDirectiveKind DKind,
                         const OpenMPScheduleTy &ScheduleKind,
                         const StaticRTInput &Values) override;

  /// Call the appropriate runtime routine to initialize the distribute loop
  /// before its start.
  ///
  /// \param CGF Reference to current CodeGenFunction.
  /// \param Loc Clang source location.
  /// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.
  /// \param Values Input arguments for the construct.
  ///
  void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
                                OpenMPDistScheduleClauseKind SchedKind,
                                const StaticRTInput &Values) override;

  /// Call the appropriate runtime routine to notify that we finished
  /// iteration of the ordered loop with the dynamic scheduling.
  ///
  /// \param CGF Reference to current CodeGenFunction.
  /// \param Loc Clang source location.
  /// \param IVSize Size of the iteration variable in bits.
  /// \param IVSigned Sign of the iteration variable.
  ///
  void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc,
                                  unsigned IVSize, bool IVSigned) override;

  /// Call the appropriate runtime routine to notify that we finished
  /// all the work with current loop.
  ///
  /// \param CGF Reference to current CodeGenFunction.
  /// \param Loc Clang source location.
  /// \param DKind Kind of the directive for which the static finish is
  /// emitted.
  ///
  void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc,
                           OpenMPDirectiveKind DKind) override;

  /// Call __kmpc_dispatch_next(
  ///          ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
  ///          kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
  ///          kmp_int[32|64] *p_stride);
  /// \param IVSize Size of the iteration variable in bits.
  /// \param IVSigned Sign of the iteration variable.
  /// \param IL Address of the output variable in which the flag of the
  /// last iteration is returned.
  /// \param LB Address of the output variable in which the lower iteration
  /// number is returned.
  /// \param UB Address of the output variable in which the upper iteration
  /// number is returned.
  /// \param ST Address of the output variable in which the stride value is
  /// returned.
  llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
                           unsigned IVSize, bool IVSigned, Address IL,
                           Address LB, Address UB, Address ST) override;

  /// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
  /// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
  /// clause.
  /// \param NumThreads An integer value of threads.
  void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads,
                            SourceLocation Loc) override;

  /// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
  /// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
  void emitProcBindClause(CodeGenFunction &CGF,
                          llvm::omp::ProcBindKind ProcBind,
                          SourceLocation Loc) override;

  /// Returns address of the threadprivate variable for the current
  /// thread.
  /// \param VD Threadprivate variable.
  /// \param VDAddr Address of the global variable \a VD.
  /// \param Loc Location of the reference to threadprivate var.
  /// \return Address of the threadprivate variable for the current thread.
  Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD,
                                 Address VDAddr, SourceLocation Loc) override;

  /// Emit code for initialization of a threadprivate variable. It emits
  /// a call to the runtime library which adds the initial value to the newly
  /// created threadprivate variable (if it is not constant) and registers a
  /// destructor for the variable (if any).
  /// \param VD Threadprivate variable.
  /// \param VDAddr Address of the global variable \a VD.
  /// \param Loc Location of threadprivate declaration.
  /// \param PerformInit true if initialization expression is not constant.
  llvm::Function *
  emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr,
                                 SourceLocation Loc, bool PerformInit,
                                 CodeGenFunction *CGF = nullptr) override;

  /// Creates artificial threadprivate variable with name \p Name and type \p
  /// VarType.
  /// \param VarType Type of the artificial threadprivate variable.
  /// \param Name Name of the artificial threadprivate variable.
  Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
                                           QualType VarType,
                                           StringRef Name) override;

  /// Emit flush of the variables specified in 'omp flush' directive.
  /// \param Vars List of variables to flush.
  void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
                 SourceLocation Loc, llvm::AtomicOrdering AO) override;

  /// Emit task region for the task directive. The task region is
  /// emitted in several steps:
  /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
  /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
  /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
  /// function:
  /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
  ///   TaskFunction(gtid, tt->part_id, tt->shareds);
  ///   return 0;
  /// }
  /// 2. Copy a list of shared variables to field shareds of the resulting
  /// structure kmp_task_t returned by the previous call (if any).
  /// 3. Copy a pointer to destructions function to field destructions of the
  /// resulting structure kmp_task_t.
  /// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
  /// kmp_task_t *new_task), where new_task is a resulting structure from
  /// previous items.
  /// \param D Current task directive.
  /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/,
  /// i32 /*part_id*/, captured_struct */*__context*/);
  /// \param SharedsTy A type which contains references to the shared
  /// variables.
  /// \param Shareds Context with the list of shared variables from the \p
  /// TaskFunction.
  /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
  /// otherwise.
  /// \param Data Additional data for task generation like tiedness, final
  /// state, list of privates etc.
  void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
                    const OMPExecutableDirective &D,
                    llvm::Function *TaskFunction, QualType SharedsTy,
                    Address Shareds, const Expr *IfCond,
                    const OMPTaskDataTy &Data) override;

  /// Emit task region for the taskloop directive. The taskloop region is
  /// emitted in several steps:
  /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
  /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
  /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
  /// function:
  /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
  ///   TaskFunction(gtid, tt->part_id, tt->shareds);
  ///   return 0;
  /// }
  /// 2. Copy a list of shared variables to field shareds of the resulting
  /// structure kmp_task_t returned by the previous call (if any).
  /// 3. Copy a pointer to destructions function to field destructions of the
  /// resulting structure kmp_task_t.
  /// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
  /// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
  /// nogroup, int sched, kmp_uint64 grainsize, void *task_dup), where
  /// new_task is a resulting structure from previous items.
  /// \param D Current task directive.
  /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/,
  /// i32 /*part_id*/, captured_struct */*__context*/);
  /// \param SharedsTy A type which contains references to the shared
  /// variables.
  /// \param Shareds Context with the list of shared variables from the \p
  /// TaskFunction.
  /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
  /// otherwise.
  /// \param Data Additional data for task generation like tiedness, final
  /// state, list of privates etc.
  void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
                        const OMPLoopDirective &D,
                        llvm::Function *TaskFunction, QualType SharedsTy,
                        Address Shareds, const Expr *IfCond,
                        const OMPTaskDataTy &Data) override;

  /// Emit code for the reduction clause. The following code should be emitted
  /// for the reduction:
  /// \code
  ///
  /// static kmp_critical_name lock = { 0 };
  ///
  /// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
  ///   ...
  ///   *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
  ///   ...
  /// }
  ///
  /// ...
  /// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
  /// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
  /// RedList, reduce_func, &<lock>)) {
  /// case 1:
  ///   ...
  ///   <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
  ///   ...
  ///   __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
  ///   break;
  /// case 2:
  ///   ...
  ///   Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
  ///   ...
  ///   break;
  /// default:;
  /// }
  /// \endcode
  ///
  /// \param Privates List of private copies for original reduction arguments.
  /// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
  /// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
  /// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
  /// or 'operator binop(LHS, RHS)'.
  /// \param Options List of options for reduction codegen:
  ///     WithNowait true if parent directive has also nowait clause, false
  ///     otherwise.
  ///     SimpleReduction Emit reduction operation only. Used for omp simd
  ///     directive on the host.
  ///     ReductionKind The kind of reduction to perform.
  void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
                     ArrayRef<const Expr *> Privates,
                     ArrayRef<const Expr *> LHSExprs,
                     ArrayRef<const Expr *> RHSExprs,
                     ArrayRef<const Expr *> ReductionOps,
                     ReductionOptionsTy Options) override;

  /// Emit code for initialization of the task reduction clause. The following
  /// code should be emitted for the reduction:
  /// \code
  ///
  /// _taskred_item_t red_data[n];
  /// ...
/// red_data[i].shar = &shareds[i]; /// red_data[i].orig = &origs[i]; /// red_data[i].size = sizeof(origs[i]); /// red_data[i].f_init = (void*)RedInit<i>; /// red_data[i].f_fini = (void*)RedDest<i>; /// red_data[i].f_comb = (void*)RedOp<i>; /// red_data[i].flags = <Flag_i>; /// ... /// void* tg1 = __kmpc_taskred_init(gtid, n, red_data); /// \endcode /// For the reduction clause with the task modifier it emits the following call: /// \code /// /// _taskred_item_t red_data[n]; /// ... /// red_data[i].shar = &shareds[i]; /// red_data[i].orig = &origs[i]; /// red_data[i].size = sizeof(origs[i]); /// red_data[i].f_init = (void*)RedInit<i>; /// red_data[i].f_fini = (void*)RedDest<i>; /// red_data[i].f_comb = (void*)RedOp<i>; /// red_data[i].flags = <Flag_i>; /// ... /// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n, /// red_data); /// \endcode /// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations. /// \param Data Additional data for task generation like tiedness, final /// state, list of privates, reductions etc. llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data) override; /// Emits the following code for the reduction clause with the task modifier: /// \code /// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing); /// \endcode void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc, bool IsWorksharingReduction) override; /// Required to resolve existing problems in the runtime. Emits threadprivate /// variables to store the size of the VLAs/array sections for /// initializer/combiner/finalizer functions + emits a threadprivate variable to /// store the pointer to the original reduction item for the custom /// initializer defined by the declare reduction construct. /// \param RCG Allows reusing existing data for the reductions. /// \param N Reduction item for which fixups must be emitted. void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc, ReductionCodeGen &RCG, unsigned N) override; /// Get the address of `void *` type of the private copy of the reduction /// item specified by the \p SharedLVal. /// \param ReductionsPtr Pointer to the reduction data returned by the /// emitTaskReductionInit function. /// \param SharedLVal Address of the original reduction item. Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *ReductionsPtr, LValue SharedLVal) override; /// Emit code for 'taskwait' directive. void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc) override; /// Emit code for 'cancellation point' construct. /// \param CancelRegion Region kind for which the cancellation point must be /// emitted. /// void emitCancellationPointCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind CancelRegion) override; /// Emit code for 'cancel' construct. /// \param IfCond Condition in the associated 'if' clause, if it was /// specified, nullptr otherwise. /// \param CancelRegion Region kind for which the cancel must be emitted. /// void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc, const Expr *IfCond, OpenMPDirectiveKind CancelRegion) override; /// Emit the outlined function for the 'target' directive. /// \param D Directive to emit. /// \param ParentName Name of the function that encloses the target region. /// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call. /// \param IsOffloadEntry True if the outlined function is an offload entry. /// \param CodeGen Code generation sequence for the \a D directive. /// An outlined function may not be an entry if, e.g. the if clause always /// evaluates to false. void emitTargetOutlinedFunction(const OMPExecutableDirective &D, StringRef ParentName, llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID, bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) override; /// Emit the target offloading code associated with \a D. The emitted /// code attempts offloading the execution to the device; in the event of /// a failure it executes the host version outlined in \a OutlinedFn. /// \param D Directive to emit. /// \param OutlinedFn Host version of the code to be offloaded. /// \param OutlinedFnID ID of host version of the code to be offloaded. /// \param IfCond Expression evaluated in if clause associated with the target /// directive, or null if no if clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used, together with the /// device clause modifier. void emitTargetCall( CodeGenFunction &CGF, const OMPExecutableDirective &D, llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond, llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device, llvm::function_ref<llvm::Value *(CodeGenFunction &CGF, const OMPLoopDirective &D)> SizeEmitter) override; /// Emit the target regions enclosed in \a GD function definition or /// the function itself in case it is a valid device function. Returns true if /// \a GD was dealt with successfully. /// \param GD Function to scan. bool emitTargetFunctions(GlobalDecl GD) override; /// Emit the global variable if it is a valid device global variable. /// Returns true if \a GD was dealt with successfully. /// \param GD Variable declaration to emit. bool emitTargetGlobalVariable(GlobalDecl GD) override; /// Emit the global \a GD if it is meaningful for the target. Returns true /// if it was emitted successfully. /// \param GD Global to scan. bool emitTargetGlobal(GlobalDecl GD) override; /// Emits code for the teams call of the \a OutlinedFn with /// variables captured in a record whose address is stored in \a /// CapturedStruct. /// \param OutlinedFn Outlined function to be run by team masters. Type of /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*). /// \param CapturedVars A pointer to the record with the references to /// variables used in \a OutlinedFn function. /// void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef<llvm::Value *> CapturedVars) override; /// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32 /// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code /// for num_teams clause. /// \param NumTeams An integer expression for the number of teams. /// \param ThreadLimit An integer expression for the thread limit. void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams, const Expr *ThreadLimit, SourceLocation Loc) override; /// Emit the target data mapping code associated with \a D. /// \param D Directive to emit. /// \param IfCond Expression evaluated in if clause associated with the /// target directive, or null if no if clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used.
/// \param Info A record used to store information that needs to be preserved /// until the region is closed. void emitTargetDataCalls(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) override; /// Emit the data mapping/movement code associated with the directive /// \a D that should be of the form 'target [{enter|exit} data | update]'. /// \param D Directive to emit. /// \param IfCond Expression evaluated in if clause associated with the target /// directive, or null if no if clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used. void emitTargetDataStandAloneCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device) override; /// Emit initialization for doacross loop nesting support. /// \param D Loop-based construct used in doacross nesting construct. void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D, ArrayRef<Expr *> NumIterations) override; /// Emit code for doacross ordered directive with 'depend' clause. /// \param C 'depend' clause with 'sink|source' dependency kind. void emitDoacrossOrdered(CodeGenFunction &CGF, const OMPDependClause *C) override; /// Translates the native parameter of the outlined function if this is required /// for target. /// \param FD Field decl from captured record for the parameter. /// \param NativeParam Parameter itself. const VarDecl *translateParameter(const FieldDecl *FD, const VarDecl *NativeParam) const override; /// Gets the address of the native argument based on the address of the /// target-specific parameter. /// \param NativeParam Parameter itself. /// \param TargetParam Corresponding target-specific parameter. Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam, const VarDecl *TargetParam) const override; /// Gets the OpenMP-specific address of the local variable. Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD) override { return Address::invalid(); } }; } // namespace CodeGen } // namespace clang #endif
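/* Editorial sketch: the call shape that emitTaskCall documents above, written
 * out as plain C against the kmp entry points quoted in the comments. This is
 * illustration only, not part of the header: the typedefs, the flags value,
 * and sketch_emit_task itself are assumptions made for readability; only the
 * two runtime signatures are taken from the documentation. */
#if 0
#include <stddef.h>

typedef int kmp_int32;
typedef struct ident ident_t;
typedef struct kmp_task kmp_task_t;
/* per the comment: kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) */
typedef kmp_int32 (*kmp_routine_entry_t)(kmp_int32, kmp_task_t *);

kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid, kmp_int32 flags,
                                  size_t sizeof_kmp_task_t,
                                  size_t sizeof_shareds,
                                  kmp_routine_entry_t task_entry);
kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *new_task);

static void sketch_emit_task(ident_t *loc, kmp_int32 gtid,
                             kmp_routine_entry_t entry, size_t task_size,
                             size_t shareds_size)
{
  /* Step 1: allocate the task descriptor; entry wraps TaskFunction. The
   * flags value (here 1, i.e. a tied task) is an assumption. */
  kmp_task_t *t = __kmpc_omp_task_alloc(loc, gtid, /*flags=*/1,
                                        task_size, shareds_size, entry);
  /* Steps 2-3: copy the shared variables into the shareds field and install
   * the destructions thunk; both depend on the captured record and are
   * elided here. */
  /* Step 4: enqueue the task. */
  __kmpc_omp_task(loc, gtid, t);
}
#endif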
DRB050-functionparameter-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> #include <stdlib.h> /* Arrays passed as function parameters */ void foo1(double o1[], double c[], int len) { int i ; for (i = 0; i < len; ++i) { double volnew_o8 = 0.5 * c[i]; o1[i] = volnew_o8; } } int main() { double o1[101]; double c[101]; int i; int len = 100; #pragma omp parallel for private(i ) for (i = 0; i < len; ++i) { c[i] = i + 1.01; o1[i] = i + 1.01; } foo1 (&o1[1], &o1[0], 100); for (i = 0; i < len; ++i) { printf("%lf\n",o1[i]); } return 0; }
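/* Editorial note: the call foo1(&o1[1], &o1[0], 100) deliberately aliases the
 * two array parameters. Inside foo1 the write o1[i] and the read c[i] then
 * refer to elements i+1 and i of the same caller array, so the loop
 * effectively computes o1[i+1] = 0.5 * o1[i], a loop-carried dependence.
 * Running foo1's loop in parallel would therefore race between iterations i
 * and i+1, which is what the "-yes" suffix of this DataRaceBench kernel
 * marks. The parallel loop in main() writes disjoint elements per iteration
 * and is race-free. */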
par_mgr.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * Two-grid system solver * *****************************************************************************/ #include "_hypre_parcsr_ls.h" #include "par_amg.h" #include "par_mgr.h" #ifdef HYPRE_USING_DSUPERLU #include "dsuperlu.h" #endif /* Create */ void * hypre_MGRCreate() { hypre_ParMGRData *mgr_data; mgr_data = hypre_CTAlloc(hypre_ParMGRData, 1, HYPRE_MEMORY_HOST); /* block data */ (mgr_data -> block_size) = 1; (mgr_data -> block_num_coarse_indexes) = NULL; (mgr_data -> point_marker_array) = NULL; (mgr_data -> block_cf_marker) = NULL; /* general data */ (mgr_data -> max_num_coarse_levels) = 10; (mgr_data -> A_array) = NULL; (mgr_data -> P_array) = NULL; (mgr_data -> RT_array) = NULL; (mgr_data -> RAP) = NULL; (mgr_data -> CF_marker_array) = NULL; (mgr_data -> coarse_indices_lvls) = NULL; (mgr_data -> A_ff_array) = NULL; (mgr_data -> F_fine_array) = NULL; (mgr_data -> U_fine_array) = NULL; (mgr_data -> aff_solver) = NULL; (mgr_data -> fine_grid_solver_setup) = NULL; (mgr_data -> fine_grid_solver_solve) = NULL; (mgr_data -> F_array) = NULL; (mgr_data -> U_array) = NULL; (mgr_data -> residual) = NULL; (mgr_data -> rel_res_norms) = NULL; (mgr_data -> Vtemp) = NULL; (mgr_data -> Ztemp) = NULL; (mgr_data -> Utemp) = NULL; (mgr_data -> Ftemp) = NULL; (mgr_data -> num_iterations) = 0; (mgr_data -> num_interp_sweeps) = 1; (mgr_data -> num_restrict_sweeps) = 1; (mgr_data -> trunc_factor) = 0.0; (mgr_data -> max_row_sum) = 0.9; (mgr_data -> strong_threshold) = 0.25; (mgr_data -> S_commpkg_switch) = 1.0; (mgr_data -> P_max_elmts) = 0; (mgr_data -> coarse_grid_solver) = NULL; (mgr_data -> coarse_grid_solver_setup) = NULL; (mgr_data -> coarse_grid_solver_solve) = NULL; (mgr_data -> global_smoother) = NULL; (mgr_data -> use_default_cgrid_solver) = 1; (mgr_data -> use_default_fsolver) = -1; // set to -1 to avoid printing when not used (mgr_data -> omega) = 1.; (mgr_data -> max_iter) = 20; (mgr_data -> tol) = 1.0e-7; (mgr_data -> relax_type) = 0; (mgr_data -> relax_order) = 1; // not fully utilized. Only used to compute L1-norms. 
(mgr_data -> interp_type) = NULL; (mgr_data -> restrict_type) = NULL; (mgr_data -> num_relax_sweeps) = 1; (mgr_data -> relax_weight) = 1.0; (mgr_data -> logging) = 0; (mgr_data -> print_level) = 0; (mgr_data -> l1_norms) = NULL; (mgr_data -> reserved_coarse_size) = 0; (mgr_data -> reserved_coarse_indexes) = NULL; (mgr_data -> reserved_Cpoint_local_indexes) = NULL; (mgr_data -> diaginv) = NULL; (mgr_data -> global_smooth_iters) = 1; (mgr_data -> global_smooth_type) = 0; (mgr_data -> set_non_Cpoints_to_F) = 0; (mgr_data -> idx_array) = NULL; (mgr_data -> Frelax_method) = NULL; (mgr_data -> VcycleRelaxVtemp) = NULL; (mgr_data -> VcycleRelaxZtemp) = NULL; (mgr_data -> FrelaxVcycleData) = NULL; (mgr_data -> Frelax_num_functions) = NULL; (mgr_data -> max_local_lvls) = 10; (mgr_data -> use_non_galerkin_cg) = NULL; (mgr_data -> print_coarse_system) = 0; (mgr_data -> set_c_points_method) = 0; (mgr_data -> lvl_to_keep_cpoints) = 0; (mgr_data -> cg_convergence_factor) = 0.0; (mgr_data -> truncate_coarse_grid_threshold) = 0.0; return (void *) mgr_data; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ /* Destroy */ HYPRE_Int hypre_MGRDestroy( void *data ) { hypre_ParMGRData * mgr_data = (hypre_ParMGRData*) data; HYPRE_Int i; HYPRE_Int num_coarse_levels = (mgr_data -> num_coarse_levels); /* block info data */ if ((mgr_data -> block_cf_marker)) { for (i=0; i < (mgr_data -> max_num_coarse_levels); i++) { if ((mgr_data -> block_cf_marker)[i]) { hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST); } } hypre_TFree((mgr_data -> block_cf_marker), HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker) = NULL; } if(mgr_data -> block_num_coarse_indexes) { hypre_TFree(mgr_data -> block_num_coarse_indexes, HYPRE_MEMORY_HOST); (mgr_data -> block_num_coarse_indexes) = NULL; } /* final residual vector */ if((mgr_data -> residual)) { hypre_ParVectorDestroy( (mgr_data -> residual) ); (mgr_data -> residual) = NULL; } if((mgr_data -> rel_res_norms)) { hypre_TFree( (mgr_data -> rel_res_norms) , HYPRE_MEMORY_HOST); (mgr_data -> rel_res_norms) = NULL; } /* temp vectors for solve phase */ if((mgr_data -> Vtemp)) { hypre_ParVectorDestroy( (mgr_data -> Vtemp) ); (mgr_data -> Vtemp) = NULL; } if((mgr_data -> Ztemp)) { hypre_ParVectorDestroy( (mgr_data -> Ztemp) ); (mgr_data -> Ztemp) = NULL; } if((mgr_data -> Utemp)) { hypre_ParVectorDestroy( (mgr_data -> Utemp) ); (mgr_data -> Utemp) = NULL; } if((mgr_data -> Ftemp)) { hypre_ParVectorDestroy( (mgr_data -> Ftemp) ); (mgr_data -> Ftemp) = NULL; } /* coarse grid solver */ if((mgr_data -> use_default_cgrid_solver)) { if((mgr_data -> coarse_grid_solver)) { hypre_BoomerAMGDestroy( (mgr_data -> coarse_grid_solver) ); } (mgr_data -> coarse_grid_solver) = NULL; } /* l1_norms */ if ((mgr_data -> l1_norms)) { for (i=0; i < (num_coarse_levels); i++) { hypre_SeqVectorDestroy((mgr_data -> l1_norms)[i]); } hypre_TFree((mgr_data -> l1_norms), HYPRE_MEMORY_HOST); } /* coarse_indices_lvls */ if ((mgr_data -> coarse_indices_lvls)) { for (i=0; i < (num_coarse_levels); i++) if ((mgr_data -> coarse_indices_lvls)[i]) hypre_TFree((mgr_data -> coarse_indices_lvls)[i], HYPRE_MEMORY_HOST); hypre_TFree((mgr_data -> coarse_indices_lvls), HYPRE_MEMORY_HOST); } /* linear system and cf marker array */ if(mgr_data -> A_array || mgr_data -> P_array || mgr_data -> RT_array || mgr_data -> CF_marker_array) { for (i=1; i < num_coarse_levels+1; i++) { hypre_ParVectorDestroy((mgr_data -> F_array)[i]); 
hypre_ParVectorDestroy((mgr_data -> U_array)[i]); if ((mgr_data -> P_array)[i-1]) hypre_ParCSRMatrixDestroy((mgr_data -> P_array)[i-1]); if ((mgr_data -> RT_array)[i-1]) hypre_ParCSRMatrixDestroy((mgr_data -> RT_array)[i-1]); hypre_TFree((mgr_data -> CF_marker_array)[i-1], HYPRE_MEMORY_HOST); } for (i=1; i < (num_coarse_levels); i++) { if ((mgr_data -> A_array)[i]) hypre_ParCSRMatrixDestroy((mgr_data -> A_array)[i]); } } /* AMG for Frelax */ if(mgr_data -> A_ff_array || mgr_data -> F_fine_array || mgr_data -> U_fine_array) { for (i=1; i < num_coarse_levels+1; i++) { if (mgr_data -> F_fine_array[i]) hypre_ParVectorDestroy((mgr_data -> F_fine_array)[i]); if (mgr_data -> U_fine_array[i]) hypre_ParVectorDestroy((mgr_data -> U_fine_array)[i]); } for (i=1; i < (num_coarse_levels); i++) { if ((mgr_data -> A_ff_array)[i]) hypre_ParCSRMatrixDestroy((mgr_data -> A_ff_array)[i]); } if (mgr_data -> use_default_fsolver) { hypre_ParCSRMatrixDestroy((mgr_data -> A_ff_array)[0]); } hypre_TFree(mgr_data -> F_fine_array, HYPRE_MEMORY_HOST); (mgr_data -> F_fine_array) = NULL; hypre_TFree(mgr_data -> U_fine_array, HYPRE_MEMORY_HOST); (mgr_data -> U_fine_array) = NULL; hypre_TFree(mgr_data -> A_ff_array, HYPRE_MEMORY_HOST); (mgr_data -> A_ff_array) = NULL; } if(mgr_data -> aff_solver) { for (i = 1; i < (num_coarse_levels); i++) { if ((mgr_data -> aff_solver)[i]) hypre_BoomerAMGDestroy((mgr_data -> aff_solver)[i]); } if (mgr_data -> use_default_fsolver) { if ((mgr_data -> aff_solver)[0]) hypre_BoomerAMGDestroy((mgr_data -> aff_solver)[0]); } hypre_TFree(mgr_data -> aff_solver, HYPRE_MEMORY_HOST); (mgr_data -> aff_solver) = NULL; } if((mgr_data -> F_array)) { hypre_TFree((mgr_data -> F_array), HYPRE_MEMORY_HOST); (mgr_data -> F_array) = NULL; } if((mgr_data -> U_array)) { hypre_TFree((mgr_data -> U_array), HYPRE_MEMORY_HOST); (mgr_data -> U_array) = NULL; } if((mgr_data -> A_array)) { hypre_TFree((mgr_data -> A_array), HYPRE_MEMORY_HOST); (mgr_data -> A_array) = NULL; } if((mgr_data -> P_array)) { hypre_TFree((mgr_data -> P_array), HYPRE_MEMORY_HOST); (mgr_data -> P_array) = NULL; } if((mgr_data -> RT_array)) { hypre_TFree((mgr_data -> RT_array), HYPRE_MEMORY_HOST); (mgr_data -> RT_array) = NULL; } if((mgr_data -> CF_marker_array)) { hypre_TFree((mgr_data -> CF_marker_array), HYPRE_MEMORY_HOST); (mgr_data -> CF_marker_array) = NULL; } if((mgr_data -> reserved_Cpoint_local_indexes)) { hypre_TFree((mgr_data -> reserved_Cpoint_local_indexes), HYPRE_MEMORY_HOST); (mgr_data -> reserved_Cpoint_local_indexes) = NULL; } if (mgr_data -> restrict_type) { hypre_TFree(mgr_data -> restrict_type, HYPRE_MEMORY_HOST); (mgr_data -> restrict_type) = NULL; } if (mgr_data -> interp_type) { hypre_TFree(mgr_data -> interp_type, HYPRE_MEMORY_HOST); (mgr_data -> interp_type) = NULL; } /* Frelax_method */ if (mgr_data -> Frelax_method) { hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_method) = NULL; } /* Frelax_num_functions */ if (mgr_data -> Frelax_num_functions) { hypre_TFree(mgr_data -> Frelax_num_functions, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_num_functions) = NULL; } /* data for V-cycle F-relaxation */ if((mgr_data -> VcycleRelaxVtemp)) { hypre_ParVectorDestroy( (mgr_data -> VcycleRelaxVtemp) ); (mgr_data -> VcycleRelaxVtemp) = NULL; } if((mgr_data -> VcycleRelaxZtemp)) { hypre_ParVectorDestroy( (mgr_data -> VcycleRelaxZtemp) ); (mgr_data -> VcycleRelaxZtemp) = NULL; } if (mgr_data -> FrelaxVcycleData) { for (i = 0; i < num_coarse_levels; i++) { if ((mgr_data -> FrelaxVcycleData)[i]) { 
hypre_MGRDestroyFrelaxVcycleData((mgr_data -> FrelaxVcycleData)[i]); (mgr_data -> FrelaxVcycleData)[i] = NULL; } } hypre_TFree(mgr_data -> FrelaxVcycleData, HYPRE_MEMORY_HOST); (mgr_data -> FrelaxVcycleData) = NULL; } /* data for reserved coarse nodes */ if(mgr_data -> reserved_coarse_indexes) { hypre_TFree(mgr_data -> reserved_coarse_indexes, HYPRE_MEMORY_HOST); (mgr_data -> reserved_coarse_indexes) = NULL; } /* index array for setting Cpoints by global block */ if ((mgr_data -> set_c_points_method) == 1) { hypre_TFree(mgr_data -> idx_array, HYPRE_MEMORY_HOST); (mgr_data -> idx_array) = NULL; } /* array for setting option to use non-Galerkin coarse grid */ if (mgr_data -> use_non_galerkin_cg) { hypre_TFree(mgr_data -> use_non_galerkin_cg, HYPRE_MEMORY_HOST); (mgr_data -> use_non_galerkin_cg) = NULL; } /* coarse level matrix - RAP */ if ((mgr_data -> RAP)) hypre_ParCSRMatrixDestroy((mgr_data -> RAP)); if ((mgr_data -> diaginv)) hypre_TFree((mgr_data -> diaginv), HYPRE_MEMORY_HOST); if ((mgr_data -> global_smoother)) { if (mgr_data -> global_smooth_type == 8) { HYPRE_EuclidDestroy((mgr_data -> global_smoother)); } else if (mgr_data -> global_smooth_type == 16) { HYPRE_ILUDestroy((mgr_data -> global_smoother)); } } /* mgr data */ hypre_TFree(mgr_data, HYPRE_MEMORY_HOST); return hypre_error_flag; } /* Create data for V-cycle F-relaxtion */ void * hypre_MGRCreateFrelaxVcycleData() { hypre_ParAMGData *vdata = hypre_CTAlloc(hypre_ParAMGData, 1, HYPRE_MEMORY_HOST); hypre_ParAMGDataAArray(vdata) = NULL; hypre_ParAMGDataPArray(vdata) = NULL; hypre_ParAMGDataFArray(vdata) = NULL; hypre_ParAMGDataCFMarkerArray(vdata) = NULL; hypre_ParAMGDataVtemp(vdata) = NULL; hypre_ParAMGDataAMat(vdata) = NULL; hypre_ParAMGDataBVec(vdata) = NULL; hypre_ParAMGDataZtemp(vdata) = NULL; hypre_ParAMGDataCommInfo(vdata) = NULL; hypre_ParAMGDataUArray(vdata) = NULL; hypre_ParAMGDataNewComm(vdata) = hypre_MPI_COMM_NULL; hypre_ParAMGDataNumLevels(vdata) = 0; hypre_ParAMGDataMaxLevels(vdata) = 10; hypre_ParAMGDataNumFunctions(vdata) = 1; hypre_ParAMGDataSCommPkgSwitch(vdata) = 1.0; hypre_ParAMGDataRelaxOrder(vdata) = 1; hypre_ParAMGDataMaxCoarseSize(vdata) = 9; hypre_ParAMGDataMinCoarseSize(vdata) = 0; hypre_ParAMGDataUserCoarseRelaxType(vdata) = 9; return (void *) vdata; } /* Destroy data for V-cycle F-relaxation */ HYPRE_Int hypre_MGRDestroyFrelaxVcycleData( void *data ) { hypre_ParAMGData * vdata = (hypre_ParAMGData*) data; HYPRE_Int i; HYPRE_Int num_levels = hypre_ParAMGDataNumLevels(vdata); MPI_Comm new_comm = hypre_ParAMGDataNewComm(vdata); hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata)[0], HYPRE_MEMORY_HOST); for (i=1; i < num_levels + 1; i++) { if (hypre_ParAMGDataAArray(vdata)[i]) hypre_ParCSRMatrixDestroy(hypre_ParAMGDataAArray(vdata)[i]); if (hypre_ParAMGDataPArray(vdata)[i-1]) hypre_ParCSRMatrixDestroy(hypre_ParAMGDataPArray(vdata)[i-1]); hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata)[i-1], HYPRE_MEMORY_HOST); hypre_ParVectorDestroy(hypre_ParAMGDataFArray(vdata)[i]); hypre_ParVectorDestroy(hypre_ParAMGDataUArray(vdata)[i]); hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata)[i], HYPRE_MEMORY_HOST); } /* see comments in par_coarsen.c regarding special case for CF_marker */ if (num_levels <= 1) { hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata)[0], HYPRE_MEMORY_HOST); } /* Points to VcycleRelaxVtemp of mgr_data, which is already destroyed */ //hypre_ParVectorDestroy(hypre_ParAMGDataVtemp(vdata)); hypre_TFree(hypre_ParAMGDataFArray(vdata), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataUArray(vdata), 
HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataAArray(vdata), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataPArray(vdata), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata), HYPRE_MEMORY_HOST); //hypre_TFree(hypre_ParAMGDataGridRelaxType(vdata), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata), HYPRE_MEMORY_HOST); /* Points to VcycleRelaxZtemp of mgr_data, which is already destroyed */ /* if (hypre_ParAMGDataZtemp(vdata)) hypre_ParVectorDestroy(hypre_ParAMGDataZtemp(vdata)); */ if (hypre_ParAMGDataAMat(vdata)) hypre_TFree(hypre_ParAMGDataAMat(vdata), HYPRE_MEMORY_HOST); if (hypre_ParAMGDataBVec(vdata)) hypre_TFree(hypre_ParAMGDataBVec(vdata), HYPRE_MEMORY_HOST); if (hypre_ParAMGDataCommInfo(vdata)) hypre_TFree(hypre_ParAMGDataCommInfo(vdata), HYPRE_MEMORY_HOST); if (new_comm != hypre_MPI_COMM_NULL) { hypre_MPI_Comm_free (&new_comm); } hypre_TFree(vdata, HYPRE_MEMORY_HOST); return hypre_error_flag; } /* Set C-point variables for each reduction level */ /* Currently not implemented */ HYPRE_Int hypre_MGRSetReductionLevelCpoints( void *mgr_vdata, HYPRE_Int nlevels, HYPRE_Int *num_coarse_points, HYPRE_Int **level_coarse_indexes) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_coarse_levels) = nlevels; (mgr_data -> num_coarse_per_level) = num_coarse_points; (mgr_data -> level_coarse_indexes) = level_coarse_indexes; return hypre_error_flag; } /* Initialize some data */ /* Set whether non-coarse points on each level should be explicitly tagged as F-points */ HYPRE_Int hypre_MGRSetNonCpointsToFpoints( void *mgr_vdata, HYPRE_Int nonCptToFptFlag) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> set_non_Cpoints_to_F) = nonCptToFptFlag; return hypre_error_flag; } /* Set whether the reserved C points are reduced before the coarse grid solve */ HYPRE_Int hypre_MGRSetReservedCpointsLevelToKeep(void *mgr_vdata, HYPRE_Int level) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> lvl_to_keep_cpoints) = level; return hypre_error_flag; } /* Set Cpoints by contiguous blocks, i.e. p1, p2, ..., pn, s1, s2, ..., sn, ... 
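For example, with block_size = 2 and two unknowns p and s on n grid points, the global ordering is assumed to be p1, ..., pn, s1, ..., sn, and begin_idx_array[j] (when supplied) holds the first global index of variable block j. (Editorial illustration inferred from the argument list below.)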
*/ HYPRE_Int hypre_MGRSetCpointsByContiguousBlock( void *mgr_vdata, HYPRE_Int block_size, HYPRE_Int max_num_levels, HYPRE_BigInt *begin_idx_array, HYPRE_Int *block_num_coarse_points, HYPRE_Int **block_coarse_indexes) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; if((mgr_data -> idx_array) != NULL) { hypre_TFree(mgr_data -> idx_array, HYPRE_MEMORY_HOST); (mgr_data -> idx_array) = NULL; } HYPRE_BigInt *index_array = hypre_CTAlloc(HYPRE_BigInt, block_size, HYPRE_MEMORY_HOST); if (begin_idx_array != NULL) { for (i = 0; i < block_size; i++) { index_array[i] = *(begin_idx_array+i); } } hypre_MGRSetCpointsByBlock(mgr_data, block_size, max_num_levels, block_num_coarse_points, block_coarse_indexes); (mgr_data -> idx_array) = index_array; (mgr_data -> set_c_points_method) = 1; return hypre_error_flag; } /* Initialize/ set local block data information */ HYPRE_Int hypre_MGRSetCpointsByBlock( void *mgr_vdata, HYPRE_Int block_size, HYPRE_Int max_num_levels, HYPRE_Int *block_num_coarse_points, HYPRE_Int **block_coarse_indexes) { HYPRE_Int i,j; HYPRE_Int **block_cf_marker = NULL; HYPRE_Int *block_num_coarse_indexes = NULL; hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; /* free block cf_marker data if not previously destroyed */ if((mgr_data -> block_cf_marker) != NULL) { for (i=0; i < (mgr_data -> max_num_coarse_levels); i++) { if ((mgr_data -> block_cf_marker)[i]) { hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker)[i] = NULL; } } hypre_TFree(mgr_data -> block_cf_marker, HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker) = NULL; } if((mgr_data -> block_num_coarse_indexes)) { hypre_TFree((mgr_data -> block_num_coarse_indexes), HYPRE_MEMORY_HOST); (mgr_data -> block_num_coarse_indexes) = NULL; } /* store block cf_marker */ block_cf_marker = hypre_CTAlloc(HYPRE_Int *, max_num_levels, HYPRE_MEMORY_HOST); for (i = 0; i < max_num_levels; i++) { block_cf_marker[i] = hypre_CTAlloc(HYPRE_Int, block_size, HYPRE_MEMORY_HOST); memset(block_cf_marker[i], FMRK, block_size*sizeof(HYPRE_Int)); } for (i = 0; i < max_num_levels; i++) { for(j=0; j<block_num_coarse_points[i]; j++) { (block_cf_marker[i])[block_coarse_indexes[i][j]] = CMRK; } } /* store block_num_coarse_points */ if(max_num_levels > 0) { block_num_coarse_indexes = hypre_CTAlloc(HYPRE_Int, max_num_levels, HYPRE_MEMORY_HOST); for(i=0; i<max_num_levels; i++) block_num_coarse_indexes[i] = block_num_coarse_points[i]; } /* set block data */ (mgr_data -> max_num_coarse_levels) = max_num_levels; (mgr_data -> block_size) = block_size; (mgr_data -> block_num_coarse_indexes) = block_num_coarse_indexes; (mgr_data -> block_cf_marker) = block_cf_marker; (mgr_data -> set_c_points_method) = 0; return hypre_error_flag; } HYPRE_Int hypre_MGRSetCpointsByPointMarkerArray( void *mgr_vdata, HYPRE_Int block_size, HYPRE_Int max_num_levels, HYPRE_Int *lvl_num_coarse_points, HYPRE_Int **lvl_coarse_indexes, HYPRE_Int *point_marker_array) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i,j; HYPRE_Int **block_cf_marker = NULL; HYPRE_Int *block_num_coarse_indexes = NULL; /* free block cf_marker data if not previously destroyed */ if((mgr_data -> block_cf_marker) != NULL) { for (i=0; i < (mgr_data -> max_num_coarse_levels); i++) { if ((mgr_data -> block_cf_marker)[i]) { hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker)[i] = NULL; } } hypre_TFree(mgr_data -> block_cf_marker, HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker) = NULL; } 
if((mgr_data -> block_num_coarse_indexes)) { hypre_TFree((mgr_data -> block_num_coarse_indexes), HYPRE_MEMORY_HOST); (mgr_data -> block_num_coarse_indexes) = NULL; } /* store block cf_marker */ block_cf_marker = hypre_CTAlloc(HYPRE_Int *, max_num_levels, HYPRE_MEMORY_HOST); for (i = 0; i < max_num_levels; i++) { block_cf_marker[i] = hypre_CTAlloc(HYPRE_Int, block_size, HYPRE_MEMORY_HOST); memset(block_cf_marker[i], FMRK, block_size*sizeof(HYPRE_Int)); } for (i = 0; i < max_num_levels; i++) { for(j=0; j<lvl_num_coarse_points[i]; j++) { block_cf_marker[i][j] = lvl_coarse_indexes[i][j]; } } /* store block_num_coarse_points */ if(max_num_levels > 0) { block_num_coarse_indexes = hypre_CTAlloc(HYPRE_Int, max_num_levels, HYPRE_MEMORY_HOST); for(i=0; i<max_num_levels; i++) block_num_coarse_indexes[i] = lvl_num_coarse_points[i]; } /* set block data */ (mgr_data -> max_num_coarse_levels) = max_num_levels; (mgr_data -> block_size) = block_size; (mgr_data -> block_num_coarse_indexes) = block_num_coarse_indexes; (mgr_data -> block_cf_marker) = block_cf_marker; (mgr_data -> point_marker_array) = point_marker_array; (mgr_data -> set_c_points_method) = 2; return hypre_error_flag; } /*Set number of points that remain part of the coarse grid throughout the hierarchy */ HYPRE_Int hypre_MGRSetReservedCoarseNodes(void *mgr_vdata, HYPRE_Int reserved_coarse_size, HYPRE_BigInt *reserved_cpt_index) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_BigInt *reserved_coarse_indexes = NULL; HYPRE_Int i; if (!mgr_data) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! MGR object empty!\n"); return hypre_error_flag; } if(reserved_coarse_size < 0) { hypre_error_in_arg(2); return hypre_error_flag; } /* free data not previously destroyed */ if((mgr_data -> reserved_coarse_indexes)) { hypre_TFree((mgr_data -> reserved_coarse_indexes), HYPRE_MEMORY_HOST); (mgr_data -> reserved_coarse_indexes) = NULL; } /* set reserved coarse nodes */ if(reserved_coarse_size > 0) { reserved_coarse_indexes = hypre_CTAlloc(HYPRE_BigInt, reserved_coarse_size, HYPRE_MEMORY_HOST); for(i=0; i<reserved_coarse_size; i++) reserved_coarse_indexes[i] = reserved_cpt_index[i]; } (mgr_data -> reserved_coarse_size) = reserved_coarse_size; (mgr_data -> reserved_coarse_indexes) = reserved_coarse_indexes; return hypre_error_flag; } /* Set CF marker array */ HYPRE_Int hypre_MGRCoarsen(hypre_ParCSRMatrix *S, hypre_ParCSRMatrix *A, HYPRE_Int fixed_coarse_size, HYPRE_Int *fixed_coarse_indexes, HYPRE_Int debug_flag, HYPRE_Int **CF_marker_ptr, HYPRE_Int cflag) { HYPRE_Int *CF_marker = NULL; HYPRE_Int *cindexes = fixed_coarse_indexes; HYPRE_Int i, row, nc; HYPRE_Int nloc = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); /* If this is the last level, coarsen onto fixed coarse set */ if(cflag) { if(*CF_marker_ptr != NULL) { hypre_TFree(*CF_marker_ptr, HYPRE_MEMORY_HOST); } CF_marker = hypre_CTAlloc(HYPRE_Int, nloc, HYPRE_MEMORY_HOST); memset(CF_marker, FMRK, nloc*sizeof(HYPRE_Int)); /* first mark fixed coarse set */ nc = fixed_coarse_size; for(i = 0; i < nc; i++) { CF_marker[cindexes[i]] = CMRK; } } else { /* First coarsen to get initial CF splitting. * This is then followed by updating the CF marker to pass * coarse information to the next levels. NOTE: It may be * convenient to implement this way (allows the use of multiple * coarsening strategies without changing too much code), * but not necessarily the best option, compared to initializing * CF_marker first and then coarsening on subgraph which excludes * the initialized coarse nodes. 
*/ hypre_BoomerAMGCoarsen(S, A, 0, debug_flag, &CF_marker); /* Update CF_marker to correct Cpoints marked as Fpoints. */ nc = fixed_coarse_size; for(i = 0; i < nc; i++) { CF_marker[cindexes[i]] = CMRK; } /* set F-points to FMRK. This is necessary since the different coarsening schemes differentiate * between type of F-points (example Ruge coarsening). We do not need that distinction here. */ for (row = 0; row <nloc; row++) { if(CF_marker[row] == CMRK) continue; CF_marker[row] = FMRK; } #if 0 /* IMPORTANT: Update coarse_indexes array to define the positions of the fixed coarse points * in the next level. */ nc = 0; index_i = 0; for (row = 0; row <nloc; row++) { /* loop through new c-points */ if(CF_marker[row] == CMRK) nc++; else if(CF_marker[row] == S_CMRK) { /* previously marked c-point is part of fixed coarse set. Track its current local index */ cindexes[index_i++] = nc; /* reset c-point from S_CMRK to CMRK */ cf_marker[row] = CMRK; nc++; } /* set F-points to FMRK. This is necessary since the different coarsening schemes differentiate * between type of F-points (example Ruge coarsening). We do not need that distinction here. */ else { CF_marker[row] = FMRK; } } /* check if this should be last level */ if( nc == fixed_coarse_size) last_level = 1; //printf(" nc = %d and fixed coarse size = %d \n", nc, fixed_coarse_size); #endif } /* set CF_marker */ *CF_marker_ptr = CF_marker; return hypre_error_flag; } /* Interpolation for MGR - Adapted from BoomerAMGBuildInterp */ HYPRE_Int hypre_MGRBuildP( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int method, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Real *a_diag; hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; HYPRE_Int *tmp_map_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int *P_marker, *P_marker_offd; HYPRE_Int jj_counter,jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; // HYPRE_Int jj_begin_row,jj_begin_row_offd; // HYPRE_Int jj_end_row,jj_end_row_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int *fine_to_coarse; //HYPRE_BigInt *fine_to_coarse_offd; HYPRE_Int *coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_cpts; //HYPRE_BigInt my_first_cpt; HYPRE_Int num_cols_P_offd; HYPRE_Int i,i1; HYPRE_Int j,jl,jj; HYPRE_Int start; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); //num_threads = hypre_NumThreads(); // 
Temporary fix, disable threading // TODO: enable threading num_threads = 1; #ifdef HYPRE_NO_GLOBAL_PARTITION //my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else //my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag < 0) { debug_flag = -debug_flag; } if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. 
*--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { jj_count[j]++; fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc} *--------------------------------------------------------------------*/ else { for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; if ((CF_marker[i1] >= 0) && (method > 0)) { jj_count[j]++; } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if ((CF_marker_offd[i1] >= 0) && (method > 0)) { jj_count_offd[j]++; } } } } } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ for (i=0; i < num_threads-1; i++) { coarse_counter[i+1] += coarse_counter[i]; jj_count[i+1] += jj_count[i]; jj_count_offd[i+1] += jj_count_offd[i]; } i = num_threads-1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. 
*-----------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) coarse_shift = coarse_counter[j-1]; size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { fine_to_coarse[i] += coarse_shift; } } /* index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) big_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]+ my_first_cpt; } comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); */ if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time); fflush(NULL); } if (debug_flag==4) wall_time = time_getWallclockSeconds(); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif //for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { if (CF_marker[i] < 0) { for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; if ( i==i1 ) /* diagonal of A only */ { a_diag[i] = 1.0/A_diag_data[jj]; } } } } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE #endif #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (jl < rest) { ns = jl*size+jl; ne = (jl+1)*size+jl+1; } else { ns = jl*size+rest; ne = (jl+1)*size+rest; } jj_counter = 0; if (jl > 0) jj_counter = jj_count[jl-1]; jj_counter_offd = 0; if (jl > 0) jj_counter_offd = jj_count_offd[jl-1]; P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); if (num_cols_A_offd) P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); else P_marker_offd = NULL; for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } for (i = 0; i < num_cols_A_offd; i++) { P_marker_offd[i] = -1; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. 
*--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if ((CF_marker[i1] >= 0) && (method > 0)) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; /* if(method == 0) { P_diag_data[jj_counter] = 0.0; } */ if (method == 1) { P_diag_data[jj_counter] = - A_diag_data[jj]; } else if (method == 2) { P_diag_data[jj_counter] = - A_diag_data[jj]*a_diag[i]; } jj_counter++; } } /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; /*----------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_offd_j * and initialize interpolation weight to zero. *-----------------------------------------------------------*/ if ((CF_marker_offd[i1] >= 0) && (method > 0)) { P_marker_offd[i1] = jj_counter_offd; /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/ P_offd_j[jj_counter_offd] = i1; /* if(method == 0) { P_offd_data[jj_counter_offd] = 0.0; } */ if (method == 1) { P_offd_data[jj_counter_offd] = - A_offd_data[jj]; } else if (method == 2) { P_offd_data[jj_counter_offd] = - A_offd_data[jj]*a_diag[i]; } jj_counter_offd++; } } } } P_offd_i[i+1] = jj_counter_offd; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } hypre_TFree(a_diag, HYPRE_MEMORY_HOST); P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i=0; i < num_cols_A_offd; i++) P_marker[i] = 0; num_cols_P_offd = 0; for (i=0; i < P_offd_size; i++) { index = P_offd_j[i]; if (!P_marker[index]) { num_cols_P_offd++; P_marker[index] = 1; } } col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); index = 0; for (i=0; i < num_cols_P_offd; i++) { while (P_marker[index]==0) index++; tmp_map_offd[i] = index++; } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i=0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } for (i=0; i < n_fine; i++) if (CF_marker[i] == -3) CF_marker[i] = -1; if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse, tmp_map_offd); *P_ptr = P; hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); 
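/* Editorial note: the P assembled above has an identity row at every C-point
 * and, in each F-point row i, entries -a_ij (method 1) or -a_ij / a_ii
 * (method 2) in the C-point columns j. Blockwise this is P = [ W ; I ] with
 * W = -A_fc or W = -diag(A_ff)^{-1} A_fc, the diagonal approximation of
 * -A_ff^{-1} A_fc referred to in the comments above. */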
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); //hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); return(0); } /* Interpolation for MGR - Dynamic Row Sum method */ HYPRE_Int hypre_MGRBuildPDRS( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int blk_size, HYPRE_Int reserved_coarse_size, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Real *a_diag; hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; HYPRE_Int *tmp_map_offd; HYPRE_Int *CF_marker_offd = NULL; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int *P_marker, *P_marker_offd; HYPRE_Int jj_counter,jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; // HYPRE_Int jj_begin_row,jj_begin_row_offd; // HYPRE_Int jj_end_row,jj_end_row_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int *fine_to_coarse; //HYPRE_BigInt *fine_to_coarse_offd; HYPRE_Int *coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_cpts; //HYPRE_BigInt my_first_cpt; HYPRE_Int num_cols_P_offd; HYPRE_Int i,i1; HYPRE_Int j,jl,jj; HYPRE_Int start; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); //num_threads = hypre_NumThreads(); // Temporary fix, disable threading // TODO: enable threading num_threads = 1; #ifdef HYPRE_NO_GLOBAL_PARTITION //my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else //my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag < 0) { debug_flag = -debug_flag; } if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; 
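/* Pack the local CF_marker entries listed in each send map so that
 * neighboring ranks can fill CF_marker_offd for their off-processor columns
 * (same exchange as in hypre_MGRBuildP above). */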
for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { jj_count[j]++; fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc} *--------------------------------------------------------------------*/ else { for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; if (CF_marker[i1] >= 0) { jj_count[j]++; } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if (CF_marker_offd[i1] >= 0) { jj_count_offd[j]++; } } } } /*-------------------------------------------------------------------- * Set up the indexes for the DRS method *--------------------------------------------------------------------*/ } } /*----------------------------------------------------------------------- * Allocate arrays. 
*-----------------------------------------------------------------------*/ for (i=0; i < num_threads-1; i++) { coarse_counter[i+1] += coarse_counter[i]; jj_count[i+1] += jj_count[i]; jj_count_offd[i+1] += jj_count_offd[i]; } i = num_threads-1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. *-----------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) coarse_shift = coarse_counter[j-1]; size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) fine_to_coarse[i] += coarse_shift; } /*index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); */ if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time); fflush(NULL); } if (debug_flag==4) wall_time = time_getWallclockSeconds(); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif //for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; /*----------------------------------------------------------------------- * Loop over fine grid points. 
    *-----------------------------------------------------------------------*/
   a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST);
   for (i = 0; i < n_fine; i++)
   {
      for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
      {
         i1 = A_diag_j[jj];
         if ( i==i1 )  /* diagonal of A only */
         {
            a_diag[i] = 1.0/A_diag_data[jj];
         }
      }
   }

#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (jl = 0; jl < num_threads; jl++)
   {
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (jl < rest)
      {
         ns = jl*size+jl;
         ne = (jl+1)*size+jl+1;
      }
      else
      {
         ns = jl*size+rest;
         ne = (jl+1)*size+rest;
      }
      jj_counter = 0;
      if (jl > 0) jj_counter = jj_count[jl-1];
      jj_counter_offd = 0;
      if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];

      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
      if (num_cols_A_offd)
         P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
      else
         P_marker_offd = NULL;

      for (i = 0; i < n_fine; i++)
      {
         P_marker[i] = -1;
      }
      for (i = 0; i < num_cols_A_offd; i++)
      {
         P_marker_offd[i] = -1;
      }

      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a c-point, interpolation is the identity.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            P_diag_i[i] = jj_counter;
            P_diag_j[jj_counter]    = fine_to_coarse[i];
            P_diag_data[jj_counter] = one;
            jj_counter++;
         }
         /*--------------------------------------------------------------------
          * If i is an F-point, build interpolation.
          *--------------------------------------------------------------------*/
         else
         {
            /* Diagonal part of P */
            P_diag_i[i] = jj_counter;
            for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
            {
               i1 = A_diag_j[jj];

               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and set the interpolation weight to -a_{i,i1} / a_{ii}.
                *--------------------------------------------------------------*/
               if (CF_marker[i1] >= 0)
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter]    = fine_to_coarse[i1];
                  P_diag_data[jj_counter] = - A_diag_data[jj]*a_diag[i];
                  jj_counter++;
               }
            }

            /* Off-Diagonal part of P */
            P_offd_i[i] = jj_counter_offd;

            if (num_procs > 1)
            {
               for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
               {
                  i1 = A_offd_j[jj];

                  /*-----------------------------------------------------------
                   * If neighbor i1 is a C-point, set column number in P_offd_j
                   * and set the interpolation weight to -a_{i,i1} / a_{ii}.
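                   *
                   * Note: P_offd_j temporarily stores column indices in the
                   * off-diagonal numbering of A; after this loop they are
                   * compressed to P's own col_map_offd via tmp_map_offd and
                   * hypre_BinarySearch.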
                   *-----------------------------------------------------------*/
                  if (CF_marker_offd[i1] >= 0)
                  {
                     P_marker_offd[i1] = jj_counter_offd;
                     /*P_offd_j[jj_counter_offd]  = fine_to_coarse_offd[i1];*/
                     P_offd_j[jj_counter_offd]    = i1;
                     P_offd_data[jj_counter_offd] = - A_offd_data[jj]*a_diag[i];
                     jj_counter_offd++;
                  }
               }
            }
         }
         P_offd_i[i+1] = jj_counter_offd;
      }
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
      hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(a_diag, HYPRE_MEMORY_HOST);

   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;

   num_cols_P_offd = 0;

   if (P_offd_size)
   {
      P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
      for (i=0; i < num_cols_A_offd; i++)
         P_marker[i] = 0;

      num_cols_P_offd = 0;
      for (i=0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker[index])
         {
            num_cols_P_offd++;
            P_marker[index] = 1;
         }
      }

      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);

      index = 0;
      for (i=0; i < num_cols_P_offd; i++)
      {
         while (P_marker[index]==0) index++;
         tmp_map_offd[i] = index++;
      }

#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
      for (i=0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd);
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }

   for (i=0; i < n_fine; i++)
      if (CF_marker[i] == -3) CF_marker[i] = -1;

   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }

   hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse, tmp_map_offd);

   *P_ptr = P;

   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   // hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);

   return(0);
}

/* Scale the rows of ParCSR matrix A by the entries of vector:
 *    A <- diag(vector) * A
 * A: the target ParCSR matrix
 * vector: array of real numbers, one scale factor per local row of A
 */
HYPRE_Int
hypre_ParCSRMatrixLeftScale(HYPRE_Real *vector, hypre_ParCSRMatrix *A)
{
   HYPRE_Int i, j, n_local;
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);

   n_local = hypre_CSRMatrixNumRows(A_diag);

   for (i = 0; i < n_local; i++)
   {
      HYPRE_Real factor = vector[i];
      for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
      {
         A_diag_data[j] *= factor;
      }
      for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
      {
         A_offd_data[j] *= factor;
      }
   }

   return(0);
}

/************************************************************
 * Available methods:
 *   0: inv(A_FF) approximated by its diagonal inverse
 *   1: inv(A_FF)
 *      approximated by sparse approximate inverse
 *************************************************************/
HYPRE_Int
hypre_MGRComputeNonGalerkinCoarseGrid(hypre_ParCSRMatrix *A,
                                      hypre_ParCSRMatrix *P,
                                      hypre_ParCSRMatrix *RT,
                                      HYPRE_Int bsize,
                                      HYPRE_Int ordering,
                                      HYPRE_Int method,
                                      HYPRE_Int Pmax,
                                      HYPRE_Int keep_stencil,
                                      HYPRE_Int *CF_marker,
                                      hypre_ParCSRMatrix **A_h_ptr)
{
   HYPRE_Int *c_marker, *f_marker;
   HYPRE_Int n_local_fine_grid, i, i1, jj;
   hypre_ParCSRMatrix *A_cc;
   hypre_ParCSRMatrix *A_ff;
   hypre_ParCSRMatrix *A_fc;
   hypre_ParCSRMatrix *A_cf;
   hypre_ParCSRMatrix *A_h;
   hypre_ParCSRMatrix *A_h_correction;
   HYPRE_Int max_elmts = Pmax;
   // HYPRE_Real wall_time = 0.;
   hypre_ParCSRMatrix *P_mod = NULL;

   HYPRE_Int my_id;
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_MPI_Comm_rank(comm,&my_id);

   n_local_fine_grid = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   c_marker = hypre_CTAlloc(HYPRE_Int, n_local_fine_grid, HYPRE_MEMORY_HOST);
   f_marker = hypre_CTAlloc(HYPRE_Int, n_local_fine_grid, HYPRE_MEMORY_HOST);

   for (i = 0; i < n_local_fine_grid; i++)
   {
      HYPRE_Int point_type = CF_marker[i];
      hypre_assert(point_type == 1 || point_type == -1);
      c_marker[i] = point_type;
      f_marker[i] = -point_type;
   }

   // get the A_cc sub-block
   hypre_MGRGetSubBlock(A, c_marker, c_marker, 0, &A_cc);

   if (method == 0)
   {
      if (keep_stencil)
      {
         //wall_time = time_getWallclockSeconds();
         hypre_MGRGetSubBlock(A, c_marker, f_marker, 0, &A_cf);
         hypre_MGRGetSubBlock(A, f_marker, c_marker, 0, &A_fc);
         hypre_MGRGetSubBlock(A, f_marker, f_marker, 0, &A_ff);

         // extract the diagonal of A_ff and compute D_ff_inv
         hypre_CSRMatrix *A_ff_diag = hypre_ParCSRMatrixDiag(A_ff);
         HYPRE_Real *A_ff_diag_data = hypre_CSRMatrixData(A_ff_diag);
         HYPRE_Int *A_ff_diag_i = hypre_CSRMatrixI(A_ff_diag);
         HYPRE_Int *A_ff_diag_j = hypre_CSRMatrixJ(A_ff_diag);
         HYPRE_Int n_local_fpoints = hypre_CSRMatrixNumRows(A_ff_diag);

         HYPRE_Real *D_ff_inv;
         D_ff_inv = hypre_CTAlloc(HYPRE_Real, n_local_fpoints, HYPRE_MEMORY_HOST);
         for (i = 0; i < n_local_fpoints; i++)
         {
            for (jj = A_ff_diag_i[i]; jj < A_ff_diag_i[i+1]; jj++)
            {
               i1 = A_ff_diag_j[jj];
               if ( i==i1 )
               {
                  D_ff_inv[i] = -1.0/A_ff_diag_data[jj];
               }
            }
         }

         // extract the diagonal of A_cf
         hypre_CSRMatrix *A_cf_diag = hypre_ParCSRMatrixDiag(A_cf);
         HYPRE_Real *A_cf_diag_data = hypre_CSRMatrixData(A_cf_diag);
         HYPRE_Int *A_cf_diag_i = hypre_CSRMatrixI(A_cf_diag);
         HYPRE_Int *A_cf_diag_j = hypre_CSRMatrixJ(A_cf_diag);
         n_local_fpoints = hypre_CSRMatrixNumRows(A_cf_diag);

         HYPRE_Real *D_cf;
         D_cf = hypre_CTAlloc(HYPRE_Real, n_local_fpoints, HYPRE_MEMORY_HOST);
         for (i = 0; i < n_local_fpoints; i++)
         {
            /* the first stored entry of row i is taken as the diagonal of
               A_cf; note jj must be re-set here (a stale index from the
               D_ff_inv loop above would read the wrong entry) */
            jj = A_cf_diag_i[i];
            i1 = A_cf_diag_j[jj];
            D_cf[i] = A_cf_diag_data[jj];
         }

         // compute the triple product D_cf * D_ff_inv * A_fc
         hypre_ParCSRMatrixLeftScale(D_ff_inv, A_fc);
         hypre_ParCSRMatrixLeftScale(D_cf, A_fc);
         A_h_correction = A_fc;

         hypre_TFree(D_cf, HYPRE_MEMORY_HOST);
         hypre_TFree(D_ff_inv, HYPRE_MEMORY_HOST);
         hypre_ParCSRMatrixDestroy(A_ff);
         hypre_ParCSRMatrixDestroy(A_cf);
         //wall_time = time_getWallclockSeconds() - wall_time;
         //hypre_printf("Compute triple product D_cf * D_ff_inv * A_fc time: %1.5f\n", wall_time);
      }
      else
      {
         //wall_time = time_getWallclockSeconds();
         P_mod = hypre_ParCSRMatrixCompleteClone(P);
         hypre_ParCSRMatrixCopy(P,P_mod,1);

         HYPRE_Int n_local_rows = hypre_ParCSRMatrixNumRows(P_mod);
         hypre_CSRMatrix *P_mod_diag = hypre_ParCSRMatrixDiag(P_mod);
         HYPRE_Int *P_mod_diag_i = hypre_CSRMatrixI(P_mod_diag);
         HYPRE_Real *P_mod_diag_data = hypre_CSRMatrixData(P_mod_diag);

         for (i = 0; i < n_local_rows; i ++)
         {
            if (CF_marker[i] >= 0)
            {
               HYPRE_Int ii = P_mod_diag_i[i];
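               /* For a C-row of P the first stored diag entry is its
                  identity coefficient (value one at its coarse column);
                  zeroing it turns P_mod into [W; 0], so the RAP product
                  below yields only the correction term, not A_cc itself. */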
               P_mod_diag_data[ii] = 0.0;
            }
         }
         hypre_BoomerAMGBuildCoarseOperator(RT, A, P_mod, &A_h_correction);
         //wall_time = time_getWallclockSeconds() - wall_time;
         //hypre_printf("Compute triple product time new: %1.5f\n", wall_time);
         hypre_ParCSRMatrixDestroy(P_mod);
      }
   }
   else
   {
      // Approximate inverse for ideal interpolation
      hypre_MGRGetSubBlock(A, c_marker, f_marker, 0, &A_cf);
      hypre_MGRGetSubBlock(A, f_marker, c_marker, 0, &A_fc);
      hypre_MGRGetSubBlock(A, f_marker, f_marker, 0, &A_ff);

      hypre_ParCSRMatrix *A_ff_inv = NULL;
      hypre_ParCSRMatrix *minus_Wp = NULL;
      hypre_MGRApproximateInverse(A_ff, &A_ff_inv);
      minus_Wp = hypre_ParMatmul(A_ff_inv, A_fc);
      A_h_correction = hypre_ParMatmul(A_cf, minus_Wp);

      hypre_ParCSRMatrixDestroy(minus_Wp);
      hypre_ParCSRMatrixDestroy(A_ff);
      hypre_ParCSRMatrixDestroy(A_fc);
      hypre_ParCSRMatrixDestroy(A_cf);
   }

   // perform dropping for A_h_correction
   // specific to multiphase poromechanics
   // we only keep the diagonal of each block
   //wall_time = time_getWallclockSeconds();
   HYPRE_Int n_local_cpoints = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A_h_correction));

   hypre_CSRMatrix *A_h_correction_diag = hypre_ParCSRMatrixDiag(A_h_correction);
   HYPRE_Real *A_h_correction_diag_data = hypre_CSRMatrixData(A_h_correction_diag);
   HYPRE_Int *A_h_correction_diag_i = hypre_CSRMatrixI(A_h_correction_diag);
   HYPRE_Int *A_h_correction_diag_j = hypre_CSRMatrixJ(A_h_correction_diag);
   HYPRE_Int ncol_diag = hypre_CSRMatrixNumCols(A_h_correction_diag);

   hypre_CSRMatrix *A_h_correction_offd = hypre_ParCSRMatrixOffd(A_h_correction);
   HYPRE_Real *A_h_correction_offd_data = hypre_CSRMatrixData(A_h_correction_offd);
   HYPRE_Int *A_h_correction_offd_i = hypre_CSRMatrixI(A_h_correction_offd);
   HYPRE_Int *A_h_correction_offd_j = hypre_CSRMatrixJ(A_h_correction_offd);

   // Allow for maximum dropping with Pmax = 0
   //if (Pmax > 0)
   //{
   if (ordering == 0) // interleaved ordering
   {
      HYPRE_Int *A_h_correction_diag_i_new = hypre_CTAlloc(HYPRE_Int, n_local_cpoints+1, HYPRE_MEMORY_HOST);
      HYPRE_Int *A_h_correction_diag_j_new = hypre_CTAlloc(HYPRE_Int, (bsize + max_elmts)*n_local_cpoints, HYPRE_MEMORY_HOST);
      HYPRE_Complex *A_h_correction_diag_data_new = hypre_CTAlloc(HYPRE_Complex, (bsize + max_elmts)*n_local_cpoints, HYPRE_MEMORY_HOST);
      HYPRE_Int num_nonzeros_diag_new = 0;

      HYPRE_Int *A_h_correction_offd_i_new = hypre_CTAlloc(HYPRE_Int, n_local_cpoints+1, HYPRE_MEMORY_HOST);
      HYPRE_Int *A_h_correction_offd_j_new = hypre_CTAlloc(HYPRE_Int, max_elmts*n_local_cpoints, HYPRE_MEMORY_HOST);
      HYPRE_Complex *A_h_correction_offd_data_new = hypre_CTAlloc(HYPRE_Complex, max_elmts*n_local_cpoints, HYPRE_MEMORY_HOST);
      HYPRE_Int num_nonzeros_offd_new = 0;

      for (i = 0; i < n_local_cpoints; i++)
      {
         HYPRE_Int max_num_nonzeros = A_h_correction_diag_i[i+1] - A_h_correction_diag_i[i]
                                    + A_h_correction_offd_i[i+1] - A_h_correction_offd_i[i];
         HYPRE_Int *aux_j = hypre_CTAlloc(HYPRE_Int, max_num_nonzeros, HYPRE_MEMORY_HOST);
         HYPRE_Real *aux_data = hypre_CTAlloc(HYPRE_Real, max_num_nonzeros, HYPRE_MEMORY_HOST);

         HYPRE_Int row_start = i - (i % bsize);
         HYPRE_Int row_stop = row_start + bsize - 1;
         HYPRE_Int cnt = 0;
         for (jj = A_h_correction_offd_i[i]; jj < A_h_correction_offd_i[i+1]; jj++)
         {
            aux_j[cnt] = A_h_correction_offd_j[jj] + ncol_diag;
            aux_data[cnt] = A_h_correction_offd_data[jj];
            cnt++;
         }
         for (jj = A_h_correction_diag_i[i]; jj < A_h_correction_diag_i[i+1]; jj++)
         {
            aux_j[cnt] = A_h_correction_diag_j[jj];
            aux_data[cnt] = A_h_correction_diag_data[jj];
            cnt++;
         }
         hypre_qsort2_abs(aux_j, aux_data, 0, cnt-1);

         for (jj = A_h_correction_diag_i[i]; jj < A_h_correction_diag_i[i+1]; jj++)
         {
            i1 = A_h_correction_diag_j[jj];
            if (i1 >= row_start && i1 <= row_stop)
            {
               // copy data to new arrays
               A_h_correction_diag_j_new[num_nonzeros_diag_new] = i1;
               A_h_correction_diag_data_new[num_nonzeros_diag_new] = A_h_correction_diag_data[jj];
               ++num_nonzeros_diag_new;
            }
            else
            {
               // Do nothing
            }
         }

         if (max_elmts > 0)
         {
            for (jj = 0; jj < hypre_min(max_elmts, cnt); jj++)
            {
               HYPRE_Int col_idx = aux_j[jj];
               HYPRE_Real col_value = aux_data[jj];
               if (col_idx < ncol_diag && (col_idx < row_start || col_idx > row_stop))
               {
                  A_h_correction_diag_j_new[num_nonzeros_diag_new] = col_idx;
                  A_h_correction_diag_data_new[num_nonzeros_diag_new] = col_value;
                  ++num_nonzeros_diag_new;
               }
               else if (col_idx >= ncol_diag)
               {
                  A_h_correction_offd_j_new[num_nonzeros_offd_new] = col_idx - ncol_diag;
                  A_h_correction_offd_data_new[num_nonzeros_offd_new] = col_value;
                  ++num_nonzeros_offd_new;
               }
            }
         }
         A_h_correction_diag_i_new[i+1] = num_nonzeros_diag_new;
         A_h_correction_offd_i_new[i+1] = num_nonzeros_offd_new;

         hypre_TFree(aux_j, HYPRE_MEMORY_HOST);
         hypre_TFree(aux_data, HYPRE_MEMORY_HOST);
      }

      hypre_TFree(A_h_correction_diag_i, HYPRE_MEMORY_HOST);
      hypre_TFree(A_h_correction_diag_j, HYPRE_MEMORY_HOST);
      hypre_TFree(A_h_correction_diag_data, HYPRE_MEMORY_HOST);
      hypre_CSRMatrixI(A_h_correction_diag) = A_h_correction_diag_i_new;
      hypre_CSRMatrixJ(A_h_correction_diag) = A_h_correction_diag_j_new;
      hypre_CSRMatrixData(A_h_correction_diag) = A_h_correction_diag_data_new;
      hypre_CSRMatrixNumNonzeros(A_h_correction_diag) = num_nonzeros_diag_new;

      if (A_h_correction_offd_i) hypre_TFree(A_h_correction_offd_i, HYPRE_MEMORY_HOST);
      if (A_h_correction_offd_j) hypre_TFree(A_h_correction_offd_j, HYPRE_MEMORY_HOST);
      if (A_h_correction_offd_data) hypre_TFree(A_h_correction_offd_data, HYPRE_MEMORY_HOST);
      hypre_CSRMatrixI(A_h_correction_offd) = A_h_correction_offd_i_new;
      hypre_CSRMatrixJ(A_h_correction_offd) = A_h_correction_offd_j_new;
      hypre_CSRMatrixData(A_h_correction_offd) = A_h_correction_offd_data_new;
      hypre_CSRMatrixNumNonzeros(A_h_correction_offd) = num_nonzeros_offd_new;
   }
   else
   {
      hypre_printf("Error!! Block ordering is not supported at the moment\n");
      exit(-1);
   }
   //}

   //hypre_MGRParCSRMatrixTruncate(A_h_correction, max_elmts);
   //wall_time = time_getWallclockSeconds() - wall_time;
   //hypre_printf("Filter A_h_correction time: %1.5f\n", wall_time);
   //hypre_ParCSRMatrixPrintIJ(A_h_correction,1,1,"A_h_correction_filtered");

   // coarse grid / schur complement
   hypre_ParcsrAdd(1.0, A_cc, 1.0, A_h_correction, &A_h);
   *A_h_ptr = A_h;
   //hypre_ParCSRMatrixPrintIJ(A_h,1,1,"A_h");

   hypre_ParCSRMatrixDestroy(A_cc);
   hypre_ParCSRMatrixDestroy(A_h_correction);
   hypre_TFree(c_marker, HYPRE_MEMORY_HOST);
   hypre_TFree(f_marker, HYPRE_MEMORY_HOST);
   return hypre_error_flag;
}

HYPRE_Int
hypre_MGRComputeAlgebraicFixedStress(hypre_ParCSRMatrix *A,
                                     HYPRE_BigInt *mgr_idx_array,
                                     HYPRE_Solver A_ff_solver)
{
   HYPRE_Int *U_marker, *S_marker, *P_marker;
   HYPRE_Int n_fine, i;
   HYPRE_BigInt ibegin;
   hypre_ParCSRMatrix *A_up;
   hypre_ParCSRMatrix *A_uu;
   hypre_ParCSRMatrix *A_su;
   hypre_ParCSRMatrix *A_pu;
   hypre_ParVector *e1_vector;
   hypre_ParVector *e2_vector;
   hypre_ParVector *e3_vector;
   hypre_ParVector *e4_vector;
   hypre_ParVector *e5_vector;

   n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   ibegin = hypre_ParCSRMatrixFirstRowIndex(A);
   hypre_assert(ibegin == mgr_idx_array[0]);

   U_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   S_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);

   for (i = 0; i < n_fine; i++)
   {
      U_marker[i] = -1;
      S_marker[i] = -1;
      P_marker[i] = -1;
   }

   // create C and F markers
   for (i = 0; i < n_fine; i++)
   {
      if (i < mgr_idx_array[1] - ibegin)
      {
         U_marker[i] = 1;
      }
      else if (i >= (mgr_idx_array[1] - ibegin) && i < (mgr_idx_array[2] - ibegin))
      {
         S_marker[i] = 1;
      }
      else
      {
         P_marker[i] = 1;
      }
   }

   // Get A_up
   hypre_MGRGetSubBlock(A, U_marker, P_marker, 0, &A_up);
   // Get A_uu
   hypre_MGRGetSubBlock(A, U_marker, U_marker, 0, &A_uu);
   // Get A_su
   hypre_MGRGetSubBlock(A, S_marker, U_marker, 0, &A_su);
   // Get A_pu
   hypre_MGRGetSubBlock(A, P_marker, U_marker, 0, &A_pu);

   e1_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_up),
                                     hypre_ParCSRMatrixGlobalNumCols(A_up),
                                     hypre_ParCSRMatrixColStarts(A_up));
   hypre_ParVectorInitialize(e1_vector);
   hypre_ParVectorSetPartitioningOwner(e1_vector,0);
   hypre_ParVectorSetConstantValues(e1_vector, 1.0);

   e2_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_uu),
                                     hypre_ParCSRMatrixGlobalNumRows(A_uu),
                                     hypre_ParCSRMatrixRowStarts(A_uu));
   hypre_ParVectorInitialize(e2_vector);
   hypre_ParVectorSetPartitioningOwner(e2_vector,0);
   hypre_ParVectorSetConstantValues(e2_vector, 0.0);

   e3_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_uu),
                                     hypre_ParCSRMatrixGlobalNumRows(A_uu),
                                     hypre_ParCSRMatrixRowStarts(A_uu));
   hypre_ParVectorInitialize(e3_vector);
   hypre_ParVectorSetPartitioningOwner(e3_vector,0);
   hypre_ParVectorSetConstantValues(e3_vector, 0.0);

   e4_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_su),
                                     hypre_ParCSRMatrixGlobalNumRows(A_su),
                                     hypre_ParCSRMatrixRowStarts(A_su));
   hypre_ParVectorInitialize(e4_vector);
   hypre_ParVectorSetPartitioningOwner(e4_vector,0);
   hypre_ParVectorSetConstantValues(e4_vector, 0.0);

   e5_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_pu),
                                     hypre_ParCSRMatrixGlobalNumRows(A_pu),
                                     hypre_ParCSRMatrixRowStarts(A_pu));
   hypre_ParVectorInitialize(e5_vector);
   hypre_ParVectorSetPartitioningOwner(e5_vector,0);
   hypre_ParVectorSetConstantValues(e5_vector, 0.0);

   // compute e2 = A_up * e1
   hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_up, e1_vector, 0.0, e2_vector, e2_vector);

   // solve e3 = A_uu^-1 * e2
   hypre_BoomerAMGSolve(A_ff_solver, A_uu, e2_vector, e3_vector);

   // compute e4 = A_su * e3
   hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_su, e3_vector, 0.0, e4_vector, e4_vector);

   // print e4
   hypre_ParVectorPrintIJ(e4_vector,1,"Dsp");

   // compute e5 = A_pu * e3
   hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_pu, e3_vector, 0.0, e5_vector, e5_vector);
   hypre_ParVectorPrintIJ(e5_vector,1,"Dpp");

   hypre_ParVectorDestroy(e1_vector);
   hypre_ParVectorDestroy(e2_vector);
   hypre_ParVectorDestroy(e3_vector);
   hypre_ParVectorDestroy(e4_vector);
   hypre_ParVectorDestroy(e5_vector);
   hypre_ParCSRMatrixDestroy(A_uu);
   hypre_ParCSRMatrixDestroy(A_up);
   hypre_ParCSRMatrixDestroy(A_pu);
   hypre_ParCSRMatrixDestroy(A_su);
   hypre_TFree(U_marker, HYPRE_MEMORY_HOST);
   hypre_TFree(S_marker, HYPRE_MEMORY_HOST);
   hypre_TFree(P_marker, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

HYPRE_Int
hypre_MGRApproximateInverse(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix **A_inv)
{
   HYPRE_Int print_level, mr_max_row_nnz, mr_max_iter, nsh_max_row_nnz, nsh_max_iter, mr_col_version;
   HYPRE_Real mr_tol, nsh_tol;
   HYPRE_Real *droptol = hypre_CTAlloc(HYPRE_Real, 2, HYPRE_MEMORY_HOST);
   hypre_ParCSRMatrix *approx_A_inv = NULL;

   print_level = 0;
   nsh_max_iter = 2;
   nsh_max_row_nnz = 2; // default 1000
   mr_max_iter = 1;
   mr_tol = 1.0e-3;
   mr_max_row_nnz = 2; // default 800
   mr_col_version = 0;
   nsh_tol = 1.0e-3;
   droptol[0] = 1.0e-2;
   droptol[1] = 1.0e-2;

   hypre_ILUParCSRInverseNSH(A, &approx_A_inv, droptol, mr_tol, nsh_tol,
                             DIVIDE_TOL, mr_max_row_nnz, nsh_max_row_nnz,
                             mr_max_iter, nsh_max_iter, mr_col_version, print_level);
   *A_inv = approx_A_inv;

   if (droptol) hypre_TFree(droptol, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

HYPRE_Int
hypre_MGRBuildInterpApproximateInverseExp(hypre_ParCSRMatrix *A,
                                          hypre_ParCSRMatrix *S,
                                          HYPRE_Int *CF_marker,
                                          HYPRE_BigInt *num_cpts_global,
                                          HYPRE_Int debug_flag,
                                          hypre_ParCSRMatrix **P_ptr)
{
   HYPRE_Int *C_marker;
   HYPRE_Int *F_marker;
   hypre_ParCSRMatrix *A_fc;
   hypre_ParCSRMatrix *minus_Wp;
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);

   hypre_ParCSRMatrix *P;
   HYPRE_BigInt *col_map_offd_P;
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   HYPRE_Real *P_diag_data;
   HYPRE_Int *P_diag_i;
   HYPRE_Int *P_diag_j;
   HYPRE_Real *P_offd_data;
   HYPRE_Int *P_offd_i;
   HYPRE_Int *P_offd_j;
   HYPRE_Int P_diag_size, P_offd_size;
   HYPRE_Int jj_counter,jj_counter_offd;
   HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   HYPRE_Int *fine_to_coarse = NULL;
   HYPRE_Int coarse_counter;
   HYPRE_BigInt total_global_cpts;
   HYPRE_Int num_cols_P_offd;
   // HYPRE_BigInt my_first_cpt;
   HYPRE_Int i, jj;
   HYPRE_Real one = 1.0;
   HYPRE_Int my_id;
   HYPRE_Int num_procs;
   // HYPRE_Int num_threads;
   // HYPRE_Real wall_time; /* for debugging instrumentation */

   C_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   F_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);

   // create C and F markers
   for (i = 0; i < n_fine; i++)
   {
      C_marker[i] = (CF_marker[i] == 1) ? 1 : -1;
      F_marker[i] = (CF_marker[i] == 1) ? -1 : 1;
   }
   // Get A_FC
   hypre_MGRGetSubBlock(A, F_marker, C_marker, 0, &A_fc);

   // compute -Wp
   minus_Wp = hypre_ParMatmul(S, A_fc);

   hypre_CSRMatrix *minus_Wp_diag = hypre_ParCSRMatrixDiag(minus_Wp);
   HYPRE_Real *minus_Wp_diag_data = hypre_CSRMatrixData(minus_Wp_diag);
   HYPRE_Int *minus_Wp_diag_i = hypre_CSRMatrixI(minus_Wp_diag);
   HYPRE_Int *minus_Wp_diag_j = hypre_CSRMatrixJ(minus_Wp_diag);

   hypre_CSRMatrix *minus_Wp_offd = hypre_ParCSRMatrixOffd(minus_Wp);
   HYPRE_Real *minus_Wp_offd_data = hypre_CSRMatrixData(minus_Wp_offd);
   HYPRE_Int *minus_Wp_offd_i = hypre_CSRMatrixI(minus_Wp_offd);
   HYPRE_Int *minus_Wp_offd_j = hypre_CSRMatrixJ(minus_Wp_offd);

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   // num_threads = hypre_NumThreads();

#ifdef HYPRE_NO_GLOBAL_PARTITION
   // my_first_cpt = num_cpts_global[0];
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
   // my_first_cpt = num_cpts_global[my_id];
   total_global_cpts = num_cpts_global[num_procs];
#endif

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    * Initialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);

#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /*-----------------------------------------------------------------------
    * Loop over fine grid.
    *-----------------------------------------------------------------------*/
   HYPRE_Int row_counter = 0;
   coarse_counter = 0;
   for (i = 0; i < n_fine; i++)
   {
      /*--------------------------------------------------------------------
       * If i is a C-point, interpolation is the identity. Also set up
       * mapping vector.
       *--------------------------------------------------------------------*/
      if (CF_marker[i] > 0)
      {
         jj_counter++;
         fine_to_coarse[i] = coarse_counter;
         coarse_counter++;
      }
      else
      {
         /*--------------------------------------------------------------------
          * If i is an F-point, interpolation is the approximation of
          * A_{ff}^{-1}A_{fc}
          *--------------------------------------------------------------------*/
         for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter+1]; jj++)
         {
            jj_counter++;
         }

         if (num_procs > 1)
         {
            for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter+1]; jj++)
            {
               jj_counter_offd++;
            }
         }
         row_counter++;
      }
   }

   /*-----------------------------------------------------------------------
    * Allocate arrays.
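    *
    * Note: the P arrays below are allocated with HYPRE_MEMORY_DEVICE but
    * written directly on the host; this assumes a build where that memory
    * location is host-accessible (e.g. hypre without GPU support, or
    * unified memory).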
    *-----------------------------------------------------------------------*/
   P_diag_size = jj_counter;

   P_diag_i    = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
   P_diag_j    = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE);

   P_diag_i[n_fine] = jj_counter;

   P_offd_size = jj_counter_offd;

   P_offd_i    = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
   P_offd_j    = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE);

   /*-----------------------------------------------------------------------
    * Initialize some stuff.
    *-----------------------------------------------------------------------*/
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /*-----------------------------------------------------------------------
    * Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/
   row_counter = 0;
   for (i = 0; i < n_fine; i++)
   {
      /*--------------------------------------------------------------------
       * If i is a c-point, interpolation is the identity.
       *--------------------------------------------------------------------*/
      if (CF_marker[i] >= 0)
      {
         P_diag_i[i] = jj_counter;
         P_diag_j[jj_counter]    = fine_to_coarse[i];
         P_diag_data[jj_counter] = one;
         jj_counter++;
      }
      /*--------------------------------------------------------------------
       * If i is an F-point, build interpolation.
       *--------------------------------------------------------------------*/
      else
      {
         /* Diagonal part of P */
         P_diag_i[i] = jj_counter;
         for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter+1]; jj++)
         {
            P_diag_j[jj_counter]    = minus_Wp_diag_j[jj];
            P_diag_data[jj_counter] = - minus_Wp_diag_data[jj];
            jj_counter++;
         }

         /* Off-Diagonal part of P */
         P_offd_i[i] = jj_counter_offd;

         if (num_procs > 1)
         {
            for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter+1]; jj++)
            {
               P_offd_j[jj_counter_offd]    = minus_Wp_offd_j[jj];
               P_offd_data[jj_counter_offd] = - minus_Wp_offd_data[jj];
               jj_counter_offd++;
            }
         }
         row_counter++;
      }
      P_offd_i[i+1] = jj_counter_offd;
   }

   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;

   num_cols_P_offd = hypre_CSRMatrixNumCols(minus_Wp_offd);
   HYPRE_BigInt *col_map_offd_tmp = hypre_ParCSRMatrixColMapOffd(minus_Wp);
   if (P_offd_size)
   {
      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      for (i=0; i < num_cols_P_offd; i++)
      {
         col_map_offd_P[i] = col_map_offd_tmp[i];
      }
   }

   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }
   hypre_MatvecCommPkgCreate(P);

   *P_ptr = P;

   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(C_marker, HYPRE_MEMORY_HOST);
   hypre_TFree(F_marker, HYPRE_MEMORY_HOST);
   hypre_ParCSRMatrixDestroy(A_fc);
   hypre_ParCSRMatrixDestroy(minus_Wp);

   return 0;
}

HYPRE_Int
hypre_MGRBuildInterpApproximateInverse(hypre_ParCSRMatrix *A,
                                       HYPRE_Int *CF_marker,
                                       HYPRE_BigInt *num_cpts_global,
                                       HYPRE_Int debug_flag,
                                       hypre_ParCSRMatrix **P_ptr)
{
   HYPRE_Int *C_marker;
   HYPRE_Int *F_marker;
   hypre_ParCSRMatrix *A_ff;
   hypre_ParCSRMatrix *A_fc;
   hypre_ParCSRMatrix *A_ff_inv;
   hypre_ParCSRMatrix *minus_Wp;
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);

   hypre_ParCSRMatrix *P;
   HYPRE_BigInt *col_map_offd_P;
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   HYPRE_Real *P_diag_data;
   HYPRE_Int *P_diag_i;
   HYPRE_Int *P_diag_j;
   HYPRE_Real *P_offd_data;
   HYPRE_Int *P_offd_i;
   HYPRE_Int *P_offd_j;
   HYPRE_Int P_diag_size, P_offd_size;
   HYPRE_Int jj_counter,jj_counter_offd;
   //HYPRE_Int jj_begin_row,jj_begin_row_offd;
   //HYPRE_Int jj_end_row,jj_end_row_offd;
   HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   HYPRE_Int *fine_to_coarse = NULL;
   //HYPRE_Int *coarse_counter;
   HYPRE_Int coarse_counter;
   HYPRE_BigInt total_global_cpts;
   HYPRE_Int num_cols_P_offd;
   // HYPRE_BigInt my_first_cpt;
   HYPRE_Int i,jj;
   HYPRE_Real one = 1.0;
   HYPRE_Int my_id;
   HYPRE_Int num_procs;
   // HYPRE_Int num_threads;
   // HYPRE_Real wall_time; /* for debugging instrumentation */

   C_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   F_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);

   // create C and F markers
   for (i = 0; i < n_fine; i++)
   {
      C_marker[i] = (CF_marker[i] == 1) ? 1 : -1;
      F_marker[i] = (CF_marker[i] == 1) ? -1 : 1;
   }

   // Get A_FF
   hypre_MGRGetSubBlock(A, F_marker, F_marker, 0, &A_ff);
   // Get A_FC
   hypre_MGRGetSubBlock(A, F_marker, C_marker, 0, &A_fc);

   hypre_MGRApproximateInverse(A_ff, &A_ff_inv);
   hypre_ParCSRMatrixPrintIJ(A_ff_inv, 1, 1, "A_ff_inv");
   hypre_ParCSRMatrixPrintIJ(A_fc, 1, 1, "A_fc");
   minus_Wp = hypre_ParMatmul(A_ff_inv, A_fc);
   hypre_ParCSRMatrixPrintIJ(minus_Wp, 1, 1, "Wp");

   hypre_CSRMatrix *minus_Wp_diag = hypre_ParCSRMatrixDiag(minus_Wp);
   HYPRE_Real *minus_Wp_diag_data = hypre_CSRMatrixData(minus_Wp_diag);
   HYPRE_Int *minus_Wp_diag_i = hypre_CSRMatrixI(minus_Wp_diag);
   HYPRE_Int *minus_Wp_diag_j = hypre_CSRMatrixJ(minus_Wp_diag);

   hypre_CSRMatrix *minus_Wp_offd = hypre_ParCSRMatrixOffd(minus_Wp);
   HYPRE_Real *minus_Wp_offd_data = hypre_CSRMatrixData(minus_Wp_offd);
   HYPRE_Int *minus_Wp_offd_i = hypre_CSRMatrixI(minus_Wp_offd);
   HYPRE_Int *minus_Wp_offd_j = hypre_CSRMatrixJ(minus_Wp_offd);

   //hypre_CSRMatrix *minus_Wp_offd = hypre_ParCSRMatrixOffd(minus_Wp);
   //HYPRE_Int num_cols_minus_Wp_offd = hypre_CSRMatrixNumCols(minus_Wp_offd);

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   // num_threads = hypre_NumThreads();

#ifdef HYPRE_NO_GLOBAL_PARTITION
   // my_first_cpt = num_cpts_global[0];
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
   // my_first_cpt = num_cpts_global[my_id];
   total_global_cpts = num_cpts_global[num_procs];
#endif

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    * Initialize counters and allocate mapping vector.
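    *
    * Naming note: despite its name, minus_Wp holds A_ff_inv * A_fc; the
    * minus sign is applied entry-wise when P is filled below
    * (P_diag_data[k] = -minus_Wp_diag_data[k]).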
    *-----------------------------------------------------------------------*/
   //coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   //jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   //jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);

   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);

#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /*-----------------------------------------------------------------------
    * Loop over fine grid.
    *-----------------------------------------------------------------------*/
   HYPRE_Int row_counter = 0;
   coarse_counter = 0;
   for (i = 0; i < n_fine; i++)
   {
      /*--------------------------------------------------------------------
       * If i is a C-point, interpolation is the identity. Also set up
       * mapping vector.
       *--------------------------------------------------------------------*/
      if (CF_marker[i] > 0)
      {
         //jj_count[j]++;
         //fine_to_coarse[i] = coarse_counter[j];
         //coarse_counter[j]++;
         jj_counter++;
         fine_to_coarse[i] = coarse_counter;
         coarse_counter++;
      }
      else
      {
         /*--------------------------------------------------------------------
          * If i is an F-point, interpolation is the approximation of
          * A_{ff}^{-1}A_{fc}
          *--------------------------------------------------------------------*/
         for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter+1]; jj++)
         {
            //jj_count[j]++;
            jj_counter++;
         }

         if (num_procs > 1)
         {
            for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter+1]; jj++)
            {
               //jj_count_offd[j]++;
               jj_counter_offd++;
            }
         }
         row_counter++;
      }
   }

   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/
   /*
   for (i=0; i < num_threads-1; i++)
   {
      coarse_counter[i+1] += coarse_counter[i];
      jj_count[i+1] += jj_count[i];
      jj_count_offd[i+1] += jj_count_offd[i];
   }
   i = num_threads-1;
   jj_counter = jj_count[i];
   jj_counter_offd = jj_count_offd[i];
   */
   P_diag_size = jj_counter;

   P_diag_i    = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
   P_diag_j    = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE);

   P_diag_i[n_fine] = jj_counter;

   P_offd_size = jj_counter_offd;

   P_offd_i    = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
   P_offd_j    = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE);

   /*-----------------------------------------------------------------------
    * Initialize some stuff.
    *-----------------------------------------------------------------------*/
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /*
   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time);
      fflush(NULL);
   }
   */

   /*-----------------------------------------------------------------------
    * Send and receive fine_to_coarse info.
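    *
    * No exchange is actually needed in this variant: P inherits minus_Wp's
    * off-diagonal column numbering, and its col_map_offd is copied verbatim
    * further below, which is why the communication code that follows is
    * commented out.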
*-----------------------------------------------------------------------*/ /* if (num_procs > 1) { if (debug_flag==4) wall_time = time_getWallclockSeconds(); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_minus_Wp_offd, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { fine_to_coarse[i] += my_first_cpt; } comm_pkg = hypre_ParCSRMatrixCommPkg(minus_Wp); if (!comm_pkg) { hypre_MatvecCommPkgCreate(minus_Wp); comm_pkg = hypre_ParCSRMatrixCommPkg(minus_Wp); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time); fflush(NULL); } if (debug_flag==4) wall_time = time_getWallclockSeconds(); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; } */ row_counter = 0; for (i = 0; i < n_fine; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. 
*--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter+1]; jj++) { //P_marker[row_counter] = jj_counter; P_diag_j[jj_counter] = minus_Wp_diag_j[jj]; P_diag_data[jj_counter] = - minus_Wp_diag_data[jj]; jj_counter++; } /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; if (num_procs > 1) { for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter+1]; jj++) { //P_marker_offd[row_counter] = jj_counter_offd; P_offd_j[jj_counter_offd] = minus_Wp_offd_j[jj]; P_offd_data[jj_counter_offd] = - minus_Wp_offd_data[jj]; jj_counter_offd++; } } row_counter++; } P_offd_i[i+1] = jj_counter_offd; } //hypre_printf("Num rows of Wp = %d\n", row_counter); //P_offd_i[row_counter] = jj_counter_offd; P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; num_cols_P_offd = hypre_CSRMatrixNumCols(minus_Wp_offd); HYPRE_BigInt *col_map_offd_tmp = hypre_ParCSRMatrixColMapOffd(minus_Wp); if (P_offd_size) { col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_P_offd; i++) { col_map_offd_P[i] = col_map_offd_tmp[i]; } } /* num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_minus_Wp_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i=0; i < num_cols_minus_Wp_offd; i++) P_marker[i] = 0; num_cols_P_offd = 0; for (i=0; i < P_offd_size; i++) { index = P_offd_j[i]; if (!P_marker[index]) { num_cols_P_offd++; P_marker[index] = 1; } } col_map_offd_P = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); index = 0; for (i=0; i < num_cols_P_offd; i++) { while (P_marker[index]==0) index++; col_map_offd_P[i] = index++; } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i=0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(col_map_offd_P, P_offd_j[i], num_cols_P_offd); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } */ if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_MatvecCommPkgCreate(P); //hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse_offd); *P_ptr = P; //hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); //hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); //if (fine_to_coarse_offd) hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); //hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); //hypre_TFree(jj_count, HYPRE_MEMORY_HOST); //hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); hypre_TFree(C_marker, HYPRE_MEMORY_HOST); hypre_TFree(F_marker, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDestroy(A_ff); hypre_ParCSRMatrixDestroy(A_fc); hypre_ParCSRMatrixDestroy(A_ff_inv); hypre_ParCSRMatrixDestroy(minus_Wp); return 0; } /* Setup interpolation operator */ HYPRE_Int hypre_MGRBuildInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, 
HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, HYPRE_Int *col_offd_S_to_A, hypre_ParCSRMatrix **P, HYPRE_Int interp_type, HYPRE_Int numsweeps) { //HYPRE_Int i; hypre_ParCSRMatrix *P_ptr = NULL; //HYPRE_Real jac_trunc_threshold = trunc_factor; //HYPRE_Real jac_trunc_threshold_minus = 0.5*jac_trunc_threshold; /* Interpolation for each level */ if (interp_type <3) { hypre_MGRBuildP( A,CF_marker,num_cpts_global,interp_type,debug_flag,&P_ptr); /* Could do a few sweeps of Jacobi to further improve P */ //for(i=0; i<numsweeps; i++) // hypre_BoomerAMGJacobiInterp(A, &P_ptr, S,1, NULL, CF_marker, 0, jac_trunc_threshold, jac_trunc_threshold_minus ); //hypre_BoomerAMGInterpTruncation(P_ptr, trunc_factor, max_elmts); } else if (interp_type == 4) { hypre_MGRBuildInterpApproximateInverse(A, CF_marker, num_cpts_global, debug_flag, &P_ptr); hypre_BoomerAMGInterpTruncation(P_ptr, trunc_factor, max_elmts); } else if (interp_type == 99) { hypre_MGRBuildInterpApproximateInverseExp(A, S, CF_marker, num_cpts_global, debug_flag, &P_ptr); hypre_BoomerAMGInterpTruncation(P_ptr, trunc_factor, max_elmts); } else { /* Classical modified interpolation */ hypre_BoomerAMGBuildInterp(A, CF_marker, S, num_cpts_global,1, NULL,debug_flag, trunc_factor, max_elmts, col_offd_S_to_A, &P_ptr); /* Do k steps of Jacobi build W for P = [-W I]. * Note that BoomerAMGJacobiInterp assumes you have some initial P, * hence we need to initialize P as above, before calling this routine. * If numsweeps = 0, the following step is skipped and P is returned as is. * Looping here is equivalent to improving P by Jacobi interpolation */ //for(i=0; i<numsweeps; i++) // hypre_BoomerAMGJacobiInterp(A, &P_ptr, S,1, NULL, CF_marker, // 0, jac_trunc_threshold, // jac_trunc_threshold_minus ); } /* set pointer to P */ *P = P_ptr; return hypre_error_flag; } /* Setup restriction operator */ HYPRE_Int hypre_MGRBuildRestrict(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, HYPRE_Real S_commpkg_switch, HYPRE_Real strong_threshold, HYPRE_Real max_row_sum, hypre_ParCSRMatrix **R, HYPRE_Int restrict_type, HYPRE_Int numsweeps) { // HYPRE_Int i; hypre_ParCSRMatrix *R_ptr = NULL; hypre_ParCSRMatrix *AT = NULL; hypre_ParCSRMatrix *ST = NULL; HYPRE_Int *col_offd_ST_to_AT = NULL; // HYPRE_Real jac_trunc_threshold = trunc_factor; // HYPRE_Real jac_trunc_threshold_minus = 0.5*jac_trunc_threshold; /* Build AT (transpose A) */ if (restrict_type > 0) { hypre_ParCSRMatrixTranspose(A, &AT, 1); } if (restrict_type > 5) { /* Build new strength matrix */ hypre_BoomerAMGCreateS(AT, strong_threshold, max_row_sum, 1, NULL, &ST); /* use appropriate communication package for Strength matrix */ if (strong_threshold > S_commpkg_switch) hypre_BoomerAMGCreateSCommPkg(AT, ST, &col_offd_ST_to_AT); } /* Interpolation for each level */ if (restrict_type == 0) { hypre_MGRBuildP(A, CF_marker, num_cpts_global, restrict_type, debug_flag, &R_ptr); } else if (restrict_type == 1 || restrict_type == 2) { hypre_MGRBuildP(AT, CF_marker, num_cpts_global, restrict_type, debug_flag, &R_ptr); /* Could do a few sweeps of Jacobi to further improve P */ //for(i=0; i<numsweeps; i++) // hypre_BoomerAMGJacobiInterp(A, &R_ptr, S,1, NULL, CF_marker, 0, jac_trunc_threshold, jac_trunc_threshold_minus ); //hypre_BoomerAMGInterpTruncation(R_ptr, trunc_factor, max_elmts); } else if 
(restrict_type == 4) { hypre_MGRBuildInterpApproximateInverse(A, CF_marker, num_cpts_global, debug_flag, &R_ptr); hypre_BoomerAMGInterpTruncation(R_ptr, trunc_factor, max_elmts); } else { /* Classical modified interpolation */ hypre_BoomerAMGBuildInterp(AT, CF_marker, ST, num_cpts_global,1, NULL,debug_flag, trunc_factor, max_elmts, col_offd_ST_to_AT, &R_ptr); /* Do k steps of Jacobi build W for P = [-W I]. * Note that BoomerAMGJacobiInterp assumes you have some initial P, * hence we need to initialize P as above, before calling this routine. * If numsweeps = 0, the following step is skipped and P is returned as is. * Looping here is equivalent to improving P by Jacobi interpolation */ // for(i=0; i<numsweeps; i++) // hypre_BoomerAMGJacobiInterp(A, &R_ptr, S,1, NULL, CF_marker, 0, // jac_trunc_threshold, jac_trunc_threshold_minus); } /* set pointer to P */ *R = R_ptr; /* Free memory */ if (restrict_type > 0) { hypre_ParCSRMatrixDestroy(AT); } if (restrict_type > 5) { hypre_ParCSRMatrixDestroy(ST); if (col_offd_ST_to_AT) hypre_TFree(col_offd_ST_to_AT, HYPRE_MEMORY_HOST); } return hypre_error_flag; } void hypre_blas_smat_inv_n4 (HYPRE_Real *a) { const HYPRE_Real a11 = a[0], a12 = a[1], a13 = a[2], a14 = a[3]; const HYPRE_Real a21 = a[4], a22 = a[5], a23 = a[6], a24 = a[7]; const HYPRE_Real a31 = a[8], a32 = a[9], a33 = a[10], a34 = a[11]; const HYPRE_Real a41 = a[12], a42 = a[13], a43 = a[14], a44 = a[15]; const HYPRE_Real M11 = a22*a33*a44 + a23*a34*a42 + a24*a32*a43 - a22*a34*a43 - a23*a32*a44 - a24*a33*a42; const HYPRE_Real M12 = a12*a34*a43 + a13*a32*a44 + a14*a33*a42 - a12*a33*a44 - a13*a34*a42 - a14*a32*a43; const HYPRE_Real M13 = a12*a23*a44 + a13*a24*a42 + a14*a22*a43 - a12*a24*a43 - a13*a22*a44 - a14*a23*a42; const HYPRE_Real M14 = a12*a24*a33 + a13*a22*a34 + a14*a23*a32 - a12*a23*a34 - a13*a24*a32 - a14*a22*a33; const HYPRE_Real M21 = a21*a34*a43 + a23*a31*a44 + a24*a33*a41 - a21*a33*a44 - a23*a34*a41 - a24*a31*a43; const HYPRE_Real M22 = a11*a33*a44 + a13*a34*a41 + a14*a31*a43 - a11*a34*a43 - a13*a31*a44 - a14*a33*a41; const HYPRE_Real M23 = a11*a24*a43 + a13*a21*a44 + a14*a23*a41 - a11*a23*a44 - a13*a24*a41 - a14*a21*a43; const HYPRE_Real M24 = a11*a23*a34 + a13*a24*a31 + a14*a21*a33 - a11*a24*a33 - a13*a21*a34 - a14*a23*a31; const HYPRE_Real M31 = a21*a32*a44 + a22*a34*a41 + a24*a31*a42 - a21*a34*a42 - a22*a31*a44 - a24*a32*a41; const HYPRE_Real M32 = a11*a34*a42 + a12*a31*a44 + a14*a32*a41 - a11*a32*a44 - a12*a34*a41 - a14*a31*a42; const HYPRE_Real M33 = a11*a22*a44 + a12*a24*a41 + a14*a21*a42 - a11*a24*a42 - a12*a21*a44 - a14*a22*a41; const HYPRE_Real M34 = a11*a24*a32 + a12*a21*a34 + a14*a22*a31 - a11*a22*a34 - a12*a24*a31 - a14*a21*a32; const HYPRE_Real M41 = a21*a33*a42 + a22*a31*a43 + a23*a32*a41 - a21*a32*a43 - a22*a33*a41 - a23*a31*a42; const HYPRE_Real M42 = a11*a32*a43 + a12*a33*a41 + a13*a31*a42 - a11*a33*a42 - a12*a31*a43 - a13*a32*a41; const HYPRE_Real M43 = a11*a23*a42 + a12*a21*a43 + a13*a22*a41 - a11*a22*a43 - a12*a23*a41 - a13*a21*a42; const HYPRE_Real M44 = a11*a22*a33 + a12*a23*a31 + a13*a21*a32 - a11*a23*a32 - a12*a21*a33 - a13*a22*a31; const HYPRE_Real det = a11*M11 + a12*M21 + a13*M31 + a14*M41; HYPRE_Real det_inv; //if ( fabs(det) < 1e-22 ) { //hypre_printf("### WARNING: Matrix is nearly singular! 
   //                                det = %e\n", det);
   /*
   printf("##----------------------------------------------\n");
   printf("## %12.5e %12.5e %12.5e \n", a0, a1, a2);
   printf("## %12.5e %12.5e %12.5e \n", a3, a4, a5);
   printf("## %12.5e %12.5e %12.5e \n", a5, a6, a7);
   printf("##----------------------------------------------\n");
   getchar();
   */
   //}

   det_inv = 1.0/det;

   a[0]  = M11*det_inv;  a[1]  = M12*det_inv;  a[2]  = M13*det_inv;  a[3]  = M14*det_inv;
   a[4]  = M21*det_inv;  a[5]  = M22*det_inv;  a[6]  = M23*det_inv;  a[7]  = M24*det_inv;
   a[8]  = M31*det_inv;  a[9]  = M32*det_inv;  a[10] = M33*det_inv;  a[11] = M34*det_inv;
   a[12] = M41*det_inv;  a[13] = M42*det_inv;  a[14] = M43*det_inv;  a[15] = M44*det_inv;
}

/* In-place inverse of a dense n x n matrix a (row-major storage).
 * Uses a Gauss-Jordan sweep without pivoting; n == 4 is special-cased
 * via the closed-form cofactor inverse above. */
void hypre_blas_mat_inv(HYPRE_Real *a, HYPRE_Int n)
{
   HYPRE_Int i,j,k,l,u,kn,in;
   HYPRE_Real alinv;

   if (n == 4)
   {
      hypre_blas_smat_inv_n4(a);
   }
   else
   {
      for (k=0; k<n; ++k)
      {
         kn = k*n;
         l = kn+k;

         //if (fabs(a[l]) < SMALLREAL) {
         //   printf("### WARNING: Diagonal entry is close to zero!");
         //   printf("### WARNING: diag_%d=%e\n", k, a[l]);
         //   a[l] = SMALLREAL;
         //}
         alinv = 1.0/a[l];
         a[l] = alinv;

         for (j=0; j<k; ++j)
         {
            u = kn+j; a[u] *= alinv;
         }
         for (j=k+1; j<n; ++j)
         {
            u = kn+j; a[u] *= alinv;
         }
         for (i=0; i<k; ++i)
         {
            in = i*n;
            for (j=0; j<n; ++j)
               if (j!=k)
               {
                  u = in+j; a[u] -= a[in+k]*a[kn+j];
               } // end if (j!=k)
         }
         for (i=k+1; i<n; ++i)
         {
            in = i*n;
            for (j=0; j<n; ++j)
               if (j!=k)
               {
                  u = in+j; a[u] -= a[in+k]*a[kn+j];
               } // end if (j!=k)
         }
         for (i=0; i<k; ++i)
         {
            u=i*n+k; a[u] *= -alinv;
         }
         for (i=k+1; i<n; ++i)
         {
            u=i*n+k; a[u] *= -alinv;
         }
      } // end for (k=0; k<n; ++k)
   } // end if
}

HYPRE_Int
hypre_block_jacobi_scaling(hypre_ParCSRMatrix *A,
                           hypre_ParCSRMatrix **B_ptr,
                           void *mgr_vdata,
                           HYPRE_Int debug_flag)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;

   HYPRE_Int num_procs, my_id;
   HYPRE_Int blk_size = (mgr_data -> block_size);
   HYPRE_Int reserved_coarse_size = (mgr_data -> reserved_coarse_size);

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);

   hypre_ParCSRMatrix *B;
   hypre_CSRMatrix *B_diag;
   HYPRE_Real *B_diag_data;
   HYPRE_Int *B_diag_i;
   HYPRE_Int *B_diag_j;
   hypre_CSRMatrix *B_offd;

   HYPRE_Int i,ii;
   HYPRE_Int j,jj;
   HYPRE_Int k;

   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int n_block, left_size,inv_size;
   // HYPRE_Real wall_time; /* for debugging instrumentation */

   HYPRE_Int bidx,bidxm1,bidxp1;
   HYPRE_Real * diaginv;

   const HYPRE_Int nb2 = blk_size*blk_size;

   HYPRE_Int block_scaling_error = 0;

   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   // HYPRE_Int num_threads = hypre_NumThreads();

   //printf("n = %d\n",n);

   /* NOTE: ranks are numbered 0..num_procs-1, so this branch is never
    * taken and reserved_coarse_size is effectively ignored here; the
    * apparent intent is to exclude the reserved coarse unknowns on the
    * last rank. */
   if (my_id == num_procs)
   {
      n_block = (n - reserved_coarse_size) / blk_size;
      left_size = n - blk_size*n_block;
   }
   else
   {
      n_block = n / blk_size;
      left_size = n - blk_size*n_block;
   }

   inv_size = nb2*n_block + left_size*left_size;

   //printf("inv_size = %d\n",inv_size);

   hypre_blockRelax_setup(A,blk_size,reserved_coarse_size,&(mgr_data -> diaginv));

   // if (debug_flag==4) wall_time = time_getWallclockSeconds();

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of B and fill in
    *-----------------------------------------------------------------------*/
   B_diag_i    = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST);
   B_diag_j    = hypre_CTAlloc(HYPRE_Int, inv_size, HYPRE_MEMORY_HOST);
   B_diag_data = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);

   B_diag_i[n] = inv_size;

   //B_offd_i = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST);
   //B_offd_j = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST);
   //B_offd_data = hypre_CTAlloc(HYPRE_Real, 1, HYPRE_MEMORY_HOST);

   //B_offd_i[n] = 1;

   /*-----------------------------------------------------------------
    * Get all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   diaginv = hypre_CTAlloc(HYPRE_Real, nb2, HYPRE_MEMORY_HOST);
   //printf("n_block = %d\n",n_block);
   for (i = 0;i < n_block; i++)
   {
      bidxm1 = i*blk_size;
      bidxp1 = (i+1)*blk_size;

      for (k = 0;k < blk_size; k++)
      {
         for (j = 0;j < blk_size; j++)
         {
            bidx = k*blk_size + j;
            diaginv[bidx] = 0.0;
         }

         for (ii = A_diag_i[bidxm1+k]; ii < A_diag_i[bidxm1+k+1]; ii++)
         {
            jj = A_diag_j[ii];

            if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL)
            {
               bidx = k*blk_size + jj - bidxm1;
               //printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx);
               diaginv[bidx] = A_diag_data[ii];
            }
         }
      }

      /* for (k = 0;k < blk_size; k++) */
      /* { */
      /*    for (j = 0;j < blk_size; j++) */
      /*    { */
      /*       bidx = k*blk_size + j; */
      /*       printf("diaginv[%d] = %e\n",bidx,diaginv[bidx]); */
      /*    } */
      /* } */

      hypre_blas_mat_inv(diaginv, blk_size);

      for (k = 0;k < blk_size; k++)
      {
         B_diag_i[i*blk_size+k] = i*nb2 + k*blk_size;
         //B_offd_i[i*nb2+k] = 0;

         for (j = 0;j < blk_size; j++)
         {
            bidx = i*nb2 + k*blk_size + j;
            B_diag_j[bidx] = i*blk_size + j;
            B_diag_data[bidx] = diaginv[k*blk_size + j];
         }
      }
   }
   /* the scratch block is no longer needed once B is filled */
   hypre_TFree(diaginv, HYPRE_MEMORY_HOST);

   //printf("Before create\n");
   B = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                hypre_ParCSRMatrixGlobalNumCols(A),
                                hypre_ParCSRMatrixRowStarts(A),
                                hypre_ParCSRMatrixColStarts(A),
                                0,
                                inv_size,
                                0);
   //printf("After create\n");
   B_diag = hypre_ParCSRMatrixDiag(B);
   hypre_CSRMatrixData(B_diag) = B_diag_data;
   hypre_CSRMatrixI(B_diag) = B_diag_i;
   hypre_CSRMatrixJ(B_diag) = B_diag_j;
   B_offd = hypre_ParCSRMatrixOffd(B);
   hypre_CSRMatrixData(B_offd) = NULL;
   hypre_CSRMatrixI(B_offd) = NULL;
   hypre_CSRMatrixJ(B_offd) = NULL;
   /* hypre_ParCSRMatrixOwnsRowStarts(B) = 0; */

   *B_ptr = B;

   return(block_scaling_error);
}

HYPRE_Int
hypre_blockRelax_solve (hypre_ParCSRMatrix *A,
                        hypre_ParVector *f,
                        hypre_ParVector *u,
                        HYPRE_Real blk_size,
                        HYPRE_Int n_block,
                        HYPRE_Int left_size,
                        HYPRE_Int method,
                        HYPRE_Real *diaginv,
                        hypre_ParVector *Vtemp)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);

   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;

   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
   HYPRE_Real *u_data = hypre_VectorData(u_local);

   hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
   HYPRE_Real *f_data = hypre_VectorData(f_local);

   hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
   HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local);
   HYPRE_Real *Vext_data = NULL;
   HYPRE_Real *v_buf_data;

   HYPRE_Int i, j, k;
   HYPRE_Int ii, jj;
   HYPRE_Int bidx,bidx1;
   HYPRE_Int relax_error = 0;
   HYPRE_Int num_sends;
   HYPRE_Int index, start;
   HYPRE_Int num_procs, my_id;
   HYPRE_Real *res;

   const HYPRE_Int nb2 = blk_size*blk_size;
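   /* Block relaxation: for each block b of size blk_size, form the local
    * residual res_j = f_j - sum_k A(j,k)*x_k (x = previous iterate Vtemp
    * for Jacobi, current u for Gauss-Seidel), then update
    *    u_b += D_b^{-1} * res_b,
    * where D_b^{-1} is the dense inverse of the b-th diagonal sub-block,
    * stored row-major in diaginv[b*nb2 .. (b+1)*nb2-1]. For example, with
    * blk_size = 2 and D_b^{-1} = [d00 d01; d10 d11]:
    *    u[2b]   += d00*res[0] + d01*res[1];
    *    u[2b+1] += d10*res[0] + d11*res[1];
    */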
hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); // HYPRE_Int num_threads = hypre_NumThreads(); res = hypre_CTAlloc(HYPRE_Real, blk_size, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; //printf("u_old[%d] = %e\n",i,Vtemp_data[i]); } if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax points block by block *-----------------------------------------------------------------*/ for (i = 0;i < n_block; i++) { for (j = 0;j < blk_size; j++) { bidx = i*blk_size +j; res[j] = f_data[bidx]; for (jj = A_diag_i[bidx]; jj < A_diag_i[bidx+1]; jj++) { ii = A_diag_j[jj]; if (method == 0) { // Jacobi for diagonal part res[j] -= A_diag_data[jj] * Vtemp_data[ii]; } else if (method == 1) { // Gauss-Seidel for diagonal part res[j] -= A_diag_data[jj] * u_data[ii]; } else { // Default do Jacobi for diagonal part res[j] -= A_diag_data[jj] * Vtemp_data[ii]; } //printf("%d: Au= %e * %e =%e\n",ii,A_diag_data[jj],Vtemp_data[ii], res[j]); } for (jj = A_offd_i[bidx]; jj < A_offd_i[bidx+1]; jj++) { // always do Jacobi for off-diagonal part ii = A_offd_j[jj]; res[j] -= A_offd_data[jj] * Vext_data[ii]; } //printf("%d: res = %e\n",bidx,res[j]); } for (j = 0;j < blk_size; j++) { bidx1 = i*blk_size +j; for (k = 0;k < blk_size; k++) { bidx = i*nb2 +j*blk_size+k; u_data[bidx1] += res[k]*diaginv[bidx]; //printf("u[%d] = %e, diaginv[%d] = %e\n",bidx1,u_data[bidx1],bidx,diaginv[bidx]); } //printf("u[%d] = %e\n",bidx1,u_data[bidx1]); } } if (num_procs > 1) { hypre_TFree(Vext_data, HYPRE_MEMORY_HOST); hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); } hypre_TFree(res, HYPRE_MEMORY_HOST); return(relax_error); } HYPRE_Int hypre_block_gs (hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u, HYPRE_Real blk_size, HYPRE_Int n_block, HYPRE_Int left_size, HYPRE_Real *diaginv, hypre_ParVector *Vtemp) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle 
*comm_handle; HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Real *u_data = hypre_VectorData(u_local); hypre_Vector *f_local = hypre_ParVectorLocalVector(f); HYPRE_Real *f_data = hypre_VectorData(f_local); hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp); HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local); HYPRE_Real *Vext_data = NULL; HYPRE_Real *v_buf_data; HYPRE_Int i, j, k; HYPRE_Int ii, jj; HYPRE_Int bidx,bidx1; HYPRE_Int relax_error = 0; HYPRE_Int num_sends; HYPRE_Int index, start; HYPRE_Int num_procs, my_id; HYPRE_Real *res; const HYPRE_Int nb2 = blk_size*blk_size; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); //HYPRE_Int num_threads = hypre_NumThreads(); res = hypre_CTAlloc(HYPRE_Real, blk_size, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. 
 *-----------------------------------------------------------------*/
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (i = 0; i < n; i++)
   {
      Vtemp_data[i] = u_data[i];
   }

   if (num_procs > 1)
   {
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;
   }

   /*-----------------------------------------------------------------
    * Relax points block by block (Gauss-Seidel: the diagonal part
    * uses the already-updated iterate u)
    *-----------------------------------------------------------------*/
   for (i = 0; i < n_block; i++)
   {
      for (j = 0; j < blk_size; j++)
      {
         bidx = i*blk_size + j;
         res[j] = f_data[bidx];
         for (jj = A_diag_i[bidx]; jj < A_diag_i[bidx+1]; jj++)
         {
            ii = A_diag_j[jj];
            res[j] -= A_diag_data[jj] * u_data[ii];
         }
         for (jj = A_offd_i[bidx]; jj < A_offd_i[bidx+1]; jj++)
         {
            ii = A_offd_j[jj];
            res[j] -= A_offd_data[jj] * Vext_data[ii];
         }
      }
      for (j = 0; j < blk_size; j++)
      {
         bidx1 = i*blk_size + j;
         for (k = 0; k < blk_size; k++)
         {
            bidx = i*nb2 + j*blk_size + k;
            u_data[bidx1] += res[k]*diaginv[bidx];
         }
      }
   }

   if (num_procs > 1)
   {
      hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
      hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(res, HYPRE_MEMORY_HOST);

   return (relax_error);
}

/* Block smoother setup: extract and invert the diagonal sub-blocks of A */
HYPRE_Int
hypre_blockRelax_setup(hypre_ParCSRMatrix *A,
                       HYPRE_Int           blk_size,
                       HYPRE_Int           reserved_coarse_size,
                       HYPRE_Real        **diaginvptr)
{
   MPI_Comm         comm        = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag      = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i    = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j    = hypre_CSRMatrixJ(A_diag);
   HYPRE_Int        n           = hypre_CSRMatrixNumRows(A_diag);

   HYPRE_Int        i, j, k;
   HYPRE_Int        ii, jj;
   HYPRE_Int        bidx, bidxm1, bidxp1;
   HYPRE_Int        num_procs, my_id;

   const HYPRE_Int  nb2 = blk_size*blk_size;
   HYPRE_Int        n_block;
   HYPRE_Int        left_size, inv_size;
   HYPRE_Real      *diaginv = *diaginvptr;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   //HYPRE_Int num_threads = hypre_NumThreads();

   /* exclude the reserved coarse points (held by the last rank) from
    * the block partition */
   if (my_id == num_procs - 1)
   {
      n_block   = (n - reserved_coarse_size) / blk_size;
      left_size = n - blk_size*n_block;
   }
   else
   {
      n_block   = n / blk_size;
      left_size = n - blk_size*n_block;
   }

   inv_size = nb2*n_block + left_size*left_size;

   if (diaginv != NULL)
   {
      hypre_TFree(diaginv, HYPRE_MEMORY_HOST);
   }
   diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);

   /*-----------------------------------------------------------------
    * Get all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   for (i = 0; i < n_block; i++)
   {
      bidxm1 = i*blk_size;
      bidxp1 = (i+1)*blk_size;
      for (k = 0; k < blk_size; k++)
      {
         for (j = 0; j < blk_size; j++)
         {
            bidx = i*nb2 + k*blk_size + j;
            diaginv[bidx] = 0.0;
         }
         for (ii = A_diag_i[bidxm1+k]; ii < A_diag_i[bidxm1+k+1]; ii++)
         {
            jj = A_diag_j[ii];
            if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL)
            {
               bidx = i*nb2 + k*blk_size + jj - bidxm1;
               diaginv[bidx] = A_diag_data[ii];
            }
         }
      }
   }

   /* the leftover rows form one left_size x left_size block, stored
    * row-major right after the n_block regular blocks */
   for (i = 0; i < left_size; i++)
   {
      for (j = 0; j < left_size; j++)
      {
         bidx = n_block*nb2 + i*left_size + j;
         diaginv[bidx] = 0.0;
      }
      for (ii = A_diag_i[n_block*blk_size + i]; ii < A_diag_i[n_block*blk_size+i+1]; ii++)
      {
         jj = A_diag_j[ii];
         if (jj >= n_block*blk_size)
         {
            bidx = n_block*nb2 + i*left_size + jj - n_block*blk_size;
            diaginv[bidx] = A_diag_data[ii];
         }
      }
   }

   /*-----------------------------------------------------------------
    * compute the inverses of all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   if (blk_size > 1)
   {
      for (i = 0; i < n_block; i++)
      {
         hypre_blas_mat_inv(diaginv + i*nb2, blk_size);
      }
      hypre_blas_mat_inv(diaginv + n_block*nb2, left_size);
   }
   else
   {
      for (i = 0; i < n; i++)
      {
         // FIX-ME: zero-diagonal should be tested previously
         if (fabs(diaginv[i]) < SMALLREAL)
            diaginv[i] = 0.0;
         else
            diaginv[i] = 1.0 / diaginv[i];
      }
   }

   *diaginvptr = diaginv;

   return 1;
}

HYPRE_Int
hypre_blockRelax(hypre_ParCSRMatrix *A,
                 hypre_ParVector    *f,
                 hypre_ParVector    *u,
                 HYPRE_Int           blk_size,
                 HYPRE_Int           reserved_coarse_size,
                 HYPRE_Int           method,
                 hypre_ParVector    *Vtemp,
                 hypre_ParVector    *Ztemp)
{
   MPI_Comm         comm        = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag      = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i    = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j    = hypre_CSRMatrixJ(A_diag);
   HYPRE_Int        n           = hypre_CSRMatrixNumRows(A_diag);

   HYPRE_Int        i, j, k;
   HYPRE_Int        ii, jj;
   HYPRE_Int        bidx, bidxm1, bidxp1;
   HYPRE_Int        relax_error = 0;
   HYPRE_Int        num_procs, my_id;

   const HYPRE_Int  nb2 = blk_size*blk_size;
   HYPRE_Int        n_block;
   HYPRE_Int        left_size, inv_size;
   HYPRE_Real      *diaginv;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   //HYPRE_Int num_threads = hypre_NumThreads();

   /* exclude the reserved coarse points (held by the last rank) from
    * the block partition */
   if (my_id == num_procs - 1)
   {
      n_block   = (n - reserved_coarse_size) / blk_size;
      left_size = n - blk_size*n_block;
   }
   else
   {
      n_block   = n / blk_size;
      left_size = n - blk_size*n_block;
   }

   inv_size = nb2*n_block + left_size*left_size;
   diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);

   /*-----------------------------------------------------------------
    * Get all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   for (i = 0; i < n_block; i++)
   {
      bidxm1 = i*blk_size;
      bidxp1 = (i+1)*blk_size;
      for (k = 0; k < blk_size; k++)
      {
         for (j = 0; j < blk_size; j++)
         {
            bidx = i*nb2 + k*blk_size + j;
            diaginv[bidx] = 0.0;
         }
         for (ii = A_diag_i[bidxm1+k]; ii < A_diag_i[bidxm1+k+1]; ii++)
         {
            jj = A_diag_j[ii];
            if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL)
            {
               bidx = i*nb2 + k*blk_size + jj - bidxm1;
               diaginv[bidx] = A_diag_data[ii];
            }
         }
      }
   }

   /* leftover left_size x left_size block, as in hypre_blockRelax_setup */
   for (i = 0; i < left_size; i++)
   {
      for (j = 0; j < left_size; j++)
      {
         bidx = n_block*nb2 + i*left_size + j;
         diaginv[bidx] = 0.0;
      }
      for (ii = A_diag_i[n_block*blk_size + i]; ii < A_diag_i[n_block*blk_size+i+1]; ii++)
      {
         jj = A_diag_j[ii];
         if (jj >= n_block*blk_size)
         {
            bidx = n_block*nb2 + i*left_size + jj - n_block*blk_size;
            diaginv[bidx] = A_diag_data[ii];
         }
      }
   }

   /*-----------------------------------------------------------------
    * compute the inverses of all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   if (blk_size > 1)
   {
      for (i = 0; i < n_block; i++)
      {
         hypre_blas_mat_inv(diaginv + i*nb2, blk_size);
      }
      hypre_blas_mat_inv(diaginv + n_block*nb2, left_size);
   }
   else
   {
      for (i = 0; i < n; i++)
      {
         // FIX-ME: zero-diagonal should be tested previously
         if (fabs(diaginv[i]) < SMALLREAL)
            diaginv[i] = 0.0;
         else
            diaginv[i] = 1.0 / diaginv[i];
      }
   }

   hypre_blockRelax_solve(A, f, u, blk_size, n_block, left_size, method, diaginv, Vtemp);

   /*-----------------------------------------------------------------
    * Free temporary memory
    *-----------------------------------------------------------------*/
   hypre_TFree(diaginv, HYPRE_MEMORY_HOST);

   return (relax_error);
}

/* set fine grid solver */
HYPRE_Int
hypre_MGRSetFSolver( void  *mgr_vdata,
                     HYPRE_Int (*fine_grid_solver_solve)(void*, void*, void*, void*),
                     HYPRE_Int (*fine_grid_solver_setup)(void*, void*, void*, void*),
                     void  *fsolver )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;

   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
   HYPRE_Solver **aff_solver = (mgr_data -> aff_solver);

   if (aff_solver == NULL)
      aff_solver = hypre_CTAlloc(HYPRE_Solver*, max_num_coarse_levels, HYPRE_MEMORY_HOST);

   /* only allow to set F-solver for the first level */
   aff_solver[0] = (HYPRE_Solver *) fsolver;

   (mgr_data -> fine_grid_solver_solve) = fine_grid_solver_solve;
   (mgr_data -> fine_grid_solver_setup) = fine_grid_solver_setup;
   (mgr_data -> aff_solver) = aff_solver;
   (mgr_data -> use_default_fsolver) = 0;

   return hypre_error_flag;
}

/* set coarse grid solver */
HYPRE_Int
hypre_MGRSetCoarseSolver( void  *mgr_vdata,
                          HYPRE_Int (*coarse_grid_solver_solve)(void*, void*, void*, void*),
                          HYPRE_Int (*coarse_grid_solver_setup)(void*, void*, void*, void*),
                          void  *coarse_grid_solver )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;

   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   (mgr_data -> coarse_grid_solver_solve) = coarse_grid_solver_solve;
   (mgr_data -> coarse_grid_solver_setup) = coarse_grid_solver_setup;
   (mgr_data -> coarse_grid_solver)       = (HYPRE_Solver) coarse_grid_solver;
   (mgr_data -> use_default_cgrid_solver) = 0;

   return hypre_error_flag;
}

HYPRE_Int
hypre_MGRSetAffInv( void *mgr_vdata, hypre_ParCSRMatrix *A_ff_inv )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> A_ff_inv) = A_ff_inv;
   return hypre_error_flag;
}

/* Set the maximum number of coarse levels.
 * maxcoarselevs = 1 yields the default 2-grid scheme. */
HYPRE_Int
hypre_MGRSetMaxCoarseLevels( void *mgr_vdata, HYPRE_Int maxcoarselevs )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> max_num_coarse_levels) = maxcoarselevs;
   return hypre_error_flag;
}

/* Set the system block size */
HYPRE_Int
hypre_MGRSetBlockSize( void *mgr_vdata, HYPRE_Int bsize )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> block_size) = bsize;
   return hypre_error_flag;
}

/* Set the relaxation type for the fine levels of the reduction.
 * Currently supports the following flavors of relaxation types
 * as described in the documentation:
 * relax_types 0 - 8, 13, 14, 18, 19, 98.
 * See par_relax.c and par_relax_more.c for more details.
* */ HYPRE_Int hypre_MGRSetRelaxType( void *mgr_vdata, HYPRE_Int relax_type ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> relax_type) = relax_type; return hypre_error_flag; } /* Set the number of relaxation sweeps */ HYPRE_Int hypre_MGRSetNumRelaxSweeps( void *mgr_vdata, HYPRE_Int nsweeps ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_relax_sweeps) = nsweeps; return hypre_error_flag; } /* Set the F-relaxation strategy: 0=single level, 1=multi level */ HYPRE_Int hypre_MGRSetFRelaxMethod( void *mgr_vdata, HYPRE_Int relax_method ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if((mgr_data -> Frelax_method) != NULL) { hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_method) = NULL; } HYPRE_Int *Frelax_method = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); for (i=0; i < max_num_coarse_levels; i++) { Frelax_method[i] = relax_method; } (mgr_data -> Frelax_method) = Frelax_method; return hypre_error_flag; } /* Set the F-relaxation strategy: 0=single level, 1=multi level */ HYPRE_Int hypre_MGRSetLevelFRelaxMethod( void *mgr_vdata, HYPRE_Int *relax_method ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if((mgr_data -> Frelax_method) != NULL) { hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_method) = NULL; } HYPRE_Int *Frelax_method = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (relax_method != NULL) { for (i=0; i < max_num_coarse_levels; i++) { Frelax_method[i] = relax_method[i]; } } else { for (i = 0; i < max_num_coarse_levels; i++) { Frelax_method[i] = 0; } } (mgr_data -> Frelax_method) = Frelax_method; return hypre_error_flag; } /* Coarse grid method: 0=Galerkin RAP, 1=non-Galerkin with dropping*/ HYPRE_Int hypre_MGRSetCoarseGridMethod( void *mgr_vdata, HYPRE_Int *cg_method ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if((mgr_data -> use_non_galerkin_cg) != NULL) { hypre_TFree(mgr_data -> use_non_galerkin_cg, HYPRE_MEMORY_HOST); (mgr_data -> use_non_galerkin_cg) = NULL; } HYPRE_Int *use_non_galerkin_cg = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (cg_method != NULL) { for (i=0; i < max_num_coarse_levels; i++) { use_non_galerkin_cg[i] = cg_method[i]; } } else { for (i = 0; i < max_num_coarse_levels; i++) { use_non_galerkin_cg[i] = 0; } } (mgr_data -> use_non_galerkin_cg) = use_non_galerkin_cg; return hypre_error_flag; } /* Set the F-relaxation number of functions for each level */ HYPRE_Int hypre_MGRSetLevelFRelaxNumFunctions( void *mgr_vdata, HYPRE_Int *num_functions ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if((mgr_data -> Frelax_num_functions) != NULL) { hypre_TFree(mgr_data -> Frelax_num_functions, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_num_functions) = NULL; } HYPRE_Int *Frelax_num_functions = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (num_functions != NULL) { for (i=0; i < max_num_coarse_levels; i++) { Frelax_num_functions[i] = num_functions[i]; } } else { for (i = 0; i < max_num_coarse_levels; i++) { Frelax_num_functions[i] = 1; } } (mgr_data -> 
Frelax_num_functions) = Frelax_num_functions; return hypre_error_flag; } /* Set the type of the restriction type * for computing restriction operator */ HYPRE_Int hypre_MGRSetLevelRestrictType( void *mgr_vdata, HYPRE_Int *restrict_type) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> restrict_type) != NULL) { hypre_TFree((mgr_data -> restrict_type), HYPRE_MEMORY_HOST); (mgr_data -> restrict_type) = NULL; } HYPRE_Int *level_restrict_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (restrict_type != NULL) { for (i=0; i < max_num_coarse_levels; i++) { level_restrict_type[i] = *(restrict_type + i); } } else { for (i=0; i < max_num_coarse_levels; i++) { level_restrict_type[i] = 0; } } (mgr_data -> restrict_type) = level_restrict_type; return hypre_error_flag; } /* Set the type of the restriction type * for computing restriction operator */ HYPRE_Int hypre_MGRSetRestrictType( void *mgr_vdata, HYPRE_Int restrict_type) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> restrict_type) != NULL) { hypre_TFree((mgr_data -> restrict_type), HYPRE_MEMORY_HOST); (mgr_data -> restrict_type) = NULL; } HYPRE_Int *level_restrict_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); for (i=0; i < max_num_coarse_levels; i++) { level_restrict_type[i] = restrict_type; } (mgr_data -> restrict_type) = level_restrict_type; return hypre_error_flag; } /* Set the number of Jacobi interpolation iterations * for computing interpolation operator */ HYPRE_Int hypre_MGRSetNumRestrictSweeps( void *mgr_vdata, HYPRE_Int nsweeps ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_restrict_sweeps) = nsweeps; return hypre_error_flag; } /* Set the type of the interpolation * for computing interpolation operator */ HYPRE_Int hypre_MGRSetInterpType( void *mgr_vdata, HYPRE_Int interpType) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> interp_type) != NULL) { hypre_TFree((mgr_data -> interp_type), HYPRE_MEMORY_HOST); (mgr_data -> interp_type) = NULL; } HYPRE_Int *level_interp_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); for (i=0; i < max_num_coarse_levels; i++) { level_interp_type[i] = interpType; } (mgr_data -> interp_type) = level_interp_type; return hypre_error_flag; } /* Set the type of the interpolation * for computing interpolation operator */ HYPRE_Int hypre_MGRSetLevelInterpType( void *mgr_vdata, HYPRE_Int *interpType) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> interp_type) != NULL) { hypre_TFree((mgr_data -> interp_type), HYPRE_MEMORY_HOST); (mgr_data -> interp_type) = NULL; } HYPRE_Int *level_interp_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (interpType != NULL) { for (i=0; i < max_num_coarse_levels; i++) { level_interp_type[i] = *(interpType + i); } } else { for (i=0; i < max_num_coarse_levels; i++) { level_interp_type[i] = 2; } } (mgr_data -> interp_type) = level_interp_type; return hypre_error_flag; } /* Set the number of Jacobi interpolation iterations * for computing interpolation operator */ HYPRE_Int 
hypre_MGRSetNumInterpSweeps( void *mgr_vdata, HYPRE_Int nsweeps ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_interp_sweeps) = nsweeps; return hypre_error_flag; } /* Set the threshold to truncate the coarse grid at each * level of reduction */ HYPRE_Int hypre_MGRSetTruncateCoarseGridThreshold( void *mgr_vdata, HYPRE_Real threshold) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> truncate_coarse_grid_threshold) = threshold; return hypre_error_flag; } /* Set print level for mgr solver */ HYPRE_Int hypre_MGRSetPrintLevel( void *mgr_vdata, HYPRE_Int print_level ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> print_level) = print_level; return hypre_error_flag; } /* Set print level for mgr solver */ HYPRE_Int hypre_MGRSetLogging( void *mgr_vdata, HYPRE_Int logging ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> logging) = logging; return hypre_error_flag; } /* Set max number of iterations for mgr solver */ HYPRE_Int hypre_MGRSetMaxIter( void *mgr_vdata, HYPRE_Int max_iter ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> max_iter) = max_iter; return hypre_error_flag; } /* Set convergence tolerance for mgr solver */ HYPRE_Int hypre_MGRSetTol( void *mgr_vdata, HYPRE_Real tol ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> tol) = tol; return hypre_error_flag; } /* Set max number of iterations for mgr global smoother */ HYPRE_Int hypre_MGRSetMaxGlobalsmoothIters( void *mgr_vdata, HYPRE_Int max_iter ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> global_smooth_iters) = max_iter; return hypre_error_flag; } /* Set global smoothing type for mgr solver */ HYPRE_Int hypre_MGRSetGlobalsmoothType( void *mgr_vdata, HYPRE_Int iter_type ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> global_smooth_type) = iter_type; return hypre_error_flag; } /* Set the maximum number of non-zero entries for restriction and interpolation operator if classical AMG interpolation is used */ HYPRE_Int hypre_MGRSetPMaxElmts( void *mgr_vdata, HYPRE_Int P_max_elmts) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> P_max_elmts) = P_max_elmts; return hypre_error_flag; } /* Get number of iterations for MGR solver */ HYPRE_Int hypre_MGRGetNumIterations( void *mgr_vdata, HYPRE_Int *num_iterations ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } *num_iterations = mgr_data->num_iterations; return hypre_error_flag; } /* Get residual norms for MGR solver */ HYPRE_Int hypre_MGRGetFinalRelativeResidualNorm( void *mgr_vdata, HYPRE_Real *res_norm ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } *res_norm = mgr_data->final_rel_residual_norm; return hypre_error_flag; } HYPRE_Int hypre_MGRGetCoarseGridConvergenceFactor( void *mgr_vdata , HYPRE_Real *conv_factor ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } *conv_factor = (mgr_data -> cg_convergence_factor); return hypre_error_flag; } /* Build A_FF matrix from A given a CF_marker array */ HYPRE_Int hypre_MGRGetSubBlock( hypre_ParCSRMatrix *A, HYPRE_Int *row_cf_marker, HYPRE_Int *col_cf_marker, HYPRE_Int debug_flag, hypre_ParCSRMatrix **A_block_ptr ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); 
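
   /* hypre_MGRGetSubBlock extracts the sub-matrix of A whose rows have
    * row_cf_marker > 0 and whose columns have col_cf_marker > 0.
    * Pass 1 below counts the entries per thread chunk and builds the
    * fine_to_coarse column map; pass 2 fills the diag/offd CSR arrays
    * of the new ParCSR matrix. */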
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); //HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int *coarse_dof_func_ptr = NULL; HYPRE_BigInt *num_row_cpts_global = NULL; HYPRE_BigInt *num_col_cpts_global = NULL; hypre_ParCSRMatrix *Ablock; HYPRE_BigInt *col_map_offd_Ablock; HYPRE_Int *tmp_map_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; hypre_CSRMatrix *Ablock_diag; hypre_CSRMatrix *Ablock_offd; HYPRE_Real *Ablock_diag_data; HYPRE_Int *Ablock_diag_i; HYPRE_Int *Ablock_diag_j; HYPRE_Real *Ablock_offd_data; HYPRE_Int *Ablock_offd_i; HYPRE_Int *Ablock_offd_j; HYPRE_Int Ablock_diag_size, Ablock_offd_size; HYPRE_Int *Ablock_marker; HYPRE_Int ii_counter; HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; HYPRE_Int start_indexing = 0; /* start indexing for Aff_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int *fine_to_coarse; HYPRE_Int *coarse_counter; HYPRE_Int *col_coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_row_cpts; HYPRE_BigInt total_global_col_cpts; HYPRE_Int num_cols_Ablock_offd; // HYPRE_BigInt my_first_row_cpt, my_first_col_cpt; HYPRE_Int i,i1; HYPRE_Int j,jl,jj; HYPRE_Int start; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); // HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); //num_threads = hypre_NumThreads(); // Temporary fix, disable threading // TODO: enable threading num_threads = 1; /* get the number of coarse rows */ hypre_BoomerAMGCoarseParms(comm, local_numrows, 1, NULL, row_cf_marker, &coarse_dof_func_ptr, &num_row_cpts_global); hypre_TFree(coarse_dof_func_ptr, HYPRE_MEMORY_HOST); coarse_dof_func_ptr = NULL; //hypre_printf("my_id = %d, cpts_this = %d, cpts_next = %d\n", my_id, num_row_cpts_global[0], num_row_cpts_global[1]); #ifdef HYPRE_NO_GLOBAL_PARTITION // my_first_row_cpt = num_row_cpts_global[0]; if (my_id == (num_procs -1)) total_global_row_cpts = num_row_cpts_global[1]; hypre_MPI_Bcast(&total_global_row_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else // my_first_row_cpt = num_row_cpts_global[my_id]; total_global_row_cpts = num_row_cpts_global[num_procs]; #endif /* get the number of coarse rows */ hypre_BoomerAMGCoarseParms(comm, local_numrows, 1, NULL, col_cf_marker, &coarse_dof_func_ptr, &num_col_cpts_global); hypre_TFree(coarse_dof_func_ptr, HYPRE_MEMORY_HOST); coarse_dof_func_ptr = NULL; //hypre_printf("my_id = %d, cpts_this = %d, cpts_next = %d\n", my_id, num_col_cpts_global[0], num_col_cpts_global[1]); #ifdef HYPRE_NO_GLOBAL_PARTITION // my_first_col_cpt = num_col_cpts_global[0]; if (my_id == (num_procs -1)) total_global_col_cpts = num_col_cpts_global[1]; hypre_MPI_Bcast(&total_global_col_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else // my_first_col_cpt = num_col_cpts_global[my_id]; total_global_col_cpts = 
num_col_cpts_global[num_procs]; #endif /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag < 0) { debug_flag = -debug_flag; } // if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = col_cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); /*----------------------------------------------------------------------- * First Pass: Determine size of Ablock and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); col_coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a F-point, we loop through the columns and select * the F-columns. Also set up mapping vector. *--------------------------------------------------------------------*/ if (col_cf_marker[i] > 0) { fine_to_coarse[i] = col_coarse_counter[j]; col_coarse_counter[j]++; } if (row_cf_marker[i] > 0) { //fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; if (col_cf_marker[i1] > 0) { jj_count[j]++; } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if (CF_marker_offd[i1] > 0) { jj_count_offd[j]++; } } } } } } /*----------------------------------------------------------------------- * Allocate arrays. 
*-----------------------------------------------------------------------*/ for (i=0; i < num_threads-1; i++) { jj_count[i+1] += jj_count[i]; jj_count_offd[i+1] += jj_count_offd[i]; coarse_counter[i+1] += coarse_counter[i]; col_coarse_counter[i+1] += col_coarse_counter[i]; } i = num_threads-1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; ii_counter = coarse_counter[i]; Ablock_diag_size = jj_counter; Ablock_diag_i = hypre_CTAlloc(HYPRE_Int, ii_counter+1, HYPRE_MEMORY_HOST); Ablock_diag_j = hypre_CTAlloc(HYPRE_Int, Ablock_diag_size, HYPRE_MEMORY_HOST); Ablock_diag_data = hypre_CTAlloc(HYPRE_Real, Ablock_diag_size, HYPRE_MEMORY_HOST); Ablock_diag_i[ii_counter] = jj_counter; Ablock_offd_size = jj_counter_offd; Ablock_offd_i = hypre_CTAlloc(HYPRE_Int, ii_counter+1, HYPRE_MEMORY_HOST); Ablock_offd_j = hypre_CTAlloc(HYPRE_Int, Ablock_offd_size, HYPRE_MEMORY_HOST); Ablock_offd_data = hypre_CTAlloc(HYPRE_Real, Ablock_offd_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; //----------------------------------------------------------------------- // Send and receive fine_to_coarse info. //----------------------------------------------------------------------- // if (debug_flag==4) wall_time = time_getWallclockSeconds(); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) coarse_shift = col_coarse_counter[j-1]; size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) fine_to_coarse[i] += coarse_shift; } // if (debug_flag==4) wall_time = time_getWallclockSeconds(); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif // for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_col_cpt; #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,jl,i1,jj,ns,ne,size,rest,jj_counter,jj_counter_offd,ii_counter) HYPRE_SMP_SCHEDULE #endif #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (jl < rest) { ns = jl*size+jl; ne = (jl+1)*size+jl+1; } else { ns = jl*size+rest; ne = (jl+1)*size+rest; } jj_counter = 0; if (jl > 0) jj_counter = jj_count[jl-1]; jj_counter_offd = 0; if (jl > 0) jj_counter_offd = jj_count_offd[jl-1]; ii_counter = 0; for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a F-point, we loop through the columns and select * the F-columns. Also set up mapping vector. 
*--------------------------------------------------------------------*/ if (row_cf_marker[i] > 0) { // Diagonal part of Ablock // Ablock_diag_i[ii_counter] = jj_counter; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; if (col_cf_marker[i1] > 0) { Ablock_diag_j[jj_counter] = fine_to_coarse[i1]; Ablock_diag_data[jj_counter] = A_diag_data[jj]; jj_counter++; } } // Off-Diagonal part of Ablock // Ablock_offd_i[ii_counter] = jj_counter_offd; if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if (CF_marker_offd[i1] > 0) { Ablock_offd_j[jj_counter_offd] = i1; Ablock_offd_data[jj_counter_offd] = A_offd_data[jj]; jj_counter_offd++; } } } ii_counter++; } } Ablock_offd_i[ii_counter] = jj_counter_offd; Ablock_diag_i[ii_counter] = jj_counter; } Ablock = hypre_ParCSRMatrixCreate(comm, total_global_row_cpts, total_global_col_cpts, num_row_cpts_global, num_col_cpts_global, 0, Ablock_diag_i[ii_counter], Ablock_offd_i[ii_counter]); Ablock_diag = hypre_ParCSRMatrixDiag(Ablock); hypre_CSRMatrixData(Ablock_diag) = Ablock_diag_data; hypre_CSRMatrixI(Ablock_diag) = Ablock_diag_i; hypre_CSRMatrixJ(Ablock_diag) = Ablock_diag_j; Ablock_offd = hypre_ParCSRMatrixOffd(Ablock); hypre_CSRMatrixData(Ablock_offd) = Ablock_offd_data; hypre_CSRMatrixI(Ablock_offd) = Ablock_offd_i; hypre_CSRMatrixJ(Ablock_offd) = Ablock_offd_j; num_cols_Ablock_offd = 0; if (Ablock_offd_size) { Ablock_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i=0; i < num_cols_A_offd; i++) Ablock_marker[i] = 0; num_cols_Ablock_offd = 0; for (i=0; i < Ablock_offd_size; i++) { index = Ablock_offd_j[i]; if (!Ablock_marker[index]) { num_cols_Ablock_offd++; Ablock_marker[index] = 1; } } col_map_offd_Ablock = hypre_CTAlloc(HYPRE_BigInt, num_cols_Ablock_offd, HYPRE_MEMORY_HOST); tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_Ablock_offd, HYPRE_MEMORY_HOST); index = 0; for (i=0; i < num_cols_Ablock_offd; i++) { while (Ablock_marker[index]==0) index++; tmp_map_offd[i] = index++; } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i=0; i < Ablock_offd_size; i++) Ablock_offd_j[i] = hypre_BinarySearch(tmp_map_offd, Ablock_offd_j[i], num_cols_Ablock_offd); hypre_TFree(Ablock_marker, HYPRE_MEMORY_HOST); } if (num_cols_Ablock_offd) { hypre_ParCSRMatrixColMapOffd(Ablock) = col_map_offd_Ablock; hypre_CSRMatrixNumCols(Ablock_offd) = num_cols_Ablock_offd; } hypre_GetCommPkgRTFromCommPkgA(Ablock, A, fine_to_coarse, tmp_map_offd); #ifdef HYPRE_NO_GLOBAL_PARTITION /* Create the assumed partition */ if (hypre_ParCSRMatrixAssumedPartition(Ablock) == NULL) { hypre_ParCSRMatrixCreateAssumedPartition(Ablock); } #endif *A_block_ptr= Ablock; hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(col_coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); return(0); } /* Build A_FF matrix from A given a CF_marker array */ HYPRE_Int hypre_MGRBuildAff( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_Int debug_flag, hypre_ParCSRMatrix **A_ff_ptr ) { HYPRE_Int i; HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); /* create a copy of the CF_marker array and 
switch C-points to F-points */ HYPRE_Int *CF_marker_copy = hypre_CTAlloc(HYPRE_Int, local_numrows, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < local_numrows; i++) { CF_marker_copy[i] = -CF_marker[i]; } hypre_MGRGetSubBlock(A, CF_marker_copy, CF_marker_copy, debug_flag, A_ff_ptr); /* Free copy of CF marker */ hypre_TFree(CF_marker_copy, HYPRE_MEMORY_HOST); return(0); } /********************************************************************************* * This routine assumes that the 'toVector' is larger than the 'fromVector' and * the CF_marker is of the same length as the toVector. There must be n 'point_type' * values in the CF_marker, where n is the length of the 'fromVector'. * It adds the values of the 'fromVector' to the 'toVector' where the marker is the * same as the 'point_type' *********************************************************************************/ HYPRE_Int hypre_MGRAddVectorP ( HYPRE_Int *CF_marker, HYPRE_Int point_type, HYPRE_Real a, hypre_ParVector *fromVector, HYPRE_Real b, hypre_ParVector **toVector ) { hypre_Vector *fromVectorLocal = hypre_ParVectorLocalVector(fromVector); HYPRE_Real *fromVectorData = hypre_VectorData(fromVectorLocal); hypre_Vector *toVectorLocal = hypre_ParVectorLocalVector(*toVector); HYPRE_Real *toVectorData = hypre_VectorData(toVectorLocal); HYPRE_Int n = hypre_ParVectorActualLocalSize(*toVector); HYPRE_Int i, j; j = 0; for (i = 0; i < n; i++) { if (CF_marker[i] == point_type) { toVectorData[i] = b * toVectorData[i] + a * fromVectorData[j]; j++; } } return 0; } /************************************************************************************* * This routine assumes that the 'fromVector' is larger than the 'toVector' and * the CF_marker is of the same length as the fromVector. There must be n 'point_type' * values in the CF_marker, where n is the length of the 'toVector'. 
* It adds the values of the 'fromVector' where the marker is the * same as the 'point_type' to the 'toVector' *************************************************************************************/ HYPRE_Int hypre_MGRAddVectorR ( HYPRE_Int *CF_marker, HYPRE_Int point_type, HYPRE_Real a, hypre_ParVector *fromVector, HYPRE_Real b, hypre_ParVector **toVector ) { hypre_Vector *fromVectorLocal = hypre_ParVectorLocalVector(fromVector); HYPRE_Real *fromVectorData = hypre_VectorData(fromVectorLocal); hypre_Vector *toVectorLocal = hypre_ParVectorLocalVector(*toVector); HYPRE_Real *toVectorData = hypre_VectorData(toVectorLocal); HYPRE_Int n = hypre_ParVectorActualLocalSize(fromVector); HYPRE_Int i, j; j = 0; for (i = 0; i < n; i++) { if (CF_marker[i] == point_type) { toVectorData[j] = b * toVectorData[j] + a * fromVectorData[i]; j++; } } return 0; } /* HYPRE_Int hypre_MGRBuildAffRAP( MPI_Comm comm, HYPRE_Int local_num_variables, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int *CF_marker, HYPRE_Int **coarse_dof_func_ptr, HYPRE_BigInt **coarse_pnts_global_ptr, hypre_ParCSRMatrix *A, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_f_ptr, hypre_ParCSRMatrix **A_ff_ptr ) { HYPRE_Int *CF_marker_copy = hypre_CTAlloc(HYPRE_Int, local_num_variables, HYPRE_MEMORY_HOST); HYPRE_Int i; for (i = 0; i < local_num_variables; i++) { CF_marker_copy[i] = -CF_marker[i]; } hypre_BoomerAMGCoarseParms(comm, local_num_variables, 1, NULL, CF_marker_copy, coarse_dof_func_ptr, coarse_pnts_global_ptr); hypre_MGRBuildP(A, CF_marker_copy, (*coarse_pnts_global_ptr), 0, debug_flag, P_f_ptr); hypre_BoomerAMGBuildCoarseOperator(*P_f_ptr, A, *P_f_ptr, A_ff_ptr); hypre_TFree(CF_marker_copy, HYPRE_MEMORY_HOST); return 0; } */ /* Get pointer to coarse grid matrix for MGR solver */ HYPRE_Int hypre_MGRGetCoarseGridMatrix( void *mgr_vdata, hypre_ParCSRMatrix **RAP ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } if (mgr_data -> RAP == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," Coarse grid matrix is NULL. Please make sure MGRSetup() is called \n"); return hypre_error_flag; } *RAP = mgr_data->RAP; return hypre_error_flag; } /* Get pointer to coarse grid solution for MGR solver */ HYPRE_Int hypre_MGRGetCoarseGridSolution( void *mgr_vdata, hypre_ParVector **sol ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } if (mgr_data -> U_array == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," MGR solution array is NULL. Please make sure MGRSetup() and MGRSolve() are called \n"); return hypre_error_flag; } *sol = mgr_data->U_array[mgr_data->num_coarse_levels]; return hypre_error_flag; } /* Get pointer to coarse grid solution for MGR solver */ HYPRE_Int hypre_MGRGetCoarseGridRHS( void *mgr_vdata, hypre_ParVector **rhs ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } if (mgr_data -> F_array == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," MGR RHS array is NULL. 
Please make sure MGRSetup() and MGRSolve() are called \n"); return hypre_error_flag; } *rhs = mgr_data->F_array[mgr_data->num_coarse_levels]; return hypre_error_flag; } /* Print coarse grid linear system (for debugging)*/ HYPRE_Int hypre_MGRPrintCoarseSystem( void *mgr_vdata, HYPRE_Int print_flag) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; mgr_data->print_coarse_system = print_flag; return hypre_error_flag; } /* Print solver params */ HYPRE_Int hypre_MGRWriteSolverParams(void *mgr_vdata) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i, j; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); hypre_printf("MGR Setup parameters: \n"); hypre_printf("Block size: %d\n", (mgr_data -> block_size)); hypre_printf("Max number of coarse levels: %d\n", (mgr_data -> max_num_coarse_levels)); hypre_printf("Relax type: %d\n", (mgr_data -> relax_type)); hypre_printf("Set non-Cpoints to F-points: %d\n", (mgr_data -> set_non_Cpoints_to_F)); hypre_printf("Set Cpoints method: %d\n", (mgr_data -> set_c_points_method)); for (i = 0; i < max_num_coarse_levels; i++) { hypre_printf("Lev = %d, Interpolation type: %d\n", i, (mgr_data -> interp_type)[i]); hypre_printf("Lev = %d, Restriction type: %d\n", i, (mgr_data -> restrict_type)[i]); hypre_printf("Lev = %d, F-relaxation method: %d\n", i, (mgr_data -> Frelax_method)[i]); hypre_printf("Lev = %d, Use non-Galerkin coarse grid: %d\n", i, (mgr_data -> use_non_galerkin_cg)[i]); HYPRE_Int lvl_num_coarse_points = (mgr_data -> block_num_coarse_indexes)[i]; hypre_printf("Lev = %d, Number of Cpoints: %d\n", i, lvl_num_coarse_points); hypre_printf("Cpoints indices: "); for (j = 0; j < lvl_num_coarse_points; j++) { if ((mgr_data -> block_cf_marker)[i][j] == 1) { hypre_printf("%d ", j); } } hypre_printf("\n"); } hypre_printf("Number of Reserved Cpoints: %d\n", (mgr_data -> reserved_coarse_size)); hypre_printf("Keep reserved Cpoints to level: %d\n", (mgr_data -> lvl_to_keep_cpoints)); hypre_printf("\n MGR Solver Parameters: \n"); hypre_printf("Number of relax sweeps: %d\n", (mgr_data -> num_relax_sweeps)); hypre_printf("Number of interpolation sweeps: %d\n", (mgr_data -> num_interp_sweeps)); hypre_printf("Number of restriction sweeps: %d\n", (mgr_data -> num_restrict_sweeps)); hypre_printf("Global smoother type: %d\n", (mgr_data ->global_smooth_type)); hypre_printf("Number of global smoother sweeps: %d\n", (mgr_data ->global_smooth_iters)); hypre_printf("Max number of iterations: %d\n", (mgr_data -> max_iter)); hypre_printf("Stopping tolerance: %e\n", (mgr_data -> tol)); hypre_printf("Use default coarse grid solver: %d\n", (mgr_data -> use_default_cgrid_solver)); if((mgr_data -> use_default_fsolver) >= 0) { hypre_printf("Use default AMG solver for full AMG F-relaxation: %d\n", (mgr_data -> use_default_fsolver)); } return hypre_error_flag; } #ifdef HYPRE_USING_DSUPERLU void * hypre_MGRDirectSolverCreate() { hypre_DSLUData *dslu_data = hypre_CTAlloc(hypre_DSLUData, 1, HYPRE_MEMORY_HOST); return (void *) dslu_data; } HYPRE_Int hypre_MGRDirectSolverSetup( void *solver, hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u ) { /* Par Data Structure variables */ HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_local; HYPRE_Int num_rows; HYPRE_Int num_procs, my_id; HYPRE_Int pcols=1, prows=1; HYPRE_BigInt *big_rowptr = NULL; hypre_DSLUData *dslu_data = (hypre_DSLUData *) solver; HYPRE_Int info = 0; HYPRE_Int nrhs = 0; 
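
   /* Setup flow: merge the diag and offd parts of A into a single
    * local CSR matrix with global column ids, wrap it as a
    * SuperLU_DIST SLU_NR_loc SuperMatrix, build a near-square process
    * grid, and call pdgssvx with nrhs = 0 so only the LU factorization
    * is performed; the factors are cached (Fact = FACTORED) for
    * subsequent solves. */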
hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); /* Merge diag and offd into one matrix (global ids) */ A_local = hypre_MergeDiagAndOffd(A); num_rows = hypre_CSRMatrixNumRows(A_local); /* Now convert hypre matrix to a SuperMatrix */ #ifdef HYPRE_MIXEDINT { HYPRE_Int *rowptr = NULL; HYPRE_Int i; rowptr = hypre_CSRMatrixI(A_local); big_rowptr = hypre_CTAlloc(HYPRE_BigInt, (num_rows+1), HYPRE_MEMORY_HOST); for(i=0; i<(num_rows+1); i++) { big_rowptr[i] = (HYPRE_BigInt)rowptr[i]; } } #else big_rowptr = hypre_CSRMatrixI(A_local); #endif dCreate_CompRowLoc_Matrix_dist( &(dslu_data->A_dslu),global_num_rows,global_num_rows, hypre_CSRMatrixNumNonzeros(A_local), num_rows, hypre_ParCSRMatrixFirstRowIndex(A), hypre_CSRMatrixData(A_local), hypre_CSRMatrixBigJ(A_local),big_rowptr, SLU_NR_loc, SLU_D, SLU_GE); /* DOK: SuperLU frees assigned data, so set them to null before * calling hypre_CSRMatrixdestroy on A_local to avoid memory errors. */ #ifndef HYPRE_MIXEDINT hypre_CSRMatrixI(A_local) = NULL; #endif hypre_CSRMatrixData(A_local) = NULL; hypre_CSRMatrixBigJ(A_local) = NULL; hypre_CSRMatrixDestroy(A_local); /*Create process grid */ while (prows*pcols <= num_procs) ++prows; --prows; pcols = num_procs/prows; while (prows*pcols != num_procs) { prows -= 1; pcols = num_procs/prows; } //hypre_printf(" prows %d pcols %d\n", prows, pcols); superlu_gridinit(comm, prows, pcols, &(dslu_data->dslu_data_grid)); set_default_options_dist(&(dslu_data->dslu_options)); dslu_data->dslu_options.Fact = DOFACT; dslu_data->dslu_options.PrintStat = NO; /*dslu_data->dslu_options.IterRefine = SLU_DOUBLE; dslu_data->dslu_options.ColPerm = MMD_AT_PLUS_A; dslu_data->dslu_options.DiagPivotThresh = 1.0; dslu_data->dslu_options.ReplaceTinyPivot = NO; */ dScalePermstructInit(global_num_rows, global_num_rows, &(dslu_data->dslu_ScalePermstruct)); dLUstructInit(global_num_rows, &(dslu_data->dslu_data_LU)); PStatInit(&(dslu_data->dslu_data_stat)); dslu_data->global_num_rows = global_num_rows; dslu_data->berr = hypre_CTAlloc(HYPRE_Real, 1, HYPRE_MEMORY_HOST); dslu_data->berr[0] = 0.0; pdgssvx(&(dslu_data->dslu_options), &(dslu_data->A_dslu), &(dslu_data->dslu_ScalePermstruct), NULL, num_rows, nrhs, &(dslu_data->dslu_data_grid), &(dslu_data->dslu_data_LU), &(dslu_data->dslu_solve), dslu_data->berr, &(dslu_data->dslu_data_stat), &info); dslu_data->dslu_options.Fact = FACTORED; return hypre_error_flag; } HYPRE_Int hypre_MGRDirectSolverSolve( void *solver, hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u ) { hypre_SLUDistSolve(solver, f, u); return hypre_error_flag; } HYPRE_Int hypre_MGRDirectSolverDestroy( void *solver ) { hypre_SLUDistDestroy(solver); return hypre_error_flag; } #endif
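
/*
 * A minimal, self-contained sketch (not hypre code) of the block-Jacobi
 * update that hypre_blockRelax_setup / hypre_blockRelax_solve implement
 * above: precompute the dense inverses of the diagonal blocks, then
 * sweep u_b += inv(D_b) * (f - A*u)_b. Plain C on a tiny dense 4x4
 * system with 2x2 blocks; the closed-form 2x2 inverse stands in for
 * hypre_blas_mat_inv, and the full-matrix residual stands in for the
 * CSR diag/offd traversal.
 */
#include <stdio.h>

static void inv2x2(double m[4])  /* in-place inverse of a 2x2 block */
{
   double a = m[0], b = m[1], c = m[2], d = m[3];
   double det = a*d - b*c;       /* assumed nonzero in this toy */
   m[0] =  d/det; m[1] = -b/det;
   m[2] = -c/det; m[3] =  a/det;
}

int main(void)
{
   enum { N = 4, BS = 2, NB = N/BS };
   double A[N][N] = { { 4, 1, 1, 0 },
                      { 1, 4, 0, 1 },
                      { 1, 0, 4, 1 },
                      { 0, 1, 1, 4 } };
   double f[N] = { 1, 1, 1, 1 };
   double u[N] = { 0 }, uold[N], res[BS];
   double diaginv[NB][BS*BS];
   int b, i, j, k, sweep;

   /* setup: extract and invert each diagonal sub-block */
   for (b = 0; b < NB; b++)
   {
      for (j = 0; j < BS; j++)
         for (k = 0; k < BS; k++)
            diaginv[b][j*BS + k] = A[b*BS + j][b*BS + k];
      inv2x2(diaginv[b]);
   }

   /* solve: block-Jacobi sweeps, u_b += inv(D_b) * (f - A*u)_b */
   for (sweep = 0; sweep < 10; sweep++)
   {
      for (i = 0; i < N; i++) uold[i] = u[i];   /* Vtemp in hypre */
      for (b = 0; b < NB; b++)
      {
         for (j = 0; j < BS; j++)
         {
            res[j] = f[b*BS + j];
            for (k = 0; k < N; k++)
               res[j] -= A[b*BS + j][k] * uold[k];
         }
         for (j = 0; j < BS; j++)
            for (k = 0; k < BS; k++)
               u[b*BS + j] += diaginv[b][j*BS + k] * res[k];
      }
   }

   for (i = 0; i < N; i++)
      printf("u[%d] = %f\n", i, u[i]);   /* converges toward 1/6 */
   return 0;
}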
parallel_for_simd_misc_messages.c
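// Diagnostic tests for the combined '#pragma omp parallel for simd'
// construct: directive placement, and argument checking for the
// safelen, simdlen, collapse, linear, and aligned clauses.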
// RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s -Wuninitialized // expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel for simd'}} #pragma omp parallel for simd // expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel for simd'}} #pragma omp parallel for simd foo void test_no_clause() { int i; #pragma omp parallel for simd for (i = 0; i < 16; ++i) ; // expected-error@+2 {{statement after '#pragma omp parallel for simd' must be a for loop}} #pragma omp parallel for simd ++i; } void test_branch_protected_scope() { int i = 0; L1: ++i; int x[24]; #pragma omp parallel #pragma omp parallel for simd for (i = 0; i < 16; ++i) { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; } void test_invalid_clause() { int i; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} #pragma omp parallel for simd foo bar for (i = 0; i < 16; ++i) ; } void test_non_identifiers() { int i, x; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} #pragma omp parallel for simd; for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} #pragma omp parallel for simd linear(x); for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} #pragma omp parallel for simd private(x); for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} #pragma omp parallel for simd, private(x); for (i = 0; i < 16; ++i) ; } extern int foo(); void test_safelen() { int i; // expected-error@+1 {{expected '('}} #pragma omp parallel for simd safelen for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen( for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd safelen() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(, for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(, ) for (i = 0; i < 16; ++i) ; // expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp parallel for simd safelen 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(4 for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(4, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel 
for simd safelen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(4 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd safelen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(4, 8) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for simd safelen(2.5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for simd safelen(foo()) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp parallel for simd safelen(-5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp parallel for simd safelen(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp parallel for simd safelen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_simdlen() { int i; // expected-error@+1 {{expected '('}} #pragma omp parallel for simd simdlen for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen( for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd simdlen() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(, for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(, ) for (i = 0; i < 16; ++i) ; // expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp parallel for simd simdlen 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(4 for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(4, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd simdlen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(4 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd simdlen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(4, 8) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for simd simdlen(2.5) for (i = 0; i < 16; ++i) ; // 
expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for simd simdlen(foo()) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp parallel for simd simdlen(-5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp parallel for simd simdlen(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp parallel for simd simdlen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_safelen_simdlen() { int i; // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp parallel for simd simdlen(6) safelen(5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp parallel for simd safelen(5) simdlen(6) for (i = 0; i < 16; ++i) ; } void test_collapse() { int i; #pragma omp parallel // expected-error@+1 {{expected '('}} #pragma omp parallel for simd collapse for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd collapse( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd collapse() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd collapse(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd collapse(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp parallel for simd collapse 4) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4 for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4, for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4, ) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} 
expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4, , 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel #pragma omp parallel for simd collapse(4) for (int i1 = 0; i1 < 16; ++i1) for (int i2 = 0; i2 < 16; ++i2) for (int i3 = 0; i3 < 16; ++i3) for (int i4 = 0; i4 < 16; ++i4) foo(); #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4, 8) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for simd collapse(2.5) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for simd collapse(foo()) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp parallel for simd collapse(-5) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp parallel for simd collapse(0) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp parallel for simd collapse(5 - 5) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd collapse(2) for (i = 0; i < 16; ++i) for (int j = 0; j < 16; ++j) // expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}} #pragma omp parallel for simd reduction(+ : i, j) for (int k = 0; k < 16; ++k) i += j; } void test_linear() { int i; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd linear( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd linear(, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp parallel for simd linear(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd linear() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd linear(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp parallel for simd linear(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp parallel for simd linear(x) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp parallel for simd linear(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of 
undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp parallel for simd linear(x, y, z) for (i = 0; i < 16; ++i) ; int x, y; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd linear(x :) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd linear(x :, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd linear(x : 1) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd linear(x : 2 * 2) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd linear(x : 1, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd linear(x : 1, y, z : 1) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be linear}} #pragma omp parallel for simd linear(x) linear(x) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as private}} // expected-error@+1 {{private variable cannot be linear}} #pragma omp parallel for simd private(x) linear(x) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be private}} #pragma omp parallel for simd linear(x) private(x) for (i = 0; i < 16; ++i) ; // expected-warning@+1 {{zero linear step (x and other variables in clause should probably be const)}} #pragma omp parallel for simd linear(x, y : 0) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be lastprivate}} #pragma omp parallel for simd linear(x) lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-note@+2 {{defined as lastprivate}} // expected-error@+1 {{lastprivate variable cannot be linear}} #pragma omp parallel for simd lastprivate(x) linear(x) for (i = 0; i < 16; ++i) ; } void test_aligned() { int i; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd aligned( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd aligned(, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp parallel for simd aligned(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd aligned() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd aligned(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp parallel for simd aligned(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp parallel for simd aligned(x) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp parallel for simd aligned(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp 
parallel for simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; int *x, y, z[25]; // expected-note 4 {{'y' defined here}} #pragma omp parallel for simd aligned(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd aligned(z) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd aligned(x :) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd aligned(x :, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd aligned(x : 1) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd aligned(x : 2 * 2) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd aligned(x : 1, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd aligned(x : 1, y, z : 1) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp parallel for simd aligned(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp parallel for simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as aligned}} // expected-error@+1 {{a variable cannot appear in more than one aligned clause}} #pragma omp parallel for simd aligned(x) aligned(z, x) for (i = 0; i < 16; ++i) ; // expected-note@+3 {{defined as aligned}} // expected-error@+2 {{a variable cannot appear in more than one aligned clause}} // expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp parallel for simd aligned(x, y, z) aligned(y, z) for (i = 0; i < 16; ++i) ; } void test_private() { int i; #pragma omp parallel // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd private( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd private(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd private(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd private() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd private(int) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp parallel for simd private(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel #pragma omp parallel for simd private(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd private(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd private(x, y, z) for (i = 0; i < 16; ++i) { x = y * i + z; } } void test_lastprivate() { int i; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp parallel for simd lastprivate( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} 
#pragma omp parallel for simd lastprivate(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd lastprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd lastprivate() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd lastprivate(int) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp parallel for simd lastprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel #pragma omp parallel for simd lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd lastprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd lastprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_firstprivate() { int i; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp parallel for simd firstprivate( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd firstprivate(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd firstprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd firstprivate() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd firstprivate(int) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp parallel for simd firstprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel #pragma omp parallel for simd lastprivate(x) firstprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd lastprivate(x, y) firstprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd lastprivate(x, y, z) firstprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_loop_messages() { float a[100], b[100], c[100]; #pragma omp parallel // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp parallel for simd for (float fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } #pragma omp parallel // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp parallel for simd for (double fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } }
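For contrast with the deliberately malformed directives this test feeds the parser, here is a well-formed combination of the clauses it exercises. This is a sketch of my own, not part of the test file; the 64-byte alignment asserted by aligned() is an assumption the caller would have to guarantee.

void saxpy(int n, float a, float *x, float *y) {
  // simdlen must not exceed safelen; both must be strictly positive integer
  // constant expressions, and aligned() takes pointer or array arguments.
  #pragma omp parallel for simd safelen(8) simdlen(8) aligned(x, y : 64)
  for (int i = 0; i < n; ++i)
    y[i] = a * x[i] + y[i];
}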
threadpool.h
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ /* Modifications Copyright (c) Microsoft. */ #pragma once #include <string> #include <vector> #include <functional> #include <memory> #include "core/common/common.h" #include "core/platform/env.h" #include "core/common/optional.h" #include <functional> #include <memory> // This file use PIMPL to avoid having eigen headers here namespace Eigen { class Allocator; class ThreadPoolInterface; } // namespace Eigen namespace onnxruntime { struct TensorOpCost { double bytes_loaded; double bytes_stored; double compute_cycles; }; template <typename Environment> class ThreadPoolTempl; namespace concurrency { class ThreadPool { public: // Scheduling strategies for ParallelFor. The strategy governs how the given // units of work are distributed among the available threads in the // threadpool. enum class SchedulingStrategy { // The Adaptive scheduling strategy adaptively chooses the shard sizes based // on the cost of each unit of work, and the cost model of the underlying // threadpool device. // // The 'cost_per_unit' is an estimate of the number of CPU cycles (or // nanoseconds if not CPU-bound) to complete a unit of work. Overestimating // creates too many shards and CPU time will be dominated by per-shard // overhead, such as Context creation. Underestimating may not fully make // use of the specified parallelism, and may also cause inefficiencies due // to load balancing issues and stragglers. kAdaptive, // The Fixed Block Size scheduling strategy shards the given units of work // into shards of fixed size. In case the total number of units is not // evenly divisible by 'block_size', at most one of the shards may be of // smaller size. The exact number of shards may be found by a call to // NumShardsUsedByFixedBlockSizeScheduling. // // Each shard may be executed on a different thread in parallel, depending // on the number of threads available in the pool. Note that when there // aren't enough threads in the pool to achieve full parallelism, function // calls will be automatically queued. kFixedBlockSize }; // Contains additional parameters for either the Adaptive or the Fixed Block // Size scheduling strategy. class SchedulingParams { public: explicit SchedulingParams(SchedulingStrategy strategy, optional<int64_t> cost_per_unit, optional<std::ptrdiff_t> block_size) : strategy_(strategy), cost_per_unit_(cost_per_unit), block_size_(block_size) { } SchedulingStrategy strategy() const { return strategy_; } optional<int64_t> cost_per_unit() const { return cost_per_unit_; } optional<std::ptrdiff_t> block_size() const { return block_size_; } private: // The underlying Scheduling Strategy for which this instance contains // additional parameters. SchedulingStrategy strategy_; // The estimated cost per unit of work in number of CPU cycles (or // nanoseconds if not CPU-bound). Only applicable for Adaptive scheduling // strategy. 
optional<int64_t> cost_per_unit_; // The block size of each shard. Only applicable for Fixed Block Size // scheduling strategy. optional<std::ptrdiff_t> block_size_; }; #ifdef _WIN32 using NAME_CHAR_TYPE = wchar_t; #else using NAME_CHAR_TYPE = char; #endif // Constructs a pool that contains "num_threads" threads with specified // "name". env->StartThread() is used to create individual threads with the // given ThreadOptions. If "low_latency_hint" is true the thread pool // implementation may use it as a hint that lower latency is preferred at the // cost of higher CPU usage, e.g. by letting one or more idle threads spin // wait. Conversely, if the threadpool is used to schedule high-latency // operations like I/O the hint should be set to false. // // REQUIRES: num_threads > 0 // The allocator parameter is only used for creating a Eigen::ThreadPoolDevice to be used with Eigen Tensor classes. ThreadPool(Env* env, const ThreadOptions& thread_options, const NAME_CHAR_TYPE* name, int num_threads, bool low_latency_hint); // Constructs a pool that wraps around the thread::ThreadPoolInterface // instance provided by the caller. Caller retains ownership of // `user_threadpool` and must ensure its lifetime is longer than the // ThreadPool instance. ThreadPool(Eigen::ThreadPoolInterface* user_threadpool); // Waits until all scheduled work has finished and then destroy the // set of threads. ~ThreadPool(); // Schedules fn() for execution in the pool of threads. void Schedule(std::function<void()> fn); // Returns the number of shards used by ParallelForFixedBlockSizeScheduling // with these parameters. int NumShardsUsedByFixedBlockSizeScheduling(std::ptrdiff_t total, std::ptrdiff_t block_size); // ParallelFor shards the "total" units of work assuming each unit of work // having roughly "cost_per_unit" cost, in cycles. Each unit of work is // indexed 0, 1, ..., total - 1. Each shard contains 1 or more units of work // and the total cost of each shard is roughly the same. // // "cost_per_unit" is an estimate of the number of CPU cycles (or nanoseconds // if not CPU-bound) to complete a unit of work. Overestimating creates too // many shards and CPU time will be dominated by per-shard overhead, such as // Context creation. Underestimating may not fully make use of the specified // parallelism, and may also cause inefficiencies due to load balancing // issues and stragglers. 
void ParallelFor(std::ptrdiff_t total, double cost_per_unit, const std::function<void(std::ptrdiff_t first, std::ptrdiff_t last)>& fn); static void TryParallelFor(concurrency::ThreadPool* tp, std::ptrdiff_t total, double cost_per_unit, const std::function<void(std::ptrdiff_t first, std::ptrdiff_t last)>& fn) { TryParallelFor(tp, total, TensorOpCost{0, 0, static_cast<double>(cost_per_unit)}, fn); } void ParallelFor(std::ptrdiff_t total, const TensorOpCost& cost_per_unit, const std::function<void(std::ptrdiff_t first, std::ptrdiff_t)>& fn); static void TryParallelFor(concurrency::ThreadPool* tp, std::ptrdiff_t total, const TensorOpCost& cost_per_unit, const std::function<void(std::ptrdiff_t first, std::ptrdiff_t last)>& fn) { #ifdef _OPENMP ORT_UNUSED_PARAMETER(cost_per_unit); std::ptrdiff_t num_threads = concurrency::ThreadPool::NumThreads(tp); if (total < num_threads) { num_threads = total; } #pragma omp parallel for for (std::ptrdiff_t i = 0; i < num_threads; i++) { auto work = PartitionWork(i, num_threads, total); fn(work.start, work.end); } #else if (tp == nullptr) { fn(0, total); return; } tp->ParallelFor(total, cost_per_unit, fn); #endif } // Similar to ParallelFor above, but takes the specified scheduling strategy // into account. void ParallelFor(std::ptrdiff_t total, const SchedulingParams& scheduling_params, const std::function<void(std::ptrdiff_t, std::ptrdiff_t)>& fn); static void TryParallelFor(concurrency::ThreadPool* tp, std::ptrdiff_t total, const SchedulingParams& scheduling_params, const std::function<void(std::ptrdiff_t first, std::ptrdiff_t last)>& fn) { #ifdef _OPENMP ORT_UNUSED_PARAMETER(scheduling_params); std::ptrdiff_t num_threads = concurrency::ThreadPool::NumThreads(tp); if (total < num_threads) { num_threads = total; } #pragma omp parallel for for (std::ptrdiff_t i = 0; i < num_threads; i++) { auto work = PartitionWork(i, num_threads, total); fn(work.start, work.end); } #else if (tp == nullptr) { fn(0, total); return; } tp->ParallelFor(total, scheduling_params, fn); #endif } // Prefer using this API to get the number of threads unless you know what you're doing. // This API takes into account if openmp is enabled/disabled and if the thread pool ptr is nullptr. static int NumThreads(const concurrency::ThreadPool* tp); // Returns the number of threads in the pool. Preferably use the static version of this API instead. int NumThreads() const; // Returns current thread id between 0 and NumThreads() - 1, if called from a // thread in the pool. Returns -1 otherwise. int CurrentThreadId() const; // If ThreadPool implementation is compatible with Eigen::ThreadPoolInterface, // returns a non-null pointer. The caller does not own the object the returned // pointer points to, and should not attempt to delete. Eigen::ThreadPoolInterface* AsEigenThreadPool() const; // Directly schedule the 'total' tasks to the underlying threadpool, without // cutting them by halves void SimpleParallelFor(std::ptrdiff_t total, const std::function<void(std::ptrdiff_t)>& fn); inline static void TrySimpleParallelFor(ThreadPool* tp, std::ptrdiff_t total, const std::function<void(std::ptrdiff_t)>& fn) { #ifdef _OPENMP ORT_UNUSED_PARAMETER(tp); #pragma omp parallel for for (std::ptrdiff_t i = 0; i < total; ++i) { fn(i); } #else if (tp != nullptr) { tp->SimpleParallelFor(total, fn); } else { for (std::ptrdiff_t i = 0; i < total; ++i) { // In many cases, fn can be inlined here. fn(i); } } #endif } /** * Tries to call the given function in parallel, with calls split into (num_batches) batches. 
*\param num_batches If it is zero, it will be replaced to the value of NumThreads(). *\param fn A std::function or STL style functor with signature of "void f(int32_t);" * Pitfall: Caller should cap `num_batches` to a reasonable value based on the cost of `fn` and the value of `total`. *For example, if fn is as simple as: int sum=0; fn = [&](int i){sum +=i;} and `total` is 100, then num_batches should *be just 1. * * ``` **/ template <typename F> inline static void TryBatchParallelFor(ThreadPool* tp, std::ptrdiff_t total, F&& fn, std::ptrdiff_t num_batches) { #ifdef _OPENMP ORT_UNUSED_PARAMETER(tp); ORT_UNUSED_PARAMETER(num_batches); #pragma omp parallel for for (std::ptrdiff_t i = 0; i < total; ++i) { fn(i); } #else if (tp == nullptr) { for (std::ptrdiff_t i = 0; i < total; ++i) { // In many cases, fn can be inlined here. fn(i); } return; } if (total <= 0) return; if (total == 1) { fn(0); return; } if (num_batches <= 0) { num_batches = std::min<ptrdiff_t>(total, tp->NumThreads()); } if (num_batches <= 1) { for (int i = 0; i < total; i++) { fn(i); } return; } tp->SimpleParallelFor(num_batches, [&](std::ptrdiff_t batch_index) { auto work = PartitionWork(batch_index, num_batches, total); for (std::ptrdiff_t i = work.start; i < work.end; i++) { fn(i); } }); #endif } struct WorkInfo { std::ptrdiff_t start; std::ptrdiff_t end; }; /** Calculate the start and end offsets for a batch. @remarks Based on MlasPartitionWork */ static WorkInfo PartitionWork(std::ptrdiff_t batch_idx, std::ptrdiff_t num_batches, std::ptrdiff_t total_work) { const std::ptrdiff_t work_per_batch = total_work / num_batches; const std::ptrdiff_t work_per_batch_extra = total_work % num_batches; WorkInfo info; if (batch_idx < work_per_batch_extra) { info.start = (work_per_batch + 1) * batch_idx; info.end = info.start + work_per_batch + 1; } else { info.start = work_per_batch * batch_idx + work_per_batch_extra; info.end = info.start + work_per_batch; } return info; } ORT_DISALLOW_COPY_AND_ASSIGNMENT(ThreadPool); private: // Divides the work represented by the range [0, total) into k shards. // Calls fn(i*block_size, (i+1)*block_size) from the ith shard (0 <= i < k). // Each shard may be executed on a different thread in parallel, depending on // the number of threads available in the pool. // When (i+1)*block_size > total, fn(i*block_size, total) is called instead. // Here, k = NumShardsUsedByFixedBlockSizeScheduling(total, block_size). // Requires 0 < block_size <= total. void ParallelForFixedBlockSizeScheduling(std::ptrdiff_t total, std::ptrdiff_t block_size, const std::function<void(std::ptrdiff_t, std::ptrdiff_t)>& fn); ThreadOptions thread_options_; // underlying_threadpool_ is the user_threadpool if user_threadpool is // provided in the constructor. Otherwise it is the eigen_threadpool_. Eigen::ThreadPoolInterface* underlying_threadpool_; // eigen_threadpool_ is instantiated and owned by thread::ThreadPool if // user_threadpool is not in the constructor. std::unique_ptr<ThreadPoolTempl<Env> > eigen_threadpool_; }; } // namespace concurrency } // namespace onnxruntime
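A hedged caller sketch for the Try* entry points above (my own, not part of the header; the include path is assumed from the onnxruntime tree, and ScaleAll is a hypothetical name). `tp` may be nullptr, in which case the work runs inline on the calling thread.

#include "core/platform/threadpool.h"  // path assumed from the onnxruntime tree
#include <cstddef>
#include <vector>

void ScaleAll(onnxruntime::concurrency::ThreadPool* tp, std::vector<float>& v, float s) {
  namespace conc = onnxruntime::concurrency;
  conc::ThreadPool::TryParallelFor(
      tp, static_cast<std::ptrdiff_t>(v.size()),
      /*cost_per_unit=*/2.0,  // rough cycles per element; tune per workload
      [&](std::ptrdiff_t first, std::ptrdiff_t last) {
        for (std::ptrdiff_t i = first; i < last; ++i) v[i] *= s;
      });
}

As a worked example of PartitionWork: total_work = 10 split into num_batches = 3 gives work_per_batch = 3 with one extra unit, so the batches cover [0,4), [4,7), and [7,10).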
firstprivate.c
#include <stdio.h>
#include <omp.h>
#include <assert.h>

int main() {
  int i = 100, sum = 0;

  // With firstprivate: each thread's private copy of i is initialized from
  // the original i, so every thread contributes 100 to the reduction.
#pragma omp parallel firstprivate(i) reduction(+ : sum)
  {
    assert(i == 100);
    sum = sum + i;
  }
  printf("sum with firstprivate: %d\n", sum);

  i = 100;
  sum = 0;

  // With plain private: each thread's i is uninitialized inside the region
  // (it is NOT 100 in general), so it must be assigned before it is read;
  // reading it uninitialized, as the original commented-out assert hinted,
  // is undefined behavior.
#pragma omp parallel private(i) reduction(+ : sum)
  {
    i = 0;
    sum = sum + i;
  }
  printf("sum with private: %d\n", sum);

  return 0;
}
looper.h
// // Created by Lei Ma on 9/6/17. // #ifndef HALO_PARALLEL_LOOPER_H #define HALO_PARALLEL_LOOPER_H #include "initializer.h" #include "stepper.h" #include "helper.h" #include <omp.h> //For openmp // #include "recorder.h"// For test namespace Looper { void vacuum_euler_forward( state_type rho_self_array [], const double dt, const int length){ // loop through for iter iterations for(int i =0; i<length-1; i++){ rho_self_array[i+1] = Stepper::vacuum_euler_forward(rho_self_array[i], dt) ; } } void interaction_euler_forward( state_type rho_self_array [], state_type rho_counter_array [], const double dt, const int length){ // loop through for iter iterations // state_type rhs; for(int i =0; i<length-1; i++){ Stepper::euler_forward(rho_self_array[i+1], rho_self_array[i], rho_counter_array[length - 1 - i], dt) ; // rho_self_array[i+1] = rhs; } } void halo_euler_forward( StateArray rho_forward_array, StateArray rho_backward_array, const double dt, const int length){ // loop through for iter iterations state_type rhs; for(int i =0; i<length-1; i++){ Stepper::euler_forward(rhs, rho_forward_array[i], rho_backward_array[length - 1 - i], dt) ; rho_forward_array[i+1] = rhs; Stepper::euler_forward(rhs, rho_backward_array[i], rho_forward_array[length - 1 - i], dt) ; rho_backward_array[i+1] = rhs; } } void halo_euler_forward_one(StateArray* rho_array_ptr, StateArray* rho_array_store_ptr, const double dt, const int totallength, const double reflection = 1, const double muf = 5.0, const double costheta = -1.0){ // loop through for iter iterations int length = totallength/2; #pragma omp parallel for for(int i =0; i<length-1; i++){ Stepper::euler_forward_one( (*rho_array_ptr)[i+1], (*rho_array_store_ptr)[i], (*rho_array_store_ptr)[totallength - 2 - i], dt, reflection, muf, costheta) ; Stepper::euler_forward_one( (*rho_array_ptr)[length + i], (*rho_array_store_ptr)[length -1 + i], (*rho_array_store_ptr)[length -1 - i], dt, 1.0, muf, costheta) ; } } void halo_euler_forward_one_avg(StateArray* rho_array_ptr, StateArray* rho_array_store_ptr, const double dt, const int totallength, const double alpha, const double reflection = 1, const double muf = 5.0, const double costheta = -1.0){ // loop through for iter iterations int length = totallength/2; #pragma omp parallel for for(int i =0; i<length-1; i++){ Stepper::euler_forward_one( (*rho_array_ptr)[i+1], (*rho_array_store_ptr)[i], (*rho_array_store_ptr)[totallength - 2 - i], dt, reflection, muf, costheta) ; Stepper::euler_forward_one( (*rho_array_ptr)[length + i], (*rho_array_store_ptr)[length -1 + i], (*rho_array_store_ptr)[length -1 - i], dt, 1.0, muf, costheta) ; double sumrecpf = 0.0; double sumrecpb = 0.0; double elef = 0.0; double eleb = 0.0; // Average the new results with old results for(int j=0; j<3;j++){ elef = alpha * (*rho_array_store_ptr)[i][j] + (1 - alpha) * (*rho_array_ptr)[i][j]; (*rho_array_ptr)[i][j] = elef; eleb= alpha * (*rho_array_store_ptr)[length + i][j] + (1 - alpha) * (*rho_array_ptr)[length + i][j]; (*rho_array_ptr)[length + i][j] = eleb; sumrecpf = sumrecpf + elef*elef; sumrecpb = sumrecpb + eleb*eleb; } sumrecpf = 1/( std::sqrt(sumrecpf) ); sumrecpb = 1/( std::sqrt(sumrecpb) ); for(int j=0;j < 3;j++) { (*rho_array_ptr)[i][j] = (*rho_array_ptr)[i][j] * sumrecpf; (*rho_array_ptr)[length + i][j] = (*rho_array_ptr)[length + i][j] * sumrecpb; } } } void halo_euler_forward_one_incline(StateArray *rho_array_ptr, StateArray *rho_array_store_ptr, const double dt, const int totallength, const double alpha, const double reflection = 1, const double 
muf = 5.0, const double costheta = -1.0) { // loop through for iter iterations int length = totallength / 2; double alpha_rescaled = alpha/length; #pragma omp parallel for for (int i = 0; i < length - 1; i++) { // state_type hamilf; // state_type hamilb; // Stepper::euler_forward_one_w_h has been validated and compared to previous results. Stepper::euler_forward_one_incline(alpha_rescaled, (*rho_array_ptr)[i + 1], (*rho_array_store_ptr)[i], (*rho_array_store_ptr)[totallength - 2 - i], dt, reflection, muf, costheta); Stepper::euler_forward_one_incline(alpha_rescaled, (*rho_array_ptr)[length + i], (*rho_array_store_ptr)[length - 1 + i], (*rho_array_store_ptr)[length - 1 - i], dt, 1.0, muf, costheta); // int ipfsign = Helper::sgnf(innerproductf); // int ipbsign = Helper::sgnf(innerproductb); } } void halo_evolution_op_one(StateArray* rho_array_ptr, StateArray* rho_array_store_ptr, const double dt, const int totallength, const double reflection = 1.0, const double muf = 5.0, const double costheta = -1.0){ // loop through for iter iterations int length = totallength/2; #pragma omp parallel for for(int i =0; i<length-1; i++){ Stepper::evolution_op_one( (*rho_array_ptr)[i+1], (*rho_array_store_ptr)[i], (*rho_array_store_ptr)[totallength - 2 - i], dt, reflection, muf, costheta) ; Stepper::evolution_op_one( (*rho_array_ptr)[length + i], (*rho_array_store_ptr)[length -1 + i], (*rho_array_store_ptr)[length -1 - i], dt, 1.0, muf, costheta) ; } } void halo_euler_forward_one_nunubar(StateArray* rho_array_ptr, StateArray* rho_array_store_ptr, StateArray* rho_another_array_ptr, StateArray* rho_another_array_store_ptr, const double dt, const int totallength, const double spectrum[2], const double reflection, const double mu_arr[2], const double costheta[4]){ // loop through for iter iterations // spectrum[2] = {left beam, right beam} // mu_arr = {mu_left, mu_right} or {mu_1,mu_2} for the two beams // I define {mu_self, mu_the_other} when doing the calculations since the Hamiltonian takes in such parameters. 
// costheta[4] = { cos(2theta_left), cos(2theta_right) , cos(theta_right- theta_left), cos (theta_right+theta_left) } int length = totallength/2; // when calculating the right beam, the order of spectrum is reversed so I define the reversed spectrum double spectrum_r[2]; double mu_arr_r[2]; spectrum_r[0] = spectrum[1]; // spectrum_r is for the calculation of the right beam spectrum_r[1] = spectrum[0]; mu_arr_r[0] = mu_arr[1]; mu_arr_r[1] = mu_arr[0]; // define the costheta's needed for each beam double costheta_l[3]; double costheta_r[3]; costheta_l[0] = costheta[0]; costheta_l[1] = costheta[3]; costheta_l[2] = costheta[2]; costheta_r[0] = costheta[1]; costheta_r[1] = costheta[3]; costheta_r[2] = costheta[2]; // build the reflection array double refl_arr_f[2]; refl_arr_f[1] = reflection; refl_arr_f[0] = 1.0; double refl_arr_b[2]; refl_arr_b[1] = 1.0; refl_arr_b[0] = reflection; // interaction_nunubar( state_type &h_store, const state_type &rho_counter, const state_type &rho_ya_counter, const state_type &rho_same_direction, const double spectrum[2], const double reflection[2], const double muf[2], const double costheta[3] ) #pragma omp parallel for for(int i =0; i<length-1; i++){ // the left beam forward Stepper::euler_forward_one_nunubar( (*rho_array_ptr)[i+1], (*rho_array_store_ptr)[i], (*rho_array_store_ptr)[totallength - 2 - i], (*rho_another_array_store_ptr)[totallength - 2 - i], (*rho_another_array_store_ptr)[i], dt, spectrum, refl_arr_f, mu_arr, costheta_l) ; // right beam forward Stepper::euler_forward_one_nunubar( (*rho_another_array_ptr)[i+1], (*rho_another_array_store_ptr)[i], (*rho_another_array_store_ptr)[totallength - 2 - i], (*rho_array_store_ptr)[totallength - 2 - i], (*rho_array_store_ptr)[i], dt, spectrum_r, refl_arr_f, mu_arr_r, costheta_r) ; // left beam backward: left means it's the continuation of the original left beam, which is stored in the same array // Comment out to test bipolar model Stepper::euler_forward_one_nunubar( (*rho_array_ptr)[length + i], (*rho_array_store_ptr)[length -1 + i], (*rho_array_store_ptr)[length -1 - i], (*rho_another_array_store_ptr)[length - 1 - i], (*rho_another_array_store_ptr)[length-1+i], dt, spectrum, refl_arr_b, mu_arr, costheta_l); // right beam backward // Comment out to test bipolar model Stepper::euler_forward_one_nunubar( (*rho_another_array_ptr)[length + i], (*rho_another_array_store_ptr)[length -1 + i], (*rho_another_array_store_ptr)[length - 1 - i], (*rho_array_store_ptr)[length -1 - i], (*rho_array_store_ptr)[length-1+i], dt, spectrum_r, refl_arr_b, mu_arr_r, costheta_r); } } void halo_euler_forward_one_bipolar(StateArray* rho_array_ptr, StateArray* rho_array_store_ptr, StateArray* rho_another_array_ptr, StateArray* rho_another_array_store_ptr, const double dt, const int totallength, const double spectrum[2], const double reflection, const double mu_arr[2], const double costheta[4]){ // loop through for iter iterations // spectrum[2] = {left beam, right beam} // mu_arr = {mu_left, mu_right} or {mu_1,mu_2} for the two beams // I define {mu_self, mu_the_other} when doing the calculations since the Hamiltonian takes in such parameters. 
// costheta[4] = { cos(2theta_left), cos(2theta_right) , cos(theta_right- theta_left), cos (theta_right+theta_left) } int length = totallength/2; // when calculating the right beam, the order of spectrum is reversed so I define the reversed spectrum double spectrum_r[2]; double mu_arr_r[2]; spectrum_r[0] = spectrum[1]; // spectrum_r is for the calculation of the right beam spectrum_r[1] = spectrum[0]; mu_arr_r[0] = mu_arr[1]; mu_arr_r[1] = mu_arr[0]; // define the costheta's needed for each beam double costheta_l[3]; double costheta_r[3]; costheta_l[0] = costheta[0]; costheta_l[1] = costheta[3]; costheta_l[2] = costheta[2]; costheta_r[0] = costheta[1]; costheta_r[1] = costheta[3]; costheta_r[2] = costheta[2]; // build the reflection array double refl_arr_f[2]; refl_arr_f[1] = reflection; refl_arr_f[0] = 1.0; double refl_arr_b[2]; refl_arr_b[1] = 1.0; refl_arr_b[0] = reflection; // interaction_nunubar( state_type &h_store, const state_type &rho_counter, const state_type &rho_ya_counter, const state_type &rho_same_direction, const double spectrum[2], const double reflection[2], const double muf[2], const double costheta[3] ) #pragma omp parallel for for(int i =0; i<length-1; i++){ // the left beam forward Stepper::euler_forward_one_nunubar( (*rho_array_ptr)[i+1], (*rho_array_store_ptr)[i], (*rho_array_store_ptr)[totallength-2-i], (*rho_another_array_store_ptr)[totallength - 2 - i], (*rho_another_array_store_ptr)[i], dt, spectrum, refl_arr_f, mu_arr, costheta_l) ; // right beam forward Stepper::euler_forward_one_nunubar( (*rho_another_array_ptr)[i+1], (*rho_another_array_store_ptr)[i], (*rho_another_array_store_ptr)[totallength-2-i], (*rho_array_store_ptr)[totallength - 2 - i], (*rho_array_store_ptr)[i], dt, spectrum_r, refl_arr_f, mu_arr_r, costheta_r) ; } } } #endif //HALO_PARALLEL_LOOPER_H
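The looper functions above all follow the same convention: read from the *_store array, write into the current array. A driver therefore typically double-buffers two StateArray instances and swaps the pointers each step. A minimal sketch of my own (run_halo and nsteps are hypothetical; state_type/StateArray are whatever initializer.h defines):

#include <utility>  // std::swap

void run_halo(StateArray& rho_a, StateArray& rho_b,
              double dt, int totallength, int nsteps) {
  StateArray* cur = &rho_a;   // state being written this step
  StateArray* prev = &rho_b;  // state being read this step
  for (int step = 0; step < nsteps; ++step) {
    Looper::halo_euler_forward_one(cur, prev, dt, totallength);
    std::swap(cur, prev);     // new state becomes input of the next step
  }
}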
dependences_mutexinoutset.c
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s // REQUIRES: ompt // GCC 9 introduced codegen for mutexinoutset // UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8 // icc does not yet support mutexinoutset // XFAIL: icc // clang 9 introduced codegen for mutexinoutset // UNSUPPORTED: clang-4, clang-5, clang-6, clang-7, clang-8 #include "callback.h" #include <omp.h> #include <math.h> #include <unistd.h> int main() { int x = 0; #pragma omp parallel num_threads(2) { #pragma omp master { print_ids(0); printf("%" PRIu64 ": address of x: %p\n", ompt_get_thread_data()->value, &x); #pragma omp task depend(out : x) { x++; delay(100); } print_fuzzy_address(1); print_ids(0); #pragma omp task depend(mutexinoutset : x) { x++; delay(100); } print_fuzzy_address(2); print_ids(0); #pragma omp task depend(in : x) { x = -1; } print_ids(0); } } x++; return 0; } // Check if libomp supports the callbacks for this test. // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_dependences' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_depende // CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]] // make sure initial data pointers are null // CHECK-NOT: 0: new_task_data initially not null // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_implicit_task_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]], // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT:0x[0-f]+]], // CHECK-SAME: reenter_frame=[[NULL]] // CHECK: {{^}}[[MASTER_ID]]: address of x: [[ADDRX:0x[0-f]+]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create: // CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]], // CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}}, // CHECK-SAME: new_task_id=[[FIRST_TASK:[0-f]+]], // CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, // CHECK-SAME: task_type=ompt_task_explicit=4, has_dependences=yes // CHECK: {{^}}[[MASTER_ID]]: ompt_event_dependences: // CHECK-SAME: task_id=[[FIRST_TASK]], deps=[([[ADDRX]], // CHECK-SAME: ompt_dependence_type_inout)], ndeps=1 // CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]] // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], // CHECK-SAME: reenter_frame=[[NULL]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create: // CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]], // CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}}, // CHECK-SAME: new_task_id=[[SECOND_TASK:[0-f]+]], // CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, // CHECK-SAME: task_type=ompt_task_explicit=4, has_dependences=yes // CHECK: {{^}}[[MASTER_ID]]: ompt_event_dependences: // CHECK-SAME: task_id=[[SECOND_TASK]], deps=[([[ADDRX]], // CHECK-SAME: ompt_dependence_type_mutexinoutset)], ndeps=1 // CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]] // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], // CHECK-SAME: reenter_frame=[[NULL]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create: // CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]], // CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}}, // CHECK-SAME: new_task_id=[[THIRD_TASK:[0-f]+]], codeptr_ra={{0x[0-f]+}}, // 
CHECK-SAME: task_type=ompt_task_explicit=4, has_dependences=yes // CHECK: {{^}}[[MASTER_ID]]: ompt_event_dependences: // CHECK-SAME: task_id=[[THIRD_TASK]], deps=[([[ADDRX]], // CHECK-SAME: ompt_dependence_type_in)], ndeps=1 // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], // CHECK-SAME: reenter_frame=[[NULL]]
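For readers of the CHECK lines above: mutexinoutset tasks are unordered with respect to one another but never run concurrently, while in/out dependences still order against the whole set. A minimal sketch of the semantics (my own, not part of the lit test; requires an OpenMP 5.0 compiler):

#include <omp.h>

int counter_demo(void) {
  int c = 0;
#pragma omp parallel
#pragma omp single
  {
    // The two mutexinoutset tasks may run in either order, but are
    // mutually exclusive, so the increments cannot race.
#pragma omp task depend(mutexinoutset : c)
    c += 1;
#pragma omp task depend(mutexinoutset : c)
    c += 1;
    // The in task waits for both mutexinoutset tasks to finish.
#pragma omp task depend(in : c)
    { /* observes c == 2 */ }
  }
  return c;
}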
GB_binop__plus_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__plus_uint8) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__plus_uint8) // A.*B function (eWiseMult): GB (_AemultB_03__plus_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_uint8) // A*D function (colscale): GB (_AxD__plus_uint8) // D*A function (rowscale): GB (_DxB__plus_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__plus_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__plus_uint8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_uint8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_uint8) // C=scalar+B GB (_bind1st__plus_uint8) // C=scalar+B' GB (_bind1st_tran__plus_uint8) // C=A+scalar GB (_bind2nd__plus_uint8) // C=A'+scalar GB (_bind2nd_tran__plus_uint8) // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = (aij + bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x + y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PLUS || GxB_NO_UINT8 || GxB_NO_PLUS_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__plus_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__plus_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__plus_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__plus_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__plus_uint8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__plus_uint8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__plus_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__plus_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__plus_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__plus_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__plus_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__plus_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = Bx [p] ; Cx [p] = (x + bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__plus_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = Ax [p] ; Cx [p] = (aij + y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (x + aij) ; \ } GrB_Info GB (_bind1st_tran__plus_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses 
GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (aij + y) ; \ } GrB_Info GB (_bind2nd_tran__plus_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
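Once the macros expand, the generated _bind1st/_bind2nd kernels above reduce to a flat OpenMP loop. A standalone sketch of the expanded bind2nd pattern (my own, plain C types, with the bitmap Ab assumed all-present so the GBB test disappears):

// Cx [p] = op (Ax [p], y) with a static OpenMP schedule, mirroring
// GB (_bind2nd__plus_uint8) after GB_BINOP expands to z = (x + y).
#include <stdint.h>
void apply_plus_bind2nd (uint8_t *Cx, const uint8_t *Ax, uint8_t y,
    int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = (uint8_t) (Ax [p] + y) ;   // GB_BINOP: z = (x + y)
    }
}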
8.norace6.c
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s
#include <omp.h>

#define N 200

int main() {
  int A[N], x = 0;
#pragma omp parallel for linear(x : 2)
  for (int i = 0; i < N; i++)
    A[i] = x;
}
// CHECK: Region is Data Race Free.
// END
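The verifier accepts this loop because linear(x : 2) privatizes x: iteration i computes with x0 + 2*i, and the original x receives x0 + 2*N after the construct. A small sketch of the equivalent serial semantics (my own, not part of the test):

#include <assert.h>

int linear_demo(void) {
  enum { LEN = 200 };
  int A[LEN], x = 0;
  for (int i = 0; i < LEN; i++)
    A[i] = x + 2 * i;   // each iteration's private copy of x
  x = x + 2 * LEN;      // value written back after the construct
  assert(A[3] == 6 && x == 400);
  return x;
}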
scatter_int.c
// Scatter test: write randomly masked elements of `numbers` into two result
// arrays, once serially and once with `omp simd`, then compare the outputs.
// (Duplicate mask indices are harmless here: every write to result[mask[i]]
// stores the same value, numbers[mask[i]].)
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define N 32000
#define SCALE 16

int main() {
  srand(time(NULL));
  int *numbers = malloc(sizeof(int) * N);
  int *result1 = malloc(sizeof(int) * N);
  int *result2 = malloc(sizeof(int) * N);
  int *mask = malloc(sizeof(int) * N);

  // Init the numbers
  for (int i = 0; i < N; i++)
    numbers[i] = rand() % 10;
  for (int i = 0; i < N; i++) {
    result1[i] = 0;
    result2[i] = 0;
  }
  for (int i = 0; i < N; i++)
    mask[i] = rand() % N;

  for (int i = 0; i < SCALE; i++)
    printf("%d ", numbers[i]);
  puts("\n---");
  for (int i = 0; i < SCALE; i++)
    printf("%d ", mask[i]);
  puts("\n---");
  puts("---------------------------------------------");

  // Serial
  for (int i = 0; i < SCALE; i++) {
    result1[mask[i]] = numbers[mask[i]];
  }

#pragma omp simd simdlen(SCALE)
  for (int i = 0; i < SCALE; i++) {
    result2[mask[i]] = numbers[mask[i]];
  }

  // print
  for (int i = 0; i < SCALE; i++)
    printf("%d ", result1[i]);
  puts("\n---");
  for (int i = 0; i < SCALE; i++)
    printf("%d ", result2[i]);
  puts("\n---");

  int errors = 0;
  for (int i = 0; i < SCALE; i++) {
    if (result1[i] != result2[i])
      ++errors;
  }
  printf("Errors: %d\n", errors);
  return 0;
}
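A companion sketch (my own, not in the file) for reducing over the scattered results: reduction(+ : sum) on the simd construct keeps the per-lane partial sums well-defined.

int sum_result(const int *result, int n) {
  int sum = 0;
#pragma omp simd reduction(+ : sum)
  for (int i = 0; i < n; i++)
    sum += result[i];
  return sum;
}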
sw-post.c
/* * In this module, we are given the results of a full SW run, * and we compute two things: * * 1. The probability the location produced the read (over all possible alignments). * Currently, we only sum over all alignments respecting the current gaps. * As a result, this is useless to do in letter space, where the mapper * cannot distinguish between errors and SNPs. * * 2. For each output letter, the probability that it is correct. * Only for color space. In letter space, this is given by the base quality value. * * Both are computed as scores. */ #include <assert.h> #include <ctype.h> #include <errno.h> #include <math.h> #include <stdbool.h> #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <unistd.h> #include <zlib.h> #include <limits.h> #include <sys/types.h> #include <sys/stat.h> #include <sys/time.h> #include "../common/fasta.h" #include "../common/util.h" #include "../common/sw-post.h" #include "../common/sw-full-common.h" #include "../common/time_counter.h" static int initialized; static double pr_snp; static double pr_xover; static double pr_del_open; static double pr_del_extend; static double pr_ins_open; static double pr_ins_extend; static bool use_read_qvs; static bool use_sanger_qvs; static int default_qual; // if no qvs, use this instead static int qual_vector_offset; // i.e. is there a useless qv for the initial base in cs? static int qual_delta; // how much to subtract from chars to get the int static int init_bp; static int len; //static double neglogsixteenth; //static double neglogfourth; typedef struct column{ double forwards[16]; //we'll misuse this for the viterbi double backwards[16]; double forwscale; //adding numerical stability double backscale; //adding numerical stability int ncols; int nlets; int letssize; int colssize; int* lets; int* cols; double* letserrrate; double* colserrrate; char backpointer[16]; //previous state for viterbi double posterior[4]; int max_posterior; int base_call; } states; static struct column * columns; static int max_len; static uint64_t cells, invocs; static time_counter tc; static int check; #pragma omp threadprivate(initialized,\ pr_snp,pr_xover,pr_del_open,pr_del_extend,pr_ins_open,pr_ins_extend,\ use_read_qvs,use_sanger_qvs,default_qual,qual_vector_offset,qual_delta,\ init_bp,len,columns,max_len,\ tc,cells,invocs,check) /********************************************************************************* * * BEGIN Forward-backward code * *********************************************************************************/ #define left(i) ( ((i) >> 2) & 3) #define right(i) ( (i) & 3) #define MIN2(i,j) ( ((i)< (j))? (i):(j)) /* In order to understand any of the code below, you need to understand color-space; Specifically that LETTER ^ LETTER = COLOR : T (00) ^ C (10) = 2 (10), etc. And that LETTER ^ COLOR = NEXTLETTER: T (00) ^ 3 (11) = A (11). */ /* compute prior probability of the node given the emissions. 
letters are thought to be at the left "side" of the pair emitted by the node */ double nodePrior(states* allstates, int i, int j) { //i is state, j is node in the state double val = 0; double errrate; int let, col, k; for (k = 0; k < allstates[i].nlets; k++) { let = allstates[i].lets[k]; errrate = allstates[i].letserrrate[k]; if (right(j) == let) { val = val - log(1-errrate); } else { val = val - log(errrate/3.0); } } //fprintf(stderr, "nodeprior: %g", val); for (k = 0; k < allstates[i].ncols; k++) { col = allstates[i].cols[k]; errrate = allstates[i].colserrrate[k]; if ((left(j) ^ right(j)) == col) { val = val - log(1-errrate); } else { val = val - log(errrate/3.0); } //fprintf(stderr, " %g\n", val); } return val; } /* Little helper for debugging */ void printStates(states* allstates, int stateslen, FILE* stream) { int i,j,k; fprintf(stream, "\nCONTIG %d", stateslen); for (i=0; i< stateslen; i++) { fprintf(stream, "\nCOLORS[%d] ",i); for (k = 0; k < allstates[i].ncols; k++) { fprintf(stream, "%d (%g)",allstates[i].cols[k], allstates[i].colserrrate[k]); } } for (i=0; i< stateslen; i++) { fprintf(stream, "\nFORWARDSS[%d] ",i); for (j=0; j< 16; j++) { fprintf(stream, "%.5g ",allstates[i].forwards[j] + allstates[i].forwscale); } } for (i=0; i< stateslen; i++) { fprintf(stream, "\nBACKWARDSS[%d] ",i); for (j=0; j< 16; j++) { fprintf(stream, "%.5g ",allstates[i].backwards[j] + allstates[i].backscale); } } for (i=0; i< stateslen; i++) { fprintf(stream, "\nLETS[%d] ",i); for (k = 0; k < allstates[i].nlets; k++) { fprintf(stream, "%d ",allstates[i].lets[k]); } fprintf(stream, "%c",base_to_char(allstates[i].max_posterior, LETTER_SPACE)); fprintf(stream, " %.5g %.5g %.5g %.5g", allstates[i].posterior[0],allstates[i].posterior[1],allstates[i].posterior[2],allstates[i].posterior[3]); } fprintf(stream, "\n"); } /*maximum posterior traceback */ void post_traceback (states* allstates, int stateslen, double norm_px) { int i = 0, j, maxval; for (i = 0; i < stateslen; i++) { for (j=0; j< 4; j++) allstates[i].posterior[j] = 0; for (j = 0; j < 16; j++) { // fprintf(stderr, "%g %g %g\n",allstates[i].forwards[j], allstates[i].backwards[j], norm_px); allstates[i].posterior[right(j)] += exp(-1 * (allstates[i].forwards[j] + allstates[i].backwards[j] + allstates[i].forwscale + allstates[i].backscale - norm_px)); // fprintf(stderr, "distrib[%d,%d] = %g\n", i,j, exp(-1 * (allstates[i].forwards[j] + allstates[i].backwards[j] + allstates[i].forwscale + allstates[i].backscale - norm_px))); } maxval = 0; for (j=1; j< 4; j++) { // fprintf(stderr, "let_distrib[%d,%d] = %g\n", i,j, distrib[j]); if (allstates[i].posterior[j] >allstates[i].posterior[maxval]) maxval = j; } // fprintf (stderr, "\n"); //if (allstates[i].posterior[maxval] > confrate) { allstates[i].max_posterior = maxval; //} //else { // allstates[i].max_posterior = BASE_N; //} } } /*viterbi traceback */ /* char* vit_traceback (states* allstates, int stateslen) { char* result = (char*) calloc (stateslen + 1, 1); int i,j; int minval, prev; assert(0); // not changed to right letter emission for (i = stateslen -1; i >= 0; i--) { minval = 0; for (j = 0; j< 16; j++) { if (allstates[i].forwards[j] < allstates[i].forwards[minval]) { minval = j; } } prev = allstates[i].backpointer[minval]; if (i && (left(minval) != right (prev))) { fprintf (stderr, "BACKTRACE error %d %d %d\n", i, minval, prev); exit(2); } result[i] = letmap[left(minval)]; } return result; } void viterbi (states* allstates, int stateslen) { int i,j,k,let,col; int minback; double valback; double val; 
assert(0); // not changed to right letter emission i = 0; for (j = 0; j < 16; j++) { allstates[i].forwards[j] = nodePrior(allstates,i,j); } for (i=1; i < stateslen; i++) { for (j = 0; j < 16; j++) { allstates[i].forwards[j] = nodePrior(allstates,i,j); minback = left(j); for (k = 1; k < 16; k++) { if (left(j) == right(k)) { if (allstates[i-1].forwards[k] < allstates[i-1].forwards[minback]) { minback = k; } } } valback = allstates[i-1].forwards[minback]; allstates[i].forwards[j] += valback; allstates[i].backpointer[j] = minback; } } } */ double do_backwards (states* allstates, int stateslen) { int i,j,k; //,let,col; double val; i = stateslen-1; allstates[i].backscale = 999999999; for (j = 0; j < 16; j++) { allstates[i].backwards[j] = 0; // matei change: bug fix allstates[i].backscale = MIN2 (allstates[i].backscale, allstates[i].backwards[j]); } for (j = 0; j < 16; j++) { allstates[i].backwards[j] -= allstates[i].backscale; } for (i = stateslen-2; i >=0; i--) { allstates[i].backscale = 999999999; memset(allstates[i].backwards, 0, 16 * sizeof(allstates[i].backwards[0])); // matei: bug fix for (j = 0; j < 16; j++) { for (k = 0; k < 16; k++) { if (right(j) == left(k)) { val = nodePrior(allstates,i+1,k); allstates[i].backwards[j] += exp(-1*(val + allstates[i+1].backwards[k])); } } // fprintf(stdout, "bw was [%d, %d] = %g\n", i, j, allstates[i].backwards[j]); allstates[i].backwards[j] = -log(allstates[i].backwards[j]); // + neglogfourth; allstates[i].backscale = MIN2 (allstates[i].backscale, allstates[i].backwards[j]); } for (j = 0; j < 16; j++) { allstates[i].backwards[j] -= allstates[i].backscale; // fprintf(stdout, "bw is [%d, %d] = %g\n", i, j, allstates[i].backwards[j]); } allstates[i].backscale += allstates[i+1].backscale; } val = 0; i = 0; for (j = 0; j < 16; j++) { if (left(j) == init_bp) { // matei change: second letter emission val += exp(-1*(allstates[i].backwards[j] + nodePrior(allstates,i,j))); // + neglogfourth)); } } return -log(val) + allstates[0].backscale; } double do_forwards (states* allstates, int stateslen) { int i,j,k; //,let,col; double val; i = 0; j = 0; allstates[i].forwscale = 999999999; for (j = 0; j < 16; j++) { if (left(j) == init_bp) { // matei change: second letter emission allstates[i].forwards[j] = nodePrior(allstates,i,j); // + neglogfourth; allstates[i].forwscale = MIN2 (allstates[i].forwscale, allstates[i].forwards[j]); } else { allstates[i].forwards[j] = HUGE_VAL; } } for (j = 0; j < 16; j++) { allstates[i].forwards[j] -= allstates[i].forwscale; } for (i=1; i < stateslen; i++) { allstates[i].forwscale = 999999999; memset(allstates[i].forwards, 0, 16 * sizeof(allstates[i].forwards[0])); // matei: bug fix for (j = 0; j < 16; j++) { val = nodePrior(allstates,i,j); for (k = 0; k < 16; k++) { if (left(j) == right(k)) { allstates[i].forwards[j] += exp(-1*(allstates[i-1].forwards[k])); } } allstates[i].forwards[j] = val - log(allstates[i].forwards[j]); //+ neglogfourth; allstates[i].forwscale = MIN2 (allstates[i].forwscale, allstates[i].forwards[j]); } for (j = 0; j < 16; j++) { allstates[i].forwards[j] -= allstates[i].forwscale; } allstates[i].forwscale += allstates[i-1].forwscale; } val = 0; i = stateslen-1; for (j = 0; j < 16; j++) { val += exp(-1*(allstates[i].forwards[j])); // matei change: bug fix } return -log(val)+ allstates[i].forwscale; } double forward_backward (states* allstates, int stateslen) { double no1, no2; no1 = do_forwards(allstates, stateslen); no2 = do_backwards(allstates, stateslen); #ifdef DEBUG_POST_SW fprintf (stderr, "SANITY CHECK: no1 == 
no2 %g %g\n", no1, no2); #endif // don't really want a hard assert due to precision issues return no1; } /********************************************************************************* * * END Forward-backward code * *********************************************************************************/ int post_sw_setup(int _max_len, double _pr_snp, double _pr_xover, double _pr_del_open, double _pr_del_extend, double _pr_ins_open, double _pr_ins_extend, bool _use_read_qvs, bool _use_sanger_qvs, int _qual_vector_offset, int _qual_delta, bool reset_stats) { assert(0 == BASE_0); assert((BASE_A ^ BASE_C) == BASE_1); assert((BASE_A ^ BASE_G) == BASE_2); assert((BASE_A ^ BASE_T) == BASE_3); assert((BASE_C ^ BASE_G) == BASE_3); assert((BASE_C ^ BASE_T) == BASE_2); assert((BASE_G ^ BASE_T) == BASE_1); pr_snp = _pr_snp; pr_xover = _pr_xover; pr_del_open = _pr_del_open; pr_del_extend = _pr_del_extend; pr_ins_open = _pr_ins_open; pr_ins_extend = _pr_ins_extend; qual_delta = _qual_delta; use_read_qvs = _use_read_qvs; use_sanger_qvs = _use_sanger_qvs; if (!use_read_qvs) { default_qual = qv_from_pr_err(pr_xover); //pr_xover = pr_err_from_qv(default_qual); } else { qual_vector_offset = _qual_vector_offset; } //neglogsixteenth = -log(1.0/16.0); //neglogfourth = -log(1.0/4.0); max_len = _max_len; columns = (struct column *)xmalloc(max_len * sizeof(columns[0])); for (int i = 0; i < max_len; i++) { columns[i].lets = (int *)xmalloc(1 * sizeof(columns[i].lets[0])); columns[i].cols = (int *)xmalloc(1 * sizeof(columns[i].cols[0])); columns[i].letserrrate = (double *)xmalloc(1 * sizeof(columns[i].letserrrate[0])); columns[i].colserrrate = (double *)xmalloc(1 * sizeof(columns[i].colserrrate[0])); } if (reset_stats) { cells = invocs = 0; tc.type = DEF_FAST_TIME_COUNTER; tc.counter = 0; } initialized = 1; check = 0; return 1; } int post_sw_cleanup() { for (int i = 0; i < max_len; i++) { free(columns[i].lets); free(columns[i].cols); free(columns[i].letserrrate); free(columns[i].colserrrate); } free(columns); return 1; } int post_sw_stats(uint64_t * _invocs, uint64_t * _cells, double * _secs) { if (_invocs != NULL) *_invocs = invocs; if (_cells != NULL) *_cells = cells; if (_secs != NULL) *_secs = time_counter_get_secs(&tc); return 1; } /* * Extract genome sequence, read, and qvs of interest. */ static void load_local_vectors(uint32_t * read, int _init_bp, char * qual, struct sw_full_results * sfrp) { int start_run, col; int min_qv; int i, j; start_run = 0; min_qv = 10000; for (j = 0; j < sfrp->read_start; j++) { col = EXTRACT(read, j); if (col == BASE_N) { start_run = BASE_N; min_qv = 0; j = sfrp->read_start; break; } start_run ^= col; if (use_read_qvs) min_qv = MIN(min_qv, (int)qual[qual_vector_offset+j]); } len = 0; for (i = 0; sfrp->dbalign[i] != 0; i++) { if (sfrp->qralign[i] != '-') { // ow, it's a deletion; nothing to do if (sfrp->dbalign[i] != '-') { // MATCH columns[len].nlets = 1; columns[len].lets[0] = fasta_get_initial_base(COLOUR_SPACE, &sfrp->dbalign[i]); // => BASE_A/C/G/T columns[len].letserrrate[0] = pr_snp; } else { columns[len].nlets = 0; } // MATCH or INSERTION columns[len].ncols = 1; col = EXTRACT(read, j); if ((len == 0 && start_run == BASE_N) || col == BASE_N) { //columns[len].ncols = 0; // no emission columns[len].cols[0] = BASE_0; columns[len].colserrrate[0] = .75; } else { columns[len].cols[0] = EXTRACT(read, j) ^ (len == 0? start_run : 0); if (use_read_qvs) { columns[len].colserrrate[0] = pr_err_from_qv((len == 0? 
MIN(min_qv, (int)qual[qual_vector_offset + j]) : (int)qual[qual_vector_offset + j]) - qual_delta); if (!use_sanger_qvs) { columns[len].colserrrate[0] /= (1 + columns[len].colserrrate[0]); } if (columns[len].colserrrate[0] > .75) columns[len].colserrrate[0] = .75; } else { columns[len].colserrrate[0] = pr_xover; } } columns[len].base_call = char_to_base(sfrp->qralign[i]); assert(base_to_char(columns[len].base_call, LETTER_SPACE) == toupper(sfrp->qralign[i])); len++; j++; } } init_bp = _init_bp; #ifdef DEBUG_POST_SW int _i; fprintf(stderr, "db: "); for (_i = 0; _i < len; _i++) { fprintf(stderr, " %c", columns[_i].nlets > 0 ? base_to_char(columns[_i].lets[0], LETTER_SPACE) : '-'); } fprintf(stderr, "\n"); fprintf(stderr, "qr: %c", base_to_char(init_bp, LETTER_SPACE)); for (_i = 0; _i < len; _i++) { fprintf(stderr, " %c ", (columns[_i].ncols > 0 ? base_to_char(columns[_i].cols[0], COLOUR_SPACE) : '-')); } fprintf(stderr, "\n"); fprintf(stderr, "qv: "); for (_i = 0; _i < len; _i++) { fprintf(stderr, "%3d ", qv_from_pr_err(columns[_i].colserrrate[0])); } fprintf(stderr, "\n"); #endif } static void get_base_qualities(struct sw_full_results * sfrp) { int i, k; sfrp->qual = (char *)xmalloc((strlen(sfrp->qralign) + 1) * sizeof(sfrp->qual[0])); for (i = 0, k = 0; sfrp->qralign[i] != 0; i++) { if (sfrp->qralign[i] != '-') { int tmp = columns[k].base_call != BASE_N ? qv_from_pr_corr(columns[k].posterior[columns[k].base_call]) : 0; if (tmp > 40) tmp = 40; sfrp->qual[k] = 33 + tmp; // always 33+ in SAM k++; } } assert(k == len); sfrp->qual[k] = 0; } static double get_posterior(struct sw_full_results * sfrp, double total_score) { int i; double res; res = exp(-total_score); // - len * neglogfourth)); for (i = 0; sfrp->dbalign[i] != 0; i++) { if (sfrp->dbalign[i] == '-') { res *= pr_ins_extend; if (i == 0 || sfrp->dbalign[i-1] != '-') { res *= pr_ins_open; } } else if (sfrp->qralign[i] == '-') { res *= pr_del_extend; if (i == 0 || sfrp->qralign[i-1] != '-') { res *= pr_del_open; } } } return res; } /* * Main method, called after full SW. 
*/ void post_sw(uint32_t * read, int _init_bp, char * qual, struct sw_full_results * sfrp) { double total_score; //llint before = rdtsc(), after; TIME_COUNTER_START(tc); invocs++; assert(sfrp != NULL); assert(sfrp->dbalign != NULL); if (!initialized) abort(); #ifdef DEBUG_POST_SW int _i, _j, _last_base, _new_base; char const * spaces = " "; fprintf(stderr, "Post SW\n"); fprintf(stderr, "dbalign: %s%s\n", spaces + strlen(spaces) - sfrp->read_start - 1, sfrp->dbalign); fprintf(stderr, "qralign: %s%s (offset: %d)\n", spaces + strlen(spaces) - sfrp->read_start - 1, sfrp->qralign, sfrp->read_start); fprintf(stderr, "read cs: %c", base_to_char(_init_bp, LETTER_SPACE)); for (_i = 0, _j = 0; _i < (int)sfrp->read_start + (int)strlen(sfrp->qralign); _i++) { if (_j < sfrp->read_start) { fprintf(stderr, "%c", base_to_char(EXTRACT(read, _j), COLOUR_SPACE)); _j++; } else { if (sfrp->qralign[_i - sfrp->read_start] == '-') { fprintf(stderr, "-"); } else { fprintf(stderr, "%c", base_to_char(EXTRACT(read, _j), COLOUR_SPACE)); _j++; } } } fprintf(stderr, "\n"); fprintf(stderr, "read ls: "); _last_base = _init_bp; for (_i = 0, _j = 0; _i < (int)sfrp->read_start + (int)strlen(sfrp->qralign); _i++) { if (_j < sfrp->read_start) { _new_base = cstols(_last_base, EXTRACT(read, _j), false); fprintf(stderr, "%c", base_to_char(_new_base, LETTER_SPACE)); _last_base = _new_base; _j++; } else { if (sfrp->qralign[_i - sfrp->read_start] == '-') { fprintf(stderr, "-"); } else { _new_base = cstols(_last_base, EXTRACT(read, _j), false); fprintf(stderr, "%c", base_to_char(_new_base, LETTER_SPACE)); _last_base = _new_base; _j++; } } } fprintf(stderr, "\n"); fprintf(stderr, "read qv: "); for (_i = 0, _j = 0; _i < (int)sfrp->read_start + (int)strlen(sfrp->qralign); _i++) { if (_j < sfrp->read_start) { fprintf(stderr, "%c", use_read_qvs? qual[_j] : qual_delta + default_qual); _j++; } else { if (sfrp->qralign[_i - sfrp->read_start] == '-') { fprintf(stderr, " "); } else { fprintf(stderr, "%c", use_read_qvs? qual[_j] : qual_delta + default_qual); _j++; } } } fprintf(stderr, "\n"); #endif load_local_vectors(read, _init_bp, qual, sfrp); total_score = forward_backward(columns, len); post_traceback(columns, len, total_score); get_base_qualities(sfrp); sfrp->posterior = get_posterior(sfrp, total_score); #ifdef DEBUG_POST_SW fprintf(stderr, "don: "); for (_i = 0; _i < len; _i++) { fprintf(stderr, " %c", base_to_char(columns[_i].max_posterior, LETTER_SPACE)); } fprintf(stderr, "\n"); fprintf(stderr, "bqv: "); for (_i = 0; _i < len; _i++) { int res = columns[_i].posterior[columns[_i].max_posterior] > 1 - .00000001? 80 : (int)(-10.0*(log(1 - columns[_i].posterior[columns[_i].max_posterior])/log(10.0))); fprintf(stderr, " %3d", res); } fprintf(stderr, "\n"); fprintf(stderr, "qralign: "); for (_i = 0, _j = 0; sfrp->qralign[_i] != 0; _i++) { if (sfrp->qralign[_i] != '-') { fprintf(stderr, " %c", sfrp->qralign[_i]); } } fprintf(stderr, "\n"); fprintf(stderr, "bqv: "); for (_i = 0, _j = 0; sfrp->qralign[_i] != 0; _i++) { if (sfrp->qralign[_i] != '-') { fprintf(stderr, "%3d", (int)(sfrp->qual[_j] - qual_delta)); _j++; } } fprintf(stderr, "\n"); printStates(columns, len, stderr); #endif cells += 16*len; //after = rdtsc(); //ticks += MAX(after - before, 0); TIME_COUNTER_STOP(tc); }
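/*
 * A minimal standalone sketch of the rescaling trick used by do_forwards()
 * and do_backwards() above: the entries are negative log-probabilities, and
 * subtracting the per-column minimum keeps exp(-v) away from underflow while
 * the subtracted amount accumulates in a running scale (cf. forwscale and
 * backscale). Function name and signature are illustrative only.
 */
static double rescale_neglog(double *v, int n, double running_scale) {
  double m = v[0];
  for (int i = 1; i < n; i++)
    if (v[i] < m) m = v[i];   /* smallest -log(p), i.e. the largest p */
  for (int i = 0; i < n; i++)
    v[i] -= m;                /* now min(v) == 0, so exp(-v[i]) <= 1 */
  return running_scale + m;   /* total scale to add back at the end */
}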
ktensor.c
/* This file is part of ParTI!. ParTI! is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. ParTI! is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with ParTI!. If not, see <http://www.gnu.org/licenses/>. */ #include <ParTI.h> #include <stdlib.h> #include <string.h> #include "../error/error.h" int sptNewKruskalTensor(sptKruskalTensor *ktsr, sptIndex nmodes, const sptIndex ndims[], sptIndex rank) { ktsr->nmodes = nmodes; ktsr->rank = rank; ktsr->ndims = (sptIndex*)malloc(nmodes*sizeof(sptIndex)); for(sptIndex i=0; i<nmodes; ++i) ktsr->ndims[i] = ndims[i]; ktsr->lambda = (sptValue*)malloc(rank*sizeof(sptValue)); ktsr->fit = 0.0; return 0; } /** * Shuffle factor matrices row indices. * * @param[in] ktsr Kruskal tensor to be shuffled * @param[out] map_inds is the renumbering mapping * */ void sptKruskalTensorInverseShuffleIndices(sptKruskalTensor * ktsr, sptIndex ** map_inds) { /* Renumber factor matrices rows */ sptIndex new_i; for(sptIndex m=0; m < ktsr->nmodes; ++m) { sptMatrix * mtx = ktsr->factors[m]; sptIndex * mode_map_inds = map_inds[m]; sptValue * tmp_values = malloc(mtx->cap * mtx->stride * sizeof (sptValue)); for(sptIndex i=0; i<mtx->nrows; ++i) { new_i = mode_map_inds[i]; for(sptIndex j=0; j<mtx->ncols; ++j) { tmp_values[i * mtx->stride + j] = mtx->values[new_i * mtx->stride + j]; } } free(mtx->values); mtx->values = tmp_values; } } void sptFreeKruskalTensor(sptKruskalTensor *ktsr) { ktsr->rank = 0; ktsr->fit = 0.0; free(ktsr->ndims); free(ktsr->lambda); for(sptIndex i=0; i<ktsr->nmodes; ++i) sptFreeMatrix(ktsr->factors[i]); free(ktsr->factors); ktsr->nmodes = 0; } double KruskalTensorFit( sptSparseTensor const * const spten, sptValue const * const __restrict lambda, sptMatrix ** mats, sptMatrix ** ata) { sptIndex const nmodes = spten->nmodes; double spten_normsq = SparseTensorFrobeniusNormSquared(spten); // printf("spten_normsq: %lf\n", spten_normsq); double const norm_mats = KruskalTensorFrobeniusNormSquared(nmodes, lambda, ata); // printf("norm_mats: %lf\n", norm_mats); double const inner = SparseKruskalTensorInnerProduct(nmodes, lambda, mats); // printf("inner: %lf\n", inner); double residual = spten_normsq + norm_mats - 2 * inner; // printf("residual: %lf\n", residual); if (residual > 0.0) { residual = sqrt(residual); } double fit = 1 - (residual / sqrt(spten_normsq)); return fit; } // Column-major. /* Compute a Kruskal tensor's norm is compute on "ata"s. 
Check Tammy's sparse */ double KruskalTensorFrobeniusNormSquared( sptIndex const nmodes, sptValue const * const __restrict lambda, sptMatrix ** ata) // ata: column-major { sptIndex const rank = ata[0]->ncols; sptIndex const stride = ata[0]->stride; sptValue * const __restrict tmp_atavals = ata[nmodes]->values; // Column-major double norm_mats = 0; #ifdef PARTI_USE_OPENMP #pragma omp parallel for #endif for(sptIndex x=0; x < rank*stride; ++x) { tmp_atavals[x] = 1.; } /* Compute Hadamard product for all "ata"s */ for(sptIndex m=0; m < nmodes; ++m) { sptValue const * const __restrict atavals = ata[m]->values; #ifdef PARTI_USE_OPENMP #pragma omp parallel for #endif for(sptIndex i=0; i < rank; ++i) { for(sptIndex j=i; j < rank; ++j) { tmp_atavals[j * stride + i] *= atavals[j * stride + i]; } } } /* compute lambda^T * aTa[MAX_NMODES] * lambda, only compute a half of them because of its symmetric */ #ifdef PARTI_USE_OPENMP #pragma omp parallel for reduction(+:norm_mats) #endif for(sptIndex i=0; i < rank; ++i) { norm_mats += tmp_atavals[i+(i*stride)] * lambda[i] * lambda[i]; for(sptIndex j=i+1; j < rank; ++j) { norm_mats += tmp_atavals[i+(j*stride)] * lambda[i] * lambda[j] * 2; } } return fabs(norm_mats); } // Row-major, compute via MTTKRP result (mats[nmodes]) and mats[nmodes-1]. double SparseKruskalTensorInnerProduct( sptIndex const nmodes, sptValue const * const __restrict lambda, sptMatrix ** mats) { sptIndex const rank = mats[0]->ncols; sptIndex const stride = mats[0]->stride; sptIndex const last_mode = nmodes - 1; sptIndex const I = mats[last_mode]->nrows; // printf("mats[nmodes-1]:\n"); // sptDumpMatrix(mats[nmodes-1], stdout); // printf("mats[nmodes]:\n"); // sptDumpMatrix(mats[nmodes], stdout); sptValue const * const last_vals = mats[last_mode]->values; sptValue const * const tmp_vals = mats[nmodes]->values; sptValue * buffer_accum; double inner = 0; double * const __restrict accum = (double *) malloc(rank*sizeof(*accum)); #ifdef PARTI_USE_OPENMP #pragma omp parallel for #endif for(sptIndex r=0; r < rank; ++r) { accum[r] = 0.0; } #ifdef PARTI_USE_OPENMP #pragma omp parallel { int const nthreads = omp_get_num_threads(); #pragma omp master { buffer_accum = (sptValue *)malloc(nthreads * rank * sizeof(sptValue)); for(sptIndex j=0; j < nthreads * rank; ++j) buffer_accum[j] = 0.0; } } #endif #ifdef PARTI_USE_OPENMP #pragma omp parallel { int const tid = omp_get_thread_num(); int const nthreads = omp_get_num_threads(); sptValue * loc_accum = buffer_accum + tid * rank; #pragma omp for for(sptIndex i=0; i < I; ++i) { for(sptIndex r=0; r < rank; ++r) { loc_accum[r] += last_vals[r+(i*stride)] * tmp_vals[r+(i*stride)]; } } #pragma omp for for(sptIndex j=0; j < rank; ++j) { for(int i=0; i < nthreads; ++i) { accum[j] += buffer_accum[i*rank + j]; } } } #else for(sptIndex i=0; i < I; ++i) { for(sptIndex r=0; r < rank; ++r) { accum[r] += last_vals[r+(i*stride)] * tmp_vals[r+(i*stride)]; } } #endif #ifdef PARTI_USE_OPENMP #pragma omp parallel for reduction(+:inner) #endif for(sptIndex r=0; r < rank; ++r) { inner += accum[r] * lambda[r]; } #ifdef PARTI_USE_OPENMP free(buffer_accum); #endif return inner; }
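/*
 * Sketch of the symmetric quadratic form at the heart of
 * KruskalTensorFrobeniusNormSquared() above: for a symmetric column-major M
 * with leading dimension `stride`, lambda^T * M * lambda is the diagonal
 * contribution plus twice the strict upper triangle. Names here are
 * illustrative, not part of ParTI!.
 */
static double symmetric_quadratic_form(const double *M, const double *lambda,
                                       int rank, int stride) {
  double s = 0.0;
  for (int i = 0; i < rank; i++) {
    s += M[i + i * stride] * lambda[i] * lambda[i];         /* diagonal */
    for (int j = i + 1; j < rank; j++)
      s += 2.0 * M[i + j * stride] * lambda[i] * lambda[j]; /* upper half */
  }
  return s;
}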
packet-inl.h
/*! * Copyright (c) 2014 by Contributors * \file packet-inl.h * \brief Generic packet vectorization code */ #ifndef MSHADOW_PACKET_INL_H_ #define MSHADOW_PACKET_INL_H_ #ifdef __APPLE__ #include <stdlib.h> #else #include <malloc.h> #endif #include "./base.h" #include "./tensor.h" #include "./expression.h" namespace mshadow { /*! \brief namespace of packet math*/ namespace packet { enum PacketArch { kPlain, kSSE2, }; #if MSHADOW_USE_SSE #define MSHADOW_DEFAULT_PACKET ::mshadow::packet::kSSE2 #else #define MSHADOW_DEFAULT_PACKET ::mshadow::packet::kPlain #endif // whether packet operator is enabled. /*! * \brief Generic packet type * \tparam DType The data type of the packet. * \tparam Arch the Arch of the packet. */ template<typename DType, PacketArch Arch = MSHADOW_DEFAULT_PACKET> struct Packet; template<PacketArch Arch> struct AlignBytes { static const index_t value = 4; }; } // namespace packet } // namespace mshadow namespace mshadow { namespace packet { /*! * \brief analog to cudaMallocPitch, allocate a aligned space with num_line * lspace cells * \param out_pitch output parameter, the actuall space allocated for each line * \param lspace number of cells required for each line * \param num_line number of lines to be allocated */ inline void* AlignedMallocPitch(size_t *out_pitch, size_t lspace, size_t num_line) { const index_t bits = AlignBytes<MSHADOW_DEFAULT_PACKET>::value; const index_t mask = (1 << bits) - 1; size_t pitch = ((lspace + mask) >> bits) << bits; *out_pitch = pitch; #ifdef _MSC_VER void *res = _aligned_malloc(pitch * num_line, 1 << bits); #else void *res; int ret = posix_memalign(&res, 1 << bits, pitch * num_line); CHECK_EQ(ret, 0) << "AlignedMallocPitch failed"; #endif if (res == NULL) { LOG(FATAL) << "AlignedMallocPitch failed"; } return res; } /*! * \brief free aligned space * \param ptr pointer to space to be freed */ inline void AlignedFree(void *ptr) { #ifdef _MSC_VER _aligned_free(ptr); #else free(ptr); #endif } /*! \brief check if a pointer is aligned */ template<PacketArch Arch> inline bool CheckAlign(size_t pitch) { const index_t bits = AlignBytes<Arch>::value; return !(pitch & ((1 << bits) - 1)); } /*! \brief check if a pointer is aligned */ template<PacketArch Arch> inline bool CheckAlign(void *ptr) { return CheckAlign<Arch>(reinterpret_cast<size_t>(ptr)); } /*! * \brief get upper bound of aligned index of size * \param size size of the array * \param fsize size of float */ template<typename DType, PacketArch Arch> inline index_t UpperAlign(index_t size) { const index_t bits = AlignBytes<MSHADOW_DEFAULT_PACKET>::value; const index_t mask = (1 << bits) - 1; const index_t fsize = sizeof(DType); return (((size * fsize + mask) >> bits) << bits) / fsize; } /*! * \brief get lower bound of aligned index of size * \param size size of the array * \param fsize size of float */ template<typename DType, PacketArch Arch> inline index_t LowerAlign(index_t size) { const index_t bits = AlignBytes<MSHADOW_DEFAULT_PACKET>::value; const index_t fsize = sizeof(DType); return (((size * fsize) >> bits) << bits) / fsize; } /*! * \brief generic Packet operator * \tparam OP The operator * \tparam DType The data type * \tparam Arch The architecture. 
*/ template<typename OP, typename DType, PacketArch Arch> struct PacketOp { static const bool kEnabled = false; }; // specialization of operators template<typename DType, PacketArch Arch> struct PacketOp<op::plus, DType, Arch> { static const bool kEnabled = true; MSHADOW_CINLINE static Packet<DType, Arch> Map(const Packet<DType, Arch>& lhs, const Packet<DType, Arch>& rhs) { return lhs + rhs; } }; template<typename DType, PacketArch Arch> struct PacketOp<op::minus, DType, Arch> { static const bool kEnabled = true; MSHADOW_CINLINE static Packet<DType, Arch> Map(const Packet<DType, Arch>& lhs, const Packet<DType, Arch>& rhs) { return lhs - rhs; } }; template<typename DType, PacketArch Arch> struct PacketOp<op::mul, DType, Arch> { static const bool kEnabled = true; MSHADOW_CINLINE static Packet<DType, Arch> Map(const Packet<DType, Arch>& lhs, const Packet<DType, Arch>& rhs) { return lhs * rhs; } }; template<typename DType, PacketArch Arch> struct PacketOp<op::div, DType, Arch> { static const bool kEnabled = true; MSHADOW_CINLINE static Packet<DType, Arch> Map(const Packet<DType, Arch>& lhs, const Packet<DType, Arch>& rhs) { return lhs / rhs; } }; template<typename DType, PacketArch Arch> struct PacketOp<op::identity, DType, Arch> { static const bool kEnabled = true; MSHADOW_CINLINE static Packet<DType, Arch> Map(const Packet<DType, Arch>& src) { return src; } }; // savers to do storage template<typename SV, typename TFloat, PacketArch Arch> struct Saver{ MSHADOW_CINLINE static void Save(TFloat *dst, const Packet<TFloat, Arch>& src) { Packet<TFloat, Arch> lhs = Packet<TFloat, Arch>::Load(dst); Packet<TFloat, Arch> ans = PacketOp<typename SV::OPType, TFloat, Arch>::Map(lhs, src); ans.Store(dst); } }; template<typename TFloat, PacketArch Arch> struct Saver<sv::saveto, TFloat, Arch> { MSHADOW_CINLINE static void Save(TFloat *dst, const Packet<TFloat, Arch>& src) { src.Store(dst); } }; } // namespace packet } // namespace mshadow #include "packet/plain-inl.h" #if MSHADOW_USE_SSE && !defined(__CUDACC__) #include "packet/sse-inl.h" #endif namespace mshadow { namespace expr { typedef packet::PacketArch PacketArch; // same as plan, but use packet template<typename ExpType, typename DType, PacketArch Arch> class PacketPlan { public: /*! 
* \brief evaluate the expression at index [y][x], * x will be aligned to Packet<DType, Arch>::kSize */ MSHADOW_CINLINE packet::Packet<DType, Arch> EvalPacket(index_t y, index_t x) const; MSHADOW_CINLINE DType Eval(index_t y, index_t x) const; }; template <typename Device, int dim, typename DType, PacketArch Arch> class PacketPlan<Tensor<Device, dim, DType>, DType, Arch> { public: explicit PacketPlan(const Tensor<Device, dim, DType> &t) :dptr_(t.dptr_), stride_(t.stride_) {} MSHADOW_CINLINE packet::Packet<DType, Arch> EvalPacket(index_t y, index_t x) const { return packet::Packet<DType, Arch>::Load(&dptr_[y * stride_ + x]); } MSHADOW_CINLINE DType Eval(index_t y, index_t x) const { return dptr_[y * stride_ + x]; } private: const DType *dptr_; index_t stride_; }; template<typename DType, PacketArch Arch> class PacketPlan<ScalarExp<DType>, DType, Arch> { public: explicit PacketPlan(DType scalar) : scalar_(scalar) {} MSHADOW_CINLINE packet::Packet<DType, Arch> EvalPacket(index_t y, index_t x) const { return packet::Packet<DType, Arch>::Fill(scalar_); } MSHADOW_CINLINE DType Eval(index_t y, index_t x) const { return scalar_; } private: DType scalar_; }; template<typename OP, typename TA, typename TB, int etype, typename DType, PacketArch Arch> class PacketPlan<BinaryMapExp<OP, TA, TB, DType, etype>, DType, Arch> { public: PacketPlan(const PacketPlan<TA, DType, Arch> &lhs, const PacketPlan<TB, DType, Arch> &rhs) : lhs_(lhs), rhs_(rhs) {} MSHADOW_CINLINE packet::Packet<DType, Arch> EvalPacket(index_t y, index_t x) const { return packet::PacketOp<OP, DType, Arch>::Map(lhs_.EvalPacket(y, x), rhs_.EvalPacket(y, x)); } MSHADOW_CINLINE DType Eval(index_t y, index_t x) const { return OP::Map(lhs_.Eval(y, x), rhs_.Eval(y, x)); } private: PacketPlan<TA, DType, Arch> lhs_; PacketPlan<TB, DType, Arch> rhs_; }; template<typename OP, typename TA, int etype, typename DType, PacketArch Arch> class PacketPlan<UnaryMapExp<OP, TA, DType, etype>, DType, Arch> { public: PacketPlan(const PacketPlan<TA, DType, Arch> &src) : src_(src) {} MSHADOW_CINLINE packet::Packet<DType> EvalPacket(index_t y, index_t x) const { return packet::PacketOp<OP, DType, Arch>::Map(src_.EvalPacket(y, x)); } MSHADOW_CINLINE DType Eval(index_t y, index_t x) const { return OP::Map(src_.Eval(y, x)); } private: PacketPlan<TA, DType, Arch> src_; }; template<PacketArch Arch, typename OP, typename TA, typename TB, typename DType, int etype> inline PacketPlan<BinaryMapExp<OP, TA, TB, DType, etype>, DType, Arch> MakePacketPlan(const BinaryMapExp<OP, TA, TB, DType, etype> &e); template<PacketArch Arch, typename DType> inline PacketPlan<ScalarExp<DType>, DType, Arch> MakePacketPlan(const ScalarExp<DType> &e) { return PacketPlan<ScalarExp<DType>, DType, Arch>(e.scalar_); } template<PacketArch Arch, typename T, typename DType> inline PacketPlan<T, DType, Arch> MakePacketPlan(const RValueExp<T, DType> &e) { return PacketPlan<T, DType, Arch>(e.self()); } template<PacketArch Arch, typename T, int dim, typename DType> inline PacketPlan<T, DType, Arch> MakePacketPlan(const MakeTensorExp<T, cpu, dim, DType> &e) { return PacketPlan<T, DType, Arch>(e.real_self()); } template<PacketArch Arch, typename OP, typename TA, typename DType, int etype> inline PacketPlan<UnaryMapExp<OP, TA, DType, etype>, DType, Arch> MakePacketPlan(const UnaryMapExp<OP, TA, DType, etype> &e) { return PacketPlan<UnaryMapExp<OP, TA, DType, etype>, DType, Arch>(MakePacketPlan<Arch>(e.src_)); } template<PacketArch Arch, typename OP, typename TA, typename TB, typename DType, int etype> 
inline PacketPlan<BinaryMapExp<OP, TA, TB, DType, etype>, DType, Arch> MakePacketPlan(const BinaryMapExp<OP, TA, TB, DType, etype> &e) { return PacketPlan<BinaryMapExp<OP, TA, TB, DType, etype>, DType, Arch>(MakePacketPlan<Arch>(e.lhs_), MakePacketPlan<Arch>(e.rhs_)); } /*! * \brief static check packet enable * * \tparam Device the type of Device * \tparam dim dimension of the tensor * \tparam E expression */ template<typename E, PacketArch Arch> struct PacketCheck{ static const bool kPass = false; }; template<PacketArch Arch> struct PacketCheck<float, Arch> { static const bool kPass = true; }; template<PacketArch Arch> struct PacketCheck<double, Arch> { static const bool kPass = true; }; template<typename DType, PacketArch Arch> struct PacketCheck<ScalarExp<DType>, Arch> { static const bool kPass = PacketCheck<DType, Arch>::kPass; }; template<int dim, typename DType, PacketArch Arch> struct PacketCheck<Tensor<cpu, dim, DType>, Arch> { static const bool kPass = PacketCheck<DType, Arch>::kPass; }; template<typename OP, typename TA, typename DType, int etype, PacketArch Arch> struct PacketCheck<UnaryMapExp<OP, TA, DType, etype>, Arch> { static const bool kPass = PacketCheck<TA, Arch>::kPass && packet::PacketOp<OP, DType, Arch>::kEnabled; }; template<typename OP, typename TA, typename TB, typename DType, int etype, PacketArch Arch> struct PacketCheck< BinaryMapExp<OP, TA, TB, DType, etype>, Arch> { static const bool kPass = packet::PacketOp<OP, DType, Arch>::kEnabled && PacketCheck<TA, Arch>::kPass && PacketCheck<TB, Arch>::kPass; }; //---------------------------------------------------- // Check if data is aligned and allow packet operation //---------------------------------------------------- template<int dim, typename E, PacketArch Arch> struct PacketAlignCheck { inline static bool Check(const E &exp) { return false; } }; template<int dim, typename DType, PacketArch Arch> struct PacketAlignCheck<dim, ScalarExp<DType>, Arch> { inline static bool Check(const ScalarExp<DType> &exp) { return true; } }; template<int dim, typename DType, PacketArch Arch> struct PacketAlignCheck<dim, Tensor<cpu, dim, DType>, Arch> { inline static bool Check(const Tensor<cpu, dim, DType> &t) { return packet::CheckAlign<Arch>(t.dptr_) && packet::CheckAlign<Arch>(t.stride_ * sizeof(DType)); } }; template<int dim, typename OP, typename TA, typename DType, int etype, PacketArch Arch> struct PacketAlignCheck<dim, UnaryMapExp<OP, TA, DType, etype>, Arch> { inline static bool Check(const UnaryMapExp<OP, TA, DType, etype> &t) { return PacketAlignCheck<dim, TA, Arch>::Check(t.src_); } }; template<int dim, typename OP, typename TA, typename TB, typename DType, int etype, PacketArch Arch> struct PacketAlignCheck<dim, BinaryMapExp<OP, TA, TB, DType, etype>, Arch> { inline static bool Check(const BinaryMapExp<OP, TA, TB, DType, etype> &t) { return PacketAlignCheck<dim, TA, Arch>::Check(t.lhs_) && PacketAlignCheck<dim, TB, Arch>::Check(t.rhs_); } }; /*! 
* \brief use PacketPlan to compute result */ template<typename SV, typename E, int dim, typename DType, PacketArch Arch> inline void MapPacketPlan(Tensor<cpu, dim, DType> _dst, const expr::PacketPlan<E, DType, Arch>& plan) { Tensor<cpu, 2, DType> dst = _dst.FlatTo2D(); const index_t xlen = packet::LowerAlign<DType, Arch>(dst.size(1)); #if (MSHADOW_USE_CUDA == 0) #pragma omp parallel for #endif for (openmp_index_t y = 0; y < dst.size(0); ++y) { for (index_t x = 0; x < xlen; x += packet::Packet<DType, Arch>::kSize) { packet::Saver<SV, DType, Arch>::Save(&dst[y][x], plan.EvalPacket(y, x)); } for (index_t x = xlen; x < dst.size(1); ++x) { SV::Save(dst[y][x], plan.Eval(y, x)); } } } } // namespace expr } // namespace mshadow #endif // MSHADOW_PACKET_INL_H_
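/*
 * Plain-C sketch of the index arithmetic in UpperAlign()/LowerAlign() above,
 * assuming AlignBytes<...>::value == 4, i.e. 16-byte packets: given an
 * element count and element size, lower_align returns the largest count at
 * or below `size` whose byte extent is a multiple of 16, and upper_align the
 * smallest one at or above it. Function names are illustrative only.
 */
#include <stddef.h>

static size_t lower_align(size_t size, size_t elem_size) {
  const size_t bits = 4;                 /* 1 << 4 == 16-byte alignment */
  return (((size * elem_size) >> bits) << bits) / elem_size;
}

static size_t upper_align(size_t size, size_t elem_size) {
  const size_t bits = 4, mask = (1u << bits) - 1;
  return (((size * elem_size + mask) >> bits) << bits) / elem_size;
}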
gemv_openmp.c
#include <stdlib.h>
#include <stdio.h>
#include "../../support/timer.h"
#include "gemv_utils.h"
#include <sys/mman.h>
#include <omp.h>
#include <stdbool.h>
#include <math.h>

size_t m, m_; // m_ : rows
size_t n, n_; // n_ : cols
size_t k, k_;

void gemv(double* A, double* x, size_t m, size_t n, size_t k, double* b, int t);
void make_hilbert_mat(size_t r, size_t c, size_t r_, size_t c_, double* m);
double sum_vec(double* vec, size_t m, size_t k);

int main(int argc, char *argv[]) {
  if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
    perror("mlockall failed");
    return 0;
  }
  //m_ = 20480; //n_ = 8192; //k_ = 8001;
  m_ = 1001;
  n_ = 1000;
  k_ = 999;
  // Pad each dimension up to a multiple of 4 (note: this always adds at
  // least one element, even when the dimension is already aligned).
  m = m_ + 4 - m_%4;
  n = n_ + 4 - n_%4;
  k = k_ + 4 - k_%4;

  double *A, *b, *b2, *b3, *x;
  A = (double*) malloc(sizeof(double)*m*n);
  b = (double*) malloc(sizeof(double)*m*k);
  b2 = (double*) malloc(sizeof(double)*m*k);
  b3 = (double*) malloc(sizeof(double)*m*k);
  x = (double*) malloc(sizeof(double)*n*k);

  make_hilbert_mat(m, n, m_, n_, A);
  make_hilbert_mat(n, k, n_, k_, x);

  #pragma omp parallel for
  for (size_t p = 0; p < m; p++) {
    for (size_t q = 0; q < k; q++) {
      b[p*k+q] = (double) 0.0;
      b2[p*k+q] = (double) 0.0;
      b3[p*k+q] = (double) 0.0;
    }
  }

  Timer timer;
  start(&timer, 0, 0);
  gemv(A, x, m, n, k, b, 1);
  stop(&timer, 0);

  start(&timer, 1, 0);
  vec_dgemm_opt_c(m, n, k, b2, A, x);
  stop(&timer, 1);

  start(&timer, 2, 0);
  gemv(A, x, m, n, k, b3, 4);
  stop(&timer, 2);

  double sum_1 = sum_vec(b, m, k);
  double sum_2 = sum_vec(b2, m, k);
  double sum_3 = sum_vec(b3, m, k);
  // Exact floating-point equality is a deliberately strict check; reordered
  // summation (e.g. an OpenMP reduction) can fail it even when the math is
  // right.
  bool hwacha_correct = sum_1 == sum_2;
  bool multi_correct = sum_1 == sum_3;

  for (size_t p = 0; p < m; p++) {
    for (size_t q = 0; q < k; q++) {
      if (b[p * k + q] != b2[p * k + q]) {
        printf("%zu %zu : %30.25lf %30.25lf\n", p, q, b[p * k + q], b2[p * k + q]);
      }
    }
  }
  printf("%30.25lf %30.25lf\n", sum_1, sum_2);

  if (hwacha_correct && multi_correct) {
    printf("Both work correctly.\n");
  } else if (multi_correct) {
    printf("Hwacha outputs wrong result!\n");
  } else if (hwacha_correct) {
    printf("Hwacha works correctly, but not multi-threading\n");
  } else {
    printf("Both wrong\n");
  }

  printf("******************************\n");
  printf("CPU ");
  print(&timer, 0, 1);
  printf("\n");
  printf("Hwacha ");
  print(&timer, 1, 1);
  printf("\n");
  printf("4 threads ");
  print(&timer, 2, 1);
  printf("\n");
#if 0
  print_vec(x, m_);
  print_mat(A, m_, n_);
  print_vec(b, m_);
#endif
  free(A);
  free(b);
  free(b2);
  free(b3);
  free(x);
  munlockall();
  return 0;
}

// Despite the name, this computes a dense matrix-matrix product
// b (m x k) += A (m x n) * x (n x k), using t OpenMP threads.
void gemv(double* A, double* x, size_t m, size_t n, size_t k, double* b, int t) {
  omp_set_num_threads(t);
  #pragma omp parallel for
  for (size_t p = 0; p < m; p++) {
    for (size_t q = 0; q < k; q++) {
      for (size_t r = 0; r < n; r++) {
        b[p*k+q] += A[p*n+r]*x[r*k+q];
      }
    }
  }
}

void make_hilbert_mat(size_t r, size_t c, size_t r_, size_t c_, double* m) {
  #pragma omp parallel for
  for (size_t p = 0; p < r; p++) {
    for (size_t q = 0; q < c; q++) {
      if (p < r_ && q < c_) {
        m[p*c+q] = 1 / ((double) p + (double) q + 1.0);
      } else {
        m[p*c+q] = (double) 0.0;
      }
    }
  }
}

double sum_vec(double* vec, size_t m, size_t k) {
  double sum = 0.0;
  #pragma omp parallel for reduction(+:sum)
  for (size_t p = 0; p < m; p++)
    for (size_t q = 0; q < k; q++)
      sum = sum + vec[p*k+q];
  return sum;
}
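/*
 * Sketch only: the padding in main() above (`m = m_ + 4 - m_ % 4`) always
 * adds at least one element, so an already-aligned dimension grows by a full
 * 4. The usual round-up leaves aligned sizes unchanged:
 */
static size_t round_up4(size_t x) {
  return (x + 3) & ~(size_t)3;  /* smallest multiple of 4 that is >= x */
}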
lbfgs_utils.h
/**
 * Copyright (c) 2015 by Contributors
 */
#ifndef DIFACTO_LBFGS_LBFGS_UTILS_H_
#define DIFACTO_LBFGS_LBFGS_UTILS_H_
#include <cstring>
#include <string>
#include <vector>
#include "dmlc/memory_io.h"
#include "dmlc/omp.h"
#include "difacto/base.h"
#include "difacto/sarray.h"
namespace difacto {
namespace lbfgs {

struct Job {
  static const int kPrepareData = 1;
  static const int kInitServer = 2;
  static const int kInitWorker = 3;
  static const int kPushGradient = 4;
  static const int kPrepareCalcDirection = 5;
  static const int kCalcDirection = 6;
  static const int kLineSearch = 7;
  static const int kSaveModel = 8;
  static const int kEvaluate = 9;  // must be distinct from kSaveModel
  int type;
  std::vector<real_t> value;
  void SerializeToString(std::string* str) const {
    dmlc::Stream* ss = new dmlc::MemoryStringStream(str);
    ss->Write(type);
    ss->Write(value);
    delete ss;
  }
  void ParseFromString(const std::string& str) {
    auto copy = str;
    dmlc::Stream* ss = new dmlc::MemoryStringStream(&copy);
    ss->Read(&type);
    ss->Read(&value);
    delete ss;
  }
};

struct Progress {
  real_t objv;     // objective value on training data
  real_t auc;      // auc on training data
  real_t val_auc;  // auc on evaluation data
  real_t nnz_w;    // number of nonzero entries in the model
  void SerializeToVector(std::vector<real_t>* vec) const {
    vec->resize(sizeof(Progress)/sizeof(real_t));
    memcpy(vec->data(), reinterpret_cast<char const*>(this), sizeof(Progress));
  }
  void ParseFromVector(const std::vector<real_t>& vec) {
    CHECK_EQ(sizeof(Progress), vec.size()*sizeof(real_t));
    memcpy(reinterpret_cast<char*>(this), vec.data(), sizeof(Progress));
  }
};

/**
 * \brief return <a, b>
 */
inline double Inner(const SArray<real_t>& a,
                    const SArray<real_t>& b,
                    int nthreads = DEFAULT_NTHREADS) {
  double res = 0;
  CHECK_EQ(a.size(), b.size());
  real_t const *ap = a.data();
  real_t const *bp = b.data();
#pragma omp parallel for reduction(+:res) num_threads(nthreads)
  for (size_t i = 0; i < a.size(); ++i) res += ap[i] * bp[i];
  return res;
}

/**
 * \brief b += x * a
 */
inline void Add(real_t x, const SArray<real_t>& a, SArray<real_t>* b,
                int nthreads = DEFAULT_NTHREADS) {
  CHECK_EQ(a.size(), b->size());
  if (x == 0) return;
  real_t const *ap = a.data();
  real_t *bp = b->data();
  if (x == 1) {
#pragma omp parallel for num_threads(nthreads)
    for (size_t i = 0; i < a.size(); ++i) bp[i] += ap[i];
  } else {
#pragma omp parallel for num_threads(nthreads)
    for (size_t i = 0; i < a.size(); ++i) bp[i] += x * ap[i];
  }
}

/**
 * \brief a *= x
 */
inline void Times(real_t x, SArray<real_t>* a,
                  int nthreads = DEFAULT_NTHREADS) {
  if (x == 1) return;
  real_t *ap = a->data();
#pragma omp parallel for num_threads(nthreads)
  for (size_t i = 0; i < a->size(); ++i) ap[i] *= x;
}

inline void RemoveTailFeatures(const SArray<feaid_t>& feaids,
                               const SArray<real_t>& feacnts,
                               real_t threshold,
                               SArray<feaid_t>* filtered) {
  CHECK_EQ(feaids.size(), feacnts.size());
  size_t n = 0;
  for (size_t i = 0; i < feaids.size(); ++i)
    if (feacnts[i] > threshold) ++n;
  filtered->resize(n);
  feaid_t* f = filtered->data();
  n = 0;
  for (size_t i = 0; i < feaids.size(); ++i) {
    if (feacnts[i] > threshold) f[n++] = feaids[i];
  }
}
}  // namespace lbfgs
}  // namespace difacto
#endif  // DIFACTO_LBFGS_LBFGS_UTILS_H_
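/*
 * Plain-C sketch of the reduction pattern used by Inner() above (float
 * standing in for difacto's real_t, which is an assumption here): each
 * thread accumulates a private partial sum and the OpenMP runtime combines
 * them at the end, so no atomics are needed on the hot path.
 */
static double inner_product(const float *a, const float *b, size_t n) {
  double res = 0;
  #pragma omp parallel for reduction(+:res)
  for (size_t i = 0; i < n; ++i)
    res += (double)a[i] * (double)b[i];
  return res;
}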
Exercice1.c
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

int main(int argc, char **argv){
  int n_iter = 1000;
  bool is_prime[n_iter];
  #pragma omp parallel for
  for(int index = 0; index < n_iter; index++){
    /* rand() is not thread-safe inside a parallel region; rand_r() (POSIX)
       with a per-iteration seed keeps iterations independent. rand_r()
       returns at most RAND_MAX, so the intended 4e9 upper bound is only
       reached on platforms where RAND_MAX is large enough. */
    unsigned int seed = (unsigned int)index + 1;
    long potential_prime = rand_r(&seed) % (4000000000L + 1);
    is_prime[index] = (potential_prime >= 2);  /* 0 and 1 are not prime */
    for (long multiple = 2; multiple < potential_prime; multiple++){
      if ((potential_prime % multiple) == 0){
        is_prime[index] = false;
        break;
      }
    }
  }
  int primes = 0;
  for (int index = 0; index < n_iter; index++)
    if (is_prime[index]) primes++;
  printf("%d of %d candidates are prime\n", primes, n_iter);
  return 0;
}
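/*
 * Sketch: the trial-division loop above can stop at sqrt(p) instead of p,
 * since any composite p has a divisor no larger than its square root. Same
 * answer, far fewer iterations per candidate. Helper name is illustrative.
 */
#include <stdbool.h>

static bool is_prime_trial(long p) {
  if (p < 2) return false;
  for (long d = 2; d * d <= p; d++)  /* d*d <= p  <=>  d <= sqrt(p) */
    if (p % d == 0) return false;
  return true;
}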
mysql_fmt_plug.c
/* MYSQL_half_fmt.c * * Copyright (c) 2008 by <earthquake at rycon.hu> * * John the ripper MYSQL-fast module * * * Note: The mysql hash's first 8byte is relevant, * the another ones depends on the first 8. Maybe * the passwords after 9-10character have collision * in the first 8byte, so we have to check the full * hash. * * Unbelievable good optimization by Péter Kasza * * http://rycon.hu/ * * OpenMP support and other assorted hacks by Solar Designer */ #if FMT_EXTERNS_H extern struct fmt_main fmt_MYSQL_fast; #elif FMT_REGISTERS_H john_register_one(&fmt_MYSQL_fast); #else #include <stdio.h> #include <stdlib.h> #include <string.h> #if !FAST_FORMATS_OMP #undef _OPENMP #endif #ifdef _OPENMP #include <omp.h> #ifdef __MIC__ #ifndef OMP_SCALE #define OMP_SCALE 2048 #endif #else #ifndef OMP_SCALE #define OMP_SCALE 81920 #endif #endif #endif #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "memdbg.h" #define FORMAT_LABEL "mysql" #define FORMAT_NAME "MySQL pre-4.1" #define ALGORITHM_NAME "32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 32 #define CIPHERTEXT_LENGTH 16 #define BINARY_SIZE 4 #define SALT_SIZE 0 #define BINARY_ALIGN sizeof(ARCH_WORD_32) #define SALT_ALIGN 1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 8 static struct fmt_tests tests[] = { // ciphertext, plaintext {"445ff82636a7ba59", "probe"}, {"60671c896665c3fa", "a"}, {"1acbed4a27b20da3", "hash"}, {"77ff75006118bab8", "hacker"}, {"1b38cd9c2f809809", "hacktivity2008"}, {"1b38cd9c2f809809", "hacktivity 2008"}, {"6fc81597422015a8", "johnmodule"}, {"30f098972cc8924d", "http://guh.nu"}, {"3fc56f6037218993", "Andrew Hintz"}, {"697a7de87c5390b2", "drew"}, {"1eb71cf460712b3e", "http://4tphi.net"}, {"28ff8d49159ffbaf", "http://violating.us"}, {"5d2e19393cc5ef67", "password"}, {"5030573512345671", ""}, {"723d80f65bf9d670", "UPPERCASE"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_key)[BINARY_SIZE / 4]; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_key)); } static void done(void) { MEM_FREE(crypt_key); MEM_FREE(saved_key); } static int valid(char* ciphertext, struct fmt_main *self) { unsigned int i; if (strlen(ciphertext) != CIPHERTEXT_LENGTH) return 0; for (i = 0; i < CIPHERTEXT_LENGTH; i++) if (atoi16[ARCH_INDEX(ciphertext[i])] > 15) return 0; return 1; } static char *split(char *ciphertext, int index, struct fmt_main *self) { static char out[CIPHERTEXT_LENGTH + 1]; memcpy(out, ciphertext, CIPHERTEXT_LENGTH); out[CIPHERTEXT_LENGTH] = 0; strlwr(out); return out; } static void *get_binary_size(char *ciphertext, int size) { /* maybe bigger than BINARY_SIZE for use from cmp_exact() */ static ARCH_WORD_32 buff_[8]; unsigned char *buff = (unsigned char *)buff_; unsigned int i; for (i = 0; i < size; i++) { #if ARCH_LITTLE_ENDIAN buff[(i & ~3U) | (3 - (i & 3))] = atoi16[ARCH_INDEX(ciphertext[i * 2])] * 16 + atoi16[ARCH_INDEX(ciphertext[i * 2 + 1])]; #else buff[i] = atoi16[ARCH_INDEX(ciphertext[i * 2])] * 16 + atoi16[ARCH_INDEX(ciphertext[i * 2 + 1])]; #endif } return buff; } static void *get_binary(char *ciphertext) { return get_binary_size(ciphertext, BINARY_SIZE); } static void set_key(char* key, int index) { 
strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1); } static char* get_key(int index) { return saved_key[index]; } static int cmp_one(void* binary, int index) { return *(ARCH_WORD_32 *)binary == crypt_key[index][0]; } static int cmp_all(void* binary, int count) { int i; #ifdef _OPENMP int retval = 0; #pragma omp parallel for default(none) private(i) shared(count, binary, crypt_key, retval) for (i = 0; i < count; i++) if (*(ARCH_WORD_32 *)binary == crypt_key[i][0]) #pragma omp atomic retval |= 1; return retval; #else for (i = 0; i < count; i++) if (*(ARCH_WORD_32 *)binary == crypt_key[i][0]) return 1; return 0; #endif } static int cmp_exact(char* source, int index) { register ARCH_WORD_32 nr = 1345345333, add = 7, nr2 = 0x12345671; register ARCH_WORD_32 tmp; unsigned char *p; p = (unsigned char *)saved_key[index]; for (; *p; p++) { if (*p == ' ' || *p == '\t') continue; tmp = (ARCH_WORD_32)*p; nr ^= (((nr & 63) + add) * tmp) + (nr << 8); nr2 += (nr2 << 8) ^ nr; add += tmp; } #if 0 { char ctmp[CIPHERTEXT_LENGTH + 1]; sprintf(ctmp, "%08x%08x", nr & (((ARCH_WORD_32)1 << 31) - 1), nr2 & (((ARCH_WORD_32)1 << 31) - 1)); return !memcmp(source, ctmp, CIPHERTEXT_LENGTH); } #else { ARCH_WORD_32 *binary = get_binary_size(source, 8); return binary[0] == (nr & (((ARCH_WORD_32)1 << 31) - 1)) && binary[1] == (nr2 & (((ARCH_WORD_32)1 << 31) - 1)); } #endif } static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; int i = 0; #ifdef _OPENMP #pragma omp parallel for default(none) private(i) shared(count, saved_key, crypt_key) #endif #if MAX_KEYS_PER_CRYPT > 1 || defined(_OPENMP) for (i = 0; i < count; i++) #endif { unsigned char *p = (unsigned char *)saved_key[i]; if (*p) { ARCH_WORD_32 nr, add; ARCH_WORD_32 tmp; while (*p == ' ' || *p == '\t') p++; tmp = (ARCH_WORD_32) (unsigned char) *p++; nr = 1345345333 ^ ((((1345345333 & 63) + 7) * tmp) + (1345345333U << 8)); add = 7 + tmp; for (; *p; p++) { if (*p == ' ' || *p == '\t') continue; tmp = (ARCH_WORD_32) (unsigned char) *p; nr ^= (((nr & 63) + add) * tmp) + (nr << 8); add += tmp; } crypt_key[i][0] = (nr & (((ARCH_WORD_32)1 << 31) - 1)); #if MAX_KEYS_PER_CRYPT > 1 || defined(_OPENMP) continue; #else return count; #endif } crypt_key[i][0] = (1345345333 & (((ARCH_WORD_32)1 << 31) - 1)); } return count; } static int get_hash_0(int index) { return crypt_key[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_key[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_key[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_key[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_key[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_key[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_key[index][0] & PH_MASK_6; } struct fmt_main fmt_MYSQL_fast = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #ifdef _OPENMP FMT_OMP | FMT_OMP_BAD | #endif FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE, { NULL }, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, split, get_binary, fmt_default_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, fmt_default_set_salt, 
set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
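/*
 * Standalone sketch of the MySQL pre-4.1 hash, transcribed from cmp_exact()
 * above: two 31-bit mixing registers updated per non-whitespace byte. Handy
 * for checking the format against the vectors in tests[]; the function name
 * is illustrative, not part of this plugin.
 */
#include <stdint.h>

static void mysql_old_hash(const char *key, uint32_t out[2]) {
  uint32_t nr = 1345345333, add = 7, nr2 = 0x12345671, tmp;
  const unsigned char *p;
  for (p = (const unsigned char *)key; *p; p++) {
    if (*p == ' ' || *p == '\t')
      continue;                 /* spaces and tabs are skipped */
    tmp = (uint32_t)*p;
    nr ^= (((nr & 63) + add) * tmp) + (nr << 8);
    nr2 += (nr2 << 8) ^ nr;
    add += tmp;
  }
  out[0] = nr & (((uint32_t)1 << 31) - 1);   /* keep the low 31 bits */
  out[1] = nr2 & (((uint32_t)1 << 31) - 1);
}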
GB_binop__div_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__div_int8 // A.*B function (eWiseMult): GB_AemultB__div_int8 // A*D function (colscale): GB_AxD__div_int8 // D*A function (rowscale): GB_DxB__div_int8 // C+=B function (dense accum): GB_Cdense_accumB__div_int8 // C+=b function (dense accum): GB_Cdense_accumb__div_int8 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__div_int8 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__div_int8 // C=scalar+B GB_bind1st__div_int8 // C=scalar+B' GB_bind1st_tran__div_int8 // C=A+scalar GB_bind2nd__div_int8 // C=A'+scalar GB_bind2nd_tran__div_int8 // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = GB_IDIV_SIGNED (aij, bij, 8) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = GB_IDIV_SIGNED (x, y, 8) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_DIV || GxB_NO_INT8 || GxB_NO_DIV_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__div_int8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__div_int8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__div_int8 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__div_int8 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__div_int8 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__div_int8 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__div_int8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__div_int8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__div_int8 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t bij = Bx [p] ; Cx [p] = GB_IDIV_SIGNED (x, bij, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__div_int8 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t aij = Ax [p] ; Cx [p] = GB_IDIV_SIGNED (aij, y, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = GB_IDIV_SIGNED (x, aij, 8) ; \ } GrB_Info GB_bind1st_tran__div_int8 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int8_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    #undef  GB_ATYPE
    #define GB_ATYPE \
    int8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)

#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int8_t aij = Ax [pA] ; \
    Cx [pC] = GB_IDIV_SIGNED (aij, y, 8) ; \
}

GrB_Info GB_bind2nd_tran__div_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
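//------------------------------------------------------------------------------
// note on GB_IDIV_SIGNED (illustrative sketch, not the library's definition)
//------------------------------------------------------------------------------

// The div kernels above call GB_IDIV_SIGNED so that signed integer division
// is total: plain C `/` is undefined behavior for x/0 and for INT8_MIN / -1.
// The helper below shows one way to guard those two cases.  It is a stand-in
// for the idea only; its zero-division policy (0/0 -> 0, x/0 saturates) is an
// assumption for illustration, not the exact rule GraphBLAS implements.

#include <stdint.h>

static inline int8_t idiv_int8_guarded (int8_t x, int8_t y)
{
    if (y == 0)
    {
        // division by zero: map 0/0 to 0 and x/0 to a saturated "infinity"
        if (x == 0) return ((int8_t) 0) ;
        return ((x > 0) ? INT8_MAX : INT8_MIN) ;
    }
    if (y == -1)
    {
        // INT8_MIN / -1 overflows in C; clamp instead of trapping
        return ((x == INT8_MIN) ? INT8_MIN : (int8_t) (-x)) ;
    }
    return ((int8_t) (x / y)) ;
}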
FEM1D.c
# include <stdlib.h>
# include <stdio.h>
# include <time.h>
# include <stdbool.h>
# include <omp.h>
# include "mpi.h"

# define NSUB 1000000
/* NL must be 2: PHI() and the NODE table assume piecewise linear basis
   functions with two nodes per subinterval (see the Parameters notes below). */
# define NL 2
# define MASTER 0

// Setting these to false suppresses console output, which significantly improves performance.
bool printProcessingOutput = false;
bool printResultOutput = false;

int main(int argc, char *argv[]);
void assemble(double adiag[], double aleft[], double arite[], double f[], double h[], int indx[], int nl, int node[], int nu, int nquad, int nsub, double ul, double ur, double xn[], double xquad[]);
double ff(double x);
void geometry(double h[], int ibc, int indx[], int nl, int node[], int nsub, int *nu, double xl, double xn[], double xquad[], double xr);
void init(int *ibc, int *nquad, double *ul, double *ur, double *xl, double *xr);
void output(double f[], int ibc, int indx[], int nsub, int nu, double ul, double ur, double xn[]);
void phi(int il, double x, double *phii, double *phiix, double xleft, double xrite);
double pp(double x);
void prsys(double adiag[], double aleft[], double arite[], double f[], int nu);
double qq(double x);
void solve(double adiag[], double aleft[], double arite[], double f[], int nu);
void timestamp(void);

int taskid, numtasks, chunksize;

/******************************************************************************/

int main(int argc, char *argv[])

/******************************************************************************/
/*
  Purpose:

    MAIN is the main program for FEM1D.

  Discussion:

    FEM1D solves a one dimensional ODE using the finite element method.

    The differential equation solved is

      - d/dX (P dU/dX) + Q U  =  F

    The finite-element method uses piecewise linear basis functions.

    Here U is an unknown scalar function of X defined on the
    interval [XL,XR], and P, Q and F are given functions of X.

    The values of U or U' at XL and XR are also specified.

    The interval [XL,XR] is "meshed" with NSUB+1 points,

      XN(0) = XL, XN(1)=XL+H, XN(2)=XL+2*H, ..., XN(NSUB)=XR.

    This creates NSUB subintervals, with interval number 1
    having endpoints XN(0) and XN(1), and so on up to interval
    NSUB, which has endpoints XN(NSUB-1) and XN(NSUB).

  Licensing:

    This code is distributed under the GNU LGPL license.

  Modified:

    29 May 2009

  Author:

    C version by John Burkardt

  Parameters:

    double ADIAG(NU), the "diagonal" coefficients.  That is, ADIAG(I) is
    the coefficient of the I-th unknown in the I-th equation.

    double ALEFT(NU), the "left hand" coefficients.  That is, ALEFT(I)
    is the coefficient of the (I-1)-th unknown in the I-th equation.
    There is no value in ALEFT(1), since the first equation
    does not refer to a "0-th" unknown.

    double ARITE(NU).
    ARITE(I) is the "right hand" coefficient of the I-th
    equation in the linear system.  ARITE(I) is the coefficient
    of the (I+1)-th unknown in the I-th equation.  There is
    no value in ARITE(NU) because the NU-th equation does not
    refer to an "NU+1"-th unknown.

    double F(NU).
    ASSEMBLE stores into F the right hand side of the linear
    equations.
    SOLVE replaces those values of F by the solution of the
    linear equations.

    double H(NSUB)
    H(I) is the length of subinterval I.  This code uses
    equal spacing for all the subintervals.

    int IBC.
    IBC declares what the boundary conditions are.
    1, at the left endpoint, U has the value UL,
       at the right endpoint, U' has the value UR.
    2, at the left endpoint, U' has the value UL,
       at the right endpoint, U has the value UR.
    3, at the left endpoint, U has the value UL,
       and at the right endpoint, U has the value UR.
4, at the left endpoint, U' has the value UL, at the right endpoint U' has the value UR. int INDX[NSUB+1]. For a node I, INDX(I) is the index of the unknown associated with node I. If INDX(I) is equal to -1, then no unknown is associated with the node, because a boundary condition fixing the value of U has been applied at the node instead. Unknowns are numbered beginning with 1. If IBC is 2 or 4, then there is an unknown value of U at node 0, which will be unknown number 1. Otherwise, unknown number 1 will be associated with node 1. If IBC is 1 or 4, then there is an unknown value of U at node NSUB, which will be unknown NSUB or NSUB+1, depending on whether there was an unknown at node 0. int NL. The number of basis functions used in a single subinterval. (NL-1) is the degree of the polynomials used. For this code, NL is fixed at 2, meaning that piecewise linear functions are used as the basis. int NODE[NL*NSUB]. For each subinterval I: NODE[0+I*2] is the number of the left node, and NODE[1+I*2] is the number of the right node. int NQUAD. The number of quadrature points used in a subinterval. This code uses NQUAD = 1. int NSUB. The number of subintervals into which the interval [XL,XR] is broken. int NU. NU is the number of unknowns in the linear system. Depending on the value of IBC, there will be NSUB-1, NSUB, or NSUB+1 unknown values, which are the coefficients of basis functions. double UL. If IBC is 1 or 3, UL is the value that U is required to have at X = XL. If IBC is 2 or 4, UL is the value that U' is required to have at X = XL. double UR. If IBC is 2 or 3, UR is the value that U is required to have at X = XR. If IBC is 1 or 4, UR is the value that U' is required to have at X = XR. double XL. XL is the left endpoint of the interval over which the differential equation is being solved. double XN(0:NSUB). XN(I) is the location of the I-th node. XN(0) is XL, and XN(NSUB) is XR. double XQUAD(NSUB) XQUAD(I) is the location of the single quadrature point in interval I. double XR. XR is the right endpoint of the interval over which the differential equation is being solved. */ { // Set up MPI int provided; MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided); omp_set_num_threads(4); MPI_Comm_size(MPI_COMM_WORLD, &numtasks); MPI_Comm_rank(MPI_COMM_WORLD, &taskid); // Allocate all arrays in the heap double *adiag = (double *)malloc(sizeof(double)*(NSUB + 1)); double *aleft = (double *)malloc(sizeof(double)*(NSUB + 1)); double *arite = (double *)malloc(sizeof(double)*(NSUB + 1)); double *f = (double *)malloc(sizeof(double)*(NSUB + 1)); double *h = (double *)malloc(sizeof(double)*NSUB); int ibc; int *indx = (int *)malloc(sizeof(int)*(NSUB + 1)); int *node = (int *)malloc(2*sizeof(int)*(NL * NSUB)); int nquad; int nu; double ul; double ur; double xl; double *xn = (double *)malloc(sizeof(double)*(NSUB + 1)); double *xquad = (double *)malloc(sizeof(double)*NSUB); double xr; // Start timer double start = omp_get_wtime(); // Master thread displays information if (taskid == MASTER) { timestamp(); printf("\n"); printf("FEM1D\n"); printf(" C version\n"); printf("\n"); printf(" Solve the two-point boundary value problem\n"); printf("\n"); printf(" - d/dX (P dU/dX) + Q U = F\n"); printf("\n"); printf(" on the interval [XL,XR], specifying\n"); printf(" the value of U or U' at each end.\n"); printf("\n"); printf(" The interval [XL,XR] is broken into NSUB = %d subintervals\n", NSUB); printf(" Number of basis functions per element is NL = %d\n", NL); } /* Initialize the data. 
*/ init(&ibc, &nquad, &ul, &ur, &xl, &xr); /* Compute the geometric quantities. */ geometry(h, ibc, indx, NL, node, NSUB, &nu, xl, xn, xquad, xr); /* Assemble the linear system. */ assemble(adiag, aleft, arite, f, h, indx, NL, node, nu, nquad, NSUB, ul, ur, xn, xquad); if (taskid == MASTER) { /* Print out the linear system. */ if (printProcessingOutput) prsys(adiag, aleft, arite, f, nu); /* Solve the linear system. */ // Allow master thread to do all solve calculations due to this step taking a // fraction of a second and not worth using MPI for solve(adiag, aleft, arite, f, nu); /* Print out the solution. */ if (printResultOutput) output(f, ibc, indx, NSUB, nu, ul, ur, xn); /* Terminate. */ printf("\n"); printf("FEM1D:\n"); printf(" Normal end of execution.\n"); printf("\n"); timestamp(); double end = omp_get_wtime() - start; printf("Total execution time: %f seconds.\n", end); } MPI_Finalize(); return 0; # undef NL # undef NSUB } /******************************************************************************/ void assemble(double adiag[], double aleft[], double arite[], double f[], double h[], int indx[], int nl, int node[], int nu, int nquad, int nsub, double ul, double ur, double xn[], double xquad[]) /******************************************************************************/ /* Purpose: ASSEMBLE assembles the matrix and right-hand-side of the linear system. Discussion: The linear system has the form: K * C = F that is to be solved for the coefficients C. Numerical integration is used to compute the entries of K and F. Note that a 1 point quadrature rule, which is sometimes used to assemble the matrix and right hand side, is just barely accurate enough for simple problems. If you want better results, you should use a quadrature rule that is more accurate. Licensing: This code is distributed under the GNU LGPL license. Modified: 29 May 2009 Author: C version by John Burkardt Parameters: Output, double ADIAG(NU), the "diagonal" coefficients. That is, ADIAG(I) is the coefficient of the I-th unknown in the I-th equation. Output, double ALEFT(NU), the "left hand" coefficients. That is, ALEFT(I) is the coefficient of the (I-1)-th unknown in the I-th equation. There is no value in ALEFT(1), since the first equation does not refer to a "0-th" unknown. Output, double ARITE(NU). ARITE(I) is the "right hand" coefficient of the I-th equation in the linear system. ARITE(I) is the coefficient of the (I+1)-th unknown in the I-th equation. There is no value in ARITE(NU) because the NU-th equation does not refer to an "NU+1"-th unknown. Output, double F(NU). ASSEMBLE stores into F the right hand side of the linear equations. SOLVE replaces those values of F by the solution of the linear equations. Input, double H(NSUB) H(I) is the length of subinterval I. This code uses equal spacing for all the subintervals. Input, int INDX[NSUB+1]. For a node I, INDX(I) is the index of the unknown associated with node I. If INDX(I) is equal to -1, then no unknown is associated with the node, because a boundary condition fixing the value of U has been applied at the node instead. Unknowns are numbered beginning with 1. If IBC is 2 or 4, then there is an unknown value of U at node 0, which will be unknown number 1. Otherwise, unknown number 1 will be associated with node 1. If IBC is 1 or 4, then there is an unknown value of U at node NSUB, which will be unknown NSUB or NSUB+1, depending on whether there was an unknown at node 0. Input, int NL. The number of basis functions used in a single subinterval. 
(NL-1) is the degree of the polynomials used. For this code, NL is fixed at 2, meaning that piecewise linear functions are used as the basis. Input, int NODE[NL*NSUB]. For each subinterval I: NODE[0+I*2] is the number of the left node, and NODE[1+I*2] is the number of the right node. Input, int NU. NU is the number of unknowns in the linear system. Depending on the value of IBC, there will be NSUB-1, NSUB, or NSUB+1 unknown values, which are the coefficients of basis functions. Input, int NQUAD. The number of quadrature points used in a subinterval. This code uses NQUAD = 1. Input, int NSUB. The number of subintervals into which the interval [XL,XR] is broken. Input, double UL. If IBC is 1 or 3, UL is the value that U is required to have at X = XL. If IBC is 2 or 4, UL is the value that U' is required to have at X = XL. Input, double UR. If IBC is 2 or 3, UR is the value that U is required to have at X = XR. If IBC is 1 or 4, UR is the value that U' is required to have at X = XR. Input, double XL. XL is the left endpoint of the interval over which the differential equation is being solved. Input, double XR. XR is the right endpoint of the interval over which the differential equation is being solved. */ { double aij; double he; int i; int ie; int ig; int il; int iq; int iu; int jg; int jl; int ju; double phii; double phiix; double phij; double phijx; double x; double xleft; double xquade; double xrite; int task; int offset; // Allocate local arrays for each node to work with double *local_adiag = (double *)malloc(sizeof(double)*(nsub + 1)); double *local_aleft = (double *)malloc(sizeof(double)*(nsub + 1)); double *local_arite = (double *)malloc(sizeof(double)*(nsub + 1)); double *local_f = (double *)malloc(sizeof(double)*(nsub + 1)); /* Zero out the arrays that hold the coefficients of the matrix and the right hand side. */ double zeroStart = omp_get_wtime(); // Use OpenMP to speed this step #pragma omp parallel { // Only MASTER needs these arrays so only zeros out on MASTER if (taskid == MASTER) { #pragma omp for for (i = 0; i < nu; i++) { f[i] = 0.0; } #pragma omp for for (i = 0; i < nu; i++) { adiag[i] = 0.0; } #pragma omp for for (i = 0; i < nu; i++) { aleft[i] = 0.0; } #pragma omp for for (i = 0; i < nu; i++) { arite[i] = 0.0; } } // All nodes zero out their arrays #pragma omp for for (i = 0; i < nu; i++) { local_f[i] = 0.0; } #pragma omp for for (i = 0; i < nu; i++) { local_adiag[i] = 0.0; } #pragma omp for for (i = 0; i < nu; i++) { local_aleft[i] = 0.0; } #pragma omp for for (i = 0; i < nu; i++) { local_arite[i] = 0.0; } } double zeroEnd = omp_get_wtime() - zeroStart; if (taskid == MASTER) printf("\n Zeroing out arrays took %f seconds.\n", zeroEnd); /* For interval number IE, */ double assembleStart = omp_get_wtime(); // Set chunksize and offset chunksize = nsub / numtasks; offset = chunksize * taskid; // Project 1 note: // After much trial and error it was discovered that adding all those variables as private made it run much faster. 
// The static schedule makes a very tiny difference // Project 2 note: // Now each node works on their own part of the calculation determined by chunksize and offset #pragma omp parallel for private(ie, iq, il, iu, ig, he, xleft, xrite, xquade, jl, jg, ju, aij, x, phiix, phijx, phii, phij) schedule(static, 1000) for (ie = (chunksize * taskid); ie < ((chunksize * taskid) + chunksize); ie++) { he = h[ie]; xleft = xn[node[0 + ie * 2]]; xrite = xn[node[1 + ie * 2]]; /* consider each quadrature point IQ, */ for (iq = 0; iq < nquad; iq++) { xquade = xquad[ie]; /* and evaluate the integrals associated with the basis functions for the left, and for the right nodes. */ for (il = 1; il <= nl; il++) { ig = node[il - 1 + ie * 2]; iu = indx[ig] - 1; if (0 <= iu) { phi(il, xquade, &phii, &phiix, xleft, xrite); local_f[iu] += he * ff(xquade) * phii; /* Take care of boundary nodes at which U' was specified. */ if (ig == 0) { x = 0.0; local_f[iu] -= pp(x) * ul; } else if (ig == nsub) { x = 1.0; local_f[iu] += pp(x) * ur; } /* Evaluate the integrals that take a product of the basis function times itself, or times the other basis function that is nonzero in this interval. */ for (jl = 1; jl <= nl; jl++) { jg = node[jl - 1 + ie * 2]; ju = indx[jg] - 1; phi(jl, xquade, &phij, &phijx, xleft, xrite); aij = he * (pp(xquade) * phiix * phijx + qq(xquade) * phii * phij); /* If there is no variable associated with the node, then it's a specified boundary value, so we multiply the coefficient times the specified boundary value and subtract it from the right hand side. */ if (ju < 0) { if (jg == 0) { local_f[iu] -= aij * ul; } else if (jg == nsub) { local_f[iu] -= aij * ur; } } /* Otherwise, we add the coefficient we've just computed to the diagonal, or left or right entries of row IU of the matrix. */ else { if (iu == ju) { local_adiag[iu] += aij; } else if (ju < iu) { local_aleft[iu] += aij; } else { local_arite[iu] += aij; } } } } } } } // Combine local arrays into MASTER's array MPI_Reduce(local_f, f, nsub+1, MPI_DOUBLE, MPI_SUM, MASTER, MPI_COMM_WORLD); MPI_Reduce(local_adiag, adiag, nsub+1, MPI_DOUBLE, MPI_SUM, MASTER, MPI_COMM_WORLD); MPI_Reduce(local_aleft, aleft, nsub+1, MPI_DOUBLE, MPI_SUM, MASTER, MPI_COMM_WORLD); MPI_Reduce(local_arite, arite, nsub+1, MPI_DOUBLE, MPI_SUM, MASTER, MPI_COMM_WORLD); // Free local arrays free(local_f); free(local_adiag); free(local_aleft); free(local_arite); if (taskid == MASTER) { double assembleEnd = omp_get_wtime() - assembleStart; printf("\n Assembling arrays took %f seconds.\n", assembleEnd); } return; } /******************************************************************************/ double ff(double x) /******************************************************************************/ /* Purpose: FF evaluates the right hand side function. Discussion: This routine evaluates the function F(X) in the differential equation. -d/dx (p du/dx) + q u = f at the point X. Licensing: This code is distributed under the GNU LGPL license. Modified: 29 May 2009 Author: John Burkardt Parameters: Input, double X, the argument of the function. Output, double FF, the value of the function. 
*/ { double value; value = 0.0; return value; } /******************************************************************************/ void geometry(double h[], int ibc, int indx[], int nl, int node[], int nsub, int *nu, double xl, double xn[], double xquad[], double xr) /******************************************************************************/ /* Purpose: GEOMETRY sets up the geometry for the interval [XL,XR]. Modified: 29 May 2009 Author: C version by John Burkardt Parameters: Output, double H(NSUB) H(I) is the length of subinterval I. This code uses equal spacing for all the subintervals. Input, int IBC. IBC declares what the boundary conditions are. 1, at the left endpoint, U has the value UL, at the right endpoint, U' has the value UR. 2, at the left endpoint, U' has the value UL, at the right endpoint, U has the value UR. 3, at the left endpoint, U has the value UL, and at the right endpoint, U has the value UR. 4, at the left endpoint, U' has the value UL, at the right endpoint U' has the value UR. Output, int INDX[NSUB+1]. For a node I, INDX(I) is the index of the unknown associated with node I. If INDX(I) is equal to -1, then no unknown is associated with the node, because a boundary condition fixing the value of U has been applied at the node instead. Unknowns are numbered beginning with 1. If IBC is 2 or 4, then there is an unknown value of U at node 0, which will be unknown number 1. Otherwise, unknown number 1 will be associated with node 1. If IBC is 1 or 4, then there is an unknown value of U at node NSUB, which will be unknown NSUB or NSUB+1, depending on whether there was an unknown at node 0. Input, int NL. The number of basis functions used in a single subinterval. (NL-1) is the degree of the polynomials used. For this code, NL is fixed at 2, meaning that piecewise linear functions are used as the basis. Output, int NODE[NL*NSUB]. For each subinterval I: NODE[0+I*2] is the number of the left node, and NODE[1+I*2] is the number of the right node. Input, int NSUB. The number of subintervals into which the interval [XL,XR] is broken. Output, int *NU. NU is the number of unknowns in the linear system. Depending on the value of IBC, there will be NSUB-1, NSUB, or NSUB+1 unknown values, which are the coefficients of basis functions. Input, double XL. XL is the left endpoint of the interval over which the differential equation is being solved. Output, double XN(0:NSUB). XN(I) is the location of the I-th node. XN(0) is XL, and XN(NSUB) is XR. Output, double XQUAD(NSUB) XQUAD(I) is the location of the single quadrature point in interval I. Input, double XR. XR is the right endpoint of the interval over which the differential equation is being solved. */ { int i; /* Set the value of XN, the locations of the nodes. */ if (printProcessingOutput) { printf("\n"); printf(" Node Location\n"); printf("\n"); } double nodeLocationsStart = omp_get_wtime(); // This only helps in very high values of NSUB #pragma omp parallel for for (i = 0; i <= nsub; i++) { xn[i] = ((double)(nsub - i) * xl + (double)i * xr) / (double)(nsub); } if (printProcessingOutput) { for (i = 0; i <= nsub; i++) printf(" %8d %14f \n", i, xn[i]); } double nodeLocationsEnd = omp_get_wtime() - nodeLocationsStart; if (taskid == MASTER) { printf("\n Setting node locations took %f seconds.\n", nodeLocationsEnd); if (printProcessingOutput) { printf("\n"); printf("Subint Length\n"); printf("\n"); } } double subintervalLengthsStart = omp_get_wtime(); /* Set the lengths of each subinterval. 
*/ #pragma omp parallel for for (i = 0; i < nsub; i++) { h[i] = xn[i + 1] - xn[i]; } if (printProcessingOutput) { for (i = 0; i < nsub; i++) printf(" %8d %14f\n", i + 1, h[i]); } double subintervalLengthsEnd = omp_get_wtime() - subintervalLengthsStart; if (taskid == MASTER) { printf("\n Setting lengths of each subinterval took %f seconds.\n", subintervalLengthsEnd); if (printProcessingOutput) { printf("\n"); printf("Subint Quadrature point\n"); printf("\n"); } } double quadraturePointsStart = omp_get_wtime(); /* Set the quadrature points, each of which is the midpoint of its subinterval. */ #pragma omp parallel for for (i = 0; i < nsub; i++) { xquad[i] = 0.5 * (xn[i] + xn[i + 1]); } if (printProcessingOutput) { for (i = 0; i < nsub; i++) printf(" %8d %14f\n", i + 1, xquad[i]); } double quadraturePointsEnd = omp_get_wtime() - quadraturePointsStart; if (taskid == MASTER) printf("\n Setting quadrature points took %f seconds.\n", quadraturePointsEnd); double setValueOfNodeStart = omp_get_wtime(); if (printProcessingOutput) { printf("\n"); printf("Subint Left Node Right Node\n"); printf("\n"); } /* Set the value of NODE, which records, for each interval, the node numbers at the left and right. */ // Once again a parallel for only improves processing time with very large NSUB values #pragma omp parallel for for (i = 0; i < nsub; i++) { node[0 + i * 2] = i; node[1 + i * 2] = i + 1; } if (printProcessingOutput) { for (i = 0; i < nsub; i++) printf(" %8d %8d %8d\n", i + 1, node[0 + i * 2], node[1 + i * 2]); } double setValueOfNodeEnd = omp_get_wtime() - setValueOfNodeStart; if (taskid == MASTER) printf("\n Setting value of NODE took %f seconds.\n", setValueOfNodeEnd); /* Starting with node 0, see if an unknown is associated with the node. If so, give it an index. */ double indexUnknownNodesStart = omp_get_wtime(); *nu = 0; /* Handle first node. */ i = 0; if (ibc == 1 || ibc == 3) { indx[i] = -1; } else { *nu = *nu + 1; indx[i] = *nu; } /* Handle nodes 1 through nsub-1 */ for (i = 1; i < nsub; i++) { *nu = *nu + 1; indx[i] = *nu; } /* Handle the last node. /*/ i = nsub; if (ibc == 2 || ibc == 3) { indx[i] = -1; } else { *nu = *nu + 1; indx[i] = *nu; } double indexUnknownNodesEnd = omp_get_wtime() - indexUnknownNodesStart; if (taskid == MASTER) printf("\n Indexing unknown nodes took %f seconds.\n", indexUnknownNodesEnd); if (printProcessingOutput) { printf("\n"); printf(" Number of unknowns NU = %8d\n", *nu); printf("\n"); printf(" Node Unknown\n"); printf("\n"); for (i = 0; i <= nsub; i++) { printf(" %8d %8d\n", i, indx[i]); } } return; } /******************************************************************************/ void init(int *ibc, int *nquad, double *ul, double *ur, double *xl, double *xr) /******************************************************************************/ /* Purpose: INIT assigns values to variables which define the problem. Licensing: This code is distributed under the GNU LGPL license. Modified: 29 May 2009 Author: C version by John Burkardt Parameters: Output, int *IBC. IBC declares what the boundary conditions are. 1, at the left endpoint, U has the value UL, at the right endpoint, U' has the value UR. 2, at the left endpoint, U' has the value UL, at the right endpoint, U has the value UR. 3, at the left endpoint, U has the value UL, and at the right endpoint, U has the value UR. 4, at the left endpoint, U' has the value UL, at the right endpoint U' has the value UR. Output, int *NQUAD. The number of quadrature points used in a subinterval. This code uses NQUAD = 1. 
Output, double *UL. If IBC is 1 or 3, UL is the value that U is required to have at X = XL. If IBC is 2 or 4, UL is the value that U' is required to have at X = XL. Output, double *UR. If IBC is 2 or 3, UR is the value that U is required to have at X = XR. If IBC is 1 or 4, UR is the value that U' is required to have at X = XR. Output, double *XL. XL is the left endpoint of the interval over which the differential equation is being solved. Output, double *XR. XR is the right endpoint of the interval over which the differential equation is being solved. */ { /* IBC declares what the boundary conditions are. */ *ibc = 1; /* NQUAD is the number of quadrature points per subinterval. The program as currently written cannot handle any value for NQUAD except 1. */ *nquad = 1; /* Set the values of U or U' at the endpoints. */ *ul = 0.0; *ur = 1.0; /* Define the location of the endpoints of the interval. */ *xl = 0.0; *xr = 1.0; /* Print out the values that have been set. */ if (printProcessingOutput) { printf("\n"); printf(" The equation is to be solved for\n"); printf(" X greater than XL = %f\n", *xl); printf(" and less than XR = %f\n", *xr); printf("\n"); printf(" The boundary conditions are:\n"); printf("\n"); if (*ibc == 1 || *ibc == 3) { printf(" At X = XL, U = %f\n", *ul); } else { printf(" At X = XL, U' = %f\n", *ul); } if (*ibc == 2 || *ibc == 3) { printf(" At X = XR, U = %f\n", *ur); } else { printf(" At X = XR, U' = %f\n", *ur); } printf("\n"); printf(" Number of quadrature points per element is %d\n", *nquad); } return; } /******************************************************************************/ void output(double f[], int ibc, int indx[], int nsub, int nu, double ul, double ur, double xn[]) /******************************************************************************/ /* Purpose: OUTPUT prints out the computed solution. Discussion: We simply print out the solution vector F, except that, for certain boundary conditions, we are going to have to get the value of the solution at XL or XR by using the specified boundary value. Licensing: This code is distributed under the GNU LGPL license. Modified: 29 May 2009 Author: C version by John Burkardt Parameters: Input, double F(NU). ASSEMBLE stores into F the right hand side of the linear equations. SOLVE replaces those values of F by the solution of the linear equations. Input, int IBC. IBC declares what the boundary conditions are. 1, at the left endpoint, U has the value UL, at the right endpoint, U' has the value UR. 2, at the left endpoint, U' has the value UL, at the right endpoint, U has the value UR. 3, at the left endpoint, U has the value UL, and at the right endpoint, U has the value UR. 4, at the left endpoint, U' has the value UL, at the right endpoint U' has the value UR. Input, int INDX[NSUB+1]. For a node I, INDX(I) is the index of the unknown associated with node I. If INDX(I) is equal to -1, then no unknown is associated with the node, because a boundary condition fixing the value of U has been applied at the node instead. Unknowns are numbered beginning with 1. If IBC is 2 or 4, then there is an unknown value of U at node 0, which will be unknown number 1. Otherwise, unknown number 1 will be associated with node 1. If IBC is 1 or 4, then there is an unknown value of U at node NSUB, which will be unknown NSUB or NSUB+1, depending on whether there was an unknown at node 0. Input, int NSUB. The number of subintervals into which the interval [XL,XR] is broken. Input, int NU. 
NU is the number of unknowns in the linear system. Depending on the value of IBC, there will be NSUB-1, NSUB, or NSUB+1 unknown values, which are the coefficients of basis functions. Input, double UL. If IBC is 1 or 3, UL is the value that U is required to have at X = XL. If IBC is 2 or 4, UL is the value that U' is required to have at X = XL. Input, double UR. If IBC is 2 or 3, UR is the value that U is required to have at X = XR. If IBC is 1 or 4, UR is the value that U' is required to have at X = XR. Input, double XN(0:NSUB). XN(I) is the location of the I-th node. XN(0) is XL, and XN(NSUB) is XR. */ { int i; double u; printf("\n"); printf(" Computed solution coefficients:\n"); printf("\n"); printf(" Node X(I) U(X(I))\n"); printf("\n"); for (i = 0; i <= nsub; i++) { /* If we're at the first node, check the boundary condition. */ if (i == 0) { if (ibc == 1 || ibc == 3) { u = ul; } else { u = f[indx[i] - 1]; } } /* If we're at the last node, check the boundary condition. */ else if (i == nsub) { if (ibc == 2 || ibc == 3) { u = ur; } else { u = f[indx[i] - 1]; } } /* Any other node, we're sure the value is stored in F. */ else { u = f[indx[i] - 1]; } printf(" %8d %8f %14f\n", i, xn[i], u); } return; } /******************************************************************************/ void phi(int il, double x, double *phii, double *phiix, double xleft, double xrite) /******************************************************************************/ /* Purpose: PHI evaluates a linear basis function and its derivative. Discussion: The evaluation is done at a point X in an interval [XLEFT,XRITE]. In this interval, there are just two nonzero basis functions. The first basis function is a line which is 1 at the left endpoint and 0 at the right. The second basis function is 0 at the left endpoint and 1 at the right. Licensing: This code is distributed under the GNU LGPL license. Modified: 29 May 2009 Author: C version by John Burkardt Parameters: Input, int IL, the index of the basis function. 1, the function which is 1 at XLEFT and 0 at XRITE. 2, the function which is 0 at XLEFT and 1 at XRITE. Input, double X, the evaluation point. Output, double *PHII, *PHIIX, the value of the basis function and its derivative at X. Input, double XLEFT, XRITE, the left and right endpoints of the interval. */ { if (xleft <= x && x <= xrite) { if (il == 1) { *phii = (xrite - x) / (xrite - xleft); *phiix = -1.0 / (xrite - xleft); } else { *phii = (x - xleft) / (xrite - xleft); *phiix = 1.0 / (xrite - xleft); } } /* If X is outside of the interval, just set everything to 0. */ else { *phii = 0.0; *phiix = 0.0; } return; } /******************************************************************************/ double pp(double x) /******************************************************************************/ /* Purpose: PP evaluates the function P in the differential equation. Discussion: The function P appears in the differential equation as; - d/dx (p du/dx) + q u = f Licensing: This code is distributed under the GNU LGPL license. Modified: 29 May 2009 Author: John Burkardt Parameters: Input, double X, the argument of the function. Output, double PP, the value of the function. */ { double value; value = 1.0; return value; } /******************************************************************************/ void prsys(double adiag[], double aleft[], double arite[], double f[], int nu) /******************************************************************************/ /* Purpose: PRSYS prints out the tridiagonal linear system. 
Licensing: This code is distributed under the GNU LGPL license. Modified: 29 May 2009 Author: C version by John Burkardt Parameter: Input, double ADIAG(NU), the "diagonal" coefficients. That is, ADIAG(I) is the coefficient of the I-th unknown in the I-th equation. Input, double ALEFT(NU), the "left hand" coefficients. That is, ALEFT(I) is the coefficient of the (I-1)-th unknown in the I-th equation. There is no value in ALEFT(1), since the first equation does not refer to a "0-th" unknown. Input, double ARITE(NU). ARITE(I) is the "right hand" coefficient of the I-th equation in the linear system. ARITE(I) is the coefficient of the (I+1)-th unknown in the I-th equation. There is no value in ARITE(NU) because the NU-th equation does not refer to an "NU+1"-th unknown. Input, double F(NU). ASSEMBLE stores into F the right hand side of the linear equations. SOLVE replaces those values of F by the solution of the linear equations. Input, int NU. NU is the number of unknowns in the linear system. Depending on the value of IBC, there will be NSUB-1, NSUB, or NSUB+1 unknown values, which are the coefficients of basis functions. */ { int i; printf("\n"); printf("Printout of tridiagonal linear system:\n"); printf("\n"); printf("Equation ALEFT ADIAG ARITE RHS\n"); printf("\n"); for (i = 0; i < nu; i++) { printf(" %8d %14f %14f %14f %14f\n", i + 1, aleft[i], adiag[i], arite[i], f[i]); } return; } /******************************************************************************/ double qq(double x) /******************************************************************************/ /* Purpose: QQ evaluates the function Q in the differential equation. Discussion: The function Q appears in the differential equation as: - d/dx (p du/dx) + q u = f Licensing: This code is distributed under the GNU LGPL license. Modified: 29 May 2009 Author: John Burkardt Parameters: Input, double X, the argument of the function. Output, double QQ, the value of the function. */ { double value; value = 0.0; return value; } /******************************************************************************/ void solve(double adiag[], double aleft[], double arite[], double f[], int nu) /******************************************************************************/ /* Purpose: SOLVE solves a tridiagonal matrix system of the form A*x = b. Licensing: This code is distributed under the GNU LGPL license. Modified: 29 May 2009 Author: C version by John Burkardt Parameters: Input/output, double ADIAG(NU), ALEFT(NU), ARITE(NU). On input, ADIAG, ALEFT, and ARITE contain the diagonal, left and right entries of the equations. On output, ADIAG and ARITE have been changed in order to compute the solution. Note that for the first equation, there is no ALEFT coefficient, and for the last, there is no ARITE. So there is no need to store a value in ALEFT(1), nor in ARITE(NU). Input/output, double F(NU). On input, F contains the right hand side of the linear system to be solved. On output, F contains the solution of the linear system. Input, int NU, the number of equations to be solved. */ { double solveStart = omp_get_wtime(); int i; /* Carry out Gauss elimination on the matrix, saving information needed for the backsolve. */ arite[0] = arite[0] / adiag[0]; for (i = 1; i < nu - 1; i++) { adiag[i] = adiag[i] - aleft[i] * arite[i - 1]; arite[i] = arite[i] / adiag[i]; } adiag[nu - 1] = adiag[nu - 1] - aleft[nu - 1] * arite[nu - 2]; /* Carry out the same elimination steps on F that were done to the matrix. 
*/ f[0] = f[0] / adiag[0]; for (i = 1; i < nu; i++) { f[i] = (f[i] - aleft[i] * f[i - 1]) / adiag[i]; } /* And now carry out the steps of "back substitution". */ for (i = nu - 2; 0 <= i; i--) { f[i] = f[i] - arite[i] * f[i + 1]; } double solveEnd = omp_get_wtime() - solveStart; printf("\n Solving took %f seconds.\n", solveEnd); return; } /******************************************************************************/ void timestamp(void) /******************************************************************************/ /* Purpose: TIMESTAMP prints the current YMDHMS date as a time stamp. Example: 31 May 2001 09:45:54 AM Licensing: This code is distributed under the GNU LGPL license. Modified: 24 September 2003 Author: John Burkardt Parameters: None */ { # define TIME_SIZE 40 static char time_buffer[TIME_SIZE]; const struct tm *tm; size_t len; time_t now; now = time(NULL); tm = localtime(&now); len = strftime(time_buffer, TIME_SIZE, "%d %B %Y %I:%M:%S %p", tm); printf("%s\n", time_buffer); return; # undef TIME_SIZE }
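/******************************************************************************/
/*
  Illustrative sketch, not part of FEM1D: the same forward elimination and
  back substitution that SOLVE performs above, extracted into a standalone
  program so the steps can be checked on a tiny system.  The parameter names
  mirror SOLVE's; the 3x3 test system is invented for the example.
*/

#include <stdio.h>

static void thomas_solve(double aleft[], double adiag[], double arite[],
  double f[], int nu)
{
  int i;
/*
  Gauss elimination on the tridiagonal coefficients.
*/
  arite[0] = arite[0] / adiag[0];
  for (i = 1; i < nu - 1; i++)
  {
    adiag[i] = adiag[i] - aleft[i] * arite[i - 1];
    arite[i] = arite[i] / adiag[i];
  }
  adiag[nu - 1] = adiag[nu - 1] - aleft[nu - 1] * arite[nu - 2];
/*
  The same elimination steps applied to the right hand side.
*/
  f[0] = f[0] / adiag[0];
  for (i = 1; i < nu; i++)
  {
    f[i] = (f[i] - aleft[i] * f[i - 1]) / adiag[i];
  }
/*
  Back substitution.
*/
  for (i = nu - 2; 0 <= i; i--)
  {
    f[i] = f[i] - arite[i] * f[i + 1];
  }
}

int main(void)
{
/*
  [ 2 1 0 ; 1 2 1 ; 0 1 2 ] x = [ 3 4 3 ] has solution x = [ 1 1 1 ].
*/
  double aleft[3] = { 0.0, 1.0, 1.0 };
  double adiag[3] = { 2.0, 2.0, 2.0 };
  double arite[3] = { 1.0, 1.0, 0.0 };
  double f[3] = { 3.0, 4.0, 3.0 };

  thomas_solve(aleft, adiag, arite, f, 3);
  printf("x = %f %f %f\n", f[0], f[1], f[2]);
  return 0;
}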
nbnxn_kernel_simd_4xn.c
/* * This file is part of the GROMACS molecular simulation package. * * Copyright (c) 2012,2013, by the GROMACS development team, led by * David van der Spoel, Berk Hess, Erik Lindahl, and including many * others, as listed in the AUTHORS file in the top-level source * directory and at http://www.gromacs.org. * * GROMACS is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 * of the License, or (at your option) any later version. * * GROMACS is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with GROMACS; if not, see * http://www.gnu.org/licenses, or write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * If you want to redistribute modifications to GROMACS, please * consider that scientific software is very special. Version * control is crucial - bugs must be traceable. We will be happy to * consider code for inclusion in the official distribution, but * derived work must not be called official GROMACS. Details are found * in the README & COPYING files - if they are missing, get the * official version at http://www.gromacs.org. * * To help us fund GROMACS development, we humbly ask that you cite * the research papers on the package. Check out http://www.gromacs.org. */ /* * Note: this file was generated by the Verlet kernel generator for * kernel type 4xn. */ #ifdef HAVE_CONFIG_H #include <config.h> #endif #include "typedefs.h" #ifdef GMX_NBNXN_SIMD_4XN #ifdef GMX_NBNXN_HALF_WIDTH_SIMD #define GMX_USE_HALF_WIDTH_SIMD_HERE #endif #include "gmx_simd_macros.h" #include "gmx_simd_vec.h" #if !(GMX_SIMD_WIDTH_HERE == 2 || GMX_SIMD_WIDTH_HERE == 4 || GMX_SIMD_WIDTH_HERE == 8) #error "unsupported SIMD width" #endif #define GMX_SIMD_J_UNROLL_SIZE 1 #include "nbnxn_kernel_simd_4xn.h" #include "../nbnxn_kernel_common.h" #include "gmx_omp_nthreads.h" #include "types/force_flags.h" /*! \brief Kinds of electrostatic treatments in SIMD Verlet kernels */ enum { coultRF, coultTAB, coultTAB_TWIN, coultEWALD, coultEWALD_TWIN, coultNR }; /* Declare and define the kernel function pointer lookup tables. 
*/ static p_nbk_func_ener p_nbk_ener[coultNR][ljcrNR] = { { nbnxn_kernel_simd_4xn_rf_comb_geom_ener, nbnxn_kernel_simd_4xn_rf_comb_lb_ener, nbnxn_kernel_simd_4xn_rf_comb_none_ener, }, { nbnxn_kernel_simd_4xn_tab_comb_geom_ener, nbnxn_kernel_simd_4xn_tab_comb_lb_ener, nbnxn_kernel_simd_4xn_tab_comb_none_ener, }, { nbnxn_kernel_simd_4xn_tab_twin_comb_geom_ener, nbnxn_kernel_simd_4xn_tab_twin_comb_lb_ener, nbnxn_kernel_simd_4xn_tab_twin_comb_none_ener, }, { nbnxn_kernel_simd_4xn_ewald_comb_geom_ener, nbnxn_kernel_simd_4xn_ewald_comb_lb_ener, nbnxn_kernel_simd_4xn_ewald_comb_none_ener, }, { nbnxn_kernel_simd_4xn_ewald_twin_comb_geom_ener, nbnxn_kernel_simd_4xn_ewald_twin_comb_lb_ener, nbnxn_kernel_simd_4xn_ewald_twin_comb_none_ener, }, }; static p_nbk_func_ener p_nbk_energrp[coultNR][ljcrNR] = { { nbnxn_kernel_simd_4xn_rf_comb_geom_energrp, nbnxn_kernel_simd_4xn_rf_comb_lb_energrp, nbnxn_kernel_simd_4xn_rf_comb_none_energrp, }, { nbnxn_kernel_simd_4xn_tab_comb_geom_energrp, nbnxn_kernel_simd_4xn_tab_comb_lb_energrp, nbnxn_kernel_simd_4xn_tab_comb_none_energrp, }, { nbnxn_kernel_simd_4xn_tab_twin_comb_geom_energrp, nbnxn_kernel_simd_4xn_tab_twin_comb_lb_energrp, nbnxn_kernel_simd_4xn_tab_twin_comb_none_energrp, }, { nbnxn_kernel_simd_4xn_ewald_comb_geom_energrp, nbnxn_kernel_simd_4xn_ewald_comb_lb_energrp, nbnxn_kernel_simd_4xn_ewald_comb_none_energrp, }, { nbnxn_kernel_simd_4xn_ewald_twin_comb_geom_energrp, nbnxn_kernel_simd_4xn_ewald_twin_comb_lb_energrp, nbnxn_kernel_simd_4xn_ewald_twin_comb_none_energrp, }, }; static p_nbk_func_noener p_nbk_noener[coultNR][ljcrNR] = { { nbnxn_kernel_simd_4xn_rf_comb_geom_noener, nbnxn_kernel_simd_4xn_rf_comb_lb_noener, nbnxn_kernel_simd_4xn_rf_comb_none_noener, }, { nbnxn_kernel_simd_4xn_tab_comb_geom_noener, nbnxn_kernel_simd_4xn_tab_comb_lb_noener, nbnxn_kernel_simd_4xn_tab_comb_none_noener, }, { nbnxn_kernel_simd_4xn_tab_twin_comb_geom_noener, nbnxn_kernel_simd_4xn_tab_twin_comb_lb_noener, nbnxn_kernel_simd_4xn_tab_twin_comb_none_noener, }, { nbnxn_kernel_simd_4xn_ewald_comb_geom_noener, nbnxn_kernel_simd_4xn_ewald_comb_lb_noener, nbnxn_kernel_simd_4xn_ewald_comb_none_noener, }, { nbnxn_kernel_simd_4xn_ewald_twin_comb_geom_noener, nbnxn_kernel_simd_4xn_ewald_twin_comb_lb_noener, nbnxn_kernel_simd_4xn_ewald_twin_comb_none_noener, }, }; static void reduce_group_energies(int ng, int ng_2log, const real *VSvdw, const real *VSc, real *Vvdw, real *Vc) { const int unrollj = GMX_SIMD_WIDTH_HERE/GMX_SIMD_J_UNROLL_SIZE; const int unrollj_half = unrollj/2; int ng_p2, i, j, j0, j1, c, s; ng_p2 = (1<<ng_2log); /* The size of the x86 SIMD energy group buffer array is: * ng*ng*ng_p2*unrollj_half*simd_width */ for (i = 0; i < ng; i++) { for (j = 0; j < ng; j++) { Vvdw[i*ng+j] = 0; Vc[i*ng+j] = 0; } for (j1 = 0; j1 < ng; j1++) { for (j0 = 0; j0 < ng; j0++) { c = ((i*ng + j1)*ng_p2 + j0)*unrollj_half*unrollj; for (s = 0; s < unrollj_half; s++) { Vvdw[i*ng+j0] += VSvdw[c+0]; Vvdw[i*ng+j1] += VSvdw[c+1]; Vc [i*ng+j0] += VSc [c+0]; Vc [i*ng+j1] += VSc [c+1]; c += unrollj + 2; } } } } } #else /* GMX_NBNXN_SIMD_4XN */ #include "gmx_fatal.h" #endif /* GMX_NBNXN_SIMD_4XN */ void nbnxn_kernel_simd_4xn(nbnxn_pairlist_set_t *nbl_list, const nbnxn_atomdata_t *nbat, const interaction_const_t *ic, int ewald_excl, rvec *shift_vec, int force_flags, int clearF, real *fshift, real *Vc, real *Vvdw) #ifdef GMX_NBNXN_SIMD_4XN { int nnbl; nbnxn_pairlist_t **nbl; int coult; int nb; nnbl = nbl_list->nnbl; nbl = nbl_list->nbl; if (EEL_RF(ic->eeltype) || ic->eeltype == eelCUT) { coult = coultRF; 
} else { if (ewald_excl == ewaldexclTable) { if (ic->rcoulomb == ic->rvdw) { coult = coultTAB; } else { coult = coultTAB_TWIN; } } else { if (ic->rcoulomb == ic->rvdw) { coult = coultEWALD; } else { coult = coultEWALD_TWIN; } } } #pragma omp parallel for schedule(static) num_threads(gmx_omp_nthreads_get(emntNonbonded)) for (nb = 0; nb < nnbl; nb++) { nbnxn_atomdata_output_t *out; real *fshift_p; out = &nbat->out[nb]; if (clearF == enbvClearFYes) { clear_f(nbat, nb, out->f); } if ((force_flags & GMX_FORCE_VIRIAL) && nnbl == 1) { fshift_p = fshift; } else { fshift_p = out->fshift; if (clearF == enbvClearFYes) { clear_fshift(fshift_p); } } if (!(force_flags & GMX_FORCE_ENERGY)) { /* Don't calculate energies */ p_nbk_noener[coult][nbat->comb_rule](nbl[nb], nbat, ic, shift_vec, out->f, fshift_p); } else if (out->nV == 1) { /* No energy groups */ out->Vvdw[0] = 0; out->Vc[0] = 0; p_nbk_ener[coult][nbat->comb_rule](nbl[nb], nbat, ic, shift_vec, out->f, fshift_p, out->Vvdw, out->Vc); } else { /* Calculate energy group contributions */ int i; for (i = 0; i < out->nVS; i++) { out->VSvdw[i] = 0; } for (i = 0; i < out->nVS; i++) { out->VSc[i] = 0; } p_nbk_energrp[coult][nbat->comb_rule](nbl[nb], nbat, ic, shift_vec, out->f, fshift_p, out->VSvdw, out->VSc); reduce_group_energies(nbat->nenergrp, nbat->neg_2log, out->VSvdw, out->VSc, out->Vvdw, out->Vc); } } if (force_flags & GMX_FORCE_ENERGY) { reduce_energies_over_lists(nbat, nnbl, Vvdw, Vc); } } #else { gmx_incons("nbnxn_kernel_simd_4xn called when such kernels " " are not enabled."); } #endif #undef GMX_SIMD_J_UNROLL_SIZE
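/* Illustrative sketch, not GROMACS code: the 2-D function-pointer dispatch
 * used by the p_nbk_ener/p_nbk_energrp/p_nbk_noener tables above, reduced to
 * a minimal standalone example.  One table lookup indexed by (electrostatics
 * kind, LJ combination rule) replaces nested switches.  The enum values and
 * kernel names below are placeholders, not the real coultNR/ljcrNR entries.
 */
#include <stdio.h>

typedef void (*demo_kernel_t)(int n);

enum { demoElecRF, demoElecTAB, demoElecNR };
enum { demoCombGeom, demoCombLB, demoCombNR };

static void demo_rf_geom(int n)  { printf("rf/geom  n=%d\n", n); }
static void demo_rf_lb(int n)    { printf("rf/lb    n=%d\n", n); }
static void demo_tab_geom(int n) { printf("tab/geom n=%d\n", n); }
static void demo_tab_lb(int n)   { printf("tab/lb   n=%d\n", n); }

/* the table is filled once, at compile time, in enum order */
static demo_kernel_t demo_table[demoElecNR][demoCombNR] = {
    { demo_rf_geom,  demo_rf_lb  },
    { demo_tab_geom, demo_tab_lb },
};

int main(void)
{
    /* pick the kernel flavor with one indexed call */
    demo_table[demoElecTAB][demoCombGeom](128);
    return 0;
}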
kernel_launcher.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include <sys/time.h>
#include "homp.h"
#include "stencil3d.h"

#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
extern __global__ void stencil3d_nvgpu_kernel(int start_n, int len_n, long n, long m, long k, int u_dimX, int u_dimY, int u_dimZ, REAL *u, REAL *uold, int radius, int coeff_dimX, REAL *coeff);
#endif

void stencil3d_omp_mdev_off_launcher(omp_offloading_t *off, void *args) {
	struct stencil3d_off_args * iargs = (struct stencil3d_off_args*) args;
	long n = iargs->n;
	long m = iargs->m;
	long k = iargs->k;
	int radius = iargs->radius;
	int num_its = iargs->num_its;
	long u_dimX = iargs->u_dimX;
	long u_dimY = iargs->u_dimY;
	long u_dimZ = iargs->u_dimZ;
	int coeff_dimX = iargs->coeff_dimX;

	omp_data_map_t * map_u = omp_map_get_map(off, iargs->u, -1); /* 1 is for the map u */
	omp_data_map_t * map_uold = omp_map_get_map(off, iargs->uold, -1); /* 2 is for the map uold */
	omp_data_map_t * map_coeff = omp_map_get_map(off, iargs->coeff, -1); /* 3 is for the map coeff */

	REAL * u = (REAL*) map_u->map_dev_wextra_ptr;
	REAL * uold = (REAL*) map_uold->map_dev_wextra_ptr;
	REAL *coeff = (REAL*) map_coeff->map_dev_wextra_ptr;
	coeff = coeff + (2*radius+1) * radius + radius; /* TODO this should be a call to map a host-side address to dev-side address*/
	//NOTE: count is the number of stencil points: 6*radius+1 for the 3D star stencil.
	int count = 6*radius+1;
#ifdef SQUARE_SETNCIL
	count = coeff_dimX * coeff_dimX * coeff_dimX;
#endif

	long it; /* iteration */
#if CORRECTNESS_CHECK
	printf("kernel launcher: u: %X, uold: %X\n", u, uold);
	print_array("u in device: ", "udev", u, n, m, k);
	print_array("uold in device: ", "uolddev", uold, n, m, k);
#endif

	long offset;
	long start;
	long len;
	if (dist_dim == 1) {
		offset = omp_loop_get_range(off, 0, &start, &len);
	} else if (dist_dim == 2) {
		omp_loop_get_range(off, 0, &start, &len);
	} else /* dist_dim == 3 */ {
		omp_loop_get_range(off, 0, &start, &len); /* todo */
		omp_loop_get_range(off, 0, &start, &len); /* todo */
	}
	omp_device_type_t devtype = off->dev->type;

	//printf("dev: %d, offset: %d, length: %d, local start: %d, u: %X, uold: %X, coeff-center: %X\n", off->devseqid, offset, len, start, u, uold, coeff);
	//#pragma omp parallel shared(n, m, radius, coeff, num_its, u_dimX, u_dimY, coeff_dimX) private(it) firstprivate(u, uold)
	for (it = 0; it < num_its; it++) {
#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
		if (devtype == OMP_DEVICE_NVGPU) {
			dim3 threads_per_team(16, 16); //NOTE: not clear whether this can be increased further
			dim3 teams_per_league((len+threads_per_team.x-1)/threads_per_team.x, (m+threads_per_team.y-1)/threads_per_team.y); /* we assume dividable */
			stencil3d_nvgpu_kernel<<<teams_per_league, threads_per_team, 0, off->stream->systream.cudaStream>>>
				(start, len, n, m, k, u_dimX, u_dimY, u_dimZ, u, uold, radius, coeff_dimX, coeff);
		} else
#endif
		if (devtype == OMP_DEVICE_THSIM || devtype == OMP_DEVICE_HOSTCPU) {
#if CORRECTNESS_CHECK
			BEGIN_SERIALIZED_PRINTF(off->devseqid);
			printf("udev: dev: %d, %dX%d\n", off->devseqid, n, m);
			print_array_dev("udev", off->devseqid, "u",(REAL*)u, n, m, k);
			printf("uolddev: dev: %d, %dX%d\n", off->devseqid, uold_0_length, uold_1_length);
			print_array_dev("uolddev", off->devseqid, "uold",(REAL*)uold, uold_0_length, uold_1_length);
			printf("i_start: %d, j_start: %d, n: %d, m: %d, k: %d, uold_0_offset: %d, uold_1_offset: %d\n", i_start, j_start, n, m, k, uold_0_offset, uold_1_offset);
			END_SERIALIZED_PRINTF();
#endif
			//#pragma omp for private(ix, iy, ir)
			int ix, iy, iz, ir;
			for (ix = start; ix < start+len; ix++) {
				REAL * temp_u = &u[(ix+radius)*u_dimY*u_dimZ+radius];
				REAL * temp_uold = &uold[(ix+radius)*u_dimY*u_dimZ+radius];
				for (iy = 0; iy < m; iy++) {
					for (iz = 0; iz < k; iz++) {
						// if (off->devseqid == 0)printf("dev: %d, [%d][%d]:%f\n", off->devseqid, ix, iy, temp_u[0]);
						REAL result = temp_uold[0] * coeff[0];
						/* 2/4 way loop unrolling */
						for (ir = 1; ir <= radius; ir++) {
							result += coeff[ir] * temp_uold[ir];                         //horizontal right
							result += coeff[-ir]* temp_uold[-ir];                        // horizontal left
							result += coeff[-ir*coeff_dimX] * temp_uold[-ir * u_dimY];  //vertical up
							result += coeff[ir*coeff_dimX] * temp_uold[ir * u_dimY];    // vertical bottom
							result += coeff[-ir*coeff_dimX] * temp_uold[-ir * u_dimZ];  //vertical up - z
							result += coeff[ir*coeff_dimX] * temp_uold[ir * u_dimZ];    // vertical bottom - z
#ifdef SQUARE_SETNCIL
							result += coeff[-ir*coeff_dimX-ir] * temp_uold[-ir * u_dimY - ir]; // left upper corner
							result += coeff[-ir*coeff_dimX+ir] * temp_uold[-ir * u_dimY + ir]; // right upper corner
							result += coeff[ir*coeff_dimX-ir] * temp_uold[ir * u_dimY - ir];   // left bottom corner
							result += coeff[ir*coeff_dimX+ir] * temp_uold[ir * u_dimY + ir];   // right bottom corner
							result += coeff[ir*coeff_dimX-ir] * temp_uold[ir * u_dimZ - ir];   // left bottom corner - Z
							result += coeff[ir*coeff_dimX+ir] * temp_uold[ir * u_dimZ + ir];   // right bottom corner - Z
#endif
						}
						*temp_u = result/count;
						temp_u++;
						temp_uold++;
					}//z loop end
				}//y end
			}
		} else {
			fprintf(stderr, "device type is not supported for this call\n");
		}
		pthread_barrier_wait(&off->off_info->inter_dev_barrier);
		if (it % 2 == 0) omp_halo_region_pull(map_u, 0, OMP_DATA_MAP_EXCHANGE_FROM_LEFT_RIGHT);
		else omp_halo_region_pull(map_uold, 0, OMP_DATA_MAP_EXCHANGE_FROM_LEFT_RIGHT);
		REAL * tmp = uold;
		uold = u;
		u = tmp;
	}
}

void stencil3d_omp_mdev_iterate_off_launcher(omp_offloading_t * off, void *args) {
	struct stencil3d_off_args * iargs = (struct stencil3d_off_args*) args;
	long n = iargs->n;
	long m = iargs->m;
	long k = iargs->k;
	int radius = iargs->radius;
	int num_its = iargs->num_its;
	long u_dimX = iargs->u_dimX;
	long u_dimY = iargs->u_dimY;
	long u_dimZ = iargs->u_dimZ;
	int coeff_dimX = iargs->coeff_dimX;
	omp_data_map_t * map_u = omp_map_get_map(off, iargs->u, -1); /* 1 is for the map u */
	omp_data_map_t * map_uold = omp_map_get_map(off, iargs->uold, -1); /* 2 is for the map uold */
	omp_data_map_t * map_coeff = omp_map_get_map(off, iargs->coeff, -1); /* 3 is for the map coeff */
	REAL * u = (REAL*) map_u->map_dev_wextra_ptr;
	REAL * uold = (REAL*) map_uold->map_dev_wextra_ptr;
	REAL *coeff = (REAL*) map_coeff->map_dev_wextra_ptr;
	coeff = coeff + (2*radius+1) * radius + radius; /* TODO this should be a call to map a host-side address to dev-side address*/
	//NOTE: unchanged from the launcher above; count is the number of points in the 3D star stencil.
	int count = 6*radius+1;
#ifdef SQUARE_SETNCIL
	count = coeff_dimX * coeff_dimX * coeff_dimX;
#endif
	long it; /* iteration */
#if CORRECTNESS_CHECK
	printf("kernel launcher: u: %X, uold: %X\n", u, uold);
	print_array("u in device: ", "udev", u, n, m, k);
	print_array("uold in device: ", "uolddev", uold, n, m, k);
#endif
	long offset;
	long start;
	long len;
	if (dist_dim == 1) {
		offset = omp_loop_get_range(off, 0, &start, &len);
	} else if (dist_dim == 2) {
		omp_loop_get_range(off, 0, &start, &len);
	} else /* dist_dim == 3 */ {
		omp_loop_get_range(off, 0, &start, &len); /* todo */
		omp_loop_get_range(off, 0, &start, &len); /* todo */
	}
	omp_device_type_t devtype = off->dev->type;
	//printf("dev: %d, offset: %d, length: %d, local start: %d, u: %X, uold: %X, coeff-center: %X\n", off->devseqid, offset, len, start, u, uold, coeff);
	//#pragma omp parallel shared(n, m, radius, coeff, num_its, u_dimX, u_dimY, coeff_dimX) private(it) firstprivate(u, uold)
#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
	if (devtype == OMP_DEVICE_NVGPU) {
		dim3 threads_per_team(16, 16);
		dim3 teams_per_league((len+threads_per_team.x-1)/threads_per_team.x, (m+threads_per_team.y-1)/threads_per_team.y); /* we assume dividable */
		stencil3d_nvgpu_kernel<<<teams_per_league, threads_per_team, 0, off->stream->systream.cudaStream>>>
			(start, len, n, m, k, u_dimX, u_dimY, u_dimZ, u, uold, radius, coeff_dimX, coeff); //NOTE: same launch as in the launcher above
	} else
#endif
	if (devtype == OMP_DEVICE_THSIM || devtype == OMP_DEVICE_HOSTCPU) {
#if CORRECTNESS_CHECK
		BEGIN_SERIALIZED_PRINTF(off->devseqid);
		printf("udev: dev: %d, %dX%dX%d\n", off->devseqid, n, m, k);
		print_array_dev("udev", off->devseqid, "u",(REAL*)u, n, m, k);
		printf("uolddev: dev: %d, %dX%d\n", off->devseqid, uold_0_length, uold_1_length);
		print_array_dev("uolddev", off->devseqid, "uold",(REAL*)uold, uold_0_length, uold_1_length);
		printf("i_start: %d, j_start: %d, n: %d, m: %d, k: %d, uold_0_offset: %d, uold_1_offset: %d\n", i_start, j_start, n, m, k, uold_0_offset, uold_1_offset);
		END_SERIALIZED_PRINTF();
#endif
		//#pragma omp for private(ix, iy, ir)
		int ix, iy, iz, ir;
		for (ix = start; ix < start + len; ix++) {
			REAL *temp_u = &u[(ix + radius) * u_dimY * u_dimZ + radius];
			REAL *temp_uold = &uold[(ix + radius) * u_dimY * u_dimZ + radius];
			for (iy = 0; iy < m; iy++) {
				for (iz = 0; iz < k; iz++) {
					// if (off->devseqid == 0)printf("dev: %d, [%d][%d]:%f\n", off->devseqid, ix, iy, temp_u[0]);
					REAL result = temp_uold[0] * coeff[0];
					/* 2/4 way loop unrolling */
					for (ir = 1; ir <= radius; ir++) {
						result += coeff[ir] * temp_uold[ir];                          //horizontal right
						result += coeff[-ir] * temp_uold[-ir];                        // horizontal left
						result += coeff[-ir * coeff_dimX] * temp_uold[-ir * u_dimY]; //vertical up
						result += coeff[ir * coeff_dimX] * temp_uold[ir * u_dimY];   // vertical bottom
						result += coeff[-ir * coeff_dimX] * temp_uold[-ir * u_dimZ]; //vertical up - z
						result += coeff[ir * coeff_dimX] * temp_uold[ir * u_dimZ];   // vertical bottom - z
#ifdef SQUARE_SETNCIL
						result += coeff[-ir*coeff_dimX-ir] * temp_uold[-ir * u_dimY - ir]; // left upper corner
						result += coeff[-ir*coeff_dimX+ir] * temp_uold[-ir * u_dimY + ir]; // right upper corner
						result += coeff[ir*coeff_dimX-ir] * temp_uold[ir * u_dimY - ir];   // left bottom corner
						result += coeff[ir*coeff_dimX+ir] * temp_uold[ir * u_dimY + ir];   // right bottom corner
						result += coeff[ir*coeff_dimX-ir] * temp_uold[ir * u_dimZ - ir];   // left bottom corner - z
						result += coeff[ir*coeff_dimX+ir] * temp_uold[ir * u_dimZ + ir];   // right bottom corner - z
#endif
					}
					*temp_u = result / count;
					temp_u++;
					temp_uold++;
				}//z end
			}//y end
		}
	} else {
		fprintf(stderr, "device type is not supported for this call\n");
	}
/*
	pthread_barrier_wait(&off->off_info->inter_dev_barrier);
	omp_halo_region_pull(map_u, 0, OMP_DATA_MAP_EXCHANGE_FROM_LEFT_RIGHT);
*/
}
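/* Illustrative sketch, standalone and not part of the launchers above: the
 * pointer-centering trick behind `coeff = coeff + (2*radius+1)*radius + radius`.
 * Shifting the base of a (2r+1) x (2r+1) row-major array to its center lets
 * the stencil loops index with signed offsets such as coeff[-ir] or
 * coeff[ir * coeff_dimX].  The values below are made up for the demonstration.
 */
#include <stdio.h>

int main(void) {
	enum { RADIUS = 2, DIMX = 2 * RADIUS + 1 };
	double buf[DIMX * DIMX];
	int i;
	for (i = 0; i < DIMX * DIMX; i++) buf[i] = i;

	/* same arithmetic as the launchers: move to the center element */
	double *coeff = buf + (2 * RADIUS + 1) * RADIUS + RADIUS;

	printf("center         : %g\n", coeff[0]);          /* buf[12] */
	printf("left by one    : %g\n", coeff[-1]);         /* buf[11] */
	printf("up by one row  : %g\n", coeff[-1 * DIMX]);  /* buf[7]  */
	printf("down two rows  : %g\n", coeff[2 * DIMX]);   /* buf[22] */
	return 0;
}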
GB_binop__isle_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__isle_int64 // A.*B function (eWiseMult): GB_AemultB__isle_int64 // A*D function (colscale): GB_AxD__isle_int64 // D*A function (rowscale): GB_DxB__isle_int64 // C+=B function (dense accum): GB_Cdense_accumB__isle_int64 // C+=b function (dense accum): GB_Cdense_accumb__isle_int64 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isle_int64 // C=scalar+B GB_bind1st__isle_int64 // C=scalar+B' GB_bind1st_tran__isle_int64 // C=A+scalar GB_bind2nd__isle_int64 // C=A'+scalar GB_bind2nd_tran__isle_int64 // C type: int64_t // A type: int64_t // B,b type: int64_t // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x <= y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLE || GxB_NO_INT64 || GxB_NO_ISLE_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__isle_int64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__isle_int64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__isle_int64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__isle_int64 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *GB_RESTRICT Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__isle_int64 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *GB_RESTRICT Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__isle_int64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__isle_int64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__isle_int64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int64_t bij = Bx [p] ;
        Cx [p] = (x <= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__isle_int64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int64_t aij = Ax [p] ;
        Cx [p] = (aij <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    int64_t aij = Ax [pA] ;         \
    Cx [pC] = (x <= aij) ;          \
}

GrB_Info GB_bind1st_tran__isle_int64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    int64_t aij = Ax [pA] ;         \
    Cx [pC] = (aij <= y) ;          \
}

GrB_Info GB_bind2nd_tran__isle_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
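//------------------------------------------------------------------------------
// Illustrative sketch (not generated code): bind1st vs bind2nd for ISLE
//------------------------------------------------------------------------------

// For a non-commutative operator such as ISLE, bind1st and bind2nd are
// genuinely different kernels: binding x = 2 as the first operand computes
// z = (2 <= aij), while binding y = 2 as the second computes z = (aij <= 2).
// The standalone demo below only mirrors the two loops in
// GB_bind1st__isle_int64 and GB_bind2nd__isle_int64 above; demo_bind_order
// is a hypothetical name, not part of the generated file.

#include <stdint.h>
#include <stdio.h>

int demo_bind_order (void)
{
    int64_t a [3] = {1, 2, 3} ;
    for (int p = 0 ; p < 3 ; p++)
    {
        printf ("aij=%lld  bind1st: %d  bind2nd: %d\n",
            (long long) a [p],
            (int) (2 <= a [p]),     // bind1st: z = (x <= aij)
            (int) (a [p] <= 2)) ;   // bind2nd: z = (aij <= y)
    }
    return (0) ;
}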
ast-dump-openmp-begin-declare-variant_5.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s --check-prefix=C // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++| FileCheck %s --check-prefix=CXX // expected-no-diagnostics int also_before(void) { return 1; } #pragma omp begin declare variant match(implementation={vendor(llvm)}) int also_after(void) { return 0; } int also_before(void) { return 0; } #pragma omp end declare variant int also_after(void) { return 2; } int main(void) { // Should return 0. return (also_after)() + (also_before)() + (&also_after)() + (&also_before)(); } // Make sure: // - we see the specialization in the AST // - we pick the right callees // C: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:7:1> line:5:5 used also_before 'int ({{.*}})' // C-NEXT: | |-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:23, line:7:1> // C-NEXT: | | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:6:3, col:10> // C-NEXT: | | `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:10> 'int' 1 // C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_4:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)} // C-NEXT: | `-DeclRefExpr [[ADDR_5:0x[a-z0-9]*]] <line:13:1> 'int ({{.*}})' Function [[ADDR_6:0x[a-z0-9]*]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})' // C-NEXT: |-FunctionDecl [[ADDR_7:0x[a-z0-9]*]] <line:10:1, col:20> col:5 implicit used also_after 'int ({{.*}})' // C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_8:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)} // C-NEXT: | `-DeclRefExpr [[ADDR_9:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_10:0x[a-z0-9]*]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})' // C-NEXT: |-FunctionDecl [[ADDR_10]] <col:1, line:12:1> line:10:1 also_after[implementation={vendor(llvm)}] 'int ({{.*}})' // C-NEXT: | `-CompoundStmt [[ADDR_11:0x[a-z0-9]*]] <col:22, line:12:1> // C-NEXT: | `-ReturnStmt [[ADDR_12:0x[a-z0-9]*]] <line:11:3, col:10> // C-NEXT: | `-IntegerLiteral [[ADDR_13:0x[a-z0-9]*]] <col:10> 'int' 0 // C-NEXT: |-FunctionDecl [[ADDR_6]] <line:13:1, line:15:1> line:13:1 also_before[implementation={vendor(llvm)}] 'int ({{.*}})' // C-NEXT: | `-CompoundStmt [[ADDR_14:0x[a-z0-9]*]] <col:23, line:15:1> // C-NEXT: | `-ReturnStmt [[ADDR_15:0x[a-z0-9]*]] <line:14:3, col:10> // C-NEXT: | `-IntegerLiteral [[ADDR_16:0x[a-z0-9]*]] <col:10> 'int' 0 // C-NEXT: |-FunctionDecl [[ADDR_17:0x[a-z0-9]*]] prev [[ADDR_7]] <line:18:1, line:20:1> line:18:5 used also_after 'int ({{.*}})' // C-NEXT: | |-CompoundStmt [[ADDR_18:0x[a-z0-9]*]] <col:22, line:20:1> // C-NEXT: | | `-ReturnStmt [[ADDR_19:0x[a-z0-9]*]] <line:19:3, col:10> // C-NEXT: | | `-IntegerLiteral [[ADDR_20:0x[a-z0-9]*]] <col:10> 'int' 2 // C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_21:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)} // C-NEXT: | `-DeclRefExpr [[ADDR_9]] <line:10:1> 'int ({{.*}})' Function [[ADDR_10]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})' // C-NEXT: `-FunctionDecl [[ADDR_22:0x[a-z0-9]*]] <line:22:1, line:28:1> line:22:5 main 'int ({{.*}})' // C-NEXT: `-CompoundStmt [[ADDR_23:0x[a-z0-9]*]] <col:16, line:28:1> // C-NEXT: `-ReturnStmt [[ADDR_24:0x[a-z0-9]*]] <line:24:3, line:27:25> // C-NEXT: `-BinaryOperator [[ADDR_25:0x[a-z0-9]*]] <line:24:10, line:27:25> 'int' '+' // C-NEXT: |-BinaryOperator [[ADDR_26:0x[a-z0-9]*]] <line:24:10, line:26:24> 'int' '+' // C-NEXT: | |-BinaryOperator [[ADDR_27:0x[a-z0-9]*]] <line:24:10, line:25:24> 'int' '+' // C-NEXT: | | |-PseudoObjectExpr 
[[ADDR_28:0x[a-z0-9]*]] <line:24:10, col:23> 'int' // C-NEXT: | | | |-CallExpr [[ADDR_29:0x[a-z0-9]*]] <col:10, col:23> 'int' // C-NEXT: | | | | `-ImplicitCastExpr [[ADDR_30:0x[a-z0-9]*]] <col:10, col:21> 'int (*)({{.*}})' <FunctionToPointerDecay> // C-NEXT: | | | | `-ParenExpr [[ADDR_31:0x[a-z0-9]*]] <col:10, col:21> 'int ({{.*}})' // C-NEXT: | | | | `-DeclRefExpr [[ADDR_32:0x[a-z0-9]*]] <col:11> 'int ({{.*}})' Function [[ADDR_17]] 'also_after' 'int ({{.*}})' // C-NEXT: | | | `-CallExpr [[ADDR_33:0x[a-z0-9]*]] <line:10:1, line:24:23> 'int' // C-NEXT: | | | `-ImplicitCastExpr [[ADDR_34:0x[a-z0-9]*]] <line:10:1> 'int (*)({{.*}})' <FunctionToPointerDecay> // C-NEXT: | | | `-DeclRefExpr [[ADDR_9]] <col:1> 'int ({{.*}})' Function [[ADDR_10]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})' // C-NEXT: | | `-PseudoObjectExpr [[ADDR_35:0x[a-z0-9]*]] <line:25:10, col:24> 'int' // C-NEXT: | | |-CallExpr [[ADDR_36:0x[a-z0-9]*]] <col:10, col:24> 'int' // C-NEXT: | | | `-ImplicitCastExpr [[ADDR_37:0x[a-z0-9]*]] <col:10, col:22> 'int (*)({{.*}})' <FunctionToPointerDecay> // C-NEXT: | | | `-ParenExpr [[ADDR_38:0x[a-z0-9]*]] <col:10, col:22> 'int ({{.*}})' // C-NEXT: | | | `-DeclRefExpr [[ADDR_39:0x[a-z0-9]*]] <col:11> 'int ({{.*}})' Function [[ADDR_0]] 'also_before' 'int ({{.*}})' // C-NEXT: | | `-CallExpr [[ADDR_40:0x[a-z0-9]*]] <line:13:1, line:25:24> 'int' // C-NEXT: | | `-ImplicitCastExpr [[ADDR_41:0x[a-z0-9]*]] <line:13:1> 'int (*)({{.*}})' <FunctionToPointerDecay> // C-NEXT: | | `-DeclRefExpr [[ADDR_5]] <col:1> 'int ({{.*}})' Function [[ADDR_6]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})' // C-NEXT: | `-PseudoObjectExpr [[ADDR_42:0x[a-z0-9]*]] <line:26:10, col:24> 'int' // C-NEXT: | |-CallExpr [[ADDR_43:0x[a-z0-9]*]] <col:10, col:24> 'int' // C-NEXT: | | `-ParenExpr [[ADDR_44:0x[a-z0-9]*]] <col:10, col:22> 'int (*)({{.*}})' // C-NEXT: | | `-UnaryOperator [[ADDR_45:0x[a-z0-9]*]] <col:11, col:12> 'int (*)({{.*}})' prefix '&' cannot overflow // C-NEXT: | | `-DeclRefExpr [[ADDR_46:0x[a-z0-9]*]] <col:12> 'int ({{.*}})' Function [[ADDR_17]] 'also_after' 'int ({{.*}})' // C-NEXT: | `-CallExpr [[ADDR_47:0x[a-z0-9]*]] <line:10:1, line:26:24> 'int' // C-NEXT: | `-ImplicitCastExpr [[ADDR_48:0x[a-z0-9]*]] <line:10:1> 'int (*)({{.*}})' <FunctionToPointerDecay> // C-NEXT: | `-DeclRefExpr [[ADDR_9]] <col:1> 'int ({{.*}})' Function [[ADDR_10]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})' // C-NEXT: `-PseudoObjectExpr [[ADDR_49:0x[a-z0-9]*]] <line:27:10, col:25> 'int' // C-NEXT: |-CallExpr [[ADDR_50:0x[a-z0-9]*]] <col:10, col:25> 'int' // C-NEXT: | `-ParenExpr [[ADDR_51:0x[a-z0-9]*]] <col:10, col:23> 'int (*)({{.*}})' // C-NEXT: | `-UnaryOperator [[ADDR_52:0x[a-z0-9]*]] <col:11, col:12> 'int (*)({{.*}})' prefix '&' cannot overflow // C-NEXT: | `-DeclRefExpr [[ADDR_53:0x[a-z0-9]*]] <col:12> 'int ({{.*}})' Function [[ADDR_0]] 'also_before' 'int ({{.*}})' // C-NEXT: `-CallExpr [[ADDR_54:0x[a-z0-9]*]] <line:13:1, line:27:25> 'int' // C-NEXT: `-ImplicitCastExpr [[ADDR_55:0x[a-z0-9]*]] <line:13:1> 'int (*)({{.*}})' <FunctionToPointerDecay> // C-NEXT: `-DeclRefExpr [[ADDR_5]] <col:1> 'int ({{.*}})' Function [[ADDR_6]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})' // CXX: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:7:1> line:5:5 used also_before 'int ({{.*}})' // CXX-NEXT: | |-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:23, line:7:1> // CXX-NEXT: | | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:6:3, col:10> // CXX-NEXT: | | `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] 
<col:10> 'int' 1 // CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_4:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)} // CXX-NEXT: | `-DeclRefExpr [[ADDR_5:0x[a-z0-9]*]] <line:13:1> 'int ({{.*}})' Function [[ADDR_6:0x[a-z0-9]*]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})' // CXX-NEXT: |-FunctionDecl [[ADDR_7:0x[a-z0-9]*]] <line:10:1, col:20> col:5 implicit used also_after 'int ({{.*}})' // CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_8:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)} // CXX-NEXT: | `-DeclRefExpr [[ADDR_9:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_10:0x[a-z0-9]*]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})' // CXX-NEXT: |-FunctionDecl [[ADDR_10]] <col:1, line:12:1> line:10:1 also_after[implementation={vendor(llvm)}] 'int ({{.*}})' // CXX-NEXT: | `-CompoundStmt [[ADDR_11:0x[a-z0-9]*]] <col:22, line:12:1> // CXX-NEXT: | `-ReturnStmt [[ADDR_12:0x[a-z0-9]*]] <line:11:3, col:10> // CXX-NEXT: | `-IntegerLiteral [[ADDR_13:0x[a-z0-9]*]] <col:10> 'int' 0 // CXX-NEXT: |-FunctionDecl [[ADDR_6]] <line:13:1, line:15:1> line:13:1 also_before[implementation={vendor(llvm)}] 'int ({{.*}})' // CXX-NEXT: | `-CompoundStmt [[ADDR_14:0x[a-z0-9]*]] <col:23, line:15:1> // CXX-NEXT: | `-ReturnStmt [[ADDR_15:0x[a-z0-9]*]] <line:14:3, col:10> // CXX-NEXT: | `-IntegerLiteral [[ADDR_16:0x[a-z0-9]*]] <col:10> 'int' 0 // CXX-NEXT: |-FunctionDecl [[ADDR_17:0x[a-z0-9]*]] prev [[ADDR_7]] <line:18:1, line:20:1> line:18:5 used also_after 'int ({{.*}})' // CXX-NEXT: | |-CompoundStmt [[ADDR_18:0x[a-z0-9]*]] <col:22, line:20:1> // CXX-NEXT: | | `-ReturnStmt [[ADDR_19:0x[a-z0-9]*]] <line:19:3, col:10> // CXX-NEXT: | | `-IntegerLiteral [[ADDR_20:0x[a-z0-9]*]] <col:10> 'int' 2 // CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_21:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)} // CXX-NEXT: | `-DeclRefExpr [[ADDR_9]] <line:10:1> 'int ({{.*}})' Function [[ADDR_10]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})' // CXX-NEXT: `-FunctionDecl [[ADDR_22:0x[a-z0-9]*]] <line:22:1, line:28:1> line:22:5 main 'int ({{.*}})' // CXX-NEXT: `-CompoundStmt [[ADDR_23:0x[a-z0-9]*]] <col:16, line:28:1> // CXX-NEXT: `-ReturnStmt [[ADDR_24:0x[a-z0-9]*]] <line:24:3, line:27:25> // CXX-NEXT: `-BinaryOperator [[ADDR_25:0x[a-z0-9]*]] <line:24:10, line:27:25> 'int' '+' // CXX-NEXT: |-BinaryOperator [[ADDR_26:0x[a-z0-9]*]] <line:24:10, line:26:24> 'int' '+' // CXX-NEXT: | |-BinaryOperator [[ADDR_27:0x[a-z0-9]*]] <line:24:10, line:25:24> 'int' '+' // CXX-NEXT: | | |-PseudoObjectExpr [[ADDR_28:0x[a-z0-9]*]] <line:24:10, col:23> 'int' // CXX-NEXT: | | | |-CallExpr [[ADDR_29:0x[a-z0-9]*]] <col:10, col:23> 'int' // CXX-NEXT: | | | | `-ImplicitCastExpr [[ADDR_30:0x[a-z0-9]*]] <col:10, col:21> 'int (*)({{.*}})' <FunctionToPointerDecay> // CXX-NEXT: | | | | `-ParenExpr [[ADDR_31:0x[a-z0-9]*]] <col:10, col:21> 'int ({{.*}})' lvalue // CXX-NEXT: | | | | `-DeclRefExpr [[ADDR_32:0x[a-z0-9]*]] <col:11> 'int ({{.*}})' {{.*}}Function [[ADDR_17]] 'also_after' 'int ({{.*}})' // CXX-NEXT: | | | `-CallExpr [[ADDR_33:0x[a-z0-9]*]] <line:10:1, line:24:23> 'int' // CXX-NEXT: | | | `-ImplicitCastExpr [[ADDR_34:0x[a-z0-9]*]] <line:10:1> 'int (*)({{.*}})' <FunctionToPointerDecay> // CXX-NEXT: | | | `-DeclRefExpr [[ADDR_9]] <col:1> 'int ({{.*}})' Function [[ADDR_10]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})' // CXX-NEXT: | | `-PseudoObjectExpr [[ADDR_35:0x[a-z0-9]*]] <line:25:10, col:24> 'int' // CXX-NEXT: | | |-CallExpr 
[[ADDR_36:0x[a-z0-9]*]] <col:10, col:24> 'int' // CXX-NEXT: | | | `-ImplicitCastExpr [[ADDR_37:0x[a-z0-9]*]] <col:10, col:22> 'int (*)({{.*}})' <FunctionToPointerDecay> // CXX-NEXT: | | | `-ParenExpr [[ADDR_38:0x[a-z0-9]*]] <col:10, col:22> 'int ({{.*}})' lvalue // CXX-NEXT: | | | `-DeclRefExpr [[ADDR_39:0x[a-z0-9]*]] <col:11> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'int ({{.*}})' // CXX-NEXT: | | `-CallExpr [[ADDR_40:0x[a-z0-9]*]] <line:13:1, line:25:24> 'int' // CXX-NEXT: | | `-ImplicitCastExpr [[ADDR_41:0x[a-z0-9]*]] <line:13:1> 'int (*)({{.*}})' <FunctionToPointerDecay> // CXX-NEXT: | | `-DeclRefExpr [[ADDR_5]] <col:1> 'int ({{.*}})' Function [[ADDR_6]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})' // CXX-NEXT: | `-PseudoObjectExpr [[ADDR_42:0x[a-z0-9]*]] <line:26:10, col:24> 'int' // CXX-NEXT: | |-CallExpr [[ADDR_43:0x[a-z0-9]*]] <col:10, col:24> 'int' // CXX-NEXT: | | `-ParenExpr [[ADDR_44:0x[a-z0-9]*]] <col:10, col:22> 'int (*)({{.*}})' // CXX-NEXT: | | `-UnaryOperator [[ADDR_45:0x[a-z0-9]*]] <col:11, col:12> 'int (*)({{.*}})' prefix '&' cannot overflow // CXX-NEXT: | | `-DeclRefExpr [[ADDR_46:0x[a-z0-9]*]] <col:12> 'int ({{.*}})' {{.*}}Function [[ADDR_17]] 'also_after' 'int ({{.*}})' // CXX-NEXT: | `-CallExpr [[ADDR_47:0x[a-z0-9]*]] <line:10:1, line:26:24> 'int' // CXX-NEXT: | `-ImplicitCastExpr [[ADDR_48:0x[a-z0-9]*]] <line:10:1> 'int (*)({{.*}})' <FunctionToPointerDecay> // CXX-NEXT: | `-DeclRefExpr [[ADDR_9]] <col:1> 'int ({{.*}})' Function [[ADDR_10]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})' // CXX-NEXT: `-PseudoObjectExpr [[ADDR_49:0x[a-z0-9]*]] <line:27:10, col:25> 'int' // CXX-NEXT: |-CallExpr [[ADDR_50:0x[a-z0-9]*]] <col:10, col:25> 'int' // CXX-NEXT: | `-ParenExpr [[ADDR_51:0x[a-z0-9]*]] <col:10, col:23> 'int (*)({{.*}})' // CXX-NEXT: | `-UnaryOperator [[ADDR_52:0x[a-z0-9]*]] <col:11, col:12> 'int (*)({{.*}})' prefix '&' cannot overflow // CXX-NEXT: | `-DeclRefExpr [[ADDR_53:0x[a-z0-9]*]] <col:12> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'int ({{.*}})' // CXX-NEXT: `-CallExpr [[ADDR_54:0x[a-z0-9]*]] <line:13:1, line:27:25> 'int' // CXX-NEXT: `-ImplicitCastExpr [[ADDR_55:0x[a-z0-9]*]] <line:13:1> 'int (*)({{.*}})' <FunctionToPointerDecay> // CXX-NEXT: `-DeclRefExpr [[ADDR_5]] <col:1> 'int ({{.*}})' Function [[ADDR_6]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})'
raytracing.c
/*====================================================================== CToy Raytracing sample (CPU) ------------------------------------------------------------------------ Copyright (c) 2015-2017 Anael Seghezzi <www.maratis3d.com> This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. ========================================================================*/ #define USE_3D_NOISE /* raymarch 3d noise */ #include <ctoy.h> #define STB_PERLIN_IMPLEMENTATION #include "../util/stb_perlin.h" static struct m_image test_buffer = M_IMAGE_IDENTITY(); #define GET_RAY(ray, px, py, pz, hw, hh, ratio)\ {\ float3 pt = {((float)px - hw) / hw, (-((float)py - hh) / hh) * ratio, pz};\ float ptl = 1.0f / M_LENGHT3(pt);\ ray.x = pt.x * ptl;\ ray.y = pt.y * ptl;\ ray.z = pt.z * ptl;\ } static void draw(void) { float *data = (float *)test_buffer.data; int w = test_buffer.width; int h = test_buffer.height; int y; int test_t = ctoy_t(); float3 sphere_pos; float3 light_dir; float z_near = 1e-4; float ambient = 0.05f; float sphere_radius2; float sphere_tex_unit; float hw = w * 0.5f; float hh = h * 0.5f; float ratio = (float)ctoy_window_height() / (float)ctoy_window_width(); /* light */ light_dir.x = 0.5f; light_dir.y = 0.25f; light_dir.z = -0.5f; /* sphere */ sphere_radius2 = 150; sphere_pos.x = cosf(test_t * 0.04f) * 10.0f; sphere_pos.y = 0.0f; sphere_pos.z = 30.0f + (sinf(test_t * 0.04f) + 1.0f) * 4.0f; sphere_tex_unit = 0.25f; /* clear */ memset(test_buffer.data, 0, test_buffer.size * sizeof(float)); /* raytrace */ #pragma omp parallel for schedule(dynamic, 8) for (y = 0; y < h; y++) { float *pixel = data + y * w * 3; int x; for (x = 0; x < w; x++) { float3 origin = {0, 0, 0}; float3 ray, march_dir; float march_step = 0.25f; float idist, dist = 0, Z = 1e20; /* get ray from pixel position */ GET_RAY(ray, x, y, 1.35f, hw, hh, ratio); march_dir.x = ray.x * march_step; march_dir.y = ray.y * march_step; march_dir.z = ray.z * march_step; /* sphere */ m_3d_ray_sphere_intersection_in_out(&origin, &ray, &sphere_pos, sphere_radius2, &dist, &idist); if (dist > z_near) { if (dist < Z) { float3 rd = {ray.x * dist, ray.y * dist, ray.z * dist}; float3 pos = {origin.x + rd.x, origin.y + rd.y, origin.z + rd.z}; /* simple sphere */ #ifndef USE_3D_NOISE { float3 normal; float diffuse; M_SUB3(normal, pos, sphere_pos); M_NORMALIZE3(normal, normal); diffuse = M_DOT3(normal, light_dir); diffuse = M_MAX(0, diffuse); pixel[0] = ambient + diffuse; pixel[1] = ambient + diffuse; pixel[2] = ambient + diffuse; Z = dist; // noise as texture { float3 vcoord = { (pos.x - sphere_pos.x) * sphere_tex_unit, (pos.y - sphere_pos.y) * sphere_tex_unit, (pos.z - sphere_pos.z) * sphere_tex_unit }; float perlin = stb_perlin_noise3(vcoord.x, vcoord.y, vcoord.z, 0, 0, 0) * 0.5f + 0.5f; pixel[0] *= perlin > 0.6; pixel[1] 
*= perlin > 0.5; pixel[2] *= perlin > 0.3; } } /* volumetric ray marching inside a sphere (perlin noise test) */ #else { float3 march = pos; /* starting at sphere intersection */ int i = 0; for (i = 0; i < 32; i++) { float3 vcoord = { (march.x - sphere_pos.x) * sphere_tex_unit, (march.y - sphere_pos.y) * sphere_tex_unit, (march.z - sphere_pos.z) * sphere_tex_unit }; float perlin = stb_perlin_noise3(vcoord.x, vcoord.y, vcoord.z, 0, 0, 0); if (perlin > -0.2f) { /* render */ float3 normal; float diffuse; if (i == 0) { /* sphere normal */ M_SUB3(normal, pos, sphere_pos); M_NORMALIZE3(normal, normal); } else { /* volume normal */ float3 vco = {vcoord.x + 0.001f, vcoord.y + 0.001f, vcoord.z + 0.001f}; normal.x = perlin - stb_perlin_noise3(vco.x, vcoord.y, vcoord.z, 0, 0, 0); normal.y = perlin - stb_perlin_noise3(vcoord.x, vco.y, vcoord.z, 0, 0, 0); normal.z = perlin - stb_perlin_noise3(vcoord.x, vcoord.y, vco.z, 0, 0, 0); M_NORMALIZE3(normal, normal); } diffuse = M_DOT3(normal, light_dir); diffuse = M_MAX(0, diffuse); pixel[0] = ambient + diffuse; pixel[1] = ambient + diffuse; pixel[2] = ambient + diffuse; Z = dist; break; } /* march */ M_ADD3(march, march, march_dir); dist += march_step; if (dist > idist) /* out of the sphere */ break; } } #endif } } pixel += 3; } } } void ctoy_begin(void) { printf("Raytracing\n"); ctoy_window_title("Raytracing"); ctoy_window_size(512, 512); m_image_create(&test_buffer, M_FLOAT, 256, 256, 3); } void ctoy_end(void) { m_image_destroy(&test_buffer); } void ctoy_main_loop(void) { draw(); ctoy_swap_buffer(&test_buffer); }
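/*
 * Illustrative sketch (not part of the sample): the GET_RAY macro above,
 * rewritten as a plain function for readability. It assumes float3 and the
 * M_LENGHT3 length macro come from the ctoy/maratis math headers already
 * included by this file; demo_get_ray is a hypothetical name. The pixel is
 * mapped to a view-plane point at depth pz and normalized into a unit ray.
 */
static float3 demo_get_ray(float px, float py, float pz,
                           float hw, float hh, float ratio)
{
  float3 pt = {(px - hw) / hw, (-(py - hh) / hh) * ratio, pz};
  float ptl = 1.0f / M_LENGHT3(pt); /* reciprocal of the vector length */
  float3 ray = {pt.x * ptl, pt.y * ptl, pt.z * ptl};
  return ray;
}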
2.c
/*
 * Write a program that defines two parallel regions whose execution depends
 * on the conditional clause #pragma omp parallel if(...): if the requested
 * number of threads is greater than 1, the parallel region executes;
 * otherwise it does not. Set the number of threads to 3 before the first
 * region and to 1 before the second. Inside each parallel region, determine
 * the number of threads and the id of each thread, and print the result.
 * Verify that the program works correctly.
 */
#include <stdio.h>
#include <omp.h>

int main(int argc, char *argv[])
{
    omp_set_dynamic(0);

    omp_set_num_threads(3);
    #pragma omp parallel if (omp_get_max_threads() > 1)
    {
        if (omp_in_parallel())
            printf("Thread number %d, number of threads: %d\n",
                   omp_get_thread_num(), omp_get_num_threads());
    }

    omp_set_num_threads(1);
    #pragma omp parallel if (omp_get_max_threads() > 1)
    {
        if (omp_in_parallel())
            printf("Thread number %d, number of threads: %d\n",
                   omp_get_thread_num(), omp_get_num_threads());
    }

    return 0;
}
declare_target-1.c
/* { dg-do run } */ #include <stdlib.h> #define THRESHOLD 20 #pragma omp declare target int fib (int n) { if (n <= 0) return 0; else if (n == 1) return 1; else return fib (n - 1) + fib (n - 2); } #pragma omp end declare target int fib_wrapper (int n) { int x = 0; #pragma omp target if(n > THRESHOLD) map(from:x) x = fib (n); return x; } int main () { if (fib (15) != fib_wrapper (15)) abort (); if (fib (25) != fib_wrapper (25)) abort (); return 0; }
convolution_pack1ton_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convolution_pack1ton_fp16s_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt) { const int packn = csrr_vlenb() / 2; const word_type vl = vsetvl_e16m1(packn); int w = bottom_blob.w; int channels = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int maxk = kernel_w * kernel_h; // kernel offsets std::vector<int> _space_ofs(maxk); int* space_ofs = &_space_ofs[0]; { int p1 = 0; int p2 = 0; int gap = w * dilation_h - kernel_w * dilation_w; for (int i = 0; i < kernel_h; i++) { for (int j = 0; j < kernel_w; j++) { space_ofs[p1] = p2; p1++; p2 += dilation_w; } p2 += gap; } } const float* bias_data_ptr = bias_data; // num_output #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { __fp16* outptr = top_blob.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { vfloat32m2_t _sum = vfmv_v_f_f32m2(0.f, vl); if (bias_data_ptr) { _sum = vle32_v_f32m2(bias_data_ptr + p * packn, vl); } const __fp16* kptr = weight_data_fp16.channel(p); // channels for (int q = 0; q < channels; q++) { const Mat m = bottom_blob.channel(q); const __fp16* sptr = m.row<const __fp16>(i * stride_h) + j * stride_w; for (int k = 0; k < maxk; k++) { float val = (float)sptr[space_ofs[k]]; vfloat16m1_t _w = vle16_v_f16m1(kptr, vl); _sum = vfwmacc_vf_f32m2(_sum, val, _w, vl); kptr += packn; } } _sum = activation_ps(_sum, activation_type, activation_params, vl); vse16_v_f16m1(outptr + j * packn, vfncvt_f_f_w_f16m1(_sum, vl), vl); } outptr += outw * packn; } } } static void convolution_pack1ton_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data_fp16, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt) { const int packn = csrr_vlenb() / 2; const word_type vl = vsetvl_e16m1(packn); int w = bottom_blob.w; int channels = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int maxk = kernel_w * kernel_h; // kernel offsets std::vector<int> _space_ofs(maxk); int* space_ofs = &_space_ofs[0]; { int p1 = 0; int p2 = 0; int gap = w * dilation_h - kernel_w * dilation_w; for (int i = 0; i < kernel_h; i++) { for (int j = 0; j < kernel_w; j++) { space_ofs[p1] = p2; p1++; p2 += dilation_w; } p2 += gap; } } const __fp16* bias_data_ptr = bias_data_fp16; // num_output #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { __fp16* outptr = top_blob.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { 
vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl); if (bias_data_ptr) { _sum = vle16_v_f16m1(bias_data_ptr + p * packn, vl); } const __fp16* kptr = weight_data_fp16.channel(p); // channels for (int q = 0; q < channels; q++) { const Mat m = bottom_blob.channel(q); const __fp16* sptr = m.row<const __fp16>(i * stride_h) + j * stride_w; for (int k = 0; k < maxk; k++) { __fp16 val = sptr[space_ofs[k]]; vfloat16m1_t _w = vle16_v_f16m1(kptr, vl); _sum = vfmacc_vf_f16m1(_sum, val, _w, vl); kptr += packn; } } _sum = activation_ps(_sum, activation_type, activation_params, vl); vse16_v_f16m1(outptr + j * packn, _sum, vl); } outptr += outw * packn; } } }
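// Illustrative sketch (not part of ncnn): the space_ofs construction used by
// both kernels above, extracted standalone. For an input row pitch w=8 and a
// 3x3 kernel with dilation 2 it yields the flattened offsets each kernel tap
// reads relative to the top-left sample: 0 2 4 16 18 20 32 34 36.
// demo_space_ofs and main are hypothetical names.
#include <stdio.h>

static void demo_space_ofs(int* space_ofs, int w, int kernel_w, int kernel_h,
                           int dilation_w, int dilation_h)
{
    int p1 = 0;
    int p2 = 0;
    int gap = w * dilation_h - kernel_w * dilation_w;
    for (int i = 0; i < kernel_h; i++)
    {
        for (int j = 0; j < kernel_w; j++)
        {
            space_ofs[p1++] = p2;
            p2 += dilation_w;
        }
        p2 += gap; // jump to the start of the next dilated kernel row
    }
}

int main()
{
    int ofs[9];
    demo_space_ofs(ofs, 8, 3, 3, 2, 2);
    for (int k = 0; k < 9; k++)
        printf("%d ", ofs[k]);
    printf("\n");
    return 0;
}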
loop-13.c
/* { dg-do run } */ volatile int ji = 100, ki = 2; volatile unsigned int ju = 100, ku = 2; volatile long long int jll = 100, kll = 2; volatile unsigned long long int jull = 100, kull = 2; unsigned long long l; void f0 (void) { int i, j, k; unsigned int j2, k2; #pragma omp for reduction(+: l) for (i = ji; i < ki; i++) l++; if (l != 0) __builtin_abort (); #pragma omp parallel for reduction(+: l) for (i = ji; i < ki; i++) l++; if (l != 0) __builtin_abort (); #pragma omp for reduction(+: l) collapse(3) for (j = 0; j < 4; j++) for (i = ji; i < ki; i++) for (k = 0; k < 5; k += 2) l++; if (l != 0) __builtin_abort (); #pragma omp parallel for reduction(+: l) collapse(3) for (j = 0; j < 4; j++) for (i = ji; i < ki; i++) for (k = 0; k < 5; k += 2) l++; if (l != 0) __builtin_abort (); #pragma omp for reduction(+: l) collapse(3) for (j2 = 0; j2 < 4; j2++) for (i = ji; i < ki; i++) for (k2 = 0; k2 < 5; k2 += 2) l++; if (l != 0) __builtin_abort (); #pragma omp parallel for reduction(+: l) collapse(3) for (j2 = 0; j2 < 4; j2++) for (i = ji; i < ki; i++) for (k2 = 0; k2 < 5; k2 += 2) l++; if (l != 0) __builtin_abort (); #pragma omp parallel for reduction(+: l) collapse(3) for (j = 0; j < 4; j++) for (i = ji; i < ki; i++) for (k = ki + 10; k < ji - 10; k += 2) l++; if (l != 0) __builtin_abort (); #pragma omp parallel for reduction(+: l) collapse(3) for (j = ki + 10; j < ji - 10; j++) for (i = ji; i < ki; i++) for (k = 0; k < 5; k += 2) l++; if (l != 0) __builtin_abort (); } void f1 (void) { unsigned int i, j, k; int j2, k2; #pragma omp for reduction(+: l) for (i = ju; i < ku; i++) l++; if (l != 0) __builtin_abort (); #pragma omp parallel for reduction(+: l) for (i = ju; i < ku; i++) l++; if (l != 0) __builtin_abort (); #pragma omp for reduction(+: l) collapse(3) for (j = 0; j < 4; j++) for (i = ju; i < ku; i++) for (k = 0; k < 5; k += 2) l++; if (l != 0) __builtin_abort (); #pragma omp parallel for reduction(+: l) collapse(3) for (j = 0; j < 4; j++) for (i = ju; i < ku; i++) for (k = 0; k < 5; k += 2) l++; if (l != 0) __builtin_abort (); #pragma omp for reduction(+: l) collapse(3) for (j2 = 0; j2 < 4; j2++) for (i = ju; i < ku; i++) for (k2 = 0; k2 < 5; k2 += 2) l++; if (l != 0) __builtin_abort (); #pragma omp parallel for reduction(+: l) collapse(3) for (j2 = 0; j2 < 4; j2++) for (i = ju; i < ku; i++) for (k2 = 0; k2 < 5; k2 += 2) l++; if (l != 0) __builtin_abort (); #pragma omp parallel for reduction(+: l) collapse(3) for (j = 0; j < 4; j++) for (i = ju; i < ku; i++) for (k = ku; k < ju; k += 2) l++; if (l != 0) __builtin_abort (); #pragma omp parallel for reduction(+: l) collapse(3) for (j = ku; j < ju; j++) for (i = ju; i < ku; i++) for (k = 0; k < 5; k += 2) l++; if (l != 0) __builtin_abort (); } void f2 (void) { long long int i, j, k; unsigned long long int j2, k2; #pragma omp for reduction(+: l) for (i = jll; i < kll; i++) l++; if (l != 0) __builtin_abort (); #pragma omp parallel for reduction(+: l) for (i = jll; i < kll; i++) l++; if (l != 0) __builtin_abort (); #pragma omp for reduction(+: l) collapse(3) for (j = 0; j < 4; j++) for (i = jll; i < kll; i++) for (k = 0; k < 5; k += 2) l++; if (l != 0) __builtin_abort (); #pragma omp parallel for reduction(+: l) collapse(3) for (j = 0; j < 4; j++) for (i = jll; i < kll; i++) for (k = 0; k < 5; k += 2) l++; if (l != 0) __builtin_abort (); #pragma omp for reduction(+: l) collapse(3) for (j2 = 0; j2 < 4; j2++) for (i = jll; i < kll; i++) for (k2 = 0; k2 < 5; k2 += 2) l++; if (l != 0) __builtin_abort (); #pragma omp parallel for reduction(+: l) 
collapse(3) for (j2 = 0; j2 < 4; j2++) for (i = jll; i < kll; i++) for (k2 = 0; k2 < 5; k2 += 2) l++; if (l != 0) __builtin_abort (); #pragma omp parallel for reduction(+: l) collapse(3) for (j = 0; j < 4; j++) for (i = jll; i < kll; i++) for (k = kll; k < jll; k += 2) l++; if (l != 0) __builtin_abort (); #pragma omp parallel for reduction(+: l) collapse(3) for (j = kll; j < jll; j++) for (i = jll; i < kll; i++) for (k = 0; k < 5; k += 2) l++; if (l != 0) __builtin_abort (); } void f3 (void) { unsigned long long int i, j, k; long long int j2, k2; #pragma omp for reduction(+: l) for (i = jull; i < kull; i++) l++; if (l != 0) __builtin_abort (); #pragma omp parallel for reduction(+: l) for (i = jull; i < kull; i++) l++; if (l != 0) __builtin_abort (); #pragma omp for reduction(+: l) collapse(3) for (j = 0; j < 4; j++) for (i = jull; i < kull; i++) for (k = 0; k < 5; k += 2) l++; if (l != 0) __builtin_abort (); #pragma omp parallel for reduction(+: l) collapse(3) for (j = 0; j < 4; j++) for (i = jull; i < kull; i++) for (k = 0; k < 5; k += 2) l++; if (l != 0) __builtin_abort (); #pragma omp for reduction(+: l) collapse(3) for (j2 = 0; j2 < 4; j2++) for (i = jull; i < kull; i++) for (k2 = 0; k2 < 5; k2 += 2) l++; if (l != 0) __builtin_abort (); #pragma omp parallel for reduction(+: l) collapse(3) for (j2 = 0; j2 < 4; j2++) for (i = jull; i < kull; i++) for (k2 = 0; k2 < 5; k2 += 2) l++; if (l != 0) __builtin_abort (); #pragma omp parallel for reduction(+: l) collapse(3) for (j = 0; j < 4; j++) for (i = jull; i < kull; i++) for (k = kull; k < jull; k += 2) l++; if (l != 0) __builtin_abort (); #pragma omp parallel for reduction(+: l) collapse(3) for (j = kull; j < jull; j++) for (i = jull; i < kull; i++) for (k = 0; k < 5; k += 2) l++; if (l != 0) __builtin_abort (); } int main () { f0 (); f1 (); f2 (); f3 (); return 0; }
matrix.c
#include "matrix.h" #include <stddef.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <omp.h> // Include SSE intrinsics #if defined(_MSC_VER) #include <intrin.h> #elif defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__)) #include <immintrin.h> #include <x86intrin.h> #endif /* Below are some intel intrinsics that might be useful * void _mm256_storeu_pd (double * mem_addr, __m256d a) * __m256d _mm256_set1_pd (double a) * __m256d _mm256_set_pd (double e3, double e2, double e1, double e0) * __m256d _mm256_loadu_pd (double const * mem_addr) * __m256d _mm256_add_pd (__m256d a, __m256d b) * __m256d _mm256_sub_pd (__m256d a, __m256d b) * __m256d _mm256_fmadd_pd (__m256d a, __m256d b, __m256d c) * __m256d _mm256_mul_pd (__m256d a, __m256d b) * __m256d _mm256_cmp_pd (__m256d a, __m256d b, const int imm8) * __m256d _mm256_and_pd (__m256d a, __m256d b) * __m256d _mm256_max_pd (__m256d a, __m256d b) */ /* Generates a random double between low and high */ double rand_double(double low, double high) { double range = (high - low); double div = RAND_MAX / range; return low + (rand() / div); } /* Generates a random matrix */ void rand_matrix(matrix *result, unsigned int seed, double low, double high) { srand(seed); for (int i = 0; i < result->rows; i++) { for (int j = 0; j < result->cols; j++) { set(result, i, j, rand_double(low, high)); } } } /* * Allocates space for a matrix struct pointed to by the double pointer mat with * `rows` rows and `cols` columns. You should also allocate memory for the data array * and initialize all entries to be zeros. `parent` should be set to NULL to indicate that * this matrix is not a slice. You should also set `ref_cnt` to 1. * You should return -1 if either `rows` or `cols` or both have invalid values, or if any * call to allocate memory in this function fails. Return 0 upon success. */ //Done! int allocate_matrix(matrix **mat, int rows, int cols) { /* TODO: YOUR CODE HERE */ if (rows <= 0 || cols <= 0){ return -1; } *mat = (malloc(sizeof(matrix))); matrix * mm = *mat; mm->parent=NULL; mm->ref_cnt=1; double * data = calloc(rows * cols, sizeof(double)); for(int i = 0; i < rows * cols; i += 1){ data[i] = 0; } mm->data = data; mm->rows = rows; mm->cols = cols; return 0; } /* * Allocates space for a matrix struct pointed to by `mat` with `rows` rows and `cols` columns. * Its data should point to the `offset`th entry of `from`'s data (you do not need to allocate memory) * for the data field. `parent` should be set to `from` to indicate this matrix is a slice of `from`. * You should return -1 if either `rows` or `cols` or both are non-positive or if any * call to allocate memory in this function fails. Return 0 upon success. */ //Done! int allocate_matrix_ref(matrix **mat, matrix *from, int offset, int rows, int cols) { /* TODO: YOUR CODE HERE */ if (rows <= 0 || cols <= 0){ return -1; } *mat = (malloc(sizeof(matrix))); matrix * mm = *mat; if(mm == NULL){ return -1; } mm->rows = rows; mm->cols = cols; mm->data = from->data + offset; mm->parent = from; from->ref_cnt += 1; mm->ref_cnt = 1; return 0; } /* * You need to maindex2e sure that you only free `mat->data` if `mat` is not a slice and has no existing slices, * or if `mat` is the last existing slice of its parent matrix and its parent matrix has no other references * (including itself). You cannot assume that mat is not NULL.__ */ //Done! 
void deallocate_matrix(matrix *mat) {
    if (mat == NULL) return;
    mat->ref_cnt -= 1;
    if (mat->ref_cnt > 0) return;
    if (mat->parent != NULL) {
        deallocate_matrix(mat->parent);
    } else {
        free(mat->data);
    }
    free(mat);
}

/*
 * Returns the double value of the matrix at the given row and column.
 * You may assume `row` and `col` are valid.
 */
double get(matrix *mat, int row, int col) {
    return mat->data[row * mat->cols + col];
}

/*
 * Sets the value at the given row and column to val. You may assume `row` and
 * `col` are valid
 */
void set(matrix *mat, int row, int col, double val) {
    mat->data[row * mat->cols + col] = val;
}

/*
 * Sets all entries in mat to val
 */
void fill_matrix(matrix *mat, double val) {
    __m256d mat_fill = _mm256_set1_pd(val);
    int mat_size = mat->rows * mat->cols;
    #pragma omp parallel for
    for (int i = 0; i < mat_size / 4 * 4; i += 4) {
        _mm256_storeu_pd(mat->data + i, mat_fill);
    }
    // Tail case:
    for (int i = mat_size / 4 * 4; i < mat_size; i += 1) {
        mat->data[i] = val;
    }
}

/*
 * Store the result of adding mat1 and mat2 to `result`.
 * Return 0 upon success and a nonzero value upon failure.
 */
int add_matrix(matrix *result, matrix *mat1, matrix *mat2) {
    // Error check
    if (result == NULL) {
        return -1;
    }
    if (mat1->cols != mat2->cols || mat1->rows != mat2->rows ||
        result->cols != mat2->cols || result->cols < 1 ||
        result->rows < 1 || result->rows != mat2->rows) {
        return -1;
    }
    // Initialize variables
    int cnt = mat1->cols * mat1->rows;
    __m256d tmpp, add1, add2;
    // OpenMP optimization for the add operation
    #pragma omp parallel for private(tmpp, add1, add2)
    for (int i = 0; i < cnt / 4 * 4; i += 4) {
        add1 = _mm256_loadu_pd(mat1->data + i);
        add2 = _mm256_loadu_pd(mat2->data + i);
        tmpp = _mm256_add_pd(add1, add2);
        _mm256_storeu_pd(result->data + i, tmpp);
    }
    // Tail case
    for (int i = cnt / 4 * 4; i < cnt; i += 1) {
        result->data[i] = mat1->data[i] + mat2->data[i];
    }
    return 0;
}

/*
 * Store the result of subtracting mat2 from mat1 to `result`.
 * Return 0 upon success and a nonzero value upon failure.
 */
int sub_matrix(matrix *result, matrix *mat1, matrix *mat2) {
    // Error check
    if (result == NULL) {
        return -1;
    }
    if (mat1->cols != mat2->cols || mat1->rows != mat2->rows ||
        result->cols != mat2->cols || result->cols < 1 ||
        result->rows < 1 || result->rows != mat2->rows) {
        return -1;
    }
    // Initialize variables
    int cnt = mat1->cols * mat1->rows;
    __m256d tmpp, sub1, sub2;
    // OpenMP optimization for the sub operation
    #pragma omp parallel for private(tmpp, sub1, sub2)
    for (int i = 0; i < cnt / 4 * 4; i += 4) {
        sub1 = _mm256_loadu_pd(mat1->data + i);
        sub2 = _mm256_loadu_pd(mat2->data + i);
        tmpp = _mm256_sub_pd(sub1, sub2);
        _mm256_storeu_pd(result->data + i, tmpp);
    }
    // Tail case
    for (int i = cnt / 4 * 4; i < cnt; i += 1) {
        result->data[i] = mat1->data[i] - mat2->data[i];
    }
    return 0;
}

/*
 * Store the result of multiplying mat1 and mat2 to `result`.
 * Return 0 upon success and a nonzero value upon failure.
 * Remember that matrix multiplication is not the same as multiplying individual elements.
 */
int mul_matrix(matrix *result, matrix *mat1, matrix *mat2) {
    // DISCLAIMER: discussed concepts with my roommate but wrote our own code.
    // PS(0810): My roommate said our code looks very similar lol.
    // Helped by Caroline during office hours with debugging.
    // Outline:
    //   - error-check the shapes
    //   - transpose mat2 so both operands stream sequentially
    //   - compute two output elements per iteration with AVX
    //   - handle the odd trailing output element

    // Error check
    if (result == NULL) {
        return -1;
    }
    if (mat1->cols != mat2->rows ||
        result->rows != mat1->rows ||
        result->cols != mat2->cols) {
        return -1;
    }

    // Accumulate into a scratch matrix when result aliases an input
    matrix *mat_mul = result;
    if (result == mat1 || result == mat2) {
        if (allocate_matrix(&mat_mul, result->rows, result->cols) != 0) {
            return -1;
        }
    }

    // Declare shared variables
    int ix, px;
    int mat_shape = mat1->rows * mat2->cols;
    __m256d mat_404 = _mm256_set1_pd(0);

    // VERY IMPORTANT: transposing matrix 2 for performance optimization
    // Source: Caroline Liu (huge thanks) and Wikipedia ;)
    double *mat2_trans = (double *) malloc(mat2->rows * mat2->cols * sizeof(double));
    if (mat2_trans == NULL) {
        return -1;
    }
    for (ix = 0; ix < mat2->rows * mat2->cols; ix += 1) {
        mat2_trans[(ix % mat2->cols) * mat2->rows + ix / mat2->cols] = mat2->data[ix];
    }

    // Two paths through the same kernel: OpenMP for large matrices, a serial
    // copy for small ones where thread startup costs dominate.
    if (mat1->rows >= 96) {
        #pragma omp parallel for private(ix, px)
        for (ix = 0; ix < mat_shape - (mat_shape % 2); ix += 2) {
            // Initialize local variables
            double tmp_store[4];
            __m256d tmpp = mat_404;
            int rx = ix / mat2->cols * mat1->cols;
            int cx = ix % mat2->cols * mat2->rows;
            int rx1 = (ix + 1) / mat2->cols * mat1->cols;
            int cx1 = (ix + 1) % mat2->cols * mat2->rows;
            for (px = 0; px < mat1->cols / 4 * 4; px += 4) {
                __m256d load_mat1_a = _mm256_loadu_pd(mat1->data + rx + px);
                __m256d load_mat1_b = _mm256_loadu_pd(mat1->data + rx1 + px);
                __m256d load_mat2_a = _mm256_loadu_pd(mat2_trans + cx + px);
                __m256d load_mat2_b = _mm256_loadu_pd(mat2_trans + cx1 + px);
                __m256d mul_1 = _mm256_mul_pd(load_mat1_a, load_mat2_a);
                __m256d mul_2 = _mm256_mul_pd(load_mat1_b, load_mat2_b);
                __m256d tmp_res = _mm256_hadd_pd(mul_1, mul_2);
                tmpp = _mm256_add_pd(tmpp, tmp_res);
            }
            _mm256_storeu_pd(tmp_store, tmpp);
            double tmp1 = tmp_store[0] + tmp_store[2];
            double tmp2 = tmp_store[1] + tmp_store[3];
            for (px = mat1->cols / 4 * 4; px < mat1->cols; px += 1) {
                tmp1 += mat1->data[rx + px] * mat2_trans[cx + px];
                tmp2 += mat1->data[rx1 + px] * mat2_trans[cx1 + px];
            }
            mat_mul->data[ix] = tmp1;
            mat_mul->data[ix + 1] = tmp2;
        }
    } else {
        // Non-OpenMP version of the same kernel
        for (ix = 0; ix < mat_shape - (mat_shape % 2); ix += 2) {
            // Initialize local variables
            double tmp_store[4];
            __m256d tmpp = mat_404;
            int rx = ix / mat2->cols * mat1->cols;
            int cx = ix % mat2->cols * mat2->rows;
            int rx1 = (ix + 1) / mat2->cols * mat1->cols;
            int cx1 = (ix + 1) % mat2->cols * mat2->rows;
            for (px = 0; px < mat1->cols / 4 * 4; px += 4) {
                __m256d load_mat1_a = _mm256_loadu_pd(mat1->data + rx + px);
                __m256d load_mat1_b = _mm256_loadu_pd(mat1->data + rx1 + px);
                __m256d load_mat2_a = _mm256_loadu_pd(mat2_trans + cx + px);
                __m256d load_mat2_b = _mm256_loadu_pd(mat2_trans + cx1 + px);
                __m256d mul_1 = _mm256_mul_pd(load_mat1_a, load_mat2_a);
                __m256d mul_2 = _mm256_mul_pd(load_mat1_b, load_mat2_b);
                __m256d tmp_res = _mm256_hadd_pd(mul_1, mul_2);
                tmpp = _mm256_add_pd(tmpp, tmp_res);
            }
            _mm256_storeu_pd(tmp_store, tmpp);
            double tmpp1 = tmp_store[0] + tmp_store[2];
            double tmpp2 = tmp_store[1] + tmp_store[3];
            for (px = mat1->cols / 4 * 4; px < mat1->cols; px += 1) {
                tmpp1 += mat1->data[rx + px] * mat2_trans[cx + px];
                tmpp2 += mat1->data[rx1 + px] * mat2_trans[cx1 + px];
            }
            mat_mul->data[ix] = tmpp1;
            mat_mul->data[ix + 1] = tmpp2;
        }
    }

    // Source: my roommate :)
    // A big "tail case" for an odd number of output elements
    if (mat_shape % 2) {
        // Initialize local variables
        double tmp_store[4];
        __m256d mat_0x = mat_404;
        // Offsets of the last row of mat1 and the last column of mat2
        int dim1 = (mat1->rows - 1) * mat1->cols;
        int dim2 = (mat2->cols - 1) * mat2->rows;
        for (px = 0; px < mat1->cols / 4 * 4; px += 4) {
            __m256d load_mat1 = _mm256_loadu_pd(mat1->data + dim1 + px);
            __m256d load_mat2 = _mm256_loadu_pd(mat2_trans + dim2 + px);
            __m256d tmp_mulx = _mm256_mul_pd(load_mat1, load_mat2);
            mat_0x = _mm256_add_pd(mat_0x, tmp_mulx);
        }
        _mm256_storeu_pd(tmp_store, mat_0x);
        double tmpp = tmp_store[0] + tmp_store[1] + tmp_store[2] + tmp_store[3];
        // Scalar tail
        for (px = mat1->cols / 4 * 4; px < mat1->cols; px += 1) {
            tmpp += mat1->data[dim1 + px] * mat2_trans[dim2 + px];
        }
        mat_mul->data[mat_shape - 1] = tmpp;
    }

    free(mat2_trans);
    if (result == mat1 || result == mat2) {
        double *swap_buff = mat_mul->data;
        mat_mul->data = result->data;
        result->data = swap_buff;
        deallocate_matrix(mat_mul);
    }
    return 0;
}

/*
 * Store the result of raising mat to the (pow)th power to `result`.
 * Return 0 upon success and a nonzero value upon failure.
 * Remember that pow is defined with matrix multiplication, not element-wise multiplication.
 */
int pow_matrix(matrix *result, matrix *mat, int pow) {
    // Error checking
    if (result == NULL || pow < 0 ||
        mat->rows != mat->cols ||
        mat->rows < 1 || mat->cols < 1 ||
        result->cols != mat->cols ||
        result->rows != mat->rows ||
        result->cols != result->rows) {
        return -1;
    }
    int cnt = mat->rows * mat->cols;

    // DISCLAIMER: discussed concepts with my roommate but wrote our own code.
    // Helped by Caroline during office hours with debugging.
    // Outline:
    //   - copy mat into a scratch matrix holding the current power of two
    //   - initialize the accumulator to the identity matrix
    //   - square-and-multiply until the exponent is exhausted
    //   - swap the accumulated data back into result

    // Scratch matrix holding mat^(2^i)
    matrix *mat_pow = NULL;
    if (allocate_matrix(&mat_pow, result->rows, result->cols) != 0) {
        return -1;
    }

    // Copy mat into the scratch matrix (val is loop-local so the
    // parallel iterations do not race on it)
    #pragma omp parallel for
    for (int i = 0; i < cnt / 4 * 4; i += 4) {
        __m256d val = _mm256_loadu_pd(mat->data + i);
        _mm256_storeu_pd(mat_pow->data + i, val);
    }
    // Tail case:
    for (int i = cnt / 4 * 4; i < cnt; i += 1) {
        mat_pow->data[i] = mat->data[i];
    }

    // Accumulate into a temporary when result aliases mat, to reduce memory traffic
    matrix *mat_tmp = result;
    if (result == mat) {
        if (allocate_matrix(&mat_tmp, result->rows, result->cols) != 0) {
            deallocate_matrix(mat_pow);
            return -1;
        }
    }

    // Initialize the accumulator to the identity matrix
    fill_matrix(mat_tmp, 0);
    #pragma omp parallel for
    for (int i = 0; i < mat_tmp->cols; i += 1) {
        mat_tmp->data[i * mat_tmp->cols + i] = 1;
    }

    // Exponentiation by squaring
    while (pow > 0) {
        if ((pow & 0x1) == 1) {
            mul_matrix(mat_tmp, mat_tmp, mat_pow);
        }
        pow = pow >> 1;
        if (pow > 0) {
            mul_matrix(mat_pow, mat_pow, mat_pow);
        }
    }

    if (result == mat) {
        // Store the result of the power back into result
        double *tmp_data = mat_tmp->data;
        mat_tmp->data = result->data;
        result->data = tmp_data;
        // Free the memory of the tmp matrix
        deallocate_matrix(mat_tmp);
    }
    // The scratch power matrix does need to be freed
    deallocate_matrix(mat_pow);
    return 0;
}

/*
 * Store the result of element-wise negating mat's entries to `result`.
 * Return 0 upon success and a nonzero value upon failure.
 */
int neg_matrix(matrix *result, matrix *mat) {
    // Error checking
    if (result == NULL || result->cols < 1 || result->rows < 1) {
        return -1;
    }
    if (result->cols != mat->cols || result->rows != mat->rows) {
        return -1;
    }
    int cnt = mat->rows * mat->cols;
    __m256d neg_anchor = _mm256_set1_pd(0);
    #pragma omp parallel for
    for (int i = 0; i < cnt / 4 * 4; i += 4) {
        __m256d mat_data = _mm256_loadu_pd(mat->data + i);
        __m256d neg_data = _mm256_sub_pd(neg_anchor, mat_data);
        _mm256_storeu_pd(result->data + i, neg_data);
    }
    // Tail case:
    for (int i = cnt / 4 * 4; i < cnt; i += 1) {
        result->data[i] = 0 - mat->data[i];
    }
    return 0;
}

/*
 * Store the result of taking the absolute value element-wise to `result`.
 * Return 0 upon success and a nonzero value upon failure.
 */
int abs_matrix(matrix *result, matrix *mat) {
    // Error checking:
    if (result == NULL || result->cols < 1 || result->rows < 1) {
        return -1;
    }
    if (result->cols != mat->cols || result->rows != mat->rows) {
        return -1;
    }
    // Could do manual unrolling and SIMD here, but too lazy to do so.
    #pragma omp parallel for
    for (int i = 0; i < mat->rows * mat->cols; i += 1) {
        if (mat->data[i] < 0) {
            result->data[i] = 0 - mat->data[i];
        } else {
            result->data[i] = mat->data[i];
        }
    }
    return 0;
}
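/*
 * Illustrative sketch (hypothetical helper, not part of the project API):
 * the same square-and-multiply loop pow_matrix uses above, shown on scalars.
 * For pow = 13 (binary 1101) the accumulator is multiplied by base^1,
 * base^4, and base^8, so only a handful of multiplies happen instead of
 * twelve.
 */
static double demo_scalar_pow(double base, int pow) {
    double acc = 1.0; // identity, like initializing result to I
    while (pow > 0) {
        if ((pow & 0x1) == 1) {
            acc *= base;  // mirrors mul_matrix(mat_tmp, mat_tmp, mat_pow)
        }
        pow = pow >> 1;
        if (pow > 0) {
            base *= base; // mirrors mul_matrix(mat_pow, mat_pow, mat_pow)
        }
    }
    return acc;
}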
hello1.c
#include <stdio.h>
#include <omp.h>

int main(void)
{
    omp_set_num_threads(8); // I have set the number of threads = 8; you can change this.
    #pragma omp parallel
    printf("Hello, world.\n");
    return 0;
}