source
stringlengths
3
92
c
stringlengths
26
2.25M
kernel_parallel.c
/****************************************************************************** * Copyright (c) Intel Corporation - All rights reserved. * * This file is part of the LIBXSMM library. * * * * For information on the license, see the LICENSE file. * * Further information: https://github.com/libxsmm/libxsmm/ * * SPDX-License-Identifier: BSD-3-Clause * ******************************************************************************/ /* Alexander Heinecke (Intel Corp.) ******************************************************************************/ #include <libxsmm.h> #include <stdlib.h> #include <stdio.h> #include <math.h> #include <string.h> # if defined(_OPENMP) #include <omp.h> # endif # if defined(__APPLE__) && defined(__arm64__) #include <pthread.h> # endif typedef struct gemm_def { libxsmm_datatype in_type; libxsmm_datatype out_type; libxsmm_datatype comp_type; libxsmm_blasint m; libxsmm_blasint n; libxsmm_blasint k; libxsmm_blasint lda; libxsmm_blasint ldb; libxsmm_blasint ldc; double alpha; double beta; int trans_a; int trans_b; int vnni_a; int vnni_b; int vnni_c; int unsigned_a; int unsigned_b; int unsigned_c; int aligned_a; int aligned_c; int prefetch; int br_type; libxsmm_blasint br_count; int br_unroll; int tc_config; float scf; } gemm_def; void init_random_matrix( libxsmm_datatype dtype, void* data, libxsmm_blasint br, libxsmm_blasint ld, libxsmm_blasint n ) { double* d_data = (double*) data; float* f_data = (float*) data; libxsmm_bfloat16* bf_data = (libxsmm_bfloat16*) data; int* i_data = (int*) data; short* s_data = (short*) data; char* c_data = (char*) data; unsigned int l_r, l_i, l_j; for (l_r = 0; l_r < br; l_r++) { for (l_i = 0; l_i < ld; l_i++) { for (l_j = 0; l_j < n; l_j++) { if ( dtype == LIBXSMM_DATATYPE_F64 ) { d_data[(l_r * ld * n) + (l_j * ld) + l_i] = libxsmm_rng_f64(); } else if ( dtype == LIBXSMM_DATATYPE_F32 ) { f_data[(l_r * ld * n) + (l_j * ld) + l_i] = (float)libxsmm_rng_f64(); } else if ( dtype == LIBXSMM_DATATYPE_BF16 ) { union 
libxsmm_bfloat16_hp tmp; tmp.f = (float)libxsmm_rng_f64(); bf_data[(l_r * ld * n) + (l_j * ld) + l_i] = tmp.i[1]; } else if ( dtype == LIBXSMM_DATATYPE_I32 ) { i_data[(l_r * ld * n) + (l_j * ld) + l_i] = (int) (libxsmm_rng_f64() * 20.0); } else if ( dtype == LIBXSMM_DATATYPE_I16 ) { s_data[(l_r * ld * n) + (l_j * ld) + l_i] = (short)(libxsmm_rng_f64() * 20.0); } else if ( dtype == LIBXSMM_DATATYPE_I8 ) { c_data[(l_r * ld * n) + (l_j * ld) + l_i] = (char) (libxsmm_rng_f64() * 20.0); } else { } } } } } void init_zero_matrix( libxsmm_datatype dtype, void* data, libxsmm_blasint br, libxsmm_blasint ld, libxsmm_blasint n ) { char* l_data = (char*) data; memset( l_data, 0x0, br*ld*n*LIBXSMM_TYPESIZE(dtype) ); } void init_garbage_matrix( libxsmm_datatype dtype, void* data, libxsmm_blasint br, libxsmm_blasint ld, libxsmm_blasint n ) { char* l_data = (char*) data; memset( l_data, 0xdeadbeef, br*ld*n*LIBXSMM_TYPESIZE(dtype) ); } void ref_matmul( gemm_def* i_gemm_def, void* a, void* b, void* c ) { unsigned int l_r, l_j, l_i, l_s, l_k2; unsigned int lda = i_gemm_def->lda; unsigned int ldb = i_gemm_def->ldb; unsigned int ldc = i_gemm_def->ldc; unsigned int m = i_gemm_def->m; unsigned int n = i_gemm_def->n; unsigned int k = i_gemm_def->k; if ( (i_gemm_def->in_type == LIBXSMM_DATATYPE_F64) && (i_gemm_def->out_type == LIBXSMM_DATATYPE_F64) && (i_gemm_def->comp_type == LIBXSMM_DATATYPE_F64) ) { double* d_a = (double*)a; double* d_b = (double*)b; double* d_c = (double*)c; for (l_r = 0; l_r < i_gemm_def->br_count; l_r++) { for (l_j = 0; l_j < n; l_j++) { for (l_i = 0; l_i < m; l_i++) { if ( (i_gemm_def->beta == 0) && (l_r == 0) ) { d_c[(l_j * ldc) + l_i] = 0.0; } for (l_s = 0; l_s < k; l_s++) { if ( i_gemm_def->trans_b == 0 ) { d_c[(l_j * ldc) + l_i] += d_a[(l_r * lda * k) + ((l_s * lda) + l_i)] * d_b[(l_r * ldb * n) + ((l_j * ldb) + l_s)]; } else { d_c[(l_j * ldc) + l_i] += d_a[(l_r * lda * k) + ((l_s * lda) + l_i)] * d_b[(l_r * ldb * k) + ((l_s * ldb) + l_j)]; } } } } } } else if ( 
(i_gemm_def->in_type == LIBXSMM_DATATYPE_F32) && (i_gemm_def->out_type == LIBXSMM_DATATYPE_F32) && (i_gemm_def->comp_type == LIBXSMM_DATATYPE_F32) ) { float* f_a = (float*)a; float* f_b = (float*)b; float* f_c = (float*)c; for (l_r = 0; l_r < i_gemm_def->br_count; l_r++) { for (l_j = 0; l_j < n; l_j++) { for (l_i = 0; l_i < m; l_i++) { if ( (i_gemm_def->beta == 0) && (l_r == 0) ) { f_c[(l_j * ldc) + l_i] = 0.0; } for (l_s = 0; l_s < k; l_s++) { if ( i_gemm_def->trans_b == 0 ) { f_c[(l_j * ldc) + l_i] += f_a[(l_r * lda * k) + ((l_s * lda) + l_i)] * f_b[(l_r * ldb * n) + ((l_j * ldb) + l_s)]; } else { f_c[(l_j * ldc) + l_i] += f_a[(l_r * lda * k) + ((l_s * lda) + l_i)] * f_b[(l_r * ldb * k) + ((l_s * ldb) + l_j)]; } } } } } } else if ( (i_gemm_def->in_type == LIBXSMM_DATATYPE_I16) && (i_gemm_def->out_type == LIBXSMM_DATATYPE_I32) && (i_gemm_def->comp_type == LIBXSMM_DATATYPE_I32) ) { short* s_a = (short*)a; short* s_b = (short*)b; int* i_c = (int*)c; int l_k_block = 2; for (l_r = 0; l_r < i_gemm_def->br_count; l_r++) { for (l_j = 0; l_j < n; l_j++) { for (l_i = 0; l_i < m; l_i++) { if ( (i_gemm_def->beta == 0) && (l_r == 0) ) { i_c[(l_j * ldc) + l_i] = 0; } for (l_s = 0; l_s < (k / l_k_block); l_s++) { for (l_k2 = 0; l_k2 < l_k_block; l_k2++) { i_c[(l_j * ldc) + l_i] += s_a[(l_r * lda * k) + (l_s * (lda*l_k_block)) + (l_i*l_k_block) + l_k2] * s_b[(l_r * ldb * n) + (l_j * ldb) + (l_s*l_k_block) + l_k2]; } } } } } } else if ( (i_gemm_def->in_type == LIBXSMM_DATATYPE_I8) && (i_gemm_def->out_type == LIBXSMM_DATATYPE_I32) && (i_gemm_def->comp_type == LIBXSMM_DATATYPE_I32) && (i_gemm_def->unsigned_a == 1) && (i_gemm_def->unsigned_b == 0) ) { unsigned char* c_a = (unsigned char*)a; char* c_b = (char*)b; int* i_c = (int*)c; int l_k_block = 4; for (l_r = 0; l_r < i_gemm_def->br_count; l_r++) { for (l_j = 0; l_j < n; l_j++) { for (l_i = 0; l_i < m; l_i++) { if ( (i_gemm_def->beta == 0) && (l_r == 0) ) { i_c[(l_j * ldc) + l_i] = 0; } for (l_s = 0; l_s < (k / l_k_block); l_s++) 
{ for (l_k2 = 0; l_k2 < l_k_block; l_k2++) { i_c[(l_j * ldc) + l_i] += c_a[(l_r * lda * k) + (l_s * (lda*l_k_block)) + (l_i*l_k_block) + l_k2] * c_b[(l_r * ldb * n) + (l_j * ldb) + (l_s*l_k_block) + l_k2]; } } } } } } else if ( (i_gemm_def->in_type == LIBXSMM_DATATYPE_I8) && (i_gemm_def->out_type == LIBXSMM_DATATYPE_I32) && (i_gemm_def->comp_type == LIBXSMM_DATATYPE_I32) && (i_gemm_def->unsigned_a == 0) && (i_gemm_def->unsigned_b == 1) ) { char* c_a = (char*)a; unsigned char* c_b = (unsigned char*)b; int* i_c = (int*)c; int l_k_block = 4; for (l_r = 0; l_r < i_gemm_def->br_count; l_r++) { for (l_j = 0; l_j < n; l_j++) { for (l_i = 0; l_i < m; l_i++) { if ( (i_gemm_def->beta == 0) && (l_r == 0) ) { i_c[(l_j * ldc) + l_i] = 0; } for (l_s = 0; l_s < (k / l_k_block); l_s++) { for (l_k2 = 0; l_k2 < l_k_block; l_k2++) { i_c[(l_j * ldc) + l_i] += c_a[(l_r * lda * k) + (l_s * (lda*l_k_block)) + (l_i*l_k_block) + l_k2] * c_b[(l_r * ldb * n) + (l_j * ldb) + (l_s*l_k_block) + l_k2]; } } } } } } else if ( (i_gemm_def->in_type == LIBXSMM_DATATYPE_I8) && (i_gemm_def->out_type == LIBXSMM_DATATYPE_I8) && (i_gemm_def->comp_type == LIBXSMM_DATATYPE_I32) && (i_gemm_def->unsigned_a == 0) && (i_gemm_def->unsigned_b == 1) && (i_gemm_def->unsigned_c == 1) ) { char* c_a = (char*)a; unsigned char* c_b = (unsigned char*)b; unsigned char* c_c = (unsigned char*)c; int l_k_block = 4; for (l_r = 0; l_r < i_gemm_def->br_count; l_r++) { for (l_j = 0; l_j < n; l_j++) { for (l_i = 0; l_i < m; l_i++) { int tmp; float ftmp; if ( (i_gemm_def->beta == 0) && (l_r == 0) ) { tmp = 0; } else { tmp = (int)c_c[(l_j * ldc) + l_i]; } for (l_s = 0; l_s < (k / l_k_block); l_s++) { for (l_k2 = 0; l_k2 < l_k_block; l_k2++) { tmp += c_a[(l_r * lda * k) + (l_s * (lda*l_k_block)) + (l_i*l_k_block) + l_k2] * c_b[(l_r * ldb * n) + (l_j * ldb) + (l_s*l_k_block) + l_k2]; } } ftmp = (float)tmp; ftmp *= i_gemm_def->scf; c_c[(l_j * ldc) + l_i] = (unsigned char)ftmp; } } } } else if ( (i_gemm_def->in_type == 
LIBXSMM_DATATYPE_BF16) && (i_gemm_def->out_type == LIBXSMM_DATATYPE_F32) && (i_gemm_def->comp_type == LIBXSMM_DATATYPE_F32) ) { libxsmm_bfloat16* h_a = (libxsmm_bfloat16*)a; libxsmm_bfloat16* h_b = (libxsmm_bfloat16*)b; float* f_c = (float*)c; int l_k_block = ( i_gemm_def->vnni_a != 0) ? 2 : 1; for (l_r = 0; l_r < i_gemm_def->br_count; l_r++) { for (l_j = 0; l_j < n; l_j++) { for (l_i = 0; l_i < m; l_i++) { if ( (i_gemm_def->beta == 0) && (l_r == 0) ) { f_c[(l_j * ldc) + l_i] = 0.0f; } for (l_s = 0; l_s < (k / l_k_block); l_s++) { for (l_k2 = 0; l_k2 < l_k_block; l_k2++) { union libxsmm_bfloat16_hp tmp_a_f; union libxsmm_bfloat16_hp tmp_b_f; tmp_a_f.i[0] = 0; tmp_a_f.i[1] = h_a[(l_r * lda * k) + (l_s * (lda*l_k_block)) + (l_i*l_k_block) + l_k2]; tmp_b_f.i[0] = 0; tmp_b_f.i[1] = h_b[(l_r * ldb * n) + (l_j * ldb) + (l_s*l_k_block) + l_k2]; f_c[(l_j * ldc) + l_i] += tmp_a_f.f * tmp_b_f.f; } } } } } } else if ( (i_gemm_def->in_type == LIBXSMM_DATATYPE_BF16) && (i_gemm_def->out_type == LIBXSMM_DATATYPE_BF16) && (i_gemm_def->comp_type == LIBXSMM_DATATYPE_F32) ) { libxsmm_bfloat16* h_a = (libxsmm_bfloat16*)a; libxsmm_bfloat16* h_b = (libxsmm_bfloat16*)b; libxsmm_bfloat16* h_c = (libxsmm_bfloat16*)c; int l_k_block = ( i_gemm_def->vnni_a != 0) ? 
2 : 1; float acc = 0.0f; libxsmm_bfloat16 h_acc; for (l_r = 0; l_r < i_gemm_def->br_count; l_r++) { for (l_j = 0; l_j < n; l_j++) { for (l_i = 0; l_i < m; l_i++) { if ( (i_gemm_def->beta == 0) && (l_r == 0) ) { acc = 0.0f; } else { union libxsmm_bfloat16_hp tmp; tmp.i[0] = 0; tmp.i[1] = h_c[(l_j * ldc) + l_i]; acc = tmp.f; } for (l_s = 0; l_s < (k / l_k_block); l_s++) { for (l_k2 = 0; l_k2 < l_k_block; l_k2++) { union libxsmm_bfloat16_hp tmp_a_f; union libxsmm_bfloat16_hp tmp_b_f; tmp_a_f.i[0] = 0; tmp_a_f.i[1] = h_a[(l_r * lda * k) + (l_s * (lda*l_k_block)) + (l_i*l_k_block) + l_k2]; tmp_b_f.i[0] = 0; tmp_b_f.i[1] = h_b[(l_r * ldb * n) + (l_j * ldb) + (l_s*l_k_block) + l_k2]; acc += tmp_a_f.f * tmp_b_f.f; } } libxsmm_rne_convert_fp32_bf16( &acc, &h_acc, 1 ); h_c[(l_j * ldc) + l_i] = h_acc; } } } } } double check_matrix( libxsmm_datatype dtype, void* data_gold, void* data, libxsmm_blasint ld, libxsmm_blasint m, libxsmm_blasint n ) { libxsmm_matdiff_info l_diff; double max_error = 0.0; libxsmm_matdiff_clear(&l_diff); if ( dtype == LIBXSMM_DATATYPE_F64 ) { libxsmm_matdiff(&l_diff, LIBXSMM_DATATYPE_F64, m, n, data_gold, data, &ld, &ld); max_error = l_diff.linf_abs; } else if ( dtype == LIBXSMM_DATATYPE_F32 ) { libxsmm_matdiff(&l_diff, LIBXSMM_DATATYPE_F32, m, n, data_gold, data, &ld, &ld); max_error = l_diff.linf_abs; } else if ( dtype == LIBXSMM_DATATYPE_BF16 ) { unsigned int l_i, l_j; libxsmm_bfloat16* h_data = (libxsmm_bfloat16*)data; libxsmm_bfloat16* h_data_gold = (libxsmm_bfloat16*)data_gold; for (l_i = 0; l_i < m; l_i++) { for (l_j = 0; l_j < n; l_j++) { union libxsmm_bfloat16_hp tmp_c; union libxsmm_bfloat16_hp tmp_gold; double l_fabs; tmp_c.i[1] = h_data[(l_j * ld) + l_i]; tmp_c.i[0] = 0; tmp_gold.i[1] = h_data_gold[(l_j * ld) + l_i]; tmp_gold.i[0] = 0; l_fabs = fabs((double)tmp_gold.f - (double)tmp_c.f); if (max_error < l_fabs) max_error = l_fabs; } } } else if ( dtype == LIBXSMM_DATATYPE_I32 ) { unsigned int l_i, l_j; int* l_data = (int*)data; int* 
l_data_gold = (int*)data_gold; for (l_i = 0; l_i < m; l_i++) { for (l_j = 0; l_j < n; l_j++) { const double l_fabs = fabs((double)l_data_gold[(l_j * ld) + l_i] - (double)l_data[(l_j * ld) + l_i]); if (max_error < l_fabs) max_error = l_fabs; } } } else if ( dtype == LIBXSMM_DATATYPE_I8 ) { unsigned int l_i, l_j; unsigned char* l_data = (unsigned char*)data; unsigned char* l_data_gold = (unsigned char*)data_gold; for (l_i = 0; l_i < m; l_i++) { for (l_j = 0; l_j < n; l_j++) { const double l_fabs = fabs((double)l_data_gold[(l_j * ld) + l_i] - (double)l_data[(l_j * ld) + l_i]); if (max_error < l_fabs) max_error = l_fabs; } } } else { max_error = 100.0; } return max_error; } double jit_matmul( const gemm_def* i_gemm_def, const void* i_a, const void* i_b, void* o_c, void* o_c_perf, const int i_reps, const unsigned int i_print_jit_info ) { /* define function pointer */ libxsmm_xmmfunction l_test_jit = { NULL }; libxsmm_xmmfunction cfg_tr = { NULL }; libxsmm_xmmfunction rls_tr = { NULL }; libxsmm_timer_tickint l_start; libxsmm_mmkernel_info l_info; libxsmm_gemm_shape l_shape; libxsmm_gemm_batch_reduce_config l_brconfig; libxsmm_gemm_ext_unary_argops l_argops; libxsmm_gemm_ext_binary_postops l_postops; libxsmm_bitfield l_flags = LIBXSMM_GEMM_FLAGS('N', 'N'); libxsmm_bitfield l_prefetch_flags = 0; #if defined(USE_GEMM_EXT_FRONTEND) libxsmm_gemm_ext_param gemm_param; #else libxsmm_gemm_param gemm_param; #endif double l_jittime, l_runtime; size_t l_t, l_r; char** l_a_addr = (char**)malloc(i_gemm_def->br_count*sizeof(char*)); char** l_b_addr = (char**)malloc(i_gemm_def->br_count*sizeof(char*)); unsigned long long* l_a_offs = (unsigned long long*)malloc(i_gemm_def->br_count*sizeof(unsigned long long)); unsigned long long* l_b_offs = (unsigned long long*)malloc(i_gemm_def->br_count*sizeof(unsigned long long)); double l_beta = i_gemm_def->beta; unsigned long long l_br = (unsigned long long)i_gemm_def->br_count; int l_cfg_flags = 0; int l_rls_flags = 0; if (0 == i_gemm_def) { 
fprintf(stderr, "JIT: unsupported descriptor arguments or data type!\n"); return EXIT_FAILURE; } /* setup brgemm offsets */ if ( i_gemm_def->br_type == 2 ) { for ( l_r = 0 ; l_r < i_gemm_def->br_count; l_r++ ) { l_a_offs[l_r] = l_r * (size_t)i_gemm_def->lda * (size_t)i_gemm_def->k * LIBXSMM_TYPESIZE(i_gemm_def->in_type); if (i_gemm_def->trans_b == 0) { l_b_offs[l_r] = l_r * (size_t)i_gemm_def->ldb * (size_t)i_gemm_def->n * LIBXSMM_TYPESIZE(i_gemm_def->in_type); } else { l_b_offs[l_r] = l_r * (size_t)i_gemm_def->ldb * (size_t)i_gemm_def->k * LIBXSMM_TYPESIZE(i_gemm_def->in_type); } } } /* set up the flags */ if ( i_gemm_def->trans_b != 0 ) { l_flags |= LIBXSMM_GEMM_FLAG_TRANS_B; } if ( i_gemm_def->trans_a != 0 ) { fprintf(stderr, "trans_a needs to be 0\n"); return EXIT_FAILURE; } if ( i_gemm_def->vnni_a != 0 ) { l_flags |= LIBXSMM_GEMM_FLAG_VNNI_A; } if ( i_gemm_def->unsigned_a != 0 ) { l_flags |= LIBXSMM_GEMM_FLAG_A_UNSIGNED; } if ( i_gemm_def->unsigned_b != 0 ) { l_flags |= LIBXSMM_GEMM_FLAG_B_UNSIGNED; } l_flags |= (0 != i_gemm_def->aligned_a ? LIBXSMM_GEMM_FLAG_ALIGN_A : 0); l_flags |= (0 != i_gemm_def->aligned_c ? LIBXSMM_GEMM_FLAG_ALIGN_C : 0); l_flags |= ( l_beta == 0 ) ? LIBXSMM_GEMM_FLAG_BETA_0 : 0; /* setting update GEMM struct */ l_shape.m = i_gemm_def->m; l_shape.n = i_gemm_def->n; l_shape.k = i_gemm_def->k; l_shape.lda = (void*)&(i_gemm_def->lda); l_shape.ldb = (void*)&(i_gemm_def->ldb); l_shape.ldc = (void*)&(i_gemm_def->ldc); l_shape.a_in_type = i_gemm_def->in_type; l_shape.b_in_type = i_gemm_def->in_type; l_shape.out_type = i_gemm_def->out_type; l_shape.comp_type = i_gemm_def->comp_type; /* setting BRGEMM config struct */ if (i_gemm_def->br_type == 1) { l_brconfig.br_type = LIBXSMM_GEMM_BATCH_REDUCE_ADDRESS; l_brconfig.br_stride_a_hint = 0; l_brconfig.br_stride_b_hint = 0; l_brconfig.br_unroll_hint = ( i_gemm_def->br_unroll == 0 ) ? 
0 : i_gemm_def->br_count; } else if (i_gemm_def->br_type == 2) { l_brconfig.br_type = LIBXSMM_GEMM_BATCH_REDUCE_OFFSET; l_brconfig.br_stride_a_hint = 0; l_brconfig.br_stride_b_hint = 0; l_brconfig.br_unroll_hint = ( i_gemm_def->br_unroll == 0 ) ? 0 : i_gemm_def->br_count; } else if (i_gemm_def->br_type == 3) { l_brconfig.br_type = LIBXSMM_GEMM_BATCH_REDUCE_STRIDE; l_brconfig.br_stride_a_hint = i_gemm_def->lda*i_gemm_def->k*LIBXSMM_TYPESIZE(i_gemm_def->in_type); l_brconfig.br_stride_b_hint = (i_gemm_def->trans_b == 0) ? i_gemm_def->ldb*i_gemm_def->n*LIBXSMM_TYPESIZE(i_gemm_def->in_type) : i_gemm_def->ldb*i_gemm_def->k*LIBXSMM_TYPESIZE(i_gemm_def->in_type); l_brconfig.br_unroll_hint = ( i_gemm_def->br_unroll == 0 ) ? 0 : i_gemm_def->br_count; } else { l_brconfig.br_type = LIBXSMM_GEMM_BATCH_REDUCE_NONE; l_brconfig.br_stride_a_hint = 0; l_brconfig.br_stride_b_hint = 0; l_brconfig.br_unroll_hint = 0; } /* setting prefetch flags */ l_prefetch_flags = i_gemm_def->prefetch; /* setting ext structs to 0 */ memset( &l_argops, 0, sizeof(libxsmm_gemm_ext_unary_argops) ); memset( &l_postops, 0, sizeof(libxsmm_gemm_ext_binary_postops) ); l_start = libxsmm_timer_tick(); if (i_gemm_def->tc_config) { l_cfg_flags = LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | l_flags; l_rls_flags = LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG | l_flags; l_flags |= (LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG | LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG); cfg_tr.gemm = libxsmm_dispatch_gemm_v2( l_shape, l_cfg_flags, l_prefetch_flags, l_brconfig ); rls_tr.gemm = libxsmm_dispatch_gemm_v2( l_shape, l_rls_flags, l_prefetch_flags, l_brconfig ); } #if defined(USE_GEMM_EXT_FRONTEND) l_test_jit.gemm_ext = libxsmm_dispatch_gemm_ext_v2( l_shape, l_flags, l_prefetch_flags, l_brconfig, l_argops, l_postops ); #else l_test_jit.gemm = libxsmm_dispatch_gemm_v2( l_shape, l_flags, l_prefetch_flags, l_brconfig ); #endif l_jittime = libxsmm_timer_duration(l_start, libxsmm_timer_tick()); if (l_test_jit.xmm == 0) { printf("JIT failed, please 
run with LIBXSMM_VERBOSE=-1 and/or with debug mode LIBXSMM library!\n"); exit(EXIT_FAILURE); } /* receive kernel information */ libxsmm_get_mmkernel_info(l_test_jit, &l_info); /* run external tileconfig */ if (i_gemm_def->tc_config) { cfg_tr.gemm( NULL ); } /* reset GEMM parameter */ #if defined(USE_GEMM_EXT_FRONTEND) memset( &gemm_param, 0, sizeof(libxsmm_gemm_ext_param) ); #else memset( &gemm_param, 0, sizeof(libxsmm_gemm_param) ); #endif gemm_param.op.tertiary = &l_br; gemm_param.c.primary = (void*)o_c; gemm_param.c.tertiary = (void*)(( i_gemm_def->unsigned_c != 0 ) ? &(i_gemm_def->scf) : NULL); /* run correctness */ if (i_gemm_def->br_type == 0) { gemm_param.a.primary = (void*)i_a; gemm_param.b.primary = (void*)i_b; if ( l_info.prefetch != LIBXSMM_GEMM_PREFETCH_NONE ) { gemm_param.a.quaternary = (void*)i_a; gemm_param.b.quaternary = (void*)i_b; gemm_param.c.quaternary = (void*)o_c; } #if defined(USE_GEMM_EXT_FRONTEND) l_test_jit.gemm_ext( &gemm_param ); #else l_test_jit.gemm( &gemm_param ); #endif } else if (i_gemm_def->br_type == 1) { gemm_param.a.primary = l_a_addr; gemm_param.b.primary = l_b_addr; for ( l_r = 0 ; l_r < i_gemm_def->br_count; l_r++ ) { l_a_addr[l_r] = (char*)i_a + (l_r * (size_t)i_gemm_def->lda * (size_t)i_gemm_def->k * LIBXSMM_TYPESIZE(i_gemm_def->in_type)); if (i_gemm_def->trans_b == 0) { l_b_addr[l_r] = (char*)i_b + (l_r * (size_t)i_gemm_def->ldb * (size_t)i_gemm_def->n * LIBXSMM_TYPESIZE(i_gemm_def->in_type)); } else { l_b_addr[l_r] = (char*)i_b + (l_r * (size_t)i_gemm_def->ldb * (size_t)i_gemm_def->k * LIBXSMM_TYPESIZE(i_gemm_def->in_type)); } } #if defined(USE_GEMM_EXT_FRONTEND) l_test_jit.gemm_ext( &gemm_param ); #else l_test_jit.gemm( &gemm_param ); #endif } else if (i_gemm_def->br_type == 2) { gemm_param.a.primary = (void*)i_a; gemm_param.a.secondary = l_a_offs; gemm_param.b.primary = (void*)i_b; gemm_param.b.secondary = l_b_offs; #if defined(USE_GEMM_EXT_FRONTEND) l_test_jit.gemm_ext( &gemm_param ); #else l_test_jit.gemm( &gemm_param 
); #endif } else if (i_gemm_def->br_type == 3) { gemm_param.a.primary = (void*)i_a; gemm_param.b.primary = (void*)i_b; #if defined(USE_GEMM_EXT_FRONTEND) test_jit.gemm_ext( &gemm_param ); #else l_test_jit.gemm( &gemm_param ); #endif } /* run performance */ gemm_param.c.primary = (void*)o_c_perf; l_start = libxsmm_timer_tick(); if (i_gemm_def->br_type == 0) { gemm_param.a.primary = (void*)i_a; gemm_param.b.primary = (void*)i_b; if ( l_info.prefetch != LIBXSMM_GEMM_PREFETCH_NONE ) { gemm_param.a.quaternary = (void*)i_a; gemm_param.b.quaternary = (void*)i_b; gemm_param.c.quaternary = (void*)o_c_perf; } for (l_t = 0; l_t < i_reps; l_t++) { #if defined(USE_GEMM_EXT_FRONTEND) l_test_jit.gemm_ext( &gemm_param ); #else l_test_jit.gemm( &gemm_param ); #endif } } else if (i_gemm_def->br_type == 1) { gemm_param.a.primary = l_a_addr; gemm_param.b.primary = l_b_addr; for (l_t = 0; l_t < i_reps; l_t++) { for ( l_r = 0 ; l_r < i_gemm_def->br_count; l_r++ ) { l_a_addr[l_r] = (char*)i_a + (l_r * (size_t)i_gemm_def->lda * (size_t)i_gemm_def->k * LIBXSMM_TYPESIZE(i_gemm_def->in_type)); if (i_gemm_def->trans_b == 0) { l_b_addr[l_r] = (char*)i_b + (l_r * (size_t)i_gemm_def->ldb * (size_t)i_gemm_def->n * LIBXSMM_TYPESIZE(i_gemm_def->in_type)); } else { l_b_addr[l_r] = (char*)i_b + (l_r * (size_t)i_gemm_def->ldb * (size_t)i_gemm_def->k * LIBXSMM_TYPESIZE(i_gemm_def->in_type)); } } #if defined(USE_GEMM_EXT_FRONTEND) l_test_jit.gemm_ext( &gemm_param ); #else l_test_jit.gemm( &gemm_param ); #endif } } else if (i_gemm_def->br_type == 2) { gemm_param.a.primary = (void*)i_a; gemm_param.a.secondary = l_a_offs; gemm_param.b.primary = (void*)i_b; gemm_param.b.secondary = l_b_offs; for (l_t = 0; l_t < i_reps; l_t++) { #if defined(USE_GEMM_EXT_FRONTEND) l_test_jit.gemm_ext( &gemm_param ); #else l_test_jit.gemm( &gemm_param ); #endif } } else if (i_gemm_def->br_type == 3) { gemm_param.a.primary = (void*)i_a; gemm_param.b.primary = (void*)i_b; for (l_t = 0; l_t < i_reps; l_t++) { #if 
defined(USE_GEMM_EXT_FRONTEND) l_test_jit.gemm_ext( &gemm_param ); #else l_test_jit.gemm( &gemm_param ); #endif } } l_runtime = libxsmm_timer_duration(l_start, libxsmm_timer_tick()); /* run external tilerelease */ if (i_gemm_def->tc_config) { rls_tr.gemm( NULL ); } if ( i_print_jit_info == 0 ) { printf("function pointer address: %llx\n", (unsigned long long)l_test_jit.xmm); printf("%fs for creating jit\n", l_jittime); } free( (void*)l_a_addr ); free( (void*)l_b_addr ); free( (void*)l_a_offs ); free( (void*)l_b_offs ); return l_runtime; } void print_help(void) { printf("\n\n"); printf("1. Usage (dense*dense=dense, correctness and performance):\n"); printf(" M\n"); printf(" N\n"); printf(" K\n"); printf(" LDA\n"); printf(" LDB\n"); printf(" LDC\n"); printf(" alpha: 1\n"); printf(" beta: 0 or 1\n"); printf(" 0: unaligned A, otherwise aligned\n"); printf(" 0: unaligned C, otherwise aligned\n"); printf(" 0: A normal, 1: A trans\n"); printf(" 0: B normal, 1: B trans\n"); printf(" PREFETCH: nopf (none), pfsigonly, BL2viaC, AL2, curAL2, AL2_BL2viaC, curAL2_BL2viaC\n"); printf(" PRECISION: SP, DP, I16I32, USI8I32, SUI8I32, SUI8UI8, BF16F32, BF16, BF16F32_FLAT, BF16_FLAT\n"); printf(" BRGEMM: nobr, addrbr, offsbr, strdbr\n"); printf(" BRsize: 1 - N\n"); printf(" BRunroll: 0/1\n"); printf(" #repetitions\n"); printf(" tile configuration: 1 - external, 0 - internal\n"); printf("\n\n"); printf("2. 
Usage (dense*dense=dense, performance only option available):\n"); printf(" filename with space-sperated sizes (M N K LDA LDB LDC)\n"); printf(" alpha: 1\n"); printf(" beta: 0 or 1\n"); printf(" 0: unaligned A, otherwise aligned\n"); printf(" 0: unaligned C, otherwise aligned\n"); printf(" 0: A normal, 1: A trans\n"); printf(" 0: B normal, 1: B trans\n"); printf(" PRECISION: SP, DP, I16I32, USI8I32, SUI8I32, SUI8UI8, BF16F32, BF16, BF16F32_FLAT, BF16_FLAT\n"); printf(" BRGEMM: nobr, addrbr, offsbr, strdbr\n"); printf(" BRsize: 1 - N\n"); printf(" BRunroll: 0/1\n"); printf(" #repetitions\n"); printf(" 0: no check, otherwise: run check\n"); printf(" tile configuration: 1 - external, 0 - internal\n"); printf("\n\n"); } int main(int argc, char* argv []) { char* l_precision = NULL; libxsmm_blasint l_lda = 0, l_ldb = 0, l_ldc = 0; libxsmm_blasint l_m = 0, l_n = 0, l_k = 0; int l_aligned_a = 0; int l_aligned_c = 0; int l_trans_a = 0; int l_trans_b = 0; double l_alpha = 0; double l_beta = 0; int l_br = 1; int l_br_type = 0; int l_br_unroll = 0; double l_runtime_libxsmm = 0; int l_file_input = 0; char* l_file_name = NULL; FILE *l_file_handle = NULL; int l_run_check = 0; double l_total_max_error = 0.0; int l_tc_config = 0; int l_reps; libxsmm_gemm_prefetch_type l_prefetch = LIBXSMM_GEMM_PREFETCH_NONE; gemm_def l_gemm_def; int l_n_threads = 1; # if defined(__APPLE__) && defined(__arm64__) # if 1 pthread_set_qos_class_self_np( QOS_CLASS_USER_INTERACTIVE, 0 ); # else pthread_set_qos_class_self_np( QOS_CLASS_BACKGROUND, 0 ); # endif # endif /* check argument count for a valid range */ if ( argc == 20 || argc == 19 ) { /* xgemm sizes */ l_m = atoi(argv[1]); l_n = atoi(argv[2]); l_k = atoi(argv[3]); l_lda = atoi(argv[4]); l_ldb = atoi(argv[5]); l_ldc = atoi(argv[6]); /* some sugar */ l_alpha = atof(argv[7]); l_beta = atof(argv[8]); l_aligned_a = atoi(argv[9]); l_aligned_c = atoi(argv[10]); l_trans_a = atoi(argv[11]); l_trans_b = atoi(argv[12]); /* arch specific stuff */ 
l_precision = argv[14]; l_br = atoi(argv[16]); l_br_unroll = atoi(argv[17]); l_reps = atoi(argv[18]); if ( argc == 20 ) { l_tc_config = atoi(argv[19]); } else { l_tc_config = 0; } /* set value of prefetch flag */ if (strcmp("nopf", argv[13]) == 0) { l_prefetch = LIBXSMM_GEMM_PREFETCH_NONE; } else if (strcmp("pfsigonly", argv[13]) == 0) { l_prefetch = LIBXSMM_GEMM_PREFETCH_SIGONLY; } else if (strcmp("BL2viaC", argv[13]) == 0) { l_prefetch = LIBXSMM_GEMM_PREFETCH_BL2_VIA_C; } else if (strcmp("curAL2", argv[13]) == 0) { l_prefetch = LIBXSMM_GEMM_PREFETCH_AL2_AHEAD; } else if (strcmp("curAL2_BL2viaC", argv[13]) == 0) { l_prefetch = LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD; } else if (strcmp("AL2", argv[13]) == 0) { l_prefetch = LIBXSMM_GEMM_PREFETCH_AL2; } else if (strcmp("AL2_BL2viaC", argv[13]) == 0) { l_prefetch = LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C; } else { print_help(); return EXIT_FAILURE; } if (strcmp("nobr", argv[15]) == 0) { l_br_type = 0; } else if (strcmp("addrbr", argv[15]) == 0) { l_br_type = 1; } else if (strcmp("offsbr", argv[15]) == 0) { l_br_type = 2; } else if (strcmp("strdbr", argv[15]) == 0) { l_br_type = 3; } else { print_help(); return EXIT_FAILURE; } l_file_input = 0; l_run_check = 1; } else if ( argc == 15 || argc == 14 ) { l_file_input = 1; l_file_name = argv[1]; l_alpha = atof(argv[2]); l_beta = atof(argv[3]); l_aligned_a = atoi(argv[4]); l_aligned_c = atoi(argv[5]); l_trans_a = atoi(argv[6]); l_trans_b = atoi(argv[7]); l_precision = argv[8]; l_br = atoi(argv[10]); l_br_unroll = atoi(argv[11]); if ( argc == 15 ) { l_tc_config = atoi(argv[14]); } else { l_tc_config = 0; } if (strcmp("nobr", argv[9]) == 0) { l_br_type = 0; } else if (strcmp("addrbr", argv[9]) == 0) { l_br_type = 1; } else if (strcmp("offsbr", argv[9]) == 0) { l_br_type = 2; } else if (strcmp("strdbr", argv[9]) == 0) { l_br_type = 3; } else { print_help(); return EXIT_FAILURE; } l_reps = atoi(argv[12]); l_run_check = atoi(argv[13]); l_prefetch = LIBXSMM_GEMM_PREFETCH_NONE; } else 
{ print_help(); return EXIT_FAILURE; } const char *env_arch = getenv("LIBXSMM_TARGET"); const int is_env_SPR = ( env_arch == libxsmm_stristr(env_arch, "spr") || env_arch == libxsmm_stristr(env_arch, "amx")); int arch_cpuid = libxsmm_cpuid(); if ((!is_env_SPR && arch_cpuid < LIBXSMM_X86_AVX512_SPR) && (l_tc_config)) { printf("Warning: external tile configuration will be ingnored\n"); l_tc_config = 0; } l_br = (l_br < 1) ? 1 : l_br; l_br = (l_br_type == 0) ? 1 : l_br; l_br_unroll = (l_br_type == 0) ? 0 : l_br_unroll; /* check alpha */ if ( LIBXSMM_NEQ(l_alpha, 1.0) ) { fprintf(stderr, "JIT: alpha needs to be 1.0!\n"); exit(EXIT_FAILURE); } /* check beta */ if ( LIBXSMM_NEQ(l_beta, 0.0) && LIBXSMM_NEQ(l_beta, 1.0) ) { fprintf(stderr, "JIT: beta needs to be 0.0 or 1.0!\n"); exit(EXIT_FAILURE); } /* setting static GEMM parameters */ l_gemm_def.alpha = l_alpha; l_gemm_def.beta = l_beta; l_gemm_def.trans_a = l_trans_a; l_gemm_def.trans_b = l_trans_b; l_gemm_def.vnni_a = 0; l_gemm_def.vnni_b = 0; l_gemm_def.vnni_c = 0; l_gemm_def.unsigned_a = 0; l_gemm_def.unsigned_b = 0; l_gemm_def.unsigned_c = 0; l_gemm_def.aligned_a = l_aligned_a; l_gemm_def.aligned_c = l_aligned_c; l_gemm_def.prefetch = l_prefetch; l_gemm_def.br_type = l_br_type; l_gemm_def.br_count = l_br; l_gemm_def.br_unroll = l_br_unroll; l_gemm_def.tc_config = l_tc_config; l_gemm_def.scf = 0.0; /* setting precision in GEMM struct */ if ( (strcmp(l_precision, "DP") == 0) ) { l_gemm_def.in_type = LIBXSMM_DATATYPE_F64; l_gemm_def.out_type = LIBXSMM_DATATYPE_F64; l_gemm_def.comp_type = LIBXSMM_DATATYPE_F64; } else if ( (strcmp(l_precision, "SP") == 0) ) { l_gemm_def.in_type = LIBXSMM_DATATYPE_F32; l_gemm_def.out_type = LIBXSMM_DATATYPE_F32; l_gemm_def.comp_type = LIBXSMM_DATATYPE_F32; } else if ( (strcmp(l_precision, "I16I32") == 0) ) { l_gemm_def.in_type = LIBXSMM_DATATYPE_I16; l_gemm_def.out_type = LIBXSMM_DATATYPE_I32; l_gemm_def.comp_type = LIBXSMM_DATATYPE_I32; l_gemm_def.vnni_a = 1; l_gemm_def.trans_a = 0; 
l_gemm_def.trans_b = 0; } else if (strcmp(l_precision, "USI8I32") == 0) { l_gemm_def.in_type = LIBXSMM_DATATYPE_I8; l_gemm_def.out_type = LIBXSMM_DATATYPE_I32; l_gemm_def.comp_type = LIBXSMM_DATATYPE_I32; l_gemm_def.vnni_a = 1; l_gemm_def.trans_a = 0; l_gemm_def.trans_b = 0; l_gemm_def.unsigned_a = 1; } else if (strcmp(l_precision, "SUI8I32") == 0) { l_gemm_def.in_type = LIBXSMM_DATATYPE_I8; l_gemm_def.out_type = LIBXSMM_DATATYPE_I32; l_gemm_def.comp_type = LIBXSMM_DATATYPE_I32; l_gemm_def.vnni_a = 1; l_gemm_def.trans_a = 0; l_gemm_def.trans_b = 0; l_gemm_def.unsigned_b = 1; } else if (strcmp(l_precision, "SUI8UI8") == 0) { l_gemm_def.in_type = LIBXSMM_DATATYPE_I8; l_gemm_def.out_type = LIBXSMM_DATATYPE_I32; l_gemm_def.comp_type = LIBXSMM_DATATYPE_I32; l_gemm_def.vnni_a = 1; l_gemm_def.trans_a = 0; l_gemm_def.trans_b = 0; l_gemm_def.unsigned_b = 1; l_gemm_def.unsigned_c = 1; l_gemm_def.scf = 1.0f; } else if (strcmp(l_precision, "BF16F32") == 0) { l_gemm_def.in_type = LIBXSMM_DATATYPE_BF16; l_gemm_def.out_type = LIBXSMM_DATATYPE_F32; l_gemm_def.comp_type = LIBXSMM_DATATYPE_F32; l_gemm_def.vnni_a = 1; l_gemm_def.trans_a = 0; l_gemm_def.trans_b = 0; } else if (strcmp(l_precision, "BF16") == 0) { l_gemm_def.in_type = LIBXSMM_DATATYPE_BF16; l_gemm_def.out_type = LIBXSMM_DATATYPE_BF16; l_gemm_def.comp_type = LIBXSMM_DATATYPE_F32; l_gemm_def.vnni_a = 1; l_gemm_def.trans_a = 0; l_gemm_def.trans_b = 0; } else if (strcmp(l_precision, "BF16F32_FLAT") == 0) { l_gemm_def.in_type = LIBXSMM_DATATYPE_BF16; l_gemm_def.out_type = LIBXSMM_DATATYPE_F32; l_gemm_def.comp_type = LIBXSMM_DATATYPE_F32; } else if (strcmp(l_precision, "BF16_FLAT") == 0) { l_gemm_def.in_type = LIBXSMM_DATATYPE_BF16; l_gemm_def.out_type = LIBXSMM_DATATYPE_BF16; l_gemm_def.comp_type = LIBXSMM_DATATYPE_F32; } else { fprintf(stderr, "Unsupported precision %s!\n", l_precision); exit(EXIT_FAILURE); } if ( l_file_input != 0 ) { l_file_handle = fopen( l_file_name, "r" ); } else { if ( l_trans_b == 0 ) { 
printf("------------------------------------------------\n"); printf("RUNNING (%ix%i) X (%ix%i) = (%ix%i), %s, BR=%i\n", l_m, l_k, l_k, l_n, l_m, l_n, l_precision, l_br); printf("------------------------------------------------\n"); } else { printf("------------------------------------------------\n"); printf("RUNNING (%ix%i) X (%ix%i)^T = (%ix%i), %s, BR=%i\n", l_m, l_k, l_k, l_n, l_m, l_n, l_precision, l_br); printf("------------------------------------------------\n"); } } /* read the number of threads */ #if defined(_OPENMP) #pragma omp parallel { #pragma omp master { l_n_threads = omp_get_num_threads(); } } #endif unsigned int l_keep_going = 0; do { double error = 0.0; if ( l_file_input != 0 ) { char l_line[512]; if ( fgets( l_line, 512, l_file_handle) == NULL ) { l_keep_going = 0; break; } else { l_keep_going = 1; } if ( 6 != sscanf( l_line, "%i %i %i %i %i %i", &l_m, &l_n, &l_k, &l_lda, &l_ldb, &l_ldc ) ) exit(EXIT_FAILURE); } l_gemm_def.m = l_m; l_gemm_def.n = l_n; l_gemm_def.k = l_k; l_gemm_def.lda = l_lda; l_gemm_def.ldb = l_ldb; l_gemm_def.ldc = l_ldc; l_runtime_libxsmm = 0; #if defined(_OPENMP) #pragma omp parallel reduction(+:l_runtime_libxsmm) #endif { char *l_a, *l_b, *l_c, *l_c_perf, *l_c_gold; l_a = (char*)libxsmm_aligned_malloc((size_t)l_lda * (size_t)l_k * (size_t)l_br * LIBXSMM_TYPESIZE(l_gemm_def.in_type), 64); if (l_gemm_def.trans_b == 0) { l_b = (char*)libxsmm_aligned_malloc((size_t)l_ldb * (size_t)l_n * (size_t)l_br * LIBXSMM_TYPESIZE(l_gemm_def.in_type), 64); } else { l_b = (char*)libxsmm_aligned_malloc((size_t)l_ldb * (size_t)l_k * (size_t)l_br * LIBXSMM_TYPESIZE(l_gemm_def.in_type), 64); } l_c = (char*)libxsmm_aligned_malloc((size_t)l_ldc * (size_t)l_n * LIBXSMM_TYPESIZE(l_gemm_def.out_type), 64); l_c_perf = (char*)libxsmm_aligned_malloc((size_t)l_ldc * (size_t)l_n * LIBXSMM_TYPESIZE(l_gemm_def.out_type), 64); l_c_gold = (char*)libxsmm_aligned_malloc((size_t)l_ldc * (size_t)l_n * LIBXSMM_TYPESIZE(l_gemm_def.out_type), 64); 
init_random_matrix( l_gemm_def.in_type, l_a, l_br, l_lda, l_k ); if (l_gemm_def.trans_b == 0) { init_random_matrix( l_gemm_def.in_type, l_b, l_br, l_ldb, l_n ); } else { init_random_matrix( l_gemm_def.in_type, l_b, l_br, l_ldb, l_k ); } if ( l_beta == 0 ) { init_garbage_matrix( l_gemm_def.out_type, l_c, 1, l_ldc, l_n ); init_garbage_matrix( l_gemm_def.out_type, l_c_perf, 1, l_ldc, l_n ); init_garbage_matrix( l_gemm_def.out_type, l_c_gold, 1, l_ldc, l_n ); } else { init_zero_matrix( l_gemm_def.out_type, l_c, 1, l_ldc, l_n ); init_zero_matrix( l_gemm_def.out_type, l_c_perf, 1, l_ldc, l_n ); init_zero_matrix( l_gemm_def.out_type, l_c_gold, 1, l_ldc, l_n ); } /* run gold solution */ #pragma omp master { ref_matmul( &l_gemm_def, l_a, l_b, l_c_gold ); } /* run LIBXSMM solution */ l_runtime_libxsmm = jit_matmul( &l_gemm_def, l_a, l_b, l_c, l_c_perf, l_reps, l_file_input ); /* run compare */ #pragma omp master { error = check_matrix( l_gemm_def.out_type, l_c_gold, l_c, l_ldc, l_m, l_n ); } libxsmm_free(l_a); libxsmm_free(l_b); libxsmm_free(l_c); libxsmm_free(l_c_perf); libxsmm_free(l_c_gold); } l_runtime_libxsmm /= (double)l_n_threads; if ( l_file_input == 0 ) { printf("%fs for libxsmm\n", l_runtime_libxsmm); printf("%f GFLOPS for libxsmm\n", ((double)((double)l_reps * (double)l_m * (double)l_n * (double)l_k * (double)l_br * (double)l_n_threads) * 2.0) / (l_runtime_libxsmm * 1.0e9)); printf("max. 
error: %f\n", error); } else { if ( l_run_check == 1 ) { printf("%i %i %i %i %i %i %i %i %i %s %f %f\n", l_m, l_n, l_k, l_lda, l_ldb, l_ldc, l_br, l_br_type, l_br_unroll, l_precision, ((double)((double)l_reps * (double)l_m * (double)l_n * (double)l_k * (double)l_br * (double)l_n_threads) * 2.0) / (l_runtime_libxsmm * 1.0e9), error ); } else { printf("%i %i %i %i %i %i %i %i %i %s %f\n", l_m, l_n, l_k, l_lda, l_ldb, l_ldc, l_br, l_br_type, l_br_unroll, l_precision, ((double)((double)l_reps * (double)l_m * (double)l_n * (double)l_k * (double)l_br * (double)l_n_threads) * 2.0) / (l_runtime_libxsmm * 1.0e9) ); } } if ( (l_total_max_error < error) && (l_run_check == 1) ) { l_total_max_error = error; } } while ( l_keep_going ); if ( l_file_input != 0 ) { fclose( l_file_handle ); } else { printf("------------------------------------------------\n"); } /* Print total max error */ printf("\n\n Total Max Error %f\n\n", l_total_max_error ); if ( l_total_max_error >= 0.00005 && l_br_type == 0) { return EXIT_FAILURE; } else if ( l_total_max_error >= 0.0005 && l_br_type > 0) { return EXIT_FAILURE; } else { return EXIT_SUCCESS; } }
globalsums.c
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <omp.h>

/* Global-sum accuracy demo: fill a mesh with the Leblanc two-level energy
 * profile (first half high values, second half low values) and compare
 * several summation algorithms against the analytically known sum.
 * The summation kernels below are defined in sibling translation units. */
double do_sum_novec(double *var, long ncells);
double do_sum(double *var, long ncells);
double do_kahan_sum(double *var, long ncells);
double do_serial_sum_fog_v(double *var, long ncells);
double do_serial_sum_fog_v8(double *var, long ncells);

void cpu_timer_start(struct timespec *tstart_cpu);
double cpu_timer_stop(struct timespec tstart_cpu);

/* Ratio between the high and the low cell value.  The original definition
 * ended with a semicolon, which injects an empty statement at every
 * expansion and breaks the macro inside larger expressions (CERT PRE11-C);
 * the semicolon has been removed. */
#define ORDERS_OF_MAGNITUDE 1.0e9

typedef unsigned int uint;

int main(int argc, char *argv[])
{
   (void)argc;  /* command-line arguments are unused */
   (void)argv;

   /* Report the OpenMP team size once; only the master thread prints. */
#pragma omp parallel
   if (omp_get_thread_num() == 0) printf("\n Running with %d thread(s)\n\n",omp_get_num_threads());

   //for (int pow_of_two = 8; pow_of_two < 31; pow_of_two++){
   for (int pow_of_two = 30; pow_of_two < 31; pow_of_two++){
      long ncells = (long)pow((double)2,(double)pow_of_two);
      long ncellsdiv2 = ncells/2;

      printf("SETTINGS INFO -- ncells %ld log %d\n",ncells,(int)log2((double)ncells));

      double high_value = 1.0e-1;
      double low_value = 1.0e-1/ORDERS_OF_MAGNITUDE;
      /* Closed-form reference: half the cells hold high_value, half low_value. */
      double accurate_sum = (double)ncellsdiv2 * high_value + (double)ncellsdiv2 * low_value;

      /* 2^30 doubles is 8 GiB -- this allocation can realistically fail,
       * so check it instead of dereferencing a NULL pointer. */
      double *energy = (double *)malloc(ncells*sizeof(double));
      if (energy == NULL) {
         fprintf(stderr, "Error: could not allocate %ld cells\n", ncells);
         return EXIT_FAILURE;
      }

      // Initialize with high values first
      printf("Initializing mesh with Leblanc problem, high values first\n");
      for (long i = 0; i < ncells; i++){
         energy[i] = (i < ncellsdiv2) ? high_value : low_value;
      }

      double test_sum;
      struct timespec cpu_timer;
      double cpu_time;

      //******************************************************
      cpu_timer_start(&cpu_timer);
      test_sum = do_sum_novec(energy, ncells);
      cpu_time = cpu_timer_stop(cpu_timer);
      printf(" accurate sum %-17.16lg sum %-17.16lg diff %10.4lg relative diff %10.4lg runtime %lf", accurate_sum,test_sum,(test_sum-accurate_sum),((test_sum-accurate_sum)/accurate_sum), cpu_time);
      printf(" Serial sum\n");

      //******************************************************
      cpu_timer_start(&cpu_timer);
      test_sum = do_kahan_sum(energy, ncells);
      cpu_time = cpu_timer_stop(cpu_timer);
      printf(" accurate sum %-17.16lg sum %-17.16lg diff %10.4lg relative diff %10.4lg runtime %lf", accurate_sum,test_sum,(test_sum-accurate_sum),((test_sum-accurate_sum)/accurate_sum), cpu_time);
      printf(" Kahan sum serial with double double accumulator\n");

      //******************************************************
      cpu_timer_start(&cpu_timer);
      /* NOTE(review): every thread in the team calls do_kahan_sum and assigns
       * the shared test_sum without synchronization -- presumably the kernel
       * contains an orphaned OpenMP worksharing construct so all threads
       * compute the same value, but the unguarded write is still formally a
       * data race.  Verify against the definition of do_kahan_sum. */
#pragma omp parallel
      test_sum = do_kahan_sum(energy, ncells);
      cpu_time = cpu_timer_stop(cpu_timer);
      printf(" accurate sum %-17.16lg sum %-17.16lg diff %10.4lg relative diff %10.4lg runtime %lf", accurate_sum,test_sum,(test_sum-accurate_sum),((test_sum-accurate_sum)/accurate_sum), cpu_time);
      printf(" Kahan sum threaded with double double accumulator\n");

      //******************************************************
      printf(" 4 wide vectors serial sum\n");

      //******************************************************
      cpu_timer_start(&cpu_timer);
      test_sum = do_sum(energy, ncells);
      cpu_time = cpu_timer_stop(cpu_timer);
      printf(" accurate sum %-17.16lg sum %-17.16lg diff %10.4lg relative diff %10.4lg runtime %lf", accurate_sum,test_sum,(test_sum-accurate_sum),((test_sum-accurate_sum)/accurate_sum), cpu_time);
      printf(" Serial sum (OpenMP SIMD pragma)\n");

      //******************************************************
      free(energy);
      printf("\n");
   }

   return EXIT_SUCCESS;
}

/* Start a wall-clock interval measurement (monotonic clock: immune to
 * wall-time steps from NTP or manual adjustment). */
void cpu_timer_start(struct timespec *tstart_cpu){
   clock_gettime(CLOCK_MONOTONIC, tstart_cpu);
}

/* Return seconds elapsed since tstart_cpu.  A negative tv_nsec difference is
 * harmless here: the second and nanosecond terms are summed as doubles, so
 * the missing borrow is compensated arithmetically. */
double cpu_timer_stop(struct timespec tstart_cpu){
   struct timespec tstop_cpu, tresult;
   clock_gettime(CLOCK_MONOTONIC, &tstop_cpu);
   tresult.tv_sec  = tstop_cpu.tv_sec  - tstart_cpu.tv_sec;
   tresult.tv_nsec = tstop_cpu.tv_nsec - tstart_cpu.tv_nsec;
   double result = (double)tresult.tv_sec + (double)tresult.tv_nsec*1.0e-9;
   return(result);
}
2Dfold.c
/* * minimum free energy * RNA secondary structure with * basepair distance d_1 to reference structure 1 and distance d_2 to reference structure 2 * */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <stdio.h> #include <stdlib.h> #include <math.h> #include <ctype.h> #include <string.h> #include "ViennaRNA/utils/basic.h" #include "ViennaRNA/params/default.h" #include "ViennaRNA/fold_vars.h" #include "ViennaRNA/fold.h" #include "ViennaRNA/loops/all.h" #include "ViennaRNA/params/basic.h" #ifdef _OPENMP #include <omp.h> #endif #include "ViennaRNA/2Dfold.h" /* ################################# # GLOBAL VARIABLES # ################################# */ int compute_2Dfold_F3 = 0; /* ################################# # PRIVATE VARIABLES # ################################# */ /* ################################# # PRIVATE FUNCTION DECLARATIONS # ################################# */ PRIVATE void mfe_linear(vrna_fold_compound_t *vc); PRIVATE void mfe_circ(vrna_fold_compound_t *vc); PUBLIC void update_TwoDfold_params(TwoDfold_vars *vars); PRIVATE void backtrack_f5(unsigned int j, int k, int l, char *structure, vrna_fold_compound_t *vc); PRIVATE void backtrack_c(unsigned int i, unsigned int j, int k, int l, char *structure, vrna_fold_compound_t *vc); PRIVATE void backtrack_m(unsigned int i, unsigned int j, int k, int l, char *structure, vrna_fold_compound_t *vc); PRIVATE void backtrack_m1(unsigned int i, unsigned int j, int k, int l, char *structure, vrna_fold_compound_t *vc); PRIVATE void backtrack_fc(int k, int l, char *structure, vrna_fold_compound_t *vc); PRIVATE void backtrack_m2(unsigned int i, int k, int l, char *structure, vrna_fold_compound_t *vc); PRIVATE void adjustArrayBoundaries(int ***array, int *k_min, int *k_max, int **l_min, int **l_max, int k_min_real, int k_max_real, int *l_min_real, int *l_max_real); INLINE PRIVATE void preparePosteriorBoundaries(int size, int shift, int *min_k, int *max_k, int **min_l, int **max_l); INLINE PRIVATE void 
updatePosteriorBoundaries(int d1, int d2, int *min_k, int *max_k, int **min_l, int **max_l); INLINE PRIVATE void prepareBoundaries(int min_k_pre, int max_k_pre, int min_l_pre, int max_l_pre, int bpdist, int *min_k, int *max_k, int **min_l, int **max_l); INLINE PRIVATE void prepareArray(int ***array, int min_k, int max_k, int *min_l, int *max_l); INLINE PRIVATE void prepareArray2(unsigned long ***array, int min_k, int max_k, int *min_l, int *max_l); /* ################################# # BEGIN OF FUNCTION DEFINITIONS # ################################# */ #if 0 PRIVATE void initialize_TwoDfold_vars(TwoDfold_vars *vars) { update_TwoDfold_params(vars); /* this call updates the params in the ViennaRNA fold.o which is a global, so be careful * whith calling it parallel... need a workarround or fix of ViennaRNA fold stuff */ update_fold_params(); } PUBLIC TwoDfold_solution ** TwoDfold(TwoDfold_vars *vars, int distance1, int distance2) { unsigned int i, d1, d2; unsigned int maxD1; unsigned int maxD2; unsigned int length; TwoDfold_solution **output; initialize_TwoDfold_vars(vars); if (fabs(vars->P->temperature - temperature) > 1e-6) update_TwoDfold_params(vars); vars->S = encode_sequence(vars->sequence, 0); vars->S1 = encode_sequence(vars->sequence, 1); make_ptypes(vars); maxD1 = vars->maxD1; maxD2 = vars->maxD2; if (distance1 >= 0) { if ((unsigned int)distance1 > maxD1) fprintf(stderr, "limiting maximum basepair distance 1 to %u\n", maxD1); else maxD1 = (unsigned int)distance1; } if (distance2 >= 0) { if ((unsigned int)distance2 > maxD2) fprintf(stderr, "limiting maximum basepair distance 2 to %u\n", maxD2); else maxD2 = (unsigned int)distance2; } vars->maxD1 = maxD1; vars->maxD2 = maxD2; output = (TwoDfold_solution **)vrna_alloc((vars->maxD1 + 1) * sizeof(TwoDfold_solution *)); mfe_linear(vars); if (vars->circ) mfe_circ(vars); length = vars->seq_length; for (d1 = 0; d1 <= maxD1; d1++) { output[d1] = (TwoDfold_solution *)vrna_alloc((vars->maxD2 + 1) * 
sizeof(TwoDfold_solution)); #ifdef _OPENMP #pragma omp parallel for private(d2) #endif for (d2 = 0; d2 <= maxD2; d2++) { output[d1][d2].en = (float)INF / (float)100.; output[d1][d2].s = NULL; } if ((d1 >= ((vars->circ) ? vars->k_min_values_fc : vars->k_min_values_f[length])) && (d1 <= ((vars->circ) ? vars->k_max_values_fc : vars->k_max_values_f[length]))) { #ifdef _OPENMP #pragma omp parallel for private(d2, i) #endif for (d2 = ((vars->circ) ? vars->l_min_values_fc[d1] : vars->l_min_values_f[length][d1]); d2 <= ((vars->circ) ? vars->l_max_values_fc[d1] : vars->l_max_values_f[length][d1]); d2 += 2) { output[d1][d2].en = (float)((vars->circ) ? vars->E_Fc[d1][d2 / 2] : vars->E_F5[length][d1][d2 / 2]) / (float)100.; if (vars->do_backtrack && (output[d1][d2].en != (float)INF / (float)100.)) { char *mfe_structure = (char *)vrna_alloc(length + 1); for (i = 0; i < length; i++) mfe_structure[i] = '.'; mfe_structure[i] = '\0'; (vars->circ) ? backtrack_fc(d1, d2, mfe_structure, vars) : backtrack_f5(length, d1, d2, mfe_structure, vars); output[d1][d2].s = mfe_structure; } } } } return output; } #endif PUBLIC vrna_sol_TwoD_t * vrna_mfe_TwoD(vrna_fold_compound_t *vars, int distance1, int distance2) { unsigned int i, d1, d2; unsigned int maxD1; unsigned int maxD2; unsigned int length; unsigned int counter = 0; int en = 0; vrna_sol_TwoD_t *output; vrna_md_t *md; vrna_mx_mfe_t *matrices; maxD1 = vars->maxD1; maxD2 = vars->maxD2; matrices = vars->matrices; md = &(vars->params->model_details); if (distance1 >= 0) { if ((unsigned int)distance1 > maxD1) vrna_message_warning("vrna_mfe_TwoD@2Dfold.c: limiting maximum basepair distance 1 to %u\n", maxD1); else maxD1 = (unsigned int)distance1; } if (distance2 >= 0) { if ((unsigned int)distance2 > maxD2) vrna_message_warning("vrna_mfe_TwoD@2Dfold.c: limiting maximum basepair distance 2 to %u\n", maxD2); else maxD2 = (unsigned int)distance2; } vars->maxD1 = maxD1; vars->maxD2 = maxD2; output = (vrna_sol_TwoD_t *)vrna_alloc((((vars->maxD1 + 
1) * (vars->maxD2 + 2)) / 2 + 2) * sizeof(vrna_sol_TwoD_t)); mfe_linear(vars); if (md->circ) mfe_circ(vars); length = vars->length; for (d1 = 0; d1 <= maxD1; d1++) { if ((d1 >= ((md->circ) ? matrices->k_min_Fc : matrices->k_min_F5[length])) && (d1 <= ((md->circ) ? matrices->k_max_Fc : matrices->k_max_F5[length]))) { for (d2 = ((md->circ) ? matrices->l_min_Fc[d1] : matrices->l_min_F5[length][d1]); d2 <= ((md->circ) ? matrices->l_max_Fc[d1] : matrices->l_max_F5[length][d1]); d2 += 2) { en = ((md->circ) ? matrices->E_Fc[d1][d2 / 2] : matrices->E_F5[length][d1][d2 / 2]); if (en == INF) continue; output[counter].k = d1; output[counter].l = d2; output[counter].en = (float)en / (float)100.; if (md->backtrack) { char *mfe_structure = (char *)vrna_alloc(length + 1); for (i = 0; i < length; i++) mfe_structure[i] = '.'; mfe_structure[i] = '\0'; (md->circ) ? backtrack_fc((int)d1, (int)d2, mfe_structure, vars) : backtrack_f5(length, (int)d1, (int)d2, mfe_structure, vars); output[counter].s = mfe_structure; } else { output[counter].s = NULL; } counter++; } } } /* store entry for remaining partition if it exists */ en = ((md->circ) ? matrices->E_Fc_rem : matrices->E_F5_rem[length]); if (en != INF) { output[counter].k = -1; output[counter].l = -1; output[counter].en = (float)en / (float)100.; if (md->backtrack) { char *mfe_structure = (char *)vrna_alloc(length + 1); for (i = 0; i < length; i++) mfe_structure[i] = '.'; mfe_structure[i] = '\0'; (md->circ) ? 
backtrack_fc(-1, -1, mfe_structure, vars) : backtrack_f5(length, -1, -1, mfe_structure, vars); output[counter].s = mfe_structure; } else { output[counter].s = NULL; } counter++; } /* insert end-marker entry */ output[counter].k = output[counter].l = INF; counter++; /* resize to actual dataset amount */ output = (vrna_sol_TwoD_t *)vrna_realloc(output, sizeof(vrna_sol_TwoD_t) * counter); return output; } PUBLIC char * vrna_backtrack5_TwoD(vrna_fold_compound_t *vc, int k, int l, unsigned int j) { unsigned int i; char *mfe_structure = (char *)vrna_alloc(j + 1); if (j < TURN + 2) return NULL; for (i = 0; i < j; i++) mfe_structure[i] = '.'; mfe_structure[i] = '\0'; backtrack_f5(j, k, l, mfe_structure, vc); return mfe_structure; } PRIVATE void mfe_linear(vrna_fold_compound_t *vc) { unsigned int d, i, j, ij, maxD1, maxD2, seq_length, dia, dib, dja, djb, *referenceBPs1, *referenceBPs2, *mm1, *mm2, *bpdist; int cnt1, cnt2, cnt3, cnt4, d1, d2, energy, dangles, temp2, type, additional_en, *my_iindx, *jindx, circ, *rtype; short *S1, *reference_pt1, *reference_pt2; char *sequence, *ptype; vrna_param_t *P; vrna_mx_mfe_t *matrices; vrna_md_t *md; /* dereferenciate things we often need */ P = vc->params; md = &(P->model_details); matrices = vc->matrices; sequence = vc->sequence; seq_length = vc->length; maxD1 = vc->maxD1; maxD2 = vc->maxD2; S1 = vc->sequence_encoding; ptype = vc->ptype; rtype = &(md->rtype[0]); reference_pt1 = vc->reference_pt1; reference_pt2 = vc->reference_pt2; my_iindx = vc->iindx; jindx = vc->jindx; referenceBPs1 = vc->referenceBPs1; referenceBPs2 = vc->referenceBPs2; mm1 = vc->mm1; mm2 = vc->mm2; bpdist = vc->bpdist; dangles = md->dangles; circ = md->circ; for (d = TURN + 2; d <= seq_length; d++) { /* i,j in [1..length] */ #ifdef _OPENMP #pragma omp parallel for private(additional_en, j, energy, temp2, i, ij, dia,dib,dja,djb,cnt1,cnt2,cnt3,cnt4, d1, d2) #endif for (j = d; j <= seq_length; j++) { unsigned int p, q, pq, u, maxp, dij; int type_2, type, tt, 
no_close, base_d1, base_d2; i = j - d + 1; dij = j - i - 1; ij = my_iindx[i] - j; type = ptype[jindx[j] + i]; no_close = (((type == 3) || (type == 4)) && no_closingGU); if (type) { /* we have a pair */ /* increase or decrease distance-to-reference value depending whether (i,j) is included in * reference or has to be introduced */ base_d1 = ((unsigned int)reference_pt1[i] != j) ? 1 : -1; base_d2 = ((unsigned int)reference_pt2[i] != j) ? 1 : -1; /* HAIRPIN STRUCTURES */ /* get distance to reference if closing the hairpin * d = dbp(T_{i,j}, {i,j}) */ d1 = base_d1 + referenceBPs1[ij]; d2 = base_d2 + referenceBPs2[ij]; int min_k, max_k, min_l, max_l; int real_min_k, real_max_k, *min_l_real, *max_l_real; min_l = min_k = 0; max_k = mm1[ij] + referenceBPs1[ij]; max_l = mm2[ij] + referenceBPs2[ij]; prepareBoundaries(min_k, max_k, min_l, max_l, bpdist[ij], &matrices->k_min_C[ij], &matrices->k_max_C[ij], &matrices->l_min_C[ij], &matrices->l_max_C[ij] ); preparePosteriorBoundaries(matrices->k_max_C[ij] - matrices->k_min_C[ij] + 1, matrices->k_min_C[ij], &real_min_k, &real_max_k, &min_l_real, &max_l_real ); prepareArray(&matrices->E_C[ij], matrices->k_min_C[ij], matrices->k_max_C[ij], matrices->l_min_C[ij], matrices->l_max_C[ij] ); #ifdef COUNT_STATES prepareArray2(&matrices->N_C[ij], matrices->k_min_C[ij], matrices->k_max_C[ij], matrices->l_min_C[ij], matrices->l_max_C[ij] ); #endif /* d1 and d2 are the distancies to both references introduced by closing a hairpin structure at (i,j) */ if ((d1 >= 0) && (d2 >= 0)) { if (((unsigned int)d1 <= maxD1) && ((unsigned int)d2 <= maxD2)) { matrices->E_C[ij][d1][d2 / 2] = (no_close) ? FORBIDDEN : E_Hairpin(dij, type, S1[i + 1], S1[j - 1], sequence + i - 1, P); updatePosteriorBoundaries(d1, d2, &real_min_k, &real_max_k, &min_l_real, &max_l_real ); #ifdef COUNT_STATES matrices->N_C[ij][d1][d2 / 2] = 1; #endif } else { matrices->E_C_rem[ij] = (no_close) ? 
FORBIDDEN : E_Hairpin(dij, type, S1[i + 1], S1[j - 1], sequence + i - 1, P); } } /* INTERIOR LOOP STRUCTURES */ maxp = MIN2(j - 2 - TURN, i + MAXLOOP + 1); for (p = i + 1; p <= maxp; p++) { unsigned int minq = p + TURN + 1; unsigned int ln_pre = dij + p; if (ln_pre > minq + MAXLOOP) minq = ln_pre - MAXLOOP - 1; for (q = minq; q < j; q++) { pq = my_iindx[p] - q; /* set distance to reference structure... */ type_2 = ptype[jindx[q] + p]; if (type_2 == 0) continue; type_2 = rtype[type_2]; /* get distance to reference if closing the interior loop * d2 = dbp(S_{i,j}, S_{p.q} + {i,j}) */ d1 = base_d1 + referenceBPs1[ij] - referenceBPs1[pq]; d2 = base_d2 + referenceBPs2[ij] - referenceBPs2[pq]; if (no_closingGU) if (no_close || (type_2 == 3) || (type_2 == 4)) if ((p > i + 1) || (q < j - 1)) continue; /* continue unless stack */ energy = E_IntLoop(p - i - 1, j - q - 1, type, type_2, S1[i + 1], S1[j - 1], S1[p - 1], S1[q + 1], P); if (matrices->E_C[pq] != NULL) { for (cnt1 = matrices->k_min_C[pq]; cnt1 <= matrices->k_max_C[pq]; cnt1++) { for (cnt2 = matrices->l_min_C[pq][cnt1]; cnt2 <= matrices->l_max_C[pq][cnt1]; cnt2 += 2) { if (matrices->E_C[pq][cnt1][cnt2 / 2] != INF) { if (((cnt1 + d1) <= maxD1) && ((cnt2 + d2) <= maxD2)) { matrices->E_C[ij][cnt1 + d1][(cnt2 + d2) / 2] = MIN2(matrices->E_C[ij][cnt1 + d1][(cnt2 + d2) / 2], matrices->E_C[pq][cnt1][cnt2 / 2] + energy ); updatePosteriorBoundaries(cnt1 + d1, cnt2 + d2, &real_min_k, &real_max_k, &min_l_real, &max_l_real ); #ifdef COUNT_STATES matrices->N_C[ij][cnt1 + d1][(cnt2 + d2) / 2] += matrices->N_C[pq][cnt1][cnt2 / 2]; #endif } /* collect all cases where d1+cnt1 or d2+cnt2 exceeds maxD1, maxD2, respectively */ else { matrices->E_C_rem[ij] = MIN2(matrices->E_C_rem[ij], matrices->E_C[pq][cnt1][cnt2 / 2] + energy); } } } } } /* collect all contributions where C[pq] already lies outside k_max, l_max boundary */ if (matrices->E_C_rem[pq] != INF) matrices->E_C_rem[ij] = MIN2(matrices->E_C_rem[ij], matrices->E_C_rem[pq] + 
energy); } /* end q-loop */ } /* end p-loop */ /* MULTI LOOP STRUCTURES */ if (!no_close) { /* dangle energies for multiloop closing stem */ tt = rtype[type]; temp2 = P->MLclosing; if (dangles == 2) temp2 += E_MLstem(tt, S1[j - 1], S1[i + 1], P); else temp2 += E_MLstem(tt, -1, -1, P); for (u = i + TURN + 2; u < j - TURN - 2; u++) { int i1u = my_iindx[i + 1] - u; int u1j1 = my_iindx[u + 1] - j + 1; /* check all cases where either M or M1 are already out of scope of maxD1 and/or maxD2 */ if (matrices->E_M_rem[i1u] != INF) { for (cnt3 = matrices->k_min_M1[u1j1]; cnt3 <= matrices->k_max_M1[u1j1]; cnt3++) for (cnt4 = matrices->l_min_M1[u1j1][cnt3]; cnt4 <= matrices->l_max_M1[u1j1][cnt3]; cnt4 += 2) { if (matrices->E_M1[u1j1][cnt3][cnt4 / 2] != INF) { matrices->E_C_rem[ij] = MIN2(matrices->E_C_rem[ij], matrices->E_M_rem[i1u] + matrices->E_M1[u1j1][cnt3][cnt4 / 2] + temp2 ); } } if (matrices->E_M1_rem[u1j1] != INF) { matrices->E_C_rem[ij] = MIN2(matrices->E_C_rem[ij], matrices->E_M_rem[i1u] + matrices->E_M1_rem[u1j1] + temp2 ); } } if (matrices->E_M1_rem[u1j1] != INF) { for (cnt1 = matrices->k_min_M[i1u]; cnt1 <= matrices->k_max_M[i1u]; cnt1++) for (cnt2 = matrices->l_min_M[i1u][cnt1]; cnt2 <= matrices->l_max_M[i1u][cnt1]; cnt2 += 2) if (matrices->E_M[i1u][cnt1][cnt2 / 2] != INF) { matrices->E_C_rem[ij] = MIN2(matrices->E_C_rem[ij], matrices->E_M[i1u][cnt1][cnt2 / 2] + matrices->E_M1_rem[u1j1] + temp2 ); } } /* get distance to reference if closing the multiloop * d = dbp(S_{i,j}, {i,j} + S_{i+1,u} + S_{u+1,j-1}) */ if (!matrices->E_M[i1u]) continue; if (!matrices->E_M1[u1j1]) continue; d1 = base_d1 + referenceBPs1[ij] - referenceBPs1[i1u] - referenceBPs1[u1j1]; d2 = base_d2 + referenceBPs2[ij] - referenceBPs2[i1u] - referenceBPs2[u1j1]; for (cnt1 = matrices->k_min_M[i1u]; cnt1 <= matrices->k_max_M[i1u]; cnt1++) for (cnt2 = matrices->l_min_M[i1u][cnt1]; cnt2 <= matrices->l_max_M[i1u][cnt1]; cnt2 += 2) for (cnt3 = matrices->k_min_M1[u1j1]; cnt3 <= matrices->k_max_M1[u1j1]; 
cnt3++) for (cnt4 = matrices->l_min_M1[u1j1][cnt3]; cnt4 <= matrices->l_max_M1[u1j1][cnt3]; cnt4 += 2) { if ((matrices->E_M[i1u][cnt1][cnt2 / 2] != INF) && (matrices->E_M1[u1j1][cnt3][cnt4 / 2] != INF)) { if (((cnt1 + cnt3 + d1) <= maxD1) && ((cnt2 + cnt4 + d2) <= maxD2)) { matrices->E_C[ij][cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2) / 2] = MIN2(matrices->E_C[ij][cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2) / 2], matrices->E_M[i1u][cnt1][cnt2 / 2] + matrices->E_M1[u1j1][cnt3][cnt4 / 2] + temp2 ); updatePosteriorBoundaries(cnt1 + cnt3 + d1, cnt2 + cnt4 + d2, &real_min_k, &real_max_k, &min_l_real, &max_l_real ); #ifdef COUNT_STATES matrices->N_C[ij][cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2) / 2] += matrices->N_M[i1u][cnt1][cnt2 / 2] * matrices->N_M1[u1j1][cnt3][cnt4 / 2]; #endif } /* collect all cases where d1+cnt1+cnt3 or d2+cnt2+cnt4 exceeds maxD1, maxD2, respectively */ else { matrices->E_C_rem[ij] = MIN2(matrices->E_C_rem[ij], matrices->E_M[i1u][cnt1][cnt2 / 2] + matrices->E_M1[u1j1][cnt3][cnt4 / 2] + temp2 ); } } } } } /* resize and move memory portions of energy matrix E_C */ adjustArrayBoundaries(&matrices->E_C[ij], &matrices->k_min_C[ij], &matrices->k_max_C[ij], &matrices->l_min_C[ij], &matrices->l_max_C[ij], real_min_k, real_max_k, min_l_real, max_l_real ); #ifdef COUNT_STATES /* actually we should adjust the array boundaries here but we might never use the count states option more than once so what....*/ #endif } /* end >> if (pair) << */ /* done with c[i,j], now compute fML[i,j] */ /* free ends ? -----------------------------------------*/ dia = referenceBPs1[ij] - referenceBPs1[my_iindx[i + 1] - j]; dib = referenceBPs2[ij] - referenceBPs2[my_iindx[i + 1] - j]; dja = referenceBPs1[ij] - referenceBPs1[ij + 1]; djb = referenceBPs2[ij] - referenceBPs2[ij + 1]; if (dangles == 2) temp2 = E_MLstem(type, ((i > 1) || circ) ? S1[i - 1] : -1, ((j < seq_length) || circ) ? 
S1[j + 1] : -1, P); else temp2 = E_MLstem(type, -1, -1, P); int min_k_guess, max_k_guess, min_l_guess, max_l_guess; int min_k_real_m, max_k_real_m, *min_l_real_m, *max_l_real_m; int min_k_real_m1, max_k_real_m1, *min_l_real_m1, *max_l_real_m1; min_k_guess = min_l_guess = 0; max_k_guess = mm1[ij] + referenceBPs1[ij]; max_l_guess = mm2[ij] + referenceBPs2[ij]; prepareBoundaries(min_k_guess, max_k_guess, min_l_guess, max_l_guess, bpdist[ij], &matrices->k_min_M[ij], &matrices->k_max_M[ij], &matrices->l_min_M[ij], &matrices->l_max_M[ij] ); prepareBoundaries(min_k_guess, max_k_guess, min_l_guess, max_l_guess, bpdist[ij], &matrices->k_min_M1[ij], &matrices->k_max_M1[ij], &matrices->l_min_M1[ij], &matrices->l_max_M1[ij] ); preparePosteriorBoundaries(matrices->k_max_M[ij] - matrices->k_min_M[ij] + 1, matrices->k_min_M[ij], &min_k_real_m, &max_k_real_m, &min_l_real_m, &max_l_real_m ); preparePosteriorBoundaries(matrices->k_max_M1[ij] - matrices->k_min_M1[ij] + 1, matrices->k_min_M1[ij], &min_k_real_m1, &max_k_real_m1, &min_l_real_m1, &max_l_real_m1 ); prepareArray(&matrices->E_M[ij], matrices->k_min_M[ij], matrices->k_max_M[ij], matrices->l_min_M[ij], matrices->l_max_M[ij] ); prepareArray(&matrices->E_M1[ij], matrices->k_min_M1[ij], matrices->k_max_M1[ij], matrices->l_min_M1[ij], matrices->l_max_M1[ij] ); #ifdef COUNT_STATES prepareArray2(&matrices->N_M[ij], matrices->k_min_M[ij], matrices->k_max_M[ij], matrices->l_min_M[ij], matrices->l_max_M[ij] ); prepareArray2(&matrices->N_M1[ij], matrices->k_min_M1[ij], matrices->k_max_M1[ij], matrices->l_min_M1[ij], matrices->l_max_M1[ij] ); #endif /* now to the actual computations... 
*/ /* 1st E_M[ij] = E_M1[ij] = E_C[ij] + b */ if (matrices->E_C_rem[ij] != INF) matrices->E_M_rem[ij] = matrices->E_M1_rem[ij] = temp2 + matrices->E_C_rem[ij]; if (matrices->E_C[ij]) { for (cnt1 = matrices->k_min_C[ij]; cnt1 <= matrices->k_max_C[ij]; cnt1++) { for (cnt2 = matrices->l_min_C[ij][cnt1]; cnt2 <= matrices->l_max_C[ij][cnt1]; cnt2 += 2) { if (matrices->E_C[ij][cnt1][cnt2 / 2] != INF) { matrices->E_M[ij][cnt1][cnt2 / 2] = matrices->E_M1[ij][cnt1][cnt2 / 2] = temp2 + matrices->E_C[ij][cnt1][cnt2 / 2]; updatePosteriorBoundaries(cnt1, cnt2, &min_k_real_m, &max_k_real_m, &min_l_real_m, &max_l_real_m ); updatePosteriorBoundaries(cnt1, cnt2, &min_k_real_m1, &max_k_real_m1, &min_l_real_m1, &max_l_real_m1 ); #ifdef COUNT_STATES matrices->N_M[ij][cnt1][cnt2 / 2] = matrices->N_M1[ij][cnt1][cnt2 / 2] = matrices->N_C[ij][cnt1][cnt2 / 2]; #endif } } } } /* 2nd E_M[ij] = MIN(E_M[ij], E_M[i+1,j] + c) */ if (matrices->E_M_rem[my_iindx[i + 1] - j] != INF) { matrices->E_M_rem[ij] = MIN2(matrices->E_M_rem[ij], matrices->E_M_rem[my_iindx[i + 1] - j] + P->MLbase ); } if (matrices->E_M[my_iindx[i + 1] - j]) { for (cnt1 = matrices->k_min_M[my_iindx[i + 1] - j]; cnt1 <= matrices->k_max_M[my_iindx[i + 1] - j]; cnt1++) { for (cnt2 = matrices->l_min_M[my_iindx[i + 1] - j][cnt1]; cnt2 <= matrices->l_max_M[my_iindx[i + 1] - j][cnt1]; cnt2 += 2) { if (matrices->E_M[my_iindx[i + 1] - j][cnt1][cnt2 / 2] != INF) { if (((cnt1 + dia) <= maxD1) && ((cnt2 + dib) <= maxD2)) { matrices->E_M[ij][cnt1 + dia][(cnt2 + dib) / 2] = MIN2(matrices->E_M[ij][cnt1 + dia][(cnt2 + dib) / 2], matrices->E_M[my_iindx[i + 1] - j][cnt1][cnt2 / 2] + P->MLbase ); updatePosteriorBoundaries(cnt1 + dia, cnt2 + dib, &min_k_real_m, &max_k_real_m, &min_l_real_m, &max_l_real_m ); #ifdef COUNT_STATES matrices->N_M[ij][cnt1 + dia][(cnt2 + dib) / 2] += matrices->N_M[my_iindx[i + 1] - j][cnt1][cnt2 / 2]; #endif } /* collect all cases where dia+cnt1 or dib+cnt2 exceeds maxD1, maxD2, respectively */ else { 
matrices->E_M_rem[ij] = MIN2(matrices->E_M_rem[ij], matrices->E_M[my_iindx[i + 1] - j][cnt1][cnt2 / 2] + P->MLbase ); } } } } } /* 3rd E_M[ij] = MIN(E_M[ij], E_M[i,j-1] + c) */ if (matrices->E_M_rem[ij + 1] != INF) { matrices->E_M_rem[ij] = MIN2(matrices->E_M_rem[ij], matrices->E_M_rem[ij + 1] + P->MLbase ); } if (matrices->E_M[ij + 1]) { for (cnt1 = matrices->k_min_M[ij + 1]; cnt1 <= matrices->k_max_M[ij + 1]; cnt1++) { for (cnt2 = matrices->l_min_M[ij + 1][cnt1]; cnt2 <= matrices->l_max_M[ij + 1][cnt1]; cnt2 += 2) { if (matrices->E_M[ij + 1][cnt1][cnt2 / 2] != INF) { if (((cnt1 + dja) <= maxD1) && ((cnt2 + djb) <= maxD2)) { matrices->E_M[ij][cnt1 + dja][(cnt2 + djb) / 2] = MIN2(matrices->E_M[ij][cnt1 + dja][(cnt2 + djb) / 2], matrices->E_M[ij + 1][cnt1][cnt2 / 2] + P->MLbase ); updatePosteriorBoundaries(cnt1 + dja, cnt2 + djb, &min_k_real_m, &max_k_real_m, &min_l_real_m, &max_l_real_m ); #ifdef COUNT_STATES matrices->N_M[ij][cnt1 + dja][(cnt2 + djb) / 2] += matrices->N_M[ij + 1][cnt1][cnt2 / 2]; #endif } /* collect all cases where dja+cnt1 or djb+cnt2 exceeds maxD1, maxD2, respectively */ else { matrices->E_M_rem[ij] = MIN2(matrices->E_M_rem[ij], matrices->E_M[ij + 1][cnt1][cnt2 / 2] + P->MLbase ); } } } } } /* 4th E_M1[ij] = MIN(E_M1[ij], E_M1[i,j-1] + c) */ if (matrices->E_M1_rem[ij + 1] != INF) { matrices->E_M1_rem[ij] = MIN2(matrices->E_M1_rem[ij], matrices->E_M1_rem[ij + 1] + P->MLbase ); } if (matrices->E_M1[ij + 1]) { for (cnt1 = matrices->k_min_M1[ij + 1]; cnt1 <= matrices->k_max_M1[ij + 1]; cnt1++) { for (cnt2 = matrices->l_min_M1[ij + 1][cnt1]; cnt2 <= matrices->l_max_M1[ij + 1][cnt1]; cnt2 += 2) { if (matrices->E_M1[ij + 1][cnt1][cnt2 / 2] != INF) { if (((cnt1 + dja) <= maxD1) && ((cnt2 + djb) <= maxD2)) { matrices->E_M1[ij][cnt1 + dja][(cnt2 + djb) / 2] = MIN2(matrices->E_M1[ij][cnt1 + dja][(cnt2 + djb) / 2], matrices->E_M1[ij + 1][cnt1][cnt2 / 2] + P->MLbase ); updatePosteriorBoundaries(cnt1 + dja, cnt2 + djb, &min_k_real_m1, &max_k_real_m1, 
&min_l_real_m1, &max_l_real_m1 ); #ifdef COUNT_STATES matrices->N_M1[ij][cnt1 + dja][(cnt2 + djb) / 2] += matrices->N_M1[ij + 1][cnt1][cnt2 / 2]; #endif } /* collect all cases where dja+cnt1 or djb+cnt2 exceeds maxD1, maxD2, respectively */ else { matrices->E_M1_rem[ij] = MIN2(matrices->E_M1_rem[ij], matrices->E_M1[ij + 1][cnt1][cnt2 / 2] + P->MLbase ); } } } } } /* 5th E_M[ij] = MIN(E_M[ij], min(E_M[i,k] + E_M[k+1,j])) */ if (j > TURN + 2) { for (u = i + 1 + TURN; u <= j - 2 - TURN; u++) { /* check all cases where M(i,u) and/or M(u+1,j) are already out of scope of maxD1 and/or maxD2 */ if (matrices->E_M_rem[my_iindx[i] - u] != INF) { for (cnt3 = matrices->k_min_M[my_iindx[u + 1] - j]; cnt3 <= matrices->k_max_M[my_iindx[u + 1] - j]; cnt3++) { for (cnt4 = matrices->l_min_M[my_iindx[u + 1] - j][cnt3]; cnt4 <= matrices->l_max_M[my_iindx[u + 1] - j][cnt3]; cnt4 += 2) { if (matrices->E_M[my_iindx[u + 1] - j][cnt3][cnt4 / 2] != INF) { matrices->E_M_rem[ij] = MIN2(matrices->E_M_rem[ij], matrices->E_M_rem[my_iindx[i] - u] + matrices->E_M[my_iindx[u + 1] - j][cnt3][cnt4 / 2] ); } } } if (matrices->E_M_rem[my_iindx[u + 1] - j] != INF) { matrices->E_M_rem[ij] = MIN2(matrices->E_M_rem[ij], matrices->E_M_rem[my_iindx[i] - u] + matrices->E_M_rem[my_iindx[u + 1] - j] ); } } if (matrices->E_M_rem[my_iindx[u + 1] - j] != INF) { for (cnt1 = matrices->k_min_M[my_iindx[i] - u]; cnt1 <= matrices->k_max_M[my_iindx[i] - u]; cnt1++) { for (cnt2 = matrices->l_min_M[my_iindx[i] - u][cnt1]; cnt2 <= matrices->l_max_M[my_iindx[i] - u][cnt1]; cnt2 += 2) { if (matrices->E_M[my_iindx[i] - u][cnt1][cnt2 / 2] != INF) { matrices->E_M_rem[ij] = MIN2(matrices->E_M_rem[ij], matrices->E_M[my_iindx[i] - u][cnt1][cnt2 / 2] + matrices->E_M_rem[my_iindx[u + 1] - j] ); } } } } if (!matrices->E_M[my_iindx[i] - u]) continue; if (!matrices->E_M[my_iindx[u + 1] - j]) continue; dia = referenceBPs1[ij] - referenceBPs1[my_iindx[i] - u] - referenceBPs1[my_iindx[u + 1] - j]; dib = referenceBPs2[ij] - 
referenceBPs2[my_iindx[i] - u] - referenceBPs2[my_iindx[u + 1] - j]; for (cnt1 = matrices->k_min_M[my_iindx[i] - u]; cnt1 <= matrices->k_max_M[my_iindx[i] - u]; cnt1++) { for (cnt2 = matrices->l_min_M[my_iindx[i] - u][cnt1]; cnt2 <= matrices->l_max_M[my_iindx[i] - u][cnt1]; cnt2 += 2) { for (cnt3 = matrices->k_min_M[my_iindx[u + 1] - j]; cnt3 <= matrices->k_max_M[my_iindx[u + 1] - j]; cnt3++) { for (cnt4 = matrices->l_min_M[my_iindx[u + 1] - j][cnt3]; cnt4 <= matrices->l_max_M[my_iindx[u + 1] - j][cnt3]; cnt4 += 2) { if ((matrices->E_M[my_iindx[i] - u][cnt1][cnt2 / 2] != INF) && (matrices->E_M[my_iindx[u + 1] - j][cnt3][cnt4 / 2] != INF)) { if (((cnt1 + cnt3 + dia) <= maxD1) && ((cnt2 + cnt4 + dib) <= maxD2)) { matrices->E_M[ij][cnt1 + cnt3 + dia][(cnt2 + cnt4 + dib) / 2] = MIN2(matrices->E_M[ij][cnt1 + cnt3 + dia][(cnt2 + cnt4 + dib) / 2], matrices->E_M[my_iindx[i] - u][cnt1][cnt2 / 2] + matrices->E_M[my_iindx[u + 1] - j][cnt3][cnt4 / 2] ); updatePosteriorBoundaries(cnt1 + cnt3 + dia, cnt2 + cnt4 + dib, &min_k_real_m, &max_k_real_m, &min_l_real_m, &max_l_real_m ); #ifdef COUNT_STATES matrices->N_M[ij][cnt1 + cnt3 + dia][(cnt2 + cnt4 + dib) / 2] += matrices->N_M[my_iindx[i] - u][cnt1][cnt2 / 2] * matrices->N_M1[my_iindx[u + 1] - j][cnt3][cnt4 / 2]; #endif } /* collect all cases where dia+cnt1+cnt3 or dib+cnt2+cnt4 exceeds maxD1, maxD2, respectively */ else { matrices->E_M_rem[ij] = MIN2(matrices->E_M_rem[ij], matrices->E_M[my_iindx[i] - u][cnt1][cnt2 / 2] + matrices->E_M[my_iindx[u + 1] - j][cnt3][cnt4 / 2] ); } } } } } } } } /* thats all folks for the multiloop decomposition... 
*/ adjustArrayBoundaries(&matrices->E_M[ij], &matrices->k_min_M[ij], &matrices->k_max_M[ij], &matrices->l_min_M[ij], &matrices->l_max_M[ij], min_k_real_m, max_k_real_m, min_l_real_m, max_l_real_m ); adjustArrayBoundaries(&matrices->E_M1[ij], &matrices->k_min_M1[ij], &matrices->k_max_M1[ij], &matrices->l_min_M1[ij], &matrices->l_max_M1[ij], min_k_real_m1, max_k_real_m1, min_l_real_m1, max_l_real_m1 ); #ifdef COUNT_STATES /* actually we should adjust the array boundaries here but we might never use the count states option more than once so what....*/ #endif } /* end of j-loop */ } /* calculate energies of 5' and 3' fragments */ /* prepare first entries in E_F5 */ for (cnt1 = 1; cnt1 <= TURN + 1; cnt1++) { matrices->E_F5[cnt1] = (int **)vrna_alloc(sizeof(int *)); matrices->E_F5[cnt1][0] = (int *)vrna_alloc(sizeof(int)); matrices->E_F5[cnt1][0][0] = 0; matrices->E_F5_rem[cnt1] = INF; matrices->k_min_F5[cnt1] = matrices->k_max_F5[cnt1] = 0; matrices->l_min_F5[cnt1] = (int *)vrna_alloc(sizeof(int)); matrices->l_max_F5[cnt1] = (int *)vrna_alloc(sizeof(int)); matrices->l_min_F5[cnt1][0] = matrices->l_max_F5[cnt1][0] = 0; #ifdef COUNT_STATES matrices->N_F5[cnt1] = (unsigned long **)vrna_alloc(sizeof(unsigned long *)); matrices->N_F5[cnt1][0] = (unsigned long *)vrna_alloc(sizeof(unsigned long)); matrices->N_F5[cnt1][0][0] = 1; #endif } for (j = TURN + 2; j <= seq_length; j++) { unsigned int da = referenceBPs1[my_iindx[1] - j] - referenceBPs1[my_iindx[1] - j + 1]; unsigned int db = referenceBPs2[my_iindx[1] - j] - referenceBPs2[my_iindx[1] - j + 1]; type = ptype[jindx[j] + 1]; additional_en = 0; if (type) { if (dangles == 2) additional_en += vrna_E_ext_stem(type, -1, j < seq_length ? 
S1[j + 1] : -1, P); else additional_en += vrna_E_ext_stem(type, -1, -1, P); } /* make min and max k guess for memory allocation */ int min_k_guess, max_k_guess, min_l_guess, max_l_guess; int *min_l_real, *max_l_real, min_k_real, max_k_real; min_k_guess = min_l_guess = 0; max_k_guess = referenceBPs1[my_iindx[1] - j] + mm1[my_iindx[1] - j]; max_l_guess = referenceBPs2[my_iindx[1] - j] + mm2[my_iindx[1] - j]; prepareBoundaries(min_k_guess, max_k_guess, min_l_guess, max_l_guess, bpdist[my_iindx[1] - j], &matrices->k_min_F5[j], &matrices->k_max_F5[j], &matrices->l_min_F5[j], &matrices->l_max_F5[j] ); preparePosteriorBoundaries(matrices->k_max_F5[j] - matrices->k_min_F5[j] + 1, matrices->k_min_F5[j], &min_k_real, &max_k_real, &min_l_real, &max_l_real ); prepareArray(&matrices->E_F5[j], matrices->k_min_F5[j], matrices->k_max_F5[j], matrices->l_min_F5[j], matrices->l_max_F5[j] ); #ifdef COUNT_STATES prepareArray2(&matrices->N_F5[j], matrices->k_min_F5[j], matrices->k_max_F5[j], matrices->l_min_F5[j], matrices->l_max_F5[j] ); #endif /* begin the actual computation of 5' end energies */ /* j-1 is unpaired ... 
*/ matrices->E_F5_rem[j] = matrices->E_F5_rem[j - 1]; for (cnt1 = matrices->k_min_F5[j - 1]; cnt1 <= matrices->k_max_F5[j - 1]; cnt1++) { for (cnt2 = matrices->l_min_F5[j - 1][cnt1]; cnt2 <= matrices->l_max_F5[j - 1][cnt1]; cnt2 += 2) { if (((cnt1 + da) <= maxD1) && ((cnt2 + db) <= maxD2)) { matrices->E_F5[j][cnt1 + da][(cnt2 + db) / 2] = MIN2(matrices->E_F5[j][cnt1 + da][(cnt2 + db) / 2], matrices->E_F5[j - 1][cnt1][cnt2 / 2] ); updatePosteriorBoundaries(cnt1 + da, cnt2 + db, &min_k_real, &max_k_real, &min_l_real, &max_l_real ); #ifdef COUNT_STATES matrices->N_F5[j][cnt1 + da][(cnt2 + db) / 2] += matrices->N_F5[j - 1][cnt1][cnt2 / 2]; #endif } /* collect all cases where da+cnt1 or db+cnt2 exceeds maxD1, maxD2, respectively */ else { matrices->E_F5_rem[j] = MIN2(matrices->E_F5_rem[j], matrices->E_F5[j - 1][cnt1][cnt2 / 2]); } } } /* j pairs with 1 */ if (matrices->E_C_rem[my_iindx[1] - j] != INF) matrices->E_F5_rem[j] = MIN2(matrices->E_F5_rem[j], matrices->E_C_rem[my_iindx[1] - j] + additional_en); if (matrices->E_C[my_iindx[1] - j]) { for (cnt1 = matrices->k_min_C[my_iindx[1] - j]; cnt1 <= matrices->k_max_C[my_iindx[1] - j]; cnt1++) for (cnt2 = matrices->l_min_C[my_iindx[1] - j][cnt1]; cnt2 <= matrices->l_max_C[my_iindx[1] - j][cnt1]; cnt2 += 2) { if (matrices->E_C[my_iindx[1] - j][cnt1][cnt2 / 2] != INF) { matrices->E_F5[j][cnt1][cnt2 / 2] = MIN2(matrices->E_F5[j][cnt1][cnt2 / 2], matrices->E_C[my_iindx[1] - j][cnt1][cnt2 / 2] + additional_en ); updatePosteriorBoundaries(cnt1, cnt2, &min_k_real, &max_k_real, &min_l_real, &max_l_real ); #ifdef COUNT_STATES matrices->N_F5[j][cnt1][cnt2 / 2] += matrices->N_C[my_iindx[1] - j][cnt1][cnt2 / 2]; #endif } } } /* j pairs with some other nucleotide -> see below */ for (i = j - TURN - 1; i > 1; i--) { ij = my_iindx[i] - j; type = ptype[jindx[j] + i]; if (type) { if (dangles == 2) additional_en = vrna_E_ext_stem(type, S1[i - 1], j < seq_length ? 
S1[j + 1] : -1, P); else additional_en = vrna_E_ext_stem(type, -1, -1, P); if (matrices->E_C_rem[ij] != INF) { for (cnt3 = matrices->k_min_F5[i - 1]; cnt3 <= matrices->k_max_F5[i - 1]; cnt3++) for (cnt4 = matrices->l_min_F5[i - 1][cnt3]; cnt4 <= matrices->l_max_F5[i - 1][cnt3]; cnt4 += 2) { if (matrices->E_F5[i - 1][cnt3][cnt4 / 2] != INF) { matrices->E_F5_rem[j] = MIN2(matrices->E_F5_rem[j], matrices->E_F5[i - 1][cnt3][cnt4 / 2] + matrices->E_C_rem[ij] + additional_en ); } } if (matrices->E_F5_rem[i - 1] != INF) { matrices->E_F5_rem[j] = MIN2(matrices->E_F5_rem[j], matrices->E_F5_rem[i - 1] + matrices->E_C_rem[ij] + additional_en ); } } if ((matrices->E_F5_rem[i - 1] != INF) && (matrices->E_C[ij])) { for (cnt1 = matrices->k_min_C[ij]; cnt1 <= matrices->k_max_C[ij]; cnt1++) for (cnt2 = matrices->l_min_C[ij][cnt1]; cnt2 <= matrices->l_max_C[ij][cnt1]; cnt2 += 2) if (matrices->E_C[ij][cnt1][cnt2 / 2] != INF) { matrices->E_F5_rem[j] = MIN2(matrices->E_F5_rem[j], matrices->E_F5_rem[i - 1] + matrices->E_C[ij][cnt1][cnt2 / 2] + additional_en ); } } if (!matrices->E_C[ij]) continue; unsigned int d1a = referenceBPs1[my_iindx[1] - j] - referenceBPs1[ij] - referenceBPs1[my_iindx[1] - i + 1]; unsigned int d1b = referenceBPs2[my_iindx[1] - j] - referenceBPs2[ij] - referenceBPs2[my_iindx[1] - i + 1]; for (cnt1 = matrices->k_min_C[ij]; cnt1 <= matrices->k_max_C[ij]; cnt1++) for (cnt2 = matrices->l_min_C[ij][cnt1]; cnt2 <= matrices->l_max_C[ij][cnt1]; cnt2 += 2) for (cnt3 = matrices->k_min_F5[i - 1]; cnt3 <= matrices->k_max_F5[i - 1]; cnt3++) for (cnt4 = matrices->l_min_F5[i - 1][cnt3]; cnt4 <= matrices->l_max_F5[i - 1][cnt3]; cnt4 += 2) { if (matrices->E_F5[i - 1][cnt3][cnt4 / 2] != INF && matrices->E_C[ij][cnt1][cnt2 / 2] != INF) { if (((cnt1 + cnt3 + d1a) <= maxD1) && ((cnt2 + cnt4 + d1b) <= maxD2)) { matrices->E_F5[j][cnt1 + cnt3 + d1a][(cnt2 + cnt4 + d1b) / 2] = MIN2(matrices->E_F5[j][cnt1 + cnt3 + d1a][(cnt2 + cnt4 + d1b) / 2], matrices->E_F5[i - 1][cnt3][cnt4 / 2] + 
matrices->E_C[ij][cnt1][cnt2 / 2] + additional_en ); updatePosteriorBoundaries(cnt1 + cnt3 + d1a, cnt2 + cnt4 + d1b, &min_k_real, &max_k_real, &min_l_real, &max_l_real ); #ifdef COUNT_STATES matrices->N_F5[j][cnt1 + cnt3 + d1a][(cnt2 + cnt4 + d1b) / 2] += matrices->N_F5[i - 1][cnt3][cnt4 / 2] * matrices->N_C[ij][cnt1][cnt2 / 2]; #endif } /* collect all cases where d1a+cnt1+cnt3 or d1b+cnt2+cnt4 exceeds maxD1, maxD2, respectively */ else { matrices->E_F5_rem[j] = MIN2(matrices->E_F5_rem[j], matrices->E_F5[i - 1][cnt3][cnt4 / 2] + matrices->E_C[ij][cnt1][cnt2 / 2] + additional_en ); } } } } } /* resize and move memory portions of energy matrix E_F5 */ adjustArrayBoundaries(&matrices->E_F5[j], &matrices->k_min_F5[j], &matrices->k_max_F5[j], &matrices->l_min_F5[j], &matrices->l_max_F5[j], min_k_real, max_k_real, min_l_real, max_l_real ); } /* end of j-loop */ if (compute_2Dfold_F3) { /* prepare first entries in E_F3 */ for (cnt1 = seq_length; cnt1 >= seq_length - TURN - 1; cnt1--) { matrices->E_F3[cnt1] = (int **)vrna_alloc(sizeof(int *)); matrices->E_F3[cnt1][0] = (int *)vrna_alloc(sizeof(int)); matrices->E_F3[cnt1][0][0] = 0; matrices->k_min_F3[cnt1] = matrices->k_max_F3[cnt1] = 0; matrices->l_min_F3[cnt1] = (int *)vrna_alloc(sizeof(int)); matrices->l_max_F3[cnt1] = (int *)vrna_alloc(sizeof(int)); matrices->l_min_F3[cnt1][0] = matrices->l_max_F3[cnt1][0] = 0; } /* begin calculations */ for (j = seq_length - TURN - 2; j >= 1; j--) { unsigned int da = referenceBPs1[my_iindx[j] - seq_length] - referenceBPs1[my_iindx[j + 1] - seq_length]; unsigned int db = referenceBPs2[my_iindx[j] - seq_length] - referenceBPs2[my_iindx[j + 1] - seq_length]; type = ptype[jindx[seq_length] + j]; additional_en = 0; if (type) { if (dangles == 2) additional_en += vrna_E_ext_stem(type, j > 1 ? 
S1[j - 1] : -1, -1, P); else additional_en += vrna_E_ext_stem(type, -1, -1, P); } /* make min and max k guess for memory allocation */ int min_k_guess, max_k_guess, min_l_guess, max_l_guess; int *min_l_real, *max_l_real, min_k_real, max_k_real; min_k_guess = min_l_guess = 0; max_k_guess = referenceBPs1[my_iindx[j] - seq_length] + mm1[my_iindx[j] - seq_length]; max_l_guess = referenceBPs2[my_iindx[j] - seq_length] + mm2[my_iindx[j] - seq_length]; prepareBoundaries(min_k_guess, max_k_guess, min_l_guess, max_l_guess, bpdist[my_iindx[j] - seq_length], &matrices->k_min_F3[j], &matrices->k_max_F3[j], &matrices->l_min_F3[j], &matrices->l_max_F3[j] ); preparePosteriorBoundaries(matrices->k_max_F3[j] - matrices->k_min_F3[j] + 1, matrices->k_min_F3[j], &min_k_real, &max_k_real, &min_l_real, &max_l_real ); prepareArray(&matrices->E_F3[j], matrices->k_min_F3[j], matrices->k_max_F3[j], matrices->l_min_F3[j], matrices->l_max_F3[j] ); /* begin the actual computation of 5' end energies */ /* j is unpaired ... 
*/ for (cnt1 = matrices->k_min_F3[j + 1]; cnt1 <= matrices->k_max_F3[j + 1]; cnt1++) { for (cnt2 = matrices->l_min_F3[j + 1][cnt1]; cnt2 <= matrices->l_max_F3[j + 1][cnt1]; cnt2 += 2) { matrices->E_F3[j][cnt1 + da][(cnt2 + db) / 2] = MIN2(matrices->E_F3[j][cnt1 + da][(cnt2 + db) / 2], matrices->E_F3[j + 1][cnt1][cnt2 / 2] ); updatePosteriorBoundaries(cnt1 + da, cnt2 + db, &min_k_real, &max_k_real, &min_l_real, &max_l_real ); } } /* j pairs with n */ if (matrices->E_C[my_iindx[j] - seq_length]) { for (cnt1 = matrices->k_min_C[my_iindx[j] - seq_length]; cnt1 <= matrices->k_max_C[my_iindx[j] - seq_length]; cnt1++) for (cnt2 = matrices->l_min_C[my_iindx[j] - seq_length][cnt1]; cnt2 <= matrices->l_max_C[my_iindx[j] - seq_length][cnt1]; cnt2 += 2) { if (matrices->E_C[my_iindx[j] - seq_length][cnt1][cnt2 / 2] != INF) { matrices->E_F3[j][cnt1][cnt2 / 2] = MIN2(matrices->E_F3[j][cnt1][cnt2 / 2], matrices->E_C[my_iindx[j] - seq_length][cnt1][cnt2 / 2] + additional_en ); updatePosteriorBoundaries(cnt1, cnt2, &min_k_real, &max_k_real, &min_l_real, &max_l_real ); } } } /* j pairs with some other nucleotide -> see below */ for (i = j - TURN - 1; i > 1; i--) { ij = my_iindx[i] - j; if (!matrices->E_C[ij]) continue; type = ptype[jindx[j] + i]; if (type) { unsigned int d1a = referenceBPs1[my_iindx[1] - j] - referenceBPs1[ij] - referenceBPs1[my_iindx[1] - i + 1]; unsigned int d1b = referenceBPs2[my_iindx[1] - j] - referenceBPs2[ij] - referenceBPs2[my_iindx[1] - i + 1]; if (dangles == 2) additional_en = vrna_E_ext_stem(type, S1[i - 1], j < seq_length ? 
S1[j + 1] : -1, P);
        else
          additional_en = vrna_E_ext_stem(type, -1, -1, P);

        /* combine all distance classes of F5[1..i-1] with those of C[i..j].
         * NOTE(review): this loop sits inside the compute_2Dfold_F3 branch but
         * reads and updates the *F5* matrices (E_F5/k_min_F5/...), not the F3
         * counterparts -- presumably a copy/paste remnant; confirm against the
         * upstream repository before relying on the F3 matrices. */
        for (cnt1 = matrices->k_min_C[ij]; cnt1 <= matrices->k_max_C[ij]; cnt1++)
          for (cnt2 = matrices->l_min_C[ij][cnt1]; cnt2 <= matrices->l_max_C[ij][cnt1]; cnt2 += 2)
            for (cnt3 = matrices->k_min_F5[i - 1]; cnt3 <= matrices->k_max_F5[i - 1]; cnt3++)
              for (cnt4 = matrices->l_min_F5[i - 1][cnt3]; cnt4 <= matrices->l_max_F5[i - 1][cnt3]; cnt4 += 2) {
                if (matrices->E_F5[i - 1][cnt3][cnt4 / 2] != INF && matrices->E_C[ij][cnt1][cnt2 / 2] != INF) {
                  matrices->E_F5[j][cnt1 + cnt3 + d1a][(cnt2 + cnt4 + d1b) / 2] =
                    MIN2(matrices->E_F5[j][cnt1 + cnt3 + d1a][(cnt2 + cnt4 + d1b) / 2],
                         matrices->E_F5[i - 1][cnt3][cnt4 / 2] +
                         matrices->E_C[ij][cnt1][cnt2 / 2] +
                         additional_en
                         );
                  updatePosteriorBoundaries(cnt1 + cnt3 + d1a,
                                            cnt2 + cnt4 + d1b,
                                            &min_k_real,
                                            &max_k_real,
                                            &min_l_real,
                                            &max_l_real
                                            );
#ifdef COUNT_STATES
                  matrices->N_F5[j][cnt1 + cnt3 + d1a][(cnt2 + cnt4 + d1b) / 2] +=
                    matrices->N_F5[i - 1][cnt3][cnt4 / 2] *
                    matrices->N_C[ij][cnt1][cnt2 / 2];
#endif
                }
              }
        }
      }

      /* resize and move memory portions of energy matrix E_F5 */
      adjustArrayBoundaries(&matrices->E_F5[j],
                            &matrices->k_min_F5[j],
                            &matrices->k_max_F5[j],
                            &matrices->l_min_F5[j],
                            &matrices->l_max_F5[j],
                            min_k_real,
                            max_k_real,
                            min_l_real,
                            max_l_real
                            );
    } /* end of j-loop */
  }
}

/*---------------------------------------------------------------------------*/

/*---------------------------------------------------------------------------*/

/*
 * Backtrack the MFE structure of the 5' fragment [1..j] for distance
 * class (k,l), writing '(' and ')' into `structure` (via backtrack_c)
 * as base pairs are fixed.
 *
 * k == -1 selects the "remainder" entry E_F5_rem[j], which collects all
 * states whose distance to either reference structure exceeds maxD1/maxD2
 * (see the maxD1/maxD2 comparisons below).  The function re-derives which
 * decomposition produced the stored energy -- j unpaired, pair (1,j), or
 * F5[1..i-1] + C(i,j) -- and recurses into backtrack_f5/backtrack_c.
 *
 * NOTE(review): l-classes are iterated in steps of 2 and stored at index
 * l/2 -- presumably only every other l value is populated per k (parity
 * coupling); confirm against the fill routine.
 */
PRIVATE void backtrack_f5(unsigned int j, int k, int l, char *structure, vrna_fold_compound_t *vc)
{
  int *my_iindx, *jindx, energy, type, dangles, cnt1, cnt2, cnt3, cnt4;
  int **l_min_C, **l_max_C, **l_min_F5, **l_max_F5;
  int *k_min_C, *k_max_C, *k_min_F5, *k_max_F5;
  int ***E_C, ***E_F5;
  int *E_C_rem, *E_F5_rem;
  unsigned int i, ij, seq_length, maxD1, maxD2;
  short *S1;
  unsigned int *referenceBPs1, *referenceBPs2;
  char *ptype;
  vrna_param_t *P;
  vrna_md_t *md;
  vrna_mx_mfe_t *matrices;
  unsigned int da, db;

  /* unpack the fold compound / DP matrices into locals */
  P = vc->params;
  md = &(P->model_details);
  matrices = vc->matrices;
  seq_length = vc->length;
  S1 = vc->sequence_encoding;
  ptype = vc->ptype;
  my_iindx = vc->iindx;
  jindx = vc->jindx;
  referenceBPs1 = vc->referenceBPs1;
  referenceBPs2 = vc->referenceBPs2;
  dangles = md->dangles;
  E_F5 = matrices->E_F5;
  l_min_F5 = matrices->l_min_F5;
  l_max_F5 = matrices->l_max_F5;
  k_min_F5 = matrices->k_min_F5;
  k_max_F5 = matrices->k_max_F5;
  E_C = matrices->E_C;
  l_min_C = matrices->l_min_C;
  l_max_C = matrices->l_max_C;
  k_min_C = matrices->k_min_C;
  k_max_C = matrices->k_max_C;
  E_F5_rem = matrices->E_F5_rem;
  E_C_rem = matrices->E_C_rem;
  maxD1 = vc->maxD1;
  maxD2 = vc->maxD2;

  /* distance-class shift caused by leaving j unpaired */
  da = referenceBPs1[my_iindx[1] - j] - referenceBPs1[my_iindx[1] - j + 1];
  db = referenceBPs2[my_iindx[1] - j] - referenceBPs2[my_iindx[1] - j + 1];

  /* fragments shorter than TURN+2 cannot contain a pair -- nothing to do */
  if (j < TURN + 2)
    return;

  /* case 1: F5[j] == F5[j-1] ? (j unpaired) */
  if (k == -1) {
    if (E_F5_rem[j] == INF) {
      return;
    } else if (E_F5_rem[j] == E_F5_rem[j - 1]) {
      backtrack_f5(j - 1, k, l, structure, vc);
      return;
    } else if (E_F5[j - 1]) {
      /* remainder state may also come from a regular class of F5[j-1]
       * that is pushed beyond maxD1/maxD2 by the (da,db) shift */
      for (cnt1 = k_min_F5[j - 1]; cnt1 <= k_max_F5[j - 1]; cnt1++) {
        for (cnt2 = l_min_F5[j - 1][cnt1]; cnt2 <= l_max_F5[j - 1][cnt1]; cnt2 += 2) {
          if (((cnt1 + da) > maxD1) || ((cnt2 + db) > maxD2)) {
            if (E_F5_rem[j] == E_F5[j - 1][cnt1][cnt2 / 2]) {
              backtrack_f5(j - 1, cnt1, cnt2, structure, vc);
              return;
            }
          }
        }
      }
    }
  } else if ((k >= da) && (l >= db)) {
    if (E_F5[j - 1]) {
      if ((k - da >= k_min_F5[j - 1]) && (k - da <= k_max_F5[j - 1])) {
        if ((l - db >= l_min_F5[j - 1][k - da]) && (l - db <= l_max_F5[j - 1][k - da])) {
          if (E_F5[j - 1][k - da][(l - db) / 2] == E_F5[j][k][l / 2]) {
            backtrack_f5(j - 1, k - da, l - db, structure, vc);
            return;
          }
        }
      }
    }
  }

  /* case 2: j pairs with nucleotide 1 -> F5[j] = C(1,j) + ext. stem bonus */
  type = ptype[jindx[j] + 1];
  if (type) {
    if (dangles == 2)
      energy = vrna_E_ext_stem(type, -1, j < seq_length ?
= l_min_C[ij][cnt3]; cnt4 <= l_max_C[ij][cnt3]; cnt4 += 2) {
              /* only combinations that overflow maxD1/maxD2 can have
               * produced the remainder energy */
              if (((cnt1 + cnt3 + d1a) > maxD1) || ((cnt2 + cnt4 + d1b) > maxD2)) {
                if (E_F5_rem[j] == (E_F5[i - 1][cnt1][cnt2 / 2] + E_C[ij][cnt3][cnt4 / 2] + energy)) {
                  backtrack_f5(i - 1, cnt1, cnt2, structure, vc);
                  backtrack_c(i, j, cnt3, cnt4, structure, vc);
                  return;
                }
              }
            }
      } else if ((k >= d1a) && (l >= d1b)) {
        /* regular class: split (k,l) between F5[1..i-1] and C(i,j) */
        int k_f_max = MIN2(k - d1a, k_max_F5[i - 1]);
        for (cnt1 = k_min_F5[i - 1]; cnt1 <= k_f_max; cnt1++) {
          int l_f_max = MIN2(l - d1b, l_max_F5[i - 1][cnt1]);
          for (cnt2 = l_min_F5[i - 1][cnt1]; cnt2 <= l_f_max; cnt2 += 2) {
            int k_c = k - d1a - cnt1;
            if ((k_c >= k_min_C[ij]) && (k_c <= k_max_C[ij])) {
              int l_c = l - d1b - cnt2;
              if ((l_c >= l_min_C[ij][k_c]) && (l_c <= l_max_C[ij][k_c])) {
                if (E_F5[j][k][l / 2] == (E_F5[i - 1][cnt1][cnt2 / 2] + E_C[ij][k_c][l_c / 2] + energy)) {
                  backtrack_f5(i - 1, cnt1, cnt2, structure, vc);
                  backtrack_c(i, j, k_c, l_c, structure, vc);
                  return;
                }
              }
            }
          }
        }
      }
    }
  }
  /* no decomposition reproduced the stored energy -- internal inconsistency */
  vrna_message_error("backtracking failed in f5");
}

/*
 * Backtrack the pair (i,j) whose closed-structure energy is stored in
 * E_C[ij][k][l/2] (or E_C_rem[ij] for k == -1, the class exceeding
 * maxD1/maxD2).  Marks i/j as '(' / ')' in `structure`, then re-derives
 * which loop type produced the energy -- hairpin, interior loop (p,q),
 * or multiloop decomposition into M[i+1..u] + M1[u+1..j-1] -- and
 * recurses via backtrack_c/backtrack_m/backtrack_m1.
 */
PRIVATE void backtrack_c(unsigned int i, unsigned int j, int k, int l, char *structure, vrna_fold_compound_t *vc)
{
  unsigned int p, q, pq, ij, maxp, maxD1, maxD2;
  int *my_iindx, *jindx, type, type_2, energy, no_close, dangles, base_d1, base_d2, d1, d2, cnt1, cnt2, cnt3, cnt4, *rtype;
  int **l_min_C, **l_max_C, **l_min_M, **l_max_M, **l_min_M1, **l_max_M1;
  int *k_min_C, *k_max_C, *k_min_M, *k_max_M, *k_min_M1, *k_max_M1;
  int ***E_C, ***E_M, ***E_M1, *E_C_rem, *E_M_rem, *E_M1_rem;
  short *S1;
  unsigned int *referenceBPs1, *referenceBPs2;
  char *ptype, *sequence;
  vrna_param_t *P;
  vrna_md_t *md;
  vrna_mx_mfe_t *matrices;

  /* unpack the fold compound / DP matrices into locals */
  P = vc->params;
  md = &(P->model_details);
  matrices = vc->matrices;
  sequence = vc->sequence;
  S1 = vc->sequence_encoding;
  ptype = vc->ptype;
  rtype = &(md->rtype[0]);
  my_iindx = vc->iindx;
  jindx = vc->jindx;
  referenceBPs1 = vc->referenceBPs1;
  referenceBPs2 = vc->referenceBPs2;
  dangles = md->dangles;
  E_C = matrices->E_C;
  l_min_C = matrices->l_min_C;
  l_max_C = matrices->l_max_C;
  k_min_C = matrices->k_min_C;
  k_max_C = matrices->k_max_C;
  E_M = matrices->E_M;
  l_min_M = matrices->l_min_M;
  l_max_M = matrices->l_max_M;
  k_min_M = matrices->k_min_M;
  k_max_M = matrices->k_max_M;
  E_M1 = matrices->E_M1;
  l_min_M1 = matrices->l_min_M1;
  l_max_M1 = matrices->l_max_M1;
  k_min_M1 = matrices->k_min_M1;
  k_max_M1 = matrices->k_max_M1;
  E_C_rem = matrices->E_C_rem;
  E_M_rem = matrices->E_M_rem;
  E_M1_rem = matrices->E_M1_rem;
  maxD1 = vc->maxD1;
  maxD2 = vc->maxD2;

  ij = my_iindx[i] - j;
  /* the energy we must reproduce by one of the decompositions below */
  int e = (k == -1) ? E_C_rem[ij] : E_C[ij][k][l / 2];

  type = ptype[jindx[j] + i];
  no_close = (((type == 3) || (type == 4)) && no_closingGU);

  /* fix the pair (i,j) in the output structure (1-based -> 0-based) */
  structure[i - 1] = '(';
  structure[j - 1] = ')';

  /* distance bookkeeping: +1 if (i,j) is not a pair of the reference,
   * -1 if it is (pairing it then removes one reference pair) */
  base_d1 = ((unsigned int)vc->reference_pt1[i] != j) ? 1 : -1;
  base_d2 = ((unsigned int)vc->reference_pt2[i] != j) ? 1 : -1;
  base_d1 += referenceBPs1[ij];
  base_d2 += referenceBPs2[ij];

  /* case 1: hairpin loop */
  if (k == -1) {
    if (((unsigned int)base_d1 > maxD1) || ((unsigned int)base_d2 > maxD2))
      if (e == E_Hairpin(j - i - 1, type, S1[i + 1], S1[j - 1], sequence + i - 1, P))
        return;
  } else {
    if ((unsigned int)base_d1 == k)
      if ((unsigned int)base_d2 == l)
        if (E_Hairpin(j - i - 1, type, S1[i + 1], S1[j - 1], sequence + i - 1, P) == e)
          return;
  }

  /* case 2: interior loop closed by (i,j) with inner pair (p,q) */
  maxp = MIN2(j - 2 - TURN, i + MAXLOOP + 1);
  for (p = i + 1; p <= maxp; p++) {
    unsigned int minq, ln_pre;
    minq = p + TURN + 1;
    ln_pre = j - i - 1;
    if (ln_pre > minq + MAXLOOP)
      minq = ln_pre - MAXLOOP - 1;
    for (q = minq; q < j; q++) {
      pq = my_iindx[p] - q;
      type_2 = ptype[jindx[q] + p];
      if (type_2 == 0)
        continue;

      type_2 = rtype[type_2];
      /* d2 = dbp(S_{i,j}, S_{p.q} + {i,j}) */
      d1 = base_d1 - referenceBPs1[pq];
      d2 = base_d2 - referenceBPs2[pq];
      energy = E_IntLoop(p - i - 1, j - q - 1, type, type_2, S1[i + 1], S1[j - 1], S1[p - 1], S1[q + 1], P);
      if (k == -1) {
        if (E_C_rem[pq] != INF) {
          if (e == (E_C_rem[pq] + energy)) {
            backtrack_c(p, q, -1, -1, structure, vc);
            return;
          }
        }
        if (E_C[pq]) {
          for (cnt1 = k_min_C[pq]; cnt1 <= k_max_C[pq]; cnt1++)
            for (cnt2 = l_min_C[pq][cnt1]; cnt2 <= l_max_C[pq][cnt1]; cnt2 += 2) {
              if (((cnt1 + d1) > maxD1) || ((cnt2 + d2) > maxD2)) {
                if (e == (E_C[pq][cnt1][cnt2 / 2] + energy)) {
                  backtrack_c(p, q, cnt1, cnt2, structure, vc);
                  return;
                }
              }
            }
        }
      } else {
        if (!E_C[pq])
          continue;

        if (d1 <= k && d2 <= l) {
          if ((k - d1 >= k_min_C[pq]) && (k - d1) <= k_max_C[pq]) {
            if ((l - d2 >= l_min_C[pq][k - d1]) && (l - d2 <= l_max_C[pq][k - d1])) {
              if (E_C[pq][k - d1][(l - d2) / 2] + energy == e) {
                backtrack_c(p, q, k - d1, l - d2, structure, vc);
                return;
              }
            }
          }
        }
      }
    } /* end q-loop */
  } /* end p-loop */

  /* case 3: multi-loop decomposition ------------------------*/
  if (!no_close) {
    unsigned int u;
    int tt;
    if (k == -1) {
      for (u = i + TURN + 2; u < j - TURN - 2; u++) {
        int i1u, u1j1;
        i1u = my_iindx[i + 1] - u;
        u1j1 = my_iindx[u + 1] - j + 1;
        tt = rtype[type];
        energy = P->MLclosing;
        if (dangles == 2)
          energy += E_MLstem(tt, S1[j - 1], S1[i + 1], P);
        else
          energy += E_MLstem(tt, -1, -1, P);

        /* remainder(M) combined with regular or remainder M1 */
        if (E_M_rem[i1u] != INF) {
          if (E_M1[u1j1]) {
            for (cnt1 = k_min_M1[u1j1]; cnt1 <= k_max_M1[u1j1]; cnt1++)
              for (cnt2 = l_min_M1[u1j1][cnt1]; cnt2 <= l_max_M1[u1j1][cnt1]; cnt2 += 2) {
                if (e == (E_M_rem[i1u] + E_M1[u1j1][cnt1][cnt2 / 2] + energy)) {
                  backtrack_m(i + 1, u, -1, -1, structure, vc);
                  backtrack_m1(u + 1, j - 1, cnt1, cnt2, structure, vc);
                  return;
                }
              }
          }
          if (E_M1_rem[u1j1] != INF) {
            if (e == (E_M_rem[i1u] + E_M1_rem[u1j1] + energy)) {
              backtrack_m(i + 1, u, -1, -1, structure, vc);
              backtrack_m1(u + 1, j - 1, -1, -1, structure, vc);
              return;
            }
          }
        }

        /* regular M combined with remainder(M1) */
        if (E_M1_rem[u1j1] != INF) {
          if (E_M[i1u]) {
            for (cnt1 = k_min_M[i1u]; cnt1 <= k_max_M[i1u]; cnt1++)
              for (cnt2 = l_min_M[i1u][cnt1]; cnt2 <= l_max_M[i1u][cnt1]; cnt2 += 2)
                if (e == (E_M[i1u][cnt1][cnt2 / 2] + E_M1_rem[u1j1] + energy)) {
                  backtrack_m(i + 1, u, cnt1, cnt2, structure, vc);
                  backtrack_m1(u + 1, j - 1, -1, -1, structure, vc);
                  return;
                }
          }
        }

        /* now all cases where we exceed the maxD1/D2 scope by combination of E_M and E_M1 */
        if (!E_M[i1u])
          continue;

        if (!E_M1[u1j1])
          continue;

        /* get distance to reference if closing this multiloop
         * dist3 = dbp(S_{i,j}, {i,j} + S_{i+1.u} + S_{u+1,j-1}) */
        d1 = base_d1 - referenceBPs1[i1u] - referenceBPs1[u1j1];
        d2 = base_d2 - referenceBPs2[i1u] - referenceBPs2[u1j1];

        for (cnt1 = matrices->k_min_M[i1u]; cnt1 <= matrices->k_max_M[i1u]; cnt1++)
          for (cnt2 = matrices->l_min_M[i1u][cnt1]; cnt2 <= matrices->l_max_M[i1u][cnt1]; cnt2 += 2)
            for (cnt3 = matrices->k_min_M1[u1j1]; cnt3 <= matrices->k_max_M1[u1j1]; cnt3++)
              for (cnt4 = matrices->l_min_M1[u1j1][cnt3]; cnt4 <= matrices->l_max_M1[u1j1][cnt3]; cnt4 += 2) {
                if (((cnt1 + cnt3 + d1) > maxD1) || ((cnt2 + cnt4 + d2) > maxD2)) {
                  if (e == (E_M[i1u][cnt1][cnt2 / 2] + E_M1[u1j1][cnt3][cnt4 / 2] + energy)) {
                    backtrack_m(i + 1, u, cnt1, cnt2, structure, vc);
                    backtrack_m1(u + 1, j - 1, cnt3, cnt4, structure, vc);
                    return;
                  }
                }
              }
      }
    } else {
      for (u = i + TURN + 2; u < j - TURN - 2; u++) {
        int i1u, u1j1;
        i1u = my_iindx[i + 1] - u;
        u1j1 = my_iindx[u + 1] - j + 1;
        if (!E_M[i1u])
          continue;

        if (!E_M1[u1j1])
          continue;

        /* get distance to reference if closing this multiloop
         * dist3 = dbp(S_{i,j}, {i,j} + S_{i+1.u} + S_{u+1,j-1}) */
        d1 = base_d1 - referenceBPs1[i1u] - referenceBPs1[u1j1];
        d2 = base_d2 - referenceBPs2[i1u] - referenceBPs2[u1j1];
        tt = rtype[type];
        energy = P->MLclosing;
        if (dangles == 2)
          energy += E_MLstem(tt, S1[j - 1], S1[i + 1], P);
        else
          energy += E_MLstem(tt, -1, -1, P);

        /* split the remaining (k-d1, l-d2) between M and M1 */
        if ((d1 <= k) && (d2 <= l)) {
          for (cnt1 = k_min_M[i1u]; cnt1 <= MIN2(k - d1, k_max_M[i1u]); cnt1++)
            for (cnt2 = l_min_M[i1u][cnt1]; cnt2 <= MIN2(l - d2, l_max_M[i1u][cnt1]); cnt2 += 2)
              if (((k - d1 - cnt1) >= k_min_M1[u1j1]) && ((k - d1 - cnt1) <= k_max_M1[u1j1])) {
                if (((l - d2 - cnt2) >= l_min_M1[u1j1][k - d1 - cnt1]) && ((l - d2 - cnt2) <= l_max_M1[u1j1][k - d1 - cnt1])) {
                  if (e == (energy + E_M[i1u][cnt1][cnt2 / 2] + E_M1[u1j1][k - d1 - cnt1][(l - d2 - cnt2) / 2])) {
                    backtrack_m(i + 1, u, cnt1, cnt2, structure, vc);
                    backtrack_m1(u + 1, j - 1, k - d1 - cnt1, l - d2 - cnt2, structure, vc);
                    return;
                  }
                }
              }
        }
      }
    }
  }
  /* no loop decomposition reproduced the stored energy -- inconsistency */
  vrna_message_error("backtracking failed in c");
}

/*
 * Backtrack the multiloop segment fML[i..j] for distance class (k,l)
 * (k == -1 addresses the remainder entry E_M_rem[ij] whose distance to a
 * reference exceeds maxD1/maxD2).  Re-derives which recursion case
 * produced the stored energy -- i unpaired (ML(i+1,j)+MLbase), j unpaired
 * (ML(i,j-1)+MLbase), a single stem C(i,j)+MLstem, or the modular split
 * M[i..u] + C(u+1..j) -- and recurses via backtrack_m/backtrack_c.
 */
PRIVATE void backtrack_m(unsigned int i, unsigned int j, int k, int l, char *structure, vrna_fold_compound_t *vc)
{
  unsigned int u, ij, seq_length, base_d1, base_d2, d1, d2, maxD1, maxD2;
  int *my_iindx, *jindx, type, energy, dangles, circ, cnt1, cnt2, cnt3, cnt4;
  int **l_min_C, **l_max_C, **l_min_M, **l_max_M;
  int *k_min_C, *k_max_C, *k_min_M, *k_max_M;
  int ***E_C, ***E_M, *E_C_rem, *E_M_rem;
  short *S1;
  unsigned int *referenceBPs1, *referenceBPs2;
  char *ptype;
  vrna_param_t *P;
  vrna_md_t *md;
  vrna_mx_mfe_t *matrices;

  /* unpack the fold compound / DP matrices into locals */
  P = vc->params;
  md = &(P->model_details);
  matrices = vc->matrices;
  seq_length = vc->length;
  S1 = vc->sequence_encoding;
  circ = md->circ;
  ptype = vc->ptype;
  my_iindx = vc->iindx;
  jindx = vc->jindx;
  referenceBPs1 = vc->referenceBPs1;
  referenceBPs2 = vc->referenceBPs2;
  dangles = md->dangles;
  E_C = matrices->E_C;
  l_min_C = matrices->l_min_C;
  l_max_C = matrices->l_max_C;
  k_min_C = matrices->k_min_C;
  k_max_C = matrices->k_max_C;
  E_M = matrices->E_M;
  l_min_M = matrices->l_min_M;
  l_max_M = matrices->l_max_M;
  k_min_M = matrices->k_min_M;
  k_max_M = matrices->k_max_M;
  E_C_rem = matrices->E_C_rem;
  E_M_rem = matrices->E_M_rem;
  maxD1 = vc->maxD1;
  maxD2 = vc->maxD2;

  ij = my_iindx[i] - j;
  /* the energy we must reproduce by one of the recursion cases below */
  int e = (k == -1) ? E_M_rem[ij] : E_M[ij][k][l / 2];

  base_d1 = referenceBPs1[ij];
  base_d2 = referenceBPs2[ij];

  if (k == -1) {
    /* new_fML = ML(i+1,j)+c */
    d1 = base_d1 - referenceBPs1[my_iindx[i + 1] - j];
    d2 = base_d2 - referenceBPs2[my_iindx[i + 1] - j];
    if (E_M_rem[my_iindx[i + 1] - j] != INF) {
      if (e == (E_M_rem[my_iindx[i + 1] - j] + P->MLbase)) {
        backtrack_m(i + 1, j, -1, -1, structure, vc);
        return;
      }
    }

    if (E_M[my_iindx[i + 1] - j]) {
      /* a regular class pushed past maxD1/maxD2 by the (d1,d2) shift */
      for (cnt1 = k_min_M[my_iindx[i + 1] - j]; cnt1 <= k_max_M[my_iindx[i + 1] - j]; cnt1++)
        for (cnt2 = l_min_M[my_iindx[i + 1] - j][cnt1]; cnt2 <= l_max_M[my_iindx[i + 1] - j][cnt1]; cnt2 += 2)
          if (((cnt1 + d1) > maxD1) || ((cnt2 + d2) > maxD2)) {
            if (e == (E_M[my_iindx[i + 1] - j][cnt1][cnt2 / 2] + P->MLbase)) {
              backtrack_m(i + 1, j, cnt1, cnt2, structure, vc);
              return;
            }
          }
    }

    /* new_fML = min(ML(i,j-1) + c, new_fML) */
    d1 = base_d1 - referenceBPs1[ij + 1];
    d2 = base_d2 - referenceBPs2[ij + 1];
    if (E_M_rem[ij + 1] != INF) {
      if (e == (E_M_rem[ij + 1] + P->MLbase)) {
        backtrack_m(i, j - 1, -1, -1, structure, vc);
        return;
      }
    }

    if (E_M[ij + 1]) {
      for (cnt1 = k_min_M[ij + 1]; cnt1 <= k_max_M[ij + 1]; cnt1++)
        for (cnt2 = l_min_M[ij + 1][cnt1]; cnt2 <= l_max_M[ij + 1][cnt1]; cnt2 += 2)
          if (((cnt1 + d1) > maxD1) || ((cnt2 + d2) > maxD2)) {
            if (e == (E_M[ij + 1][cnt1][cnt2 / 2] + P->MLbase)) {
              backtrack_m(i, j - 1, cnt1, cnt2, structure, vc);
              return;
            }
          }
    }

    /* new_fML = min(new_fML, C(i,j)+b) */
    if (E_C_rem[ij] != INF) {
      type = ptype[jindx[j] + i];
      if (dangles == 2)
        energy = E_MLstem(type, ((i > 1) || circ) ? S1[i - 1] : -1, ((j < seq_length) || circ) ? S1[j + 1] : -1, P);
      else
        energy = E_MLstem(type, -1, -1, P);

      if (e == (E_C_rem[ij] + energy)) {
        backtrack_c(i, j, -1, -1, structure, vc);
        return;
      }
    }

    /* modular decomposition -------------------------------*/
    for (u = i + 1 + TURN; u <= j - 2 - TURN; u++) {
      int iu, uj;
      iu = my_iindx[i] - u;
      uj = my_iindx[u + 1] - j;
      type = ptype[jindx[j] + u + 1];
      d1 = base_d1 - referenceBPs1[iu] - referenceBPs1[uj];
      d2 = base_d2 - referenceBPs2[iu] - referenceBPs2[uj];
      if (dangles == 2)
        energy = E_MLstem(type, S1[u], (j < seq_length) || circ ? S1[j + 1] : -1, P);
      else
        energy = E_MLstem(type, -1, -1, P);

      /* remainder(M) + regular/remainder C */
      if (E_M_rem[iu] != INF) {
        if (E_C[uj]) {
          for (cnt1 = k_min_C[uj]; cnt1 <= k_max_C[uj]; cnt1++)
            for (cnt2 = l_min_C[uj][cnt1]; cnt2 <= l_max_C[uj][cnt1]; cnt2 += 2)
              if (e == (E_M_rem[iu] + E_C[uj][cnt1][cnt2 / 2] + energy)) {
                backtrack_m(i, u, -1, -1, structure, vc);
                backtrack_c(u + 1, j, cnt1, cnt2, structure, vc);
                return;
              }
        }
        if (E_C_rem[uj] != INF) {
          if (e == (E_M_rem[iu] + E_C_rem[uj] + energy)) {
            backtrack_m(i, u, -1, -1, structure, vc);
            backtrack_c(u + 1, j, -1, -1, structure, vc);
            return;
          }
        }
      }

      /* regular M + remainder(C) */
      if (E_C_rem[uj] != INF) {
        if (E_M[iu]) {
          for (cnt1 = k_min_M[iu]; cnt1 <= k_max_M[iu]; cnt1++)
            for (cnt2 = l_min_M[iu][cnt1]; cnt2 <= l_max_M[iu][cnt1]; cnt2 += 2)
              if (e == (E_M[iu][cnt1][cnt2 / 2] + E_C_rem[uj] + energy)) {
                backtrack_m(i, u, cnt1, cnt2, structure, vc);
                backtrack_c(u + 1, j, -1, -1, structure, vc);
                return;
              }
        }
      }

      if (!E_M[iu])
        continue;

      if (!E_C[uj])
        continue;

      /* regular M x regular C combinations that overflow maxD1/maxD2 */
      for (cnt1 = k_min_M[iu]; cnt1 <= k_max_M[iu]; cnt1++)
        for (cnt2 = l_min_M[iu][cnt1]; cnt2 <= l_max_M[iu][cnt1]; cnt2 += 2)
          for (cnt3 = k_min_C[uj]; cnt3 <= k_max_C[uj]; cnt3++) {
            for (cnt4 = l_min_C[uj][cnt3]; cnt4 <= l_max_C[uj][cnt3]; cnt4 += 2)
              if (((cnt1 + cnt3 + d1) > maxD1) || ((cnt2 + cnt4 + d2) > maxD2)) {
                if (e == (E_M[iu][cnt1][cnt2 / 2] + E_C[uj][cnt3][cnt4 / 2] + energy)) {
                  backtrack_m(i, u, cnt1, cnt2, structure, vc);
                  backtrack_c(u + 1, j, cnt3, cnt4, structure, vc);
                  return;
                }
              }
          }
    }
  } /* end if (k == -1) */
  else {
    d1 = base_d1 - referenceBPs1[my_iindx[i + 1] - j];
    d2 = base_d2 - referenceBPs2[my_iindx[i + 1] - j];

    /* new_fML = ML(i+1,j)+c */
    if (d1 <= k && d2 <= l) {
      if ((k - d1 >= k_min_M[my_iindx[i + 1] - j]) && (k - d1 <= k_max_M[my_iindx[i + 1] - j])) {
        if ((l - d2 >= l_min_M[my_iindx[i + 1] - j][k - d1]) && (l - d2 <= l_max_M[my_iindx[i + 1] - j][k - d1])) {
          if (E_M[my_iindx[i + 1] - j][k - d1][(l - d2) / 2] + P->MLbase == e) {
            backtrack_m(i + 1, j, k - d1, l - d2, structure, vc);
            return;
          }
        }
      }
    }

    d1 = base_d1 - referenceBPs1[ij + 1];
    d2 = base_d2 - referenceBPs2[ij + 1];

    /* new_fML = min(ML(i,j-1) + c, new_fML) */
    if (E_M[ij + 1]) {
      if (d1 <= k && d2 <= l) {
        if ((k - d1 >= k_min_M[ij + 1]) && (k - d1 <= k_max_M[ij + 1])) {
          if ((l - d2 >= l_min_M[ij + 1][k - d1]) && (l - d2 <= l_max_M[ij + 1][k - d1])) {
            if (E_M[ij + 1][k - d1][(l - d2) / 2] + P->MLbase == e) {
              backtrack_m(i, j - 1, k - d1, l - d2, structure, vc);
              return;
            }
          }
        }
      }
    }

    /* new_fML = min(new_fML, C(i,j)+b) */
    if (E_C[ij]) {
      type = ptype[jindx[j] + i];
      if (dangles == 2)
        energy = E_MLstem(type, ((i > 1) || circ) ? S1[i - 1] : -1, ((j < seq_length) || circ) ? S1[j + 1] : -1, P);
      else
        energy = E_MLstem(type, -1, -1, P);

      if ((k >= k_min_C[ij]) && (k <= k_max_C[ij])) {
        if ((l >= l_min_C[ij][k]) && (l <= l_max_C[ij][k])) {
          if (E_C[ij][k][l / 2] + energy == e) {
            backtrack_c(i, j, k, l, structure, vc);
            return;
          }
        }
      }
    }

    /* modular decomposition -------------------------------*/
    for (u = i + 1 + TURN; u <= j - 2 - TURN; u++) {
      if (!E_M[my_iindx[i] - u])
        continue;

      if (!E_C[my_iindx[u + 1] - j])
        continue;

      type = ptype[jindx[j] + u + 1];
      d1 = base_d1 - referenceBPs1[my_iindx[i] - u] - referenceBPs1[my_iindx[u + 1] - j];
      d2 = base_d2 - referenceBPs2[my_iindx[i] - u] - referenceBPs2[my_iindx[u + 1] - j];
      if (dangles == 2)
        energy = E_MLstem(type, S1[u], ((j < seq_length) || circ) ?
S1[j + 1] : -1, P); else energy = E_MLstem(type, -1, -1, P); if (d1 <= k && d2 <= l) { for (cnt1 = k_min_M[my_iindx[i] - u]; cnt1 <= MIN2(k - d1, k_max_M[my_iindx[i] - u]); cnt1++) for (cnt2 = l_min_M[my_iindx[i] - u][cnt1]; cnt2 <= MIN2(l - d2, l_max_M[my_iindx[i] - u][cnt1]); cnt2 += 2) if ((k - d1 - cnt1 >= k_min_C[my_iindx[u + 1] - j]) && (k - d1 - cnt1 <= k_max_C[my_iindx[u + 1] - j])) { if ((l - d2 - cnt2 >= l_min_C[my_iindx[u + 1] - j][k - d1 - cnt1]) && (l - d2 - cnt2 <= l_max_C[my_iindx[u + 1] - j][k - d1 - cnt1])) { if (E_M[my_iindx[i] - u][cnt1][cnt2 / 2] + E_C[my_iindx[u + 1] - j][k - d1 - cnt1][(l - d2 - cnt2) / 2] + energy == e) { backtrack_m(i, u, cnt1, cnt2, structure, vc); backtrack_c(u + 1, j, k - d1 - cnt1, l - d2 - cnt2, structure, vc); return; } } } } } } vrna_message_error("backtracking failed in fML\n"); } PRIVATE void backtrack_m1(unsigned int i, unsigned int j, int k, int l, char *structure, vrna_fold_compound_t *vc) { unsigned int ij, seq_length, d1, d2, *referenceBPs1, *referenceBPs2, maxD1, maxD2; int *my_iindx, *jindx, **l_min_C, **l_max_C, **l_min_M1, **l_max_M1; int *k_min_C, *k_max_C, *k_min_M1, *k_max_M1, cnt1, cnt2; int ***E_C, ***E_M1, *E_C_rem, *E_M1_rem, type, dangles, circ, energy, e_m1; short *S1; char *ptype; vrna_param_t *P; vrna_md_t *md; vrna_mx_mfe_t *matrices; P = vc->params; md = &(P->model_details); matrices = vc->matrices; seq_length = vc->length; S1 = vc->sequence_encoding; ptype = vc->ptype; circ = md->circ; my_iindx = vc->iindx; jindx = vc->jindx; referenceBPs1 = vc->referenceBPs1; referenceBPs2 = vc->referenceBPs2; dangles = md->dangles; E_C = matrices->E_C; l_min_C = matrices->l_min_C; l_max_C = matrices->l_max_C; k_min_C = matrices->k_min_C; k_max_C = matrices->k_max_C; E_M1 = matrices->E_M1; l_min_M1 = matrices->l_min_M1; l_max_M1 = matrices->l_max_M1; k_min_M1 = matrices->k_min_M1; k_max_M1 = matrices->k_max_M1; E_C_rem = matrices->E_C_rem; E_M1_rem = matrices->E_M1_rem; maxD1 = vc->maxD1; maxD2 = vc->maxD2; 
ij = my_iindx[i] - j; e_m1 = (k == -1) ? E_M1_rem[ij] : E_M1[ij][k][l / 2]; type = ptype[jindx[j] + i]; d1 = referenceBPs1[ij] - referenceBPs1[ij + 1]; d2 = referenceBPs2[ij] - referenceBPs2[ij + 1]; if (dangles == 2) energy = E_MLstem(type, (i > 1) || circ ? S1[i - 1] : -1, (j < seq_length) || circ ? S1[j + 1] : -1, P); else energy = E_MLstem(type, -1, -1, P); if (k == -1) { if (E_C_rem[ij] != INF) { if (e_m1 == (E_C_rem[ij] + energy)) { backtrack_c(i, j, -1, -1, structure, vc); return; } } if (E_M1_rem[ij + 1] != INF) { if (e_m1 == (E_M1_rem[ij + 1] + P->MLbase)) { backtrack_m1(i, j - 1, -1, -1, structure, vc); return; } } for (cnt1 = k_min_M1[ij + 1]; cnt1 <= k_max_M1[ij + 1]; cnt1++) for (cnt2 = l_min_M1[ij + 1][cnt1]; cnt2 <= l_max_M1[ij + 1][cnt1]; cnt2 += 2) if (((cnt1 + d1) > maxD1) || ((cnt2 + d2) > maxD2)) { if (e_m1 == (E_M1[ij + 1][cnt1][cnt2 / 2] + P->MLbase)) { backtrack_m1(i, j - 1, cnt1, cnt2, structure, vc); return; } } } else { if (E_C[ij]) { if ((k >= k_min_C[ij]) && (k <= k_max_C[ij])) { if ((l >= l_min_C[ij][k]) && (l <= l_max_C[ij][k])) { if (E_C[ij][k][l / 2] + energy == e_m1) { backtrack_c(i, j, k, l, structure, vc); return; } } } } if (d1 <= k && d2 <= l) { if ((k - d1 >= k_min_M1[ij + 1]) && (k - d1 <= k_max_M1[ij + 1])) { if ((l - d2 >= l_min_M1[ij + 1][k - d1]) && (l - d2 <= l_max_M1[ij + 1][k - d1])) { if (E_M1[ij + 1][k - d1][(l - d2) / 2] + P->MLbase == e_m1) { backtrack_m1(i, j - 1, k - d1, l - d2, structure, vc); return; } } } } } vrna_message_error("backtack failed in m1\n"); } PRIVATE void backtrack_fc(int k, int l, char *structure, vrna_fold_compound_t *vc) { unsigned int d, i, j, seq_length, base_d1, base_d2, d1, d2, maxD1, maxD2; int *my_iindx, *jindx, energy, cnt1, cnt2, cnt3, cnt4, *rtype; short *S1; unsigned int *referenceBPs1, *referenceBPs2; char *sequence, *ptype; int **E_Fc, **E_FcH, **E_FcI, **E_FcM, ***E_C, ***E_M, ***E_M2; int *E_C_rem, *E_M_rem, *E_M2_rem, E_Fc_rem, E_FcH_rem, E_FcI_rem, E_FcM_rem; int **l_min_C, 
**l_max_C, *k_min_C, *k_max_C;
  int **l_min_M, **l_max_M, *k_min_M, *k_max_M;
  int **l_min_M2, **l_max_M2, *k_min_M2, *k_max_M2;
  int *l_min_FcH, *l_max_FcH, k_min_FcH, k_max_FcH;
  int *l_min_FcI, *l_max_FcI, k_min_FcI, k_max_FcI;
  int *l_min_FcM, *l_max_FcM, k_min_FcM, k_max_FcM;
  vrna_param_t *P;
  vrna_md_t *md;
  vrna_mx_mfe_t *matrices;

  /* unpack fold compound / model / DP matrices into locals */
  P = vc->params;
  md = &(P->model_details);
  matrices = vc->matrices;
  sequence = vc->sequence;
  seq_length = vc->length;
  S1 = vc->sequence_encoding;
  ptype = vc->ptype;
  rtype = &(md->rtype[0]);
  my_iindx = vc->iindx;
  jindx = vc->jindx;
  referenceBPs1 = vc->referenceBPs1;
  referenceBPs2 = vc->referenceBPs2;
  /* number of reference base pairs in the full sequence [1..n] */
  base_d1 = referenceBPs1[my_iindx[1] - seq_length];
  base_d2 = referenceBPs2[my_iindx[1] - seq_length];
  E_C = matrices->E_C;
  l_min_C = matrices->l_min_C;
  l_max_C = matrices->l_max_C;
  k_min_C = matrices->k_min_C;
  k_max_C = matrices->k_max_C;
  E_M = matrices->E_M;
  l_min_M = matrices->l_min_M;
  l_max_M = matrices->l_max_M;
  k_min_M = matrices->k_min_M;
  k_max_M = matrices->k_max_M;
  E_M2 = matrices->E_M2;
  l_min_M2 = matrices->l_min_M2;
  l_max_M2 = matrices->l_max_M2;
  k_min_M2 = matrices->k_min_M2;
  k_max_M2 = matrices->k_max_M2;
  E_Fc = matrices->E_Fc;
  E_FcI = matrices->E_FcI;
  l_min_FcI = matrices->l_min_FcI;
  l_max_FcI = matrices->l_max_FcI;
  k_min_FcI = matrices->k_min_FcI;
  k_max_FcI = matrices->k_max_FcI;
  E_FcH = matrices->E_FcH;
  l_min_FcH = matrices->l_min_FcH;
  l_max_FcH = matrices->l_max_FcH;
  k_min_FcH = matrices->k_min_FcH;
  k_max_FcH = matrices->k_max_FcH;
  E_FcM = matrices->E_FcM;
  l_min_FcM = matrices->l_min_FcM;
  l_max_FcM = matrices->l_max_FcM;
  k_min_FcM = matrices->k_min_FcM;
  k_max_FcM = matrices->k_max_FcM;
  E_C_rem = matrices->E_C_rem;
  E_M_rem = matrices->E_M_rem;
  E_M2_rem = matrices->E_M2_rem;
  E_Fc_rem = matrices->E_Fc_rem;
  E_FcH_rem = matrices->E_FcH_rem;
  E_FcI_rem = matrices->E_FcI_rem;
  E_FcM_rem = matrices->E_FcM_rem;
  maxD1 = vc->maxD1;
  maxD2 = vc->maxD2;

  if (k == -1) {
    /* check if mfe might be open chain */
    if (E_Fc_rem == 0)
      if ((referenceBPs1[my_iindx[1] - seq_length] > maxD1) || (referenceBPs2[my_iindx[1] - seq_length] > maxD2))
        return;

    /* check for hairpin configurations */
    if (E_Fc_rem == E_FcH_rem) {
      for (d = TURN + 2; d <= seq_length; d++)        /* i,j in [1..length] */
        for (j = d; j <= seq_length; j++) {
          unsigned int u, ij;
          int type, no_close;
          char loopseq[10];
          i = j - d + 1;
          ij = my_iindx[i] - j;
          u = seq_length - j + i - 1;  /* exterior (hairpin) loop size of circular RNA */
          if (u < TURN)
            continue;

          type = ptype[jindx[j] + i];

          no_close = (((type == 3) || (type == 4)) && no_closingGU);
          type = rtype[type];
          if (!type)
            continue;

          if (no_close)
            continue;

          d1 = base_d1 - referenceBPs1[ij];
          d2 = base_d2 - referenceBPs2[ij];
          /* NOTE(review): loopseq is only filled for u < 7; for larger loops an
           * uninitialized buffer is passed to E_Hairpin — presumably E_Hairpin
           * ignores it then, but confirm against its implementation */
          if (u < 7) {
            strcpy(loopseq, sequence + j - 1);
            strncat(loopseq, sequence, i);
          }

          energy = E_Hairpin(u, type, S1[j + 1], S1[i - 1], loopseq, P);

          if (E_C_rem[ij] != INF) {
            if (E_Fc_rem == (E_C_rem[ij] + energy)) {
              backtrack_c(i, j, -1, -1, structure, vc);
              return;
            }
          }

          /* a regular C entry may fall into the rem class once d1/d2 are added */
          if (E_C[ij]) {
            for (cnt1 = k_min_C[ij]; cnt1 <= k_max_C[ij]; cnt1++)
              for (cnt2 = l_min_C[ij][cnt1]; cnt2 <= l_max_C[ij][cnt1]; cnt2 += 2)
                if (((cnt1 + d1) > maxD1) || ((cnt2 + d2) > maxD2)) {
                  if (E_Fc_rem == (E_C[ij][cnt1][cnt2 / 2] + energy)) {
                    backtrack_c(i, j, cnt1, cnt2, structure, vc);
                    return;
                  }
                }
          }
        }
    }

    /* check for interior loop configurations */
    if (E_Fc_rem == E_FcI_rem) {
      for (d = TURN + 2; d <= seq_length; d++)        /* i,j in [1..length] */
        for (j = d; j <= seq_length; j++) {
          unsigned int u, ij, p, q, pq;
          int type, type_2;
          i = j - d + 1;
          ij = my_iindx[i] - j;
          u = seq_length - j + i - 1;
          if (u < TURN)
            continue;

          type = rtype[(unsigned int)ptype[jindx[j] + i]];
          if (!type)
            continue;

          for (p = j + 1; p < seq_length; p++) {
            unsigned int u1, qmin, ln_pre;
            u1 = p - j - 1;
            if (u1 + i - 1 > MAXLOOP)
              break;

            qmin = p + TURN + 1;
            ln_pre = u1 + i + seq_length;
            if (ln_pre > qmin + MAXLOOP)
              qmin = ln_pre - MAXLOOP - 1;

            for (q = qmin; q <= seq_length; q++) {
              unsigned int u2;
              pq = my_iindx[p] - q;
              type_2 = rtype[(unsigned int)ptype[jindx[q] + p]];
              if (type_2 == 0)
                continue;

              u2 = i - 1 + seq_length - q;
              if (u1 + u2 > MAXLOOP)
                continue;

              energy = E_IntLoop(u1, u2, type, type_2, S1[j + 1], S1[i - 1], S1[p - 1], S1[q + 1], P);

              /* rem(ij) combined with regular or rem (pq) */
              if (E_C_rem[ij] != INF) {
                if (E_C[pq]) {
                  for (cnt1 = k_min_C[pq]; cnt1 <= k_max_C[pq]; cnt1++)
                    for (cnt2 = l_min_C[pq][cnt1]; cnt2 <= l_max_C[pq][cnt1]; cnt2 += 2)
                      if (E_Fc_rem == (E_C_rem[ij] + E_C[pq][cnt1][cnt2 / 2] + energy)) {
                        backtrack_c(i, j, -1, -1, structure, vc);
                        backtrack_c(p, q, cnt1, cnt2, structure, vc);
                        return;
                      }
                }

                if (E_C_rem[pq] != INF) {
                  if (E_Fc_rem == (E_C_rem[ij] + E_C_rem[pq] + energy)) {
                    backtrack_c(i, j, -1, -1, structure, vc);
                    backtrack_c(p, q, -1, -1, structure, vc);
                    return;
                  }
                }
              }

              /* regular (ij) combined with rem (pq) */
              if (E_C_rem[pq] != INF) {
                if (E_C[ij]) {
                  for (cnt1 = k_min_C[ij]; cnt1 <= k_max_C[ij]; cnt1++)
                    for (cnt2 = l_min_C[ij][cnt1]; cnt2 <= l_max_C[ij][cnt1]; cnt2 += 2)
                      if (E_Fc_rem == (E_C[ij][cnt1][cnt2 / 2] + E_C_rem[pq] + energy)) {
                        backtrack_c(i, j, cnt1, cnt2, structure, vc);
                        backtrack_c(p, q, -1, -1, structure, vc);
                        return;
                      }
                }
              }

              if (!(E_C[ij]))
                continue;

              if (!(E_C[pq]))
                continue;

              /* get distance to reference if closing the interior loop
               * d2a = dbp(T1_[1,n}, T1_{p,q} + T1_{i,j})
               * d2b = dbp(T2_[1,n}, T2_{p,q} + T2_{i,j})
               */
              d1 = base_d1 - referenceBPs1[ij] - referenceBPs1[pq];
              d2 = base_d2 - referenceBPs2[ij] - referenceBPs2[pq];

              /* both entries regular, combination exceeds (maxD1, maxD2) */
              for (cnt1 = k_min_C[ij]; cnt1 <= k_max_C[ij]; cnt1++)
                for (cnt2 = l_min_C[ij][cnt1]; cnt2 <= l_max_C[ij][cnt1]; cnt2 += 2)
                  for (cnt3 = k_min_C[pq]; cnt3 <= k_max_C[pq]; cnt3++)
                    for (cnt4 = l_min_C[pq][cnt3]; cnt4 <= l_max_C[pq][cnt3]; cnt4 += 2)
                      if (((cnt1 + cnt3 + d1) > maxD1) || ((cnt2 + cnt4 + d2) > maxD2)) {
                        if (E_Fc_rem == (E_C[ij][cnt1][cnt2 / 2] + E_C[pq][cnt3][cnt4 / 2] + energy)) {
                          backtrack_c(i, j, cnt1, cnt2, structure, vc);
                          backtrack_c(p, q, cnt3, cnt4, structure, vc);
                          return;
                        }
                      }
            } /* end for q */
          } /* end for p */
        }
    }

    /* check for multi loop configurations */
    if (E_Fc_rem == E_FcM_rem) {
      if (seq_length > 2 * TURN) {
        for (i = TURN + 1; i < seq_length - 2 * TURN; i++) {
          /* get distancies to references
           * d3a = dbp(T1_[1,n}, T1_{1,k} + T1_{k+1, n})
           * d3b = dbp(T2_[1,n}, T2_{1,k} + T2_{k+1, n})
           */
          if (E_M_rem[my_iindx[1] - i] != INF) {
            if (E_M2[i + 1]) {
              for (cnt1 = k_min_M2[i + 1]; cnt1 <= k_max_M2[i + 1]; cnt1++)
                for (cnt2 = l_min_M2[i + 1][cnt1]; cnt2 <= l_max_M2[i + 1][cnt1]; cnt2 += 2)
                  if (E_Fc_rem == (E_M_rem[my_iindx[1] - i] + E_M2[i + 1][cnt1][cnt2 / 2] + P->MLclosing)) {
                    backtrack_m(1, i, -1, -1, structure, vc);
                    backtrack_m2(i + 1, cnt1, cnt2, structure, vc);
                    return;
                  }
            }

            if (E_M2_rem[i + 1] != INF) {
              if (E_Fc_rem == (E_M_rem[my_iindx[1] - i] + E_M2_rem[i + 1] + P->MLclosing)) {
                backtrack_m(1, i, -1, -1, structure, vc);
                backtrack_m2(i + 1, -1, -1, structure, vc);
                return;
              }
            }
          }

          if (E_M2_rem[i + 1] != INF) {
            if (E_M[my_iindx[1] - i]) {
              for (cnt1 = k_min_M[my_iindx[1] - i]; cnt1 <= k_max_M[my_iindx[1] - i]; cnt1++)
                for (cnt2 = l_min_M[my_iindx[1] - i][cnt1]; cnt2 <= l_max_M[my_iindx[1] - i][cnt1]; cnt2 += 2)
                  if (E_Fc_rem == (E_M[my_iindx[1] - i][cnt1][cnt2 / 2] + E_M2_rem[i + 1] + P->MLclosing)) {
                    backtrack_m(1, i, cnt1, cnt2, structure, vc);
                    backtrack_m2(i + 1, -1, -1, structure, vc);
                    return;
                  }
            }
          }

          if (!(E_M[my_iindx[1] - i]))
            continue;

          if (!(E_M2[i + 1]))
            continue;

          d1 = base_d1 - referenceBPs1[my_iindx[1] - i] - referenceBPs1[my_iindx[i + 1] - seq_length];
          d2 = base_d2 - referenceBPs2[my_iindx[1] - i] - referenceBPs2[my_iindx[i + 1] - seq_length];
          for (cnt1 = k_min_M[my_iindx[1] - i]; cnt1 <= k_max_M[my_iindx[1] - i]; cnt1++)
            for (cnt2 = l_min_M[my_iindx[1] - i][cnt1]; cnt2 <= l_max_M[my_iindx[1] - i][cnt1]; cnt2 += 2)
              for (cnt3 = k_min_M2[i + 1]; cnt3 <= k_max_M2[i + 1]; cnt3++)
                for (cnt4 = l_min_M2[i + 1][cnt3]; cnt4 <= l_max_M2[i + 1][cnt3]; cnt4 += 2)
                  if (((cnt1 + cnt3 + d1) > maxD1) || ((cnt2 + cnt4 + d2) > maxD2)) {
                    if (E_Fc_rem == (E_M[my_iindx[1] - i][cnt1][cnt2 / 2] + E_M2[i + 1][cnt3][cnt4 / 2] + P->MLclosing)) {
                      backtrack_m(1, i, cnt1, cnt2, structure, vc);
                      backtrack_m2(i + 1, cnt3, cnt4, structure, vc);
                      return;
                    }
                  }
        }
      }
    }
  } else {
    /* open chain ? */
    if (E_Fc[k][l / 2] == 0)
      if ((k == referenceBPs1[my_iindx[1] - seq_length]) && (l == referenceBPs2[my_iindx[1] - seq_length]))
        return;

    /* hairpin decomposition at exact distance class (k,l) */
    if ((k >= k_min_FcH) && (k <= k_max_FcH)) {
      if ((l >= l_min_FcH[k]) && (l <= l_max_FcH[k])) {
        if (E_Fc[k][l / 2] == E_FcH[k][l / 2]) {
          for (d = TURN + 2; d <= seq_length; d++)    /* i,j in [1..length] */
            for (j = d; j <= seq_length; j++) {
              unsigned int u, ij;
              int type, no_close;
              char loopseq[10];
              i = j - d + 1;
              ij = my_iindx[i] - j;
              if (!E_C[ij])
                continue;

              u = seq_length - j + i - 1;
              if (u < TURN)
                continue;

              type = ptype[jindx[j] + i];

              no_close = (((type == 3) || (type == 4)) && no_closingGU);
              type = rtype[type];
              if (!type)
                continue;

              if (no_close)
                continue;

              d1 = base_d1 - referenceBPs1[ij];
              d2 = base_d2 - referenceBPs2[ij];
              if (u < 7) {
                strcpy(loopseq, sequence + j - 1);
                strncat(loopseq, sequence, i);
              }

              energy = E_Hairpin(u, type, S1[j + 1], S1[i - 1], loopseq, P);

              if ((k >= d1) && (l >= d2)) {
                if ((k - d1 >= k_min_C[ij]) && (k - d1 <= k_max_C[ij])) {
                  if ((l - d2 >= l_min_C[ij][k - d1]) && (l - d2 <= l_max_C[ij][k - d1])) {
                    if (E_Fc[k][l / 2] == E_C[ij][k - d1][(l - d2) / 2] + energy) {
                      backtrack_c(i, j, k - d1, l - d2, structure, vc);
                      return;
                    }
                  }
                }
              }
            }
        }
      }
    }

    /* interior-loop decomposition at exact distance class (k,l) */
    if ((k >= k_min_FcI) && (k <= k_max_FcI)) {
      if ((l >= l_min_FcI[k]) && (l <= l_max_FcI[k])) {
        if (E_Fc[k][l / 2] == E_FcI[k][l / 2]) {
          for (d = TURN + 2; d <= seq_length; d++)    /* i,j in [1..length] */
            for (j = d; j <= seq_length; j++) {
              unsigned int u, ij, p, q, pq;
              int type, type_2;
              i = j - d + 1;
              ij = my_iindx[i] - j;
              if (!E_C[ij])
                continue;

              u = seq_length - j + i - 1;
              if (u < TURN)
                continue;

              type = ptype[jindx[j] + i];
              type = rtype[type];
              if (!type)
                continue;

              for (p = j + 1; p < seq_length; p++) {
                unsigned int u1, qmin, ln_pre;
                u1 = p - j - 1;
                if (u1 + i - 1 > MAXLOOP)
                  break;

                qmin = p + TURN + 1;
                ln_pre = u1 + i + seq_length;
                if (ln_pre > qmin + MAXLOOP)
                  qmin = ln_pre - MAXLOOP - 1;

                for (q = qmin; q <= seq_length; q++) {
                  unsigned int u2;
                  pq = my_iindx[p] - q;
                  if (!E_C[pq])
                    continue;

                  type_2 = rtype[(unsigned int)ptype[jindx[q] + p]];
                  if (type_2 == 0)
                    continue;

                  u2 = i - 1 + seq_length - q;
                  if (u1 + u2 > MAXLOOP)
                    continue;

                  /* get distance to reference if closing the interior loop
                   * d2a = dbp(T1_[1,n}, T1_{p,q} + T1_{i,j})
                   * d2b = dbp(T2_[1,n}, T2_{p,q} + T2_{i,j})
                   */
                  d1 = base_d1 - referenceBPs1[ij] - referenceBPs1[pq];
                  d2 = base_d2 - referenceBPs2[ij] - referenceBPs2[pq];
                  energy = E_IntLoop(u1, u2, type, type_2, S1[j + 1], S1[i - 1], S1[p - 1], S1[q + 1], P);
                  if ((k >= d1) && (l >= d2)) {
                    for (cnt1 = k_min_C[ij]; cnt1 <= MIN2(k_max_C[ij], k - d1); cnt1++)
                      for (cnt2 = l_min_C[ij][cnt1]; cnt2 <= MIN2(l_max_C[ij][cnt1], l - d2); cnt2 += 2)
                        if ((k - d1 - cnt1 >= k_min_C[pq]) && (k - d1 - cnt1 <= k_max_C[pq])) {
                          if ((l - d2 - cnt2 >= l_min_C[pq][k - d1 - cnt1]) && (l - d2 - cnt2 <= l_max_C[pq][k - d1 - cnt1])) {
                            if ((E_C[ij][cnt1][cnt2 / 2] + E_C[pq][k - d1 - cnt1][(l - d2 - cnt2) / 2] + energy) == E_Fc[k][l / 2]) {
                              backtrack_c(i, j, cnt1, cnt2, structure, vc);
                              backtrack_c(p, q, k - d1 - cnt1, l - d2 - cnt2, structure, vc);
                              return;
                            }
                          }
                        }
                  }
                }
              }
            }
        }
      }
    }

    /* multiloop decomposition at exact distance class (k,l) */
    if ((k >= k_min_FcM) && (k <= k_max_FcM)) {
      if ((l >= l_min_FcM[k]) && (l <= l_max_FcM[k])) {
        if (E_Fc[k][l / 2] == E_FcM[k][l / 2]) {
          if (seq_length > 2 * TURN) {
            for (i = TURN + 1; i < seq_length - 2 * TURN; i++) {
              /* get distancies to references
               * d3a = dbp(T1_[1,n}, T1_{1,k} + T1_{k+1, n})
               * d3b = dbp(T2_[1,n}, T2_{1,k} + T2_{k+1, n})
               */
              if (!E_M[my_iindx[1] - i])
                continue;

              if (!E_M2[i + 1])
                continue;

              d1 = base_d1 - referenceBPs1[my_iindx[1] - i] - referenceBPs1[my_iindx[i + 1] - seq_length];
              d2 = base_d2 - referenceBPs2[my_iindx[1] - i] - referenceBPs2[my_iindx[i + 1] - seq_length];
              if ((k >= d1) && (l >= d2)) {
                for (cnt1 = k_min_M[my_iindx[1] - i]; cnt1 <= MIN2(k_max_M[my_iindx[1] - i], k - d1); cnt1++)
                  for (cnt2 = l_min_M[my_iindx[1] - i][cnt1]; cnt2 <= MIN2(l_max_M[my_iindx[1] - i][cnt1], l - d2); cnt2 += 2)
                    if ((k - d1 - cnt1 >= k_min_M2[i + 1]) && (k - d1 - cnt1 <= k_max_M2[i + 1])) {
                      if ((l - d2 - cnt2 >= l_min_M2[i + 1][k - d1 - cnt1]) && (l - d2 - cnt2 <= l_max_M2[i + 1][k - d1 - cnt1])) {
                        if ((E_M[my_iindx[1] - i][cnt1][cnt2 / 2] + E_M2[i + 1][k - d1 - cnt1][(l - d2 - cnt2) / 2] + P->MLclosing) == E_FcM[k][l / 2]) {
                          backtrack_m(1, i, cnt1, cnt2, structure, vc);
                          backtrack_m2(i + 1, k - d1 - cnt1, l - d2 - cnt2, structure, vc);
                          return;
                        }
                      }
                    }
              }
            }
          }
        }
      }
    }
  }

  /* NOTE(review): "backtack" is a typo in the original message; left as-is */
  vrna_message_error("backtack failed in fc\n");
}


/* Backtrack an M2 entry: a multiloop part containing at least two
 * components, spanning [i..n], split as M1[i,j] + M1[j+1,n].
 * k == -1 selects the "rem" matrices (distance classes beyond
 * maxD1/maxD2).  Aborts via vrna_message_error() on failure. */
PRIVATE void
backtrack_m2(unsigned int i, int k, int l, char *structure, vrna_fold_compound_t *vc)
{
  unsigned int j, ij, j3, n;
  unsigned int *referenceBPs1, *referenceBPs2;
  unsigned int d1, d2, base_d1, base_d2, maxD1, maxD2;
  int *my_iindx, cnt1, cnt2, cnt3, cnt4;
  int ***E_M1, ***E_M2, *E_M2_rem, *E_M1_rem, e;
  int **l_min_M1, **l_max_M1, *k_min_M1, *k_max_M1;
  vrna_mx_mfe_t *matrices;

  matrices = vc->matrices;
  n = vc->length;
  my_iindx = vc->iindx;
  referenceBPs1 = vc->referenceBPs1;
  referenceBPs2 = vc->referenceBPs2;
  E_M1 = matrices->E_M1;
  l_min_M1 = matrices->l_min_M1;
  l_max_M1 = matrices->l_max_M1;
  k_min_M1 = matrices->k_min_M1;
  k_max_M1 = matrices->k_max_M1;
  E_M1_rem = matrices->E_M1_rem;
  E_M2 = matrices->E_M2;
  E_M2_rem = matrices->E_M2_rem;
  maxD1 = vc->maxD1;
  maxD2 = vc->maxD2;
  base_d1 = referenceBPs1[my_iindx[i] - n];
  base_d2 = referenceBPs2[my_iindx[i] - n];

  if (k == -1) {
    e = E_M2_rem[i];
    for (j = i + TURN + 1; j < n - TURN - 1; j++) {
      /* rem(i,j) combined with regular or rem (j+1,n) */
      if (E_M1_rem[my_iindx[i] - j] != INF) {
        if (E_M1[my_iindx[j + 1] - n]) {
          /* NOTE(review): this inner loop uses cnt2++ whereas every sibling
           * l-loop in this file steps cnt2 += 2 — looks inconsistent; verify
           * against the matching fill loop in mfe_circ() before changing */
          for (cnt1 = k_min_M1[my_iindx[j + 1] - n]; cnt1 <= k_max_M1[my_iindx[j + 1] - n]; cnt1++)
            for (cnt2 = l_min_M1[my_iindx[j + 1] - n][cnt1]; cnt2 <= l_max_M1[my_iindx[j + 1] - n][cnt1]; cnt2++)
              if (e == E_M1_rem[my_iindx[i] - j] + E_M1[my_iindx[j + 1] - n][cnt1][cnt2 / 2]) {
                backtrack_m1(i, j, k, l, structure, vc);
                backtrack_m1(j + 1, n, cnt1, cnt2, structure, vc);
                return;
              }
        }

        if (E_M1_rem[my_iindx[j + 1] - n] != INF) {
          if (e == E_M1_rem[my_iindx[i] - j] + E_M1_rem[my_iindx[j + 1] - n]) {
            backtrack_m1(i, j, k, l, structure, vc);
            backtrack_m1(j + 1, n, k, l, structure, vc);
            return;
          }
        }
      }

      /* regular (i,j) combined with rem (j+1,n) */
      if (E_M1_rem[my_iindx[j + 1] - n] != INF) {
        if (E_M1[my_iindx[i] - j]) {
          for (cnt1 = k_min_M1[my_iindx[i] - j]; cnt1 <= k_max_M1[my_iindx[i] - j]; cnt1++)
            for (cnt2 = l_min_M1[my_iindx[i] - j][cnt1]; cnt2 <= l_max_M1[my_iindx[i] - j][cnt1]; cnt2 += 2)
              if (e == E_M1[my_iindx[i] - j][cnt1][cnt2 / 2] + E_M1_rem[my_iindx[j + 1] - n]) {
                backtrack_m1(i, j, cnt1, cnt2, structure, vc);
                backtrack_m1(j + 1, n, k, l, structure, vc);
                return;
              }
        }
      }

      if (!E_M1[my_iindx[i] - j])
        continue;

      if (!E_M1[my_iindx[j + 1] - n])
        continue;

      /* both entries regular, combination exceeds (maxD1, maxD2) */
      d1 = referenceBPs1[my_iindx[i] - n] - referenceBPs1[my_iindx[i] - j] - referenceBPs1[my_iindx[j + 1] - n];
      d2 = referenceBPs2[my_iindx[i] - n] - referenceBPs2[my_iindx[i] - j] - referenceBPs2[my_iindx[j + 1] - n];
      for (cnt1 = k_min_M1[my_iindx[i] - j]; cnt1 <= k_max_M1[my_iindx[i] - j]; cnt1++)
        for (cnt2 = l_min_M1[my_iindx[i] - j][cnt1]; cnt2 <= l_max_M1[my_iindx[i] - j][cnt1]; cnt2 += 2) {
          for (cnt3 = k_min_M1[my_iindx[j + 1] - n]; cnt3 <= k_max_M1[my_iindx[j + 1] - n]; cnt3++)
            for (cnt4 = l_min_M1[my_iindx[j + 1] - n][cnt3]; cnt4 <= l_max_M1[my_iindx[j + 1] - n][cnt3]; cnt4 += 2) {
              if (((cnt1 + cnt3 + d1) > maxD1) || ((cnt2 + cnt4 + d2) > maxD2)) {
                if (e == E_M1[my_iindx[i] - j][cnt1][cnt2 / 2] + E_M1[my_iindx[j + 1] - n][cnt3][cnt4 / 2]) {
                  backtrack_m1(i, j, cnt1, cnt2, structure, vc);
                  backtrack_m1(j + 1, n, cnt3, cnt4, structure, vc);
                  return;
                }
              }
            }
        }
    }
  } else {
    for (j = i + TURN + 1; j < n - TURN - 1; j++) {
      if (!E_M1[my_iindx[i] - j])
        continue;

      if (!E_M1[my_iindx[j + 1] - n])
        continue;

      ij = my_iindx[i] - j;
      j3 = my_iindx[j + 1] - n;
      d1 = base_d1 - referenceBPs1[ij] - referenceBPs1[j3];
      d2 = base_d2 - referenceBPs2[ij] - referenceBPs2[j3];

      for (cnt1 = k_min_M1[ij]; cnt1 <= MIN2(k_max_M1[ij], k - d1); cnt1++)
        for (cnt2 = l_min_M1[ij][cnt1]; cnt2 <= MIN2(l_max_M1[ij][cnt1], l - d2); cnt2 += 2)
          if ((k - d1 - cnt1 >= k_min_M1[j3]) && (k - d1 - cnt1 <= k_max_M1[j3])) {
            if ((l
- d2 - cnt2 >= l_min_M1[j3][k - d1 - cnt1]) && (l - d2 - cnt2 <= l_max_M1[j3][k - d1 - cnt1])) { if (E_M1[ij][cnt1][cnt2 / 2] + E_M1[j3][k - d1 - cnt1][(l - d2 - cnt2) / 2] == E_M2[i][k][l / 2]) { backtrack_m1(i, j, cnt1, cnt2, structure, vc); backtrack_m1(j + 1, n, k - d1 - cnt1, l - d2 - cnt2, structure, vc); return; } } } } } vrna_message_error("backtack failed in m2\n"); } PRIVATE void mfe_circ(vrna_fold_compound_t *vc) { unsigned int d, i, j, maxD1, maxD2, seq_length, *referenceBPs1, *referenceBPs2, d1, d2, base_d1, base_d2, *mm1, *mm2, *bpdist; int *my_iindx, *jindx, energy, cnt1, cnt2, cnt3, cnt4, *rtype; short *S1; char *sequence, *ptype; int ***E_C, ***E_M, ***E_M1; int *E_C_rem, *E_M_rem, *E_M1_rem; int **l_min_C, **l_max_C, **l_min_M, **l_max_M, **l_min_M1, **l_max_M1; int *k_min_C, *k_max_C, *k_min_M, *k_max_M, *k_min_M1, *k_max_M1; vrna_param_t *P; vrna_md_t *md; vrna_mx_mfe_t *matrices; P = vc->params; md = &(P->model_details); matrices = vc->matrices; sequence = vc->sequence; seq_length = vc->length; maxD1 = vc->maxD1; maxD2 = vc->maxD2; S1 = vc->sequence_encoding; ptype = vc->ptype; rtype = &(md->rtype[0]); my_iindx = vc->iindx; jindx = vc->jindx; referenceBPs1 = vc->referenceBPs1; referenceBPs2 = vc->referenceBPs2; mm1 = vc->mm1; mm2 = vc->mm2; bpdist = vc->bpdist; E_C = matrices->E_C; l_min_C = matrices->l_min_C; l_max_C = matrices->l_max_C; k_min_C = matrices->k_min_C; k_max_C = matrices->k_max_C; E_M = matrices->E_M; l_min_M = matrices->l_min_M; l_max_M = matrices->l_max_M; k_min_M = matrices->k_min_M; k_max_M = matrices->k_max_M; E_M1 = matrices->E_M1; l_min_M1 = matrices->l_min_M1; l_max_M1 = matrices->l_max_M1; k_min_M1 = matrices->k_min_M1; k_max_M1 = matrices->k_max_M1; E_C_rem = matrices->E_C_rem; E_M_rem = matrices->E_M_rem; E_M1_rem = matrices->E_M1_rem; #ifdef _OPENMP #pragma omp parallel for private(d1,d2,cnt1,cnt2,cnt3,cnt4,j, i) #endif for (i = 1; i < seq_length - TURN - 1; i++) { /* guess memory requirements for M2 */ int min_k, 
max_k, max_l, min_l; int min_k_real, max_k_real, *min_l_real, *max_l_real; min_k = min_l = 0; max_k = mm1[my_iindx[i] - seq_length] + referenceBPs1[my_iindx[i] - seq_length]; max_l = mm2[my_iindx[i] - seq_length] + referenceBPs2[my_iindx[i] - seq_length]; prepareBoundaries(min_k, max_k, min_l, max_l, bpdist[my_iindx[i] - seq_length], &matrices->k_min_M2[i], &matrices->k_max_M2[i], &matrices->l_min_M2[i], &matrices->l_max_M2[i] ); prepareArray(&matrices->E_M2[i], matrices->k_min_M2[i], matrices->k_max_M2[i], matrices->l_min_M2[i], matrices->l_max_M2[i] ); preparePosteriorBoundaries(matrices->k_max_M2[i] - matrices->k_min_M2[i] + 1, matrices->k_min_M2[i], &min_k_real, &max_k_real, &min_l_real, &max_l_real ); /* begin filling of M2 array */ for (j = i + TURN + 1; j < seq_length - TURN - 1; j++) { if (E_M1_rem[my_iindx[i] - j] != INF) { if (E_M1[my_iindx[j + 1] - seq_length]) { for (cnt1 = k_min_M1[my_iindx[j + 1] - seq_length]; cnt1 <= k_max_M1[my_iindx[j + 1] - seq_length]; cnt1++) for (cnt2 = l_min_M1[my_iindx[j + 1] - seq_length][cnt1]; cnt2 <= l_max_M1[my_iindx[j + 1] - seq_length][cnt1]; cnt2++) matrices->E_M2_rem[i] = MIN2(matrices->E_M2_rem[i], E_M1_rem[my_iindx[i] - j] + E_M1[my_iindx[j + 1] - seq_length][cnt1][cnt2 / 2] ); } if (E_M1_rem[my_iindx[j + 1] - seq_length] != INF) matrices->E_M2_rem[i] = MIN2(matrices->E_M2_rem[i], E_M1_rem[my_iindx[i] - j] + E_M1_rem[my_iindx[j + 1] - seq_length]); } if (E_M1_rem[my_iindx[j + 1] - seq_length] != INF) { if (E_M1[my_iindx[i] - j]) { for (cnt1 = k_min_M1[my_iindx[i] - j]; cnt1 <= k_max_M1[my_iindx[i] - j]; cnt1++) for (cnt2 = l_min_M1[my_iindx[i] - j][cnt1]; cnt2 <= l_max_M1[my_iindx[i] - j][cnt1]; cnt2 += 2) matrices->E_M2_rem[i] = MIN2(matrices->E_M2_rem[i], E_M1[my_iindx[i] - j][cnt1][cnt2 / 2] + E_M1_rem[my_iindx[j + 1] - seq_length] ); } } if (!E_M1[my_iindx[i] - j]) continue; if (!E_M1[my_iindx[j + 1] - seq_length]) continue; d1 = referenceBPs1[my_iindx[i] - seq_length] - referenceBPs1[my_iindx[i] - j] - 
referenceBPs1[my_iindx[j + 1] - seq_length]; d2 = referenceBPs2[my_iindx[i] - seq_length] - referenceBPs2[my_iindx[i] - j] - referenceBPs2[my_iindx[j + 1] - seq_length]; for (cnt1 = k_min_M1[my_iindx[i] - j]; cnt1 <= k_max_M1[my_iindx[i] - j]; cnt1++) for (cnt2 = l_min_M1[my_iindx[i] - j][cnt1]; cnt2 <= l_max_M1[my_iindx[i] - j][cnt1]; cnt2 += 2) { for (cnt3 = k_min_M1[my_iindx[j + 1] - seq_length]; cnt3 <= k_max_M1[my_iindx[j + 1] - seq_length]; cnt3++) for (cnt4 = l_min_M1[my_iindx[j + 1] - seq_length][cnt3]; cnt4 <= l_max_M1[my_iindx[j + 1] - seq_length][cnt3]; cnt4 += 2) { if (((cnt1 + cnt3 + d1) <= maxD1) && ((cnt2 + cnt4 + d2) <= maxD2)) { matrices->E_M2[i][cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2) / 2] = MIN2(matrices->E_M2[i][cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2) / 2], E_M1[my_iindx[i] - j][cnt1][cnt2 / 2] + E_M1[my_iindx[j + 1] - seq_length][cnt3][cnt4 / 2] ); updatePosteriorBoundaries(cnt1 + cnt3 + d1, cnt2 + cnt4 + d2, &min_k_real, &max_k_real, &min_l_real, &max_l_real ); } else { matrices->E_M2_rem[i] = MIN2(matrices->E_M2_rem[i], E_M1[my_iindx[i] - j][cnt1][cnt2 / 2] + E_M1[my_iindx[j + 1] - seq_length][cnt3][cnt4 / 2] ); } } } } /* resize and move memory portions of energy matrix E_M2 */ adjustArrayBoundaries(&matrices->E_M2[i], &matrices->k_min_M2[i], &matrices->k_max_M2[i], &matrices->l_min_M2[i], &matrices->l_max_M2[i], min_k_real, max_k_real, min_l_real, max_l_real ); } /* end for i */ base_d1 = referenceBPs1[my_iindx[1] - seq_length]; base_d2 = referenceBPs2[my_iindx[1] - seq_length]; /* guess memory requirements for E_FcH, E_FcI and E_FcM */ int min_k, max_k, max_l, min_l; int min_k_real, max_k_real, min_k_real_fcH, max_k_real_fcH, min_k_real_fcI, max_k_real_fcI, min_k_real_fcM, max_k_real_fcM; int *min_l_real, *max_l_real, *min_l_real_fcH, *max_l_real_fcH, *min_l_real_fcI, *max_l_real_fcI, *min_l_real_fcM, *max_l_real_fcM; max_l_real_fcM = min_l_real_fcM = NULL; max_l_real_fcI = min_l_real_fcI = NULL; max_l_real_fcH = min_l_real_fcH = NULL; 
max_l_real = min_l_real = NULL; min_k = min_l = 0; max_k = mm1[my_iindx[1] - seq_length] + referenceBPs1[my_iindx[1] - seq_length]; max_l = mm2[my_iindx[1] - seq_length] + referenceBPs2[my_iindx[1] - seq_length]; #ifdef _OPENMP #pragma omp sections { #pragma omp section { #endif prepareBoundaries(min_k, max_k, min_l, max_l, bpdist[my_iindx[1] - seq_length], &matrices->k_min_Fc, &matrices->k_max_Fc, &matrices->l_min_Fc, &matrices->l_max_Fc ); prepareArray(&matrices->E_Fc, matrices->k_min_Fc, matrices->k_max_Fc, matrices->l_min_Fc, matrices->l_max_Fc ); #ifdef _OPENMP } #pragma omp section { #endif prepareBoundaries(min_k, max_k, min_l, max_l, bpdist[my_iindx[1] - seq_length], &matrices->k_min_FcH, &matrices->k_max_FcH, &matrices->l_min_FcH, &matrices->l_max_FcH ); prepareArray(&matrices->E_FcH, matrices->k_min_FcH, matrices->k_max_FcH, matrices->l_min_FcH, matrices->l_max_FcH ); #ifdef _OPENMP } #pragma omp section { #endif prepareBoundaries(min_k, max_k, min_l, max_l, bpdist[my_iindx[1] - seq_length], &matrices->k_min_FcI, &matrices->k_max_FcI, &matrices->l_min_FcI, &matrices->l_max_FcI ); prepareArray(&matrices->E_FcI, matrices->k_min_FcI, matrices->k_max_FcI, matrices->l_min_FcI, matrices->l_max_FcI ); #ifdef _OPENMP } #pragma omp section { #endif prepareBoundaries(min_k, max_k, min_l, max_l, bpdist[my_iindx[1] - seq_length], &matrices->k_min_FcM, &matrices->k_max_FcM, &matrices->l_min_FcM, &matrices->l_max_FcM ); prepareArray(&matrices->E_FcM, matrices->k_min_FcM, matrices->k_max_FcM, matrices->l_min_FcM, matrices->l_max_FcM ); #ifdef _OPENMP } #pragma omp section { #endif preparePosteriorBoundaries(max_k - min_k + 1, min_k, &min_k_real, &max_k_real, &min_l_real, &max_l_real ); #ifdef _OPENMP } #pragma omp section { #endif preparePosteriorBoundaries(max_k - min_k + 1, min_k, &min_k_real_fcH, &max_k_real_fcH, &min_l_real_fcH, &max_l_real_fcH ); #ifdef _OPENMP } #pragma omp section { #endif preparePosteriorBoundaries(max_k - min_k + 1, min_k, &min_k_real_fcI, 
&max_k_real_fcI, &min_l_real_fcI, &max_l_real_fcI ); #ifdef _OPENMP } #pragma omp section { #endif preparePosteriorBoundaries(max_k - min_k + 1, min_k, &min_k_real_fcM, &max_k_real_fcM, &min_l_real_fcM, &max_l_real_fcM ); #ifdef _OPENMP } } #endif /* begin actual energy calculations */ #ifdef _OPENMP #pragma omp sections private(d, d1,d2,cnt1,cnt2,cnt3,cnt4,j, i, energy) { #pragma omp section { #endif for (d = TURN + 2; d <= seq_length; d++) /* i,j in [1..length] */ for (j = d; j <= seq_length; j++) { unsigned int u, ij; int type, no_close; char loopseq[10]; i = j - d + 1; ij = my_iindx[i] - j; u = seq_length - j + i - 1; if (u < TURN) continue; type = ptype[jindx[j] + i]; no_close = (((type == 3) || (type == 4)) && no_closingGU); type = rtype[type]; if (!type) continue; if (no_close) continue; d1 = base_d1 - referenceBPs1[ij]; d2 = base_d2 - referenceBPs2[ij]; if (u < 7) { strcpy(loopseq, sequence + j - 1); strncat(loopseq, sequence, i); } energy = E_Hairpin(u, type, S1[j + 1], S1[i - 1], loopseq, P); if (E_C_rem[ij] != INF) matrices->E_FcH_rem = MIN2(matrices->E_FcH_rem, E_C_rem[ij] + energy); if (!E_C[ij]) continue; for (cnt1 = k_min_C[ij]; cnt1 <= k_max_C[ij]; cnt1++) for (cnt2 = l_min_C[ij][cnt1]; cnt2 <= l_max_C[ij][cnt1]; cnt2 += 2) { if (((cnt1 + d1) <= maxD1) && ((cnt2 + d2) <= maxD2)) { matrices->E_FcH[cnt1 + d1][(cnt2 + d2) / 2] = MIN2(matrices->E_FcH[cnt1 + d1][(cnt2 + d2) / 2], energy + E_C[ij][cnt1][cnt2 / 2] ); updatePosteriorBoundaries(cnt1 + d1, cnt2 + d2, &min_k_real_fcH, &max_k_real_fcH, &min_l_real_fcH, &max_l_real_fcH ); } else { matrices->E_FcH_rem = MIN2(matrices->E_FcH_rem, energy + E_C[ij][cnt1][cnt2 / 2]); } } } /* end of i-j loop */ /* resize and move memory portions of energy matrix E_FcH */ adjustArrayBoundaries(&matrices->E_FcH, &matrices->k_min_FcH, &matrices->k_max_FcH, &matrices->l_min_FcH, &matrices->l_max_FcH, min_k_real_fcH, max_k_real_fcH, min_l_real_fcH, max_l_real_fcH ); #ifdef _OPENMP } #pragma omp section { #endif for (d = 
TURN + 2; d <= seq_length; d++) /* i,j in [1..length] */ for (j = d; j <= seq_length; j++) { unsigned int u, ij, p, q, pq; int type, type_2, no_close; i = j - d + 1; ij = my_iindx[i] - j; u = seq_length - j + i - 1; if (u < TURN) continue; type = ptype[jindx[j] + i]; no_close = (((type == 3) || (type == 4)) && no_closingGU); type = rtype[type]; if (!type) continue; if (no_close) continue; if (E_C_rem[ij] != INF) { for (p = j + 1; p < seq_length; p++) { unsigned int u1, qmin, ln_pre; u1 = p - j - 1; if (u1 + i - 1 > MAXLOOP) break; qmin = p + TURN + 1; ln_pre = u1 + i + seq_length; if (ln_pre > qmin + MAXLOOP) qmin = ln_pre - MAXLOOP - 1; for (q = qmin; q <= seq_length; q++) { unsigned int u2; pq = my_iindx[p] - q; type_2 = rtype[(unsigned int)ptype[jindx[q] + p]]; if (type_2 == 0) continue; u2 = i - 1 + seq_length - q; if (u1 + u2 > MAXLOOP) continue; /* get distance to reference if closing the interior loop * d2a = dbp(T1_[1,n}, T1_{p,q} + T1_{i,j}) * d2b = dbp(T2_[1,n}, T2_{p,q} + T2_{i,j}) */ d1 = base_d1 - referenceBPs1[ij] - referenceBPs1[pq]; d2 = base_d2 - referenceBPs2[ij] - referenceBPs2[pq]; energy = E_IntLoop(u1, u2, type, type_2, S1[j + 1], S1[i - 1], S1[p - 1], S1[q + 1], P); if (E_C_rem[pq] != INF) matrices->E_FcI_rem = MIN2(matrices->E_FcI_rem, E_C_rem[ij] + E_C_rem[pq] + energy); if (E_C[pq]) { for (cnt1 = k_min_C[pq]; cnt1 <= k_max_C[pq]; cnt1++) for (cnt2 = l_min_C[pq][cnt1]; cnt2 <= l_max_C[pq][cnt1]; cnt2 += 2) matrices->E_FcI_rem = MIN2(matrices->E_FcI_rem, E_C_rem[ij] + E_C[pq][cnt1][cnt2 / 2] + energy); } } } } if (E_C[ij]) { for (p = j + 1; p < seq_length; p++) { unsigned int u1, qmin, ln_pre; u1 = p - j - 1; if (u1 + i - 1 > MAXLOOP) break; qmin = p + TURN + 1; ln_pre = u1 + i + seq_length; if (ln_pre > qmin + MAXLOOP) qmin = ln_pre - MAXLOOP - 1; for (q = qmin; q <= seq_length; q++) { unsigned int u2; pq = my_iindx[p] - q; type_2 = rtype[(unsigned int)ptype[jindx[q] + p]]; if (type_2 == 0) continue; u2 = i - 1 + seq_length - q; if (u1 + u2 
> MAXLOOP) continue; /* get distance to reference if closing the interior loop * d2a = dbp(T1_[1,n}, T1_{p,q} + T1_{i,j}) * d2b = dbp(T2_[1,n}, T2_{p,q} + T2_{i,j}) */ d1 = base_d1 - referenceBPs1[ij] - referenceBPs1[pq]; d2 = base_d2 - referenceBPs2[ij] - referenceBPs2[pq]; energy = E_IntLoop(u1, u2, type, type_2, S1[j + 1], S1[i - 1], S1[p - 1], S1[q + 1], P); if (E_C_rem[pq] != INF) { for (cnt1 = k_min_C[ij]; cnt1 <= k_max_C[ij]; cnt1++) for (cnt2 = l_min_C[ij][cnt1]; cnt2 <= l_max_C[ij][cnt1]; cnt2 += 2) matrices->E_FcI_rem = MIN2(matrices->E_FcI_rem, E_C[ij][cnt1][cnt2 / 2] + E_C_rem[pq] + energy); } if (E_C[pq]) { for (cnt1 = k_min_C[ij]; cnt1 <= k_max_C[ij]; cnt1++) for (cnt2 = l_min_C[ij][cnt1]; cnt2 <= l_max_C[ij][cnt1]; cnt2 += 2) for (cnt3 = k_min_C[pq]; cnt3 <= k_max_C[pq]; cnt3++) for (cnt4 = l_min_C[pq][cnt3]; cnt4 <= l_max_C[pq][cnt3]; cnt4 += 2) { if (((cnt1 + cnt3 + d1) <= maxD1) && ((cnt2 + cnt4 + d2) <= maxD2)) { matrices->E_FcI[cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2) / 2] = MIN2( matrices->E_FcI[cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2) / 2], E_C[ij][cnt1][cnt2 / 2] + E_C[pq][cnt3][cnt4 / 2] + energy ); updatePosteriorBoundaries(cnt1 + cnt3 + d1, cnt2 + cnt4 + d2, &min_k_real_fcI, &max_k_real_fcI, &min_l_real_fcI, &max_l_real_fcI ); } else { matrices->E_FcI_rem = MIN2( matrices->E_FcI_rem, E_C[ij][cnt1][cnt2 / 2] + E_C[pq][cnt3][cnt4 / 2] + energy ); } } } } } } } /* end of i-j loop */ /* resize and move memory portions of energy matrix E_FcI */ adjustArrayBoundaries(&matrices->E_FcI, &matrices->k_min_FcI, &matrices->k_max_FcI, &matrices->l_min_FcI, &matrices->l_max_FcI, min_k_real_fcI, max_k_real_fcI, min_l_real_fcI, max_l_real_fcI ); #ifdef _OPENMP } #pragma omp section { #endif if (seq_length > 2 * TURN) { for (i = TURN + 1; i < seq_length - 2 * TURN; i++) { /* get distancies to references * d3a = dbp(T1_[1,n}, T1_{1,k} + T1_{k+1, n}) * d3b = dbp(T2_[1,n}, T2_{1,k} + T2_{k+1, n}) */ d1 = base_d1 - referenceBPs1[my_iindx[1] - i] - 
referenceBPs1[my_iindx[i + 1] - seq_length]; d2 = base_d2 - referenceBPs2[my_iindx[1] - i] - referenceBPs2[my_iindx[i + 1] - seq_length]; if (E_M_rem[my_iindx[1] - i] != INF) { if (matrices->E_M2[i + 1]) { for (cnt1 = matrices->k_min_M2[i + 1]; cnt1 <= matrices->k_max_M2[i + 1]; cnt1++) for (cnt2 = matrices->l_min_M2[i + 1][cnt1]; cnt2 <= matrices->l_max_M2[i + 1][cnt1]; cnt2 += 2) matrices->E_FcM_rem = MIN2(matrices->E_FcM_rem, E_M_rem[my_iindx[1] - i] + matrices->E_M2[i + 1][cnt1][cnt2 / 2] + P->MLclosing); } if (matrices->E_M2_rem[i + 1] != INF) matrices->E_FcM_rem = MIN2(matrices->E_FcM_rem, E_M_rem[my_iindx[1] - i] + matrices->E_M2_rem[i + 1] + P->MLclosing); } if (matrices->E_M2_rem[i + 1] != INF) { if (E_M[my_iindx[1] - i]) { for (cnt1 = k_min_M[my_iindx[1] - i]; cnt1 <= k_max_M[my_iindx[1] - i]; cnt1++) for (cnt2 = l_min_M[my_iindx[1] - i][cnt1]; cnt2 <= l_max_M[my_iindx[1] - i][cnt1]; cnt2 += 2) matrices->E_FcM_rem = MIN2(matrices->E_FcM_rem, E_M[my_iindx[1] - i][cnt1][cnt2 / 2] + matrices->E_M2_rem[i + 1] + P->MLclosing); } } if (!E_M[my_iindx[1] - i]) continue; if (!matrices->E_M2[i + 1]) continue; for (cnt1 = k_min_M[my_iindx[1] - i]; cnt1 <= k_max_M[my_iindx[1] - i]; cnt1++) for (cnt2 = l_min_M[my_iindx[1] - i][cnt1]; cnt2 <= l_max_M[my_iindx[1] - i][cnt1]; cnt2 += 2) for (cnt3 = matrices->k_min_M2[i + 1]; cnt3 <= matrices->k_max_M2[i + 1]; cnt3++) for (cnt4 = matrices->l_min_M2[i + 1][cnt3]; cnt4 <= matrices->l_max_M2[i + 1][cnt3]; cnt4 += 2) { if (((cnt1 + cnt3 + d1) <= maxD1) && ((cnt2 + cnt4 + d2) <= maxD2)) { matrices->E_FcM[cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2) / 2] = MIN2( matrices->E_FcM[cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2) / 2], E_M[my_iindx[1] - i][cnt1][cnt2 / 2] + matrices->E_M2[i + 1][cnt3][cnt4 / 2] + P->MLclosing ); updatePosteriorBoundaries(cnt1 + cnt3 + d1, cnt2 + cnt4 + d2, &min_k_real_fcM, &max_k_real_fcM, &min_l_real_fcM, &max_l_real_fcM ); } else { matrices->E_FcM_rem = MIN2( matrices->E_FcM_rem, E_M[my_iindx[1] - i][cnt1][cnt2 / 2] 
+ matrices->E_M2[i + 1][cnt3][cnt4 / 2] + P->MLclosing ); } } } } /* resize and move memory portions of energy matrix E_FcM */ adjustArrayBoundaries(&matrices->E_FcM, &matrices->k_min_FcM, &matrices->k_max_FcM, &matrices->l_min_FcM, &matrices->l_max_FcM, min_k_real_fcM, max_k_real_fcM, min_l_real_fcM, max_l_real_fcM ); #ifdef _OPENMP } } #endif /* compute E_Fc_rem */ matrices->E_Fc_rem = MIN2(matrices->E_FcH_rem, matrices->E_FcI_rem); matrices->E_Fc_rem = MIN2(matrices->E_Fc_rem, matrices->E_FcM_rem); /* add the case were structure is unfolded chain */ if ((referenceBPs1[my_iindx[1] - seq_length] > maxD1) || (referenceBPs2[my_iindx[1] - seq_length] > maxD2)) matrices->E_Fc_rem = MIN2(matrices->E_Fc_rem, 0); /* compute all E_Fc */ for (cnt1 = matrices->k_min_FcH; cnt1 <= matrices->k_max_FcH; cnt1++) for (cnt2 = matrices->l_min_FcH[cnt1]; cnt2 <= matrices->l_max_FcH[cnt1]; cnt2 += 2) { matrices->E_Fc[cnt1][cnt2 / 2] = MIN2(matrices->E_Fc[cnt1][cnt2 / 2], matrices->E_FcH[cnt1][cnt2 / 2] ); updatePosteriorBoundaries(cnt1, cnt2, &min_k_real, &max_k_real, &min_l_real, &max_l_real ); } for (cnt1 = matrices->k_min_FcI; cnt1 <= matrices->k_max_FcI; cnt1++) for (cnt2 = matrices->l_min_FcI[cnt1]; cnt2 <= matrices->l_max_FcI[cnt1]; cnt2 += 2) { matrices->E_Fc[cnt1][cnt2 / 2] = MIN2(matrices->E_Fc[cnt1][cnt2 / 2], matrices->E_FcI[cnt1][cnt2 / 2] ); updatePosteriorBoundaries(cnt1, cnt2, &min_k_real, &max_k_real, &min_l_real, &max_l_real ); } for (cnt1 = matrices->k_min_FcM; cnt1 <= matrices->k_max_FcM; cnt1++) for (cnt2 = matrices->l_min_FcM[cnt1]; cnt2 <= matrices->l_max_FcM[cnt1]; cnt2 += 2) { matrices->E_Fc[cnt1][cnt2 / 2] = MIN2(matrices->E_Fc[cnt1][cnt2 / 2], matrices->E_FcM[cnt1][cnt2 / 2] ); updatePosteriorBoundaries(cnt1, cnt2, &min_k_real, &max_k_real, &min_l_real, &max_l_real ); } /* add the case were structure is unfolded chain */ matrices->E_Fc[referenceBPs1[my_iindx[1] - seq_length]][referenceBPs2[my_iindx[1] - seq_length] / 2] = 
/* (continuation of the circular 2Dfold post-processing that starts before
 * this chunk) finish folding the open-chain case into E_Fc, then shrink
 * E_Fc down to the (k,l) index window that was actually populated. */
MIN2(matrices->E_Fc[referenceBPs1[my_iindx[1] - seq_length]][referenceBPs2[my_iindx[1] - seq_length] / 2], 0);
/* make sure the open-chain coordinates survive the reallocation below */
updatePosteriorBoundaries(referenceBPs1[my_iindx[1] - seq_length],
                          referenceBPs2[my_iindx[1] - seq_length],
                          &min_k_real,
                          &max_k_real,
                          &min_l_real,
                          &max_l_real
                          );
adjustArrayBoundaries(&matrices->E_Fc,
                      &matrices->k_min_Fc,
                      &matrices->k_max_Fc,
                      &matrices->l_min_Fc,
                      &matrices->l_max_Fc,
                      min_k_real,
                      max_k_real,
                      min_l_real,
                      max_l_real
                      );
}

/*
 * Shrink a two-dimensional (k,l)-indexed energy matrix to the window
 * [k_min_post..k_max_post] x [l_min_post[k]..l_max_post[k]] that was
 * actually written, releasing every row/cell outside of it.
 *
 * Rows are addressed with a k offset ((*array)[k]) and cells with an l/2
 * offset, mirroring the layout produced by prepareArray().
 *
 * array                   matrix to shrink (set to NULL if emptied)
 * k_min, k_max            current k boundaries; updated in place
 * l_min, l_max            current per-k l boundaries; updated in place
 * k_min_post, k_max_post  new k window; k_min_post == INF signals that
 *                         nothing was stored and all memory is released
 * l_min_post, l_max_post  new per-k l windows; both arrays are consumed
 *                         (freed) before returning
 */
PRIVATE void
adjustArrayBoundaries(int ***array, int *k_min, int *k_max, int **l_min, int **l_max, int k_min_post, int k_max_post, int *l_min_post, int *l_max_post)
{
  int cnt1;
  int k_diff_pre = k_min_post - *k_min;         /* rows dropped at the front */
  int mem_size = k_max_post - k_min_post + 1;   /* rows kept */

  if (k_min_post < INF) {
    /* free all the unused memory behind actual data */
    for (cnt1 = k_max_post + 1; cnt1 <= *k_max; cnt1++) {
      (*array)[cnt1] += (*l_min)[cnt1] / 2;  /* undo l offset -> raw pointer */
      free((*array)[cnt1]);
    }
    /* free unused memory before actual data */
    for (cnt1 = *k_min; cnt1 < k_min_post; cnt1++) {
      (*array)[cnt1] += (*l_min)[cnt1] / 2;
      free((*array)[cnt1]);
    }
    /* move data to front and thereby eliminating unused memory in front of actual data */
    if (k_diff_pre > 0) {
      memmove((int **)(*array), ((int **)(*array)) + k_diff_pre, sizeof(int *) * mem_size);
      memmove((int *)(*l_min), ((int *)(*l_min)) + k_diff_pre, sizeof(int) * mem_size);
      memmove((int *)(*l_max), ((int *)(*l_max)) + k_diff_pre, sizeof(int) * mem_size);
    }
    /* reallocating memory to actual size used
     * NOTE(review): p = realloc(p, ...) loses the block if realloc fails;
     * this matches the project's abort-on-OOM allocation style (vrna_alloc)
     * but is worth confirming. These reallocs only ever shrink. */
    *array += *k_min;                                      /* raw base = offset ptr + old k_min */
    *array = (int **)realloc(*array, sizeof(int *) * mem_size);
    *array -= k_min_post;                                  /* re-establish k-offset addressing */
    *l_min += *k_min;
    *l_min = (int *)realloc(*l_min, sizeof(int) * mem_size);
    *l_min -= k_min_post;
    *l_max += *k_min;
    *l_max = (int *)realloc(*l_max, sizeof(int) * mem_size);
    *l_max -= k_min_post;
    /* adjust l dimension of array */
    for (cnt1 = k_min_post; cnt1 <= k_max_post; cnt1++) {
      if (l_min_post[cnt1] < INF) {
        /* new memsize */
        mem_size = (l_max_post[cnt1] - l_min_post[cnt1] + 1) / 2 + 1;
        /* reshift the pointer */
        (*array)[cnt1] +=
(*l_min)[cnt1] / 2;  /* (cont.) undo old l offset -> raw pointer */
/* cells exist for every second l (same parity as l_min); if the new
 * l_min has the opposite parity, skip one extra cell when compacting */
int shift = (l_min_post[cnt1] % 2 == (*l_min)[cnt1] % 2) ? 0 : 1;
/* eliminate unused memory in front of actual data */
unsigned int start = (l_min_post[cnt1] - (*l_min)[cnt1]) / 2 + shift;
if (start > 0)
  memmove((int *)((*array)[cnt1]), (int *)((*array)[cnt1]) + start, sizeof(int) * mem_size);

(*array)[cnt1] = (int *)realloc((*array)[cnt1], sizeof(int) * mem_size);
(*array)[cnt1] -= l_min_post[cnt1] / 2;  /* re-apply offset for the new l_min */
} else {
  /* free according memory */
  (*array)[cnt1] += (*l_min)[cnt1] / 2;
  free((*array)[cnt1]);
}

(*l_min)[cnt1] = l_min_post[cnt1];
(*l_max)[cnt1] = l_max_post[cnt1];
}
} else {
  /* we have to free all unused memory */
  for (cnt1 = *k_min; cnt1 <= *k_max; cnt1++) {
    (*array)[cnt1] += (*l_min)[cnt1] / 2;
    free((*array)[cnt1]);
  }
  (*l_min) += *k_min;
  (*l_max) += *k_min;
  free(*l_min);
  free(*l_max);
  (*array) += *k_min;
  free(*array);
  *array = NULL;
}

/* consume the posterior l-boundary arrays; they carry an offset of the
 * old *k_min (see preparePosteriorBoundaries), which is still the old
 * value here because *k_min is published last */
l_min_post += *k_min;
l_max_post += *k_min;
free(l_min_post);
free(l_max_post);
/* publish the new k window */
*k_min = k_min_post;
*k_max = k_max_post;
}

/*
 * Allocate and reset the posterior boundary trackers: *min_k/*max_k are
 * initialized to an empty window (INF / 0), and per-k l windows likewise.
 * Both l arrays are offset by `shift` so callers can index them directly
 * with k values.
 */
PRIVATE INLINE void
preparePosteriorBoundaries(int size, int shift, int *min_k, int *max_k, int **min_l, int **max_l)
{
  int i;

  *min_k = INF;
  *max_k = 0;
  *min_l = (int *)vrna_alloc(sizeof(int) * size);
  *max_l = (int *)vrna_alloc(sizeof(int) * size);
  for (i = 0; i < size; i++) {
    (*min_l)[i] = INF;
    (*max_l)[i] = 0;
  }
  *min_l -= shift;
  *max_l -= shift;
}

/* Widen the tracked (k,l) bounding box so it includes the point (d1,d2). */
PRIVATE INLINE void
updatePosteriorBoundaries(int d1, int d2, int *min_k, int *max_k, int **min_l, int **max_l)
{
  (*min_l)[d1] = MIN2((*min_l)[d1], d2);
  (*max_l)[d1] = MAX2((*max_l)[d1], d2);
  *min_k = MIN2(*min_k, d1);
  *max_k = MAX2(*max_k, d1);
}

/*
 * Set up k/l boundary arrays from precomputed limits: copies the k window,
 * allocates per-k l windows (offset by min_k_pre for direct k indexing),
 * and initializes each l window from min_l_pre/max_l_pre subject to the
 * base-pair distance constraint (loop body continues on the next chunk line).
 */
INLINE PRIVATE void
prepareBoundaries(int min_k_pre, int max_k_pre, int min_l_pre, int max_l_pre, int bpdist, int *min_k, int *max_k, int **min_l, int **max_l)
{
  int cnt;
  int mem = max_k_pre - min_k_pre + 1;

  *min_k = min_k_pre;
  *max_k = max_k_pre;
  *min_l = (int *)vrna_alloc(sizeof(int) * mem);
  *max_l = (int *)vrna_alloc(sizeof(int) * mem);
  *min_l -= min_k_pre;
  *max_l -= min_k_pre;
  /* for each k
guess the according minimum l*/
for (cnt = min_k_pre; cnt <= max_k_pre; cnt++) {
  (*min_l)[cnt] = min_l_pre;
  (*max_l)[cnt] = max_l_pre;
  /* raise l until k + l reaches the base-pair distance */
  while ((*min_l)[cnt] + cnt < bpdist)
    (*min_l)[cnt]++;
  /* keep (k + l) on the same parity as bpdist */
  if ((bpdist % 2) != (((*min_l)[cnt] + cnt) % 2))
    (*min_l)[cnt]++;
}
}

/*
 * Allocate an int matrix over the window [min_k..max_k] x
 * [min_l[k]..max_l[k]], row pointers offset by min_k and cell pointers
 * offset by min_l[k]/2 (cells exist for every second l). All cells are
 * initialized to INF.
 */
INLINE PRIVATE void
prepareArray(int ***array, int min_k, int max_k, int *min_l, int *max_l)
{
  int i, j, mem;

  *array = (int **)vrna_alloc(sizeof(int *) * (max_k - min_k + 1));
  *array -= min_k;   /* allow direct indexing with k */
  for (i = min_k; i <= max_k; i++) {
    mem = (max_l[i] - min_l[i] + 1) / 2 + 1;
    (*array)[i] = (int *)vrna_alloc(sizeof(int) * mem);
    for (j = 0; j < mem; j++)
      (*array)[i][j] = INF;
    (*array)[i] -= min_l[i] / 2;   /* allow direct indexing with l/2 */
  }
}

/*
 * Same layout as prepareArray() but for unsigned long counters; cells are
 * left zero-initialized by vrna_alloc (no INF fill).
 */
INLINE PRIVATE void
prepareArray2(unsigned long ***array, int min_k, int max_k, int *min_l, int *max_l)
{
  int i, mem;

  *array = (unsigned long **)vrna_alloc(sizeof(unsigned long *) * (max_k - min_k + 1));
  *array -= min_k;
  for (i = min_k; i <= max_k; i++) {
    mem = (max_l[i] - min_l[i] + 1) / 2 + 1;
    (*array)[i] = (unsigned long *)vrna_alloc(sizeof(unsigned long) * mem);
    (*array)[i] -= min_l[i] / 2;
  }
}

/*
 #################################
 # OLD API support               #
 #################################
 */

/* crosslink data from vars->compatibility to TwoDfold_vars structure:
 * copy the fields/pointers of the new-API vrna_fold_compound_t (and its
 * MFE matrices) into the legacy TwoDfold_vars view, so old-API callers
 * see the current state. No ownership is transferred. */
PRIVATE INLINE void
crosslink(TwoDfold_vars *vars)
{
  vrna_fold_compound_t *c;
  vrna_mx_mfe_t *m;

  c = vars->compatibility;
  m = c->matrices;
  vars->sequence = c->sequence;
  vars->seq_length = c->length;
  vars->reference_pt1 = c->reference_pt1;
  vars->reference_pt2 = c->reference_pt2;
  vars->referenceBPs1 = c->referenceBPs1;
  vars->referenceBPs2 = c->referenceBPs2;
  vars->bpdist = c->bpdist;
  vars->do_backtrack = 1;
  vars->dangles = c->params->model_details.dangles;
  vars->circ = c->params->model_details.circ;
  vars->temperature = c->params->model_details.temperature;
  vars->ptype = c->ptype_pf_compat;
  vars->P = c->params;
  vars->S = c->sequence_encoding2;
  vars->S1 = c->sequence_encoding;
  vars->my_iindx = c->iindx;
  vars->mm1 = c->mm1;
  vars->mm2 = c->mm2;
  vars->maxD1 =
c->maxD1;  /* (cont.) maximum distance classes to reference structures */
  vars->maxD2 = c->maxD2;
  /* energy matrices and their (k,l) index boundaries, per matrix family */
  vars->E_C = m->E_C;
  vars->l_min_values = m->l_min_C;
  vars->l_max_values = m->l_max_C;
  vars->k_min_values = m->k_min_C;
  vars->k_max_values = m->k_max_C;
  vars->E_F5 = m->E_F5;
  vars->l_min_values_f = m->l_min_F5;
  vars->l_max_values_f = m->l_max_F5;
  vars->k_min_values_f = m->k_min_F5;
  vars->k_max_values_f = m->k_max_F5;
  vars->E_F3 = m->E_F3;
  vars->l_min_values_f3 = m->l_min_F3;
  vars->l_max_values_f3 = m->l_max_F3;
  vars->k_min_values_f3 = m->k_min_F3;
  vars->k_max_values_f3 = m->k_max_F3;
  vars->E_M = m->E_M;
  vars->l_min_values_m = m->l_min_M;
  vars->l_max_values_m = m->l_max_M;
  vars->k_min_values_m = m->k_min_M;
  vars->k_max_values_m = m->k_max_M;
  vars->E_M1 = m->E_M1;
  vars->l_min_values_m1 = m->l_min_M1;
  vars->l_max_values_m1 = m->l_max_M1;
  vars->k_min_values_m1 = m->k_min_M1;
  vars->k_max_values_m1 = m->k_max_M1;
#ifdef COUNT_STATES
  /* optional state counters, compiled in only with COUNT_STATES */
  vars->N_C = m->N_C;
  vars->N_F5 = m->N_F5;
  vars->N_M = m->N_M;
  vars->N_M1 = m->N_M1;
#endif
  vars->E_M2_rem = m->E_M2_rem;
  vars->E_M2 = m->E_M2;
  vars->l_min_values_m2 = m->l_min_M2;
  vars->l_max_values_m2 = m->l_max_M2;
  vars->k_min_values_m2 = m->k_min_M2;
  vars->k_max_values_m2 = m->k_max_M2;
  /* circular-fold matrices and "remainder" (out-of-window) energies */
  vars->E_Fc = m->E_Fc;
  vars->E_FcH = m->E_FcH;
  vars->E_FcI = m->E_FcI;
  vars->E_FcM = m->E_FcM;
  vars->E_Fc_rem = m->E_Fc_rem;
  vars->E_FcH_rem = m->E_FcH_rem;
  vars->E_FcI_rem = m->E_FcI_rem;
  vars->E_FcM_rem = m->E_FcM_rem;
  vars->E_C_rem = m->E_C_rem;
  vars->E_M_rem = m->E_M_rem;
  vars->E_M1_rem = m->E_M1_rem;
  vars->E_F5_rem = m->E_F5_rem;
}

/*
 * Old-API constructor: build a TwoDfold_vars wrapper around a new-API
 * fold compound created for the two reference structures. The returned
 * object owns the fold compound and must be released with
 * destroy_TwoDfold_variables().
 */
PUBLIC TwoDfold_vars *
get_TwoDfold_variables(const char *seq, const char *structure1, const char *structure2, int circ)
{
  vrna_md_t md;
  TwoDfold_vars *vars;

  set_model_details(&md);
  md.circ = circ;
  vars = (TwoDfold_vars *)vrna_alloc(sizeof(TwoDfold_vars));
  vars->compatibility = vrna_fold_compound_TwoD(seq, structure1, structure2, &md, VRNA_OPTION_MFE);
  crosslink(vars);
  return vars;
}

/*
 * Old-API backtracking wrapper; delegates to vrna_backtrack5_TwoD()
 * (note the argument-order change: j moves to the last position).
 */
PUBLIC char *
TwoDfold_backtrack_f5(unsigned int j, int k, int l, TwoDfold_vars *vars)
{
  return
vrna_backtrack5_TwoD(vars->compatibility, k, l, j);
}


/* Dispose of a TwoDfold_vars wrapper together with the fold compound it
 * owns. Passing NULL is a no-op. */
PUBLIC void
destroy_TwoDfold_variables(TwoDfold_vars *vars)
{
  if (!vars)
    return;

  vrna_fold_compound_free(vars->compatibility);
  free(vars);
}


/* Old-API entry point for the 2D MFE computation: delegate to
 * vrna_mfe_TwoD() and refresh the legacy view of the matrices before
 * handing the solution list back to the caller. */
PUBLIC vrna_sol_TwoD_t *
TwoDfoldList(TwoDfold_vars *vars, int distance1, int distance2)
{
  vrna_sol_TwoD_t *solutions = vrna_mfe_TwoD(vars->compatibility, distance1, distance2);

  crosslink(vars);
  return solutions;
}


/* Re-derive the energy parameters from the current global model settings,
 * replace the fold compound's parameter set, and refresh the legacy view. */
PUBLIC void
update_TwoDfold_params(TwoDfold_vars *vars)
{
  vrna_md_t md_defaults;

  set_model_details(&md_defaults);
  free(vars->compatibility->params);
  vars->compatibility->params = vrna_params(&md_defaults);
  crosslink(vars);
}
/* ===== extraction artifact: everything below belongs to a second concatenated file, "Sema.h" (clang C++ header) ===== */
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/ASTConcept.h" #include "clang/AST/ASTFwd.h" #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprConcepts.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExprOpenMP.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtOpenMP.h" #include "clang/AST/TypeLoc.h" #include "clang/APINotes/APINotesManager.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/Builtins.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenCLOptions.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include 
"clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/SemaConcept.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include <deque> #include <functional> #include <memory> #include <string> #include <tuple> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class DarwinSDKInfo; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class 
FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; enum class OverloadCandidateParamOrder : char; enum OverloadCandidateRewriteKind : unsigned; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; 
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;

namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
} // namespace sema

namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
} // namespace threadSafety

// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
        SourceLocation> UnexpandedParameterPack;

/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
  /// The first pointer declarator (of any pointer kind) in the file that does
  /// not have a corresponding nullability annotation.
  SourceLocation PointerLoc;

  /// The end location for the first pointer declarator in the file. Used for
  /// placing fix-its.
  SourceLocation PointerEndLoc;

  /// Which kind of pointer declarator we saw.
  uint8_t PointerKind;

  /// Whether we saw any type nullability annotations in the given file.
  bool SawTypeNullability = false;
};

/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
  /// A mapping from file IDs to the nullability information for each file ID.
  llvm::DenseMap<FileID, FileNullability> Map;

  /// A single-element cache based on the file ID.
  struct {
    FileID File;
    FileNullability Nullability;
  } Cache;

public:
  FileNullability &operator[](FileID file) {
    // Check the single-element cache.
    if (file == Cache.File)
      return Cache.Nullability;

    // It's not in the single-element cache; flush the cache if we have one.
    if (!Cache.File.isInvalid()) {
      Map[Cache.File] = Cache.Nullability;
    }

    // Pull this entry into the cache.
    Cache.File = file;
    Cache.Nullability = Map[file];
    return Cache.Nullability;
  }
};

/// Tracks expected type during expression parsing, for use in code completion.
/// The type is tied to a particular token, all functions that update or consume
/// the type take a start location of the token they are looking at as a
/// parameter. This avoids updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
  PreferredTypeBuilder(bool Enabled) : Enabled(Enabled) {}

  void enterCondition(Sema &S, SourceLocation Tok);
  void enterReturn(Sema &S, SourceLocation Tok);
  void enterVariableInit(SourceLocation Tok, Decl *D);
  /// Handles e.g. BaseType{ .D = Tok...
  void enterDesignatedInitializer(SourceLocation Tok, QualType BaseType,
                                  const Designation &D);
  /// Computing a type for the function argument may require running
  /// overloading, so we postpone its computation until it is actually needed.
  ///
  /// Clients should be very careful when using this function, as it stores a
  /// function_ref, clients should make sure all calls to get() with the same
  /// location happen while function_ref is alive.
  ///
  /// The callback should also emit signature help as a side-effect, but only
  /// if the completion point has been reached.
  void enterFunctionArgument(SourceLocation Tok,
                             llvm::function_ref<QualType()> ComputeType);

  void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
  void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
                  SourceLocation OpLoc);
  void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
  void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
  void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
  /// Handles all type casts, including C-style cast, C++ casts, etc.
  void enterTypeCast(SourceLocation Tok, QualType CastType);

  /// Get the expected type associated with this location, if any.
  ///
  /// If the location is a function argument, determining the expected type
  /// involves considering all function overloads and the arguments so far.
  /// In this case, signature help for these function overloads will be reported
  /// as a side-effect (only if the completion point has been reached).
  QualType get(SourceLocation Tok) const {
    if (!Enabled || Tok != ExpectedLoc)
      return QualType();
    if (!Type.isNull())
      return Type;
    if (ComputeType)
      return ComputeType();
    return QualType();
  }

private:
  bool Enabled;
  /// Start position of a token for which we store expected type.
  SourceLocation ExpectedLoc;
  /// Expected type for a token starting at ExpectedLoc.
  QualType Type;
  /// A function to compute expected type at ExpectedLoc. It is only considered
  /// if Type is null.
  llvm::function_ref<QualType()> ComputeType;
};

/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
  Sema(const Sema &) = delete;
  void operator=(const Sema &) = delete;

  ///Source of additional semantic information.
  ExternalSemaSource *ExternalSource;

  ///Whether Sema has generated a multiplexer and has to delete it.
  bool isMultiplexExternalSource;

  static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);

  bool isVisibleSlow(const NamedDecl *D);

  /// Determine whether two declarations should be linked together, given that
  /// the old declaration might not be visible and the new declaration might
  /// not have external linkage.
  bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
                                    const NamedDecl *New) {
    if (isVisible(Old))
      return true;

    // See comment in below overload for why it's safe to compute the linkage
    // of the new declaration here.
if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: /// The maximum alignment, same as in llvm::Value. We duplicate them here /// because that allows us not to duplicate the constants in clang code, /// which we must to since we can't directly use the llvm constants. /// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp /// /// This is the greatest alignment value supported by load, store, and alloca /// instructions, and global values. static const unsigned MaxAlignmentExponent = 29; static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent; typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions CurFPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; api_notes::APINotesManager APINotes; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. 
LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// Holds TypoExprs that are created from `createDelayedTypo`. This is used by /// `TransformTypos` in order to keep track of any TypoExprs that are created /// recursively during typo correction and wipe them away if the correction /// fails. llvm::SmallVector<TypoExpr *, 2> TypoExprs; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4, PCSK_Relro = 5 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangRelroSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; // #pragma pack and align. class AlignPackInfo { public: // `Native` represents default align mode, which may vary based on the // platform. 
enum Mode : unsigned char { Native, Natural, Packed, Mac68k }; // #pragma pack info constructor AlignPackInfo(AlignPackInfo::Mode M, unsigned Num, bool IsXL) : PackAttr(true), AlignMode(M), PackNumber(Num), XLStack(IsXL) { assert(Num == PackNumber && "The pack number has been truncated."); } // #pragma align info constructor AlignPackInfo(AlignPackInfo::Mode M, bool IsXL) : PackAttr(false), AlignMode(M), PackNumber(M == Packed ? 1 : UninitPackVal), XLStack(IsXL) {} explicit AlignPackInfo(bool IsXL) : AlignPackInfo(Native, IsXL) {} AlignPackInfo() : AlignPackInfo(Native, false) {} // When a AlignPackInfo itself cannot be used, this returns an 32-bit // integer encoding for it. This should only be passed to // AlignPackInfo::getFromRawEncoding, it should not be inspected directly. static uint32_t getRawEncoding(const AlignPackInfo &Info) { std::uint32_t Encoding{}; if (Info.IsXLStack()) Encoding |= IsXLMask; Encoding |= static_cast<uint32_t>(Info.getAlignMode()) << 1; if (Info.IsPackAttr()) Encoding |= PackAttrMask; Encoding |= static_cast<uint32_t>(Info.getPackNumber()) << 4; return Encoding; } static AlignPackInfo getFromRawEncoding(unsigned Encoding) { bool IsXL = static_cast<bool>(Encoding & IsXLMask); AlignPackInfo::Mode M = static_cast<AlignPackInfo::Mode>((Encoding & AlignModeMask) >> 1); int PackNumber = (Encoding & PackNumMask) >> 4; if (Encoding & PackAttrMask) return AlignPackInfo(M, PackNumber, IsXL); return AlignPackInfo(M, IsXL); } bool IsPackAttr() const { return PackAttr; } bool IsAlignAttr() const { return !PackAttr; } Mode getAlignMode() const { return AlignMode; } unsigned getPackNumber() const { return PackNumber; } bool IsPackSet() const { // #pragma align, #pragma pack(), and #pragma pack(0) do not set the pack // attriute on a decl. 
return PackNumber != UninitPackVal && PackNumber != 0; } bool IsXLStack() const { return XLStack; } bool operator==(const AlignPackInfo &Info) const { return std::tie(AlignMode, PackNumber, PackAttr, XLStack) == std::tie(Info.AlignMode, Info.PackNumber, Info.PackAttr, Info.XLStack); } bool operator!=(const AlignPackInfo &Info) const { return !(*this == Info); } private: /// \brief True if this is a pragma pack attribute, /// not a pragma align attribute. bool PackAttr; /// \brief The alignment mode that is in effect. Mode AlignMode; /// \brief The pack number of the stack. unsigned char PackNumber; /// \brief True if it is a XL #pragma align/pack stack. bool XLStack; /// \brief Uninitialized pack value. static constexpr unsigned char UninitPackVal = -1; // Masks to encode and decode an AlignPackInfo. static constexpr uint32_t IsXLMask{0x0000'0001}; static constexpr uint32_t AlignModeMask{0x0000'0006}; static constexpr uint32_t PackAttrMask{0x00000'0008}; static constexpr uint32_t PackNumMask{0x0000'01F0}; }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value) { if (Action == PSK_Reset) { CurrentValue = DefaultValue; CurrentPragmaLocation = PragmaLocation; return; } if (Action & PSK_Push) Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation, PragmaLocation); else if (Action & PSK_Pop) { if (!StackSlotLabel.empty()) { // If we've got a label, try to find it and jump there. 
auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) { return x.StackSlotLabel == StackSlotLabel; }); // If we found the label so pop from there. if (I != Stack.rend()) { CurrentValue = I->Value; CurrentPragmaLocation = I->PragmaLocation; Stack.erase(std::prev(I.base()), Stack.end()); } } else if (!Stack.empty()) { // We do not have a label, just pop the last entry. CurrentValue = Stack.back().Value; CurrentPragmaLocation = Stack.back().PragmaLocation; Stack.pop_back(); } } if (Action & PSK_Set) { CurrentValue = Value; CurrentPragmaLocation = PragmaLocation; } } // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. 
Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispMode> VtorDispStack; PragmaStack<AlignPackInfo> AlignPackStack; // The current #pragma align/pack values and locations at each #include. struct AlignPackIncludeState { AlignPackInfo CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<AlignPackIncludeState, 8> AlignPackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // This stack tracks the current state of Sema.CurFPFeatures. PragmaStack<FPOptionsOverride> FpPragmaStack; FPOptionsOverride CurFPFeatureOverrides() { FPOptionsOverride result; if (!FpPragmaStack.hasValue()) { result = FPOptionsOverride(); } else { result = FpPragmaStack.CurrentValue; } return result; } // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This an attribute introduced by \#pragma clang attribute. 
struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. 
using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>, llvm::SmallPtrSet<Expr *, 4>>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; /// The index of the first FunctionScope that corresponds to the current /// context. unsigned FunctionScopesStart = 0; ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const { return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart, FunctionScopes.end()); } /// Stack containing information needed when in C++2a an 'auto' is encountered /// in a function declaration parameter type specifier in order to invent a /// corresponding template parameter in the enclosing abbreviated function /// template. This information is also present in LambdaScopeInfo, stored in /// the FunctionScopes stack. SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos; /// The index of the first InventedParameterInfo that refers to the current /// context. unsigned InventedParameterInfosStart = 0; ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const { return llvm::makeArrayRef(InventedParameterInfos.begin() + InventedParameterInfosStart, InventedParameterInfos.end()); } typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. 
std::unique_ptr<CXXFieldCollector> FieldCollector;

  /// Set type used for the unused-private-field tracking below.
  typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;

  /// Set containing all declared private fields that are not used.
  NamedDeclSetType UnusedPrivateFields;

  /// Set containing all typedefs that are likely unused.
  llvm::SmallSetVector<const TypedefNameDecl *, 4>
      UnusedLocalTypedefNameCandidates;

  /// Delete-expressions to be analyzed at the end of translation unit
  ///
  /// This list contains class members, and locations of delete-expressions
  /// that could not be proven as to whether they mismatch with new-expression
  /// used in initializer of the field.
  typedef std::pair<SourceLocation, bool> DeleteExprLoc;
  typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
  llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;

  /// Set of C++ record declarations; used by PureVirtualClassDiagSet below.
  typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;

  /// PureVirtualClassDiagSet - a set of class declarations which we have
  /// emitted a list of pure virtual functions. Used to prevent emitting the
  /// same list more than once.
  std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;

  /// ParsingInitForAutoVars - a set of declarations with auto types for which
  /// we are currently parsing the initializer.
  llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;

  /// Look for a locally scoped extern "C" declaration by the given name.
  NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);

  // LazyVector: elements may be deserialized on demand from the external
  // source via ExternalSemaSource::ReadTentativeDefinitions.
  typedef LazyVector<VarDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
    TentativeDefinitionsType;

  /// All the tentative definitions encountered in the TU.
  TentativeDefinitionsType TentativeDefinitions;

  /// All the external declarations encountered and used in the TU.
  SmallVector<VarDecl *, 4> ExternalDeclarations;

  // LazyVector: populated lazily via
  // ExternalSemaSource::ReadUnusedFileScopedDecls.
  typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
    UnusedFileScopedDeclsType;

  /// The set of file scoped decls seen so far that have not been used
  /// and must warn if not used.
Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } /// \brief Callback to the parser to parse a type expressed as a string. std::function<TypeResult(StringRef, StringRef, SourceLocation)> ParseTypeFromStringCallback; // Does the work necessary to deal with a SYCL kernel lambda. 
At the moment, // this just marks the list of lambdas required to name the kernel. void AddSYCLKernelLambda(const FunctionDecl *FD); class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. 
class ContextRAII {
  private:
    Sema &S;
    DeclContext *SavedContext;
    ProcessingContextState SavedContextState;
    QualType SavedCXXThisTypeOverride;
    unsigned SavedFunctionScopesStart;
    unsigned SavedInventedParameterInfosStart;

  public:
    /// Push \p ContextToPush as the current DeclContext, saving the previous
    /// context, delayed-diagnostics state, CXXThisTypeOverride, and the
    /// FunctionScopes / InventedParameterInfos start indices so they can all
    /// be restored by pop() (or the destructor).
    ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
        : S(S), SavedContext(S.CurContext),
          SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
          SavedCXXThisTypeOverride(S.CXXThisTypeOverride),
          SavedFunctionScopesStart(S.FunctionScopesStart),
          SavedInventedParameterInfosStart(S.InventedParameterInfosStart) {
      assert(ContextToPush && "pushing null context");
      S.CurContext = ContextToPush;
      if (NewThisContext)
        S.CXXThisTypeOverride = QualType();
      // Any saved FunctionScopes do not refer to this context.
      S.FunctionScopesStart = S.FunctionScopes.size();
      S.InventedParameterInfosStart = S.InventedParameterInfos.size();
    }

    /// Restore all saved state. Idempotent: SavedContext is nulled after the
    /// first call, so a subsequent call (including the destructor's) is a
    /// no-op.
    void pop() {
      if (!SavedContext) return;
      S.CurContext = SavedContext;
      S.DelayedDiagnostics.popUndelayed(SavedContextState);
      S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
      S.FunctionScopesStart = SavedFunctionScopesStart;
      S.InventedParameterInfosStart = SavedInventedParameterInfosStart;
      SavedContext = nullptr;
    }

    ~ContextRAII() {
      pop();
    }
  };

  /// Whether the AST is currently being rebuilt to correct immediate
  /// invocations. Immediate invocation candidates and references to consteval
  /// functions aren't tracked when this is set.
  bool RebuildingImmediateInvocation = false;

  /// Used to change context to isConstantEvaluated without pushing a heavy
  /// ExpressionEvaluationContextRecord object.
  bool isConstantEvaluatedOverride;

  // True if the innermost expression evaluation context is constant-evaluated,
  // or if the lightweight override flag above is set.
  bool isConstantEvaluated() {
    return ExprEvalContexts.back().isConstantEvaluated() ||
           isConstantEvaluatedOverride;
  }

  /// RAII object to handle the state changes required to synthesize
  /// a function body.
class SynthesizedFunctionScope {
    Sema &S;
    Sema::ContextRAII SavedContext;
    bool PushedCodeSynthesisContext = false;

  public:
    /// Enter \p DC (a FunctionDecl or ObjCMethodDecl) as the current context,
    /// push a function scope and a PotentiallyEvaluated expression evaluation
    /// context, and mark a FunctionDecl as about to receive a body.
    SynthesizedFunctionScope(Sema &S, DeclContext *DC)
        : S(S), SavedContext(S, DC) {
      S.PushFunctionScope();
      S.PushExpressionEvaluationContext(
          Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
      if (auto *FD = dyn_cast<FunctionDecl>(DC))
        FD->setWillHaveBody(true);
      else
        assert(isa<ObjCMethodDecl>(DC));
    }

    /// Optionally record a DefiningSynthesizedFunction code-synthesis context
    /// so diagnostics can point at \p UseLoc. May be called at most once.
    void addContextNote(SourceLocation UseLoc) {
      assert(!PushedCodeSynthesisContext);

      Sema::CodeSynthesisContext Ctx;
      Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
      Ctx.PointOfInstantiation = UseLoc;
      Ctx.Entity = cast<Decl>(S.CurContext);
      S.pushCodeSynthesisContext(Ctx);

      PushedCodeSynthesisContext = true;
    }

    // Tear down in reverse order of construction: synthesis context (if
    // pushed), WillHaveBody flag, evaluation context, then function scope.
    // SavedContext's destructor restores the DeclContext afterwards.
    ~SynthesizedFunctionScope() {
      if (PushedCodeSynthesisContext)
        S.popCodeSynthesisContext();
      if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
        FD->setWillHaveBody(false);
      S.PopExpressionEvaluationContext();
      S.PopFunctionScopeInfo();
    }
  };

  /// WeakUndeclaredIdentifiers - Identifiers contained in
  /// \#pragma weak before declared. rare. may alias another
  /// identifier, declared or undeclared
  llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;

  /// ExtnameUndeclaredIdentifiers - Identifiers contained in
  /// \#pragma redefine_extname before declared. Used in Solaris system headers
  /// to define functions that occur in multiple standards to call the version
  /// in the currently selected standard.
  llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;

  /// Load weak undeclared identifiers from the external source.
  void LoadExternalWeakUndeclaredIdentifiers();

  /// WeakTopLevelDecl - Translation-unit scoped declarations generated by
  /// \#pragma weak during processing of other Decls.
  /// I couldn't figure out a clean way to generate these in-line, so
  /// we store them here and handle separately -- which is a hack.
  /// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library resides. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits> ClassTemplateDecl *StdCoroutineTraitsCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. 
ObjCMethodDecl *StringWithUTF8StringMethod; /// The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// id<NSCopying> type. QualType QIDNSCopying; /// will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum class ExpressionEvaluationContext { /// The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. DiscardedStatement, /// The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. 
UnevaluatedAbstract, /// The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>; /// Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// The expression evaluation context. ExpressionEvaluationContext Context; /// Whether the enclosing context needed a cleanup. CleanupInfo ParentCleanup; /// The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; MaybeODRUseExprSet SavedMaybeODRUseExprs; /// The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. 
Decl *ManglingContextDecl; /// If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs; /// Expressions appearing as the LHS of a volatile assignment in this /// context. We produce a warning for these when popping the context if /// they are not discarded-value expressions nor unevaluated operands. SmallVector<Expr*, 2> VolatileAssignmentLHSs; /// Set of candidates for starting an immediate invocation. llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates; /// Set of DeclRefExprs referencing a consteval function when used in a /// context not already known to be immediately invoked. llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval; /// \brief Describes whether we are in an expression constext which we have /// to handle differently. enum ExpressionKind { EK_Decltype, EK_TemplateArgument, EK_Other } ExprContext; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, CleanupInfo ParentCleanup, Decl *ManglingContextDecl, ExpressionKind ExprContext) : Context(Context), ParentCleanup(ParentCleanup), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {} bool isUnevaluated() const { return Context == ExpressionEvaluationContext::Unevaluated || Context == ExpressionEvaluationContext::UnevaluatedAbstract || Context == ExpressionEvaluationContext::UnevaluatedList; } bool isConstantEvaluated() const { return Context == ExpressionEvaluationContext::ConstantEvaluated; } }; /// A stack of expression evaluation contexts. 
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;

  /// Emit a warning for all pending noderef expressions that we recorded.
  void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);

  /// Compute the mangling number context for a lambda expression or
  /// block literal. Also return the extra mangling decl if any.
  ///
  /// \param DC - The DeclContext containing the lambda expression or
  /// block literal.
  std::tuple<MangleNumberingContext *, Decl *>
  getCurrentMangleNumberContext(const DeclContext *DC);

  /// SpecialMemberOverloadResult - The overloading result for a special member
  /// function.
  ///
  /// This is basically a wrapper around PointerIntPair. The lowest bits of the
  /// integer are used to determine whether overload resolution succeeded.
  class SpecialMemberOverloadResult {
  public:
    enum Kind {
      NoMemberOrDeleted,
      Ambiguous,
      Success
    };

  private:
    // Pointer: the resolved method (or null). Low 2 bits: the Kind above.
    llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;

  public:
    SpecialMemberOverloadResult() : Pair() {}
    // Convenience: a deleted method is recorded as NoMemberOrDeleted.
    SpecialMemberOverloadResult(CXXMethodDecl *MD)
        : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}

    CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
    void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

    Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
    void setKind(Kind K) { Pair.setInt(K); }
  };

  /// A SpecialMemberOverloadResult that can live in a FoldingSet, keyed by
  /// the FoldingSetNodeID it was created with; used by SpecialMemberCache.
  class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode,
                                           public SpecialMemberOverloadResult {
  public:
    SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
        : FastFoldingSetNode(ID) {}
  };

  /// A cache of special member function overload resolution results
  /// for C++ records.
  llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;

  /// A cache of the flags available in enumerations with the flag_bits
  /// attribute.
  mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;

  /// The kind of translation unit we are processing.
/// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. const TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. 
const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// List of SourceLocations where 'self' is implicitly retained inside a /// block. llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1> ImplicitlyRetainedSelfLocs; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// Kinds of defaulted comparison operator functions. enum class DefaultedComparisonKind : unsigned char { /// This is not a defaultable comparison operator. None, /// This is an operator== that should be implemented as a series of /// subobject comparisons. Equal, /// This is an operator<=> that should be implemented as a series of /// subobject comparisons. 
ThreeWay, /// This is an operator!= that should be implemented as a rewrite in terms /// of a == comparison. NotEqual, /// This is an <, <=, >, or >= that should be implemented as a rewrite in /// terms of a <=> comparison. Relational, }; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations. We want to keep track of them /// to ensure that we don't emit a "redefinition" error if we encounter a /// correctly named definition after the renamed definition. llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions; /// Stack of types that correspond to the parameter entities that are /// currently being copy-initialized. Can be empty. llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes; void ReadMethodPool(Selector Sel); void updateOutOfDateSelector(Selector Sel); /// Private Helper predicate to check for 'self'. bool isSelfExpr(Expr *RExpr); bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method); /// Cause the active diagnostic on the DiagosticsEngine to be /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and /// should not be used elsewhere. void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the CurFPFeatures state on entry/exit of compound /// statements. class FPFeaturesStateRAII { public: FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.CurFPFeatures) { OldOverrides = S.FpPragmaStack.CurrentValue; } ~FPFeaturesStateRAII() { S.CurFPFeatures = OldFPFeaturesState; S.FpPragmaStack.CurrentValue = OldOverrides; } FPOptionsOverride getOverrides() { return OldOverrides; } private: Sema& S; FPOptions OldFPFeaturesState; FPOptionsOverride OldOverrides; }; void addImplicitTypedef(StringRef Name, QualType T); bool WarnedStackExhausted = false; /// Increment when we find a reference; decrement when we find an ignored /// assignment. Ultimately the value is 0 if every reference is an ignored /// assignment. 
llvm::DenseMap<const VarDecl *, int> RefsMinusAssignments; Optional<std::unique_ptr<DarwinSDKInfo>> CachedDarwinSDKInfo; public: Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind = TU_Complete, CodeCompleteConsumer *CompletionConsumer = nullptr); ~Sema(); /// Perform initialization that occurs after the parser has been /// initialized but before it parses anything. void Initialize(); /// This virtual key function only exists to limit the emission of debug info /// describing the Sema class. GCC and Clang only emit debug info for a class /// with a vtable when the vtable is emitted. Sema is final and not /// polymorphic, but the debug info size savings are so significant that it is /// worth adding a vtable just to take advantage of this optimization. virtual void anchor(); const LangOptions &getLangOpts() const { return LangOpts; } OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; } FPOptions &getCurFPFeatures() { return CurFPFeatures; } DiagnosticsEngine &getDiagnostics() const { return Diags; } SourceManager &getSourceManager() const { return SourceMgr; } Preprocessor &getPreprocessor() const { return PP; } ASTContext &getASTContext() const { return Context; } ASTConsumer &getASTConsumer() const { return Consumer; } ASTMutationListener *getASTMutationListener() const; ExternalSemaSource* getExternalSource() const { return ExternalSource; } DarwinSDKInfo *getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc, StringRef Platform); ///Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it. /// ///\param[in] E - A non-null external sema source. /// void addExternalSource(ExternalSemaSource *E); void PrintStats() const; /// Warn that the stack is nearly exhausted. void warnStackExhausted(SourceLocation Loc); /// Run some code with "sufficient" stack space. (Currently, at least 256K is /// guaranteed). 
Produces a warning if we're low on stack space and allocates /// more in that case. Use this in code that may recurse deeply (for example, /// in template instantiation) to avoid stack overflow. void runWithSufficientStackSpace(SourceLocation Loc, llvm::function_ref<void()> Fn); /// Helper class that creates diagnostics with optional /// template instantiation stacks. /// /// This class provides a wrapper around the basic DiagnosticBuilder /// class that emits diagnostics. ImmediateDiagBuilder is /// responsible for emitting the diagnostic (as DiagnosticBuilder /// does) and, if the diagnostic comes from inside a template /// instantiation, printing the template instantiation stack as /// well. class ImmediateDiagBuilder : public DiagnosticBuilder { Sema &SemaRef; unsigned DiagID; public: ImmediateDiagBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {} ImmediateDiagBuilder(DiagnosticBuilder &&DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {} // This is a cunning lie. DiagnosticBuilder actually performs move // construction in its copy constructor (but due to varied uses, it's not // possible to conveniently express this as actual move construction). So // the default copy ctor here is fine, because the base class disables the // source anyway, so the user-defined ~ImmediateDiagBuilder is a safe no-op // in that case anwyay. ImmediateDiagBuilder(const ImmediateDiagBuilder &) = default; ~ImmediateDiagBuilder() { // If we aren't active, there is nothing to do. if (!isActive()) return; // Otherwise, we need to emit the diagnostic. First clear the diagnostic // builder itself so it won't emit the diagnostic in its own destructor. // // This seems wasteful, in that as written the DiagnosticBuilder dtor will // do its own needless checks to see if the diagnostic needs to be // emitted. 
// However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
Clear();

// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}

/// Teach operator<< to produce an object of the correct type.
template <typename T>
friend const ImmediateDiagBuilder &
operator<<(const ImmediateDiagBuilder &Diag, const T &Value) {
  const DiagnosticBuilder &BaseDiag = Diag;
  BaseDiag << Value;
  return Diag;
}

// It is necessary to limit this to rvalue reference to avoid calling this
// function with a bitfield lvalue argument since non-const reference to
// bitfield is not allowed.
template <typename T, typename = typename std::enable_if<
                          !std::is_lvalue_reference<T>::value>::type>
const ImmediateDiagBuilder &operator<<(T &&V) const {
  const DiagnosticBuilder &BaseDiag = *this;
  BaseDiag << std::move(V);
  return *this;
}
};

/// A generic diagnostic builder for errors which may or may not be deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class SemaDiagnosticBuilder {
public:
  enum Kind {
    /// Emit no diagnostics.
    K_Nop,
    /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
    K_Immediate,
    /// Emit the diagnostic immediately, and, if it's a warning or error, also
    /// emit a call stack showing how this function can be reached by an a
    /// priori known-emitted function.
    K_ImmediateWithCallStack,
    /// Create a deferred diagnostic, which is emitted only if the function
    /// it's attached to is codegen'ed. Also emit a call stack as with
    /// K_ImmediateWithCallStack.
    K_Deferred
  };

  SemaDiagnosticBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
                        FunctionDecl *Fn, Sema &S);
  SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D);
  SemaDiagnosticBuilder(const SemaDiagnosticBuilder &) = default;
  ~SemaDiagnosticBuilder();

  /// True if this builder is emitting a diagnostic right away rather than
  /// deferring (or suppressing) it.
  bool isImmediate() const { return ImmediateDiag.hasValue(); }

  /// Convertible to bool: True if we immediately emitted an error, false if
  /// we didn't emit an error or we created a deferred error.
  ///
  /// Example usage:
  ///
  ///   if (SemaDiagnosticBuilder(...) << foo << bar)
  ///     return ExprError();
  ///
  /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
  /// want to use these instead of creating a SemaDiagnosticBuilder yourself.
  operator bool() const { return isImmediate(); }

  /// Stream a value either into the immediate diagnostic or into the
  /// deferred diagnostic stored on the function, whichever is active.
  template <typename T>
  friend const SemaDiagnosticBuilder &
  operator<<(const SemaDiagnosticBuilder &Diag, const T &Value) {
    if (Diag.ImmediateDiag.hasValue())
      *Diag.ImmediateDiag << Value;
    else if (Diag.PartialDiagId.hasValue())
      Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
          << Value;
    return Diag;
  }

  // It is necessary to limit this to rvalue reference to avoid calling this
  // function with a bitfield lvalue argument since non-const reference to
  // bitfield is not allowed.
  template <typename T, typename = typename std::enable_if<
                            !std::is_lvalue_reference<T>::value>::type>
  const SemaDiagnosticBuilder &operator<<(T &&V) const {
    if (ImmediateDiag.hasValue())
      *ImmediateDiag << std::move(V);
    else if (PartialDiagId.hasValue())
      S.DeviceDeferredDiags[Fn][*PartialDiagId].second << std::move(V);
    return *this;
  }

  friend const SemaDiagnosticBuilder &
  operator<<(const SemaDiagnosticBuilder &Diag, const PartialDiagnostic &PD) {
    if (Diag.ImmediateDiag.hasValue())
      PD.Emit(*Diag.ImmediateDiag);
    else if (Diag.PartialDiagId.hasValue())
      Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second = PD;
    return Diag;
  }

  void AddFixItHint(const FixItHint &Hint) const {
    if (ImmediateDiag.hasValue())
      ImmediateDiag->AddFixItHint(Hint);
    else if (PartialDiagId.hasValue())
      S.DeviceDeferredDiags[Fn][*PartialDiagId].second.AddFixItHint(Hint);
  }

  friend ExprResult ExprError(const SemaDiagnosticBuilder &) {
    return ExprError();
  }
  friend StmtResult StmtError(const SemaDiagnosticBuilder &) {
    return StmtError();
  }
  operator ExprResult() const { return ExprError(); }
  operator StmtResult() const { return StmtError(); }
  operator TypeResult() const { return TypeError(); }
  operator DeclResult() const { return DeclResult(true); }
  operator MemInitResult() const { return MemInitResult(true); }

private:
  Sema &S;
  SourceLocation Loc;
  unsigned DiagID;
  FunctionDecl *Fn;
  bool ShowCallStack;

  // Invariant: At most one of these Optionals has a value.
  // FIXME: Switch these to a Variant once that exists.
  llvm::Optional<ImmediateDiagBuilder> ImmediateDiag;
  llvm::Optional<unsigned> PartialDiagId;
};

/// Is the last error level diagnostic immediate. This is used to determine
/// whether the next info diagnostic should be immediate.
bool IsLastErrorImmediate = true;

/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID,
                           bool DeferHint = false);

/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic &PD,
                           bool DeferHint = false);

/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

/// Whether deferrable diagnostics should be deferred.
bool DeferDiags = false;

/// RAII class to control scope of DeferDiags.
class DeferDiagsRAII {
  Sema &S;
  bool SavedDeferDiags = false;

public:
  DeferDiagsRAII(Sema &S, bool DeferDiags)
      : S(S), SavedDeferDiags(S.DeferDiags) {
    S.DeferDiags = DeferDiags;
  }
  ~DeferDiagsRAII() { S.DeferDiags = SavedDeferDiags; }
};

/// Whether an uncompilable error has occurred. This includes errors that
/// happen in deferred diagnostics.
bool hasUncompilableErrorOccurred() const;

bool findMacroSpelling(SourceLocation &loc, StringRef name);

/// Get a string to suggest for zero-initialization of a type.
std::string getFixItZeroInitializerForType(QualType T,
                                           SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;

/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);

/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;

/// Invent a new identifier for parameters of abbreviated templates.
IdentifierInfo *
InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
                                           unsigned Index);

void emitAndClearUnusedLocalTypedefWarnings();

private:
/// Function or variable declarations to be checked for whether the deferred
/// diagnostics should be emitted.
llvm::SmallSetVector<Decl *, 4> DeclsToCheckForDeferredDiags;

public:
// Emit all deferred diagnostics.
void emitDeferredDiags();

enum TUFragmentKind {
  /// The global module fragment, between 'module;' and a module-declaration.
  Global,
  /// A normal translation unit fragment. For a non-module unit, this is the
  /// entire translation unit. Otherwise, it runs from the module-declaration
  /// to the private-module-fragment (if any) or the end of the TU (if not).
  Normal,
  /// The private module fragment, between 'module :private;' and the end of
  /// the translation unit.
  Private
};

void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);

void CheckDelegatingCtorCycles();

Scope *getScopeForContext(DeclContext *Ctx);

void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();

/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);

void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
                             RecordDecl *RD, CapturedRegionKind K,
                             unsigned OpenMPCaptureLevel = 0);

/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
  Sema *Self;

public:
  explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
  void operator()(sema::FunctionScopeInfo *Scope) const;
};

using PoppedFunctionScopePtr =
    std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;

PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
                     const Decl *D = nullptr,
                     QualType BlockType = QualType());

/// The innermost function scope, or null when no function is being parsed.
sema::FunctionScopeInfo *getCurFunction() const {
  return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}

sema::FunctionScopeInfo *getEnclosingFunction() const;

void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void setFunctionHasMustTail();

void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();

sema::CompoundScopeInfo &getCurCompoundScope() const;

bool hasAnyUnrecoverableErrorsInThisFunction() const;

/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();

/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;

/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);

/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();

/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();

/// Retrieve the current function, if any, that should be analyzed for
/// potential availability violations.
sema::FunctionScopeInfo *getCurFunctionAvailabilityContext();

/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }

/// Called before parsing a function declarator belonging to a function
/// declaration.
void ActOnStartFunctionDeclarationDeclarator(Declarator &D,
                                             unsigned TemplateParameterDepth);
/// Called after parsing a function declarator belonging to a function
/// declaration.
void ActOnFinishFunctionDeclarationDeclarator(Declarator &D);

void ActOnComment(SourceRange Comment);

//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//

QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
                            const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
                            const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T, SourceLocation Loc,
                          DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc,
                            DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
                        Expr *ArraySize, unsigned Quals,
                        SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
                            SourceLocation AttrLoc);
QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns,
                         SourceLocation AttrLoc);

QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
                               SourceLocation AttrLoc);

/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
                               SourceLocation AttrLoc);

bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);

bool CheckFunctionReturnType(QualType T, SourceLocation Loc);

/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes,
                           SourceLocation Loc, DeclarationName Entity,
                           const FunctionProtoType::ExtProtoInfo &EPI);

QualType BuildMemberPointerType(QualType T, QualType Class,
                                SourceLocation Loc, DeclarationName Entity);
QualType BuildBlockPointerType(QualType T, SourceLocation Loc,
                               DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T, SourceLocation Loc);
QualType BuildWritePipeType(QualType T, SourceLocation Loc);
QualType BuildExtIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc);

TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);

/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
                                  TypeSourceInfo **TInfo = nullptr);

CanThrowResult canThrow(const Stmt *E);
/// Determine whether the callee of a particular function call can throw.
/// E, D and Loc are all optional.
static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D,
                                     SourceLocation Loc = SourceLocation());
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
                                              const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
                         const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
    const FunctionProtoType *Old, SourceLocation OldLoc,
    const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
    const PartialDiagnostic &DiagID, const PartialDiagnostic &NoteID,
    const FunctionProtoType *Old, SourceLocation OldLoc,
    const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
                              const PartialDiagnostic &NestedDiagID,
                              const PartialDiagnostic &NoteID,
                              const PartialDiagnostic &NoThrowDiagID,
                              const FunctionProtoType *Superset,
                              SourceLocation SuperLoc,
                              const FunctionProtoType *Subset,
                              SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
                             const PartialDiagnostic &NoteID,
                             const FunctionProtoType *Target,
                             SourceLocation TargetLoc,
                             const FunctionProtoType *Source,
                             SourceLocation SourceLoc);

TypeResult ActOnTypeName(Scope *S, Declarator &D);

/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);

/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
  TypeDiagnoser() {}

  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
  virtual ~TypeDiagnoser() {}
};

// Helpers that normalize common diagnostic-argument types so they can be
// streamed into a diagnostic builder uniformly.
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char *getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
  return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange(); }

/// A TypeDiagnoser that binds a diagnostic ID together with the extra
/// arguments to stream into it when the diagnostic is eventually emitted.
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
protected:
  unsigned DiagID;
  std::tuple<const Ts &...> Args;

  template <std::size_t... Is>
  void emit(const SemaDiagnosticBuilder &DB,
            std::index_sequence<Is...>) const {
    // Apply all tuple elements to the builder in order.
    bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
    (void)Dummy;
  }

public:
  BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
      : TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
    assert(DiagID != 0 && "no diagnostic for type diagnoser");
  }

  void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
    const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
    emit(DB, std::index_sequence_for<Ts...>());
    DB << T;
  }
};

/// Do a check to make sure \p Name looks like a legal argument for the
/// swift_name attribute applied to decl \p D. Raise a diagnostic if the name
/// is invalid for the given declaration.
///
/// \p AL is used to provide caret diagnostics in case of a malformed name.
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation Loc,
                       const ParsedAttr &AL, bool IsAsync);

/// A derivative of BoundTypeDiagnoser for which the diagnostic's type
/// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless.
/// For example, a diagnostic with no other parameters would generally have
/// the form "...%select{incomplete|sizeless}0 type %1...".
template <typename... Ts>
class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> {
public:
  SizelessTypeDiagnoser(unsigned DiagID, const Ts &...Args)
      : BoundTypeDiagnoser<Ts...>(DiagID, Args...) {}

  void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
    const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID);
    this->emit(DB, std::index_sequence_for<Ts...>());
    DB << T->isSizelessType() << T;
  }
};

enum class CompleteTypeKind {
  /// Apply the normal rules for complete types. In particular,
  /// treat all sizeless types as incomplete.
  Normal,

  /// Relax the normal rules for complete types so that they include
  /// sizeless built-in types.
  AcceptSizeless,

  // FIXME: Eventually we should flip the default to Normal and opt in
  // to AcceptSizeless rather than opt out of it.
  Default = AcceptSizeless
};

private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking if the address
/// of them are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);

bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
                             CompleteTypeKind Kind, TypeDiagnoser *Diagnoser);

/// State for one module whose parse is currently in progress.
struct ModuleScope {
  SourceLocation BeginLoc;
  clang::Module *Module = nullptr;
  bool ModuleInterface = false;
  bool ImplicitGlobalModuleFragment = false;
  VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;

/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl *, 8> DeferredExportedNamespaces;

/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
  return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}

VisibleModuleSet VisibleModules;

public:
/// Get the module owning an entity.
Module *getOwningModule(const Decl *Entity) {
  return Entity->getOwningModule();
}

/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);

bool isModuleVisible(const Module *M, bool ModulePrivate = false);

// When loading a non-modular PCH file, this is used to restore module
// visibility.
void makeModuleVisible(Module *Mod, SourceLocation ImportLoc) {
  VisibleModules.setVisible(Mod, ImportLoc);
}

/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
  return D->isUnconditionallyVisible() || isVisibleSlow(D);
}

/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
                      llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
  return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
                               llvm::SmallVectorImpl<Module *> *Modules);

bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);

/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);

/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
                          bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
  NamedDecl *Hidden;
  return hasVisibleDefinition(const_cast<NamedDecl *>(D), &Hidden);
}

/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
                          llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
    const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
    const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
                                            const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
    SourceLocation Loc, const NamedDecl *D,
    ArrayRef<const NamedDecl *> Equiv);

bool isUsualDeallocationFunction(const CXXMethodDecl *FD);

/// True if \p T is complete; performs no diagnostics on failure.
bool isCompleteType(SourceLocation Loc, QualType T,
                    CompleteTypeKind Kind = CompleteTypeKind::Default) {
  return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         CompleteTypeKind Kind, TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         CompleteTypeKind Kind, unsigned DiagID);
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         TypeDiagnoser &Diagnoser) {
  return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser);
}
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) {
  return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID);
}

template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
                         const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteType(Loc, T, Diagnoser);
}

template <typename... Ts>
bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID,
                              const Ts &...Args) {
  SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser);
}

/// Get the type of expression E, triggering instantiation to complete the
/// type if necessary -- that is, if the expression refers to a templated
/// static data member of incomplete array type.
///
/// May still return an incomplete type if instantiation was not possible or
/// if the type is incomplete for a different reason. Use
/// RequireCompleteExprType instead if a diagnostic is expected for an
/// incomplete expression type.
QualType getCompletedType(Expr *E);

void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind,
                             TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);

template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
}

template <typename... Ts>
bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID,
                                  const Ts &...Args) {
  SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser);
}

bool RequireLiteralType(SourceLocation Loc, QualType T,
                        TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);

template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
                        const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireLiteralType(Loc, T, Diagnoser);
}

QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
                           const CXXScopeSpec &SS, QualType T,
                           TagDecl *OwnedTagDecl = nullptr);

QualType getDecltypeForParenthesizedExpr(Expr *E);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
                           bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
                                 UnaryTransformType::UTTKind UKind,
                                 SourceLocation Loc);

//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//

struct SkipBodyInfo {
  SkipBodyInfo()
      : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
        New(nullptr) {}
  bool ShouldSkip;
  bool CheckSameAsPrevious;
  NamedDecl *Previous;
  NamedDecl *New;
};

DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);

void DiagnoseUseOfUnimplementedSelectors();

bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;

ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
                       Scope *S, CXXScopeSpec *SS = nullptr,
                       bool isClassName = false, bool HasTrailingDot = false,
                       ParsedType ObjectType = nullptr,
                       bool IsCtorOrDtorName = false,
                       bool WantNontrivialTypeSourceInfo = false,
                       bool IsClassTemplateDeductionContext = true,
                       IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc,
                             Scope *S, CXXScopeSpec *SS,
                             ParsedType &SuggestedType,
                             bool IsTemplateName = false);

/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
                                    SourceLocation NameLoc,
                                    bool IsTemplateTypeArg);

/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
  /// This name is not a type or template in this context, but might be
  /// something else.
  NC_Unknown,
  /// Classification failed; an error has been produced.
  NC_Error,
  /// The name has been typo-corrected to a keyword.
  NC_Keyword,
  /// The name was classified as a type.
  NC_Type,
  /// The name was classified as a specific non-type, non-template
  /// declaration. ActOnNameClassifiedAsNonType should be called to
  /// convert the declaration to an expression.
  NC_NonType,
  /// The name was classified as an ADL-only function name.
  /// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
  /// result to an expression.
  NC_UndeclaredNonType,
  /// The name denotes a member of a dependent type that could not be
  /// resolved. ActOnNameClassifiedAsDependentNonType should be called to
  /// convert the result to an expression.
  NC_DependentNonType,
  /// The name was classified as an overload set, and an expression
  /// representing that overload set has been formed.
  /// ActOnNameClassifiedAsOverloadSet should be called to form a suitable
  /// expression referencing the overload set.
  NC_OverloadSet,
  /// The name was classified as a template whose specializations are types.
  NC_TypeTemplate,
  /// The name was classified as a variable template name.
  NC_VarTemplate,
  /// The name was classified as a function template name.
  NC_FunctionTemplate,
  /// The name was classified as an ADL-only function template name.
  NC_UndeclaredTemplate,
  /// The name was classified as a concept name.
  NC_Concept,
};

/// Tagged union describing what a name was resolved to; which union member
/// is active is determined by Kind.
class NameClassification {
  NameClassificationKind Kind;
  union {
    ExprResult Expr;
    NamedDecl *NonTypeDecl;
    TemplateName Template;
    ParsedType Type;
  };

  explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}

public:
  NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}

  NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}

  static NameClassification Error() { return NameClassification(NC_Error); }

  static NameClassification Unknown() {
    return NameClassification(NC_Unknown);
  }

  static NameClassification OverloadSet(ExprResult E) {
    NameClassification Result(NC_OverloadSet);
    Result.Expr = E;
    return Result;
  }

  static NameClassification NonType(NamedDecl *D) {
    NameClassification Result(NC_NonType);
    Result.NonTypeDecl = D;
    return Result;
  }

  static NameClassification UndeclaredNonType() {
    return NameClassification(NC_UndeclaredNonType);
  }

  static NameClassification DependentNonType() {
    return NameClassification(NC_DependentNonType);
  }

  static NameClassification TypeTemplate(TemplateName Name) {
    NameClassification Result(NC_TypeTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification VarTemplate(TemplateName Name) {
    NameClassification Result(NC_VarTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification FunctionTemplate(TemplateName Name) {
    NameClassification Result(NC_FunctionTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification Concept(TemplateName Name) {
    NameClassification Result(NC_Concept);
    Result.Template = Name;
    return Result;
  }

  static NameClassification UndeclaredTemplate(TemplateName Name) {
    NameClassification Result(NC_UndeclaredTemplate);
    Result.Template = Name;
    return Result;
  }

  NameClassificationKind getKind() const { return Kind; }

  ExprResult getExpression() const {
    assert(Kind == NC_OverloadSet);
    return Expr;
  }

  ParsedType getType() const {
    assert(Kind == NC_Type);
    return Type;
  }

  NamedDecl *getNonTypeDecl() const {
    assert(Kind == NC_NonType);
    return NonTypeDecl;
  }

  TemplateName getTemplateName() const {
    assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
           Kind == NC_VarTemplate || Kind == NC_Concept ||
           Kind == NC_UndeclaredTemplate);
    return Template;
  }

  TemplateNameKind getTemplateNameKind() const {
    switch (Kind) {
    case NC_TypeTemplate:
      return TNK_Type_template;
    case NC_FunctionTemplate:
      return TNK_Function_template;
    case NC_VarTemplate:
      return TNK_Var_template;
    case NC_Concept:
      return TNK_Concept_template;
    case NC_UndeclaredTemplate:
      return TNK_Undeclared_template;
    default:
      llvm_unreachable("unsupported name classification.");
    }
  }
};

/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
                                IdentifierInfo *&Name, SourceLocation NameLoc,
                                const Token &NextToken,
                                CorrectionCandidateCallback *CCC = nullptr);

/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
                                                  SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, bool IsAddressOfOperand); /// Act on the result of classifying a name as a specific non-type /// declaration. ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS, NamedDecl *Found, SourceLocation NameLoc, const Token &NextToken); /// Act on the result of classifying a name as an overload set. ExprResult ActOnNameClassifiedAsOverloadSet(Scope *S, Expr *OverloadSet); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. 
return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); void warnOnReservedIdentifier(const NamedDecl *D); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); bool tryToFixVariablyModifiedVarType(TypeSourceInfo *&TInfo, QualType &T, SourceLocation Loc, unsigned FailedFoldDiagID); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); void diagnosePointerAuthDisabled(SourceLocation loc, SourceRange range); bool checkConstantPointerAuthKey(Expr *keyExpr, unsigned &key); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const BindingDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. 
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); enum class CheckConstexprKind { /// Diagnose issues that are 
non-constant or that are extensions. Diagnose, /// Identify whether this function satisfies the formal rules for constexpr /// functions in the current language mode (with no extensions). CheckValid }; bool CheckConstexprFunctionDefinition(const FunctionDecl *FD, CheckConstexprKind Kind); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); QualType adjustParameterTypeForObjCAutoRefCount(QualType T, SourceLocation NameLoc, TypeSourceInfo *TSInfo); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); ExprResult ConvertParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); void SetParamDefaultArgument(ParmVarDecl *Param, Expr
*DefaultArg, SourceLocation EqualLoc); // Contexts where using non-trivial C union types can be disallowed. This is // passed to err_non_trivial_c_union_in_invalid_context. enum NonTrivialCUnionContext { // Function parameter. NTCUC_FunctionParam, // Function return. NTCUC_FunctionReturn, // Default-initialized object. NTCUC_DefaultInitializedObject, // Variable with automatic storage duration. NTCUC_AutoVar, // Initializer expression that might copy from another object. NTCUC_CopyInit, // Assignment. NTCUC_Assignment, // Compound literal. NTCUC_CompoundLiteral, // Block capture. NTCUC_BlockCapture, // lvalue-to-rvalue conversion of volatile type. NTCUC_LValueToRValueVolatile, }; /// Emit diagnostics if the initializer or any of its explicit or /// implicitly-generated subexpressions require copying or /// default-initializing a type that is or contains a C union type that is /// non-trivial to copy or default-initialize. void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc); // These flags are passed to checkNonTrivialCUnion. enum NonTrivialCUnionKind { NTCUK_Init = 0x1, NTCUK_Destruct = 0x2, NTCUK_Copy = 0x4, }; /// Emit diagnostics if a non-trivial C union type or a struct that contains /// a non-trivial C union is used in an invalid context. 
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc, NonTrivialCUnionContext UseContext, unsigned NonTrivialKind); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D); ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr); ExprResult ActOnRequiresClause(ExprResult ConstraintExpr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. 
/// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. 
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' }; /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path, bool IsFirstDecl); /// The parser has processed a global-module-fragment declaration that begins /// the definition of the global module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc); /// The parser has processed a private-module-fragment declaration that begins /// the definition of the private module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. /// \param PrivateLoc The location of the 'private' keyword. DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc, SourceLocation PrivateLoc); /// The parser has processed a module import declaration. /// /// \param StartLoc The location of the first token in the declaration. This /// could be the location of an '@', 'export', or 'import'. /// \param ExportLoc The location of the 'export' keyword, if any. /// \param ImportLoc The location of the 'import' keyword. /// \param Path The module access path. DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, ModuleIdPath Path); DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, Module *M, ModuleIdPath Path = {}); /// The parser has processed a module import translated from a /// #include or similar preprocessing directive. 
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// The parser has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// The parser has left a submodule. void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. Note, the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument, ExplicitSpecialization, PartialSpecialization }; /// Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem. void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. 
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl 
*CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); /// For a defaulted function, the kind of defaulted function that it is. class DefaultedFunctionKind { CXXSpecialMember SpecialMember : 8; DefaultedComparisonKind Comparison : 8; public: DefaultedFunctionKind() : SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) { } DefaultedFunctionKind(CXXSpecialMember CSM) : SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {} DefaultedFunctionKind(DefaultedComparisonKind Comp) : SpecialMember(CXXInvalid), Comparison(Comp) {} bool isSpecialMember() const { return SpecialMember != CXXInvalid; } bool isComparison() const { return Comparison != DefaultedComparisonKind::None; } explicit operator bool() const { return isSpecialMember() || isComparison(); } CXXSpecialMember asSpecialMember() const { return SpecialMember; } DefaultedComparisonKind asComparison() const { return Comparison; } /// Get the index of this function kind for use in diagnostics. 
unsigned getDiagnosticIndex() const { static_assert(CXXInvalid > CXXDestructor, "invalid should have highest index"); static_assert((unsigned)DefaultedComparisonKind::None == 0, "none should be equal to zero"); return SpecialMember + (unsigned)Comparison; } }; DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) { return getDefaultedFunctionKind(MD).asSpecialMember(); } DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) { return getDefaultedFunctionKind(FD).asComparison(); } void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. 
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, bool IsAbstract, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. /// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); /// Set the current declaration context until it gets popped. 
void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Enter a template parameter scope, after it's been associated with a particular /// DeclContext. Causes lookup within the scope to chain through enclosing contexts /// in the correct order. void EnterTemplatedContext(Scope *S, DeclContext *DC); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. 
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// Don't merge availability attributes at all. AMK_None, /// Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override, /// Merge availability attributes for an implementation of /// a protocol requirement. AMK_ProtocolImplementation, /// Merge availability attributes for an implementation of /// an optional protocol requirement. AMK_OptionalProtocolImplementation }; /// Describes the kind of priority given to an availability attribute. /// /// The sum of priorities determines the final priority of the attribute. /// The final priority determines how the attribute will be merged. /// An attribute with a lower priority will always remove higher priority /// attributes for the specified platform when it is being applied. An /// attribute with a higher priority will not be applied if the declaration /// already has an availability attribute with a lower priority for the /// specified platform. The final priority values are not expected to match /// the values in this enumeration, but instead should be treated as a plain /// integer value. This enumeration just names the priority weights that are /// used to calculate that final value.
enum AvailabilityPriority : int { /// The availability attribute was specified explicitly next to the /// declaration. AP_Explicit = 0, /// The availability attribute was applied using '#pragma clang attribute'. AP_PragmaClangAttribute = 1, /// The availability attribute for a specific platform was inferred from /// an availability attribute for another platform. AP_InferredFromOtherPlatform = 2 }; /// Attribute merging methods. Return true if a new attribute was added. AvailabilityAttr * mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority); TypeVisibilityAttr * mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, TypeVisibilityAttr::VisibilityType Vis); VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, VisibilityAttr::VisibilityType Vis); UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI, StringRef UuidAsWritten, MSGuidDecl *GuidDecl); DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI); DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI); MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D, const AttributeCommonInfo &CI, bool BestCase, MSInheritanceModel Model); FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Format, int FormatIdx, int FirstArg); SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, const AttributeCommonInfo &CI, const IdentifierInfo *Ident); MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI); SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const SwiftNameAttr &SNA, StringRef Name); OptimizeNoneAttr 
*mergeOptimizeNoneAttr(Decl *D, const AttributeCommonInfo &CI); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); WebAssemblyImportNameAttr *mergeImportNameAttr( Decl *D, const WebAssemblyImportNameAttr &AL); WebAssemblyImportModuleAttr *mergeImportModuleAttr( Decl *D, const WebAssemblyImportModuleAttr &AL); EnforceTCBAttr *mergeEnforceTCBAttr(Decl *D, const EnforceTCBAttr &AL); EnforceTCBLeafAttr *mergeEnforceTCBLeafAttr(Decl *D, const EnforceTCBLeafAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. 
Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true, bool ConsiderRequiresClauses = true); enum class AllowedExplicit { /// Allow no explicit functions to be used. None, /// Allow explicit conversion functions but not explicit constructors. Conversions, /// Allow both explicit conversion functions and explicit constructors. All }; ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, AllowedExplicit AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr 
*From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); bool CanPerformAggregateInitializationForOverloadResolution( const InitializedEntity &Entity, InitListExpr *From); bool IsStringInit(Expr *Init, const ArrayType *AT); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_ArrayBound, ///< Array bound in array declarator or new-expression. CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier. 
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                            llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                            APValue &Value, CCEKind CCE,
                                            NamedDecl *Dest = nullptr);

/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
  /// Whether to suppress diagnostics entirely (forwarded by subclasses;
  /// exact effect depends on the caller of the converter).
  bool Suppress;
  /// Whether to suppress the conversion-related diagnostics only.
  bool SuppressConversion;

  ContextualImplicitConverter(bool Suppress = false,
                              bool SuppressConversion = false)
      : Suppress(Suppress), SuppressConversion(SuppressConversion) {}

  /// Determine whether the specified type is a valid destination type
  /// for this conversion.
  virtual bool match(QualType T) = 0;

  /// Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc,
                                                QualType T) = 0;

  /// Emits a diagnostic when the expression has incomplete class type.
  virtual SemaDiagnosticBuilder
  diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// Emits a diagnostic when the only matching conversion function
  /// is explicit.
  virtual SemaDiagnosticBuilder diagnoseExplicitConv(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

  /// Emits a note for the explicit conversion function.
  virtual SemaDiagnosticBuilder
  noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

  /// Emits a diagnostic when there are multiple possible conversion
  /// functions.
  virtual SemaDiagnosticBuilder
  diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// Emits a note for one of the candidate conversions.
  virtual SemaDiagnosticBuilder
  noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

  /// Emits a diagnostic when we picked a conversion function
  /// (for cases when we are not allowed to pick a conversion function).
  virtual SemaDiagnosticBuilder diagnoseConversion(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

  virtual ~ContextualImplicitConverter() {}
};

/// Converter that accepts integral and (possibly scoped) enumeration
/// types; routes the "no match" diagnostic to diagnoseNotInt().
class ICEConvertDiagnoser : public ContextualImplicitConverter {
  bool AllowScopedEnumerations;

public:
  ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress,
                      bool SuppressConversion)
      : ContextualImplicitConverter(Suppress, SuppressConversion),
        AllowScopedEnumerations(AllowScopedEnumerations) {}

  /// Match an integral or (possibly scoped) enumeration type.
  bool match(QualType T) override;

  SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc,
                                        QualType T) override {
    // Delegate to the int-specific diagnostic so subclasses only
    // implement diagnoseNotInt().
    return diagnoseNotInt(S, Loc, T);
  }

  /// Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
                                               QualType T) = 0;
};

/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
    SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);

enum ObjCSubscriptKind {
  OS_Array,
  OS_Dictionary,
  OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);

// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
  LK_Array,
  LK_Dictionary,
  LK_Numeric,
  LK_Boxed,
  LK_String,
  LK_Block,
  LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);

ExprResult PerformObjectMemberConversion(Expr *From,
                                         NestedNameSpecifier *Qualifier,
                                         NamedDecl *FoundDecl,
                                         NamedDecl *Member);

// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, bool AllowExplicitConversion = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false, OverloadCandidateParamOrder PO = {}); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, OverloadCandidateParamOrder PO = {}); void AddTemplateOverloadCandidate( FunctionTemplateDecl *FunctionTemplate, 
DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, OverloadCandidateParamOrder PO = {}); bool CheckNonDependentConversions( FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}, OverloadCandidateParamOrder PO = {}); void AddConversionCandidate( CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddNonMemberOperatorCandidates( const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, OverloadCandidateParamOrder PO = {}); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = 
false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate( NamedDecl *Found, FunctionDecl *Fn, OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(), QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. 
/// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal. bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfSingleOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); void AddOverloadedCallCandidates( LookupResult &R, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet 
&CandidateSet); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass, NestedNameSpecifierLoc NNSLoc, DeclarationNameInfo DNI, const UnresolvedSetImpl &Fns, bool PerformADL = true); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet, OverloadedOperatorKind Op, const UnresolvedSetImpl &Fns, ArrayRef<Expr *> Args, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true, bool AllowRewrittenCandidates = true, FunctionDecl *DefaultedFn = nullptr); ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, FunctionDecl *DefaultedFn); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, bool AllowRecovery = false); ExprResult 
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. 
In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up a name following ~ in a destructor name. This is an ordinary /// lookup, but prefers tags to typedefs. LookupDestructorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. 
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};

/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
  /// The lookup is a reference to this name that is not for the
  /// purpose of redeclaring the name.
  NotForRedeclaration = 0,

  /// The lookup results will be used for redeclaration of a name,
  /// if an entity by that name already exists and is visible.
  ForVisibleRedeclaration,

  /// The lookup results will be used for redeclaration of a name
  /// with external linkage; non-visible lookup results with external linkage
  /// may also be found.
  ForExternalRedeclaration
};

/// Returns the redeclaration-lookup kind appropriate for the current
/// context: ForVisibleRedeclaration when the current context has an
/// owning module for linkage purposes, ForExternalRedeclaration otherwise.
RedeclarationKind forRedeclarationInCurContext() {
  // A declaration with an owning module for linkage can never link against
  // anything that is not visible. We don't need to check linkage here; if
  // the context has internal linkage, redeclaration lookup won't find things
  // from other TUs, and we can't safely compute linkage yet in general.
  if (cast<Decl>(CurContext)
          ->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
    return ForVisibleRedeclaration;
  return ForExternalRedeclaration;
}

/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
  /// The lookup resulted in an error.
  LOLR_Error,
  /// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. LOLR_StringTemplatePack, }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, SourceLocation TypoLoc); // The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. 
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;

/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;

/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
                           Sema::LookupNameKind LookupKind, Scope *S,
                           CXXScopeSpec *SS,
                           CorrectionCandidateCallback &CCC,
                           DeclContext *MemberContext, bool EnteringContext,
                           const ObjCObjectPointerType *OPT,
                           bool ErrorRecovery);

public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;

/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);

/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overload sets.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupBuiltin(LookupResult &R); void LookupNecessaryTypesForBuiltin(Scope *S, unsigned ID); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id, bool IsUDSuffix); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing, StringLiteral *StringLit = nullptr); bool isKnownName(StringRef name); 
/// Status of the function emission on the CUDA/HIP/OpenMP host/device attrs. enum class FunctionEmissionStatus { Emitted, CUDADiscarded, // Discarded due to CUDA/HIP hostness OMPDiscarded, // Discarded due to OpenMP hostness TemplateDiscarded, // Discarded due to uninstantiated templates Unknown, }; FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl, bool Final = false); // Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check. bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. 
};

TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
                           Sema::LookupNameKind LookupKind,
                           Scope *S, CXXScopeSpec *SS,
                           CorrectionCandidateCallback &CCC,
                           CorrectTypoKind Mode,
                           DeclContext *MemberContext = nullptr,
                           bool EnteringContext = false,
                           const ObjCObjectPointerType *OPT = nullptr,
                           bool RecordFailure = true);

TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
                             Sema::LookupNameKind LookupKind, Scope *S,
                             CXXScopeSpec *SS,
                             CorrectionCandidateCallback &CCC,
                             TypoDiagnosticGenerator TDG,
                             TypoRecoveryCallback TRC, CorrectTypoKind Mode,
                             DeclContext *MemberContext = nullptr,
                             bool EnteringContext = false,
                             const ObjCObjectPointerType *OPT = nullptr);

/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param RecoverUncorrectedTypos If true, when typo correction fails, it
/// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult CorrectDelayedTyposInExpr(
    Expr *E, VarDecl *InitDecl = nullptr,
    bool RecoverUncorrectedTypos = false,
    llvm::function_ref<ExprResult(Expr *)> Filter =
        [](Expr *E) -> ExprResult { return E; });

/// Convenience overload taking an ExprResult: invalid results are passed
/// through unchanged, valid ones are forwarded to the Expr* overload above.
ExprResult CorrectDelayedTyposInExpr(
    ExprResult ER, VarDecl *InitDecl = nullptr,
    bool RecoverUncorrectedTypos = false,
    llvm::function_ref<ExprResult(Expr *)> Filter =
        [](Expr *E) -> ExprResult { return E; }) {
  return ER.isInvalid()
             ? ER
             : CorrectDelayedTyposInExpr(ER.get(), InitDecl,
                                         RecoverUncorrectedTypos, Filter);
}

void diagnoseTypo(const TypoCorrection &Correction,
                  const PartialDiagnostic &TypoDiag,
                  bool ErrorRecovery = true);

void diagnoseTypo(const TypoCorrection &Correction,
                  const PartialDiagnostic &TypoDiag,
                  const PartialDiagnostic &PrevNote,
                  bool ErrorRecovery = true);

void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);

void FindAssociatedClassesAndNamespaces(
    SourceLocation InstantiationLoc, ArrayRef<Expr *> Args,
    AssociatedNamespaceSet &AssociatedNamespaces,
    AssociatedClassSet &AssociatedClasses);

void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
                          bool ConsiderLinkage, bool AllowInlineNamespace);

bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);

void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}

/// Attempts to produce a RecoveryExpr after some AST node cannot be created.
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End,
                              ArrayRef<Expr *> SubExprs,
                              QualType T = QualType());

ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
                                        SourceLocation IdLoc,
                                        bool TypoCorrection = false);
FunctionDecl *CreateBuiltin(IdentifierInfo *II, QualType Type, unsigned ID,
                            SourceLocation Loc);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S,
                               bool ForRedeclaration, SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
                                    Scope *S);
void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
    FunctionDecl *FD);
void AddKnownFunctionAttributes(FunctionDecl *FD);

// More parsing and symbol table subroutines.

void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Handles semantic checking for features that are common to all attributes, /// such as checking whether a parameter was properly specified, or the /// correct number of arguments were passed, etc. Returns true if the /// attribute has been diagnosed. bool checkCommonAttributeFeatures(const Decl *D, const ParsedAttr &A); bool checkCommonAttributeFeatures(const Stmt *S, const ParsedAttr &A); /// Map any API notes provided for this declaration to attributes on the /// declaration. /// /// Triggered by declaration-attribute processing. void ProcessAPINotes(Decl *D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. 
bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); llvm::Error isValidSectionSpecifier(StringRef Str); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceModel SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. // The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Check whether a nullability type specifier can be added to the given /// type through some means not written in source (e.g. API notes). /// /// \param type The type to which the nullability specifier will be /// added. On success, this type will be updated appropriately. /// /// \param nullability The nullability specifier to add. /// /// \param diagLoc The location to use for diagnostics. 
/// /// \param allowArrayTypes Whether to accept nullability specifiers on an /// array type (e.g., because it will decay to a pointer). /// /// \param overrideExisting Whether to override an existing, locally-specified /// nullability specifier rather than complaining about the conflict. /// /// \returns true if nullability cannot be applied, false otherwise. bool checkImplicitNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability, SourceLocation diagLoc, bool allowArrayTypes, bool overrideExisting); /// Process the attributes before creating an attributed statement. Returns /// the semantic attributes that have been processed. void ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributesWithRange &InAttrs, SmallVectorImpl<const Attr *> &OutAttrs); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if method /// implementation declaration matches exactly that of its declaration. void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implelementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation. 
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl, ObjCInterfaceDecl *IDecl, SourceLocation AtEnd); void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which /// backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If method is a property setter/getter and /// it property has a backing ivar, returns this ivar; otherwise, returns NULL. /// It also returns ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. 
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, unsigned &Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjcPropertyDecl for a category or \@interface. ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when atomic property has one but not the other user-declared /// setter or getter. void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCInterfaceDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks if two methods' type match and returns /// true, or false, accordingly. bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declaraed in interface or /// or protocol against those declared in their implementations. 
void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in /// category matches with those implemented in its primary class and /// warns each time an exact match is found. void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// Add the given method to the list of globally-known methods. void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method); /// Returns default addr space for method qualifiers. LangAS getDefaultCXXMethodAddrSpace() const; private: /// AddMethodToGlobalPool - Add an instance or factory method to the global /// pool. See descriptoin of AddInstanceMethodToGlobalPool. void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance); /// LookupMethodInGlobalPool - Returns the instance or factory method and /// optionally warns if there are multiple signatures. ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, bool instance); public: /// - Returns instance or factory methods in global method pool for /// given selector. It checks the desired kind first, if none is found, and /// parameter checkTheOther is set, it then checks the other kind. If no such /// method or only one method is found, function returns false; otherwise, it /// returns true. 
bool CollectMultipleMethodsInGlobalPool(Selector Sel, SmallVectorImpl<ObjCMethodDecl*>& Methods, bool InstanceFirst, bool CheckTheOther, const ObjCObjectType *TypeBound = nullptr); bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod, SourceRange R, bool receiverIdOrClass, SmallVectorImpl<ObjCMethodDecl*>& Methods); void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods, Selector Sel, SourceRange R, bool receiverIdOrClass); private: /// - Returns a selector which best matches given argument list or /// nullptr if none could be found ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance, SmallVectorImpl<ObjCMethodDecl*>& Methods); /// Record the typo correction failure and return an empty correction. TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc, bool RecordFailure = true) { if (RecordFailure) TypoCorrectionFailures[Typo].insert(TypoLoc); return TypoCorrection(); } public: /// AddInstanceMethodToGlobalPool - All instance methods in a translation /// unit are added to a global pool. This allows us to efficiently associate /// a selector with a method declaraation for purposes of typechecking /// messages sent to "id" (where the class of the object is unknown). void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/true); } /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods. void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/false); } /// AddAnyMethodToGlobalPool - Add any method, instance or factory to global /// pool. void AddAnyMethodToGlobalPool(Decl *D); /// LookupInstanceMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. 
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/true); } /// LookupFactoryMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/false); } const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel, QualType ObjectType=QualType()); /// LookupImplementedMethodInGlobalPool - Returns the method which has an /// implementation. ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel); /// CollectIvarsToConstructOrDestruct - Collect those ivars which require /// initialization. void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl*> &Ivars); //===--------------------------------------------------------------------===// // Statement Parsing Callbacks: SemaStmt.cpp. public: class FullExprArg { public: FullExprArg() : E(nullptr) { } FullExprArg(Sema &actions) : E(nullptr) { } ExprResult release() { return E; } Expr *get() const { return E; } Expr *operator->() { return E; } private: // FIXME: No need to make the entire Sema class a friend when it's just // Sema::MakeFullExpr that needs access to the constructor below. friend class Sema; explicit FullExprArg(Expr *expr) : E(expr) {} Expr *E; }; FullExprArg MakeFullExpr(Expr *Arg) { return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation()); } FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) { return FullExprArg( ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get()); } FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) { ExprResult FE = ActOnFinishFullExpr(Arg, Arg ? 
Arg->getExprLoc() : SourceLocation(), /*DiscardedValue*/ true); return FullExprArg(FE.get()); } StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true); StmtResult ActOnExprStmtError(); StmtResult ActOnNullStmt(SourceLocation SemiLoc, bool HasLeadingEmptyMacro = false); void ActOnStartOfCompoundStmt(bool IsStmtExpr); void ActOnAfterCompoundStatementLeadingPragmas(); void ActOnFinishOfCompoundStmt(); StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R, ArrayRef<Stmt *> Elts, bool isStmtExpr); /// A RAII object to enter scope of a compound statement. class CompoundScopeRAII { public: CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) { S.ActOnStartOfCompoundStmt(IsStmtExpr); } ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); } private: Sema &S; }; /// An RAII helper that pops function a function scope on exit. struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS, SourceLocation DotDotDotLoc, ExprResult RHS, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult BuildAttributedStmt(SourceLocation AttrsLoc, ArrayRef<const Attr *> Attrs, Stmt *SubStmt); StmtResult ActOnAttributedStmt(const ParsedAttributesWithRange &AttrList, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, bool 
IsConstexpr, SourceLocation LParenLoc, Stmt *InitStmt, ConditionResult Cond, SourceLocation RParenLoc, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr, SourceLocation LParenLoc, Stmt *InitStmt, ConditionResult Cond, SourceLocation RParenLoc, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, SourceLocation LParenLoc, Stmt *InitStmt, ConditionResult Cond, SourceLocation RParenLoc); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc, ConditionResult Cond, SourceLocation RParenLoc, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. 
BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params, unsigned OpenMPCaptureLevel = 0); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); struct NamedReturnInfo { const VarDecl *Candidate; enum Status : uint8_t { None, MoveEligible, MoveEligibleAndCopyElidable }; Status S; bool isMoveEligible() const { return S != None; }; bool isCopyElidable() const { return S == MoveEligibleAndCopyElidable; } }; NamedReturnInfo getNamedReturnInfo(Expr *&E, bool ForceCXX2b = false); NamedReturnInfo getNamedReturnInfo(const VarDecl *VD); const VarDecl *getCopyElisionCandidate(NamedReturnInfo &Info, QualType ReturnType); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const NamedReturnInfo &NRInfo, Expr *Value); 
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, NamedReturnInfo &NRInfo); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult 
ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// If VD is set but not otherwise used, diagnose, for a parameter or a /// variable. void DiagnoseUnusedButSetDecl(const VarDecl *VD); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. 
/// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { ParsingClassDepth++; return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { ParsingClassDepth--; DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass, bool ObjCPropertyAccess, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr); bool makeUnavailableInSystemHeader(SourceLocation loc, UnavailableAttr::ImplicitReason reason); /// Issue any -Wunguarded-availability warnings in \c FD void DiagnoseUnguardedAvailabilityViolations(Decl *FD); void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. 
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReciever = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult CheckUnevaluatedOperand(Expr *E); void CheckUnusedVolatileAssignment(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. 
This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E); void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc, unsigned CapturingScopeIndex); ExprResult CheckLValueToRValueConversionOperand(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely check whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. 
/// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. 
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Try to convert an expression \p E to type \p Ty. Returns the result of the /// conversion. ExprResult tryConvertExprToType(Expr *E, QualType Ty); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseDependentMemberLookup(LookupResult &R); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S, IdentifierInfo *II); ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult 
ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr( const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, UnresolvedLookupExpr *AsULE = nullptr); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool 
IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); ExprResult BuildSYCLUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation LParen, SourceLocation RParen, TypeSourceInfo *TSI); ExprResult ActOnSYCLUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation LParen, SourceLocation RParen, ParsedType ParsedTy); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). 
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
                              Scope *UDLScope = nullptr);

// C11 _Generic selection: parsed form (types still ParsedType) and the
// semantic form used when building from already-analyzed TypeSourceInfo.
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
                                     SourceLocation DefaultLoc,
                                     SourceLocation RParenLoc,
                                     Expr *ControllingExpr,
                                     ArrayRef<ParsedType> ArgTypes,
                                     ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
                                      SourceLocation DefaultLoc,
                                      SourceLocation RParenLoc,
                                      Expr *ControllingExpr,
                                      ArrayRef<TypeSourceInfo *> Types,
                                      ArrayRef<Expr *> Exprs);

// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
                                Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc,
                        Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op,
                        Expr *Input);

bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);

// sizeof/alignof/vec_step-style traits, on a type or on an expression.
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
                                          SourceLocation OpLoc,
                                          UnaryExprOrTypeTrait ExprKind,
                                          SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
                                          UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
                                         UnaryExprOrTypeTrait ExprKind,
                                         bool IsType, void *TyOrEx,
                                         SourceRange ArgRange);

ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);

bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
                                      SourceRange ExprRange,
                                      UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc,
                                        IdentifierInfo &Name,
                                        SourceLocation NameLoc,
                                        SourceLocation RParenLoc);

ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
                               tok::TokenKind Kind, Expr *Input);

// Array and matrix subscripting.
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
                                   Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
                                           Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx,
                                            Expr *ColumnIdx,
                                            SourceLocation RBLoc);

// OpenMP array sections ([lb : len : stride]) and array shaping.
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
                                    Expr *LowerBound,
                                    SourceLocation ColonLocFirst,
                                    SourceLocation ColonLocSecond,
                                    Expr *Length, Expr *Stride,
                                    SourceLocation RBLoc);
ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
                                    SourceLocation RParenLoc,
                                    ArrayRef<Expr *> Dims,
                                    ArrayRef<SourceRange> Brackets);

/// Data structure for iterator expression.
struct OMPIteratorData {
  IdentifierInfo *DeclIdent = nullptr;
  SourceLocation DeclIdentLoc;
  ParsedType Type;
  OMPIteratorExpr::IteratorRange Range;
  SourceLocation AssignLoc;
  SourceLocation ColonLoc;
  SourceLocation SecColonLoc;
};

ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc,
                                SourceLocation LLoc, SourceLocation RLoc,
                                ArrayRef<OMPIteratorData> Data);

// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
  Scope *S;
  UnqualifiedId &Id;
  Decl *ObjCImpDecl;
};

ExprResult BuildMemberReferenceExpr(
    Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
    CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
    NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
    const TemplateArgumentListInfo *TemplateArgs, const Scope *S,
    ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
                         bool IsArrow, const CXXScopeSpec &SS,
                         SourceLocation TemplateKWLoc,
                         NamedDecl *FirstQualifierInScope, LookupResult &R,
                         const TemplateArgumentListInfo *TemplateArgs,
                         const Scope *S, bool SuppressQualifierCheck = false,
                         ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);

ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
                                   SourceLocation OpLoc,
                                   const CXXScopeSpec &SS, FieldDecl *Field,
                                   DeclAccessPair FoundDecl,
                                   const DeclarationNameInfo &MemberNameInfo);

ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);

bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
                                   const CXXScopeSpec &SS,
                                   const LookupResult &R);

ExprResult ActOnDependentMemberExpr(
    Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc,
    const CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
    NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
    const TemplateArgumentListInfo *TemplateArgs);

ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc,
                                 tok::TokenKind OpKind, CXXScopeSpec &SS,
                                 SourceLocation TemplateKWLoc,
                                 UnqualifiedId &Member, Decl *ObjCImpDecl);

// Overload pair: the qualifier may come as a CXXScopeSpec or as an
// already-built NestedNameSpecifierLoc.
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
                const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
                ValueDecl *Member, DeclAccessPair FoundDecl,
                bool HadMultipleCandidates,
                const DeclarationNameInfo &MemberNameInfo, QualType Ty,
                ExprValueKind VK, ExprObjectKind OK,
                const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
                NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
                ValueDecl *Member, DeclAccessPair FoundDecl,
                bool HadMultipleCandidates,
                const DeclarationNameInfo &MemberNameInfo, QualType Ty,
                ExprValueKind VK, ExprObjectKind OK,
                const TemplateArgumentListInfo *TemplateArgs = nullptr);

void ActOnDefaultCtorInitializers(Decl *CDtorDecl);

bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl,
                             const FunctionProtoType *Proto,
                             ArrayRef<Expr *> Args, SourceLocation RParenLoc,
                             bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param,
                              const Expr *ArgExpr);

/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                         MultiExprArg ArgExprs, SourceLocation RParenLoc,
                         Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                         MultiExprArg ArgExprs, SourceLocation RParenLoc,
                         Expr *ExecConfig = nullptr, bool IsExecConfig = false,
                         bool AllowRecovery = false);
Expr *BuildBuiltinCallExpr(SourceLocation Loc, Builtin::ID Id,
                           MultiExprArg CallArgs);

// Whether atomic-builtin arguments are given in source (API) order or in
// the order the AST node stores them.
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
                SourceLocation RParenLoc, MultiExprArg Args,
                AtomicExpr::AtomicOp Op,
                AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);

ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
                      ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
                      Expr *Config = nullptr, bool IsExecConfig = false,
                      ADLCallKind UsesADL = ADLCallKind::NotADL);

// CUDA kernel launch configuration; LLLLoc/GGGLoc presumably locate the
// '<<<' and '>>>' tokens — confirm against the parser call site.
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                   MultiExprArg ExecConfig,
                                   SourceLocation GGGLoc);

ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D,
                         ParsedType &Ty, SourceLocation RParenLoc,
                         Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty,
                               SourceLocation RParenLoc, Expr *Op);

CastKind PrepareScalarCast(ExprResult &src, QualType destType);

/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
                              SourceLocation RParenLoc, Expr *E,
                              TypeSourceInfo *TInfo);

ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);

ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty,
                                SourceLocation RParenLoc, Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
                                    TypeSourceInfo *TInfo,
                                    SourceLocation RParenLoc,
                                    Expr *LiteralExpr);

ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList,
                         SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList,
                         SourceLocation RBraceLoc);

ExprResult ActOnDesignatedInitializer(Designation &Desig,
                                      SourceLocation EqualOrColonLoc,
                                      bool GNUSyntax, ExprResult Init);

private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);

public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind,
                      Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
                      Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                              Expr *LHSExpr, Expr *RHSExpr);
void LookupBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
                 UnresolvedSetImpl &Functions);

void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);

/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
                              SourceLocation ColonLoc, Expr *CondExpr,
                              Expr *LHSExpr, Expr *RHSExpr);

/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
                          LabelDecl *TheDecl);

// GNU statement expressions: ({ ... }).
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt,
                         SourceLocation RPLoc);
ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
                         SourceLocation RPLoc, unsigned TemplateDepth);
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();

// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
  SourceLocation LocStart, LocEnd;
  bool isBrackets; // true if [expr], false if .ident
  union {
    IdentifierInfo *IdentInfo;
    Expr *E;
  } U;
};

/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
                                TypeSourceInfo *TInfo,
                                ArrayRef<OffsetOfComponent> Components,
                                SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc,
                                SourceLocation TypeLoc,
                                ParsedType ParsedArgTy,
                                ArrayRef<OffsetOfComponent> Components,
                                SourceLocation RParenLoc);

// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr,
                           Expr *LHSExpr, Expr *RHSExpr,
                           SourceLocation RPLoc);

// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
                      SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
                          TypeSourceInfo *TInfo, SourceLocation RPLoc);

// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
                              SourceLocation BuiltinLoc,
                              SourceLocation RPLoc);

// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
                              SourceLocation BuiltinLoc, SourceLocation RPLoc,
                              DeclContext *ParentContext);

// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);

bool CheckCaseExpression(Expr *E);

/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
  /// The symbol exists.
  IER_Exists,

  /// The symbol does not exist.
  IER_DoesNotExist,

  /// The name is a dependent name, so the results will differ
  /// from one instantiation to the next.
  IER_Dependent,

  /// An error occurred.
  IER_Error
};

IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
                             const DeclarationNameInfo &TargetNameInfo);
IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S,
                                            SourceLocation KeywordLoc,
                                            bool IsIfExists, CXXScopeSpec &SS,
                                            UnqualifiedId &Name);

StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
                                      bool IsIfExists,
                                      NestedNameSpecifierLoc QualifierLoc,
                                      DeclarationNameInfo NameInfo,
                                      Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
                                      bool IsIfExists, CXXScopeSpec &SS,
                                      UnqualifiedId &Name, Stmt *Nested);

//===------------------------- "Block" Extension ------------------------===//

/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);

/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
                         Scope *CurScope);

/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);

/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed.
/// ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
                              Scope *CurScope);

//===---------------------------- Clang Extensions ----------------------===//

/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
                                  SourceLocation BuiltinLoc,
                                  SourceLocation RParenLoc);

//===---------------------------- OpenCL Features -----------------------===//

/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
                           SourceLocation BuiltinLoc,
                           SourceLocation RParenLoc);
ExprResult BuildAsTypeExpr(Expr *E, QualType DestTy,
                           SourceLocation BuiltinLoc,
                           SourceLocation RParenLoc);

//===---------------------------- C++ Features --------------------------===//

// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
                             SourceLocation NamespaceLoc,
                             SourceLocation IdentLoc, IdentifierInfo *Ident,
                             SourceLocation LBrace,
                             const ParsedAttributesView &AttrList,
                             UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);

NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();

CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;

private:
// A cache representing if we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;

ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
                                       CXXScopeSpec &SS,
                                       ParsedType TemplateTypeTy,
                                       IdentifierInfo *MemberOrBase);

public:
enum class ComparisonCategoryUsage {
  /// The '<=>' operator was used in an expression and a builtin operator
  /// was selected.
  OperatorInExpression,

  /// A defaulted 'operator<=>' needed the comparison category. This
  /// typically only applies to 'std::strong_ordering', due to the implicit
  /// fallback return value.
  DefaultedOperator,
};

/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
                                     SourceLocation Loc,
                                     ComparisonCategoryUsage Usage);

/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);

/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);

/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);

Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
                          SourceLocation NamespcLoc, CXXScopeSpec &SS,
                          SourceLocation IdentLoc,
                          IdentifierInfo *NamespcName,
                          const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);

Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc,
                             SourceLocation AliasLoc, IdentifierInfo *Alias,
                             CXXScopeSpec &SS, SourceLocation IdentLoc,
                             IdentifierInfo *Ident);

void FilterUsingLookup(Scope *S, LookupResult &lookup);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(BaseUsingDecl *BUD, NamedDecl *Target,
                          const LookupResult &PreviousDecls,
                          UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, BaseUsingDecl *BUD,
                                      NamedDecl *Target,
                                      UsingShadowDecl *PrevDecl);

bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
                                 bool HasTypenameKeyword,
                                 const CXXScopeSpec &SS,
                                 SourceLocation NameLoc,
                                 const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename,
                             const CXXScopeSpec &SS,
                             const DeclarationNameInfo &NameInfo,
                             SourceLocation NameLoc,
                             const LookupResult *R = nullptr,
                             const UsingDecl *UD = nullptr);

NamedDecl *BuildUsingDeclaration(
    Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
    bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
    DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
    const ParsedAttributesView &AttrList, bool IsInstantiation,
    bool IsUsingIfExists);
NamedDecl *BuildUsingEnumDeclaration(Scope *S, AccessSpecifier AS,
                                     SourceLocation UsingLoc,
                                     SourceLocation EnumLoc,
                                     SourceLocation NameLoc, EnumDecl *ED);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
                              ArrayRef<NamedDecl *> Expansions);

bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);

/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
                          ConstructorUsingShadowDecl *DerivedShadow);

Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
                            SourceLocation UsingLoc,
                            SourceLocation TypenameLoc, CXXScopeSpec &SS,
                            UnqualifiedId &Name, SourceLocation EllipsisLoc,
                            const ParsedAttributesView &AttrList);
Decl *ActOnUsingEnumDeclaration(Scope *CurScope, AccessSpecifier AS,
                                SourceLocation UsingLoc,
                                SourceLocation EnumLoc, const DeclSpec &);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
                            MultiTemplateParamsArg TemplateParams,
                            SourceLocation UsingLoc, UnqualifiedId &Name,
                            const ParsedAttributesView &AttrList,
                            TypeResult Type, Decl *DeclFromDeclSpec);

/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult BuildCXXConstructExpr(
    SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl,
    CXXConstructorDecl *Constructor, MultiExprArg Exprs,
    bool HadMultipleCandidates, bool IsListInitialization,
    bool IsStdInitListInitialization, bool RequiresZeroInit,
    unsigned ConstructKind, SourceRange ParenRange);

/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult BuildCXXConstructExpr(
    SourceLocation ConstructLoc, QualType DeclInitType,
    CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs,
    bool HadMultipleCandidates, bool IsListInitialization,
    bool IsStdInitListInitialization, bool RequiresZeroInit,
    unsigned ConstructKind, SourceRange ParenRange);

// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult BuildCXXConstructExpr(
    SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl,
    CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs,
    bool HadMultipleCandidates, bool IsListInitialization,
    bool IsStdInitListInitialization, bool RequiresZeroInit,
    unsigned ConstructKind, SourceRange ParenRange);

ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);

/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
                            ParmVarDecl *Param);

/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
                                  ParmVarDecl *Param);

/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);

/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
  // Pointer to allow copying
  Sema *Self;
  // We order exception specifications thus:
  // noexcept is the most restrictive, but is only used in C++11.
  // throw() comes next.
  // Then a throw(collected exceptions)
  // Finally no specification, which is expressed as noexcept(false).
  // throw(...) is used instead if any called function uses it.
  ExceptionSpecificationType ComputedEST;
  llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
  SmallVector<QualType, 4> Exceptions;

  // Drop everything collected so far (both the dedup set and the list).
  void ClearExceptions() {
    ExceptionsSeen.clear();
    Exceptions.clear();
  }

public:
  explicit ImplicitExceptionSpecification(Sema &Self)
      : Self(&Self), ComputedEST(EST_BasicNoexcept) {
    // Pre-C++11 there is no noexcept; start from the strictest
    // dynamic-exception-specification instead.
    if (!Self.getLangOpts().CPlusPlus11)
      ComputedEST = EST_DynamicNone;
  }

  /// Get the computed exception specification type.
  ExceptionSpecificationType getExceptionSpecType() const {
    assert(!isComputedNoexcept(ComputedEST) &&
           "noexcept(expr) should not be a possible result");
    return ComputedEST;
  }

  /// The number of exceptions in the exception specification.
  unsigned size() const { return Exceptions.size(); }

  /// The set of exceptions in the exception specification.
  const QualType *data() const { return Exceptions.data(); }

  /// Integrate another called method into the collected data.
  void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

  /// Integrate an invoked expression into the collected data.
  void CalledExpr(Expr *E) { CalledStmt(E); }

  /// Integrate an invoked statement into the collected data.
  void CalledStmt(Stmt *S);

  /// Overwrite an EPI's exception specification with this
  /// computed exception specification.
  FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
    FunctionProtoType::ExceptionSpecInfo ESI;
    ESI.Type = getExceptionSpecType();
    if (ESI.Type == EST_Dynamic) {
      ESI.Exceptions = Exceptions;
    } else if (ESI.Type == EST_None) {
      /// C++11 [except.spec]p14:
      ///   The exception-specification is noexcept(false) if the set of
      ///   potential exceptions of the special member function contains "any"
      ESI.Type = EST_NoexceptFalse;
      ESI.NoexceptExpr =
          Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get();
    }
    return ESI;
  }
};

/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD);

/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
                             ExceptionSpecificationType &EST);

/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
                                 ExceptionSpecificationType EST,
                                 ArrayRef<ParsedType> DynamicExceptions,
                                 ArrayRef<SourceRange> DynamicExceptionRanges,
                                 Expr *NoexceptExpr,
                                 SmallVectorImpl<QualType> &Exceptions,
                                 FunctionProtoType::ExceptionSpecInfo &ESI);

/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);

/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Produce notes explaining why a defaulted function was defined as deleted. void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. 
void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. 
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// Check a completed declaration of an implicit special member. void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD); /// Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// Whether this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression. ExprResult MaybeBindToTemporary(Expr *E); /// Wrap the expression in a ConstantExpr if it is a potential immediate /// invocation. 
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, QualType DeclInitType, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr *> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); // Checks that the vector type should be initialized from a scalar // by splatting the value rather than populating a single element. // This is the case for AltiVecVector types as well as with // AltiVecPixel and AltiVecBool when -faltivec-src-compat=xl is specified. bool ShouldSplatAltivecScalarInCast(const VectorType *VecTy); /// ActOnCXXNamedCast - Parse /// {dynamic,static,reinterpret,const,addrspace}_cast's. 
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl, ExprResult Operand, SourceLocation RParenLoc); ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI, Expr *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ). 
ExprResult ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS,
                            tok::TokenKind Operator,
                            SourceLocation EllipsisLoc, Expr *RHS,
                            SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(UnresolvedLookupExpr *Callee,
                            SourceLocation LParenLoc, Expr *LHS,
                            BinaryOperatorKind Operator,
                            SourceLocation EllipsisLoc, Expr *RHS,
                            SourceLocation RParenLoc,
                            Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
                                 BinaryOperatorKind Operator);

//// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);

/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);

/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();

/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;

/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
  Sema &S;
  QualType OldCXXThisTypeOverride;
  bool Enabled;

public:
  /// Introduce a new scope where 'this' may be allowed (when enabled),
  /// using the given declaration (which is either a class template or a
  /// class) along with the qualifiers placed on '*this'.
  CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
                   bool Enabled = true);

  ~CXXThisScopeRAII();
};

/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \return returns 'true' if failed, 'false' if success.
bool CheckCXXThisCapture(
    SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true,
    const unsigned *const FunctionScopeIndexToStopAt = nullptr,
    bool ByCopy = false);

/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);

/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

ExprResult ActOnObjCAvailabilityCheckExpr(
    llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc,
    SourceLocation RParen);

/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);

//// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
                         bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);

/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
                                     SourceLocation LParenOrBraceLoc,
                                     MultiExprArg Exprs,
                                     SourceLocation RParenOrBraceLoc,
                                     bool ListInitialization);

ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
                                     SourceLocation LParenLoc,
                                     MultiExprArg Exprs,
                                     SourceLocation RParenLoc,
                                     bool ListInitialization);

/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
                       SourceLocation PlacementLParen,
                       MultiExprArg PlacementArgs,
                       SourceLocation PlacementRParen,
                       SourceRange TypeIdParens, Declarator &D,
                       Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
                       SourceLocation PlacementLParen,
                       MultiExprArg PlacementArgs,
                       SourceLocation PlacementRParen,
                       SourceRange TypeIdParens, QualType AllocType,
                       TypeSourceInfo *AllocTypeInfo,
                       Optional<Expr *> ArraySize,
                       SourceRange DirectInitRange, Expr *Initializer);

/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;

/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
                                          SourceLocation Loc);

bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
                        SourceRange R);

/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
  /// Only look for allocation functions in the global scope.
  AFS_Global,

  /// Only look for allocation functions in the scope of the
  /// allocated class.
  AFS_Class,

  /// Look for allocation functions in both the global scope
  /// and in the scope of the allocated class.
  AFS_Both
};

/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. 
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc,
                               ParsedType LhsTy, Expr *DimExpr,
                               SourceLocation RParen);

ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc,
                               TypeSourceInfo *TSInfo, Expr *DimExpr,
                               SourceLocation RParen);

/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc,
                                Expr *Queried, SourceLocation RParen);

ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc,
                                Expr *Queried, SourceLocation RParen);

ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base,
                                        SourceLocation OpLoc,
                                        tok::TokenKind OpKind,
                                        ParsedType &ObjectType,
                                        bool &MayBePseudoDestructor);

ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc,
                                     tok::TokenKind OpKind,
                                     const CXXScopeSpec &SS,
                                     TypeSourceInfo *ScopeType,
                                     SourceLocation CCLoc,
                                     SourceLocation TildeLoc,
                                     PseudoDestructorTypeStorage DestroyedType);

ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
                                     SourceLocation OpLoc,
                                     tok::TokenKind OpKind,
                                     CXXScopeSpec &SS,
                                     UnqualifiedId &FirstTypeName,
                                     SourceLocation CCLoc,
                                     SourceLocation TildeLoc,
                                     UnqualifiedId &SecondTypeName);

ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
                                     SourceLocation OpLoc,
                                     tok::TokenKind OpKind,
                                     SourceLocation TildeLoc,
                                     const DeclSpec& DS);

/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);

MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
                               bool BoundToLvalueReference);

/// Convenience overload: finishes the full-expression at the expression's
/// own source location, or at an invalid location when \p Expr is null.
/// Delegates to the three-argument overload below.
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
  return ActOnFinishFullExpr(
      Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
                               bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);

// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
// Complete an enum decl, maybe without a scope spec.
bool RequireCompleteEnumDecl(EnumDecl *D, SourceLocation L,
                             CXXScopeSpec *SS = nullptr);

DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
                                bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);

/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);

/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
                              SourceLocation ColonColonLoc,
                              CXXScopeSpec &SS);

bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
                                     bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);

/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
  /// The type of the object, if we're parsing nested-name-specifier in
  /// a member access expression.
  ParsedType ObjectType;

  /// The identifier preceding the '::'.
  IdentifierInfo *Identifier;

  /// The location of the identifier.
  SourceLocation IdentifierLoc;

  /// The location of the '::'.
  SourceLocation CCLoc;

  /// Creates info object for the most typical case.
  NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
                     SourceLocation ColonColonLoc,
                     ParsedType ObjectType = ParsedType())
      : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
        CCLoc(ColonColonLoc) {
  }

  /// As above, but wraps an already-resolved QualType as the object type.
  NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
                     SourceLocation ColonColonLoc, QualType ObjectType)
      : ObjectType(ParsedType::make(ObjectType)), Identifier(II),
        IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
  }
};

bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
                                  NestedNameSpecInfo &IdInfo);

bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
                                 bool EnteringContext, CXXScopeSpec &SS,
                                 NamedDecl *ScopeLookupResult,
                                 bool ErrorRecoveryLookup,
                                 bool *IsCorrectedToColon = nullptr,
                                 bool OnlyNamespace = false);

/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case do not emit error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
/// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. 
/// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. 
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind, Expr *TrailingRequiresClause); /// Number lambda for linkage purposes if necessary. void handleLambdaNumbering( CXXRecordDecl *Class, CXXMethodDecl *Method, Optional<std::tuple<bool, unsigned, unsigned, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. 
/// Parser-facing wrapper around buildLambdaInitCaptureInitialization:
/// derives the direct-init flag from the capture's init kind (anything other
/// than copy-init is treated as direct-init) and wraps the deduced capture
/// type as a ParsedType for the parser.
ParsedType actOnLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
    IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
  return ParsedType::make(buildLambdaInitCaptureInitialization(
      Loc, ByRef, EllipsisLoc, None, Id,
      InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
    Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
    Expr *&Init);

/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
                                        QualType InitCaptureType,
                                        SourceLocation EllipsisLoc,
                                        IdentifierInfo *Id,
                                        unsigned InitStyle, Expr *Init);

/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);

/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);

/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
                                              ArrayRef<NamedDecl *> TParams,
                                              SourceLocation RAngleLoc,
                                              ExprResult RequiresClause);

/// Introduce the lambda parameters into scope.
void addLambdaParameters(
    ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
    CXXMethodDecl *CallOperator, Scope *CurScope);

/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType, CallingConv CC); /// Define the "body" of the conversion from a lambda object to a /// function pointer. 
/// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); /// Check whether the given expression is a valid constraint expression. /// A diagnostic is emitted if it is not, false is returned, and /// PossibleNonPrimary will be set to true if the failure might be due to a /// non-primary expression being used as an atomic constraint. bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(), bool *PossibleNonPrimary = nullptr, bool IsTrailingRequiresClause = false); private: /// Caches pairs of template-like decls whose associated constraints were /// checked for subsumption and whether or not the first's constraints did in /// fact subsume the second's. llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache; /// Caches the normalized associated constraints of declarations (concepts or /// constrained declarations). If an error occurred while normalizing the /// associated constraints of the template or concept, nullptr will be cached /// here. 
llvm::DenseMap<NamedDecl *, NormalizedConstraint *> NormalizationCache; llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &> SatisfactionCache; public: const NormalizedConstraint * getNormalizedAssociatedConstraints( NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints); /// \brief Check whether the given declaration's associated constraints are /// at least as constrained as another declaration's according to the /// partial ordering of constraints. /// /// \param Result If no error occurred, receives the result of true if D1 is /// at least as constrained as D2, and false otherwise. /// /// \returns true if an error occurred, false otherwise. bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2, bool &Result); /// If D1 was not at least as constrained as D2, but would've been if a pair /// of atomic constraints involved had been declared in a concept and not /// repeated in two separate places in code. /// \returns true if such a diagnostic was emitted, false otherwise. bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1, ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2); /// \brief Check whether the given list of constraint expressions are /// satisfied (as if in a 'conjunction') given template arguments. /// \param Template the template-like entity that triggered the constraints /// check (either a concept or a constrained entity). /// \param ConstraintExprs a list of constraint expressions, treated as if /// they were 'AND'ed together. /// \param TemplateArgs the list of template arguments to substitute into the /// constraint expression. /// \param TemplateIDRange The source range of the template id that /// caused the constraints check. /// \param Satisfaction if true is returned, will contain details of the /// satisfaction, with enough information to diagnose an unsatisfied /// expression. 
/// \returns true if an error occurred and satisfaction could not be checked, /// false otherwise. bool CheckConstraintSatisfaction( const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction); /// \brief Check whether the given non-dependent constraint expression is /// satisfied. Returns false and updates Satisfaction with the satisfaction /// verdict if successful, emits a diagnostic and returns true if an error /// occurred and satisfaction could not be determined. /// /// \returns true if an error occurred, false otherwise. bool CheckConstraintSatisfaction(const Expr *ConstraintExpr, ConstraintSatisfaction &Satisfaction); /// Check whether the given function decl's trailing requires clause is /// satisfied, if any. Returns false and updates Satisfaction with the /// satisfaction verdict if successful, emits a diagnostic and returns true if /// an error occurred and satisfaction could not be determined. /// /// \returns true if an error occurred, false otherwise. bool CheckFunctionConstraints(const FunctionDecl *FD, ConstraintSatisfaction &Satisfaction, SourceLocation UsageLoc = SourceLocation()); /// \brief Ensure that the given template arguments satisfy the constraints /// associated with the given template, emitting a diagnostic if they do not. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateArgs The converted, canonicalized template arguments. /// /// \param TemplateIDRange The source range of the template id that /// caused the constraints check. /// /// \returns true if the constraints are not satisfied or could not be checked /// for satisfaction, false if the constraints are satisfied. 
bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. /// \param First whether this is the first time an unsatisfied constraint is /// diagnosed for this error. void DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction, bool First = true); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. void DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction, bool First = true); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. 
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const 
ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. 
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// Mark destructors of virtual bases of this class referenced. In the Itanium /// C++ ABI, this is done when emitting a destructor for any non-abstract /// class. In the Microsoft C++ ABI, this is done any time a class's /// destructor is referenced. void MarkVirtualBaseDestructorsReferenced( SourceLocation Location, CXXRecordDecl *ClassDecl, llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr); /// Do semantic checks to allow the complete destructor variant to be emitted /// when the destructor is defined in another translation unit. In the Itanium /// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they /// can be emitted in separate TUs. To emit the complete variant, run a subset /// of the checks performed when emitting a regular destructor. void CheckCompleteDestructorVariant(SourceLocation CurrentLocation, CXXDestructorDecl *Dtor); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. 
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. The caller must /// ensure that referenceDLLExportedClassMethods is called some point later /// when all outer classes of Class are complete. void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); /// Add gsl::Pointer attribute to std::container::iterator /// \param ND The declaration that introduces the name /// std::container::iterator. \param UnderlyingRecord The record named by ND. void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord); /// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types. void inferGslOwnerPointerAttribute(CXXRecordDecl *Record); /// Add [[gsl::Pointer]] attributes for std:: types. void inferGslPointerAttribute(TypedefNameDecl *TD); void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record); /// Check that the C++ class annoated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect. 
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Decl *Template, llvm::function_ref<Scope *()> EnterScope); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl 
*ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD); bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM); void CheckDelayedMemberExceptionSpecs(); bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD, DefaultedComparisonKind DCK); void DeclareImplicitEqualityComparison(CXXRecordDecl *RD, FunctionDecl *Spaceship); void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD, DefaultedComparisonKind DCK); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. 
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbiguousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, 
CXXRecordDecl *NamingClass, QualType BaseType); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType, SourceLocation Loc, const PartialDiagnostic &Diag); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType) { return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType, SourceLocation(), PDiag()); } void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. static NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum TemplateNameIsRequiredTag { TemplateNameIsRequired }; /// Whether and why a template name is required in this lookup. class RequiredTemplateKind { public: /// Template name is required if TemplateKWLoc is valid. RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation()) : TemplateKW(TemplateKWLoc) {} /// Template name is unconditionally required. 
RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {} SourceLocation getTemplateKeywordLoc() const { return TemplateKW.getValueOr(SourceLocation()); } bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); } bool isRequired() const { return TemplateKW != SourceLocation(); } explicit operator bool() const { return isRequired(); } private: llvm::Optional<SourceLocation> TemplateKW; }; enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName( LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, RequiredTemplateKind RequiredTemplate = SourceLocation(), AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization, bool Disambiguation = false); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. 
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg, bool HasTypeConstraint); bool ActOnTypeConstraint(const CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool BuildTypeConstraint(const CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc, bool AllowUnexpandedPack); bool AttachTypeConstraint(NestedNameSpecifierLoc NS, DeclarationNameInfo NameInfo, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool AttachTypeConstraint(AutoTypeLoc TL, NonTypeTemplateParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool RequireStructuralType(QualType T, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); 
NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid, bool SuppressDiagnostic = false); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); /// Get a template argument mapping the given template parameter to itself, /// e.g. 
for X in \c template<int X>, this would return an expression template /// argument referencing X. TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param, SourceLocation Location); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); /// Get the specialization of the given variable template corresponding to /// the specified argument list, or a null-but-valid result if the arguments /// are dependent. DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); /// Form a reference to the specialization of the given variable template /// corresponding to the specified argument list, or a null-but-valid result /// if the arguments are dependent. 
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &ConceptNameInfo, NamedDecl *FoundDecl, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, CXXScopeSpec &SS, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation 
PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. 
CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// Check that the given template arguments can be be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. /// /// \param ConstraintsNotSatisfied If provided, and an error occured, will /// receive true if the cause for the error is the associated constraints of /// the template not being satisfied by the template arguments. /// /// \returns true if an error occurred, false otherwise. 
bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true, bool *ConstraintsNotSatisfied = nullptr); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param, TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. 
/// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, TypeSourceInfo **TSI, bool DeducedTSTContext); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, bool DeducedTSTContext = true); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Concepts //===--------------------------------------------------------------------===// Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); RequiresExprBodyDecl * ActOnStartRequiresExpr(SourceLocation RequiresKWLoc, ArrayRef<ParmVarDecl *> LocalParameters, Scope *BodyScope); void ActOnFinishRequiresExpr(); concepts::Requirement *ActOnSimpleRequirement(Expr *E); concepts::Requirement *ActOnTypeRequirement( SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo *TypeName, TemplateIdAnnotation 
*TemplateId); concepts::Requirement *ActOnCompoundRequirement(Expr *E, SourceLocation NoexceptLoc); concepts::Requirement * ActOnCompoundRequirement( Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, unsigned Depth); concepts::Requirement *ActOnNestedRequirement(Expr *Constraint); concepts::ExprRequirement * BuildExprRequirement( Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::ExprRequirement * BuildExprRequirement( concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type); concepts::TypeRequirement * BuildTypeRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); concepts::NestedRequirement *BuildNestedRequirement(Expr *E); concepts::NestedRequirement * BuildNestedRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc, RequiresExprBodyDecl *Body, ArrayRef<ParmVarDecl *> LocalParameters, ArrayRef<concepts::Requirement *> Requirements, SourceLocation ClosingBraceLoc); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// An arbitrary expression. 
UPPC_Expression = 0, /// The base type of a class type. UPPC_BaseType, /// The type of an arbitrary declaration. UPPC_DeclarationType, /// The type of a data member. UPPC_DataMemberType, /// The size of a bit-field. UPPC_BitFieldWidth, /// The expression in a static assertion. UPPC_StaticAssertExpression, /// The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// The enumerator value. UPPC_EnumeratorValue, /// A using declaration. UPPC_UsingDeclaration, /// A friend declaration. UPPC_FriendDeclaration, /// A declaration qualifier. UPPC_DeclarationQualifier, /// An initializer. UPPC_Initializer, /// A default argument. UPPC_DefaultArgument, /// The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// The type of an exception. UPPC_ExceptionType, /// Partial specialization. UPPC_PartialSpecialization, /// Microsoft __if_exists. UPPC_IfExists, /// Microsoft __if_not_exists. UPPC_IfNotExists, /// Lambda expression. UPPC_Lambda, /// Block expression. UPPC_Block, /// A type constraint. UPPC_TypeConstraint, // A requirement in a requires-expression. UPPC_Requirement, // A requires-clause. UPPC_RequiresClause, }; /// Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostc should be emitted. /// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. 
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given requirees-expression contains an unexpanded reference to one /// of its own parameter packs, diagnose the error. /// /// \param RE The requiress-expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPackInRequiresExpr(RequiresExpr *RE); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. 
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. 
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. 
/// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. 
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. 
/// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// Template argument deduction was successful. TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// Template argument deduction did not deduce a value for every /// expansion of an expanded template parameter pack. TDK_IncompletePack, /// Template argument deduction produced inconsistent /// deduced values for the given template parameter. 
TDK_Inconsistent, /// Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// After substituting deduced template arguments, a dependent /// parameter type did not match the corresponding argument. TDK_DeducedMismatch, /// After substituting deduced template arguments, an element of /// a dependent parameter type did not match the corresponding element /// of the corresponding argument (when deducing from an initializer list). TDK_DeducedMismatchNested, /// A non-depnedent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// When performing template argument deduction for a function /// template, there were too many call arguments. TDK_TooManyArguments, /// When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// Checking non-dependent argument conversions failed. TDK_NonDependentConversionFailure, /// The deduced arguments did not satisfy the constraints associated /// with the template. TDK_ConstraintsNotSatisfied, /// Deduction failed; that's all we know. TDK_MiscellaneousDeductionFailure, /// CUDA Target attributes do not match. 
TDK_CUDATargetMismatch }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// brief A function argument from which we performed template argument // deduction for a call. struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, 
QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); TypeSourceInfo *ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. 
void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate( FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2, bool Reversed = false); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void 
MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. 
ExplicitTemplateArgumentSubstitution, /// We are substituting template argument determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are computing the exception specification for a defaulted special /// member function. ExceptionSpecEvaluation, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are instantiating a requirement of a requires expression. RequirementInstantiation, /// We are checking the satisfaction of a nested requirement of a requires /// expression. NestedRequirementConstraintsCheck, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are declaring an implicit 'operator==' for a defaulted /// 'operator<=>'. DeclaringImplicitEqualityComparison, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, // We are checking the constraints associated with a constrained entity or // the constraint expression of a concept. This includes the checks that // atomic constraints have the type 'bool' and that they can be constant // evaluated. ConstraintsCheck, // We are substituting template arguments into a constraint expression. ConstraintSubstitution, // We are normalizing a constraint expression. 
ConstraintNormalization, // We are substituting into the parameter mapping of an atomic constraint // during normalization. ParameterMappingSubstitution, /// We are rewriting a comparison operator in terms of an operator<=>. RewritingOperatorAsSpaceship, /// We are initializing a structured binding. InitializingStructuredBinding, /// We are marking a class as __dllexport. MarkingClassDllexported, /// Added for Template instantiation observation. /// Memoization means we are _not_ instantiating a template because /// it is already instantiated (but we entered a context where we /// would have had to if it was not already instantiated). Memoization } Kind; /// Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// The entity that is being synthesized. Decl *Entity; /// The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// The special member being declared or defined. CXXSpecialMember SpecialMember; }; ArrayRef<TemplateArgument> template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. 
sema::TemplateDeductionInfo *DeductionInfo; /// The source range that covers the construct that cause /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; CodeSynthesisContext() : Kind(TemplateInstantiation), SavedInNonInstantiationSFINAEContext(false), Entity(nullptr), Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; }; /// List of active code synthesis contexts. /// /// This vector is treated as a stack. As synthesis of one entity requires /// synthesis of another, additional contexts are pushed onto the stack. SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts; /// Specializations whose definitions are currently being instantiated. llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations; /// Non-dependent types used in templates that have already been instantiated /// by some template instantiation. llvm::DenseSet<QualType> InstantiatedNonDependentTypes; /// Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> CodeSynthesisContextLookupModules; /// Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instanting a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// Map from the most recent declaration of a namespace to the most /// recent visible declaration of that namespace. 
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache; /// Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// The number of \p CodeSynthesisContexts that are not template /// instantiations and, therefore, should not be counted as part of the /// instantiation depth. /// /// When the instantiation depth reaches the user-configurable limit /// \p LangOptions::InstantiationDepth we will abort instantiation. // FIXME: Should we have a similar limit for other forms of synthesis? unsigned NonInstantiationEntries; /// The depth of the context stack at the point when the most recent /// error or warning was produced. /// /// This value is used to suppress printing of redundant context stacks /// when there are multiple errors or warnings in the same instantiation. // FIXME: Does this belong in Sema? It's tough to implement it anywhere else. unsigned LastEmittedCodeSynthesisContextDepth = 0; /// The template instantiation callbacks to trace or track /// instantiations (objects can be chained). /// /// This callbacks is used to print, trace or track template /// instantiations as they are being constructed. std::vector<std::unique_ptr<TemplateInstantiationCallback>> TemplateInstCallbacks; /// The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information. 
class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and evaluates true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument in a /// template-id. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintsCheck {}; /// \brief Note that we are checking the constraints associated with some /// constrained entity (a concept declaration or a template with associated /// constraints). InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintsCheck, NamedDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintSubstitution {}; /// \brief Note that we are checking a constraint expression associated /// with a template declaration or as part of the satisfaction check of a /// concept. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintSubstitution, NamedDecl *Template, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange); struct ConstraintNormalization {}; /// \brief Note that we are normalizing a constraint expression. 
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          ConstraintNormalization, NamedDecl *Template,
                          SourceRange InstantiationRange);

    struct ParameterMappingSubstitution {};

    /// \brief Note that we are substituting into the parameter mapping of an
    /// atomic constraint during constraint normalization.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          ParameterMappingSubstitution, NamedDecl *Template,
                          SourceRange InstantiationRange);

    /// \brief Note that we are substituting template arguments into a part of
    /// a requirement of a requires expression.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          concepts::Requirement *Req,
                          sema::TemplateDeductionInfo &DeductionInfo,
                          SourceRange InstantiationRange = SourceRange());

    /// \brief Note that we are checking the satisfaction of the constraint
    /// expression inside of a nested requirement.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          concepts::NestedRequirement *Req, ConstraintsCheck,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we have finished instantiating this template.
    void Clear();

    ~InstantiatingTemplate() { Clear(); }

    /// Determines whether we have exceeded the maximum
    /// recursive template instantiations.
    bool isInvalid() const { return Invalid; }

    /// Determine whether we are already instantiating this
    /// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. 
bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; bool PrevLastDiagnosticIgnored; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE), PrevLastDiagnosticIgnored( SemaRef.getDiagnostics().isLastDiagnosticIgnored()) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; SemaRef.getDiagnostics().setLastDiagnosticIgnored( PrevLastDiagnosticIgnored); } /// Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. 
    SFINAETrap Trap;
    bool PrevDisableTypoCorrection;

  public:
    explicit TentativeAnalysisScope(Sema &SemaRef)
        : SemaRef(SemaRef), Trap(SemaRef, true),
          PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
      SemaRef.DisableTypoCorrection = true;
    }
    ~TentativeAnalysisScope() {
      SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
    }
  };

  /// The current instantiation scope used to store local
  /// variables.
  LocalInstantiationScope *CurrentInstantiationScope;

  /// Tracks whether we are in a context where typo correction is
  /// disabled.
  bool DisableTypoCorrection;

  /// The number of typos corrected by CorrectTypo.
  unsigned TyposCorrected;

  typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
  typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;

  /// A cache containing identifiers for which typo correction failed and
  /// their locations, so that repeated attempts to correct an identifier in a
  /// given location are ignored if typo correction already failed for it.
  IdentifierSourceLocations TypoCorrectionFailures;

  /// Worker object for performing CFG-based warnings.
  sema::AnalysisBasedWarnings AnalysisWarnings;
  threadSafety::BeforeSet *ThreadSafetyDeclCache;

  /// An entity for which implicit template instantiation is required.
  ///
  /// The source location associated with the declaration is the first place in
  /// the source code where the declaration was "used". It is not necessarily
  /// the point of instantiation (which will be either before or after the
  /// namespace-scope declaration that triggered this implicit instantiation).
  /// However, it is the location that diagnostics should generally refer to,
  /// because users will need to know what code triggered the instantiation.
  typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;

  /// The queue of implicit template instantiations that are required
  /// but have not yet been performed.
  std::deque<PendingImplicitInstantiation> PendingInstantiations;

  /// Queue of implicit template instantiations that cannot be performed
  /// eagerly.
  SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;

  /// RAII scope that (when enabled) saves the sets of pending implicit
  /// instantiations and vtable uses on entry and restores them on exit;
  /// perform() flushes whatever accumulated while the scope was active.
  class GlobalEagerInstantiationScope {
  public:
    GlobalEagerInstantiationScope(Sema &S, bool Enabled)
        : S(S), Enabled(Enabled) {
      if (!Enabled) return;

      SavedPendingInstantiations.swap(S.PendingInstantiations);
      SavedVTableUses.swap(S.VTableUses);
    }

    // Define the vtables and perform the instantiations accumulated since
    // this scope was entered.
    void perform() {
      if (Enabled) {
        S.DefineUsedVTables();
        S.PerformPendingInstantiations();
      }
    }

    ~GlobalEagerInstantiationScope() {
      if (!Enabled) return;

      // Restore the set of pending vtables.
      assert(S.VTableUses.empty() &&
             "VTableUses should be empty before it is discarded.");
      S.VTableUses.swap(SavedVTableUses);

      // Restore the set of pending implicit instantiations.
      if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) {
        assert(S.PendingInstantiations.empty() &&
               "PendingInstantiations should be empty before it is discarded.");
        S.PendingInstantiations.swap(SavedPendingInstantiations);
      } else {
        // Template instantiations in the PCH may be delayed until the TU.
        S.PendingInstantiations.swap(SavedPendingInstantiations);
        S.PendingInstantiations.insert(S.PendingInstantiations.end(),
                                       SavedPendingInstantiations.begin(),
                                       SavedPendingInstantiations.end());
      }
    }

  private:
    Sema &S;
    SmallVector<VTableUse, 16> SavedVTableUses;
    std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
    bool Enabled;
  };

  /// The queue of implicit template instantiations that are required
  /// and must be performed within the current local scope.
  ///
  /// This queue is only used for member functions of local classes in
  /// templates, which must be instantiated in the same scope as their
  /// enclosing function, so that they can reference function-local
  /// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class LocalEagerInstantiationScope { public: LocalEagerInstantiationScope(Sema &S) : S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); } ~LocalEagerInstantiationScope() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; /// A helper class for building up ExtParameterInfos. class ExtParameterInfoBuilder { SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos; bool HasInteresting = false; public: /// Set the ExtParameterInfo for the parameter at the given index, /// void set(unsigned index, FunctionProtoType::ExtParameterInfo info) { assert(Infos.size() <= index); Infos.resize(index); Infos.push_back(info); if (!HasInteresting) HasInteresting = (info != FunctionProtoType::ExtParameterInfo()); } /// Return a pointer (suitable for setting in an ExtProtoInfo) to the /// ExtParameterInfo array we've built up. 
const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. 
/// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); bool SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateArgumentListInfo &Outputs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the name and return type of a defaulted 'operator<=>' to form /// an implicit 'operator=='. 
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD, FunctionDecl *Spaceship); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateDefaultCtorDefaultArgs(CXXConstructorDecl *Ctor); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void 
InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); bool CheckInstantiatedFunctionTemplateConstraints( SourceLocation PointOfInstantiation, FunctionDecl *Decl, ArrayRef<TemplateArgument> TemplateArgs, ConstraintSatisfaction &Satisfaction); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); 
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. 
enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const 
*ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc, const ParsedAttributesView &AttrList); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc, ArrayRef<IdentifierLocPair> IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. 
  void actOnObjCTypeArgsOrProtocolQualifiers(
      Scope *S, ParsedType baseType, SourceLocation lAngleLoc,
      ArrayRef<IdentifierInfo *> identifiers,
      ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc,
      SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs,
      SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc,
      SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc,
      bool warnOnIncompleteProtocols);

  /// Build an Objective-C protocol-qualified 'id' type where no
  /// base type was specified.
  TypeResult actOnObjCProtocolQualifierType(
      SourceLocation lAngleLoc, ArrayRef<Decl *> protocols,
      ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc);

  /// Build a specialized and/or protocol-qualified Objective-C type.
  TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
      Scope *S, SourceLocation Loc, ParsedType BaseType,
      SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs,
      SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc,
      ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs,
      SourceLocation ProtocolRAngleLoc);

  /// Build an Objective-C type parameter type.
  QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
                                  SourceLocation ProtocolLAngleLoc,
                                  ArrayRef<ObjCProtocolDecl *> Protocols,
                                  ArrayRef<SourceLocation> ProtocolLocs,
                                  SourceLocation ProtocolRAngleLoc,
                                  bool FailOnError = false);

  /// Build an Objective-C object pointer type.
  QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc,
                               SourceLocation TypeArgsLAngleLoc,
                               ArrayRef<TypeSourceInfo *> TypeArgs,
                               SourceLocation TypeArgsRAngleLoc,
                               SourceLocation ProtocolLAngleLoc,
                               ArrayRef<ObjCProtocolDecl *> Protocols,
                               ArrayRef<SourceLocation> ProtocolLocs,
                               SourceLocation ProtocolRAngleLoc,
                               bool FailOnError = false);

  /// Ensure attributes are consistent with type.
  /// \param [in, out] Attributes The attributes to check; they will
  /// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). 
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); void deduceOpenCLAddressSpace(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. 
ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); 
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; /// Check whether the declared result type of the given Objective-C /// method declaration is compatible with the method's class. 
ResultTypeCompatibilityKind checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method, const ObjCInterfaceDecl *CurrentClass); void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method, ObjCMethodDecl *overridden); void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaAlignPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaAlignPack(PragmaAlignPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaAlignPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). 
void ActOnPragmaMSPointersToMembers(
    LangOptions::PragmaMSPointersToMembersKind Kind,
    SourceLocation PragmaLoc);

/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
                           SourceLocation PragmaLoc, MSVtorDispMode Value);

/// Kinds of segments named by the MS segment pragmas handled below
/// (bss_seg/data_seg/const_seg/code_seg).
enum PragmaSectionKind {
  PSK_DataSeg,
  PSK_BSSSeg,
  PSK_ConstSeg,
  PSK_CodeSeg,
};

// NOTE(review): presumably verifies that SectionName is always used with
// consistent SectionFlags; the first overload is keyed to the declaration
// that triggered the use, the second to the pragma location — confirm in
// the definitions.
bool UnifySection(StringRef SectionName, int SectionFlags,
                  NamedDecl *TheDecl);
bool UnifySection(StringRef SectionName, int SectionFlags,
                  SourceLocation PragmaSectionLocation);

/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
                      PragmaMsStackAction Action,
                      llvm::StringRef StackSlotLabel,
                      StringLiteral *SegmentName,
                      llvm::StringRef PragmaName);

/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags,
                          StringLiteral *SegmentName);

/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
                          StringLiteral *SegmentName);

/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);

/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
                               StringRef Value);

/// Are precise floating point semantics currently enabled?
/// True only when every accuracy-relaxing FP option in the current FP
/// feature state (reassociation, ignoring signed zeros, reciprocal math,
/// approximate library functions) is disabled.
bool isPreciseFPEnabled() {
  return !CurFPFeatures.getAllowFPReassociate() &&
         !CurFPFeatures.getNoSignedZero() &&
         !CurFPFeatures.getAllowReciprocal() &&
         !CurFPFeatures.getAllowApproxFunc();
}

/// ActOnPragmaFloatControl - Call on well-formed \#pragma float_control
void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action,
                             PragmaFloatControlKind Value);

/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier, Scope *curScope,
                       SourceLocation PragmaLoc);

/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC); /// Called on well formed /// \#pragma clang fp reassociate void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled); /// ActOnPragmaFenvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled); /// Called on well formed '\#pragma clang fp' that has option 'exceptions'. void ActOnPragmaFPExceptions(SourceLocation Loc, LangOptions::FPExceptionModeKind); /// Called to set constant rounding mode for floating point operations. void setRoundingMode(SourceLocation Loc, llvm::RoundingMode); /// Called to set exception behavior for floating point operations. void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// a the record decl, to handle '\#pragma pack' and '\#pragma options align'. 
void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on". 
// Accessor for the stored OptimizeOffPragmaLocation; an invalid location
// means no "#pragma clang optimize off" is currently active (see the
// comment on the preceding declaration).
SourceLocation getOptimizeOffPragmaLocation() const {
  return OptimizeOffPragmaLocation;
}

/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);

/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);

/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
/// The alignment is given either as an expression (\p E) or as a type
/// (\p T) in the two overloads below.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
                    bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI,
                    TypeSourceInfo *T, bool IsPackExpansion);

/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
                          Expr *OE);

/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
                       Expr *ParamExpr);

/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);

/// AddAnnotationAttr - Adds an annotation Annot with Args arguments to D.
void AddAnnotationAttr(Decl *D, const AttributeCommonInfo &CI,
                       StringRef Annot, MutableArrayRef<Expr *> Args);

/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
                         Expr *MaxThreads, Expr *MinBlocks);

/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name, bool InInstantiation = false); void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI, ParameterABI ABI); enum class RetainOwnershipKind {NS, CF, OS}; void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI, RetainOwnershipKind K, bool IsTemplateInstantiation); /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size /// attribute to a particular declaration. void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); /// addAMDGPUWavePersEUAttr - Adds an amdgpu_waves_per_eu attribute to a /// particular declaration. void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc); /// Check that the expression co_await promise.final_suspend() shall not be /// potentially-throwing. 
bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend);

//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
// Opaque stack of OpenMP data-sharing attributes; created and torn down by
// Init/DestroyDataSharingAttributesStack() below.
void *VarDataSharingAttributesStack;

/// Bookkeeping for one (possibly nested) 'declare target' context.
struct DeclareTargetContextInfo {
  struct MapInfo {
    OMPDeclareTargetDeclAttr::MapTypeTy MT;
    SourceLocation Loc;
  };
  /// Explicitly listed variables and functions in a 'to' or 'link' clause.
  llvm::DenseMap<NamedDecl *, MapInfo> ExplicitlyMapped;

  /// The 'device_type' as parsed from the clause.
  OMPDeclareTargetDeclAttr::DevTypeTy DT = OMPDeclareTargetDeclAttr::DT_Any;

  /// The directive kind, `begin declare target` or `declare target`.
  OpenMPDirectiveKind Kind;

  /// The directive location.
  SourceLocation Loc;

  DeclareTargetContextInfo(OpenMPDirectiveKind Kind, SourceLocation Loc)
      : Kind(Kind), Loc(Loc) {}
};

/// Number of nested '#pragma omp declare target' directives.
SmallVector<DeclareTargetContextInfo, 4> DeclareTargetNesting;

/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
/// Tear-down of the data-sharing attributes stack.
void DestroyDataSharingAttributesStack();

/// Verify that \p Op is a positive integer constant expression suitable for
/// clause \p CKind; \p StrictlyPositive requires > 0 rather than >= 0 and
/// \p SuppressExprDiags silences expression diagnostics.
/// NOTE(review): parameter semantics inferred from names — confirm against
/// the definition.
ExprResult VerifyPositiveIntegerConstantInClause(
    Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true,
    bool SuppressExprDiags = false);

/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;

/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
                                  unsigned Level) const;

/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;

/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();

/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);

/// Analyzes and checks a loop nest for use by a loop transformation.
///
/// \param Kind The loop transformation directive kind.
/// \param NumLoops How many nested loops the directive is expecting.
/// \param AStmt Associated statement of the transformation directive.
/// \param LoopHelpers [out] The loop analysis result.
/// \param Body [out] The body code nested in \p NumLoops loop.
/// \param OriginalInits [out] Collection of statements and declarations that
///        must have been executed/declared before entering the loop.
///
/// \return Whether there was any error.
bool checkTransformableLoopNest(
    OpenMPDirectiveKind Kind, Stmt *AStmt, int NumLoops,
    SmallVectorImpl<OMPLoopBasedDirective::HelperExprs> &LoopHelpers,
    Stmt *&Body,
    SmallVectorImpl<SmallVector<llvm::PointerUnion<Stmt *, Decl *>, 0>>
        &OriginalInits);

/// Helper to keep information about the current `omp begin/end declare
/// variant` nesting.
struct OMPDeclareVariantScope {
  /// The associated OpenMP context selector.
  OMPTraitInfo *TI;

  /// The associated OpenMP context selector mangling.
  std::string NameSuffix;

  OMPDeclareVariantScope(OMPTraitInfo &TI);
};

/// Return the OMPTraitInfo for the surrounding scope, if any.
/// Returns null when no `declare variant` scope is open.
OMPTraitInfo *getOMPTraitInfoForSurroundingScope() {
  return OMPDeclareVariantScopes.empty() ? nullptr
                                         : OMPDeclareVariantScopes.back().TI;
}

/// The current `omp begin/end declare variant` scopes.
SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes;

/// The current `omp begin/end assumes` scopes.
SmallVector<AssumptionAttr *, 4> OMPAssumeScoped;

/// All `omp assumes` we encountered so far.
SmallVector<AssumptionAttr *, 4> OMPAssumeGlobal;

public:
/// The declarator \p D defines a function in the scope \p S which is nested
/// in an `omp begin/end declare variant` scope. In this method we create a
/// declaration for \p D and rename \p D according to the OpenMP context
/// selector of the surrounding scope. Return all base functions in \p Bases.
void ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope( Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, SmallVectorImpl<FunctionDecl *> &Bases); /// Register \p D as specialization of all base functions in \p Bases in the /// current `omp begin/end declare variant` scope. void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope( Decl *D, SmallVectorImpl<FunctionDecl *> &Bases); /// Act on \p D, a function definition inside of an `omp [begin/end] assumes`. void ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D); /// Can we exit an OpenMP declare variant scope at the moment. bool isInOpenMPDeclareVariantScope() const { return !OMPDeclareVariantScopes.empty(); } /// Given the potential call expression \p Call, determine if there is a /// specialization via the OpenMP declare variant mechanism available. If /// there is, return the specialized call expression, otherwise return the /// original \p Call. ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig); /// Handle a `omp begin declare variant`. void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI); /// Handle a `omp end declare variant`. void ActOnOpenMPEndDeclareVariant(); /// Checks if the variant/multiversion functions are compatible. bool areMultiversionVariantFunctionsCompatible( const FunctionDecl *OldFD, const FunctionDecl *NewFD, const PartialDiagnostic &NoProtoDiagID, const PartialDiagnosticAt &NoteCausedDiagIDAt, const PartialDiagnosticAt &NoSupportDiagIDAt, const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported, bool ConstexprSupported, bool CLinkageMayDiffer); /// Function tries to capture lambda's captured variables in the OpenMP region /// before the original lambda is captured. void tryCaptureOpenMPLambdas(ValueDecl *V); /// Return true if the provided declaration \a VD should be captured by /// reference. 
/// \param Level Relative level of nested OpenMP construct for that the check /// is performed. /// \param OpenMPCaptureLevel Capture level within an OpenMP construct. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level, unsigned OpenMPCaptureLevel) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// If the current region is a range loop-based region, mark the start of the /// loop construct. void startOpenMPCXXRangeFor(); /// Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level, unsigned CapLevel) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level, unsigned CaptureLevel) const; /// Check if the specified global variable must be captured by outer capture /// regions. /// \param Level Relative level of nested OpenMP construct for that /// the check is performed. 
bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level, unsigned CaptureLevel) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind); /// Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed '#pragma omp allocate'. DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses, DeclContext *Owner = nullptr); /// Called on well-formed '#pragma omp [begin] assume[s]'. void ActOnOpenMPAssumesDirective(SourceLocation Loc, OpenMPDirectiveKind DKind, ArrayRef<StringRef> Assumptions, bool SkippedClauses); /// Check if there is an active global `omp begin assumes` directive. 
bool isInOpenMPAssumeScope() const { return !OMPAssumeScoped.empty(); }

/// Check if there is an active global `omp assumes` directive.
bool hasGlobalOpenMPAssumes() const { return !OMPAssumeGlobal.empty(); }

/// Called on well-formed '#pragma omp end assumes'.
void ActOnOpenMPEndAssumesDirective();

/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
                                            ArrayRef<OMPClause *> ClauseList);

/// Check restrictions on Requires directive
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
                                      ArrayRef<OMPClause *> Clauses);

/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
                                         TypeResult ParsedType);

/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
    Scope *S, DeclContext *DC, DeclarationName Name,
    ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
    AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);

/// Initialize declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);

/// Finish current declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);

/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);

/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
                                               VarDecl *OmpPrivParm);

/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
    Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);

/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); /// Check if the specified type is allowed to be used in 'omp declare /// mapper' construct. QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare mapper'. DeclGroupPtrTy ActOnOpenMPDeclareMapperDirective( Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Expr *MapperVarRef, ArrayRef<OMPClause *> Clauses, Decl *PrevDeclInScope = nullptr); /// Build the mapper variable of '#pragma omp declare mapper'. ExprResult ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S, QualType MapperType, SourceLocation StartLoc, DeclarationName VN); bool isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const; const ValueDecl *getOpenMPDeclareMapperVarName() const; /// Called on the start of target region i.e. '#pragma omp declare target'. bool ActOnStartOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI); /// Called at the end of target region i.e. '#pragma omp end declare target'. const DeclareTargetContextInfo ActOnOpenMPEndDeclareTargetDirective(); /// Called once a target context is completed, that can be when a /// '#pragma omp end declare target' was encountered or when a /// '#pragma omp declare target' without declaration-definition-seq was /// encountered. void ActOnFinishedOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI); /// Searches for the provided declaration name for OpenMP declare target /// directive. NamedDecl *lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id); /// Called on correct id-expression from the '#pragma omp declare target'. void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc, OMPDeclareTargetDeclAttr::MapTypeTy MT, OMPDeclareTargetDeclAttr::DevTypeTy DT); /// Check declaration inside target region. 
void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
                                      SourceLocation IdLoc = SourceLocation());

/// Finishes analysis of the deferred functions calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
                                   const FunctionDecl *Callee,
                                   SourceLocation Loc);

/// Return true inside OpenMP declare target region.
/// True whenever at least one 'declare target' context is still open.
bool isInOpenMPDeclareTargetContext() const {
  return !DeclareTargetNesting.empty();
}

/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;

/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);

/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);

/// Called for syntactical loops (ForStmt or CXXForRangeStmt) associated to
/// an OpenMP loop directive.
StmtResult ActOnOpenMPCanonicalLoop(Stmt *AStmt);

/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);

/// Act on an OpenMP executable directive of kind \p Kind after its clauses
/// and associated statement have been parsed; \p CancelRegion names the
/// region a cancellation construct applies to.
StmtResult ActOnOpenMPExecutableDirective(
    OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
    OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
    Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);

/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);

/// Maps a variable to the expression through which it picked up an
/// implicitly inherited data-sharing attribute; used by the
/// *WithImplicitDSA out-parameters of the loop-directive handlers below.
using VarsWithInheritedDSAType =
    llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;

/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '#pragma omp tile' after parsing of its clauses and /// the associated statement. StmtResult ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '#pragma omp unroll' after parsing of its clauses /// and the associated statement. StmtResult ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. 
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. 
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp depobj'. StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp scan'. StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. 
StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. 
StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. 
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp interop'. StmtResult ActOnOpenMPInteropDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp dispatch' after parsing of the /// associated statement. StmtResult ActOnOpenMPDispatchDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp masked' after parsing of the /// associated statement. StmtResult ActOnOpenMPMaskedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type, bool IsDeclareSimd = false); /// Called on well-formed '\#pragma omp declare simd' after parsing of /// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); /// Checks '\#pragma omp declare variant' variant function and original /// functions after parsing of the associated method/function. /// \param DG Function declaration to which declare variant directive is /// applied to. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p DG. /// \param TI The trait info object representing the match clause. /// \returns None, if the function/variant function are not compatible with /// the pragma, pair of original function/variant ref expression otherwise. Optional<std::pair<FunctionDecl *, Expr *>> checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef, OMPTraitInfo &TI, SourceRange SR); /// Called on well-formed '\#pragma omp declare variant' after parsing of /// the associated method/function. /// \param FD Function declaration to which declare variant directive is /// applied to. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p DG. /// \param TI The context traits associated with the function variant. void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef, OMPTraitInfo &TI, SourceRange SR); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocator' clause. OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. 
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-form 'sizes' clause. OMPClause *ActOnOpenMPSizesClause(ArrayRef<Expr *> SizeExprs, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-form 'full' clauses. OMPClause *ActOnOpenMPFullClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-form 'partial' clauses. OMPClause *ActOnOpenMPPartialClause(Expr *FactorExpr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. 
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'detach' clause. OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'order' clause. OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. 
OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'acq_rel' clause. OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'acquire' clause. OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'release' clause. OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'relaxed' clause. OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'init' clause. 
OMPClause *ActOnOpenMPInitClause(Expr *InteropVar, ArrayRef<Expr *> PrefExprs, bool IsTarget, bool IsTargetSync, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc); /// Called on well-formed 'use' clause. OMPClause *ActOnOpenMPUseClause(Expr *InteropVar, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc); /// Called on well-formed 'destroy' clause. OMPClause *ActOnOpenMPDestroyClause(Expr *InteropVar, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc); /// Called on well-formed 'novariants' clause. OMPClause *ActOnOpenMPNovariantsClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'nocontext' clause. OMPClause *ActOnOpenMPNocontextClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'filter' clause. OMPClause *ActOnOpenMPFilterClause(Expr *ThreadID, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'threads' clause. OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'simd' clause. OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nogroup' clause. OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_address' clause. OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_shared_memory' clause. OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'reverse_offload' clause. OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'atomic_default_mem_order' clause. OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause( OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr, const OMPVarListLocTy &Locs, SourceLocation ColonLoc, CXXScopeSpec &ReductionOrMapperIdScopeSpec, DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit, SourceLocation ExtraModifierLoc, ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc); /// Called on well-formed 'inclusive' clause. OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'exclusive' clause. OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocate' clause. OMPClause * ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. 
OMPClause *ActOnOpenMPLastprivateClause( ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc, SourceLocation ColonLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. 
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depobj' pseudo clause. OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier, Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. 
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause * ActOnOpenMPToClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. OMPClause * ActOnOpenMPFromClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'use_device_addr' clause. 
OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'is_device_ptr' clause. OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'nontemporal' clause. OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Data for list of allocators. struct UsesAllocatorsData { /// Allocator. Expr *Allocator = nullptr; /// Allocator traits. Expr *AllocatorTraits = nullptr; /// Locations of '(' and ')' symbols. SourceLocation LParenLoc, RParenLoc; }; /// Called on well-formed 'uses_allocators' clause. OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<UsesAllocatorsData> Data); /// Called on well-formed 'affinity' clause. OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, Expr *Modifier, ArrayRef<Expr *> Locators); /// The kind of conversion being performed. enum CheckedConversionKind { /// An implicit conversion. CCK_ImplicitConversion, /// A C-style cast. CCK_CStyleCast, /// A functional-style cast. CCK_FunctionalCast, /// A cast other than a C-style cast. CCK_OtherCast, /// A conversion for an operand of a builtin overloaded operator. CCK_ForBuiltinOverloadedOp }; /// Returns true if \p CCK is an explicit cast kind (C-style, functional, or /// other cast) rather than an implicit or builtin-operator conversion. static bool isCast(CheckedConversionKind CCK) { return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast || CCK == CCK_OtherCast; } /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_PRValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of an unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This function is a no-op if the operand has a function type // or an array type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. 
/// In C++98, the result will still be a prvalue, because /// we don't have xvalues there. ExprResult TemporaryMaterializationConversion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check whether the given statement can have musttail applied to it, /// issuing a diagnostic and returning false if not. In the success case, /// the statement is rewritten to remove implicit nodes from the return /// value. bool checkAndRewriteMustTailAttr(Stmt *St, const Attr &MTA); private: /// Check whether the given statement can have musttail applied to it, /// issuing a diagnostic and returning false if not. bool checkMustTailAttr(const Stmt *St, const Attr &MTA); public: /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collector argument expressions for various /// form of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); /// Context in which we're performing a usual arithmetic conversion. enum ArithConvKind { /// An arithmetic operation. ACK_Arithmetic, /// A bitwise operation. ACK_BitwiseOp, /// A comparison. ACK_Comparison, /// A conditional (?:) operator. ACK_Conditional, /// A compound assignment expression. ACK_CompAssign, }; // UsualArithmeticConversions - performs the UsualUnaryConversions on it's // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If both operands aren't arithmetic, this // routine returns the first non-arithmetic type found. The client is // responsible for emitting appropriate error diagnostics. QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, ArithConvKind ACK); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension. 
IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointers types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatibleFunctionPointer - The assignment is between two function /// pointers types that are not compatible, but we accept them as an /// extension. IncompatibleFunctionPointer, /// IncompatiblePointerSign - The assignment is between two pointers types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment /// changes address spaces in nested pointer types which is not allowed. /// For instance, converting __private int ** to __generic int ** is /// illegal even though __private could be converted to __generic. IncompatibleNestedPointerAddressSpaceMismatch, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. 
IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointers types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. 
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. 
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_PRValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). 
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. 
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); /// Type checking for matrix binary operators. QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); bool isValidSveBitcast(QualType srcType, QualType destType); bool areMatrixTypesOfTheSameDimension(QualType srcTy, QualType destTy); bool areVectorTypesSameSize(QualType srcType, QualType destType); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; // Fake up a scoped enumeration that still contextually converts to bool. 
struct ReferenceConversionsScope { /// The conversions that would be performed on an lvalue of type T2 when /// binding a reference of type T1 to it, as determined when evaluating /// whether T1 is reference-compatible with T2. enum ReferenceConversions { Qualification = 0x1, NestedQualification = 0x2, Function = 0x4, DerivedToBase = 0x8, ObjC = 0x10, ObjCLifetime = 0x20, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime) }; }; using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, ReferenceConversions *Conv = nullptr); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckMatrixCast - Check type constraints for matrix casts. // We allow casting between matrixes of the same dimensions i.e. when they // have the same number of rows and column. Returns true if the cast is // invalid. bool CheckMatrixCast(SourceRange R, QualType DestTy, QualType SrcTy, CastKind &Kind); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. 
// Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. 
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. 
void EmitRelatedResultTypeNoteForReturn(QualType destType); class ConditionResult { Decl *ConditionVar; FullExprArg Condition; bool Invalid; bool HasKnownValue; bool KnownValue; friend class Sema; ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition, bool IsConstexpr) : ConditionVar(ConditionVar), Condition(Condition), Invalid(false), HasKnownValue(IsConstexpr && Condition.get() && !Condition.get()->isValueDependent()), KnownValue(HasKnownValue && !!Condition.get()->EvaluateKnownConstInt(S.Context)) {} explicit ConditionResult(bool Invalid) : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid), HasKnownValue(false), KnownValue(false) {} public: ConditionResult() : ConditionResult(false) {} bool isInvalid() const { return Invalid; } std::pair<VarDecl *, Expr *> get() const { return std::make_pair(cast_or_null<VarDecl>(ConditionVar), Condition.get()); } llvm::Optional<bool> getKnownValue() const { if (!HasKnownValue) return None; return KnownValue; } }; static ConditionResult ConditionError() { return ConditionResult(true); } enum class ConditionKind { Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'. ConstexprIf, ///< A constant boolean condition from 'if constexpr'. Switch ///< An integral condition for a 'switch' statement. }; ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr, ConditionKind CK); ConditionResult ActOnConditionVariable(Decl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. 
/// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression /// found in an explicit(bool) specifier. ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E); /// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier. /// Returns true if the explicit specifier is now resolved. bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. 
class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual SemaDiagnosticBuilder diagnoseNotICEType(Sema &S, SourceLocation Loc, QualType T); virtual SemaDiagnosticBuilder diagnoseNotICE(Sema &S, SourceLocation Loc) = 0; virtual SemaDiagnosticBuilder diagnoseFold(Sema &S, SourceLocation Loc); virtual ~VerifyICEDiagnoser() {} }; enum AllowFoldKind { NoFold, AllowFold, }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns false on success. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, AllowFoldKind CanFold = NoFold); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, AllowFoldKind CanFold = NoFold); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr, AllowFoldKind CanFold = NoFold); ExprResult VerifyIntegerConstantExpression(Expr *E, AllowFoldKind CanFold = NoFold) { return VerifyIntegerConstantExpression(E, nullptr, CanFold); } /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success. /// Can optionally return whether the bit-field is of width 0 ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr); private: unsigned ForceCUDAHostDeviceDepth = 0; public: /// Increments our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. So long as this count is greater /// than zero, all functions encountered will be __host__ __device__. void PushForceCUDAHostDevice(); /// Decrements our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. 
Returns false if the count is 0 /// before decrementing, so you can emit an error. bool PopForceCUDAHostDevice(); /// Diagnostics that are emitted only if we discover that the given function /// must be codegen'ed. Because handling these correctly adds overhead to /// compilation, this is currently only enabled for CUDA compilations. llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> DeviceDeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed. struct FunctionDeclAndLoc { CanonicalDeclPtr<FunctionDecl> FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags; /// An inverse call graph, mapping known-emitted functions to one of their /// known-emitted callers (plus the location of the call). /// /// Functions that we can tell a priori must be emitted aren't added to this /// map. llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>, /* Caller = */ FunctionDeclAndLoc> DeviceKnownEmittedFns; /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a __host__ function, does not emit any diagnostics /// unless \p EmitOnBothSides is true. /// - If CurContext is a __device__ or __global__ function, emits the /// diagnostics immediately. /// - If CurContext is a __host__ __device__ function and we are compiling for /// the device, creates a diagnostic which is emitted if and when we realize /// that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in CUDA device code. 
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) /// return ExprError(); /// // Otherwise, continue parsing as normal. SemaDiagnosticBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as host code". /// /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. SemaDiagnosticBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the device, emits the diagnostics immediately. /// - If CurContext is a non-`declare target` function and we are compiling /// for the device, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. SemaDiagnosticBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD); /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as host code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the host, emits the diagnostics immediately. /// - If CurContext is a non-host function, just ignore it. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. 
SemaDiagnosticBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD); SemaDiagnosticBuilder targetDiag(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD = nullptr); SemaDiagnosticBuilder targetDiag(SourceLocation Loc, const PartialDiagnostic &PD, FunctionDecl *FD = nullptr) { return targetDiag(Loc, PD.getDiagID(), FD) << PD; } /// Check if the expression is allowed to be used in expressions for the /// offloading devices. void checkDeviceDecl(ValueDecl *D, SourceLocation Loc); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); enum CUDAVariableTarget { CVT_Device, /// Emitted on device side with a shadow variable on host side CVT_Host, /// Emitted on host side only CVT_Both, /// Emitted on both sides with different addresses CVT_Unified, /// Emitted as a unified address, e.g. managed variables }; /// Determines whether the given variable is emitted on host or device side. CUDAVariableTarget IdentifyCUDATarget(const VarDecl *D); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D); // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. 
CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); /// May add implicit CUDAConstantAttr attribute to VD, depending on VD /// and current compilation settings. void MaybeAddCUDAConstantAttr(VarDecl *VD); public: /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. /// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. 
/// /// - Otherwise, returns true without emitting any diagnostics. bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas by default is host device function unless it has explicit /// host or device attribute. void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given a implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error emits appropriate diagnostic and invalidates \p Var. 
// // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. 
PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); /// Determines the preferred type of the current function argument, by /// examining the signatures of all possible overloads. /// Returns null if unknown or ambiguous, or if code completion is off. 
/// /// If the code completion point has been reached, also reports the function /// signatures that were considered. /// /// FIXME: rename to GuessCallArgumentType to reduce confusion. QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); /// Trigger code completion for a record of \p BaseType. \p InitExprs are /// expressions in the initializer list seen so far and \p D is the current /// Designation being parsed. void CodeCompleteDesignator(const QualType BaseType, llvm::ArrayRef<Expr *> InitExprs, const Designation &D); void CodeCompleteAfterIf(Scope *S, bool IsBracedThen); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, bool IsUsingDeclaration, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteAfterFunctionEquals(Declarator &D); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope 
*S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void 
CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl, StringRef ParamName, QualType ArgTy, QualType ParamTy); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); 
ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg, bool WantCDE); bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums); bool 
CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum); bool CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinComplex(CallExpr *TheCall); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); bool SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum); public: // Used by C++ template instantiation. ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinArithmeticFence(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = 
true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum); bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinPPCMMACall(CallExpr *TheCall, const char *TypeDesc); bool CheckPPCMMAType(QualType Type, SourceLocation TypeLoc); // Matrix builtin handling. ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall, ExprResult CallResult); ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall, ExprResult CallResult); ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall, ExprResult CallResult); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, 
unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckFreeArguments(const CallExpr *E); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(const Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether receiver is mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void CheckTCBEnforcement(const CallExpr *TheCall, const FunctionDecl *Callee); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. 
/// Register a magic integral constant to be used as a type tag.
///
/// \param ArgumentKind identifier grouping related magic values.
/// \param MagicValue the integral constant acting as the tag.
/// \param Type the type associated with the tag.
/// \param LayoutCompatible if true, layout-compatibility (rather than exact
///        type identity) is required when checking tagged arguments.
/// \param MustBeNull if true, arguments carrying this tag must be null.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                uint64_t MagicValue, QualType Type,
                                bool LayoutCompatible, bool MustBeNull);

/// Type information recorded for a registered type-tag magic value.
struct TypeTagData {
  TypeTagData() {}

  TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull)
      : Type(Type), LayoutCompatible(LayoutCompatible),
        MustBeNull(MustBeNull) {}

  QualType Type;

  /// If true, \c Type should be compared with other expression's types for
  /// layout-compatibility.
  unsigned LayoutCompatible : 1;
  /// If true, arguments tagged with this magic value must be null pointers.
  unsigned MustBeNull : 1;
};

/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;

private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
    TypeTagForDatatypeMagicValues;

/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                              const ArrayRef<const Expr *> ExprArgs,
                              SourceLocation CallSiteLoc);

/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);

/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;

// Lazily-created identifiers, cached for fast comparison.
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;

/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Nullable_result = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;

IdentifierInfo *Ident_NSError = nullptr;

/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;

protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;

public:
/// Retrieve the identifier keyword associated with the given nullability
/// kind (e.g. "_Nonnull").
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);

/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
bool isCFError(RecordDecl *D);

/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();

/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }

// Forwards to the current scope's mangling-number counter.
void incrementMSManglingNumber() const {
  return CurScope->incrementMSManglingNumber();
}

IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;

Decl *getObjCDeclContext() const;

// Returns the lexical context being parsed, preferring the saved original
// context (set during e.g. template instantiation) over CurContext.
DeclContext *getCurLexicalContext() const {
  return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}

const DeclContext *getCurObjCLexicalContext() const {
  const DeclContext *DC = getCurLexicalContext();
  // A category implicitly has the attribute of the interface.
  if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
    DC = CatD->getClassInterface();
  return DC;
}

/// Determine the number of levels of enclosing template parameters. This is
/// only usable while parsing. Note that this does not include dependent
/// contexts in which no template parameters have yet been declared, such as
/// in a terse function template or generic lambda before the first 'auto' is
/// encountered.
unsigned getTemplateDepth(Scope *S) const;

/// To be used for checking whether the arguments being passed to a
/// function exceed the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
                             bool PartialOverloading = false) {
  // We check whether we're just after a comma in code-completion.
  if (NumArgs > 0 && PartialOverloading)
    return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
  return NumArgs > NumParams;
}

// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;

private:
int ParsingClassDepth = 0;

// RAII object that stashes the pending delayed exception-spec checks on
// entry and restores them on exit, so that parsing a nested class does not
// see (or clobber) the enclosing class's pending checks.
class SavePendingParsedClassStateRAII {
public:
  SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }

  ~SavePendingParsedClassStateRAII() {
    assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
           "there shouldn't be any pending delayed exception spec checks");
    assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
           "there shouldn't be any pending delayed exception spec checks");
    swapSavedState();
  }

private:
  Sema &S;
  decltype(DelayedOverridingExceptionSpecChecks)
      SavedOverridingExceptionSpecChecks;
  decltype(DelayedEquivalentExceptionSpecChecks)
      SavedEquivalentExceptionSpecChecks;

  // Exchanges the saved state with Sema's live state; called symmetrically
  // from the constructor and destructor.
  void swapSavedState() {
    SavedOverridingExceptionSpecChecks.swap(
        S.DelayedOverridingExceptionSpecChecks);
    SavedEquivalentExceptionSpecChecks.swap(
        S.DelayedEquivalentExceptionSpecChecks);
  }
};

/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
  Expr *E;
  RecordDecl *RD;
  ValueDecl *MD;
  CharUnits Alignment;

  MisalignedMember() : E(), RD(), MD(), Alignment() {}
  MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
                   CharUnits Alignment)
      : E(E), RD(RD), MD(MD), Alignment(Alignment) {}
  explicit MisalignedMember(Expr *E)
      : MisalignedMember(E, nullptr, nullptr, CharUnits()) {}

  // Equality is on the expression only; the other fields are payload.
  bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};

/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;

/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
                                   CharUnits Alignment);

public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();

/// This function checks if the expression is in the set of potentially
/// misaligned members and it is converted to some pointer type T with lower
/// or equal alignment requirements. If so it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);

/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
    Expr *E,
    llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
        Action);

/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
  ForThisTarget = 0,
  VariadicFunction,
  ConstructorDestructor,
  BuiltinFunction
};

/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
/// /// - If CurLexicalContext is a kernel function or it is known that the /// function will be emitted for the device, emits the diagnostics /// immediately. /// - If CurLexicalContext is a function and we are compiling /// for the device, but we don't know that this function will be codegen'ed /// for devive yet, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// Diagnose __float128 type usage only from SYCL device code if the current /// target doesn't support it /// if (!S.Context.getTargetInfo().hasFloat128Type() && /// S.getLangOpts().SYCLIsDevice) /// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128"; SemaDiagnosticBuilder SYCLDiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed, creates a deferred diagnostic to be emitted if /// and when the caller is codegen'ed, and returns true. /// /// - Otherwise, returns true without emitting any diagnostics. /// /// Adds Callee to DeviceCallGraph if we don't know if its caller will be /// codegen'ed yet. bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee); }; /// RAII object that enters a new expression evaluation context. 
class EnterExpressionEvaluationContext {
  Sema &Actions;
  // True iff a context was pushed in the constructor, and therefore must be
  // popped in the destructor. The push and pop must stay exactly paired.
  bool Entered = true;

public:
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Decl *LambdaContextDecl = nullptr,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other,
      bool ShouldEnter = true)
      : Actions(Actions), Entered(ShouldEnter) {
    if (Entered)
      Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                              ExprContext);
  }

  // Variant that reuses the lambda context declaration of the enclosing
  // evaluation context; always enters.
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Sema::ReuseLambdaContextDecl_t,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(
        NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
  }

  enum InitListTag { InitList };
  EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
                                   bool ShouldEnter = true)
      : Actions(Actions), Entered(false) {
    // In C++11 onwards, narrowing checks are performed on the contents of
    // braced-init-lists, even when they occur within unevaluated operands.
    // Therefore we still need to instantiate constexpr functions used in such
    // a context.
    if (ShouldEnter && Actions.isUnevaluatedContext() &&
        Actions.getLangOpts().CPlusPlus11) {
      Actions.PushExpressionEvaluationContext(
          Sema::ExpressionEvaluationContext::UnevaluatedList);
      Entered = true;
    }
  }

  ~EnterExpressionEvaluationContext() {
    if (Entered)
      Actions.PopExpressionEvaluationContext();
  }
};

DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
                         sema::TemplateDeductionInfo &Info);

/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
  CachedTokens Toks;
  /// The template function declaration to be late parsed.
  Decl *D;
};

// Explicit specialization declaration for the #pragma pack/align stack.
template <>
void Sema::PragmaStack<Sema::AlignPackInfo>::Act(SourceLocation PragmaLocation,
                                                 PragmaMsStackAction Action,
                                                 llvm::StringRef StackSlotLabel,
                                                 AlignPackInfo Value);

} // end namespace clang

namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
  using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
  using FDBaseInfo =
      DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;

  static FunctionDeclAndLoc getEmptyKey() {
    return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
  }

  static FunctionDeclAndLoc getTombstoneKey() {
    return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
  }

  static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
    return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
                        FDL.Loc.getHashValue());
  }

  static bool isEqual(const FunctionDeclAndLoc &LHS,
                      const FunctionDeclAndLoc &RHS) {
    return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
  }
};
} // namespace llvm

#endif
// ===-- Parser.h (file boundary: clang/Parse/Parser.h follows) -----------===//
//===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Parser interface. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_PARSE_PARSER_H #define LLVM_CLANG_PARSE_PARSER_H #include "clang/AST/Availability.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/OperatorPrecedence.h" #include "clang/Basic/Specifiers.h" #include "clang/Lex/CodeCompletionHandler.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/Sema.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Frontend/OpenMP/OMPContext.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/PrettyStackTrace.h" #include "llvm/Support/SaveAndRestore.h" #include <memory> #include <stack> namespace clang { class PragmaHandler; class Scope; class BalancedDelimiterTracker; class CorrectionCandidateCallback; class DeclGroupRef; class DiagnosticBuilder; struct LoopHint; class Parser; class ParsingDeclRAIIObject; class ParsingDeclSpec; class ParsingDeclarator; class ParsingFieldDeclarator; class ColonProtectionRAIIObject; class InMessageExpressionRAIIObject; class PoisonSEHIdentifiersRAIIObject; class OMPClause; class ObjCTypeParamList; class ObjCTypeParameter; struct OMPTraitProperty; struct OMPTraitSelector; struct OMPTraitSet; class OMPTraitInfo; /// Parser - This implements a parser for the C family of languages. After /// parsing units of the grammar, productions are invoked to handle whatever has /// been read. 
/// class Parser : public CodeCompletionHandler { friend class ColonProtectionRAIIObject; friend class ParsingOpenMPDirectiveRAII; friend class InMessageExpressionRAIIObject; friend class PoisonSEHIdentifiersRAIIObject; friend class ObjCDeclContextSwitch; friend class ParenBraceBracketBalancer; friend class BalancedDelimiterTracker; Preprocessor &PP; /// Tok - The current token we are peeking ahead. All parsing methods assume /// that this is valid. Token Tok; // PrevTokLocation - The location of the token we previously // consumed. This token is used for diagnostics where we expected to // see a token following another token (e.g., the ';' at the end of // a statement). SourceLocation PrevTokLocation; /// Tracks an expected type for the current token when parsing an expression. /// Used by code completion for ranking. PreferredTypeBuilder PreferredType; unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0; unsigned short MisplacedModuleBeginCount = 0; /// Actions - These are the callbacks we invoke as we parse various constructs /// in the file. Sema &Actions; DiagnosticsEngine &Diags; /// ScopeCache - Cache scopes to reduce malloc traffic. enum { ScopeCacheSize = 16 }; unsigned NumCachedScopes; Scope *ScopeCache[ScopeCacheSize]; /// Identifiers used for SEH handling in Borland. These are only /// allowed in particular circumstances // __except block IdentifierInfo *Ident__exception_code, *Ident___exception_code, *Ident_GetExceptionCode; // __except filter expression IdentifierInfo *Ident__exception_info, *Ident___exception_info, *Ident_GetExceptionInfo; // __finally IdentifierInfo *Ident__abnormal_termination, *Ident___abnormal_termination, *Ident_AbnormalTermination; /// Contextual keywords for Microsoft extensions. IdentifierInfo *Ident__except; mutable IdentifierInfo *Ident_sealed; /// Ident_super - IdentifierInfo for "super", to support fast /// comparison. 
IdentifierInfo *Ident_super; /// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and /// "bool" fast comparison. Only present if AltiVec or ZVector are enabled. IdentifierInfo *Ident_vector; IdentifierInfo *Ident_bool; /// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison. /// Only present if AltiVec enabled. IdentifierInfo *Ident_pixel; /// Objective-C contextual keywords. IdentifierInfo *Ident_instancetype; /// Identifier for "introduced". IdentifierInfo *Ident_introduced; /// Identifier for "deprecated". IdentifierInfo *Ident_deprecated; /// Identifier for "obsoleted". IdentifierInfo *Ident_obsoleted; /// Identifier for "unavailable". IdentifierInfo *Ident_unavailable; /// Identifier for "message". IdentifierInfo *Ident_message; /// Identifier for "strict". IdentifierInfo *Ident_strict; /// Identifier for "replacement". IdentifierInfo *Ident_replacement; /// Identifiers used by the 'external_source_symbol' attribute. IdentifierInfo *Ident_language, *Ident_defined_in, *Ident_generated_declaration; /// C++11 contextual keywords. mutable IdentifierInfo *Ident_final; mutable IdentifierInfo *Ident_GNU_final; mutable IdentifierInfo *Ident_override; // C++2a contextual keywords. mutable IdentifierInfo *Ident_import; mutable IdentifierInfo *Ident_module; // C++ type trait keywords that can be reverted to identifiers and still be // used as type traits. 
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits; std::unique_ptr<PragmaHandler> AlignHandler; std::unique_ptr<PragmaHandler> GCCVisibilityHandler; std::unique_ptr<PragmaHandler> OptionsHandler; std::unique_ptr<PragmaHandler> PackHandler; std::unique_ptr<PragmaHandler> MSStructHandler; std::unique_ptr<PragmaHandler> UnusedHandler; std::unique_ptr<PragmaHandler> WeakHandler; std::unique_ptr<PragmaHandler> RedefineExtnameHandler; std::unique_ptr<PragmaHandler> FPContractHandler; std::unique_ptr<PragmaHandler> OpenCLExtensionHandler; std::unique_ptr<PragmaHandler> OpenMPHandler; std::unique_ptr<PragmaHandler> PCSectionHandler; std::unique_ptr<PragmaHandler> MSCommentHandler; std::unique_ptr<PragmaHandler> MSDetectMismatchHandler; std::unique_ptr<PragmaHandler> FloatControlHandler; std::unique_ptr<PragmaHandler> MSPointersToMembers; std::unique_ptr<PragmaHandler> MSVtorDisp; std::unique_ptr<PragmaHandler> MSInitSeg; std::unique_ptr<PragmaHandler> MSDataSeg; std::unique_ptr<PragmaHandler> MSBSSSeg; std::unique_ptr<PragmaHandler> MSConstSeg; std::unique_ptr<PragmaHandler> MSCodeSeg; std::unique_ptr<PragmaHandler> MSSection; std::unique_ptr<PragmaHandler> MSRuntimeChecks; std::unique_ptr<PragmaHandler> MSIntrinsic; std::unique_ptr<PragmaHandler> MSOptimize; std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler; std::unique_ptr<PragmaHandler> OptimizeHandler; std::unique_ptr<PragmaHandler> LoopHintHandler; std::unique_ptr<PragmaHandler> UnrollHintHandler; std::unique_ptr<PragmaHandler> NoUnrollHintHandler; std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> FPHandler; std::unique_ptr<PragmaHandler> STDCFenvAccessHandler; std::unique_ptr<PragmaHandler> STDCFenvRoundHandler; std::unique_ptr<PragmaHandler> STDCCXLIMITHandler; std::unique_ptr<PragmaHandler> STDCUnknownHandler; std::unique_ptr<PragmaHandler> AttributePragmaHandler; 
std::unique_ptr<PragmaHandler> MaxTokensHerePragmaHandler; std::unique_ptr<PragmaHandler> MaxTokensTotalPragmaHandler; std::unique_ptr<CommentHandler> CommentSemaHandler; /// Whether the '>' token acts as an operator or not. This will be /// true except when we are parsing an expression within a C++ /// template argument list, where the '>' closes the template /// argument list. bool GreaterThanIsOperator; /// ColonIsSacred - When this is false, we aggressively try to recover from /// code like "foo : bar" as if it were a typo for "foo :: bar". This is not /// safe in case statements and a few other things. This is managed by the /// ColonProtectionRAIIObject RAII object. bool ColonIsSacred; /// Parsing OpenMP directive mode. bool OpenMPDirectiveParsing = false; /// When true, we are directly inside an Objective-C message /// send expression. /// /// This is managed by the \c InMessageExpressionRAIIObject class, and /// should not be set directly. bool InMessageExpression; /// Gets set to true after calling ProduceSignatureHelp, it is for a /// workaround to make sure ProduceSignatureHelp is only called at the deepest /// function call. bool CalledSignatureHelp = false; /// The "depth" of the template parameters currently being parsed. unsigned TemplateParameterDepth; /// Current kind of OpenMP clause OpenMPClauseKind OMPClauseKind = llvm::omp::OMPC_unknown; /// RAII class that manages the template parameter depth. 
class TemplateParameterDepthRAII {
  unsigned &Depth;
  unsigned AddedLevels; // How many levels this RAII object added; removed on destruction.

public:
  explicit TemplateParameterDepthRAII(unsigned &Depth)
      : Depth(Depth), AddedLevels(0) {}

  ~TemplateParameterDepthRAII() { Depth -= AddedLevels; }

  void operator++() {
    ++Depth;
    ++AddedLevels;
  }
  void addDepth(unsigned D) {
    Depth += D;
    AddedLevels += D;
  }
  void setAddedDepth(unsigned D) {
    Depth = Depth - AddedLevels + D;
    AddedLevels = D;
  }

  unsigned getDepth() const { return Depth; }
  unsigned getOriginalDepth() const { return Depth - AddedLevels; }
};

/// Factory object for creating ParsedAttr objects.
AttributeFactory AttrFactory;

/// Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;
// Destroy accumulated TemplateIds once no annotation tokens can still
// reference them (at EOF, or when the preprocessor has none pending).
void MaybeDestroyTemplateIds() {
  if (!TemplateIds.empty() &&
      (Tok.is(tok::eof) || !PP.mightHavePendingAnnotationTokens()))
    DestroyTemplateIds();
}
void DestroyTemplateIds();

/// RAII object to destroy TemplateIdAnnotations where possible, from a
/// likely-good position during parsing.
struct DestroyTemplateIdAnnotationsRAIIObj {
  Parser &Self;

  DestroyTemplateIdAnnotationsRAIIObj(Parser &Self) : Self(Self) {}
  ~DestroyTemplateIdAnnotationsRAIIObj() { Self.MaybeDestroyTemplateIds(); }
};

/// Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;

/// Tracker for '<' tokens that might have been intended to be treated as an
/// angle bracket instead of a less-than comparison.
///
/// This happens when the user intends to form a template-id, but typoes the
/// template-name or forgets a 'template' keyword for a dependent template
/// name.
///
/// We track these locations from the point where we see a '<' with a
/// name-like expression on its left until we see a '>' or '>>' that might
/// match it.
struct AngleBracketTracker {
  /// Flags used to rank candidate template names when there is more than one
  /// '<' in a scope.
  enum Priority : unsigned short {
    /// A non-dependent name that is a potential typo for a template name.
    PotentialTypo = 0x0,
    /// A dependent name that might instantiate to a template-name.
    DependentName = 0x2,

    /// A space appears before the '<' token.
    SpaceBeforeLess = 0x0,
    /// No space before the '<' token.
    NoSpaceBeforeLess = 0x1,

    LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName)
  };

  /// One suspicious '<' and the bracket-nesting state at which it was seen.
  struct Loc {
    Expr *TemplateName;
    SourceLocation LessLoc;
    AngleBracketTracker::Priority Priority;
    unsigned short ParenCount, BracketCount, BraceCount;

    /// True if the parser is at exactly the nesting depth where this '<'
    /// was recorded.
    bool isActive(Parser &P) const {
      return P.ParenCount == ParenCount && P.BracketCount == BracketCount &&
             P.BraceCount == BraceCount;
    }

    /// True if the parser is at or inside the nesting depth of this '<'.
    bool isActiveOrNested(Parser &P) const {
      return isActive(P) || P.ParenCount > ParenCount ||
             P.BracketCount > BracketCount || P.BraceCount > BraceCount;
    }
  };

  SmallVector<Loc, 8> Locs;

  /// Add an expression that might have been intended to be a template name.
  /// In the case of ambiguity, we arbitrarily select the innermost such
  /// expression, for example in 'foo < bar < baz', 'bar' is the current
  /// candidate. No attempt is made to track that 'foo' is also a candidate
  /// for the case where we see a second suspicious '>' token.
  void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
           Priority Prio) {
    if (!Locs.empty() && Locs.back().isActive(P)) {
      // Same nesting level as the previous candidate: keep the better one.
      if (Locs.back().Priority <= Prio) {
        Locs.back().TemplateName = TemplateName;
        Locs.back().LessLoc = LessLoc;
        Locs.back().Priority = Prio;
      }
    } else {
      Locs.push_back({TemplateName, LessLoc, Prio, P.ParenCount,
                      P.BracketCount, P.BraceCount});
    }
  }

  /// Mark the current potential missing template location as having been
  /// handled (this happens if we pass a "corresponding" '>' or '>>' token
  /// or leave a bracket scope).
  void clear(Parser &P) {
    while (!Locs.empty() && Locs.back().isActiveOrNested(P))
      Locs.pop_back();
  }

  /// Get the current enclosing expression that might have been intended to be
  /// a template name.
  Loc *getCurrent(Parser &P) {
    if (!Locs.empty() && Locs.back().isActive(P))
      return &Locs.back();
    return nullptr;
  }
};

AngleBracketTracker AngleBrackets;

// NOTE(review): presumably lazily resolves the Borland SEH '__except'
// identifier (see Ident__except above) -- confirm in Parser.cpp.
IdentifierInfo *getSEHExceptKeyword();

/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;

/// Whether to skip parsing of function bodies.
///
/// This option can be used, for example, to speed up searches for
/// declarations/definitions when indexing.
bool SkipFunctionBodies;

/// The location of the expression statement that is being parsed right now.
/// Used to determine if an expression that is being parsed is a statement or
/// just a regular sub-expression.
SourceLocation ExprStatementTokLoc;

/// Flags describing a context in which we're parsing a statement.
enum class ParsedStmtContext {
  /// This context permits declarations in language modes where declarations
  /// are not statements.
  AllowDeclarationsInC = 0x1,
  /// This context permits standalone OpenMP directives.
  AllowStandaloneOpenMPDirectives = 0x2,
  /// This context is at the top level of a GNU statement expression.
  InStmtExpr = 0x4,

  /// The context of a regular substatement.
  SubStmt = 0,
  /// The context of a compound-statement.
  Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives,

  LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr)
};

/// Act on an expression statement that might be the last statement in a
/// GNU statement expression. Checks whether we are actually at the end of
/// a statement expression and builds a suitable expression statement.
StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx); public: Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies); ~Parser() override; const LangOptions &getLangOpts() const { return PP.getLangOpts(); } const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); } Preprocessor &getPreprocessor() const { return PP; } Sema &getActions() const { return Actions; } AttributeFactory &getAttrFactory() { return AttrFactory; } const Token &getCurToken() const { return Tok; } Scope *getCurScope() const { return Actions.getCurScope(); } void incrementMSManglingNumber() const { return Actions.incrementMSManglingNumber(); } Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); } // Type forwarding. All of these are statically 'void*', but they may all be // different actual classes based on the actions in place. typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists; typedef Sema::FullExprArg FullExprArg; // Parsing methods. /// Initialize - Warm up the parser. /// void Initialize(); /// Parse the first top-level declaration in a translation unit. bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result); /// ParseTopLevelDecl - Parse one top-level declaration. Returns true if /// the EOF was encountered. bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false); bool ParseTopLevelDecl() { DeclGroupPtrTy Result; return ParseTopLevelDecl(Result); } /// ConsumeToken - Consume the current 'peek token' and lex the next one. /// This does not work with special tokens: string literals, code completion, /// annotation tokens and balanced tokens must be handled using the specific /// consume methods. /// Returns the location of the consumed token. 
SourceLocation ConsumeToken() {
  assert(!isTokenSpecial() &&
         "Should consume special tokens with Consume*Token");
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}

/// Consume the current token only if it has the expected kind; returns true
/// on success. Must not be used with special tokens (see isTokenSpecial).
bool TryConsumeToken(tok::TokenKind Expected) {
  if (Tok.isNot(Expected))
    return false;
  assert(!isTokenSpecial() &&
         "Should consume special tokens with Consume*Token");
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return true;
}

/// As TryConsumeToken(Expected), additionally returning the consumed
/// token's location in \p Loc on success.
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
  if (!TryConsumeToken(Expected))
    return false;
  Loc = PrevTokLocation;
  return true;
}

/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
  if (isTokenParen())
    return ConsumeParen();
  if (isTokenBracket())
    return ConsumeBracket();
  if (isTokenBrace())
    return ConsumeBrace();
  if (isTokenStringLiteral())
    return ConsumeStringToken();
  if (Tok.is(tok::code_completion))
    return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
                                    : handleUnexpectedCodeCompletionToken();
  if (Tok.isAnnotation())
    return ConsumeAnnotationToken();
  return ConsumeToken();
}

/// Return the location just past the end of the previously consumed token.
SourceLocation getEndOfPreviousToken() {
  return PP.getLocForEndOfToken(PrevTokLocation);
}

/// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
/// to the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
  return Actions.getNullabilityKeyword(nullability);
}

private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//

/// isTokenParen - Return true if the cur token is '(' or ')'.
bool isTokenParen() const { return Tok.isOneOf(tok::l_paren, tok::r_paren); }
/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
  return Tok.isOneOf(tok::l_square, tok::r_square);
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const { return Tok.isOneOf(tok::l_brace, tok::r_brace); }
/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
  return tok::isStringLiteral(Tok.getKind());
}
/// isTokenSpecial - True if this token requires special consumption methods.
bool isTokenSpecial() const {
  return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
         isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation();
}

/// Returns true if the current token is '=' or is a type of '='.
/// For typos, give a fixit to '='.
bool isTokenEqualOrEqualTypo();

/// Return the current token to the token stream and make the given
/// token the current token.
void UnconsumeToken(Token &Consumed) {
  Token Next = Tok;
  PP.EnterToken(Consumed, /*IsReinject*/ true);
  PP.Lex(Tok);
  PP.EnterToken(Next, /*IsReinject*/ true);
}

/// Consume an annotation token. Returns the annotation's start location;
/// PrevTokLocation is set to the annotation's *end* location.
SourceLocation ConsumeAnnotationToken() {
  assert(Tok.isAnnotation() && "wrong consume method");
  SourceLocation Loc = Tok.getLocation();
  PrevTokLocation = Tok.getAnnotationEndLoc();
  PP.Lex(Tok);
  return Loc;
}

/// ConsumeParen - This consume method keeps the paren count up-to-date.
///
SourceLocation ConsumeParen() {
  assert(isTokenParen() && "wrong consume method");
  if (Tok.getKind() == tok::l_paren)
    ++ParenCount;
  else if (ParenCount) {
    // Leaving a paren scope invalidates any tracked suspicious '<' tokens.
    AngleBrackets.clear(*this);
    --ParenCount; // Don't let unbalanced )'s drive the count negative.
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}

/// ConsumeBracket - This consume method keeps the bracket count up-to-date.
///
SourceLocation ConsumeBracket() {
  assert(isTokenBracket() && "wrong consume method");
  if (Tok.getKind() == tok::l_square)
    ++BracketCount;
  else if (BracketCount) {
    AngleBrackets.clear(*this);
    --BracketCount; // Don't let unbalanced ]'s drive the count negative.
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}

/// ConsumeBrace - This consume method keeps the brace count up-to-date.
///
SourceLocation ConsumeBrace() {
  assert(isTokenBrace() && "wrong consume method");
  if (Tok.getKind() == tok::l_brace)
    ++BraceCount;
  else if (BraceCount) {
    AngleBrackets.clear(*this);
    --BraceCount; // Don't let unbalanced }'s drive the count negative.
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}

/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the token kind. This method is specific to strings, as it
/// handles string literal concatenation, as per C99 5.1.1.2, translation
/// phase #6.
SourceLocation ConsumeStringToken() {
  assert(isTokenStringLiteral() &&
         "Should only consume string literals with this method");
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}

/// Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
SourceLocation ConsumeCodeCompletionToken() {
  assert(Tok.is(tok::code_completion));
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}

/// \brief When we are consuming a code-completion token without having
/// matched specific position in the grammar, provide code-completion results
/// based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();

/// Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
void cutOffParsing() {
  if (PP.isCodeCompletionEnabled())
    PP.setCodeCompletionReached();
  // Cut off parsing by acting as if we reached the end-of-file.
  Tok.setKind(tok::eof);
}

/// Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() { tok::TokenKind Kind = Tok.getKind(); return Kind == tok::eof || Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include; } /// Checks if the \p Level is valid for use in a fold expression. bool isFoldOperator(prec::Level Level) const; /// Checks if the \p Kind is a valid operator for fold expressions. bool isFoldOperator(tok::TokenKind Kind) const; /// Initialize all pragma handlers. void initializePragmaHandlers(); /// Destroy and reset all pragma handlers. void resetPragmaHandlers(); /// Handle the annotation token produced for #pragma unused(...) void HandlePragmaUnused(); /// Handle the annotation token produced for /// #pragma GCC visibility... void HandlePragmaVisibility(); /// Handle the annotation token produced for /// #pragma pack... void HandlePragmaPack(); /// Handle the annotation token produced for /// #pragma ms_struct... void HandlePragmaMSStruct(); /// Handle the annotation token produced for /// #pragma comment... void HandlePragmaMSComment(); void HandlePragmaMSPointersToMembers(); void HandlePragmaMSVtorDisp(); void HandlePragmaMSPragma(); bool HandlePragmaMSSection(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSSegment(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSInitSeg(StringRef PragmaName, SourceLocation PragmaLocation); /// Handle the annotation token produced for /// #pragma align... void HandlePragmaAlign(); /// Handle the annotation token produced for /// #pragma clang __debug dump... void HandlePragmaDump(); /// Handle the annotation token produced for /// #pragma weak id... void HandlePragmaWeak(); /// Handle the annotation token produced for /// #pragma weak id = id... void HandlePragmaWeakAlias(); /// Handle the annotation token produced for /// #pragma redefine_extname... void HandlePragmaRedefineExtname(); /// Handle the annotation token produced for /// #pragma STDC FP_CONTRACT... 
void HandlePragmaFPContract(); /// Handle the annotation token produced for /// #pragma STDC FENV_ACCESS... void HandlePragmaFEnvAccess(); /// Handle the annotation token produced for /// #pragma STDC FENV_ROUND... void HandlePragmaFEnvRound(); /// Handle the annotation token produced for /// #pragma float_control void HandlePragmaFloatControl(); /// \brief Handle the annotation token produced for /// #pragma clang fp ... void HandlePragmaFP(); /// Handle the annotation token produced for /// #pragma OPENCL EXTENSION... void HandlePragmaOpenCLExtension(); /// Handle the annotation token produced for /// #pragma clang __debug captured StmtResult HandlePragmaCaptured(); /// Handle the annotation token produced for /// #pragma clang loop and #pragma unroll. bool HandlePragmaLoopHint(LoopHint &Hint); bool ParsePragmaAttributeSubjectMatchRuleSet( attr::ParsedSubjectMatchRuleSet &SubjectMatchRules, SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc); void HandlePragmaAttribute(); /// GetLookAheadToken - This peeks ahead N tokens and returns that token /// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1) /// returns the token after Tok, etc. /// /// Note that this differs from the Preprocessor's LookAhead method, because /// the Parser always has one token lexed that the preprocessor doesn't. /// const Token &GetLookAheadToken(unsigned N) { if (N == 0 || Tok.is(tok::eof)) return Tok; return PP.LookAhead(N-1); } public: /// NextToken - This peeks ahead one token and returns it without /// consuming it. const Token &NextToken() { return PP.LookAhead(0); } /// getTypeAnnotation - Read a parsed type out of an annotation token. 
static TypeResult getTypeAnnotation(const Token &Tok) { if (!Tok.getAnnotationValue()) return TypeError(); return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue()); } private: static void setTypeAnnotation(Token &Tok, TypeResult T) { assert((T.isInvalid() || T.get()) && "produced a valid-but-null type annotation?"); Tok.setAnnotationValue(T.isInvalid() ? nullptr : T.get().getAsOpaquePtr()); } static NamedDecl *getNonTypeAnnotation(const Token &Tok) { return static_cast<NamedDecl*>(Tok.getAnnotationValue()); } static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) { Tok.setAnnotationValue(ND); } static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) { return static_cast<IdentifierInfo*>(Tok.getAnnotationValue()); } static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) { Tok.setAnnotationValue(ND); } /// Read an already-translated primary expression out of an annotation /// token. static ExprResult getExprAnnotation(const Token &Tok) { return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue()); } /// Set the primary expression corresponding to the given annotation /// token. static void setExprAnnotation(Token &Tok, ExprResult ER) { Tok.setAnnotationValue(ER.getAsOpaquePointer()); } public: // If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to // find a type name by attempting typo correction. 
bool TryAnnotateTypeOrScopeToken(); bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS, bool IsNewScope); bool TryAnnotateCXXScopeToken(bool EnteringContext = false); bool MightBeCXXScopeToken() { return Tok.is(tok::identifier) || Tok.is(tok::coloncolon) || (Tok.is(tok::annot_template_id) && NextToken().is(tok::coloncolon)) || Tok.is(tok::kw_decltype) || Tok.is(tok::kw___super); } bool TryAnnotateOptionalCXXScopeToken(bool EnteringContext = false) { return MightBeCXXScopeToken() && TryAnnotateCXXScopeToken(EnteringContext); } private: enum AnnotatedNameKind { /// Annotation has failed and emitted an error. ANK_Error, /// The identifier is a tentatively-declared name. ANK_TentativeDecl, /// The identifier is a template name. FIXME: Add an annotation for that. ANK_TemplateName, /// The identifier can't be resolved. ANK_Unresolved, /// Annotation was successful. ANK_Success }; AnnotatedNameKind TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr); /// Push a tok::annot_cxxscope token onto the token stream. void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation); /// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens, /// replacing them with the non-context-sensitive keywords. This returns /// true if the token was replaced. bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid) { if (!getLangOpts().AltiVec && !getLangOpts().ZVector) return false; if (Tok.getIdentifierInfo() != Ident_vector && Tok.getIdentifierInfo() != Ident_bool && (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel)) return false; return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid); } /// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector /// identifier token, replacing it with the non-context-sensitive __vector. /// This returns true if the token was replaced. 
bool TryAltiVecVectorToken() { if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) || Tok.getIdentifierInfo() != Ident_vector) return false; return TryAltiVecVectorTokenOutOfLine(); } bool TryAltiVecVectorTokenOutOfLine(); bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid); /// Returns true if the current token is the identifier 'instancetype'. /// /// Should only be used in Objective-C language modes. bool isObjCInstancetype() { assert(getLangOpts().ObjC); if (Tok.isAnnotation()) return false; if (!Ident_instancetype) Ident_instancetype = PP.getIdentifierInfo("instancetype"); return Tok.getIdentifierInfo() == Ident_instancetype; } /// TryKeywordIdentFallback - For compatibility with system headers using /// keywords as identifiers, attempt to convert the current token to an /// identifier and optionally disable the keyword for the remainder of the /// translation unit. This returns false if the token was not replaced, /// otherwise emits a diagnostic and returns true. bool TryKeywordIdentFallback(bool DisableKeyword); /// Get the TemplateIdAnnotation from the token. TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok); /// TentativeParsingAction - An object that is used as a kind of "tentative /// parsing transaction". It gets instantiated to mark the token position and /// after the token consumption is done, Commit() or Revert() is called to /// either "commit the consumed tokens" or revert to the previously marked /// token position. Example: /// /// TentativeParsingAction TPA(*this); /// ConsumeToken(); /// .... 
/// TPA.Revert(); /// class TentativeParsingAction { Parser &P; PreferredTypeBuilder PrevPreferredType; Token PrevTok; size_t PrevTentativelyDeclaredIdentifierCount; unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount; bool isActive; public: explicit TentativeParsingAction(Parser& p) : P(p) { PrevPreferredType = P.PreferredType; PrevTok = P.Tok; PrevTentativelyDeclaredIdentifierCount = P.TentativelyDeclaredIdentifiers.size(); PrevParenCount = P.ParenCount; PrevBracketCount = P.BracketCount; PrevBraceCount = P.BraceCount; P.PP.EnableBacktrackAtThisPos(); isActive = true; } void Commit() { assert(isActive && "Parsing action was finished!"); P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.PP.CommitBacktrackedTokens(); isActive = false; } void Revert() { assert(isActive && "Parsing action was finished!"); P.PP.Backtrack(); P.PreferredType = PrevPreferredType; P.Tok = PrevTok; P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.ParenCount = PrevParenCount; P.BracketCount = PrevBracketCount; P.BraceCount = PrevBraceCount; isActive = false; } ~TentativeParsingAction() { assert(!isActive && "Forgot to call Commit or Revert!"); } }; /// A TentativeParsingAction that automatically reverts in its destructor. /// Useful for disambiguation parses that will always be reverted. class RevertingTentativeParsingAction : private Parser::TentativeParsingAction { public: RevertingTentativeParsingAction(Parser &P) : Parser::TentativeParsingAction(P) {} ~RevertingTentativeParsingAction() { Revert(); } }; class UnannotatedTentativeParsingAction; /// ObjCDeclContextSwitch - An object used to switch context from /// an objective-c decl context to its enclosing decl context and /// back. 
class ObjCDeclContextSwitch { Parser &P; Decl *DC; SaveAndRestore<bool> WithinObjCContainer; public: explicit ObjCDeclContextSwitch(Parser &p) : P(p), DC(p.getObjCDeclContext()), WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) { if (DC) P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC)); } ~ObjCDeclContextSwitch() { if (DC) P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC)); } }; /// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the /// input. If so, it is consumed and false is returned. /// /// If a trivial punctuator misspelling is encountered, a FixIt error /// diagnostic is issued and false is returned after recovery. /// /// If the input is malformed, this emits the specified diagnostic and true is /// returned. bool ExpectAndConsume(tok::TokenKind ExpectedTok, unsigned Diag = diag::err_expected, StringRef DiagMsg = ""); /// The parser expects a semicolon and, if present, will consume it. /// /// If the next token is not a semicolon, this emits the specified diagnostic, /// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior /// to the semicolon, consumes that extra token. bool ExpectAndConsumeSemi(unsigned DiagID); /// The kind of extra semi diagnostic to emit. enum ExtraSemiKind { OutsideFunction = 0, InsideStruct = 1, InstanceVariableList = 2, AfterMemberFunctionDefinition = 3 }; /// Consume any extra semi-colons until the end of the line. void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified); /// Return false if the next token is an identifier. An 'expected identifier' /// error is emitted otherwise. /// /// The parser tries to recover from the error by checking if the next token /// is a C++ keyword when parsing Objective-C++. Return false if the recovery /// was successful. bool expectIdentifier(); /// Kinds of compound pseudo-tokens formed by a sequence of two real tokens. 
enum class CompoundToken {
  /// A '(' '{' beginning a statement-expression.
  StmtExprBegin,
  /// A '}' ')' ending a statement-expression.
  StmtExprEnd,
  /// A '[' '[' beginning a C++11 or C2x attribute.
  AttrBegin,
  /// A ']' ']' ending a C++11 or C2x attribute.
  AttrEnd,
  /// A '::' '*' forming a C++ pointer-to-member declaration.
  MemberPtr,
};

/// Check that a compound operator was written in a "sensible" way, and warn
/// if not.
void checkCompoundToken(SourceLocation FirstTokLoc,
                        tok::TokenKind FirstTokKind, CompoundToken Op);

public:
  //===--------------------------------------------------------------------===//
  // Scope manipulation

  /// ParseScope - Introduces a new scope for parsing. The kind of
  /// scope is determined by ScopeFlags. Objects of this type should
  /// be created on the stack to coincide with the position where the
  /// parser enters the new scope, and this object's constructor will
  /// create that new scope. Similarly, once the object is destroyed
  /// the parser will exit the scope.
  class ParseScope {
    Parser *Self;
    ParseScope(const ParseScope &) = delete;
    void operator=(const ParseScope &) = delete;

  public:
    // ParseScope - Construct a new object to manage a scope in the
    // parser Self where the new Scope is created with the flags
    // ScopeFlags, but only when we aren't about to enter a compound statement.
    ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
               bool BeforeCompoundStmt = false)
        : Self(Self) {
      if (EnteredScope && !BeforeCompoundStmt)
        Self->EnterScope(ScopeFlags);
      else {
        if (BeforeCompoundStmt)
          Self->incrementMSManglingNumber();
        // Null out Self so the destructor/Exit() become no-ops.
        this->Self = nullptr;
      }
    }

    // Exit - Exit the scope associated with this object now, rather
    // than waiting until the object is destroyed.
    void Exit() {
      if (Self) {
        Self->ExitScope();
        Self = nullptr;
      }
    }

    ~ParseScope() { Exit(); }
  };

  /// Introduces zero or more scopes for parsing. The scopes will all be exited
  /// when the object is destroyed.
class MultiParseScope {
  Parser &Self;
  // Number of scopes entered via Enter() that are still open.
  unsigned NumScopes = 0;

  MultiParseScope(const MultiParseScope &) = delete;

public:
  MultiParseScope(Parser &Self) : Self(Self) {}

  /// Enter one more scope with the given flags; it will be exited by Exit()
  /// or the destructor.
  void Enter(unsigned ScopeFlags) {
    Self.EnterScope(ScopeFlags);
    ++NumScopes;
  }

  /// Exit all scopes entered so far, innermost first.
  void Exit() {
    while (NumScopes) {
      Self.ExitScope();
      --NumScopes;
    }
  }

  ~MultiParseScope() { Exit(); }
};

/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);

/// ExitScope - Pop a scope off the scope stack.
void ExitScope();

/// Re-enter the template scopes for a declaration that might be a template.
unsigned ReenterTemplateScopes(MultiParseScope &S, Decl *D);

private:
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
  Scope *CurScope;
  unsigned OldFlags;
  ParseScopeFlags(const ParseScopeFlags &) = delete;
  void operator=(const ParseScopeFlags &) = delete;

public:
  ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
  ~ParseScopeFlags();
};

//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.

public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
// Convenience overload: diagnose at the current token's location.
DiagnosticBuilder Diag(unsigned DiagID) { return Diag(Tok, DiagID); }

private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
                        SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);

public:
/// Control flags for SkipUntil functions.
enum SkipUntilFlags {
  StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
  /// Stop skipping at specified token, but don't skip the token itself
  StopBeforeMatch = 1 << 1,
  StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};

friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
                                          SkipUntilFlags R) {
  return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
                                     static_cast<unsigned>(R));
}

/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified).  Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point.  If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character. Balances (), [], and {} delimiter tokens while
/// skipping.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool SkipUntil(tok::TokenKind T,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  return SkipUntil(llvm::makeArrayRef(T), Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind TokArray[] = {T1, T2};
  return SkipUntil(TokArray, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind TokArray[] = {T1, T2, T3};
  return SkipUntil(TokArray, Flags);
}
// The out-of-line worker that the inline overloads above forward to.
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));

/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();

/// The location of the first statement inside an else that might
/// have a misleading indentation. If there is no
/// MisleadingIndentationChecker on an else active, this location is invalid.
SourceLocation MisleadingIndentationElseLoc;

private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.

struct ParsingClass;

/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
  virtual ~LateParsedDeclaration();

  // Each ParseLexed* hook is a no-op by default; subclasses override the one
  // that matches the kind of late-parsed entity they hold.
  virtual void ParseLexedMethodDeclarations();
  virtual void ParseLexedMemberInitializers();
  virtual void ParseLexedMethodDefs();
  virtual void ParseLexedAttributes();
  virtual void ParseLexedPragmas();
};

/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
  LateParsedClass(Parser *P, ParsingClass *C);
  ~LateParsedClass() override;

  void ParseLexedMethodDeclarations() override;
  void ParseLexedMemberInitializers() override;
  void ParseLexedMethodDefs() override;
  void ParseLexedAttributes() override;
  void ParseLexedPragmas() override;

private:
  Parser *Self;
  ParsingClass *Class;
};

/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
  Parser *Self;
  CachedTokens Toks;
  IdentifierInfo &AttrName;
  // The macro this attribute was expanded from, if any.
  IdentifierInfo *MacroII = nullptr;
  SourceLocation AttrNameLoc;
  // All declarations the late-parsed attribute should be attached to.
  SmallVector<Decl *, 2> Decls;

  explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
                               SourceLocation Loc)
      : Self(P), AttrName(Name), AttrNameLoc(Loc) {}

  void ParseLexedAttributes() override;

  void addDecl(Decl *D) { Decls.push_back(D); }
};

/// Contains the lexed tokens of a pragma with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
class LateParsedPragma : public LateParsedDeclaration {
  Parser *Self = nullptr;
  AccessSpecifier AS = AS_none;
  CachedTokens Toks;

public:
  explicit LateParsedPragma(Parser *P, AccessSpecifier AS)
      : Self(P), AS(AS) {}

  void takeToks(CachedTokens &Cached) { Toks.swap(Cached); }
  const CachedTokens &toks() const { return Toks; }
  AccessSpecifier getAccessSpecifier() const { return AS; }

  void ParseLexedPragmas() override;
};

// A list of late-parsed attributes.  Used by ParseGNUAttributes.
class LateParsedAttrList : public SmallVector<LateParsedAttribute *, 2> {
public:
  LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) {}

  bool parseSoon() { return ParseSoon; }

private:
  bool ParseSoon; // Are we planning to parse these shortly after creation?
};

/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
  Parser *Self;
  Decl *D;
  CachedTokens Toks;

  explicit LexedMethod(Parser *P, Decl *MD) : Self(P), D(MD) {}

  void ParseLexedMethodDefs() override;
};

/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
  explicit LateParsedDefaultArgument(
      Decl *P, std::unique_ptr<CachedTokens> Toks = nullptr)
      : Param(P), Toks(std::move(Toks)) {}

  /// Param - The parameter declaration for this parameter.
  Decl *Param;

  /// Toks - The sequence of tokens that comprises the default
  /// argument expression, not including the '=' or the terminating
  /// ')' or ','. This will be NULL for parameters that have no
  /// default argument.
  std::unique_ptr<CachedTokens> Toks;
};

/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
  explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
      : Self(P), Method(M), ExceptionSpecTokens(nullptr) {}

  void ParseLexedMethodDeclarations() override;

  Parser *Self;

  /// Method - The method declaration.
  Decl *Method;

  /// DefaultArgs - Contains the parameters of the function and
  /// their default arguments. At least one of the parameters will
  /// have a default argument, but all of the parameters of the
  /// method will be stored so that they can be reintroduced into
  /// scope at the appropriate times.
  SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;

  /// The set of tokens that make up an exception-specification that
  /// has not yet been parsed.
  CachedTokens *ExceptionSpecTokens;
};

/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
  LateParsedMemberInitializer(Parser *P, Decl *FD) : Self(P), Field(FD) {}

  void ParseLexedMemberInitializers() override;

  Parser *Self;

  /// Field - The field declaration.
  Decl *Field;

  /// CachedTokens - The sequence of tokens that comprises the initializer,
  /// including any leading '='.
  CachedTokens Toks;
};

/// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
/// will be stored here with the tokens that will be parsed to create those
/// entities.
typedef SmallVector<LateParsedDeclaration *, 2>
    LateParsedDeclarationsContainer;

/// Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
  ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
      : TopLevelClass(TopLevelClass), IsInterface(IsInterface),
        TagOrTemplate(TagOrTemplate) {}

  /// Whether this is a "top-level" class, meaning that it is
  /// not nested within another class.
  bool TopLevelClass : 1;

  /// Whether this class is an __interface.
  bool IsInterface : 1;

  /// The class or class template whose definition we are parsing.
  Decl *TagOrTemplate;

  /// LateParsedDeclarations - Method declarations, inline definitions and
  /// nested classes that contain pieces whose parsing will be delayed until
  /// the top-level class is fully defined.
  LateParsedDeclarationsContainer LateParsedDeclarations;
};

/// The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;

/// Return the innermost class currently being parsed; requires a non-empty
/// ClassStack.
ParsingClass &getCurrentClass() {
  assert(!ClassStack.empty() && "No lexed method stacks!");
  return *ClassStack.top();
}

/// RAII object used to manage the parsing of a class definition.
class ParsingClassDefinition { Parser &P; bool Popped; Sema::ParsingClassState State; public: ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface) : P(P), Popped(false), State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) { } /// Pop this class of the stack. void Pop() { assert(!Popped && "Nested class has already been popped"); Popped = true; P.PopParsingClass(State); } ~ParsingClassDefinition() { if (!Popped) P.PopParsingClass(State); } }; /// Contains information about any template-specific /// information that has been parsed prior to parsing declaration /// specifiers. struct ParsedTemplateInfo { ParsedTemplateInfo() : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { } ParsedTemplateInfo(TemplateParameterLists *TemplateParams, bool isSpecialization, bool lastParameterListWasEmpty = false) : Kind(isSpecialization? ExplicitSpecialization : Template), TemplateParams(TemplateParams), LastParameterListWasEmpty(lastParameterListWasEmpty) { } explicit ParsedTemplateInfo(SourceLocation ExternLoc, SourceLocation TemplateLoc) : Kind(ExplicitInstantiation), TemplateParams(nullptr), ExternLoc(ExternLoc), TemplateLoc(TemplateLoc), LastParameterListWasEmpty(false){ } /// The kind of template we are parsing. enum { /// We are not parsing a template at all. NonTemplate = 0, /// We are parsing a template declaration. Template, /// We are parsing an explicit specialization. ExplicitSpecialization, /// We are parsing an explicit instantiation. ExplicitInstantiation } Kind; /// The template parameter lists, for template declarations /// and explicit specializations. TemplateParameterLists *TemplateParams; /// The location of the 'extern' keyword, if any, for an explicit /// instantiation SourceLocation ExternLoc; /// The location of the 'template' keyword, for an explicit /// instantiation. SourceLocation TemplateLoc; /// Whether the last template parameter list was empty. 
bool LastParameterListWasEmpty; SourceRange getSourceRange() const LLVM_READONLY; }; // In ParseCXXInlineMethods.cpp. struct ReenterTemplateScopeRAII; struct ReenterClassScopeRAII; void LexTemplateFunctionForLateParsing(CachedTokens &Toks); void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT); static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT); Sema::ParsingClassState PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface); void DeallocateParsedClasses(ParsingClass *Class); void PopParsingClass(Sema::ParsingClassState); enum CachedInitKind { CIK_DefaultArgument, CIK_DefaultInitializer }; NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS, ParsedAttributes &AccessAttrs, ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo, const VirtSpecifiers &VS, SourceLocation PureSpecLoc); void ParseCXXNonStaticMemberInitializer(Decl *VarD); void ParseLexedAttributes(ParsingClass &Class); void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D, bool EnterScope, bool OnDefinition); void ParseLexedAttribute(LateParsedAttribute &LA, bool EnterScope, bool OnDefinition); void ParseLexedMethodDeclarations(ParsingClass &Class); void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM); void ParseLexedMethodDefs(ParsingClass &Class); void ParseLexedMethodDef(LexedMethod &LM); void ParseLexedMemberInitializers(ParsingClass &Class); void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI); void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod); void ParseLexedPragmas(ParsingClass &Class); void ParseLexedPragma(LateParsedPragma &LP); bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks); bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK); bool ConsumeAndStoreConditional(CachedTokens &Toks); bool ConsumeAndStoreUntil(tok::TokenKind T1, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true) { return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, 
ConsumeFinalToken); } bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true); //===--------------------------------------------------------------------===// // C99 6.9: External Definitions. struct ParsedAttributesWithRange : ParsedAttributes { ParsedAttributesWithRange(AttributeFactory &factory) : ParsedAttributes(factory) {} void clear() { ParsedAttributes::clear(); Range = SourceRange(); } SourceRange Range; }; struct ParsedAttributesViewWithRange : ParsedAttributesView { ParsedAttributesViewWithRange() : ParsedAttributesView() {} void clearListOnly() { ParsedAttributesView::clearListOnly(); Range = SourceRange(); } SourceRange Range; }; DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr); bool isDeclarationAfterDeclarator(); bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator); DeclGroupPtrTy ParseDeclarationOrFunctionDefinition( ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr, AccessSpecifier AS = AS_none); DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs, ParsingDeclSpec &DS, AccessSpecifier AS); void SkipFunctionBody(); Decl *ParseFunctionDefinition(ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), LateParsedAttrList *LateParsedAttrs = nullptr); void ParseKNRParamDeclarations(Declarator &D); // EndLoc is filled with the location of the last token of the simple-asm. 
ExprResult ParseSimpleAsm(bool ForAsmLabel, SourceLocation *EndLoc); ExprResult ParseAsmStringLiteral(bool ForAsmLabel); // Objective-C External Declarations void MaybeSkipAttributes(tok::ObjCKeywordKind Kind); DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs); DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc); Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc, ParsedAttributes &prefixAttrs); class ObjCTypeParamListScope; ObjCTypeParamList *parseObjCTypeParamList(); ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs( ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc, SmallVectorImpl<IdentifierLocPair> &protocolIdents, SourceLocation &rAngleLoc, bool mayBeProtocolList = true); void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc, BalancedDelimiterTracker &T, SmallVectorImpl<Decl *> &AllIvarDecls, bool RBraceMissing); void ParseObjCClassInstanceVariables(Decl *interfaceDecl, tok::ObjCKeywordKind visibility, SourceLocation atLoc); bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P, SmallVectorImpl<SourceLocation> &PLocs, bool WarnOnDeclarations, bool ForObjCContainer, SourceLocation &LAngleLoc, SourceLocation &EndProtoLoc, bool consumeLastToken); /// Parse the first angle-bracket-delimited clause for an /// Objective-C object or object pointer type, which may be either /// type arguments or protocol qualifiers. void parseObjCTypeArgsOrProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken, bool warnOnIncompleteProtocols); /// Parse either Objective-C type arguments or protocol qualifiers; if the /// former, also parse protocol qualifiers afterward. 
void parseObjCTypeArgsAndProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken); /// Parse a protocol qualifier type such as '<NSCopying>', which is /// an anachronistic way of writing 'id<NSCopying>'. TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc); /// Parse Objective-C type arguments and protocol qualifiers, extending the /// current type with the parsed result. TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc, ParsedType type, bool consumeLastToken, SourceLocation &endLoc); void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey, Decl *CDecl); DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc, ParsedAttributes &prefixAttrs); struct ObjCImplParsingDataRAII { Parser &P; Decl *Dcl; bool HasCFunction; typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer; LateParsedObjCMethodContainer LateParsedObjCMethods; ObjCImplParsingDataRAII(Parser &parser, Decl *D) : P(parser), Dcl(D), HasCFunction(false) { P.CurParsedObjCImpl = this; Finished = false; } ~ObjCImplParsingDataRAII(); void finish(SourceRange AtEnd); bool isFinished() const { return Finished; } private: bool Finished; }; ObjCImplParsingDataRAII *CurParsedObjCImpl; void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl); DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc, ParsedAttributes &Attrs); DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd); Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc); Decl *ParseObjCPropertySynthesize(SourceLocation atLoc); Decl *ParseObjCPropertyDynamic(SourceLocation atLoc); IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation); // Definitions for Objective-c context sensitive keywords 
recognition. enum ObjCTypeQual { objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref, objc_nonnull, objc_nullable, objc_null_unspecified, objc_NumQuals }; IdentifierInfo *ObjCTypeQuals[objc_NumQuals]; bool isTokIdentifier_in() const; ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx, ParsedAttributes *ParamAttrs); void ParseObjCMethodRequirement(); Decl *ParseObjCMethodPrototype( tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword, bool MethodDefinition = true); Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType, tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword, bool MethodDefinition=true); void ParseObjCPropertyAttribute(ObjCDeclSpec &DS); Decl *ParseObjCMethodDefinition(); public: //===--------------------------------------------------------------------===// // C99 6.5: Expressions. /// TypeCastState - State whether an expression is or may be a type cast. enum TypeCastState { NotTypeCast = 0, MaybeTypeCast, IsTypeCast }; ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstantExpressionInExprEvalContext( TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseCaseExpression(SourceLocation CaseLoc); ExprResult ParseConstraintExpression(); ExprResult ParseConstraintLogicalAndExpression(bool IsTrailingRequiresClause); ExprResult ParseConstraintLogicalOrExpression(bool IsTrailingRequiresClause); // Expr that doesn't include commas. 
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks, unsigned &NumLineToksConsumed, bool IsUnevaluated); ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false); private: ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc); ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc); ExprResult ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec); /// Control what ParseCastExpression will parse. enum CastParseKind { AnyCastExpr = 0, UnaryExprOnly, PrimaryExprOnly }; ExprResult ParseCastExpression(CastParseKind ParseKind, bool isAddressOfOperand, bool &NotCastExpr, TypeCastState isTypeCast, bool isVectorLiteral = false, bool *NotPrimaryExpression = nullptr); ExprResult ParseCastExpression(CastParseKind ParseKind, bool isAddressOfOperand = false, TypeCastState isTypeCast = NotTypeCast, bool isVectorLiteral = false, bool *NotPrimaryExpression = nullptr); /// Returns true if the next token cannot start an expression. bool isNotExpressionStart(); /// Returns true if the next token would start a postfix-expression /// suffix. 
bool isPostfixExpressionSuffixStart() { tok::TokenKind K = Tok.getKind(); return (K == tok::l_square || K == tok::l_paren || K == tok::period || K == tok::arrow || K == tok::plusplus || K == tok::minusminus); } bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less); void checkPotentialAngleBracket(ExprResult &PotentialTemplateName); bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &, const Token &OpToken); bool checkPotentialAngleBracketDelimiter(const Token &OpToken) { if (auto *Info = AngleBrackets.getCurrent(*this)) return checkPotentialAngleBracketDelimiter(*Info, OpToken); return false; } ExprResult ParsePostfixExpressionSuffix(ExprResult LHS); ExprResult ParseUnaryExprOrTypeTraitExpression(); ExprResult ParseBuiltinPrimaryExpression(); ExprResult ParseUniqueStableNameExpression(); ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok, bool &isCastExpr, ParsedType &CastTy, SourceRange &CastRange); typedef SmallVector<Expr*, 20> ExprListTy; typedef SmallVector<SourceLocation, 20> CommaLocsTy; /// ParseExpressionList - Used for C/C++ (argument-)expression-list. bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs, llvm::function_ref<void()> ExpressionStarts = llvm::function_ref<void()>()); /// ParseSimpleExpressionList - A simple comma-separated list of expressions, /// used for misc language extensions. bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs); /// ParenParseOption - Control what ParseParenExpression will parse. enum ParenParseOption { SimpleExpr, // Only parse '(' expression ')' FoldExpr, // Also allow fold-expression <anything> CompoundStmt, // Also allow '(' compound-statement ')' CompoundLiteral, // Also allow '(' type-name ')' '{' ... 
'}' CastExpr // Also allow '(' type-name ')' <anything> }; ExprResult ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr, bool isTypeCast, ParsedType &CastTy, SourceLocation &RParenLoc); ExprResult ParseCXXAmbiguousParenExpression( ParenParseOption &ExprType, ParsedType &CastTy, BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt); ExprResult ParseCompoundLiteralExpression(ParsedType Ty, SourceLocation LParenLoc, SourceLocation RParenLoc); ExprResult ParseGenericSelectionExpression(); ExprResult ParseObjCBoolLiteral(); ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T); //===--------------------------------------------------------------------===// // C++ Expressions ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand, Token &Replacement); ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false); bool areTokensAdjacent(const Token &A, const Token &B); void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr, bool EnteringContext, IdentifierInfo &II, CXXScopeSpec &SS); bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHasErrors, bool EnteringContext, bool *MayBePseudoDestructor = nullptr, bool IsTypename = false, IdentifierInfo **LastII = nullptr, bool OnlyNamespace = false, bool InUsingDeclaration = false); //===--------------------------------------------------------------------===// // C++11 5.1.2: Lambda expressions /// Result of tentatively parsing a lambda-introducer. enum class LambdaIntroducerTentativeParse { /// This appears to be a lambda-introducer, which has been fully parsed. Success, /// This is a lambda-introducer, but has not been fully parsed, and this /// function needs to be called again to parse it. Incomplete, /// This is definitely an Objective-C message send expression, rather than /// a lambda-introducer, attribute-specifier, or array designator. MessageSend, /// This is not a lambda-introducer. 
Invalid, }; // [...] () -> type {...} ExprResult ParseLambdaExpression(); ExprResult TryParseLambdaExpression(); bool ParseLambdaIntroducer(LambdaIntroducer &Intro, LambdaIntroducerTentativeParse *Tentative = nullptr); ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Casts ExprResult ParseCXXCasts(); /// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast. ExprResult ParseBuiltinBitCast(); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Type Identification ExprResult ParseCXXTypeid(); //===--------------------------------------------------------------------===// // C++ : Microsoft __uuidof Expression ExprResult ParseCXXUuidof(); //===--------------------------------------------------------------------===// // C++ 5.2.4: C++ Pseudo-Destructor Expressions ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, ParsedType ObjectType); //===--------------------------------------------------------------------===// // C++ 9.3.2: C++ 'this' pointer ExprResult ParseCXXThis(); //===--------------------------------------------------------------------===// // C++ 15: C++ Throw Expression ExprResult ParseThrowExpression(); ExceptionSpecificationType tryParseExceptionSpecification( bool Delayed, SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &DynamicExceptions, SmallVectorImpl<SourceRange> &DynamicExceptionRanges, ExprResult &NoexceptExpr, CachedTokens *&ExceptionSpecTokens); // EndLoc is filled with the location of the last token of the specification. 
ExceptionSpecificationType ParseDynamicExceptionSpecification( SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &Exceptions, SmallVectorImpl<SourceRange> &Ranges); //===--------------------------------------------------------------------===// // C++0x 8: Function declaration trailing-return-type TypeResult ParseTrailingReturnType(SourceRange &Range, bool MayBeFollowedByDirectInit); //===--------------------------------------------------------------------===// // C++ 2.13.5: C++ Boolean Literals ExprResult ParseCXXBoolLiteral(); //===--------------------------------------------------------------------===// // C++ 5.2.3: Explicit type conversion (functional notation) ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS); /// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers. /// This should only be called when the current token is known to be part of /// simple-type-specifier. void ParseCXXSimpleTypeSpecifier(DeclSpec &DS); bool ParseCXXTypeSpecifierSeq(DeclSpec &DS); //===--------------------------------------------------------------------===// // C++ 5.3.4 and 5.3.5: C++ new and delete bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs, Declarator &D); void ParseDirectNewDeclarator(Declarator &D); ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start); ExprResult ParseCXXDeleteExpression(bool UseGlobal, SourceLocation Start); //===--------------------------------------------------------------------===// // C++ if/switch/while/for condition expression. 
// Defined further below; carries for-range-declaration info out of
// ParseCXXCondition.
struct ForRangeInfo;
/// Parse a C++ condition (as used by if/switch/while/for), including a
/// C++17 init-statement or a for-range declaration when permitted.
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
                                        SourceLocation Loc,
                                        Sema::ConditionKind CK,
                                        ForRangeInfo *FRI = nullptr);

//===--------------------------------------------------------------------===//
// C++ Coroutines

ExprResult ParseCoyieldExpression();

//===--------------------------------------------------------------------===//
// C++ Concepts

ExprResult ParseRequiresExpression();
void ParseTrailingRequiresClause(Declarator &D);

//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.

/// ParseInitializer
///       initializer: [C99 6.7.8]
///         assignment-expression
///         '{' ...
ExprResult ParseInitializer() {
  // A brace-enclosed initializer list; anything else is an
  // assignment-expression.
  if (Tok.isNot(tok::l_brace))
    return ParseAssignmentExpression();
  return ParseBraceInitializer();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
ExprResult ParseInitializerWithPotentialDesignator(
    llvm::function_ref<void(const Designation &)> CodeCompleteCB);

//===--------------------------------------------------------------------===//
// clang Expressions

ExprResult ParseBlockLiteralExpression();  // ^{...}

//===--------------------------------------------------------------------===//
// Objective-C Expressions

ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
                                          SourceLocation SuperLoc,
                                          ParsedType ReceiverType,
                                          Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
    SourceLocation LBracloc, SourceLocation SuperLoc,
    ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);

//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.

/// A SmallVector of statements, with stack size 32 (as that is the only one
/// used.)
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used.)
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;

StmtResult
ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
               ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
StmtResult ParseStatementOrDeclaration(
    StmtVector &Stmts, ParsedStmtContext StmtCtx,
    SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
    StmtVector &Stmts, ParsedStmtContext StmtCtx,
    SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
                                 ParsedStmtContext StmtCtx);
StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
                              bool MissingCase = false,
                              ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr, unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
bool ConsumeNullStmt(StmtVector &Stmts);
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
                               Sema::ConditionResult &CondResult,
                               SourceLocation Loc, Sema::ConditionKind CK,
                               SourceLocation *LParenLoc = nullptr,
                               SourceLocation *RParenLoc = nullptr);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts, ParsedStmtContext StmtCtx,
                               SourceLocation *TrailingElseLoc,
                               ParsedAttributesWithRange &Attrs);

/// Describes the behavior that should be taken for an __if_exists
/// block.
enum IfExistsBehavior {
  /// Parse the block; this code is always used.
  IEB_Parse,
  /// Skip the block entirely; this code is never used.
  IEB_Skip,
  /// Parse the block as a dependent block, which may be used in
  /// some template instantiations but not others.
  IEB_Dependent
};

/// Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
  /// The location of the initial keyword.
  SourceLocation KeywordLoc;
  /// Whether this is an __if_exists block (rather than an
  /// __if_not_exists block).
  bool IsIfExists;
  /// Nested-name-specifier preceding the name.
  CXXScopeSpec SS;
  /// The name we're looking for.
  UnqualifiedId Name;
  /// The behavior that this __if_exists or __if_not_exists block
  /// should follow.
  IfExistsBehavior Behavior;
};

bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
                                            ParsedAttributes &AccessAttrs,
                                            AccessSpecifier &CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
                                            bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
                         SmallVectorImpl<Expr *> &Constraints,
                         SmallVectorImpl<Expr *> &Exprs);

//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks

StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);

//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks

StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();

//===--------------------------------------------------------------------===//
// Objective-C Statements

StmtResult ParseObjCAtStatement(SourceLocation atLoc,
                                ParsedStmtContext StmtCtx);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);

//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.

/// A context for parsing declaration specifiers.  TODO: flesh this
/// out, there are other significant restrictions on specifiers than
/// would be best implemented in the parser.
enum class DeclSpecContext { DSC_normal, // normal context DSC_class, // class context, enables 'friend' DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list DSC_trailing, // C++11 trailing-type-specifier in a trailing return type DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration DSC_top_level, // top-level/namespace declaration context DSC_template_param, // template parameter context DSC_template_type_arg, // template type argument context DSC_objc_method_result, // ObjC method result context, enables 'instancetype' DSC_condition // condition declaration context }; /// Is this a context in which we are parsing just a type-specifier (or /// trailing-type-specifier)? static bool isTypeSpecifier(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_condition: return false; case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_type_specifier: case DeclSpecContext::DSC_trailing: case DeclSpecContext::DSC_alias_declaration: return true; } llvm_unreachable("Missing DeclSpecContext case"); } /// Whether a defining-type-specifier is permitted in a given context. enum class AllowDefiningTypeSpec { /// The grammar doesn't allow a defining-type-specifier here, and we must /// not parse one (eg, because a '{' could mean something else). No, /// The grammar doesn't allow a defining-type-specifier here, but we permit /// one for error recovery purposes. Sema will reject. NoButErrorRecovery, /// The grammar allows a defining-type-specifier here, even though it's /// always invalid. Sema will reject. YesButInvalid, /// The grammar allows a defining-type-specifier here, and one can be valid. 
Yes }; /// Is this a context in which we are parsing defining-type-specifiers (and /// so permit class and enum definitions in addition to non-defining class and /// enum elaborated-type-specifiers)? static AllowDefiningTypeSpec isDefiningTypeSpecifierContext(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_alias_declaration: case DeclSpecContext::DSC_objc_method_result: return AllowDefiningTypeSpec::Yes; case DeclSpecContext::DSC_condition: case DeclSpecContext::DSC_template_param: return AllowDefiningTypeSpec::YesButInvalid; case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_type_specifier: return AllowDefiningTypeSpec::NoButErrorRecovery; case DeclSpecContext::DSC_trailing: return AllowDefiningTypeSpec::No; } llvm_unreachable("Missing DeclSpecContext case"); } /// Is this a context in which an opaque-enum-declaration can appear? static bool isOpaqueEnumDeclarationContext(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: return true; case DeclSpecContext::DSC_alias_declaration: case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_condition: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_type_specifier: case DeclSpecContext::DSC_trailing: return false; } llvm_unreachable("Missing DeclSpecContext case"); } /// Is this a context in which we can perform class template argument /// deduction? 
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
  switch (DSC) {
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_type_specifier:
    return true;

  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return false;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}

/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
  SourceLocation ColonLoc;
  ExprResult RangeExpr;

  // True iff a ':' (and hence a range expression) was parsed.
  bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
};
struct ForRangeInfo : ForRangeInit {
  StmtResult LoopVar;
};

DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
                                SourceLocation &DeclEnd,
                                ParsedAttributesWithRange &attrs,
                                SourceLocation *DeclSpecStart = nullptr);
DeclGroupPtrTy
ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
                       ParsedAttributesWithRange &attrs, bool RequireSemi,
                       ForRangeInit *FRI = nullptr,
                       SourceLocation *DeclSpecStart = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
                              SourceLocation *DeclEnd = nullptr,
                              ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(
    Declarator &D,
    const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
    Declarator &D,
    const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
    ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);

/// When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();

bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
                      const ParsedTemplateInfo &TemplateInfo,
                      AccessSpecifier AS, DeclSpecContext DSC,
                      ParsedAttributesWithRange &Attrs);
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
    DeclSpec &DS,
    const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
    AccessSpecifier AS = AS_none,
    DeclSpecContext DSC = DeclSpecContext::DSC_normal,
    LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
    DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
    LateParsedAttrList *LateAttrs = nullptr);

void ParseSpecifierQualifierList(
    DeclSpec &DS, AccessSpecifier AS = AS_none,
    DeclSpecContext DSC = DeclSpecContext::DSC_normal);

void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
                                DeclaratorContext Context);

void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
                        const ParsedTemplateInfo &TemplateInfo,
                        AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType,
                          RecordDecl *TagDecl);

void ParseStructDeclaration(
    ParsingDeclSpec &DS,
    llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);

bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();

/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier.  Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;

/// Return true if we know that we are definitely looking at a
/// decl-specifier, and isn't part of an expression such as a function-style
/// cast. Return false if it's not a decl-specifier, or we're not sure.
bool isKnownToBeDeclarationSpecifier() {
  if (getLangOpts().CPlusPlus)
    return isCXXDeclarationSpecifier() == TPResult::True;
  return isDeclarationSpecifier(true);
}

/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
  if (getLangOpts().CPlusPlus)
    return isCXXDeclarationStatement();
  return isDeclarationSpecifier(true);
}

/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
  // Give OpenMP a chance to prepare before the loop is disambiguated.
  if (getLangOpts().OpenMP)
    Actions.startOpenMPLoop();
  if (getLangOpts().CPlusPlus)
    return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
  return isDeclarationSpecifier(true);
}

/// Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();

/// Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();

/// Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);

/// Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
  TypeIdInParens,
  TypeIdUnambiguous,
  TypeIdAsTemplateArgument
};

/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
  if (getLangOpts().CPlusPlus)
    return isCXXTypeId(TypeIdInParens, isAmbiguous);
  // In C the answer is never ambiguous.
  isAmbiguous = false;
  return isTypeSpecifierQualifier();
}
bool isTypeIdInParens() {
  bool isAmbiguous;
  return isTypeIdInParens(isAmbiguous);
}

/// Checks if the current tokens form a type-id or an expression.
/// It is similar to isTypeIdInParens but does not suppose that the type-id
/// is in parentheses.
bool isTypeIdUnambiguously() {
  bool IsAmbiguous;
  if (getLangOpts().CPlusPlus)
    return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
  return isTypeSpecifierQualifier();
}

/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();

/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);

/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);

struct ConditionDeclarationOrInitStatementState;
enum class ConditionOrInitStatement {
  Expression,    ///< Disambiguated as an expression (either kind).
  ConditionDecl, ///< Disambiguated as the declaration form of condition.
  InitStmtDecl,  ///< Disambiguated as a simple-declaration init-statement.
  ForRangeDecl,  ///< Disambiguated as a for-range declaration.
  Error          ///< Can't be any of the above!
};

/// Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
                                         bool CanBeForRangeDecl);

bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
  bool isAmbiguous;
  return isCXXTypeId(Context, isAmbiguous);
}

/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
  True, False, Ambiguous, Error
};

/// Determine whether we could have an enum-base.
///
/// \p AllowSemi If \c true, then allow a ';' after the enum-base; otherwise
/// only consider this to be an enum-base if the next token is a '{'.
///
/// \return \c false if this cannot possibly be an enum base; \c true
/// otherwise.
bool isEnumBase(bool AllowSemi);

/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
                          bool *InvalidAsDeclSpec = nullptr);

/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType(); /// Determine whether the current token sequence might be /// '<' template-argument-list '>' /// rather than a less-than expression. TPResult isTemplateArgumentList(unsigned TokensToSkip); /// Determine whether an '(' after an 'explicit' keyword is part of a C++20 /// 'explicit(bool)' declaration, in earlier language modes where that is an /// extension. TPResult isExplicitBool(); /// Determine whether an identifier has been tentatively declared as a /// non-type. Such tentative declarations should not be found to name a type /// during a tentative parse, but also should not be annotated as a non-type. bool isTentativelyDeclared(IdentifierInfo *II); // "Tentative parsing" functions, used for disambiguation. If a parsing error // is encountered they will return TPResult::Error. // Returning TPResult::True/False indicates that the ambiguity was // resolved and tentative parsing may stop. TPResult::Ambiguous indicates // that more tentative parsing is necessary for disambiguation. // They all consume tokens, so backtracking should be used after calling them. TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl); TPResult TryParseTypeofSpecifier(); TPResult TryParseProtocolQualifiers(); TPResult TryParsePtrOperatorSeq(); TPResult TryParseOperatorId(); TPResult TryParseInitDeclaratorList(); TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true, bool mayHaveDirectInit = false); TPResult TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr, bool VersusTemplateArg = false); TPResult TryParseFunctionDeclarator(); TPResult TryParseBracketDeclarator(); TPResult TryConsumeDeclarationSpecifier(); /// Try to skip a possibly empty sequence of 'attribute-specifier's without /// full validation of the syntactic structure of attributes. 
bool TrySkipAttributes();

public:
TypeResult
ParseTypeName(SourceRange *Range = nullptr,
              DeclaratorContext Context = DeclaratorContext::TypeNameContext,
              AccessSpecifier AS = AS_none, Decl **OwnedType = nullptr,
              ParsedAttributes *Attrs = nullptr);

private:
void ParseBlockId(SourceLocation CaretLoc);

/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
  const LangOptions &LO = getLangOpts();
  return LO.DoubleSquareBracketAttributes;
}

// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
  assert(Tok.is(tok::l_square));
  if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square))
    return false;
  return DiagnoseProhibitedCXX11Attribute();
}

bool DiagnoseProhibitedCXX11Attribute();

void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                  SourceLocation CorrectLocation) {
  if (!standardAttributesAllowed())
    return;
  if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
      Tok.isNot(tok::kw_alignas))
    return;
  DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                     SourceLocation CorrectLocation);

void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
                                    DeclSpec &DS, Sema::TagUseKind TUK);

// FixItLoc = possible correct location for the attributes
void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isInvalid())
    return;
  DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
  Attrs.clear();
}

void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isInvalid())
    return;
  DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
  Attrs.clearListOnly();
}
void DiagnoseProhibitedAttributes(const SourceRange &Range,
                                  SourceLocation FixItLoc);

// Forbid C++11 and C2x attributes that appear on certain syntactic locations
// which the standard permits but we don't yet support; for example,
// attributes appertaining to decl specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
                             unsigned DiagID);

/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();

/// Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();

/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
                         ParsedAttributes &Attrs, SourceLocation *EndLoc,
                         IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
                         ParsedAttr::Syntax Syntax);

void MaybeParseGNUAttributes(Declarator &D,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.is(tok::kw___attribute)) {
    ParsedAttributes attrs(AttrFactory);
    SourceLocation endLoc;
    ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D);
    D.takeAttributes(attrs, endLoc);
  }
}
void MaybeParseGNUAttributes(ParsedAttributes &attrs,
                             SourceLocation *endLoc = nullptr,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.is(tok::kw___attribute))
    ParseGNUAttributes(attrs, endLoc, LateAttrs);
}
void ParseGNUAttributes(ParsedAttributes &attrs,
                        SourceLocation *endLoc = nullptr,
                        LateParsedAttrList *LateAttrs = nullptr,
                        Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
                           SourceLocation AttrNameLoc,
                           ParsedAttributes &Attrs, SourceLocation *EndLoc,
                           IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
                           ParsedAttr::Syntax Syntax, Declarator *D);
IdentifierLoc *ParseIdentifierLoc();

unsigned
ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
                        ParsedAttributes &Attrs, SourceLocation *EndLoc,
                        IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
                        ParsedAttr::Syntax Syntax);

void MaybeParseCXX11Attributes(Declarator &D) {
  if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
    ParsedAttributesWithRange attrs(AttrFactory);
    SourceLocation endLoc;
    ParseCXX11Attributes(attrs, &endLoc);
    D.takeAttributes(attrs, endLoc);
  }
}
bool MaybeParseCXX11Attributes(ParsedAttributes &attrs,
                               SourceLocation *endLoc = nullptr) {
  if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
    ParsedAttributesWithRange attrsWithRange(AttrFactory);
    ParseCXX11Attributes(attrsWithRange, endLoc);
    attrs.takeAllFrom(attrsWithRange);
    return true;
  }
  return false;
}
void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
                               SourceLocation *endLoc = nullptr,
                               bool OuterMightBeMessageSend = false) {
  if (standardAttributesAllowed() &&
      isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
    ParseCXX11Attributes(attrs, endLoc);
}

void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
                                  SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
                          SourceLocation *EndLoc = nullptr);

/// Parses a C++11 (or C2x)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
                             SourceLocation AttrNameLoc,
                             ParsedAttributes &Attrs, SourceLocation *EndLoc,
                             IdentifierInfo *ScopeName,
                             SourceLocation ScopeLoc);

IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);

void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
                                   SourceLocation *endLoc = nullptr) {
  if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square))
    ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
                              SourceLocation *endLoc = nullptr);
void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
                                  SourceLocation *End = nullptr) {
  const auto &LO = getLangOpts();
  if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec))
    ParseMicrosoftDeclSpecs(Attrs, End);
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
                             SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
                                SourceLocation AttrNameLoc,
                                ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);

/// Parses opencl_unroll_hint attribute if language is OpenCL v2.0
/// or higher.
/// \return false if error happens.
bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) {
  if (getLangOpts().OpenCL)
    return ParseOpenCLUnrollHintAttribute(Attrs);
  return true;
}

/// Parses opencl_unroll_hint attribute.
/// \return false if error happens.
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs); void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs); VersionTuple ParseVersionTuple(SourceRange &Range); void ParseAvailabilityAttribute(IdentifierInfo &Availability, SourceLocation AvailabilityLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); Optional<AvailabilitySpec> ParseAvailabilitySpec(); ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc); void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol, SourceLocation Loc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated, SourceLocation ObjCBridgeRelatedLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseSwiftNewtypeAttribute(IdentifierInfo &SwiftNewtype, SourceLocation SwiftNewtypeLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseAttributeWithTypeArg(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseTypeofSpecifier(DeclSpec &DS); SourceLocation ParseDecltypeSpecifier(DeclSpec &DS); void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS, SourceLocation StartLoc, SourceLocation EndLoc); void ParseUnderlyingTypeSpecifier(DeclSpec &DS); void ParseAtomicSpecifier(DeclSpec &DS); ExprResult ParseAlignArgument(SourceLocation Start, SourceLocation 
&EllipsisLoc); void ParseAlignmentSpecifier(ParsedAttributes &Attrs, SourceLocation *endLoc = nullptr); ExprResult ParseExtIntegerArgument(); void ParsePtrauthQualifier(ParsedAttributes &Attrs); VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const; VirtSpecifiers::Specifier isCXX11VirtSpecifier() const { return isCXX11VirtSpecifier(Tok); } void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface, SourceLocation FriendLoc); bool isCXX11FinalKeyword() const; /// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to /// enter a new C++ declarator scope and exit it when the function is /// finished. class DeclaratorScopeObj { Parser &P; CXXScopeSpec &SS; bool EnteredScope; bool CreatedScope; public: DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss) : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {} void EnterDeclaratorScope() { assert(!EnteredScope && "Already entered the scope!"); assert(SS.isSet() && "C++ scope was not set!"); CreatedScope = true; P.EnterScope(0); // Not a decl scope. if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS)) EnteredScope = true; } ~DeclaratorScopeObj() { if (EnteredScope) { assert(SS.isSet() && "C++ scope was cleared ?"); P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS); } if (CreatedScope) P.ExitScope(); } }; /// ParseDeclarator - Parse and verify a newly-initialized declarator. void ParseDeclarator(Declarator &D); /// A function that parses a variant of direct-declarator. typedef void (Parser::*DirectDeclParseFunction)(Declarator&); void ParseDeclaratorInternal(Declarator &D, DirectDeclParseFunction DirectDeclParser); enum AttrRequirements { AR_NoAttributesParsed = 0, ///< No attributes are diagnosed. AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes. 
AR_GNUAttributesParsed = 1 << 1, AR_CXX11AttributesParsed = 1 << 2, AR_DeclspecAttributesParsed = 1 << 3, AR_AllAttributesParsed = AR_GNUAttributesParsed | AR_CXX11AttributesParsed | AR_DeclspecAttributesParsed, AR_VendorAttributesParsed = AR_GNUAttributesParsed | AR_DeclspecAttributesParsed }; void ParseTypeQualifierListOpt( DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed, bool AtomicAllowed = true, bool IdentifierRequired = false, Optional<llvm::function_ref<void()>> CodeCompletionHandler = None); void ParseDirectDeclarator(Declarator &D); void ParseDecompositionDeclarator(Declarator &D); void ParseParenDeclarator(Declarator &D); void ParseFunctionDeclarator(Declarator &D, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker, bool IsAmbiguous, bool RequiresArg = false); void InitCXXThisScopeForDeclaratorIfRelevant( const Declarator &D, const DeclSpec &DS, llvm::Optional<Sema::CXXThisScopeRAII> &ThisScope); bool ParseRefQualifier(bool &RefQualifierIsLValueRef, SourceLocation &RefQualifierLoc); bool isFunctionDeclaratorIdentifierList(); void ParseFunctionDeclaratorIdentifierList( Declarator &D, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo); void ParseParameterDeclarationClause( DeclaratorContext DeclaratorContext, ParsedAttributes &attrs, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo, SourceLocation &EllipsisLoc); void ParseBracketDeclarator(Declarator &D); void ParseMisplacedBracketDeclarator(Declarator &D); //===--------------------------------------------------------------------===// // C++ 7: Declarations [dcl.dcl] /// The kind of attribute specifier we have found. enum CXX11AttributeKind { /// This is not an attribute specifier. CAK_NotAttributeSpecifier, /// This should be treated as an attribute-specifier. CAK_AttributeSpecifier, /// The next tokens are '[[', but this is not an attribute-specifier. This /// is ill-formed by C++11 [dcl.attr.grammar]p6. 
CAK_InvalidAttributeSpecifier }; CXX11AttributeKind isCXX11AttributeSpecifier(bool Disambiguate = false, bool OuterMightBeMessageSend = false); void DiagnoseUnexpectedNamespace(NamedDecl *Context); DeclGroupPtrTy ParseNamespace(DeclaratorContext Context, SourceLocation &DeclEnd, SourceLocation InlineLoc = SourceLocation()); struct InnerNamespaceInfo { SourceLocation NamespaceLoc; SourceLocation InlineLoc; SourceLocation IdentLoc; IdentifierInfo *Ident; }; using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>; void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs, unsigned int index, SourceLocation &InlineLoc, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker); Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context); Decl *ParseExportDeclaration(); DeclGroupPtrTy ParseUsingDirectiveOrDeclaration( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs); Decl *ParseUsingDirective(DeclaratorContext Context, SourceLocation UsingLoc, SourceLocation &DeclEnd, ParsedAttributes &attrs); struct UsingDeclarator { SourceLocation TypenameLoc; CXXScopeSpec SS; UnqualifiedId Name; SourceLocation EllipsisLoc; void clear() { TypenameLoc = EllipsisLoc = SourceLocation(); SS.clear(); Name.clear(); } }; bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D); DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, SourceLocation &DeclEnd, AccessSpecifier AS = AS_none); Decl *ParseAliasDeclarationAfterDeclarator( const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS, ParsedAttributes &Attrs, Decl **OwnedType = nullptr); Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd); Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, SourceLocation &DeclEnd); 
//===--------------------------------------------------------------------===// // C++ 9: classes [class] and C structs/unions. bool isValidAfterTypeSpecifier(bool CouldBeBitfield); void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, bool EnteringContext, DeclSpecContext DSC, ParsedAttributesWithRange &Attributes); void SkipCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, unsigned TagType, Decl *TagDecl); void ParseCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, ParsedAttributesWithRange &Attrs, unsigned TagType, Decl *TagDecl); ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction, SourceLocation &EqualLoc); bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo, VirtSpecifiers &VS, ExprResult &BitfieldSize, LateParsedAttrList &LateAttrs); void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D, VirtSpecifiers &VS); DeclGroupPtrTy ParseCXXClassMemberDeclaration( AccessSpecifier AS, ParsedAttributes &Attr, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), ParsingDeclRAIIObject *DiagsFromTParams = nullptr); DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas( AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs, DeclSpec::TST TagType, Decl *Tag); void ParseConstructorInitializer(Decl *ConstructorDecl); MemInitResult ParseMemInitializer(Decl *ConstructorDecl); void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo, Decl *ThisDecl); //===--------------------------------------------------------------------===// // C++ 10: Derived classes [class.derived] TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc, SourceLocation &EndLocation); void ParseBaseClause(Decl *ClassDecl); BaseResult ParseBaseSpecifier(Decl *ClassDecl); AccessSpecifier getAccessSpecifierIfPresent() const; bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS, ParsedType 
ObjectType, bool ObjectHadErrors, SourceLocation TemplateKWLoc, IdentifierInfo *Name, SourceLocation NameLoc, bool EnteringContext, UnqualifiedId &Id, bool AssumeTemplateId); bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Result); //===--------------------------------------------------------------------===// // OpenMP: Directives and clauses. /// Parse clauses for '#pragma omp declare simd'. DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// Parse a property kind into \p TIProperty for the selector set \p Set and /// selector \p Selector. void parseOMPTraitPropertyKind(OMPTraitProperty &TIProperty, llvm::omp::TraitSet Set, llvm::omp::TraitSelector Selector, llvm::StringMap<SourceLocation> &Seen); /// Parse a selector kind into \p TISelector for the selector set \p Set. void parseOMPTraitSelectorKind(OMPTraitSelector &TISelector, llvm::omp::TraitSet Set, llvm::StringMap<SourceLocation> &Seen); /// Parse a selector set kind into \p TISet. void parseOMPTraitSetKind(OMPTraitSet &TISet, llvm::StringMap<SourceLocation> &Seen); /// Parses an OpenMP context property. void parseOMPContextProperty(OMPTraitSelector &TISelector, llvm::omp::TraitSet Set, llvm::StringMap<SourceLocation> &Seen); /// Parses an OpenMP context selector. void parseOMPContextSelector(OMPTraitSelector &TISelector, llvm::omp::TraitSet Set, llvm::StringMap<SourceLocation> &SeenSelectors); /// Parses an OpenMP context selector set. void parseOMPContextSelectorSet(OMPTraitSet &TISet, llvm::StringMap<SourceLocation> &SeenSets); /// Parses OpenMP context selectors. bool parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo &TI); /// Parse a `match` clause for an '#pragma omp declare variant'. Return true /// if there was an error. bool parseOMPDeclareVariantMatchClause(SourceLocation Loc, OMPTraitInfo &TI); /// Parse clauses for '#pragma omp declare variant'. 
void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// Parse clauses for '#pragma omp declare target'. DeclGroupPtrTy ParseOMPDeclareTargetClauses(); /// Parse '#pragma omp end declare target'. void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind, SourceLocation Loc); /// Skip tokens until a `annot_pragma_openmp_end` was found. Emit a warning if /// it is not the current token. void skipUntilPragmaOpenMPEnd(OpenMPDirectiveKind DKind); /// Check the \p FoundKind against the \p ExpectedKind, if not issue an error /// that the "end" matching the "begin" directive of kind \p BeginKind was not /// found. Finally, if the expected kind was found or if \p SkipUntilOpenMPEnd /// is set, skip ahead using the helper `skipUntilPragmaOpenMPEnd`. void parseOMPEndDirective(OpenMPDirectiveKind BeginKind, OpenMPDirectiveKind ExpectedKind, OpenMPDirectiveKind FoundKind, SourceLocation MatchingLoc, SourceLocation FoundLoc, bool SkipUntilOpenMPEnd); /// Parses declarative OpenMP directives. DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl( AccessSpecifier &AS, ParsedAttributesWithRange &Attrs, bool Delayed = false, DeclSpec::TST TagType = DeclSpec::TST_unspecified, Decl *TagDecl = nullptr); /// Parse 'omp declare reduction' construct. DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS); /// Parses initializer for provided omp_priv declaration inside the reduction /// initializer. void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm); /// Parses 'omp declare mapper' directive. DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS); /// Parses variable declaration in 'omp declare mapper' directive. TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range, DeclarationName &Name, AccessSpecifier AS = AS_none); /// Tries to parse cast part of OpenMP array shaping operation: /// '[' expression ']' { '[' expression ']' } ')'. 
bool tryParseOpenMPArrayShapingCastPart(); /// Parses simple list of variables. /// /// \param Kind Kind of the directive. /// \param Callback Callback function to be called for the list elements. /// \param AllowScopeSpecifier true, if the variables can have fully /// qualified names. /// bool ParseOpenMPSimpleVarList( OpenMPDirectiveKind Kind, const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> & Callback, bool AllowScopeSpecifier); /// Parses declarative or executable directive. /// /// \param StmtCtx The context in which we're parsing the directive. StmtResult ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx); /// Parses clause of kind \a CKind for directive of a kind \a Kind. /// /// \param DKind Kind of current directive. /// \param CKind Kind of current clause. /// \param FirstClause true, if this is the first clause of a kind \a CKind /// in current directive. /// OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind, bool FirstClause); /// Parses clause with a single expression of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses simple clause of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses clause with a single expression and an additional argument /// of a kind \a Kind. /// /// \param DKind Directive kind. /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, bool ParseOnly); /// Parses clause without any additional arguments. 
/// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false); /// Parses clause with the list of variables of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, bool ParseOnly); /// Parses and creates OpenMP 5.0 iterators expression: /// <iterators> = 'iterator' '(' { [ <iterator-type> ] identifier = /// <range-specification> }+ ')' ExprResult ParseOpenMPIteratorsExpr(); /// Parses allocators and traits in the context of the uses_allocator clause. /// Expected format: /// '(' { <allocator> [ '(' <allocator_traits> ')' ] }+ ')' OMPClause *ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind); public: /// Parses simple expression in parens for single-expression clauses of OpenMP /// constructs. /// \param RLoc Returned location of right paren. ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc, bool IsAddressOfOperand = false); /// Data used for parsing list of variables in OpenMP clauses. struct OpenMPVarListDataTy { Expr *DepModOrTailExpr = nullptr; SourceLocation ColonLoc; SourceLocation RLoc; CXXScopeSpec ReductionOrMapperIdScopeSpec; DeclarationNameInfo ReductionOrMapperId; int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or ///< lastprivate clause. SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers> MapTypeModifiers; SmallVector<SourceLocation, NumberOfOMPMapClauseModifiers> MapTypeModifiersLoc; SmallVector<OpenMPMotionModifierKind, NumberOfOMPMotionModifiers> MotionModifiers; SmallVector<SourceLocation, NumberOfOMPMotionModifiers> MotionModifiersLoc; bool IsMapTypeImplicit = false; SourceLocation ExtraModifierLoc; }; /// Parses clauses with list. 
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, SmallVectorImpl<Expr *> &Vars, OpenMPVarListDataTy &Data); bool ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHadErrors, bool EnteringContext, bool AllowDestructorName, bool AllowConstructorName, bool AllowDeductionGuide, SourceLocation *TemplateKWLoc, UnqualifiedId &Result); /// Parses the mapper modifier in map, to, and from clauses. bool parseMapperModifier(OpenMPVarListDataTy &Data); /// Parses map-type-modifiers in map clause. /// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list) /// where, map-type-modifier ::= always | close | mapper(mapper-identifier) bool parseMapTypeModifiers(OpenMPVarListDataTy &Data); private: //===--------------------------------------------------------------------===// // C++ 14: Templates [temp] // C++ 14.1: Template Parameters [temp.param] Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS); Decl *ParseSingleDeclarationAfterTemplate( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); bool ParseTemplateParameters(MultiParseScope &TemplateScopes, unsigned Depth, SmallVectorImpl<NamedDecl *> &TemplateParams, SourceLocation &LAngleLoc, SourceLocation &RAngleLoc); bool ParseTemplateParameterList(unsigned Depth, SmallVectorImpl<NamedDecl*> &TemplateParams); TPResult isStartOfTemplateTypeParameter(); NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position); NamedDecl 
*ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position); bool isTypeConstraintAnnotation(); bool TryAnnotateTypeConstraint(); NamedDecl * ParseConstrainedTemplateTypeParameter(unsigned Depth, unsigned Position); void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc, SourceLocation CorrectLoc, bool AlreadyHasEllipsis, bool IdentifierHasName); void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc, Declarator &D); // C++ 14.3: Template arguments [temp.arg] typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList; bool ParseGreaterThanInTemplateList(SourceLocation LAngleLoc, SourceLocation &RAngleLoc, bool ConsumeLastToken, bool ObjCGenericList); bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken, SourceLocation &LAngleLoc, TemplateArgList &TemplateArgs, SourceLocation &RAngleLoc); bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &TemplateName, bool AllowTypeAnnotation = true, bool TypeConstraint = false); void AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS, bool IsClassName = false); bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs); ParsedTemplateArgument ParseTemplateTemplateArgument(); ParsedTemplateArgument ParseTemplateArgument(); Decl *ParseExplicitInstantiation(DeclaratorContext Context, SourceLocation ExternLoc, SourceLocation TemplateLoc, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); // C++2a: Template, concept definition [temp] Decl * ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd); //===--------------------------------------------------------------------===// // Modules DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl); Decl *ParseModuleImport(SourceLocation AtLoc); bool parseMisplacedModuleImport(); bool tryParseMisplacedModuleImport() { tok::TokenKind Kind = Tok.getKind(); if (Kind == tok::annot_module_begin || Kind == 
tok::annot_module_end || Kind == tok::annot_module_include) return parseMisplacedModuleImport(); return false; } bool ParseModuleName( SourceLocation UseLoc, SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path, bool IsImport); //===--------------------------------------------------------------------===// // C++11/G++: Type Traits [Type-Traits.html in the GCC manual] ExprResult ParseTypeTrait(); /// Parse the given string as a type. /// /// This is a dangerous utility function currently employed only by API notes. /// It is not a general entry-point for safely parsing types from strings. /// /// \param typeStr The string to be parsed as a type. /// \param context The name of the context in which this string is being /// parsed, which will be used in diagnostics. /// \param includeLoc The location at which this parse was triggered. TypeResult parseTypeFromString(StringRef typeStr, StringRef context, SourceLocation includeLoc); //===--------------------------------------------------------------------===// // Embarcadero: Arary and Expression Traits ExprResult ParseArrayTypeTrait(); ExprResult ParseExpressionTrait(); ExprResult ParseBuiltinPtrauthTypeDiscriminator(); //===--------------------------------------------------------------------===// // Preprocessor code-completion pass-through void CodeCompleteDirective(bool InConditional) override; void CodeCompleteInConditionalExclusion() override; void CodeCompleteMacroName(bool IsDefinition) override; void CodeCompletePreprocessorExpression() override; void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned ArgumentIndex) override; void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override; void CodeCompleteNaturalLanguage() override; class GNUAsmQualifiers { unsigned Qualifiers = AQ_unspecified; public: enum AQ { AQ_unspecified = 0, AQ_volatile = 1, AQ_inline = 2, AQ_goto = 4, }; static const char *getQualifierName(AQ Qualifier); bool setAsmQualifier(AQ 
Qualifier); inline bool isVolatile() const { return Qualifiers & AQ_volatile; }; inline bool isInline() const { return Qualifiers & AQ_inline; }; inline bool isGoto() const { return Qualifiers & AQ_goto; } }; bool isGCCAsmStatement(const Token &TokAfterAsm) const; bool isGNUAsmQualifier(const Token &TokAfterAsm) const; GNUAsmQualifiers::AQ getGNUAsmQualifier(const Token &Tok) const; bool parseGNUAsmQualifierListOpt(GNUAsmQualifiers &AQ); }; } // end namespace clang #endif
entrega-malloc-batch.c
#include <string.h> #include <stdlib.h> #include <stdio.h> #include <omp.h> #include "ctimer.h" void add (int A[], int B[], int C[], int N) { int i, carry, sum; carry = 0; for (i=0; i<N; i++) { sum = A[i] + B[i] + carry; if (sum >= 10) { carry = 1; sum -= 10; } else carry = 0; C[i] = sum; } if (carry) printf ("overflow in addition!\n"); } /* B = n * A */ void multiply_one_digit (int A[], int B[], int n, int N) { int i, carry; carry = 0; for (i=0; i<N; i++) { B[i] = n * A[i]; B[i] += carry; if (B[i] >= 10) { carry = B[i] / 10; B[i] %= 10; } else carry = 0; } if (carry) printf ("overflow in multiplication!\n"); } /* "multiplies" a number by BASEn */ void shift_left (int A[], int n, int N) { int i; for (i=N-1; i>=n; i--) A[i] = A[i-n]; while (i >= 0) A[i--] = 0; } /* C = A * B */ void multiply (int A[], int B[], int C[], int N) { int i, j, P[N]; for (i=0; i<N; i++) { /* multiply B by digit A[i] */ multiply_one_digit (B, P, A[i], N); /* shift the partial product left i bytes */ shift_left (P, i, N); /* add result to the running sum */ add (C, P, C, N); } } main(int argc, char**argv) { double t1,t2,tucpu,tscpu,tt1; //printf("%s\n", argv[1]); int len1 = strlen(argv[1]); //printf("%d\n",len1); //printf("%s\n", argv[2]); int len2 = strlen(argv[2]); //printf("%d\n",len2); int N = len1+len2; int A[N], B[N], C[N]; for(int i=0;i < N; i++){ A[i] = 0; B[i] = 0; C[i] = 0; } char k[len1]; strcpy(k, argv[1]); for(int i=0;i < len1; i++){ A[i] = k[len1-1-i] - '0'; } char l[len2]; strcpy(l, argv[2]); for(int i=0;i < len2; i++){ B[i] = l[len2-1-i] - '0'; } // SECUENCIAL ctimer(&t1,&tucpu,&tscpu); multiply(A,B,C,N); ctimer(&t2,&tucpu,&tscpu); tt1 = t2-t1; // PARALELO int E[N]; for(int i=0;i < N; i++) E[i] = 0; //omp_set_dynamic(0); omp_set_num_threads(4); int D[4*N]; int n, i, carry,j,sum, P[N], tid, nthreads; ctimer(&t1,&tucpu,&tscpu); #pragma omp parallel shared (B,A) private(i,n, carry, j, sum, P, tid) { nthreads = omp_get_num_threads(); for(i=0;i < N*nthreads; i++){ D[i] = 0; } 
#pragma omp barrier tid = omp_get_thread_num(); //printf("soy el thread %u de %u \n", tid, nthreads); for (i=tid; i<len1; i=i+nthreads) { n = A[i]; //printf("\nthread %d i %d n %d\n",tid,i,n); if(tid==0){ //printf("\nthread %d i %d n %d\n",tid,i,n); /*printf("Pbefore [ "); for(int loop = N-1; loop >= 0; loop--) printf("%d ", P[loop]); printf("]\n");*/ } carry = 0; for (j=0; j<N; j++) { P[j] = n * B[j]; //if (tid==0) //printf("\nPJ %d n %d BJ %d\n",P[j],n,B[j]); P[j] += carry; if (P[j] >= 10) { carry = P[j] / 10; P[j] %= 10; } else carry = 0; } if (carry) printf ("overflow in multiplication!\n"); /* shift the partial product left i bytes */ /*if(tid==0){ //printf("\nthread %d i %d n %d\n",tid,i,n); printf("PSH0 [ "); for(int loop = N-1; loop >= 0; loop--) printf("%d ", P[loop]); printf("]\n"); }*/ for (j=N-1; j>=i; j--) P[j] = P[j-i]; while (j >= 0) P[j--] = 0; /* add result to the running sum */ /*if(tid==0){ //printf("\nthread %d i %d n %d\n",tid,i,n); printf("P0 [ "); for(int loop = N-1; loop >= 0; loop--) printf("%d ", P[loop]); printf("]\n"); }*/ //int sum; carry = 0; sum = 0; for (j=0; j<N; j++) { sum = D[tid*N+j] + P[j] + carry; if (sum >= 10) { carry = 1; sum -= 10; } else carry = 0; D[tid*N+j] = sum; } if (carry) printf ("overflow in addition!\n"); } #pragma omp barrier if(tid==0){ /*printf("D [ "); for(int loop = N*nthreads-1; loop >= 0; loop--) printf("%d ", D[loop]); printf("]\n");*/ for(int k=0; k<nthreads;k++){ carry = 0; sum = 0; for (j=0; j<N; j++) { sum = E[j] + D[k*N+j] + carry; if (sum >= 10) { carry = 1; sum -= 10; } else carry = 0; E[j] = sum; } if (carry) printf ("overflow in addition!\n"); } /*printf("C [ "); for(int loop = N-1; loop >= 0; loop--) printf("%d ", E[loop]); printf("]\n");*/ } } ctimer(&t2,&tucpu,&tscpu); printf("%d \t %f \t %f \n",N,(float) tt1, (float) (t2-t1)); /*for(int loop = N-1; loop >= 0; loop--) printf("%d ", C[loop]); printf("]\n");*/ }
ninja_filter.c
/* NINJA-OPS: NINJA Is Not Just Another - OTU Picking Solution
   Short-read filtering, processing, and denoising program.
   http://ninja-ops.ninja
   This program performs filtering of the input reads by various means.
   Compilation information (GCC): Ascribes to std=gnu99 multi-platform.
   Use -fopenmp if available for SMP
   Flags: -m64 -O3 -std=gnu99 -fwhole-program [-fopenmp] ninja_filter.c
   Compilation directives (-D ...) exist to change k-mer behavior.
   USE_QSORT may be set to use the faster sort in qsort.h
   PACKSIZE= may be set for 4, 8, 16, 32, or 64-mers.
   DO_K_ENDPIECE can be set to enable end-piece consideration in the
   default k-mer denoising algorithm
   DO_DEEP_K_DENOISE can be set to use a much stricter k-mer denoising
   algorithm (also considers endpieces)
*/
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef USE_QSORT
#include "qsort.h"
#endif
#define NINJA_VER "1.5.1"
/* Prints command-line usage and terminates the program with status 2. */
#define PRINT_USAGE() \
{\
printf( "\nNINJA Is Not Just Another - OTU Picking Solution v" NINJA_VER "\n");\
printf( "Short-read filtering, processing, and denoising program. Usage:\n");\
printf( "ninja_filter in_reads.fna [PE in_reads2.fa] out_PREFIX [<trim>] [RC] \n" \
"[D [x[.y]]] [CN] [LOG] [ST]\n" ); \
printf("\nINPUT PARAMETERS:\n");\
printf( "in_reads.fa: the reads you wish to process\n");\
printf("[PE in_reads2.fa] (optional): paired-end; include pairs in in_reds2.fa\n"); \
printf( "\n" "OUTPUT PARAMETERS:\n");\
printf( "out_PREFIX: prefix for all output files produced\n");\
printf( "<trim[,trim2]> (optional): the number of bases to keep (comma for R2)\n");\
printf( "[RC] (optional): Reverse-complement input sequences\n");\
printf( "[D] <x.y> (optional): Denoise [duplicates x, kmer duplicates/1000 y]\n");\
printf( "Note: using .y discards reads with k-mers < y*1000 duplicates.\n");\
printf( "[CN] (optional): Convert ambigous bases to A's instead of discarding them\n"); \
printf( "[LOG] (optional): Outputs which sequences were filtered out\n"); \
printf( "[ST] (optional): Run k-mer filter with a single thread\n"); \
exit(2);\
}
/* Maximum line/sequence length handled (indices fit in uint16_t). */
#define LINELEN UINT16_MAX
/* PACKSIZE: number of bases packed into one machine word, 2 bits per
   base (see WTYPE below).  Defaults to 32 bases in a 64-bit word. */
#ifndef PACKSIZE
#define PACKSIZE 32
#endif
/* WTYPE:    integer type wide enough for PACKSIZE 2-bit bases;
   SEQPACKS: number of WTYPE words needed for a LINELEN sequence;
   RSHFT:    right-shift that moves the topmost 2-bit base of a WTYPE
             down to the low bits (2*PACKSIZE - 2). */
#if PACKSIZE==64
#define WTYPE __uint128_t
#define SEQPACKS 1024
#define RSHFT 126
#elif PACKSIZE==32
#define WTYPE uint64_t
#define SEQPACKS 2048
#define RSHFT 62
#elif PACKSIZE==16
#define WTYPE uint32_t
#define SEQPACKS 4096
#define RSHFT 30
#elif PACKSIZE==8
#define WTYPE uint16_t
#define SEQPACKS 8192
#define RSHFT 14
#elif PACKSIZE==4
#define WTYPE uint8_t
#define SEQPACKS 16384
#define RSHFT 6
#endif
//#define SEQPACKS LINELEN/PACKSIZE
//#define RSHFT (PACKSIZE*2)-2
/* Scratch buffer for decoding a single packed word (+1 for NUL). */
char WORDTEMP[PACKSIZE+1] = {0};
/* One sortable record for a packed sequence: its first packed word,
   an index (presumably the sequence's original position — TODO
   confirm against the code that fills it), and its length in bases. */
typedef struct
#if PACKSIZE<64
__attribute__ ((__packed__))
#endif
{ WTYPE word; uint32_t ix; uint16_t length; } SortBlock2;
/* Binary-tree node holding one packed k-mer and its occurrence count.
   NOTE(review): the #if guards around the packed attribute are
   commented out, so the attribute applies unconditionally here. */
typedef struct KMerX KMerX;
struct
//#if PACKSIZE<64
__attribute__ ((__packed__))
//#endif
KMerX { WTYPE word; uint64_t count; KMerX *left, *right; };
// Explicit thread memory management
KMerX ***KBANK = 0;
size_t KBANK_MAXK = 10000, KBANK_INITBINS = 100;
size_t *KBANK_BIN =0, *KBANK_BINCNT = 0, *KBANK_IX = 0;
#ifdef USE_QSORT void SB2_qsort(SortBlock2 *arr, unsigned n) { #define SB2_LT(a,b) ((a->word < b->word) || \ (a->word == b->word && a->length < b->length)) QSORT(SortBlock2, arr, n, SB2_LT); } #endif WTYPE *C2Xb; char *ACCEPTED; char *X2C = "ACGTNNNNNNNNNNNNNNNN"; char *X2C_RC = "TGCANNNNNNNNNNNNNNNN"; inline char * num2word(WTYPE num, char * word) { int go = 0; for (; go < PACKSIZE; go++) { WTYPE temp = (WTYPE)num >> RSHFT; word[go] = X2C[temp]; num <<= 2; } return word; } inline char * num2wordRC(WTYPE num, char * word) { int go = PACKSIZE-1; for (; go > -1; go--) { WTYPE temp = (WTYPE)num >> RSHFT; word[go] = X2C_RC[temp]; num <<= 2; } return word; } inline char * decodeStringX(WTYPE * Seq, uint16_t length, char *word, char *newString) { unsigned clumps = length/PACKSIZE; if (PACKSIZE*clumps < length) ++clumps; int z = 0; for (; z < clumps-1; z++) num2word(Seq[z],newString + z*PACKSIZE); num2word(Seq[clumps-1],newString+z*PACKSIZE); newString[length] = 0; return newString; } inline char * decodeStringXRC(WTYPE * Seq, uint16_t length, char *word, char *newString) { newString[length] = 0; unsigned clumps = length/PACKSIZE; if (PACKSIZE*clumps < length) ++clumps; int z = clumps-2; for (; z > -1; z--) num2wordRC(Seq[z],newString + length - (z+1) *PACKSIZE); num2wordRC(Seq[clumps-1],word); register int fold = length % PACKSIZE; if (!fold) fold = PACKSIZE; memcpy(newString,word+PACKSIZE-fold, fold); return newString; } // strict comparator int xcmp(WTYPE *Seq1, WTYPE *Seq2, uint16_t len1, uint16_t len2) { unsigned length = len1 < len2 ? len1 : len2; //len1 is min register unsigned clumps = (unsigned)length/PACKSIZE; if (PACKSIZE*clumps < length) ++clumps; int z = 0; for (; z < clumps; ++z) if (Seq1[z]!=Seq2[z]) return Seq1[z] < Seq2[z] ? -1 : 1; return len1 < len2 ? 
-1 : len1 > len2; } // pre-sorted compactor int ycmp(WTYPE *Seq1, WTYPE *Seq2, uint16_t len1, uint16_t len2) { if (len1 > len2) return 1; // lexicographic guarantee int clumps = (unsigned)len1/PACKSIZE; if (PACKSIZE*clumps < len1) ++clumps; int z = 0; for (; z < clumps-1; ++z) if (Seq1[z]!=Seq2[z]) return 1; // Can differ by length in last clump if (Seq1[z] == Seq2[z]) return 0; if (Seq1[z] > Seq2[z]) return 1; // seq2 must be superset unsigned shift = len1 % PACKSIZE; if (shift) shift = (PACKSIZE - shift) * 2; return (Seq1[z] >> shift) != (Seq2[z] >> shift); } // pre-sorted filter int zcmp(WTYPE *Seq1, WTYPE *Seq2, uint16_t len1, uint16_t len2) { if (len1 != len2) return 1; register unsigned clumps = (unsigned)len1/PACKSIZE; if (PACKSIZE*clumps < len1) ++clumps; int z = 0; for (; z < clumps; ++z) if (Seq1[z]!=Seq2[z]) return 1; return 0; } #ifndef min #define min(a, b) ((a)<=(b) ? (a) : (b)) #endif #define ch(i) *(**(a+i) + depth) #define med3(ia, ib, ic) med3func(a, ia, ib, ic, depth) #define CUTOFF 10 #define MEDCUT 50 // Swaps two characters in a vector inline void swap(char ***a, int i, int j) { char **t = *(a+i); *(a+i) = *(a+j); *(a+j) = t; } inline void vecswap(char ***a, int i, int j, int n) { while (n-->0) swap(a, i++, j++); } // Returns median of ints, used in twrqs inline int med3func(char ***a, int ia, int ib, int ic, int depth) { int va, vb, vc; if ((va=ch(ia)) == (vb=ch(ib))) return ia; if ((vc=ch(ic)) == va || vc == vb) return ic; return va < vb ? (vb < vc ? ib : (va < vc ? ic : ia ) ) : (vb > vc ? ib : (va < vc ? 
ia : ic ) ); } // Insertion sort delegated to by twrqs inline void inssort(char ***a, int n, int depth) { int i, j; for (i = 1; i < n; i++) for (j = i; j > 0; j--) { if (strcmp(**(a+j-1)+depth, **(a+j)+depth) <= 0) break; swap(a, j, j-1); } } // 3-way Radix Quicksort void twrqs(char ***a, unsigned n, int depth) { if (n < CUTOFF) { inssort(a, n, depth); return; } unsigned pl = 0, pm = n >> 1, d; int le, lt, gt, ge, r, v, pn = n-1; // if large enough, get median of median if (n > MEDCUT) { d = n >> 3; pl = med3(pl, pl+d, pl + (d << 1)); pm = med3(pm-d, pm, pm+d); pn = med3(pn - (d << 1), pn-d, pn); } pm = med3(pl, pm, pn); swap(a, 0, pm); v = ch(0); // grab first letter for (le = 1; le < n && ch(le) == v; le++); if (le == n) { if (v != 0) twrqs(a, n, depth+1); return; } lt = le; gt = ge = n-1; // core QS module; partition the data recursively for (;;) { for ( ; lt <= gt && ch(lt) <= v; lt++) if (ch(lt) == v) swap(a, le++, lt); for ( ; lt <= gt && ch(gt) >= v; gt--) if (ch(gt) == v) swap(a, gt, ge--); if (lt > gt) break; swap(a, lt++, gt--); } r = min(le, lt-le); vecswap(a, 0, lt-r, r); r = min(ge-gt, n-ge-1); vecswap(a, lt, n-r, r); twrqs(a, lt-le, depth); if (v != 0) twrqs(a + lt-le, le + n-ge-1, depth+1); twrqs(a + n-(ge-gt), ge-gt, depth); } inline size_t crBST(char *key, size_t sz, char **String) { char **p = String; while (sz) { size_t w = sz >> 1; char *ref_s = *(p+w+1), *key_s = key; while (*ref_s == *key_s++) if (!*ref_s++) return p+w+1-String; if (*ref_s < *(key_s-1)) { p+=w+1; sz-=w+1; } else sz = w; } char *ref_s = *p, *key_s = key; while (*ref_s == *key_s++) if (!*ref_s++) return p - String; return -1; //return p - String; // replace last 3 lines for unsafe ver } int SB2Cmp(blk1, blk2) register const void *blk1, *blk2; { if (((SortBlock2 *)blk1)->word < ((SortBlock2 *)blk2)->word) return -1; if (((SortBlock2 *)blk1)->word > ((SortBlock2 *)blk2)->word) return 1; if (((SortBlock2 *)blk1)->length == ((SortBlock2 *)blk2)->length) return 0; if (((SortBlock2 
*)blk1)->length < ((SortBlock2 *)blk2)->length) return -1; return 1; } void superSort2(uint32_t *SeqIX, WTYPE **base, uint16_t *Lengths, int depth, size_t beginRange, size_t endRange) { size_t n = endRange - beginRange; // endRange is one after last index SortBlock2 *BinPtrs = malloc(n * sizeof(*BinPtrs)); if (!BinPtrs) {fputs("Error: memory (sort).\n",stderr); exit(3);} size_t depthSize = (depth+1) * PACKSIZE; size_t i = beginRange; for (; i < endRange; ++i) BinPtrs[i-beginRange] = (SortBlock2){base[SeqIX[i]][depth],SeqIX[i], Lengths[SeqIX[i]] <= depthSize ? Lengths[SeqIX[i]] : 0}; #ifdef USE_QSORT SB2_qsort(BinPtrs,n); #else qsort(BinPtrs, n, sizeof(*BinPtrs), SB2Cmp); #endif for (i=beginRange; i < endRange; ++i) SeqIX[i] = BinPtrs[i-beginRange].ix; free(BinPtrs); #define CASCADE_MERGE() \ if (i != lastUniq + 1) { \ /* Merge swapping indices for truncated pairs */ \ size_t z = lastUniq; for (; z < i; ++z) { \ if (Lengths[SeqIX[z]] <= depthSize) { \ if (z > lastUniq) { \ /* swap this ix with the ix at lastUniq++ */ \ uint32_t temp = SeqIX[z]; \ SeqIX[z] = SeqIX[lastUniq]; \ SeqIX[lastUniq] = temp; \ } \ ++lastUniq; \ } \ } \ /* Spawn a new sort on the remainder */ \ if (lastUniq < i-1) \ superSort2(SeqIX, base, Lengths, depth+1, lastUniq, i); \ } // Check for duplicates; for each set, move truncations to top WTYPE curElem = base[SeqIX[beginRange]][depth]; size_t lastUniq = beginRange; for (i=beginRange + 1; i < endRange; ++i) { if (base[SeqIX[i]][depth] != curElem) { CASCADE_MERGE(); curElem = base[SeqIX[i]][depth]; lastUniq = i; } } CASCADE_MERGE(); // end cap } inline KMerX * xalloc(int thread, WTYPE word) { // KBANK,KBANK_INITBINS,KBANK_MAXK,KBANK_BIN,KBANK_IX KMerX *Kptr = KBANK[thread][KBANK_BIN[thread]] + KBANK_IX[thread]; *Kptr = (KMerX){word,1,0,0}; if (++KBANK_IX[thread] == KBANK_MAXK) { // reset the ix, increment bin KBANK_IX[thread] = 0; if (++KBANK_BIN[thread] == KBANK_BINCNT[thread]) { // resize bin array KBANK[thread] = realloc(KBANK[thread], 
sizeof(*KBANK[thread])*(KBANK_BINCNT[thread]*=2)); if (!KBANK[thread]) { puts("ERROR: xalloc 1"); exit(3); } for (size_t x=KBANK_BINCNT[thread]/2; x<KBANK_BINCNT[thread]; ++x) { KBANK[thread][x] = malloc(KBANK_MAXK*sizeof(*KBANK[thread][x])); if (!KBANK[thread][x]) { puts("ERROR: xalloc 2"); exit(3); } } } } return Kptr; } void rexalloc(int threads) { // dynamically frees tree memory for (int i = 0; i < threads; ++i) { KBANK[i] = realloc(KBANK[i],sizeof(*KBANK[i])*KBANK_BIN[i]); KBANK[i][KBANK_BIN[i]] = realloc(KBANK[i][KBANK_BIN[i]], sizeof(*KBANK[i][KBANK_BIN[i]]) * KBANK_IX[i]); } } //////////// Tree manipulation methods ///////////// // returns whether new node was created; a counter int xeTree(KMerX *tree, WTYPE word, int T) { do { if (word > tree->word) { // go right if (!tree->right) { tree->right = xalloc(T,word); return 1; } tree = tree->right; } else if (word < tree->word) { // go left if (!tree->left) { tree->left = xalloc(T,word); return 1; } tree = tree->left; } } while (word != tree->word); ++tree->count; return 0; } // for repopulating an existing tree void reTree(KMerX *tree, KMerX *node) { for (;;) { if (node->word > tree->word) { // go right if (!tree->right) { node->left = 0; node->right = 0; tree->right = node; return; } tree = tree->right; } else { // go left if (!tree->left) { node->left = 0; node->right = 0; tree->left = node; return; } tree = tree->left; } } } // for merging existing trees (returns if new node added) int meNode(KMerX *tree, KMerX *node) { do { if (node->word > tree->word) { // go right if (!tree->right) { node->left = 0; node->right = 0; tree->right = node; return 1; // node->count; } tree = tree->right; } else if (node->word < tree->word) { // go left if (!tree->left) { node->left = 0; node->right = 0; tree->left = node; return 1; // node->count; } tree = tree->left; } } while (node->word != tree->word); tree->count += node->count; return 0; } // find in tree size_t fiTree(KMerX *tree, WTYPE word) { do { if (word > 
tree->word) { // go right if (!tree->right) return 0; tree = tree->right; } else if (word < tree->word) { // go left if (!tree->left) return 0; tree = tree->left; } } while (word != tree->word); return tree->count; } // get in tree (known existence) size_t giTree(KMerX *tree, WTYPE word) { do { if (word > tree->word) tree = tree->right; else if (word < tree->word) tree = tree->left; } while (word != tree->word); return tree->count; } // merge trees void meTree(KMerX *tree, KMerX *tree2, size_t *totals) { KMerX *left = tree2->left, *right = tree2->right; *totals += meNode(tree,tree2); if (left) meTree(tree, left, totals); if (right) meTree(tree, right, totals); } // Populates array with nodes in balanced order void traceBalance(KMerX *tree, KMerX **array, size_t *ix) { if (tree->left) traceBalance(tree->left, array, ix); array[(*ix)++] = tree; // if on top, DFS. If mid, IOS, if bot: LFS if (tree->right) traceBalance(tree->right, array, ix); } // Builds a balanced tree void buildBalanceL(KMerX *tree, KMerX **array, size_t sz); void buildBalanceR(KMerX *tree, KMerX **array, size_t sz); #define BUILDBALANCE() \ if (!sz) { \ CHILD = *array; \ CHILD->left = 0; \ CHILD->right = 0; \ return; \ } \ size_t ix = sz >> 1; \ CHILD = array[ix]; \ if (ix) buildBalanceL(CHILD,array,ix-1); \ else CHILD->left = 0; \ buildBalanceR(CHILD,array+(ix+1), sz-(ix+1)); // set a branch of the given tree, and recurse with that branch as root void buildBalanceL(KMerX *tree, KMerX **array, size_t sz) { #define CHILD tree->left BUILDBALANCE() #undef CHILD } void buildBalanceR(KMerX *tree, KMerX **array, size_t sz) { #define CHILD tree->right BUILDBALANCE() #undef CHILD } /////////// Tree reporting methods /////////// void traceCnt(KMerX *tree, size_t *ix) { if (tree->left) traceCnt(tree->left, ix); ++*ix; if (tree->right) traceCnt(tree->right, ix); } void traceTree(KMerX *tree) { if (tree->left) traceTree(tree->left); printf("%s\t%I64u\n",num2word(tree->word,WORDTEMP),tree->count); if 
(tree->right) traceTree(tree->right); } void traceTreeDetail(KMerX *tree, int depth) { printf("%d\t%s\t%I64u\n",depth, num2word(tree->word,WORDTEMP),tree->count); if (tree->left) traceTreeDetail(tree->left, depth+1); if (tree->right) traceTreeDetail(tree->right, depth+1); } size_t buildDepth(KMerX *node, int depth, int *depthMax, size_t *depthTot, size_t *count) { if (node->left) buildDepth(node->left,depth+1,depthMax,depthTot,count); if (depth > *depthMax) *depthMax = depth; ++(*count); (*depthTot) += depth; //printf("%s\t%I64u\n",num2word(tree->word,WORDTEMP),tree->count); if (node->right) buildDepth(node->right,depth+1,depthMax,depthTot,count); } void reportAvMaxDepth(KMerX *tree) { int depthMax = 0; size_t count = 0, depthTot=0; buildDepth(tree,1,&depthMax, &depthTot, &count); double depthAv = (double)(depthTot)/count; printf("Total nodes = %lu. Max depth=%d, Avg=%f\n",count,depthMax,depthAv); } /////////// Tree sorters/comparators (for balancing) //////////// int tfs_cmp(const void *a, const void *b) { KMerX *b1 = *(KMerX **)a, *b2 = *(KMerX **)b; return (b1->count > b2->count) ? -1 : (b1->count < b2->count); } void treeFreqSort(KMerX **arr, size_t n) { #ifdef USE_QSORT #define NODEFREQGT(a,b) ((*a)->count > (*b)->count) QSORT(KMerX*, arr, n, NODEFREQGT); #else qsort(arr, n, sizeof(*arr), tfs_cmp); #endif } int tns_cmp(const void *a, const void *b) { KMerX *b1 = *(KMerX **)a, *b2 = *(KMerX **)b; return (b1->word < b2->word) ? 
-1 : (b1->word > b2->word); } void treeNameSort(KMerX **arr, size_t n) { #ifdef USE_QSORT #define NODENAMELT(a,b) ((*a)->word < (*b)->word) QSORT(KMerX*, arr, n, NODENAMELT); #else qsort(arr, n, sizeof(*arr), tns_cmp); #endif } // Main frequency-dependent balancing function KMerX * balanceTree(KMerX *tree, size_t sz, size_t totalCount) { // set limits #define MAX_NODES 1000000 #define TOP_SHIFT 5 if (sz > MAX_NODES) return tree; size_t ix = 0; KMerX **array = malloc(sizeof(*array) * (sz+1)); traceBalance(tree, array, &ix); // experimental intervention: frequency-first tree construction treeFreqSort(array, sz+1); // Adaptive threshold determination size_t limit = 0; size_t thres = 0, cutoff = totalCount >> TOP_SHIFT; while ((thres += array[limit++]->count) < cutoff); //while (++limit <= sz && array[0]->count/array[limit]->count < 4); //printf("limit = %lld\n",limit); tree = array[0]; tree->left = 0; tree->right=0; if (limit > 2) { for (size_t i = 1; i < limit; ++i) reTree(tree,array[i]); // balance the top KMerX **top = malloc(sizeof(*top)* limit); for (size_t i = 0; i < limit; ++i) top[i] = array[i]; treeNameSort(top, limit); ix = (limit-1)/2; tree = top[ix]; buildBalanceL(tree, top, ix-1); buildBalanceR(tree, top + (ix+1), limit - 1 - (ix+1)); free (top); } else limit = 1; size_t limit2 = (sz+1); // Add in the rest //for (size_t i = limit; i < limit2; ++i) reTree(tree,array[i]); // Add in the rest v2 (stacade) //size_t limit3 = (sz+1); limit2 = (sz+1)/2; // comment out to enable bottomBalance if (limit2 > limit) { int L = 1; for (long long i = limit; i < limit2-1; ++i) { reTree(tree,array[i+L]); L = -L; } if (L==-1) reTree(tree,array[limit2-2]); else reTree(tree,array[limit2-1]); } free(array); return tree; } // SMP tree insersion method inline void clumpParachute(KMerX **Roots, WTYPE *Clumps, size_t *NumsInserted, size_t *TotalCounts, size_t *BalanceThreshes, size_t length) { //printf("here we go...\n"); #pragma omp parallel for schedule(dynamic,1000) for (int i 
= 0; i < length; ++i) { int tid = 0; #ifdef _OPENMP tid = omp_get_thread_num(); #endif ++TotalCounts[tid]; if (!Roots[tid]->count) { *Roots[tid] = (KMerX){Clumps[i],1,0,0}, NumsInserted[tid]=1; continue; } NumsInserted[tid] += xeTree(Roots[tid],Clumps[i],tid); if (NumsInserted[tid] >= BalanceThreshes[tid]) { //printf("Balancing tree (tid %d) at %lu\n",tid,NumsInserted[tid]); Roots[tid] = balanceTree(Roots[tid],NumsInserted[tid]-1, TotalCounts[tid]); /* reportAvMaxDepth(root); */ /* if (balanceThres==65535) {traceTreeDetail(root,0); return 0; } */ BalanceThreshes[tid]=(BalanceThreshes[tid]+1)*2-1; } } } // SMP aggregator: each thread's tree is merged into large tree KMerX* mergeParachutes(KMerX **Roots, int T, size_t *NumsInserted, size_t *TotalCounts, size_t *numInserted, size_t *totalCount) { *totalCount = *TotalCounts; // *TotalCounts; *numInserted = *NumsInserted; for (size_t i = 1; i < T; ++i) { *totalCount += TotalCounts[i]; meTree(*Roots,Roots[i],numInserted); } //traceCnt(*Roots,numInserted); return *Roots; } /////////////// K-mer denoisers //////////////// #ifdef DO_DEEP_K_DENOISE inline size_t findRarestK(KMerX *tree, WTYPE *seq, uint16_t length) { size_t min = giTree(tree,*seq), cur; unsigned offset = 0, basePack = 1; for (int i = 1, b=length-PACKSIZE+1; i < b; ++i) { if (++offset == PACKSIZE) cur = giTree(tree, seq[basePack]), ++basePack, offset = 0; else cur = giTree(tree, (*(seq+basePack-1) << (offset << 1)) + (*(seq+basePack) >> ((PACKSIZE-offset) << 1))); //printf("%llu [%s=%u:%u], ", cur,num2word(this,WORDTEMP),offset,basePack); //printf("[%llu] ",cur); if (cur < min) min = cur; } //printf("MIN=%llu\n",min); return min; } #else inline size_t findRarestK(KMerX *tree, WTYPE *seq, uint16_t length) { size_t numPacks = length/PACKSIZE; //if (numPacks * PACKSIZE < length) ++numPacks; size_t min = (size_t)-1, cur; for (int i = 0; i < numPacks; ++i) { cur = giTree(tree, seq[i]); //printf("%llu, ", cur); if (cur < min) min = cur; } #ifdef DO_K_ENDPIECE if 
(numPacks * PACKSIZE < length) { // handle endpiece unsigned mod = length % PACKSIZE; // guarantee: never 0 // rightshift by 2xmodulo WTYPE prev = (seq[numPacks-1] << (2*mod)) + (seq[numPacks] >> (2*(PACKSIZE-mod))); cur = fiTree(tree, prev); if (cur < min) min = cur; } //printf("\n"); #endif return min; } #endif size_t findRarestK_PE(KMerX *tree, WTYPE *seq, uint16_t length, uint16_t length2) { size_t numPacks = (length - length2)/PACKSIZE, min = (size_t)-1, cur; int ditch = numPacks * PACKSIZE < (length - length2); int i = 0; for (; i < numPacks; ++i) { cur = giTree(tree,seq[i]); if (cur < min) min = cur; } numPacks = length/PACKSIZE; i += ditch; for (; i < numPacks; ++i) { cur = giTree(tree,seq[i]); if (cur < min) min = cur; } return min; } int main( int argc, char *argv[] ) { clock_t start; double cpu_time_used; start = clock(); // profiler // Debugging statements #ifdef DEBUG printf("type size=%u, shift=%u, pack=%u\n", sizeof(WTYPE), RSHFT, PACKSIZE); printf("max int size=%u/%u\n",sizeof(unsigned),sizeof(uint64_t)); printf("Size of SortBlock2=%u\n",sizeof(SortBlock2)); #endif if ( argc < 3 || argc > 12 ) PRINT_USAGE(); int carg = 1; char *inputFilename = argv[carg++]; char *read2Str = 0; if (!strcmp(argv[carg],"PE")) ++carg, read2Str = argv[carg++]; printf("%ssing paired-end reads %s\n", read2Str ? "U" : "Not u", read2Str ? 
read2Str : ""); char *prefixStr = argv[carg++]; if (carg > argc) {puts("Error: prefix required."); return 2; } char *fasta_sx = "_filt.fa", *db_sx = ".db", *dp_sx = "_dupes.txt", *filt_sx = "_filtered.txt", *fasta2_sx = "2_filt.fa"; char *outputFasta = calloc(1,1+strlen(prefixStr)+strlen(fasta_sx)), *outputFasta2 = calloc(1,1+strlen(prefixStr)+strlen(fasta2_sx)), *outputDB = calloc(1,1+strlen(prefixStr)+strlen(db_sx)), *outputDP = calloc(1,1+strlen(prefixStr)+strlen(dp_sx)), *outputFL = calloc(1,1+strlen(prefixStr)+strlen(filt_sx)); strcpy(outputFasta,prefixStr); strcpy(outputFasta+strlen(prefixStr),fasta_sx); strcpy(outputFasta2,prefixStr); strcpy(outputFasta2+strlen(prefixStr),fasta2_sx); strcpy(outputDB,prefixStr); strcpy(outputDB+strlen(prefixStr),db_sx); strcpy(outputDP,prefixStr); strcpy(outputDP+strlen(prefixStr),dp_sx); strcpy(outputFL,prefixStr); strcpy(outputFL+strlen(prefixStr),filt_sx); FILE *fp = fopen(inputFilename, "rb"); FILE *r2 = read2Str ? fopen(read2Str,"rb") : 0; if (!fp || (read2Str && !r2)) { puts("Invalid input FASTA(s)"); return 2; } FILE *off = fopen(outputFasta, "wb"), *ofd = fopen(outputDB,"wb"); FILE *off2 = read2Str ? 
fopen(outputFasta2, "wb") : 0; if (!off || !ofd || (read2Str && !off2)) { puts("Invalid output prefix; cannot create output file(s)."); return 2; } FILE *ofdp=0, *ofdpF = 0; size_t trim = UINT16_MAX, trim2 = UINT16_MAX; int doRC = 0, doLog = 0; double filt_i = 0.f; int copyNumThres = 0; // denoisers int numThreads = 1, convert_amb = 0; if (strcmp(argv[argc-1],"ST")) { // enable MT if not "ST" #ifdef _OPENMP numThreads = omp_get_max_threads(); #endif } else --argc; if (!strcmp(argv[argc-1],"CN")) { // convert N's to A's convert_amb = 1; --argc; } if (argc > 3 && !strcmp(argv[argc-1],"LOG")) { // enable MT if not "ST" doLog = 1; ofdp=fopen(outputDP,"wb"); ofdpF=fopen(outputFL,"wb"); if (!ofdp || !ofdpF) { puts("Invalid output prefix"); exit(2); } puts("Log writing enabled."); --argc; } #ifdef _OPENMP omp_set_num_threads(numThreads); #else numThreads = 1; #endif // Denoises at default intensity if (argc > 3 && !strcmp(argv[argc-1],"D")) { filt_i = 2.f; printf("Performing NINJA k-mer denoising at DEFAULT intensity: %.0f k-mers\n",filt_i); --argc; } // Denoises at specified intensity in the form x.y else if (argc > 3 && !strcmp(argv[argc-2],"D")) { filt_i = atof(argv[argc-1]); if (filt_i < 0) printf("Invalid denoising intensity (expect #REPS[.###Kmers]).\n"); else { if (filt_i >= 1.f) { copyNumThres = filt_i; filt_i -= copyNumThres; if (copyNumThres > 1 || !read2Str) printf("Performing NINJA replicon-denoising" " at %u %sreads.\n", copyNumThres, read2Str ? "" : "compacted "); } if (filt_i) { // Use the decimal remainder as kmer denoising printf("Performing NINJA k-mer denoising at %.0f k-mers\n", filt_i*1000.f); filt_i *= 1000; if (copyNumThres) ++copyNumThres; } } argc -= 2; } int (*cmpF)(WTYPE *, WTYPE *, uint16_t, uint16_t) = (copyNumThres && !read2Str) ? &ycmp : &zcmp; //zcmp replaces xcmp if (!copyNumThres) copyNumThres = filt_i ? 
-1 : 1; if (argc > 3 && !strcmp(argv[argc-1],"RC")) { printf("Reverse complementing the sequences.\n"); doRC = 1; --argc; } // Flags for truncation after specified base if (argc == 4 || (read2Str && argc > 5)) { char *arg = argv[argc-1]; int tlen = strlen(arg); char *cix = strchr(arg,','); trim = atoi(argv[argc-1]) ?: trim; if (cix) trim2 = atoi(cix+1) ?: trim2; else trim2 = trim; printf("Trimming %s sequences to %d bases.\n", read2Str && cix ? "r1" : "input", trim); if (read2Str && cix) printf("Trimming r2 sequences to %d bases.\n",trim2); } C2Xb = calloc(256,sizeof(WTYPE)); C2Xb['a'] = 0; C2Xb['A'] = 0; C2Xb['c'] = 1; C2Xb['C'] = 1; C2Xb['g'] = 2; C2Xb['G'] = 2; C2Xb['t'] = 3; C2Xb['T'] = 3; ACCEPTED = calloc(256,sizeof(*ACCEPTED)); ACCEPTED['a'] = 1; ACCEPTED['A'] = 1; ACCEPTED['c'] = 1; ACCEPTED['C'] = 1; ACCEPTED['g'] = 1; ACCEPTED['G'] = 1; ACCEPTED['t'] = 1; ACCEPTED['T'] = 1; size_t numElem = 1000, ns=0; char **Samples = malloc(numElem*sizeof(char *)); char **SeqIDs = doLog ? malloc(numElem*sizeof(*SeqIDs)) : 0; WTYPE **ReadsX = malloc(numElem*sizeof(WTYPE *)); uint16_t *Sizes = calloc(numElem,sizeof(uint16_t)); uint16_t *Sizes2 = read2Str ? 
calloc(numElem,sizeof(uint16_t)) : 0; char *line = malloc(LINELEN + 1), *initLine = line, // read up to 65k *line2 = malloc(LINELEN + 1), *initLine2 = line2; // MT versions of k-denoisers size_t queuedClumps = 0, fireThres = 1000000; #define BAL_THRES 255 KMerX **Roots = 0; WTYPE *Clumps = 0; size_t *NumsInserted=0, *TotalCounts=0, *BalanceThreshes=0; //KMerX **KBANK = 0; // GLOBAL VARIABLES // size_t KBANK_MAXK = 1000, *KBANK_BIN =0, *KBANK_IX = 0; if (filt_i) { printf("Number of threads for k-mer denoise: %d\n",numThreads); Roots = malloc(numThreads*sizeof(*Roots)); Clumps = malloc(fireThres*sizeof(*Clumps)); NumsInserted = calloc(numThreads,sizeof(*NumsInserted)); TotalCounts = calloc(numThreads,sizeof(*TotalCounts)); BalanceThreshes = malloc(numThreads*sizeof(*BalanceThreshes)); KBANK = malloc(numThreads*sizeof(*KBANK)); KBANK_BIN = calloc(numThreads,sizeof(*KBANK_BIN)); KBANK_BINCNT = malloc(numThreads*sizeof(*KBANK_BINCNT)); KBANK_IX = calloc(numThreads,sizeof(*KBANK_IX)); for (int i = 0; i < numThreads; ++i) { Roots[i] = malloc(sizeof(*Roots[i])); *Roots[i] = (KMerX){0,0,0,0}; BalanceThreshes[i] = BAL_THRES; KBANK[i] = malloc(KBANK_INITBINS*sizeof(*KBANK[i])); KBANK_BINCNT[i] = KBANK_INITBINS; for (int j=0; j < KBANK_INITBINS; ++j) { KBANK[i][j] = malloc(KBANK_MAXK*sizeof(*KBANK[i][j])); // init this bin's kmers if (!KBANK[i][j]) {puts("error: xalloc 0"); exit(3); } } } } size_t ns_amb = 0, n_warned = 0, rejected = 0, totalCnt = 0; while (line = fgets(line,LINELEN,fp)) { ++totalCnt; if (ns == numElem) { numElem *= 2; Samples = realloc(Samples,numElem * sizeof(*Samples)); ReadsX = realloc(ReadsX, numElem * sizeof(*ReadsX)); Sizes = realloc(Sizes, numElem*sizeof(*Sizes)); if (!Samples || !ReadsX || !Sizes) { fputs("Error in resize: memory.\n",stderr); exit(3); } if (read2Str) { Sizes2 = realloc(Sizes2, numElem*sizeof(*Sizes2)); if (!Sizes2) {fputs("Error in resize: memory.\n",stderr); exit(3);} } if (doLog) { SeqIDs = realloc(SeqIDs, numElem * 
sizeof(*SeqIDs)); if (!SeqIDs) {fputs("Error in resize: memory.\n",stderr); exit(3);} } } // Check format consistency if (*line != '>') { fprintf(stderr,"FASTA error; expected '>' on line %llu\n",totalCnt); exit(2); } // copy in the sample name up to _ or null minus 1 char *src = line + 1; while (*src != '_' && *src != ' ' && *src != '\n') ++src; if (doLog) { // also trace until whitespace for sample id char *seqID = src; while (*seqID != ' ' && *seqID != '\n' && *seqID != '\r') ++seqID; SeqIDs[ns] = malloc(seqID - src + 1); if (!SeqIDs[ns]) {puts("Out of memory for SeqIDs"); exit(3);} char *d = SeqIDs[ns]; char *b = src; while (b < seqID) *d++ = *b++; *d = 0; } Samples[ns] = malloc(src - line); if (!Samples[ns]) {puts("Not enough Samples[ns] mem"); exit(3);} char *dest = Samples[ns]; char *beginSample = line + 1; while (beginSample < src) *dest++ = *beginSample++; *dest = 0; // copy in the encoded sequence(s) if (!(line = fgets(line,LINELEN,fp))) { fputs("FASTA error: unexpected end of file (R1).\n",stderr); exit(2); } if (*line == '>') { fprintf(stderr,"FASTA error; unexpected '>' on line %llu (R1)\n",totalCnt); exit(2); } src = line; register size_t length = strlen(src); if (src[length-1] == '\n') --length; // lop off newline(s) if (src[length-1] == '\r') --length; // supports every platform! if (trim < length) length = trim; if (length >= UINT16_MAX) { printf("Warning: truncating read %llu.\n",ns); length = UINT16_MAX - 1; } size_t numPacks; // Check second sequence int len2 = 0; char *src2; if (read2Str) { fgets(line2,LINELEN,r2); // skip sample if (!(line2 = fgets(line2,LINELEN,r2))) { fputs("FASTA error: unexpected end of file (R1).\n",stderr); exit(2); } if (*line == '>') { fprintf(stderr,"FASTA error; unexpected '>' on line %llu (R2)\n",totalCnt); exit(2); } len2 = strlen(line2); if (line2[len2-1] == '\n') --len2; // lop off newline(s) if (line2[len2-1] == '\r') --len2; // supports every platform! 
src2 = line2; if (trim2 < len2) src2 += len2 - trim2, len2 = trim2; if (len2 >= UINT16_MAX) { printf("Warning: truncating read 2: %llu.\n",ns); len2 = UINT16_MAX - 1; } Sizes2[ns] = len2; length += len2; // first length is compounded } Sizes[ns] = length; numPacks = length/PACKSIZE; if (numPacks * PACKSIZE < length) ++numPacks; ReadsX[ns] = malloc(numPacks*sizeof(WTYPE)); if (!ReadsX[ns]) {puts("Bad ReadsX[ns] mem"); return 3; } #define GENERATE_WORD_PRE() \ for (; k < bound; ++k, ++z) { \ clump <<= 2u; \ !ACCEPTED[*src] && (amb=1,++n_warned);\ clump += C2Xb[*src++]; \ if (z == PACKSIZE) *thisPack++ = clump, z = 0; #define GENERATE_KMER() \ if (k + 2 > PACKSIZE) { \ Clumps[queuedClumps++] = clump; \ if (queuedClumps == fireThres) { \ clumpParachute(Roots,Clumps,NumsInserted, \ TotalCounts,BalanceThreshes,fireThres); \ queuedClumps = 0; \ } \ } #define GENERATE_WORD_POST() } int k = 1, z = 2, bound = length - len2, amb = 0; WTYPE *thisPack = ReadsX[ns]; WTYPE clump = C2Xb[*src]; !ACCEPTED[*src++] && (amb=1,++n_warned); if (filt_i) GENERATE_WORD_PRE() GENERATE_KMER() GENERATE_WORD_POST() else GENERATE_WORD_PRE() GENERATE_WORD_POST() if (read2Str) { k = 0; // also resets k-mer for read 2 bound = len2; src = src2; if (filt_i) GENERATE_WORD_PRE() GENERATE_KMER() GENERATE_WORD_POST() else GENERATE_WORD_PRE() GENERATE_WORD_POST() } numPacks *= PACKSIZE; if (numPacks > length) *thisPack++ = clump << ((numPacks - length) << 1); if (amb) { ++ns_amb; if (!convert_amb) { if (doLog) { ++rejected; fprintf(ofdpF,"%s%s\tAMBIGUOUS\n",Samples[ns],SeqIDs[ns]); free(SeqIDs[ns]); } free(Samples[ns]); free(ReadsX[ns]); continue; //without incrementing } } ++ns; } if (n_warned) printf("WARNING: Found %llu sequences with ambiguity" " (%llu ambiguous bases).\n",ns_amb,n_warned); KMerX *master = 0; if (filt_i) { if (queuedClumps) clumpParachute(Roots,Clumps,NumsInserted, TotalCounts,BalanceThreshes,queuedClumps); size_t numInserted = 0, totalCount = 0; if (numThreads > 1) { master = 
mergeParachutes(Roots, numThreads, NumsInserted, TotalCounts,&numInserted, &totalCount); //master = balanceTree(master,numInserted-1, totalCount); //master = quickBalance(master,numInserted-1); } else { master = *Roots; numInserted = *NumsInserted; totalCount = *TotalCounts; } //traceTreeDetail(*Roots,0); printf("Distinct K-mers found: %lu, Total k-mers: %llu\n",numInserted,totalCount); #ifdef DEBUG reportAvMaxDepth(master); #endif //rexalloc(numThreads); } fclose(fp); free(line); // Shrink data structures for more memory Samples = realloc(Samples,ns * sizeof(*Samples)); ReadsX = realloc(ReadsX, ns * sizeof(*ReadsX)); Sizes = realloc(Sizes, ns * sizeof(*Sizes)); if (read2Str) Sizes2 = realloc(Sizes2, ns * sizeof(*Sizes2)); if (doLog) SeqIDs = realloc(SeqIDs, ns * sizeof(*SeqIDs)); printf("Number of sequences: %u\n",ns + ns_amb); if (ns > UINT32_MAX) {puts("Too many sequences (>4 Bn)."); return 4;} printf("Total reads considered: %u\n",ns); #ifdef PROFILE printf("->Short read parse: %f\n", ((double) (clock() - start)) / CLOCKS_PER_SEC); start = clock(); #endif // Create index structure for sequences read (in 32-bit) uint32_t *SeqIX = malloc(ns * sizeof(*SeqIX)); size_t k = 0; for (; k < ns; ++k) SeqIX[k] = k; superSort2(SeqIX, ReadsX, Sizes, 0,0,ns); printf("Reads sorted.\n"); char ***smpSrt = malloc(ns * sizeof(*smpSrt)), **SmpDD = malloc(ns * sizeof(*SmpDD)); if (!smpSrt || !SmpDD) { fprintf(stderr,"Out of post-memory: parray.\n"); exit(3); } for (k=0; k < ns; ++k) smpSrt[k] = &Samples[k]; twrqs(smpSrt, ns, 0); *SmpDD = **smpSrt; // store first sample unsigned x = 1; for (k=1; k < ns; ++k) if (strcmp(*smpSrt[k-1],*smpSrt[k])) SmpDD[x++] = *smpSrt[k]; free(smpSrt); SmpDD = realloc(SmpDD,sizeof(char*)*x); printf("%d Samples found.\n",x); if (x == ns) { puts("*************************************"); puts("* WARNING!! WARNING!! WARNING!! *"); puts("* No. of samples = no. of reads *"); puts("* Casting # of samples to 1. 
*"); puts("*************************************"); x = 1, *SmpDD = "AllSamps"; } fprintf(ofd, "%u\n", x); for (k=0; k < x; ++k) fprintf(ofd,"%s\n", SmpDD[k]); #ifdef PROFILE printf("->Short read sample prep: %f\n", ((double) (clock() - start)) / CLOCKS_PER_SEC); start = clock(); #endif // Create counts array of integers parallel to the unique samples array unsigned *Counts = calloc(x, sizeof(*Counts)); if (!Counts) {puts("unable to allocate counts"); return 3;} int64_t i_copyThres = filt_i ? copyNumThres-1 : INT64_MAX; size_t filt_n = filt_i; #ifdef DEBUG printf("copyNumThres=%d, copyThres=%llu, filt_i=%f [%u]\n", copyNumThres,i_copyThres,filt_i,filt_n); #endif #define WRITE_SUPPORTED_DUPE() {\ if (copies >= copyNumThres || (copies >= i_copyThres && \ (read2Str ? findRarestK_PE(master,ReadsX[prevIX], Sizes[prevIX],Sizes2[prevIX]) : \ findRarestK(master, ReadsX[prevIX], Sizes[prevIX])) >= filt_n)) { \ /* printf("\nfound rarest K=%llu\n",findRarestK2(master, ReadsX[prevIX], Sizes[prevIX])); */ \ if (doLog) { \ for (unsigned w = lastLogged; w < k; ++w) \ ++committed, fprintf(ofdp,"%s%s\t",Samples[SeqIX[w]],SeqIDs[SeqIX[w]]); \ fprintf(ofdp,"\n"); \ lastLogged = k; \ } \ for (int y = 0; y < x; ++y) \ if (Counts[y]) fprintf(ofd,"%u:%u:",y,Counts[y]), Counts[y] = 0; \ fprintf(ofd,"\n"); \ if (doRC) { \ char *bon = decodeStringXRC(ReadsX[prevIX], Sizes[prevIX],word,string); \ if (read2Str) { \ fprintf(off,">%u\n%s\n",rix, bon + Sizes2[prevIX]); \ bon[Sizes2[prevIX]] = 0; \ fprintf(off2,">%u\n%s\n",rix, bon); \ } else fprintf(off,">%u\n%s\n",rix, bon); \ ++rix; \ } \ else { \ char *bon = decodeStringX(ReadsX[prevIX], Sizes[prevIX],word,string); \ if (read2Str) { \ fprintf(off2,">%u\n%s\n", rix, bon + Sizes[prevIX] - Sizes2[prevIX]); \ bon[Sizes[prevIX] - Sizes2[prevIX]] = 0; \ fprintf(off,">%u\n%s\n", rix, bon); \ } else fprintf(off,">%u\n%s\n", rix, bon); \ ++rix; \ } \ } \ else { \ if (doLog) { \ for (unsigned w = lastLogged; w < k; ++w) \ ++rejected, 
fprintf(ofdpF,"%s%s\tFILTERED\n", \ Samples[SeqIX[w]],SeqIDs[SeqIX[w]]); \ lastLogged = k; \ } \ memset(Counts,0,x*sizeof(unsigned)); \ } \ copies = 1; \ } size_t committed = 0; //, rejected = 0; // now defined before main loop unsigned copies = 1, dupes = 0, rix=0; char *string = malloc(UINT16_MAX), *word = calloc(PACKSIZE+1,1); unsigned prevIX, thisIX, lastLogged = 0; for (k=1; k < ns; ++k) { prevIX = SeqIX[k-1]; thisIX = SeqIX[k]; if (x==1) ++*Counts; else ++Counts[crBST(Samples[prevIX],x-1,SmpDD)]; if (cmpF(ReadsX[prevIX],ReadsX[thisIX],Sizes[prevIX], Sizes[thisIX])) WRITE_SUPPORTED_DUPE() else { ++copies; ++dupes; } } prevIX = thisIX; if (x==1) ++*Counts; else ++Counts[crBST(Samples[prevIX],x-1,SmpDD)]; // add last count WRITE_SUPPORTED_DUPE() if (doLog) printf("Number of reads rejected = %llu, committed = %llu\n", rejected, committed); puts("Finished."); #ifdef PROFILE printf("->Mapping and file writing: %f\n", ((double) (clock() - start)) / CLOCKS_PER_SEC); start = clock(); #endif free (SeqIX); free (string); return 0; }
AUC-openmp.c
// Program: AUC-openmp // Author: Jason Regina // Date: 12 November 2015 // Description: This program approximates pi using the Riemann Sum method #include <stdlib.h> #include <stdio.h> #include <unistd.h> #include <math.h> #include <omp.h> // This function returns a y-value on a unit circle // centered at the origin, given an x-value double func(double x) { return sqrt(1.0 - (x * x)); } int main( int argc, char** argv ) { // Set number of rectangles and OMP threads int recs = 100000000; int num_threads = 1; // Parse command line const char* name = argv[0]; int c; while ((c = getopt(argc, argv, "n:t:")) != -1) { switch(c) { case 'n': recs = atoi(optarg); break; case 't': num_threads = atoi(optarg); break; case '?': default: fprintf(stderr, "Usage: %s -n [NUMBER_OF_RECTANGLES] -t [OMP_NUM_THREADS]\n", name); return -1; } } argc -+ optind; argv += optind; // Calculate rectangle width double width; width = 1.0 / recs; // Determine first and last elements of process int first = 0, last = recs; // Calculate total area double sum = 0.0; int i = 0; // Set OMP Threads omp_set_num_threads(num_threads); #pragma omp parallel for reduction(+:sum) shared(first,last,width) private(i) for (i = first; i < last; i++) { sum += func(width * i) * width * 4.0; } // Print result printf(" --- %s --- \n", name); printf("Number of processes: %d\n", 1); printf("Threads per process: %d\n", num_threads); printf("Rectangles : %d\n", recs); printf("pi is approximately: %f\n", sum); // Terminate return 0; }
DRB047-doallchar-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

#include <stdio.h>
#include <stdlib.h>

/*
One dimension array computation with finer granularity than traditional 4
bytes. Dynamic tools monitoring 4-byte elements may wrongfully report a race
condition here, but each iteration touches a distinct 1-byte element.
*/
char a[100];

int main()
{
  int i;

  /* First offload region: initialize each byte element; iterations write
     disjoint elements, so the parallel loop is race-free. */
#pragma omp target data map(tofrom: a[0:100])
  {
#pragma omp target parallel for
    for (i = 0; i < 100; i++)
      a[i] = i;
  }

  /* Second offload region: increment each element in place; again every
     iteration reads and writes only its own element. */
#pragma omp target data map(tofrom: a[0:100])
  {
#pragma omp target parallel for
    for (i = 0; i < 100; i++)
      a[i] = a[i] + 1;
  }

  /* Print results on the host after both target regions map data back. */
  for (i = 0; i < 100; i++)
    printf("%c\n", a[i]);

  return 0;
}
fib.c
/**********************************************************************************************/ /* This program is part of the Barcelona OpenMP Tasks Suite */ /* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */ /* Copyright (C) 2009 Universitat Politecnica de Catalunya */ /* */ /* This program is free software; you can redistribute it and/or modify */ /* it under the terms of the GNU General Public License as published by */ /* the Free Software Foundation; either version 2 of the License, or */ /* (at your option) any later version. */ /* */ /* This program is distributed in the hope that it will be useful, */ /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ /* GNU General Public License for more details. */ /* */ /* You should have received a copy of the GNU General Public License */ /* along with this program; if not, write to the Free Software */ /* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /**********************************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <unistd.h> #include <sys/time.h> #include <omp.h> #include "fib.h" #include "../../common/BOTSCommonUtils.h" #define FIB_RESULTS_PRE 41 long long fib_results[FIB_RESULTS_PRE] = {0,1,1,2,3,5,8,13,21,34,55,89,144,233,377,610,987,1597,2584,4181,6765,10946,17711,28657,46368,75025,121393,196418,317811,514229,832040,1346269,2178309,3524578,5702887,9227465,14930352,24157817,39088169,63245986,102334155}; long long int fib_seq (int n) { int x, y; if (n < 2) return n; x = fib_seq(n - 1); y = fib_seq(n - 2); return x + y; } long long int fib (int n) { long long x, y; if (n < 2) return n; #pragma omp task untied shared(x) firstprivate(n) x = fib(n - 1); #pragma omp task untied shared(y) firstprivate(n) y = fib(n - 2); #pragma omp taskwait return x + y; } void 
print_usage() { fprintf(stderr, "\n"); fprintf(stderr, "Usage: %s -[options]\n", "Fibonacci"); fprintf(stderr, "\n"); fprintf(stderr, "Where options are:\n"); fprintf(stderr, " -n <number> : Get Fibonacci number n\n"); fprintf(stderr, " -h : Print program's usage (this help).\n"); } long long int par_res, seq_res; int main(int argc, char* argv[]) { int n = 20, i; for (i=1; i<argc; i++) { if (argv[i][0] == '-') { switch (argv[i][1]) { case 'n': /* read argument size 0 */ argv[i][1] = '*'; i++; if (argc == i) { "Erro\n"; exit(100); } n = atoi(argv[i]); break; case 'h': /* print usage */ argv[i][1] = '*'; print_usage(); exit (100); break; } } } double t_start, t_end; t_start = rtclock(); #pragma omp parallel #pragma omp single par_res = fib(n); t_end = rtclock(); fprintf(stdout, "Parallel Runtime: %0.6lfs\n", t_end - t_start); t_start = rtclock(); seq_res = fib_seq(n); t_end = rtclock(); fprintf(stdout, "Sequential Runtime: %0.6lfs\n", t_end - t_start); if (par_res == seq_res) { fprintf(stdout, "Result: Successful\n"); } else { fprintf(stdout, "Result: Unsuccessful\n"); } return 0; }
owl_slicing_basic_impl_omp.h
/*
 * OWL - OCaml Scientific and Engineering Computing
 * Copyright (c) 2016-2019 Liang Wang <liang.wang@cl.cam.ac.uk>
 */

#ifdef OWL_ENABLE_TEMPLATE

/* Level 1 optimisation: the slice depth reaches the last dimension, so only
   a single strided run of elements remains. MAPFUN applies the templated
   element operation from source x to destination y. */
void FUNCTION (c, slice_1) (struct slice_pair *p) {
  TYPE *x = (TYPE *) p->x;
  TYPE *y = (TYPE *) p->y;
  int d = p->dim - 1;
  int n = p->n[d];
  /* Absolute starting offsets: accumulated position plus this dim's offset. */
  int posx = p->posx + p->ofsx[d];
  int posy = p->posy + p->ofsy[d];
  int incx = p->incx[d];
  int incy = p->incy[d];

  for (int i = 0; i < n; i++) {
    MAPFUN (*(x + posx), *(y + posy));
    posx += incx;
    posy += incy;
  }
}

/* Level 2 optimisation: the last two dimensions are handled with an explicit
   doubly-nested loop; the outer loop is parallelised with OpenMP. Each outer
   iteration recomputes its own positions from i0, so iterations are
   independent. */
void FUNCTION (c, slice_2) (struct slice_pair *p) {
  TYPE *x = (TYPE *) p->x;
  TYPE *y = (TYPE *) p->y;
  int d0 = p->dim - 2;
  int d1 = p->dim - 1;
  int n0 = p->n[d0];
  int n1 = p->n[d1];
  int ofsx0 = p->ofsx[d0];
  int ofsy0 = p->ofsy[d0];
  int incx0 = p->incx[d0];
  int incy0 = p->incy[d0];
  int ofsx1 = p->ofsx[d1];
  int ofsy1 = p->ofsy[d1];
  int incx1 = p->incx[d1];
  int incy1 = p->incy[d1];
  int posx0 = p->posx + ofsx0;
  int posy0 = p->posy + ofsy0;

  #pragma omp parallel for schedule(static)
  for (int i0 = 0; i0 < n0; i0++) {
    /* Per-iteration start positions derived from i0 keep the loop race-free. */
    int posx1 = posx0 + ofsx1 + i0 * incx0;
    int posy1 = posy0 + ofsy1 + i0 * incy0;
    for (int i1 = 0; i1 < n1; i1++) {
      MAPFUN (*(x + posx1), *(y + posy1));
      posx1 += incx1;
      posy1 += incy1;
    }
  }
}

/* Level 3 optimisation: same idea for the last three dimensions; outermost
   loop parallelised, inner two loops walk strided positions. */
void FUNCTION (c, slice_3) (struct slice_pair *p) {
  TYPE *x = (TYPE *) p->x;
  TYPE *y = (TYPE *) p->y;
  int d0 = p->dim - 3;
  int d1 = p->dim - 2;
  int d2 = p->dim - 1;
  int n0 = p->n[d0];
  int n1 = p->n[d1];
  int n2 = p->n[d2];
  int ofsx0 = p->ofsx[d0];
  int ofsy0 = p->ofsy[d0];
  int incx0 = p->incx[d0];
  int incy0 = p->incy[d0];
  int ofsx1 = p->ofsx[d1];
  int ofsy1 = p->ofsy[d1];
  int incx1 = p->incx[d1];
  int incy1 = p->incy[d1];
  int ofsx2 = p->ofsx[d2];
  int ofsy2 = p->ofsy[d2];
  int incx2 = p->incx[d2];
  int incy2 = p->incy[d2];
  int posx0 = p->posx + ofsx0;
  int posy0 = p->posy + ofsy0;

  #pragma omp parallel for schedule(static)
  for (int i0 = 0; i0 < n0; i0++) {
    int posx1 = posx0 + ofsx1 + i0 * incx0;
    int posy1 = posy0 + ofsy1 + i0 * incy0;
    for (int i1 = 0; i1 < n1; i1++) {
      int posx2 = posx1 + ofsx2;
      int posy2 = posy1 + ofsy2;
      for (int i2 = 0; i2 < n2; i2++) {
        MAPFUN (*(x + posx2), *(y + posy2));
        posx2 += incx2;
        posy2 += incy2;
      }
      posx1 += incx1;
      posy1 += incy1;
    }
  }
}

/* Level 4 optimisation: same scheme for the last four dimensions. */
void FUNCTION (c, slice_4) (struct slice_pair *p) {
  TYPE *x = (TYPE *) p->x;
  TYPE *y = (TYPE *) p->y;
  int d0 = p->dim - 4;
  int d1 = p->dim - 3;
  int d2 = p->dim - 2;
  int d3 = p->dim - 1;
  int n0 = p->n[d0];
  int n1 = p->n[d1];
  int n2 = p->n[d2];
  int n3 = p->n[d3];
  int ofsx0 = p->ofsx[d0];
  int ofsy0 = p->ofsy[d0];
  int incx0 = p->incx[d0];
  int incy0 = p->incy[d0];
  int ofsx1 = p->ofsx[d1];
  int ofsy1 = p->ofsy[d1];
  int incx1 = p->incx[d1];
  int incy1 = p->incy[d1];
  int ofsx2 = p->ofsx[d2];
  int ofsy2 = p->ofsy[d2];
  int incx2 = p->incx[d2];
  int incy2 = p->incy[d2];
  int ofsx3 = p->ofsx[d3];
  int ofsy3 = p->ofsy[d3];
  int incx3 = p->incx[d3];
  int incy3 = p->incy[d3];
  int posx0 = p->posx + ofsx0;
  int posy0 = p->posy + ofsy0;

  #pragma omp parallel for schedule(static)
  for (int i0 = 0; i0 < n0; i0++) {
    int posx1 = posx0 + ofsx1 + i0 * incx0;
    int posy1 = posy0 + ofsy1 + i0 * incy0;
    for (int i1 = 0; i1 < n1; i1++) {
      int posx2 = posx1 + ofsx2;
      int posy2 = posy1 + ofsy2;
      for (int i2 = 0; i2 < n2; i2++) {
        int posx3 = posx2 + ofsx3;
        int posy3 = posy2 + ofsy3;
        for (int i3 = 0; i3 < n3; i3++) {
          MAPFUN (*(x + posx3), *(y + posy3));
          posx3 += incx3;
          posy3 += incy3;
        }
        posx2 += incx2;
        posy2 += incy2;
      }
      posx1 += incx1;
      posy1 += incy1;
    }
  }
}

// slice x based on the basic slice definition and save to y.
/* Recursive driver: walks dimensions from p->dep outward; once only 1-4
   dimensions remain it dispatches to the specialised (and, for 2-4 dims,
   OpenMP-parallel) kernels above. posx/posy are accumulated along the
   recursion and restored before returning. */
void FUNCTION (c, slice) (struct slice_pair *p) {
  if (p->dep == p->dim - 1)
    FUNCTION (c, slice_1) (p);
  else if (p->dep == p->dim - 2)
    FUNCTION (c, slice_2) (p);
  else if (p->dep == p->dim - 3)
    FUNCTION (c, slice_3) (p);
  else if (p->dep == p->dim - 4)
    FUNCTION (c, slice_4) (p);
  else {
    const int d = p->dep;
    const int n = p->n[d];
    const int incx = p->incx[d];
    const int incy = p->incy[d];
    /* Save the incoming positions so callers see them unchanged. */
    const int save_posx = p->posx;
    const int save_posy = p->posy;
    p->posx += p->ofsx[d];
    p->posy += p->ofsy[d];

    for (int i = 0; i < n; i++) {
      p->dep += 1;
      FUNCTION (c, slice) (p);
      p->dep -= 1;
      p->posx += incx;
      p->posy += incy;
    }

    p->posx = save_posx;
    p->posy = save_posy;
  }
}

/* OCaml stub: vX is the source ndarray, vY the destination, vZ the int64
   slice definition. Builds a slice_pair describing offsets/strides per
   dimension and runs the templated slice kernel.
   NOTE(review): the calloc results are not checked for NULL — assumes
   allocation succeeds; confirm this matches the project's OOM policy. */
CAMLprim value FUNCTION (stub, slice) (value vX, value vY, value vZ) {
  struct caml_ba_array *X = Caml_ba_array_val(vX);
  TYPE *X_data = (TYPE *) X->data;

  struct caml_ba_array *Y = Caml_ba_array_val(vY);
  TYPE *Y_data = (TYPE *) Y->data;

  struct caml_ba_array *Z = Caml_ba_array_val(vZ);
  int64_t *slice = (int64_t *) Z->data;

  struct slice_pair * sp = calloc(1, sizeof(struct slice_pair));
  sp->dim = X->num_dims;
  sp->dep = 0;
  sp->n = Y->dim;           /* iteration counts come from the destination */
  sp->x = X_data;
  sp->y = Y_data;
  sp->posx = 0;
  sp->posy = 0;
  sp->ofsx = calloc(sp->dim, sizeof(int));
  sp->ofsy = calloc(sp->dim, sizeof(int));
  sp->incx = calloc(sp->dim, sizeof(int));
  sp->incy = calloc(sp->dim, sizeof(int));
  /* Source offsets/strides derive from the slice spec; destination strides
     are the plain ndarray strides of Y. */
  c_slicing_offset(X, slice, sp->ofsx);
  c_slicing_stride(X, slice, sp->incx);
  c_ndarray_stride(Y, sp->incy);

  FUNCTION (c, slice) (sp);

  free(sp->ofsx);
  free(sp->ofsy);
  free(sp->incx);
  free(sp->incy);
  free(sp);

  return Val_unit;
}

#endif /* OWL_ENABLE_TEMPLATE */
Stmt.h
//===- Stmt.h - Classes for representing statements -------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Stmt interface and subclasses. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMT_H #define LLVM_CLANG_AST_STMT_H #include "clang/AST/DeclGroup.h" #include "clang/AST/DependenceFlags.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/CapturedStmt.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/BitmaskEnum.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/iterator.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include <algorithm> #include <cassert> #include <cstddef> #include <iterator> #include <string> namespace llvm { class FoldingSetNodeID; } // namespace llvm namespace clang { class ASTContext; class Attr; class CapturedDecl; class Decl; class Expr; class AddrLabelExpr; class LabelDecl; class ODRHash; class PrinterHelper; struct PrintingPolicy; class RecordDecl; class SourceManager; class StringLiteral; class Token; class VarDecl; //===----------------------------------------------------------------------===// // AST classes for statements. //===----------------------------------------------------------------------===// /// Stmt - This represents one statement. 
/// class alignas(void *) Stmt { public: enum StmtClass { NoStmtClass = 0, #define STMT(CLASS, PARENT) CLASS##Class, #define STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class, #define LAST_STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class #define ABSTRACT_STMT(STMT) #include "clang/AST/StmtNodes.inc" }; // Make vanilla 'new' and 'delete' illegal for Stmts. protected: friend class ASTStmtReader; friend class ASTStmtWriter; void *operator new(size_t bytes) noexcept { llvm_unreachable("Stmts cannot be allocated with regular 'new'."); } void operator delete(void *data) noexcept { llvm_unreachable("Stmts cannot be released with regular 'delete'."); } //===--- Statement bitfields classes ---===// class StmtBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class Stmt; /// The statement class. unsigned sClass : 8; }; enum { NumStmtBits = 8 }; class NullStmtBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class NullStmt; unsigned : NumStmtBits; /// True if the null statement was preceded by an empty macro, e.g: /// @code /// #define CALL(x) /// CALL(0); /// @endcode unsigned HasLeadingEmptyMacro : 1; /// The location of the semi-colon. SourceLocation SemiLoc; }; class CompoundStmtBitfields { friend class ASTStmtReader; friend class CompoundStmt; unsigned : NumStmtBits; unsigned NumStmts : 32 - NumStmtBits; /// The location of the opening "{". SourceLocation LBraceLoc; }; class LabelStmtBitfields { friend class LabelStmt; unsigned : NumStmtBits; SourceLocation IdentLoc; }; class AttributedStmtBitfields { friend class ASTStmtReader; friend class AttributedStmt; unsigned : NumStmtBits; /// Number of attributes. unsigned NumAttrs : 32 - NumStmtBits; /// The location of the attribute. 
SourceLocation AttrLoc; }; class IfStmtBitfields { friend class ASTStmtReader; friend class IfStmt; unsigned : NumStmtBits; /// True if this if statement is a constexpr if. unsigned IsConstexpr : 1; /// True if this if statement has storage for an else statement. unsigned HasElse : 1; /// True if this if statement has storage for a variable declaration. unsigned HasVar : 1; /// True if this if statement has storage for an init statement. unsigned HasInit : 1; /// The location of the "if". SourceLocation IfLoc; }; class SwitchStmtBitfields { friend class SwitchStmt; unsigned : NumStmtBits; /// True if the SwitchStmt has storage for an init statement. unsigned HasInit : 1; /// True if the SwitchStmt has storage for a condition variable. unsigned HasVar : 1; /// If the SwitchStmt is a switch on an enum value, records whether all /// the enum values were covered by CaseStmts. The coverage information /// value is meant to be a hint for possible clients. unsigned AllEnumCasesCovered : 1; /// The location of the "switch". SourceLocation SwitchLoc; }; class WhileStmtBitfields { friend class ASTStmtReader; friend class WhileStmt; unsigned : NumStmtBits; /// True if the WhileStmt has storage for a condition variable. unsigned HasVar : 1; /// The location of the "while". SourceLocation WhileLoc; }; class DoStmtBitfields { friend class DoStmt; unsigned : NumStmtBits; /// The location of the "do". SourceLocation DoLoc; }; class ForStmtBitfields { friend class ForStmt; unsigned : NumStmtBits; /// The location of the "for". SourceLocation ForLoc; }; class GotoStmtBitfields { friend class GotoStmt; friend class IndirectGotoStmt; unsigned : NumStmtBits; /// The location of the "goto". SourceLocation GotoLoc; }; class ContinueStmtBitfields { friend class ContinueStmt; unsigned : NumStmtBits; /// The location of the "continue". SourceLocation ContinueLoc; }; class BreakStmtBitfields { friend class BreakStmt; unsigned : NumStmtBits; /// The location of the "break". 
SourceLocation BreakLoc; }; class ReturnStmtBitfields { friend class ReturnStmt; unsigned : NumStmtBits; /// True if this ReturnStmt has storage for an NRVO candidate. unsigned HasNRVOCandidate : 1; /// The location of the "return". SourceLocation RetLoc; }; class SwitchCaseBitfields { friend class SwitchCase; friend class CaseStmt; unsigned : NumStmtBits; /// Used by CaseStmt to store whether it is a case statement /// of the form case LHS ... RHS (a GNU extension). unsigned CaseStmtIsGNURange : 1; /// The location of the "case" or "default" keyword. SourceLocation KeywordLoc; }; //===--- Expression bitfields classes ---===// class ExprBitfields { friend class ASTStmtReader; // deserialization friend class AtomicExpr; // ctor friend class BlockDeclRefExpr; // ctor friend class CallExpr; // ctor friend class CXXConstructExpr; // ctor friend class CXXDependentScopeMemberExpr; // ctor friend class CXXNewExpr; // ctor friend class CXXUnresolvedConstructExpr; // ctor friend class DeclRefExpr; // computeDependence friend class DependentScopeDeclRefExpr; // ctor friend class DesignatedInitExpr; // ctor friend class Expr; friend class InitListExpr; // ctor friend class ObjCArrayLiteral; // ctor friend class ObjCDictionaryLiteral; // ctor friend class ObjCMessageExpr; // ctor friend class OffsetOfExpr; // ctor friend class OpaqueValueExpr; // ctor friend class OverloadExpr; // ctor friend class ParenListExpr; // ctor friend class PseudoObjectExpr; // ctor friend class ShuffleVectorExpr; // ctor unsigned : NumStmtBits; unsigned ValueKind : 2; unsigned ObjectKind : 3; unsigned /*ExprDependence*/ Dependent : llvm::BitWidth<ExprDependence>; }; enum { NumExprBits = NumStmtBits + 5 + llvm::BitWidth<ExprDependence> }; class ConstantExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class ConstantExpr; unsigned : NumExprBits; /// The kind of result that is trail-allocated. 
unsigned ResultKind : 2; /// Kind of Result as defined by APValue::Kind unsigned APValueKind : 4; /// When ResultKind == RSK_Int64. whether the trail-allocated integer is /// signed. unsigned IsUnsigned : 1; /// When ResultKind == RSK_Int64. the BitWidth of the trail-allocated /// integer. 7 bits because it is the minimal number of bit to represent a /// value from 0 to 64 (the size of the trail-allocated number). unsigned BitWidth : 7; /// When ResultKind == RSK_APValue. Wether the ASTContext will cleanup the /// destructor on the trail-allocated APValue. unsigned HasCleanup : 1; /// Whether this ConstantExpr was created for immediate invocation. unsigned IsImmediateInvocation : 1; }; class PredefinedExprBitfields { friend class ASTStmtReader; friend class PredefinedExpr; unsigned : NumExprBits; /// The kind of this PredefinedExpr. One of the enumeration values /// in PredefinedExpr::IdentKind. unsigned Kind : 4; /// True if this PredefinedExpr has a trailing "StringLiteral *" /// for the predefined identifier. unsigned HasFunctionName : 1; /// The location of this PredefinedExpr. SourceLocation Loc; }; class DeclRefExprBitfields { friend class ASTStmtReader; // deserialization friend class DeclRefExpr; unsigned : NumExprBits; unsigned HasQualifier : 1; unsigned HasTemplateKWAndArgsInfo : 1; unsigned HasFoundDecl : 1; unsigned HadMultipleCandidates : 1; unsigned RefersToEnclosingVariableOrCapture : 1; unsigned NonOdrUseReason : 2; /// The location of the declaration name itself. SourceLocation Loc; }; class FloatingLiteralBitfields { friend class FloatingLiteral; unsigned : NumExprBits; unsigned Semantics : 3; // Provides semantics for APFloat construction unsigned IsExact : 1; }; class StringLiteralBitfields { friend class ASTStmtReader; friend class StringLiteral; unsigned : NumExprBits; /// The kind of this string literal. /// One of the enumeration values of StringLiteral::StringKind. unsigned Kind : 3; /// The width of a single character in bytes. 
Only values of 1, 2, /// and 4 bytes are supported. StringLiteral::mapCharByteWidth maps /// the target + string kind to the appropriate CharByteWidth. unsigned CharByteWidth : 3; unsigned IsPascal : 1; /// The number of concatenated token this string is made of. /// This is the number of trailing SourceLocation. unsigned NumConcatenated; }; class CharacterLiteralBitfields { friend class CharacterLiteral; unsigned : NumExprBits; unsigned Kind : 3; }; class UnaryOperatorBitfields { friend class UnaryOperator; unsigned : NumExprBits; unsigned Opc : 5; unsigned CanOverflow : 1; // /// This is only meaningful for operations on floating point /// types when additional values need to be in trailing storage. /// It is 0 otherwise. unsigned HasFPFeatures : 1; SourceLocation Loc; }; class UnaryExprOrTypeTraitExprBitfields { friend class UnaryExprOrTypeTraitExpr; unsigned : NumExprBits; unsigned Kind : 3; unsigned IsType : 1; // true if operand is a type, false if an expression. }; class ArrayOrMatrixSubscriptExprBitfields { friend class ArraySubscriptExpr; friend class MatrixSubscriptExpr; unsigned : NumExprBits; SourceLocation RBracketLoc; }; class CallExprBitfields { friend class CallExpr; unsigned : NumExprBits; unsigned NumPreArgs : 1; /// True if the callee of the call expression was found using ADL. unsigned UsesADL : 1; /// Padding used to align OffsetToTrailingObjects to a byte multiple. unsigned : 24 - 2 - NumExprBits; /// The offset in bytes from the this pointer to the start of the /// trailing objects belonging to CallExpr. Intentionally byte sized /// for faster access. unsigned OffsetToTrailingObjects : 8; }; enum { NumCallExprBits = 32 }; class MemberExprBitfields { friend class ASTStmtReader; friend class MemberExpr; unsigned : NumExprBits; /// IsArrow - True if this is "X->F", false if this is "X.F". 
unsigned IsArrow : 1; /// True if this member expression used a nested-name-specifier to /// refer to the member, e.g., "x->Base::f", or found its member via /// a using declaration. When true, a MemberExprNameQualifier /// structure is allocated immediately after the MemberExpr. unsigned HasQualifierOrFoundDecl : 1; /// True if this member expression specified a template keyword /// and/or a template argument list explicitly, e.g., x->f<int>, /// x->template f, x->template f<int>. /// When true, an ASTTemplateKWAndArgsInfo structure and its /// TemplateArguments (if any) are present. unsigned HasTemplateKWAndArgsInfo : 1; /// True if this member expression refers to a method that /// was resolved from an overloaded set having size greater than 1. unsigned HadMultipleCandidates : 1; /// Value of type NonOdrUseReason indicating why this MemberExpr does /// not constitute an odr-use of the named declaration. Meaningful only /// when naming a static member. unsigned NonOdrUseReason : 2; /// This is the location of the -> or . in the expression. SourceLocation OperatorLoc; }; class CastExprBitfields { friend class CastExpr; friend class ImplicitCastExpr; unsigned : NumExprBits; unsigned Kind : 6; unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr. /// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough /// here. ([implimits] Direct and indirect base classes [16384]). unsigned BasePathSize; }; class BinaryOperatorBitfields { friend class BinaryOperator; unsigned : NumExprBits; unsigned Opc : 6; /// This is only meaningful for operations on floating point /// types when additional values need to be in trailing storage. /// It is 0 otherwise. unsigned HasFPFeatures : 1; SourceLocation OpLoc; }; class InitListExprBitfields { friend class InitListExpr; unsigned : NumExprBits; /// Whether this initializer list originally had a GNU array-range /// designator in it. This is a temporary marker used by CodeGen. 
unsigned HadArrayRangeDesignator : 1; }; class ParenListExprBitfields { friend class ASTStmtReader; friend class ParenListExpr; unsigned : NumExprBits; /// The number of expressions in the paren list. unsigned NumExprs; }; class GenericSelectionExprBitfields { friend class ASTStmtReader; friend class GenericSelectionExpr; unsigned : NumExprBits; /// The location of the "_Generic". SourceLocation GenericLoc; }; class PseudoObjectExprBitfields { friend class ASTStmtReader; // deserialization friend class PseudoObjectExpr; unsigned : NumExprBits; // These don't need to be particularly wide, because they're // strictly limited by the forms of expressions we permit. unsigned NumSubExprs : 8; unsigned ResultIndex : 32 - 8 - NumExprBits; }; class SourceLocExprBitfields { friend class ASTStmtReader; friend class SourceLocExpr; unsigned : NumExprBits; /// The kind of source location builtin represented by the SourceLocExpr. /// Ex. __builtin_LINE, __builtin_FUNCTION, ect. unsigned Kind : 2; }; class StmtExprBitfields { friend class ASTStmtReader; friend class StmtExpr; unsigned : NumExprBits; /// The number of levels of template parameters enclosing this statement /// expression. Used to determine if a statement expression remains /// dependent after instantiation. unsigned TemplateDepth; }; //===--- C++ Expression bitfields classes ---===// class CXXOperatorCallExprBitfields { friend class ASTStmtReader; friend class CXXOperatorCallExpr; unsigned : NumCallExprBits; /// The kind of this overloaded operator. One of the enumerator /// value of OverloadedOperatorKind. unsigned OperatorKind : 6; // Only meaningful for floating point types. unsigned FPFeatures : 14; }; class CXXRewrittenBinaryOperatorBitfields { friend class ASTStmtReader; friend class CXXRewrittenBinaryOperator; unsigned : NumCallExprBits; unsigned IsReversed : 1; }; class CXXBoolLiteralExprBitfields { friend class CXXBoolLiteralExpr; unsigned : NumExprBits; /// The value of the boolean literal. 
unsigned Value : 1; /// The location of the boolean literal. SourceLocation Loc; }; class CXXNullPtrLiteralExprBitfields { friend class CXXNullPtrLiteralExpr; unsigned : NumExprBits; /// The location of the null pointer literal. SourceLocation Loc; }; class CXXThisExprBitfields { friend class CXXThisExpr; unsigned : NumExprBits; /// Whether this is an implicit "this". unsigned IsImplicit : 1; /// The location of the "this". SourceLocation Loc; }; class CXXThrowExprBitfields { friend class ASTStmtReader; friend class CXXThrowExpr; unsigned : NumExprBits; /// Whether the thrown variable (if any) is in scope. unsigned IsThrownVariableInScope : 1; /// The location of the "throw". SourceLocation ThrowLoc; }; class CXXDefaultArgExprBitfields { friend class ASTStmtReader; friend class CXXDefaultArgExpr; unsigned : NumExprBits; /// The location where the default argument expression was used. SourceLocation Loc; }; class CXXDefaultInitExprBitfields { friend class ASTStmtReader; friend class CXXDefaultInitExpr; unsigned : NumExprBits; /// The location where the default initializer expression was used. SourceLocation Loc; }; class CXXScalarValueInitExprBitfields { friend class ASTStmtReader; friend class CXXScalarValueInitExpr; unsigned : NumExprBits; SourceLocation RParenLoc; }; class CXXNewExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class CXXNewExpr; unsigned : NumExprBits; /// Was the usage ::new, i.e. is the global new to be used? unsigned IsGlobalNew : 1; /// Do we allocate an array? If so, the first trailing "Stmt *" is the /// size expression. unsigned IsArray : 1; /// Should the alignment be passed to the allocation function? unsigned ShouldPassAlignment : 1; /// If this is an array allocation, does the usual deallocation /// function for the allocated type want to know the allocated size? unsigned UsualArrayDeleteWantsSize : 1; /// What kind of initializer do we have? Could be none, parens, or braces. 
/// In storage, we distinguish between "none, and no initializer expr", and /// "none, but an implicit initializer expr". unsigned StoredInitializationStyle : 2; /// True if the allocated type was expressed as a parenthesized type-id. unsigned IsParenTypeId : 1; /// The number of placement new arguments. unsigned NumPlacementArgs; }; class CXXDeleteExprBitfields { friend class ASTStmtReader; friend class CXXDeleteExpr; unsigned : NumExprBits; /// Is this a forced global delete, i.e. "::delete"? unsigned GlobalDelete : 1; /// Is this the array form of delete, i.e. "delete[]"? unsigned ArrayForm : 1; /// ArrayFormAsWritten can be different from ArrayForm if 'delete' is /// applied to pointer-to-array type (ArrayFormAsWritten will be false /// while ArrayForm will be true). unsigned ArrayFormAsWritten : 1; /// Does the usual deallocation function for the element type require /// a size_t argument? unsigned UsualArrayDeleteWantsSize : 1; /// Location of the expression. SourceLocation Loc; }; class TypeTraitExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class TypeTraitExpr; unsigned : NumExprBits; /// The kind of type trait, which is a value of a TypeTrait enumerator. unsigned Kind : 8; /// If this expression is not value-dependent, this indicates whether /// the trait evaluated true or false. unsigned Value : 1; /// The number of arguments to this type trait. unsigned NumArgs : 32 - 8 - 1 - NumExprBits; }; class DependentScopeDeclRefExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class DependentScopeDeclRefExpr; unsigned : NumExprBits; /// Whether the name includes info for explicit template /// keyword and arguments. 
unsigned HasTemplateKWAndArgsInfo : 1; }; class CXXConstructExprBitfields { friend class ASTStmtReader; friend class CXXConstructExpr; unsigned : NumExprBits; unsigned Elidable : 1; unsigned HadMultipleCandidates : 1; unsigned ListInitialization : 1; unsigned StdInitListInitialization : 1; unsigned ZeroInitialization : 1; unsigned ConstructionKind : 3; SourceLocation Loc; }; class ExprWithCleanupsBitfields { friend class ASTStmtReader; // deserialization friend class ExprWithCleanups; unsigned : NumExprBits; // When false, it must not have side effects. unsigned CleanupsHaveSideEffects : 1; unsigned NumObjects : 32 - 1 - NumExprBits; }; class CXXUnresolvedConstructExprBitfields { friend class ASTStmtReader; friend class CXXUnresolvedConstructExpr; unsigned : NumExprBits; /// The number of arguments used to construct the type. unsigned NumArgs; }; class CXXDependentScopeMemberExprBitfields { friend class ASTStmtReader; friend class CXXDependentScopeMemberExpr; unsigned : NumExprBits; /// Whether this member expression used the '->' operator or /// the '.' operator. unsigned IsArrow : 1; /// Whether this member expression has info for explicit template /// keyword and arguments. unsigned HasTemplateKWAndArgsInfo : 1; /// See getFirstQualifierFoundInScope() and the comment listing /// the trailing objects. unsigned HasFirstQualifierFoundInScope : 1; /// The location of the '->' or '.' operator. SourceLocation OperatorLoc; }; class OverloadExprBitfields { friend class ASTStmtReader; friend class OverloadExpr; unsigned : NumExprBits; /// Whether the name includes info for explicit template /// keyword and arguments. unsigned HasTemplateKWAndArgsInfo : 1; /// Padding used by the derived classes to store various bits. If you /// need to add some data here, shrink this padding and add your data /// above. NumOverloadExprBits also needs to be updated. unsigned : 32 - NumExprBits - 1; /// The number of results. 
unsigned NumResults; }; enum { NumOverloadExprBits = NumExprBits + 1 }; class UnresolvedLookupExprBitfields { friend class ASTStmtReader; friend class UnresolvedLookupExpr; unsigned : NumOverloadExprBits; /// True if these lookup results should be extended by /// argument-dependent lookup if this is the operand of a function call. unsigned RequiresADL : 1; /// True if these lookup results are overloaded. This is pretty trivially /// rederivable if we urgently need to kill this field. unsigned Overloaded : 1; }; static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4, "UnresolvedLookupExprBitfields must be <= than 4 bytes to" "avoid trashing OverloadExprBitfields::NumResults!"); class UnresolvedMemberExprBitfields { friend class ASTStmtReader; friend class UnresolvedMemberExpr; unsigned : NumOverloadExprBits; /// Whether this member expression used the '->' operator or /// the '.' operator. unsigned IsArrow : 1; /// Whether the lookup results contain an unresolved using declaration. unsigned HasUnresolvedUsing : 1; }; static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4, "UnresolvedMemberExprBitfields must be <= than 4 bytes to" "avoid trashing OverloadExprBitfields::NumResults!"); class CXXNoexceptExprBitfields { friend class ASTStmtReader; friend class CXXNoexceptExpr; unsigned : NumExprBits; unsigned Value : 1; }; class SubstNonTypeTemplateParmExprBitfields { friend class ASTStmtReader; friend class SubstNonTypeTemplateParmExpr; unsigned : NumExprBits; /// The location of the non-type template parameter reference. 
SourceLocation NameLoc; }; class RequiresExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class RequiresExpr; unsigned : NumExprBits; unsigned IsSatisfied : 1; SourceLocation RequiresKWLoc; }; //===--- C++ Coroutines TS bitfields classes ---===// class CoawaitExprBitfields { friend class CoawaitExpr; unsigned : NumExprBits; unsigned IsImplicit : 1; }; //===--- Obj-C Expression bitfields classes ---===// class ObjCIndirectCopyRestoreExprBitfields { friend class ObjCIndirectCopyRestoreExpr; unsigned : NumExprBits; unsigned ShouldCopy : 1; }; //===--- Clang Extensions bitfields classes ---===// class OpaqueValueExprBitfields { friend class ASTStmtReader; friend class OpaqueValueExpr; unsigned : NumExprBits; /// The OVE is a unique semantic reference to its source expression if this /// bit is set to true. unsigned IsUnique : 1; SourceLocation Loc; }; union { // Same order as in StmtNodes.td. // Statements StmtBitfields StmtBits; NullStmtBitfields NullStmtBits; CompoundStmtBitfields CompoundStmtBits; LabelStmtBitfields LabelStmtBits; AttributedStmtBitfields AttributedStmtBits; IfStmtBitfields IfStmtBits; SwitchStmtBitfields SwitchStmtBits; WhileStmtBitfields WhileStmtBits; DoStmtBitfields DoStmtBits; ForStmtBitfields ForStmtBits; GotoStmtBitfields GotoStmtBits; ContinueStmtBitfields ContinueStmtBits; BreakStmtBitfields BreakStmtBits; ReturnStmtBitfields ReturnStmtBits; SwitchCaseBitfields SwitchCaseBits; // Expressions ExprBitfields ExprBits; ConstantExprBitfields ConstantExprBits; PredefinedExprBitfields PredefinedExprBits; DeclRefExprBitfields DeclRefExprBits; FloatingLiteralBitfields FloatingLiteralBits; StringLiteralBitfields StringLiteralBits; CharacterLiteralBitfields CharacterLiteralBits; UnaryOperatorBitfields UnaryOperatorBits; UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits; ArrayOrMatrixSubscriptExprBitfields ArrayOrMatrixSubscriptExprBits; CallExprBitfields CallExprBits; MemberExprBitfields MemberExprBits; 
CastExprBitfields CastExprBits; BinaryOperatorBitfields BinaryOperatorBits; InitListExprBitfields InitListExprBits; ParenListExprBitfields ParenListExprBits; GenericSelectionExprBitfields GenericSelectionExprBits; PseudoObjectExprBitfields PseudoObjectExprBits; SourceLocExprBitfields SourceLocExprBits; // GNU Extensions. StmtExprBitfields StmtExprBits; // C++ Expressions CXXOperatorCallExprBitfields CXXOperatorCallExprBits; CXXRewrittenBinaryOperatorBitfields CXXRewrittenBinaryOperatorBits; CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits; CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits; CXXThisExprBitfields CXXThisExprBits; CXXThrowExprBitfields CXXThrowExprBits; CXXDefaultArgExprBitfields CXXDefaultArgExprBits; CXXDefaultInitExprBitfields CXXDefaultInitExprBits; CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits; CXXNewExprBitfields CXXNewExprBits; CXXDeleteExprBitfields CXXDeleteExprBits; TypeTraitExprBitfields TypeTraitExprBits; DependentScopeDeclRefExprBitfields DependentScopeDeclRefExprBits; CXXConstructExprBitfields CXXConstructExprBits; ExprWithCleanupsBitfields ExprWithCleanupsBits; CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits; CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits; OverloadExprBitfields OverloadExprBits; UnresolvedLookupExprBitfields UnresolvedLookupExprBits; UnresolvedMemberExprBitfields UnresolvedMemberExprBits; CXXNoexceptExprBitfields CXXNoexceptExprBits; SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits; RequiresExprBitfields RequiresExprBits; // C++ Coroutines TS expressions CoawaitExprBitfields CoawaitBits; // Obj-C Expressions ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits; // Clang Extensions OpaqueValueExprBitfields OpaqueValueExprBits; }; public: // Only allow allocation of Stmts using the allocator in ASTContext // or by doing a placement new. 
void* operator new(size_t bytes, const ASTContext& C, unsigned alignment = 8); void* operator new(size_t bytes, const ASTContext* C, unsigned alignment = 8) { return operator new(bytes, *C, alignment); } void *operator new(size_t bytes, void *mem) noexcept { return mem; } void operator delete(void *, const ASTContext &, unsigned) noexcept {} void operator delete(void *, const ASTContext *, unsigned) noexcept {} void operator delete(void *, size_t) noexcept {} void operator delete(void *, void *) noexcept {} public: /// A placeholder type used to construct an empty shell of a /// type, that will be filled in later (e.g., by some /// de-serialization). struct EmptyShell {}; protected: /// Iterator for iterating over Stmt * arrays that contain only T *. /// /// This is needed because AST nodes use Stmt* arrays to store /// references to children (to be compatible with StmtIterator). template<typename T, typename TPtr = T *, typename StmtPtr = Stmt *> struct CastIterator : llvm::iterator_adaptor_base<CastIterator<T, TPtr, StmtPtr>, StmtPtr *, std::random_access_iterator_tag, TPtr> { using Base = typename CastIterator::iterator_adaptor_base; CastIterator() : Base(nullptr) {} CastIterator(StmtPtr *I) : Base(I) {} typename Base::value_type operator*() const { return cast_or_null<T>(*this->I); } }; /// Const iterator for iterating over Stmt * arrays that contain only T *. template <typename T> using ConstCastIterator = CastIterator<T, const T *const, const Stmt *const>; using ExprIterator = CastIterator<Expr>; using ConstExprIterator = ConstCastIterator<Expr>; private: /// Whether statistic collection is enabled. static bool StatisticsEnabled; protected: /// Construct an empty statement. 
explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {} public: Stmt() = delete; Stmt(const Stmt &) = delete; Stmt(Stmt &&) = delete; Stmt &operator=(const Stmt &) = delete; Stmt &operator=(Stmt &&) = delete; Stmt(StmtClass SC) { static_assert(sizeof(*this) <= 8, "changing bitfields changed sizeof(Stmt)"); static_assert(sizeof(*this) % alignof(void *) == 0, "Insufficient alignment!"); StmtBits.sClass = SC; if (StatisticsEnabled) Stmt::addStmtClass(SC); } StmtClass getStmtClass() const { return static_cast<StmtClass>(StmtBits.sClass); } const char *getStmtClassName() const; /// SourceLocation tokens are not useful in isolation - they are low level /// value objects created/interpreted by SourceManager. We assume AST /// clients will have a pointer to the respective SourceManager. SourceRange getSourceRange() const LLVM_READONLY; SourceLocation getBeginLoc() const LLVM_READONLY; SourceLocation getEndLoc() const LLVM_READONLY; // global temp stats (until we have a per-module visitor) static void addStmtClass(const StmtClass s); static void EnableStatistics(); static void PrintStats(); /// Dumps the specified AST fragment and all subtrees to /// \c llvm::errs(). void dump() const; void dump(SourceManager &SM) const; void dump(raw_ostream &OS, SourceManager &SM) const; void dump(raw_ostream &OS) const; /// \return Unique reproducible object identifier int64_t getID(const ASTContext &Context) const; /// dumpColor - same as dump(), but forces color highlighting. void dumpColor() const; /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST /// back to its original source language syntax. void dumpPretty(const ASTContext &Context) const; void printPretty(raw_ostream &OS, PrinterHelper *Helper, const PrintingPolicy &Policy, unsigned Indentation = 0, StringRef NewlineSymbol = "\n", const ASTContext *Context = nullptr) const; /// Pretty-prints in JSON format. 
void printJson(raw_ostream &Out, PrinterHelper *Helper, const PrintingPolicy &Policy, bool AddQuotes) const; /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only /// works on systems with GraphViz (Mac OS X) or dot+gv installed. void viewAST() const; /// Skip no-op (attributed, compound) container stmts and skip captured /// stmt at the top, if \a IgnoreCaptured is true. Stmt *IgnoreContainers(bool IgnoreCaptured = false); const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const { return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured); } const Stmt *stripLabelLikeStatements() const; Stmt *stripLabelLikeStatements() { return const_cast<Stmt*>( const_cast<const Stmt*>(this)->stripLabelLikeStatements()); } /// Child Iterators: All subclasses must implement 'children' /// to permit easy iteration over the substatements/subexpessions of an /// AST node. This permits easy iteration over all nodes in the AST. using child_iterator = StmtIterator; using const_child_iterator = ConstStmtIterator; using child_range = llvm::iterator_range<child_iterator>; using const_child_range = llvm::iterator_range<const_child_iterator>; child_range children(); const_child_range children() const { auto Children = const_cast<Stmt *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_iterator child_begin() { return children().begin(); } child_iterator child_end() { return children().end(); } const_child_iterator child_begin() const { return children().begin(); } const_child_iterator child_end() const { return children().end(); } /// Produce a unique representation of the given statement. /// /// \param ID once the profiling operation is complete, will contain /// the unique representation of the given statement. 
/// /// \param Context the AST context in which the statement resides /// /// \param Canonical whether the profile should be based on the canonical /// representation of this statement (e.g., where non-type template /// parameters are identified by index/level rather than their /// declaration pointers) or the exact representation of the statement as /// written in the source. void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, bool Canonical) const; /// Calculate a unique representation for a statement that is /// stable across compiler invocations. /// /// \param ID profile information will be stored in ID. /// /// \param Hash an ODRHash object which will be called where pointers would /// have been used in the Profile function. void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const; }; /// DeclStmt - Adaptor class for mixing declarations with statements and /// expressions. For example, CompoundStmt mixes statements, expressions /// and declarations (variables, types). Another example is ForStmt, where /// the first statement can be an expression or a declaration. class DeclStmt : public Stmt { DeclGroupRef DG; SourceLocation StartLoc, EndLoc; public: DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc) : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {} /// Build an empty declaration statement. explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {} /// isSingleDecl - This method returns true if this DeclStmt refers /// to a single Decl. 
bool isSingleDecl() const { return DG.isSingleDecl(); } const Decl *getSingleDecl() const { return DG.getSingleDecl(); } Decl *getSingleDecl() { return DG.getSingleDecl(); } const DeclGroupRef getDeclGroup() const { return DG; } DeclGroupRef getDeclGroup() { return DG; } void setDeclGroup(DeclGroupRef DGR) { DG = DGR; } void setStartLoc(SourceLocation L) { StartLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == DeclStmtClass; } // Iterators over subexpressions. child_range children() { return child_range(child_iterator(DG.begin(), DG.end()), child_iterator(DG.end(), DG.end())); } const_child_range children() const { auto Children = const_cast<DeclStmt *>(this)->children(); return const_child_range(Children); } using decl_iterator = DeclGroupRef::iterator; using const_decl_iterator = DeclGroupRef::const_iterator; using decl_range = llvm::iterator_range<decl_iterator>; using decl_const_range = llvm::iterator_range<const_decl_iterator>; decl_range decls() { return decl_range(decl_begin(), decl_end()); } decl_const_range decls() const { return decl_const_range(decl_begin(), decl_end()); } decl_iterator decl_begin() { return DG.begin(); } decl_iterator decl_end() { return DG.end(); } const_decl_iterator decl_begin() const { return DG.begin(); } const_decl_iterator decl_end() const { return DG.end(); } using reverse_decl_iterator = std::reverse_iterator<decl_iterator>; reverse_decl_iterator decl_rbegin() { return reverse_decl_iterator(decl_end()); } reverse_decl_iterator decl_rend() { return reverse_decl_iterator(decl_begin()); } }; /// NullStmt - This is the null statement ";": C99 6.8.3p3. 
/// class NullStmt : public Stmt { public: NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false) : Stmt(NullStmtClass) { NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro; setSemiLoc(L); } /// Build an empty null statement. explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {} SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; } void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; } bool hasLeadingEmptyMacro() const { return NullStmtBits.HasLeadingEmptyMacro; } SourceLocation getBeginLoc() const { return getSemiLoc(); } SourceLocation getEndLoc() const { return getSemiLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == NullStmtClass; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } }; /// CompoundStmt - This represents a group of statements like { stmt stmt }. class CompoundStmt final : public Stmt, private llvm::TrailingObjects<CompoundStmt, Stmt *> { friend class ASTStmtReader; friend TrailingObjects; /// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits. SourceLocation RBraceLoc; CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB); explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {} void setStmts(ArrayRef<Stmt *> Stmts); public: static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB); // Build an empty compound statement with a location. explicit CompoundStmt(SourceLocation Loc) : Stmt(CompoundStmtClass), RBraceLoc(Loc) { CompoundStmtBits.NumStmts = 0; CompoundStmtBits.LBraceLoc = Loc; } // Build an empty compound statement. 
static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts); bool body_empty() const { return CompoundStmtBits.NumStmts == 0; } unsigned size() const { return CompoundStmtBits.NumStmts; } using body_iterator = Stmt **; using body_range = llvm::iterator_range<body_iterator>; body_range body() { return body_range(body_begin(), body_end()); } body_iterator body_begin() { return getTrailingObjects<Stmt *>(); } body_iterator body_end() { return body_begin() + size(); } Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; } Stmt *body_back() { return !body_empty() ? body_begin()[size() - 1] : nullptr; } using const_body_iterator = Stmt *const *; using body_const_range = llvm::iterator_range<const_body_iterator>; body_const_range body() const { return body_const_range(body_begin(), body_end()); } const_body_iterator body_begin() const { return getTrailingObjects<Stmt *>(); } const_body_iterator body_end() const { return body_begin() + size(); } const Stmt *body_front() const { return !body_empty() ? body_begin()[0] : nullptr; } const Stmt *body_back() const { return !body_empty() ? body_begin()[size() - 1] : nullptr; } using reverse_body_iterator = std::reverse_iterator<body_iterator>; reverse_body_iterator body_rbegin() { return reverse_body_iterator(body_end()); } reverse_body_iterator body_rend() { return reverse_body_iterator(body_begin()); } using const_reverse_body_iterator = std::reverse_iterator<const_body_iterator>; const_reverse_body_iterator body_rbegin() const { return const_reverse_body_iterator(body_end()); } const_reverse_body_iterator body_rend() const { return const_reverse_body_iterator(body_begin()); } // Get the Stmt that StmtExpr would consider to be the result of this // compound statement. This is used by StmtExpr to properly emulate the GCC // compound expression extension, which ignores trailing NullStmts when // getting the result of the expression. // i.e. 
({ 5;;; }) // ^^ ignored // If we don't find something that isn't a NullStmt, just return the last // Stmt. Stmt *getStmtExprResult() { for (auto *B : llvm::reverse(body())) { if (!isa<NullStmt>(B)) return B; } return body_back(); } const Stmt *getStmtExprResult() const { return const_cast<CompoundStmt *>(this)->getStmtExprResult(); } SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; } SourceLocation getEndLoc() const { return RBraceLoc; } SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; } SourceLocation getRBracLoc() const { return RBraceLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == CompoundStmtClass; } // Iterators child_range children() { return child_range(body_begin(), body_end()); } const_child_range children() const { return const_child_range(body_begin(), body_end()); } }; // SwitchCase is the base class for CaseStmt and DefaultStmt, class SwitchCase : public Stmt { protected: /// The location of the ":". SourceLocation ColonLoc; // The location of the "case" or "default" keyword. Stored in SwitchCaseBits. // SourceLocation KeywordLoc; /// A pointer to the following CaseStmt or DefaultStmt class, /// used by SwitchStmt. 
SwitchCase *NextSwitchCase = nullptr; SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc) : Stmt(SC), ColonLoc(ColonLoc) { setKeywordLoc(KWLoc); } SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {} public: const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; } SwitchCase *getNextSwitchCase() { return NextSwitchCase; } void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; } SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; } void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; } SourceLocation getColonLoc() const { return ColonLoc; } void setColonLoc(SourceLocation L) { ColonLoc = L; } inline Stmt *getSubStmt(); const Stmt *getSubStmt() const { return const_cast<SwitchCase *>(this)->getSubStmt(); } SourceLocation getBeginLoc() const { return getKeywordLoc(); } inline SourceLocation getEndLoc() const LLVM_READONLY; static bool classof(const Stmt *T) { return T->getStmtClass() == CaseStmtClass || T->getStmtClass() == DefaultStmtClass; } }; /// CaseStmt - Represent a case statement. It can optionally be a GNU case /// statement of the form LHS ... RHS representing a range of cases. class CaseStmt final : public SwitchCase, private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> { friend TrailingObjects; // CaseStmt is followed by several trailing objects, some of which optional. // Note that it would be more convenient to put the optional trailing objects // at the end but this would impact children(). // The trailing objects are in order: // // * A "Stmt *" for the LHS of the case statement. Always present. // // * A "Stmt *" for the RHS of the case statement. This is a GNU extension // which allow ranges in cases statement of the form LHS ... RHS. // Present if and only if caseStmtIsGNURange() is true. // // * A "Stmt *" for the substatement of the case statement. Always present. // // * A SourceLocation for the location of the ... if this is a case statement // with a range. 
Present if and only if caseStmtIsGNURange() is true. enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 }; enum { NumMandatoryStmtPtr = 2 }; unsigned numTrailingObjects(OverloadToken<Stmt *>) const { return NumMandatoryStmtPtr + caseStmtIsGNURange(); } unsigned numTrailingObjects(OverloadToken<SourceLocation>) const { return caseStmtIsGNURange(); } unsigned lhsOffset() const { return LhsOffset; } unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); } unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; } /// Build a case statement assuming that the storage for the /// trailing objects has been properly allocated. CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc, SourceLocation ellipsisLoc, SourceLocation colonLoc) : SwitchCase(CaseStmtClass, caseLoc, colonLoc) { // Handle GNU case statements of the form LHS ... RHS. bool IsGNURange = rhs != nullptr; SwitchCaseBits.CaseStmtIsGNURange = IsGNURange; setLHS(lhs); setSubStmt(nullptr); if (IsGNURange) { setRHS(rhs); setEllipsisLoc(ellipsisLoc); } } /// Build an empty switch case statement. explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange) : SwitchCase(CaseStmtClass, Empty) { SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange; } public: /// Build a case statement. static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs, SourceLocation caseLoc, SourceLocation ellipsisLoc, SourceLocation colonLoc); /// Build an empty case statement. static CaseStmt *CreateEmpty(const ASTContext &Ctx, bool CaseStmtIsGNURange); /// True if this case statement is of the form case LHS ... RHS, which /// is a GNU extension. In this case the RHS can be obtained with getRHS() /// and the location of the ellipsis can be obtained with getEllipsisLoc(). bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; } SourceLocation getCaseLoc() const { return getKeywordLoc(); } void setCaseLoc(SourceLocation L) { setKeywordLoc(L); } /// Get the location of the ... 
in a case statement of the form LHS ... RHS. SourceLocation getEllipsisLoc() const { return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>() : SourceLocation(); } /// Set the location of the ... in a case statement of the form LHS ... RHS. /// Assert that this case statement is of this form. void setEllipsisLoc(SourceLocation L) { assert( caseStmtIsGNURange() && "setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!"); *getTrailingObjects<SourceLocation>() = L; } Expr *getLHS() { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]); } const Expr *getLHS() const { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]); } void setLHS(Expr *Val) { getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val); } Expr *getRHS() { return caseStmtIsGNURange() ? reinterpret_cast<Expr *>( getTrailingObjects<Stmt *>()[rhsOffset()]) : nullptr; } const Expr *getRHS() const { return caseStmtIsGNURange() ? reinterpret_cast<Expr *>( getTrailingObjects<Stmt *>()[rhsOffset()]) : nullptr; } void setRHS(Expr *Val) { assert(caseStmtIsGNURange() && "setRHS but this is not a case stmt of the form LHS ... RHS!"); getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val); } Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; } const Stmt *getSubStmt() const { return getTrailingObjects<Stmt *>()[subStmtOffset()]; } void setSubStmt(Stmt *S) { getTrailingObjects<Stmt *>()[subStmtOffset()] = S; } SourceLocation getBeginLoc() const { return getKeywordLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { // Handle deeply nested case statements with iteration instead of recursion. 
const CaseStmt *CS = this; while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt())) CS = CS2; return CS->getSubStmt()->getEndLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CaseStmtClass; } // Iterators child_range children() { return child_range(getTrailingObjects<Stmt *>(), getTrailingObjects<Stmt *>() + numTrailingObjects(OverloadToken<Stmt *>())); } const_child_range children() const { return const_child_range(getTrailingObjects<Stmt *>(), getTrailingObjects<Stmt *>() + numTrailingObjects(OverloadToken<Stmt *>())); } }; class DefaultStmt : public SwitchCase { Stmt *SubStmt; public: DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt) : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {} /// Build an empty default statement. explicit DefaultStmt(EmptyShell Empty) : SwitchCase(DefaultStmtClass, Empty) {} Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } void setSubStmt(Stmt *S) { SubStmt = S; } SourceLocation getDefaultLoc() const { return getKeywordLoc(); } void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); } SourceLocation getBeginLoc() const { return getKeywordLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == DefaultStmtClass; } // Iterators child_range children() { return child_range(&SubStmt, &SubStmt + 1); } const_child_range children() const { return const_child_range(&SubStmt, &SubStmt + 1); } }; SourceLocation SwitchCase::getEndLoc() const { if (const auto *CS = dyn_cast<CaseStmt>(this)) return CS->getEndLoc(); else if (const auto *DS = dyn_cast<DefaultStmt>(this)) return DS->getEndLoc(); llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!"); } Stmt *SwitchCase::getSubStmt() { if (auto *CS = dyn_cast<CaseStmt>(this)) return CS->getSubStmt(); else if (auto *DS = dyn_cast<DefaultStmt>(this)) return DS->getSubStmt(); 
llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!"); } /// Represents a statement that could possibly have a value and type. This /// covers expression-statements, as well as labels and attributed statements. /// /// Value statements have a special meaning when they are the last non-null /// statement in a GNU statement expression, where they determine the value /// of the statement expression. class ValueStmt : public Stmt { protected: using Stmt::Stmt; public: const Expr *getExprStmt() const; Expr *getExprStmt() { const ValueStmt *ConstThis = this; return const_cast<Expr*>(ConstThis->getExprStmt()); } static bool classof(const Stmt *T) { return T->getStmtClass() >= firstValueStmtConstant && T->getStmtClass() <= lastValueStmtConstant; } }; /// LabelStmt - Represents a label, which has a substatement. For example: /// foo: return; class LabelStmt : public ValueStmt { LabelDecl *TheDecl; Stmt *SubStmt; public: /// Build a label statement. LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt) : ValueStmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) { setIdentLoc(IL); } /// Build an empty label statement. 
explicit LabelStmt(EmptyShell Empty) : ValueStmt(LabelStmtClass, Empty) {} SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; } void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; } LabelDecl *getDecl() const { return TheDecl; } void setDecl(LabelDecl *D) { TheDecl = D; } const char *getName() const; Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } void setSubStmt(Stmt *SS) { SubStmt = SS; } SourceLocation getBeginLoc() const { return getIdentLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();} child_range children() { return child_range(&SubStmt, &SubStmt + 1); } const_child_range children() const { return const_child_range(&SubStmt, &SubStmt + 1); } static bool classof(const Stmt *T) { return T->getStmtClass() == LabelStmtClass; } }; /// Represents an attribute applied to a statement. /// /// Represents an attribute applied to a statement. For example: /// [[omp::for(...)]] for (...) { ... 
} class AttributedStmt final : public ValueStmt, private llvm::TrailingObjects<AttributedStmt, const Attr *> { friend class ASTStmtReader; friend TrailingObjects; Stmt *SubStmt; AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs, Stmt *SubStmt) : ValueStmt(AttributedStmtClass), SubStmt(SubStmt) { AttributedStmtBits.NumAttrs = Attrs.size(); AttributedStmtBits.AttrLoc = Loc; std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr()); } explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs) : ValueStmt(AttributedStmtClass, Empty) { AttributedStmtBits.NumAttrs = NumAttrs; AttributedStmtBits.AttrLoc = SourceLocation{}; std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr); } const Attr *const *getAttrArrayPtr() const { return getTrailingObjects<const Attr *>(); } const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); } public: static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc, ArrayRef<const Attr *> Attrs, Stmt *SubStmt); // Build an empty attributed statement. static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs); SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; } ArrayRef<const Attr *> getAttrs() const { return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs); } Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } SourceLocation getBeginLoc() const { return getAttrLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();} child_range children() { return child_range(&SubStmt, &SubStmt + 1); } const_child_range children() const { return const_child_range(&SubStmt, &SubStmt + 1); } static bool classof(const Stmt *T) { return T->getStmtClass() == AttributedStmtClass; } }; /// IfStmt - This represents an if/then/else. 
class IfStmt final
    : public Stmt,
      private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // IfStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing
  // objects at the end but this would change the order of the children.
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the then statement.
  //   Always present.
  //
  // * A "Stmt *" for the else statement.
  //   Present if and only if hasElseStorage().
  //
  // * A "SourceLocation" for the location of the "else".
  //   Present if and only if hasElseStorage().
  enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 };
  enum { NumMandatoryStmtPtr = 2 };

  // Mandatory slots (cond + then) plus one slot per optional part present.
  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() +
           hasInitStorage();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return hasElseStorage();
  }

  // Offsets of each part within the trailing "Stmt *" array; earlier optional
  // parts shift the later ones.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; }
  unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; }

  /// Build an if/then/else statement.
  IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr,
         Stmt *Init, VarDecl *Var, Expr *Cond, Stmt *Then, SourceLocation EL,
         Stmt *Else);

  /// Build an empty if/then/else statement.
  explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit);

public:
  /// Create an IfStmt.
  static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL,
                        bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
                        Stmt *Then, SourceLocation EL = SourceLocation(),
                        Stmt *Else = nullptr);

  /// Create an empty IfStmt optionally with storage for an else statement,
  /// condition variable and init expression.
  static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
                             bool HasInit);

  /// True if this IfStmt has the storage for an init statement.
  bool hasInitStorage() const { return IfStmtBits.HasInit; }

  /// True if this IfStmt has storage for a variable declaration.
  bool hasVarStorage() const { return IfStmtBits.HasVar; }

  /// True if this IfStmt has storage for an else statement.
  bool hasElseStorage() const { return IfStmtBits.HasElse; }

  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; }
  const Stmt *getThen() const {
    return getTrailingObjects<Stmt *>()[thenOffset()];
  }

  void setThen(Stmt *Then) {
    getTrailingObjects<Stmt *>()[thenOffset()] = Then;
  }

  // The else branch is optional; accessors return null when no storage exists.
  Stmt *getElse() {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  const Stmt *getElse() const {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  void setElse(Stmt *Else) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    getTrailingObjects<Stmt *>()[elseOffset()] = Else;
  }

  /// Retrieve the variable declared in this "if" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// if (int x = foo()) {
  ///   printf("x is %d", x);
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<IfStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable for this if statement.
  /// The if statement must have storage for the condition variable.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this IfStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  // The C++17 init-statement, e.g. "if (init; cond)"; null when absent.
  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This if statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; }
  void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; }

  // The "else" keyword location is stored as a trailing SourceLocation, only
  // present when the else branch has storage.
  SourceLocation getElseLoc() const {
    return hasElseStorage() ? *getTrailingObjects<SourceLocation>()
                            : SourceLocation();
  }

  void setElseLoc(SourceLocation ElseLoc) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    *getTrailingObjects<SourceLocation>() = ElseLoc;
  }

  bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
  void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }

  /// If this is an 'if constexpr', determine which substatement will be taken.
  /// Otherwise, or if the condition is value-dependent, returns None.
  Optional<const Stmt*> getNondiscardedCase(const ASTContext &Ctx) const;

  bool isObjCAvailabilityCheck() const;

  SourceLocation getBeginLoc() const { return getIfLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    if (getElse())
      return getElse()->getEndLoc();
    return getThen()->getEndLoc();
  }

  // Iterators over subexpressions.  The iterators will include iterating
  // over the initialization expression referenced by the condition variable.
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IfStmtClass;
  }
};

/// SwitchStmt - This represents a 'switch' stmt.
class SwitchStmt final : public Stmt,
                         private llvm::TrailingObjects<SwitchStmt, Stmt *> {
  friend TrailingObjects;

  /// Points to a linked list of case and default statements.
  SwitchCase *FirstCase;

  // SwitchStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more convenient to
  // put the optional trailing objects at the end but this would change
  // the order in children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  enum { InitOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  // Mandatory slots (cond + body) plus one slot per optional part present.
  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage();
  }

  // Offsets of each part within the trailing "Stmt *" array; earlier optional
  // parts shift the later ones.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  /// Build a switch statement.
  SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond);

  /// Build an empty switch statement.
  explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar);

public:
  /// Create a switch statement.
  static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
                            Expr *Cond);

  /// Create an empty switch statement optionally with storage for
  /// an init expression and a condition variable.
  static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit,
                                 bool HasVar);

  /// True if this SwitchStmt has storage for an init statement.
  bool hasInitStorage() const { return SwitchStmtBits.HasInit; }

  /// True if this SwitchStmt has storage for a condition variable.
  bool hasVarStorage() const { return SwitchStmtBits.HasVar; }

  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  // The C++17 init-statement, e.g. "switch (init; cond)"; null when absent.
  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This switch statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  /// Retrieve the variable declared in this "switch" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// switch (int x = foo()) {
  ///   case 0: break;
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<SwitchStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable in this switch statement.
  /// The switch statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *VD);

  /// If this SwitchStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  SwitchCase *getSwitchCaseList() { return FirstCase; }
  const SwitchCase *getSwitchCaseList() const { return FirstCase; }
  void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; }

  SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; }
  void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; }

  void setBody(Stmt *S, SourceLocation SL) {
    setBody(S);
    setSwitchLoc(SL);
  }

  // Cases are kept as a singly-linked list; new cases are pushed at the front,
  // so the list is in reverse source order.
  void addSwitchCase(SwitchCase *SC) {
    assert(!SC->getNextSwitchCase() &&
           "case/default already added to a switch");
    SC->setNextSwitchCase(FirstCase);
    FirstCase = SC;
  }

  /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
  /// switch over an enum value then all cases have been explicitly covered.
  void setAllEnumCasesCovered() { SwitchStmtBits.AllEnumCasesCovered = true; }

  /// Returns true if the SwitchStmt is a switch of an enum value and all cases
  /// have been explicitly covered.
  bool isAllEnumCasesCovered() const {
    return SwitchStmtBits.AllEnumCasesCovered;
  }

  SourceLocation getBeginLoc() const { return getSwitchLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody() ? getBody()->getEndLoc()
                     : reinterpret_cast<const Stmt *>(getCond())->getEndLoc();
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SwitchStmtClass;
  }
};

/// WhileStmt - This represents a 'while' stmt.
class WhileStmt final : public Stmt,
                        private llvm::TrailingObjects<WhileStmt, Stmt *> {
  friend TrailingObjects;

  // WhileStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more
  // convenient to put the optional trailing object at the end
  // but this would affect children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  //
  enum { VarOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  // Offsets of each part within the trailing "Stmt *" array; an optional
  // condition variable shifts the later parts.
  unsigned varOffset() const { return VarOffset; }
  unsigned condOffset() const { return VarOffset + hasVarStorage(); }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  // Mandatory slots (cond + body) plus one for the condition variable.
  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasVarStorage();
  }

  /// Build a while statement.
  WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body,
            SourceLocation WL);

  /// Build an empty while statement.
  explicit WhileStmt(EmptyShell Empty, bool HasVar);

public:
  /// Create a while statement.
  static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
                           Stmt *Body, SourceLocation WL);

  /// Create an empty while statement optionally with storage for
  /// a condition variable.
  static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar);

  /// True if this WhileStmt has storage for a condition variable.
  bool hasVarStorage() const { return WhileStmtBits.HasVar; }

  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  /// Retrieve the variable declared in this "while" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// while (int x = random()) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<WhileStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable of this while statement.
  /// The while statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this WhileStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; }

  SourceLocation getBeginLoc() const { return getWhileLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == WhileStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};

/// DoStmt - This represents a 'do/while' stmt.
class DoStmt : public Stmt {
  // Fixed-size child array: body and condition are both always present, so no
  // trailing-object machinery is needed (unlike WhileStmt).
  enum { BODY, COND, END_EXPR };
  Stmt *SubExprs[END_EXPR];

  SourceLocation WhileLoc;
  SourceLocation RParenLoc; // Location of final ')' in do stmt condition.

public:
  DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL,
         SourceLocation RP)
      : Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) {
    setCond(Cond);
    setBody(Body);
    setDoLoc(DL);
  }

  /// Build an empty do-while statement.
  explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {}

  Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(SubExprs[COND]);
  }

  void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); }

  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *Body) { SubExprs[BODY] = Body; }

  SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; }
  void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; }

  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }

  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getDoLoc(); }
  SourceLocation getEndLoc() const { return getRParenLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DoStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }

  const_child_range children() const {
    return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};

/// ForStmt - This represents a 'for (init;cond;inc)' stmt.  Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
class ForStmt : public Stmt {
  // Fixed-size child array; any slot may be null if the corresponding part
  // was omitted in the source.
  enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
  SourceLocation LParenLoc, RParenLoc;

public:
  ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
          Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
          SourceLocation RP);

  /// Build an empty for statement.
  explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {}

  Stmt *getInit() { return SubExprs[INIT]; }

  /// Retrieve the variable declared in this "for" statement, if any.
  ///
  /// In the following example, "y" is the condition variable.
  /// \code
  /// for (int x = random(); int y = mangle(x); ++x) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this ForStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
  }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  Expr *getInc()  { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  Stmt *getBody() { return SubExprs[BODY]; }

  const Stmt *getInit() const { return SubExprs[INIT]; }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  const Expr *getInc()  const { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  const Stmt *getBody() const { return SubExprs[BODY]; }

  void setInit(Stmt *S) { SubExprs[INIT] = S; }
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  SourceLocation getForLoc() const { return ForStmtBits.ForLoc; }
  void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; }

  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getForLoc(); }
  SourceLocation getEndLoc() const { return getBody()->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ForStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }

  const_child_range children() const {
    return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};

/// GotoStmt - This represents a direct goto.
class GotoStmt : public Stmt {
  LabelDecl *Label;
  SourceLocation LabelLoc;

public:
  GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
      : Stmt(GotoStmtClass), Label(label), LabelLoc(LL) {
    setGotoLoc(GL);
  }

  /// Build an empty goto statement.
  explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {}

  LabelDecl *getLabel() const { return Label; }
  void setLabel(LabelDecl *D) { Label = D; }

  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }

  SourceLocation getLabelLoc() const { return LabelLoc; }
  void setLabelLoc(SourceLocation L) { LabelLoc = L; }

  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const { return getLabelLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GotoStmtClass;
  }

  // Iterators: a direct goto has no sub-statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// IndirectGotoStmt - This represents an indirect goto.
class IndirectGotoStmt : public Stmt {
  SourceLocation StarLoc;
  /// The target address expression (a "goto *expr"); stored as a Stmt* so it
  /// can be exposed through children().
  Stmt *Target;

public:
  IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target)
      : Stmt(IndirectGotoStmtClass), StarLoc(starLoc) {
    setTarget(target);
    setGotoLoc(gotoLoc);
  }

  /// Build an empty indirect goto statement.
  explicit IndirectGotoStmt(EmptyShell Empty)
      : Stmt(IndirectGotoStmtClass, Empty) {}

  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setStarLoc(SourceLocation L) { StarLoc = L; }
  SourceLocation getStarLoc() const { return StarLoc; }

  Expr *getTarget() { return reinterpret_cast<Expr *>(Target); }
  const Expr *getTarget() const {
    return reinterpret_cast<const Expr *>(Target);
  }
  void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); }

  /// getConstantTarget - Returns the fixed target of this indirect
  /// goto, if one exists.
  LabelDecl *getConstantTarget();
  const LabelDecl *getConstantTarget() const {
    return const_cast<IndirectGotoStmt *>(this)->getConstantTarget();
  }

  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return Target->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IndirectGotoStmtClass;
  }

  // Iterators: the target expression is the only child.
  child_range children() { return child_range(&Target, &Target + 1); }

  const_child_range children() const {
    return const_child_range(&Target, &Target + 1);
  }
};

/// ContinueStmt - This represents a continue.
class ContinueStmt : public Stmt {
public:
  ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) {
    setContinueLoc(CL);
  }

  /// Build an empty continue statement.
  explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {}

  SourceLocation getContinueLoc() const { return ContinueStmtBits.ContinueLoc; }
  void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; }

  SourceLocation getBeginLoc() const { return getContinueLoc(); }
  SourceLocation getEndLoc() const { return getContinueLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ContinueStmtClass;
  }

  // Iterators: a continue statement has no sub-statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// BreakStmt - This represents a break.
class BreakStmt : public Stmt {
public:
  BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) {
    setBreakLoc(BL);
  }

  /// Build an empty break statement.
  explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {}

  SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; }
  void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; }

  SourceLocation getBeginLoc() const { return getBreakLoc(); }
  SourceLocation getEndLoc() const { return getBreakLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == BreakStmtClass;
  }

  // Iterators: a break statement has no sub-statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// ReturnStmt - This represents a return, optionally of an expression:
///   return;
///   return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void.  We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
class ReturnStmt final
    : public Stmt,
      private llvm::TrailingObjects<ReturnStmt, const VarDecl *> {
  friend TrailingObjects;

  /// The return expression.
  Stmt *RetExpr;

  // ReturnStmt is followed optionally by a trailing "const VarDecl *"
  // for the NRVO candidate. Present if and only if hasNRVOCandidate().

  /// True if this ReturnStmt has storage for an NRVO candidate.
  bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; }

  unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const {
    return hasNRVOCandidate();
  }

  /// Build a return statement.
  ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate);

  /// Build an empty return statement.
  explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate);

public:
  /// Create a return statement.
  static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E,
                            const VarDecl *NRVOCandidate);

  /// Create an empty return statement, optionally with
  /// storage for an NRVO candidate.
  static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate);

  Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); }
  const Expr *getRetValue() const { return reinterpret_cast<Expr *>(RetExpr); }
  void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); }

  /// Retrieve the variable that might be used for the named return
  /// value optimization.
  ///
  /// The optimization itself can only be performed if the variable is
  /// also marked as an NRVO object.
  const VarDecl *getNRVOCandidate() const {
    return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>()
                              : nullptr;
  }

  /// Set the variable that might be used for the named return value
  /// optimization. The return statement must have storage for it,
  /// which is the case if and only if hasNRVOCandidate() is true.
  void setNRVOCandidate(const VarDecl *Var) {
    assert(hasNRVOCandidate() &&
           "This return statement has no storage for an NRVO candidate!");
    *getTrailingObjects<const VarDecl *>() = Var;
  }

  SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; }
  void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; }

  SourceLocation getBeginLoc() const { return getReturnLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return RetExpr ? RetExpr->getEndLoc() : getReturnLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ReturnStmtClass;
  }

  // Iterators: the (optional) return expression is the only child.
  child_range children() {
    if (RetExpr)
      return child_range(&RetExpr, &RetExpr + 1);
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    if (RetExpr)
      return const_child_range(&RetExpr, &RetExpr + 1);
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
class AsmStmt : public Stmt {
protected:
  friend class ASTStmtReader;

  SourceLocation AsmLoc;

  /// True if the assembly statement does not have any input or output
  /// operands.
  bool IsSimple;

  /// If true, treat this inline assembly as having side effects.
  /// This assembly statement should not be optimized, deleted or moved.
  bool IsVolatile;

  unsigned NumOutputs;
  unsigned NumInputs;
  unsigned NumClobbers;

  /// Operand expressions, laid out as outputs first then inputs.
  Stmt **Exprs = nullptr;

  AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
          unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
      : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
        NumOutputs(numoutputs), NumInputs(numinputs),
        NumClobbers(numclobbers) {}

public:
  /// Build an empty inline-assembly statement.
  explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {}

  SourceLocation getAsmLoc() const { return AsmLoc; }
  void setAsmLoc(SourceLocation L) { AsmLoc = L; }

  bool isSimple() const { return IsSimple; }
  void setSimple(bool V) { IsSimple = V; }

  bool isVolatile() const { return IsVolatile; }
  void setVolatile(bool V) { IsVolatile = V; }

  // NOTE(review): the base class returns default-constructed (invalid)
  // locations here; presumably derived classes provide the real ones. Confirm
  // against GCCAsmStmt/MSAsmStmt before relying on these.
  SourceLocation getBeginLoc() const LLVM_READONLY { return {}; }
  SourceLocation getEndLoc() const LLVM_READONLY { return {}; }

  //===--- Asm String Analysis ---===//

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  unsigned getNumOutputs() const { return NumOutputs; }

  /// getOutputConstraint - Return the constraint string for the specified
  /// output operand.  All output constraints are known to be non-empty (either
  /// '=' or '+').
  StringRef getOutputConstraint(unsigned i) const;

  /// isOutputPlusConstraint - Return true if the specified output constraint
  /// is a "+" constraint (which is both an input and an output) or false if it
  /// is an "=" constraint (just an output).
  bool isOutputPlusConstraint(unsigned i) const {
    return getOutputConstraint(i)[0] == '+';
  }

  const Expr *getOutputExpr(unsigned i) const;

  /// getNumPlusOperands - Return the number of output operands that have a "+"
  /// constraint.
  unsigned getNumPlusOperands() const;

  //===--- Input operands ---===//

  unsigned getNumInputs() const { return NumInputs; }

  /// getInputConstraint - Return the specified input constraint.  Unlike output
  /// constraints, these can be empty.
  StringRef getInputConstraint(unsigned i) const;

  const Expr *getInputExpr(unsigned i) const;

  //===--- Other ---===//

  unsigned getNumClobbers() const { return NumClobbers; }
  StringRef getClobber(unsigned i) const;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass ||
           T->getStmtClass() == MSAsmStmtClass;
  }

  // Input expr iterators.
  // Inputs occupy Exprs[NumOutputs .. NumOutputs+NumInputs).
  using inputs_iterator = ExprIterator;
  using const_inputs_iterator = ConstExprIterator;
  using inputs_range = llvm::iterator_range<inputs_iterator>;
  using inputs_const_range = llvm::iterator_range<const_inputs_iterator>;

  inputs_iterator begin_inputs() {
    return &Exprs[0] + NumOutputs;
  }

  inputs_iterator end_inputs() {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }

  const_inputs_iterator begin_inputs() const {
    return &Exprs[0] + NumOutputs;
  }

  const_inputs_iterator end_inputs() const {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  inputs_const_range inputs() const {
    return inputs_const_range(begin_inputs(), end_inputs());
  }

  // Output expr iterators.
  // Outputs occupy Exprs[0 .. NumOutputs).
  using outputs_iterator = ExprIterator;
  using const_outputs_iterator = ConstExprIterator;
  using outputs_range = llvm::iterator_range<outputs_iterator>;
  using outputs_const_range = llvm::iterator_range<const_outputs_iterator>;

  outputs_iterator begin_outputs() {
    return &Exprs[0];
  }

  outputs_iterator end_outputs() {
    return &Exprs[0] + NumOutputs;
  }

  outputs_range outputs() {
    return outputs_range(begin_outputs(), end_outputs());
  }

  const_outputs_iterator begin_outputs() const {
    return &Exprs[0];
  }

  const_outputs_iterator end_outputs() const {
    return &Exprs[0] + NumOutputs;
  }

  outputs_const_range outputs() const {
    return outputs_const_range(begin_outputs(), end_outputs());
  }

  child_range children() {
    return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }

  const_child_range children() const {
    return const_child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }
};

/// This represents a GCC inline-assembly statement extension.
class GCCAsmStmt : public AsmStmt {
  friend class ASTStmtReader;

  SourceLocation RParenLoc;
  StringLiteral *AsmStr;

  // FIXME: If we wanted to, we could allocate all of these in one big array.
StringLiteral **Constraints = nullptr; StringLiteral **Clobbers = nullptr; IdentifierInfo **Names = nullptr; unsigned NumLabels = 0; public: GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, IdentifierInfo **names, StringLiteral **constraints, Expr **exprs, StringLiteral *asmstr, unsigned numclobbers, StringLiteral **clobbers, unsigned numlabels, SourceLocation rparenloc); /// Build an empty inline-assembly statement. explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {} SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } //===--- Asm String Analysis ---===// const StringLiteral *getAsmString() const { return AsmStr; } StringLiteral *getAsmString() { return AsmStr; } void setAsmString(StringLiteral *E) { AsmStr = E; } /// AsmStringPiece - this is part of a decomposed asm string specification /// (for use with the AnalyzeAsmString function below). An asm string is /// considered to be a concatenation of these parts. class AsmStringPiece { public: enum Kind { String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%". Operand // Operand reference, with optional modifier %c4. }; private: Kind MyKind; std::string Str; unsigned OperandNo; // Source range for operand references. 
CharSourceRange Range; public: AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {} AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin, SourceLocation End) : MyKind(Operand), Str(S), OperandNo(OpNo), Range(CharSourceRange::getCharRange(Begin, End)) {} bool isString() const { return MyKind == String; } bool isOperand() const { return MyKind == Operand; } const std::string &getString() const { return Str; } unsigned getOperandNo() const { assert(isOperand()); return OperandNo; } CharSourceRange getRange() const { assert(isOperand() && "Range is currently used only for Operands."); return Range; } /// getModifier - Get the modifier for this operand, if present. This /// returns '\0' if there was no modifier. char getModifier() const; }; /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing /// it into pieces. If the asm string is erroneous, emit errors and return /// true, otherwise return false. This handles canonicalization and /// translation of strings from GCC syntax to LLVM IR syntax, and handles //// flattening of named references like %[foo] to Operand AsmStringPiece's. unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces, const ASTContext &C, unsigned &DiagOffs) const; /// Assemble final IR asm string. 
std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; } StringRef getOutputName(unsigned i) const { if (IdentifierInfo *II = getOutputIdentifier(i)) return II->getName(); return {}; } StringRef getOutputConstraint(unsigned i) const; const StringLiteral *getOutputConstraintLiteral(unsigned i) const { return Constraints[i]; } StringLiteral *getOutputConstraintLiteral(unsigned i) { return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// IdentifierInfo *getInputIdentifier(unsigned i) const { return Names[i + NumOutputs]; } StringRef getInputName(unsigned i) const { if (IdentifierInfo *II = getInputIdentifier(i)) return II->getName(); return {}; } StringRef getInputConstraint(unsigned i) const; const StringLiteral *getInputConstraintLiteral(unsigned i) const { return Constraints[i + NumOutputs]; } StringLiteral *getInputConstraintLiteral(unsigned i) { return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getInputExpr(i); } //===--- Labels ---===// bool isAsmGoto() const { return NumLabels > 0; } unsigned getNumLabels() const { return NumLabels; } IdentifierInfo *getLabelIdentifier(unsigned i) const { return Names[i + NumOutputs + NumInputs]; } AddrLabelExpr *getLabelExpr(unsigned i) const; StringRef getLabelName(unsigned i) const; using labels_iterator = CastIterator<AddrLabelExpr>; using const_labels_iterator = ConstCastIterator<AddrLabelExpr>; using labels_range = llvm::iterator_range<labels_iterator>; using labels_const_range = llvm::iterator_range<const_labels_iterator>; labels_iterator begin_labels() { return &Exprs[0] + NumOutputs + NumInputs; } labels_iterator end_labels() { return 
&Exprs[0] + NumOutputs + NumInputs + NumLabels; } labels_range labels() { return labels_range(begin_labels(), end_labels()); } const_labels_iterator begin_labels() const { return &Exprs[0] + NumOutputs + NumInputs; } const_labels_iterator end_labels() const { return &Exprs[0] + NumOutputs + NumInputs + NumLabels; } labels_const_range labels() const { return labels_const_range(begin_labels(), end_labels()); } private: void setOutputsAndInputsAndClobbers(const ASTContext &C, IdentifierInfo **Names, StringLiteral **Constraints, Stmt **Exprs, unsigned NumOutputs, unsigned NumInputs, unsigned NumLabels, StringLiteral **Clobbers, unsigned NumClobbers); public: //===--- Other ---===// /// getNamedOperand - Given a symbolic operand reference like %[foo], /// translate this into a numeric value needed to reference the same operand. /// This returns -1 if the operand name is invalid. int getNamedOperand(StringRef SymbolicName) const; StringRef getClobber(unsigned i) const; StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; } const StringLiteral *getClobberStringLiteral(unsigned i) const { return Clobbers[i]; } SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; } SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass; } }; /// This represents a Microsoft inline-assembly statement extension. 
class MSAsmStmt : public AsmStmt {
  friend class ASTStmtReader;

  SourceLocation LBraceLoc, EndLoc; // '{' of a braced asm block (invalid if unbraced) and end location.
  StringRef AsmStr;                 // Final asm string (see getAsmString / generateAsmString).

  unsigned NumAsmToks = 0;          // Number of raw tokens in AsmToks.

  Token *AsmToks = nullptr;         // Raw token stream of the asm block.
  StringRef *Constraints = nullptr; // Output constraints followed by input constraints.
  StringRef *Clobbers = nullptr;    // Clobber list.

public:
  MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
            SourceLocation lbraceloc, bool issimple, bool isvolatile,
            ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
            ArrayRef<StringRef> constraints,
            ArrayRef<Expr*> exprs, StringRef asmstr,
            ArrayRef<StringRef> clobbers, SourceLocation endloc);

  /// Build an empty MS-style inline-assembly statement.
  explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {}

  SourceLocation getLBraceLoc() const { return LBraceLoc; }
  void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  /// Whether this asm statement was written with braces (LBraceLoc is valid).
  bool hasBraces() const { return LBraceLoc.isValid(); }

  unsigned getNumAsmToks() { return NumAsmToks; }
  Token *getAsmToks() { return AsmToks; }

  //===--- Asm String Analysis ---===//

  StringRef getAsmString() const { return AsmStr; }

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  StringRef getOutputConstraint(unsigned i) const {
    assert(i < NumOutputs);
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//

  StringRef getInputConstraint(unsigned i) const {
    assert(i < NumInputs);
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
  }

  //===--- Other ---===//

  ArrayRef<StringRef> getAllConstraints() const {
    return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
  }

  ArrayRef<StringRef> getClobbers() const {
    return llvm::makeArrayRef(Clobbers, NumClobbers);
  }

  ArrayRef<Expr*> getAllExprs() const {
    return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
                              NumInputs + NumOutputs);
  }

  StringRef getClobber(unsigned i) const { return getClobbers()[i]; }

private:
  void initialize(const ASTContext &C, StringRef AsmString,
                  ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
                  ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);

public:
  SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == MSAsmStmtClass;
  }

  child_range children() {
    return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
  }

  const_child_range children() const {
    return const_child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
  }
};

/// Represents a __except block of a SEH __try statement.
class SEHExceptStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  SourceLocation Loc; // Location of the '__except' keyword.
  Stmt *Children[2];  // Filter expression and handler block (see enum below).

  enum { FILTER_EXPR, BLOCK };

  SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block);
  explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {}

public:
  static SEHExceptStmt* Create(const ASTContext &C,
                               SourceLocation ExceptLoc,
                               Expr *FilterExpr,
                               Stmt *Block);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); }

  SourceLocation getExceptLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); }

  Expr *getFilterExpr() const {
    return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
  }

  CompoundStmt *getBlock() const {
    return cast<CompoundStmt>(Children[BLOCK]);
  }

  child_range children() {
    return child_range(Children, Children+2);
  }

  const_child_range children() const {
    return const_child_range(Children, Children + 2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHExceptStmtClass;
  }
};

/// Represents a __finally block of a SEH __try statement.
class SEHFinallyStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  SourceLocation Loc; // Location of the '__finally' keyword.
  Stmt *Block;        // The cleanup block.

  SEHFinallyStmt(SourceLocation Loc, Stmt *Block);
  explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {}

public:
  static SEHFinallyStmt* Create(const ASTContext &C,
                                SourceLocation FinallyLoc,
                                Stmt *Block);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); }

  SourceLocation getFinallyLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return Block->getEndLoc(); }

  CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }

  child_range children() {
    return child_range(&Block,&Block+1);
  }

  const_child_range children() const {
    return const_child_range(&Block, &Block + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHFinallyStmtClass;
  }
};

/// Represents a SEH '__try' (or C++ 'try') statement together with its single
/// handler, which is either a SEHExceptStmt or a SEHFinallyStmt.
class SEHTryStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  bool IsCXXTry;          // true for 'try', false for '__try'.
  SourceLocation TryLoc;
  Stmt *Children[2];      // Try block and handler (see enum below).

  enum { TRY = 0, HANDLER = 1 };

  SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
             SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler);

  explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {}

public:
  static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
                            SourceLocation TryLoc, Stmt *TryBlock,
                            Stmt *Handler);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getTryLoc(); }

  SourceLocation getTryLoc() const { return TryLoc; }
  SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); }

  bool getIsCXXTry() const { return IsCXXTry; }

  CompoundStmt* getTryBlock() const {
    return cast<CompoundStmt>(Children[TRY]);
  }

  Stmt *getHandler() const { return Children[HANDLER]; }

  /// Returns 0 if not defined
  SEHExceptStmt  *getExceptHandler() const;
  SEHFinallyStmt *getFinallyHandler() const;

  child_range children() {
    return child_range(Children, Children+2);
  }

  const_child_range children() const {
    return const_child_range(Children, Children + 2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHTryStmtClass;
  }
};

/// Represents a __leave statement.
class SEHLeaveStmt : public Stmt {
  SourceLocation LeaveLoc;

public:
  explicit SEHLeaveStmt(SourceLocation LL)
      : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}

  /// Build an empty __leave statement.
  explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {}

  SourceLocation getLeaveLoc() const { return LeaveLoc; }
  void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }

  SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; }
  SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHLeaveStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
///   compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
  /// The different capture forms: by 'this', by reference, capture for
  /// variable-length array type etc.
  enum VariableCaptureKind {
    VCK_This,
    VCK_ByRef,
    VCK_ByCopy,
    VCK_VLAType,
  };

  /// Describes the capture of either a variable, or 'this', or
  /// variable-length array type.
  class Capture {
    // Captured variable (null when capturing 'this') packed with the kind.
    llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
    SourceLocation Loc;

  public:
    friend class ASTStmtReader;

    /// Create a new capture.
    ///
    /// \param Loc The source location associated with this capture.
    ///
    /// \param Kind The kind of capture (this, ByRef, ...).
    ///
    /// \param Var The variable being captured, or null if capturing this.
    Capture(SourceLocation Loc, VariableCaptureKind Kind,
            VarDecl *Var = nullptr);

    /// Determine the kind of capture.
    VariableCaptureKind getCaptureKind() const;

    /// Retrieve the source location at which the variable or 'this' was
    /// first used.
    SourceLocation getLocation() const { return Loc; }

    /// Determine whether this capture handles the C++ 'this' pointer.
    bool capturesThis() const { return getCaptureKind() == VCK_This; }

    /// Determine whether this capture handles a variable (by reference).
    bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }

    /// Determine whether this capture handles a variable by copy.
    bool capturesVariableByCopy() const {
      return getCaptureKind() == VCK_ByCopy;
    }

    /// Determine whether this capture handles a variable-length array
    /// type.
    bool capturesVariableArrayType() const {
      return getCaptureKind() == VCK_VLAType;
    }

    /// Retrieve the declaration of the variable being captured.
    ///
    /// This operation is only valid if this capture captures a variable.
    VarDecl *getCapturedVar() const;
  };

private:
  /// The number of variable captured, including 'this'.
  unsigned NumCaptures;

  /// The pointer part is the outlined function and the
  /// int part is the captured region kind, 'CR_Default' etc.
  llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind;

  /// The record for captured variables, a RecordDecl or CXXRecordDecl.
  RecordDecl *TheRecordDecl = nullptr;

  /// Construct a captured statement.
  CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
               ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);

  /// Construct an empty captured statement.
  CapturedStmt(EmptyShell Empty, unsigned NumCaptures);

  // The capture-init expressions and the captured statement are stored in
  // trailing storage directly after this object (hence 'this + 1').
  Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); }

  Stmt *const *getStoredStmts() const {
    return reinterpret_cast<Stmt *const *>(this + 1);
  }

  Capture *getStoredCaptures() const;

  void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }

public:
  friend class ASTStmtReader;

  static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
                              CapturedRegionKind Kind,
                              ArrayRef<Capture> Captures,
                              ArrayRef<Expr *> CaptureInits,
                              CapturedDecl *CD, RecordDecl *RD);

  static CapturedStmt *CreateDeserialized(const ASTContext &Context,
                                          unsigned NumCaptures);

  /// Retrieve the statement being captured.
  Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
  const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; }

  /// Retrieve the outlined function declaration.
  CapturedDecl *getCapturedDecl();
  const CapturedDecl *getCapturedDecl() const;

  /// Set the outlined function declaration.
  void setCapturedDecl(CapturedDecl *D);

  /// Retrieve the captured region kind.
  CapturedRegionKind getCapturedRegionKind() const;

  /// Set the captured region kind.
  void setCapturedRegionKind(CapturedRegionKind Kind);

  /// Retrieve the record declaration for captured variables.
  const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }

  /// Set the record declaration for captured variables.
  void setCapturedRecordDecl(RecordDecl *D) {
    assert(D && "null RecordDecl");
    TheRecordDecl = D;
  }

  /// True if this variable has been captured.
  bool capturesVariable(const VarDecl *Var) const;

  /// An iterator that walks over the captures.
  using capture_iterator = Capture *;
  using const_capture_iterator = const Capture *;
  using capture_range = llvm::iterator_range<capture_iterator>;
  using capture_const_range = llvm::iterator_range<const_capture_iterator>;

  capture_range captures() {
    return capture_range(capture_begin(), capture_end());
  }
  capture_const_range captures() const {
    return capture_const_range(capture_begin(), capture_end());
  }

  /// Retrieve an iterator pointing to the first capture.
  capture_iterator capture_begin() { return getStoredCaptures(); }
  const_capture_iterator capture_begin() const { return getStoredCaptures(); }

  /// Retrieve an iterator pointing past the end of the sequence of
  /// captures.
  capture_iterator capture_end() const {
    return getStoredCaptures() + NumCaptures;
  }

  /// Retrieve the number of captures, including 'this'.
  unsigned capture_size() const { return NumCaptures; }

  /// Iterator that walks over the capture initialization arguments.
  using capture_init_iterator = Expr **;
  using capture_init_range = llvm::iterator_range<capture_init_iterator>;

  /// Const iterator that walks over the capture initialization
  /// arguments.
  using const_capture_init_iterator = Expr *const *;
  using const_capture_init_range =
      llvm::iterator_range<const_capture_init_iterator>;

  capture_init_range capture_inits() {
    return capture_init_range(capture_init_begin(), capture_init_end());
  }

  const_capture_init_range capture_inits() const {
    return const_capture_init_range(capture_init_begin(), capture_init_end());
  }

  /// Retrieve the first initialization argument.
  capture_init_iterator capture_init_begin() {
    return reinterpret_cast<Expr **>(getStoredStmts());
  }

  const_capture_init_iterator capture_init_begin() const {
    return reinterpret_cast<Expr *const *>(getStoredStmts());
  }

  /// Retrieve the iterator pointing one past the last initialization
  /// argument.
  capture_init_iterator capture_init_end() {
    return capture_init_begin() + NumCaptures;
  }

  const_capture_init_iterator capture_init_end() const {
    return capture_init_begin() + NumCaptures;
  }

  SourceLocation getBeginLoc() const LLVM_READONLY {
    return getCapturedStmt()->getBeginLoc();
  }

  SourceLocation getEndLoc() const LLVM_READONLY {
    return getCapturedStmt()->getEndLoc();
  }

  SourceRange getSourceRange() const LLVM_READONLY {
    return getCapturedStmt()->getSourceRange();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CapturedStmtClass;
  }

  child_range children();
  const_child_range children() const;
};

} // namespace clang

#endif // LLVM_CLANG_AST_STMT_H
threading_utils.h
/*!
 * Copyright 2015-2019 by Contributors
 * \file common.h
 * \brief Threading utilities
 */
#ifndef XGBOOST_COMMON_THREADING_UTILS_H_
#define XGBOOST_COMMON_THREADING_UTILS_H_

#include <dmlc/common.h>
#include <dmlc/omp.h>

#include <algorithm>
#include <limits>
#include <type_traits>  // std::is_signed
#include <vector>

#include "xgboost/logging.h"

#if !defined(_OPENMP)
// Serial build: stub so callers can query the thread limit unconditionally.
extern "C" {
inline int32_t omp_get_thread_limit() __GOMP_NOTHROW { return 1; }  // NOLINT
}
#endif  // !defined(_OPENMP)

// MSVC doesn't implement the thread limit.
#if defined(_OPENMP) && defined(_MSC_VER)
extern "C" {
inline int32_t omp_get_thread_limit() { return std::numeric_limits<int32_t>::max(); }  // NOLINT
}
#endif  // defined(_MSC_VER)

namespace xgboost {
namespace common {

// Represent simple range of indexes [begin, end)
// Inspired by tbb::blocked_range
class Range1d {
 public:
  // Half-open range; requires begin < end (checked).
  Range1d(size_t begin, size_t end): begin_(begin), end_(end) {
    CHECK_LT(begin, end);
  }

  size_t begin() const {  // NOLINT
    return begin_;
  }

  size_t end() const {  // NOLINT
    return end_;
  }

 private:
  size_t begin_;
  size_t end_;
};


// Split 2d space to balanced blocks
// Implementation of the class is inspired by tbb::blocked_range2d
// However, TBB provides only (n x m) 2d range (matrix) separated by blocks. Example:
// [ 1,2,3 ]
// [ 4,5,6 ]
// [ 7,8,9 ]
// But the class is able to work with different sizes in each 'row'. Example:
// [ 1,2 ]
// [ 3,4,5,6 ]
// [ 7,8,9]
// If grain_size is 2: It produces following blocks:
// [1,2], [3,4], [5,6], [7,8], [9]
// The class helps to process data in several tree nodes (non-balanced usually) in parallel
// Using nested parallelism (by nodes and by data in each node)
// it helps to improve CPU resources utilization
class BlockedSpace2d {
 public:
  // Example of space:
  // [ 1,2 ]
  // [ 3,4,5,6 ]
  // [ 7,8,9]
  // BlockedSpace2d will create following blocks (tasks) if grain_size=2:
  // 1-block: first_dimension = 0, range of indexes in a 'row' = [0,2) (includes [1,2] values)
  // 2-block: first_dimension = 1, range of indexes in a 'row' = [0,2) (includes [3,4] values)
  // 3-block: first_dimension = 1, range of indexes in a 'row' = [2,4) (includes [5,6] values)
  // 4-block: first_dimension = 2, range of indexes in a 'row' = [0,2) (includes [7,8] values)
  // 5-block: first_dimension = 2, range of indexes in a 'row' = [2,3) (includes [9] values)
  // Arguments:
  // dim1 - size of the first dimension in the space
  // getter_size_dim2 - functor to get the second dimensions for each 'row' by row-index
  // grain_size - max size of produced blocks
  template<typename Func>
  BlockedSpace2d(size_t dim1, Func getter_size_dim2, size_t grain_size) {
    for (size_t i = 0; i < dim1; ++i) {
      const size_t size = getter_size_dim2(i);
      // Number of blocks for this row, rounded up.
      const size_t n_blocks = size/grain_size + !!(size % grain_size);
      for (size_t iblock = 0; iblock < n_blocks; ++iblock) {
        const size_t begin = iblock * grain_size;
        const size_t end = std::min(begin + grain_size, size);
        AddBlock(i, begin, end);
      }
    }
  }

  // Amount of blocks(tasks) in a space
  size_t Size() const {
    return ranges_.size();
  }

  // get index of the first dimension of i-th block(task)
  size_t GetFirstDimension(size_t i) const {
    CHECK_LT(i, first_dimension_.size());
    return first_dimension_[i];
  }

  // get a range of indexes for the second dimension of i-th block(task)
  Range1d GetRange(size_t i) const {
    CHECK_LT(i, ranges_.size());
    return ranges_[i];
  }

 private:
  // Record one block: its row index and the [begin, end) span within the row.
  void AddBlock(size_t first_dimension, size_t begin, size_t end) {
    first_dimension_.push_back(first_dimension);
    ranges_.emplace_back(begin, end);
  }

  std::vector<Range1d> ranges_;
  std::vector<size_t> first_dimension_;
};


// Wrapper to implement nested parallelism with simple omp parallel for
// Each thread takes a contiguous chunk of the flattened block list;
// exceptions thrown by func are captured and rethrown on the calling thread.
template <typename Func>
void ParallelFor2d(const BlockedSpace2d& space, int nthreads, Func func) {
  const size_t num_blocks_in_space = space.Size();
  CHECK_GE(nthreads, 1);

  dmlc::OMPException exc;
#pragma omp parallel num_threads(nthreads)
  {
    exc.Run([&]() {
      size_t tid = omp_get_thread_num();
      // Per-thread chunk size, rounded up so all blocks are covered.
      size_t chunck_size =
          num_blocks_in_space / nthreads + !!(num_blocks_in_space % nthreads);

      size_t begin = chunck_size * tid;
      size_t end = std::min(begin + chunck_size, num_blocks_in_space);
      for (auto i = begin; i < end; i++) {
        func(space.GetFirstDimension(i), space.GetRange(i));
      }
    });
  }
  exc.Rethrow();
}

/**
 * OpenMP schedule
 */
struct Sched {
  enum {
    kAuto,
    kDynamic,
    kStatic,
    kGuided,
  } sched;
  size_t chunk{0};  // chunk size; 0 means "let OpenMP choose".

  Sched static Auto() { return Sched{kAuto}; }
  Sched static Dyn(size_t n = 0) { return Sched{kDynamic, n}; }
  Sched static Static(size_t n = 0) { return Sched{kStatic, n}; }
  Sched static Guided() { return Sched{kGuided}; }
};

// Parallel loop over [0, size) with an explicit OpenMP schedule.
// Exceptions from fn are captured per-iteration and rethrown after the loop.
template <typename Index, typename Func>
void ParallelFor(Index size, int32_t n_threads, Sched sched, Func fn) {
#if defined(_MSC_VER)
  // msvc doesn't support unsigned integer as openmp index.
  using OmpInd = std::conditional_t<std::is_signed<Index>::value, Index, omp_ulong>;
#else
  using OmpInd = Index;
#endif
  OmpInd length = static_cast<OmpInd>(size);

  dmlc::OMPException exc;
  switch (sched.sched) {
  case Sched::kAuto: {
#pragma omp parallel for num_threads(n_threads)
    for (OmpInd i = 0; i < length; ++i) {
      exc.Run(fn, i);
    }
    break;
  }
  case Sched::kDynamic: {
    if (sched.chunk == 0) {
#pragma omp parallel for num_threads(n_threads) schedule(dynamic)
      for (OmpInd i = 0; i < length; ++i) {
        exc.Run(fn, i);
      }
    } else {
#pragma omp parallel for num_threads(n_threads) schedule(dynamic, sched.chunk)
      for (OmpInd i = 0; i < length; ++i) {
        exc.Run(fn, i);
      }
    }
    break;
  }
  case Sched::kStatic: {
    if (sched.chunk == 0) {
#pragma omp parallel for num_threads(n_threads) schedule(static)
      for (OmpInd i = 0; i < length; ++i) {
        exc.Run(fn, i);
      }
    } else {
#pragma omp parallel for num_threads(n_threads) schedule(static, sched.chunk)
      for (OmpInd i = 0; i < length; ++i) {
        exc.Run(fn, i);
      }
    }
    break;
  }
  case Sched::kGuided: {
#pragma omp parallel for num_threads(n_threads) schedule(guided)
    for (OmpInd i = 0; i < length; ++i) {
      exc.Run(fn, i);
    }
    break;
  }
  }
  exc.Rethrow();
}

// Convenience overload: static schedule with default chunking.
template <typename Index, typename Func>
void ParallelFor(Index size, size_t n_threads, Func fn) {
  ParallelFor(size, n_threads, Sched::Static(), fn);
}

// FIXME(jiamingy): Remove this function to get rid of `omp_set_num_threads`, which sets a
// global variable in runtime and affects other programs in the same process.
template <typename Index, typename Func>
void ParallelFor(Index size, Func fn) {
  ParallelFor(size, omp_get_max_threads(), Sched::Static(), fn);
}

// !defined(_OPENMP)
// Query the OpenMP thread limit (>= 1), using the stubs above when needed.
inline int32_t OmpGetThreadLimit() {
  int32_t limit = omp_get_thread_limit();
  CHECK_GE(limit, 1) << "Invalid thread limit for OpenMP.";
  return limit;
}

/* \brief Configure parallel threads.
 *
 * \param p_threads Number of threads, when it's less than or equal to 0, this function
 *        will change it to number of process on system.
 *
 * \return Global openmp max threads before configuration.
 */
inline int32_t OmpSetNumThreads(int32_t* p_threads) {
  auto& threads = *p_threads;
  int32_t nthread_original = omp_get_max_threads();
  if (threads <= 0) {
    threads = omp_get_num_procs();
  }
  threads = std::min(threads, OmpGetThreadLimit());
  omp_set_num_threads(threads);
  return nthread_original;
}

// Same as OmpSetNumThreads, but a non-positive request falls back to the
// current omp_get_max_threads() instead of the processor count.
inline int32_t OmpSetNumThreadsWithoutHT(int32_t* p_threads) {
  auto& threads = *p_threads;
  int32_t nthread_original = omp_get_max_threads();
  if (threads <= 0) {
    threads = nthread_original;
  }
  threads = std::min(threads, OmpGetThreadLimit());
  omp_set_num_threads(threads);
  return nthread_original;
}

// Clamp a requested thread count into [1, thread limit]; a non-positive
// request resolves to min(num procs, max threads). Does not mutate globals.
inline int32_t OmpGetNumThreads(int32_t n_threads) {
  if (n_threads <= 0) {
    n_threads = std::min(omp_get_num_procs(), omp_get_max_threads());
  }
  n_threads = std::min(n_threads, OmpGetThreadLimit());
  n_threads = std::max(n_threads, 1);
  return n_threads;
}
}  // namespace common
}  // namespace xgboost

#endif  // XGBOOST_COMMON_THREADING_UTILS_H_
DRB044-adi-tile-no.c
/**
 * adi.c: This file is part of the PolyBench/C 3.2 test suite.
 * Alternating Direction Implicit solver with tiling and nested SIMD.
 *
 * Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
 * Web address: http://polybench.sourceforge.net
 * License: /LICENSE.OSU.txt
 */
/* NOTE(review): the loop nests below appear machine-generated (presumably by a
   polyhedral compiler; the (x*16 < 0 ? ... : x/16) expressions look like
   expanded floor-division on a tile size of 16).  Do not hand-edit the bounds;
   they encode the tiling and are part of the benchmark's race-free reference. */
#include "omprace.h"
#include <omp.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include "polybench/polybench.h"
/* Include benchmark-specific header. */
/* Default data type is double, default size is 10x1024x1024. */
#include "polybench/adi.h"

/* Array initialization: fills X, A and B with deterministic values derived
   from the indices, using a 16x16-tiled, OpenMP-parallel, SIMD-inner nest. */
static void init_array(int n,double X[500 + 0][500 + 0],double A[500 + 0][500 + 0],double B[500 + 0][500 + 0])
{
  //int i;
  //int j;
{
    int c1;
    int c3;
    int c2;
    int c4;
    if (n >= 1) {
/* Tiles of rows are distributed across threads; c4/c2/c3 are per-thread. */
#pragma omp parallel for private(c4, c2, c3)
      for (c1 = 0; c1 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c1++) {
        for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {
          for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < n + -1?16 * c1 + 15 : n + -1)); c3++) {
#pragma omp simd
            for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c4++) {
              X[c3][c4] = (((double )c3) * (c4 + 1) + 1) / n;
              A[c3][c4] = (((double )c3) * (c4 + 2) + 2) / n;
              B[c3][c4] = (((double )c3) * (c4 + 3) + 3) / n;
            }
          }
        }
      }
    }
  }
}

/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output. */
static void print_array(int n,double X[500 + 0][500 + 0])
{
  int i;
  int j;
  for (i = 0; i < n; i++)
    for (j = 0; j < n; j++) {
      fprintf(stderr,"%0.2lf ",X[i][j]);
      if ((i * 500 + j) % 20 == 0)
        fprintf(stderr,"\n");
    }
  fprintf(stderr,"\n");
}

/* Main computational kernel. The whole function will be timed,
   including the call and return.
   Per time step: a row-direction sweep (forward elimination on B and X, then
   back substitution, then normalization of column n-1), followed by the
   symmetric column-direction sweep.  Parallelism is over independent
   rows/columns (outer c2 tile loop); inner loops are SIMD-vectorized. */
static void kernel_adi(int tsteps,int n,double X[500 + 0][500 + 0],double A[500 + 0][500 + 0],double B[500 + 0][500 + 0])
{
  //int t;
  //int i1;
  //int i2;
//#pragma scop
{
    int c0;
    int c2;
    int c8;
    int c9;
    int c15;
    if (n >= 1 && tsteps >= 1) {
      for (c0 = 0; c0 <= tsteps + -1; c0++) {
        if (n >= 2) {
/* Row sweep: each thread owns a tile of rows (indexed by c15). */
#pragma omp parallel for private(c15, c9, c8)
          for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {
            /* Forward elimination on B along each row. */
            for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) {
              for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) {
#pragma omp simd
                for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
                  B[c15][c9] = B[c15][c9] - A[c15][c9] * A[c15][c9] / B[c15][c9 - 1];
                }
              }
            }
            /* Forward elimination on X along each row. */
            for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) {
              for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) {
#pragma omp simd
                for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
                  X[c15][c9] = X[c15][c9] - X[c15][c9 - 1] * A[c15][c9] / B[c15][c9 - 1];
                }
              }
            }
            /* Back substitution (c9 counts from the high end via n - c9 - 2). */
            for (c8 = 0; c8 <= (((n + -3) * 16 < 0?((16 < 0?-((-(n + -3) + 16 + 1) / 16) : -((-(n + -3) + 16 - 1) / 16))) : (n + -3) / 16)); c8++) {
              for (c9 = 16 * c8; c9 <= ((16 * c8 + 15 < n + -3?16 * c8 + 15 : n + -3)); c9++) {
#pragma omp simd
                for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
                  X[c15][n - c9 - 2] = (X[c15][n - 2 - c9] - X[c15][n - 2 - c9 - 1] * A[c15][n - c9 - 3]) / B[c15][n - 3 - c9];
                }
              }
            }
          }
        }
/* Normalize the last column of each row. */
#pragma omp parallel for private(c15)
        for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {
#pragma omp simd
          for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
            X[c15][n - 1] = X[c15][n - 1] / B[c15][n - 1];
          }
        }
        if (n >= 2) {
/* Column sweep: same three phases with the roles of the indices swapped. */
#pragma omp parallel for private(c15, c9, c8)
          for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {
            for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) {
              for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) {
#pragma omp simd
                for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
                  B[c9][c15] = B[c9][c15] - A[c9][c15] * A[c9][c15] / B[c9 - 1][c15];
                }
              }
            }
            for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) {
              for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) {
#pragma omp simd
                for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
                  X[c9][c15] = X[c9][c15] - X[c9 - 1][c15] * A[c9][c15] / B[c9 - 1][c15];
                }
              }
            }
            for (c8 = 0; c8 <= (((n + -3) * 16 < 0?((16 < 0?-((-(n + -3) + 16 + 1) / 16) : -((-(n + -3) + 16 - 1) / 16))) : (n + -3) / 16)); c8++) {
              for (c9 = 16 * c8; c9 <= ((16 * c8 + 15 < n + -3?16 * c8 + 15 : n + -3)); c9++) {
#pragma omp simd
                for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
                  X[n - 2 - c9][c15] = (X[n - 2 - c9][c15] - X[n - c9 - 3][c15] * A[n - 3 - c9][c15]) / B[n - 2 - c9][c15];
                }
              }
            }
          }
        }
/* Normalize the last row of each column. */
#pragma omp parallel for private(c15)
        for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {
#pragma omp simd
          for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
            X[n - 1][c15] = X[n - 1][c15] / B[n - 1][c15];
          }
        }
      }
    }
  }
//#pragma endscop
}

/* Driver: allocate, initialize, time the kernel, optionally print (guarded so
   it is never taken in practice, only to defeat dead-code elimination). */
int main(int argc,char **argv)
{
  omprace_init();
/* Retrieve problem size. */
  int n = 500;
  int tsteps = 10;
/* Variable declaration/allocation. */
  double (*X)[500 + 0][500 + 0];
  X = ((double (*)[500 + 0][500 + 0])(polybench_alloc_data(((500 + 0) * (500 + 0)),(sizeof(double )))));
  ;
  double (*A)[500 + 0][500 + 0];
  A = ((double (*)[500 + 0][500 + 0])(polybench_alloc_data(((500 + 0) * (500 + 0)),(sizeof(double )))));
  ;
  double (*B)[500 + 0][500 + 0];
  B = ((double (*)[500 + 0][500 + 0])(polybench_alloc_data(((500 + 0) * (500 + 0)),(sizeof(double )))));
  ;
/* Initialize array(s). */
  init_array(n, *X, *A, *B);
/* Start timer. */
  polybench_timer_start();
  ;
/* Run kernel. */
  kernel_adi(tsteps,n, *X, *A, *B);
/* Stop and print timer. */
  polybench_timer_stop();
  ;
  polybench_timer_print();
  ;
/* Prevent dead-code elimination. All live-out data must be printed
   by the function call in argument. */
  if (argc > 42 && !strcmp(argv[0],""))
    print_array(n, *X);
/* Be clean. */
  free(((void *)X));
  ;
  free(((void *)A));
  ;
  free(((void *)B));
  ;
  omprace_fini();
  return 0;
}
GB_unaryop__abs_bool_int32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_bool_int32
// op(A') function:  GB_tran__abs_bool_int32

// C type:   bool
// A type:   int32_t
// cast:     bool cij = (bool) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int32_t

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, aij) \
    bool z = (bool) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_BOOL || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise over the anz entries; iterations are independent, so the loop
// is statically scheduled across nthreads OpenMP threads.
GrB_Info GB_unop__abs_bool_int32
(
    bool *Cx,       // Cx and Ax may be aliased
    int32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Body is the shared template GB_unaryop_transpose.c, specialized by the
// GB_* macros defined above (phase 2 of 2).
GrB_Info GB_tran__abs_bool_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
test.c
/* * Copyright (c) 2009, 2010, 2011, ETH Zurich. * All rights reserved. * * This file is distributed under the terms in the attached LICENSE file. * If you do not find this file, copies can be found by writing to: * ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group. */ #include <assert.h> #include <stdbool.h> #include <stdlib.h> #include <stdio.h> #include <time.h> #include <assert.h> #include <stdint.h> #include <omp.h> #include <barrelfish/barrelfish.h> #include <bench/bench.h> #include <trace/trace.h> #include <trace_definitions/trace_defs.h> #include <inttypes.h> #define STACK_SIZE (64 * 1024) int main(int argc, char *argv[]) { volatile uint64_t workcnt = 0; int nthreads; debug_printf("bomptest started.\n"); bench_init(); #if CONFIG_TRACE errval_t err = trace_control(TRACE_EVENT(TRACE_SUBSYS_ROUTE, TRACE_EVENT_ROUTE_BENCH_START, 0), TRACE_EVENT(TRACE_SUBSYS_ROUTE, TRACE_EVENT_ROUTE_BENCH_STOP, 0), 0); assert(err_is_ok(err)); #endif if(argc == 2) { nthreads = atoi(argv[1]); bomp_bomp_init(nthreads); omp_set_num_threads(nthreads); } else { assert(!"Specify number of threads"); } trace_event(TRACE_SUBSYS_ROUTE, TRACE_EVENT_ROUTE_BENCH_START, 0); uint64_t start = bench_tsc(); debug_printf("bomp_test: parallel loop"); #pragma omp parallel while(rdtsc() < start + 805000000ULL) { workcnt++; } uint64_t end = bench_tsc(); trace_event(TRACE_SUBSYS_ROUTE, TRACE_EVENT_ROUTE_BENCH_STOP, 0); printf("done. 
time taken: %" PRIu64 " cycles.\n", end - start); uint32_t *src = calloc(1024, sizeof(uint32_t)); uint32_t *dst = calloc(1024, sizeof(uint32_t)); printf("Test 2...\n"); for (int i = 0; i < 1024; ++i) { src[i] = i+1; } for (int i = 0; i < 1024; ++i) { assert(src[i] != dst[i]); } printf("parallel for..\n"); #pragma omp parallel for (int i = 0; i < 1024; ++i) { dst[i] = src[i]; } printf("Verification..."); for (int i = 0; i < 1024; ++i) { assert(src[i] == dst[i]); } printf("OK.\n"); #if CONFIG_TRACE char *buf = malloc(4096*4096); trace_dump(buf, 4096*4096, NULL); printf("%s\n", buf); #endif for(;;); return 0; }
lloyd_parallel_partitioner.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Carlos A. Roig // #if !defined(KRATOS_LLOYD_PARALLEL_PARTITIONER_H_INCLUDED) #define KRATOS_LLOYD_PARALLEL_PARTITIONER_H_INCLUDED // System includes #include <string> #include <iostream> #include <cmath> #include <algorithm> #include <time.h> #include <stdio.h> #include <stdlib.h> // Project includes #include "mpi.h" #include "spatial_containers/tree.h" #include "spatial_containers/cell.h" // Application includes #include "custom_utilities/bins_dynamic_objects_mpi.h" #include "processes/graph_coloring_process.h" // Graph coloring #include "processes/graph_coloring_process.h" // TODO: This procedure seems unused. Maybe can be removed. int compareFunction(const void * a, const void * b) { return ( *(int*)a - *(int*)b ); } namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /// Short class definition. /** Detail class definition. 
*/ template<class TConfigure> class LloydParallelPartitioner { public: ///@name Type Definitions ///@{ enum { Dimension = TConfigure::Dimension }; // Point typedef TConfigure Configure; typedef typename Configure::PointType PointType; // typedef typename TConfigure::ResultNumberIteratorType ResultNumberIteratorType; // Container typedef typename Configure::PointerType PointerType; typedef typename Configure::ContainerType ContainerType; typedef typename ContainerType::iterator IteratorType; typedef typename Configure::DistanceIteratorType DistanceIteratorType; typedef typename Configure::ResultContainerType ResultContainerType; typedef typename Configure::ElementsContainerType ElementsContainerType; // typedef typename Configure::ResultPointerType ResultPointerType; typedef typename Configure::ResultIteratorType ResultIteratorType; typedef typename Configure::PointerContactType PointerContactType; // typedef typename Configure::PointerTypeIterator PointerTypeIterator; typedef GlobalPointersVector<Element> ParticleWeakVector; // Search Structures typedef Cell<Configure> CellType; typedef std::vector<CellType> CellContainerType; typedef typename CellContainerType::iterator CellContainerIterator; typedef TreeNode<Dimension, PointType, PointerType, IteratorType, typename Configure::DistanceIteratorType> TreeNodeType; typedef typename TreeNodeType::CoordinateType CoordinateType; // double typedef typename TreeNodeType::SizeType SizeType; // std::size_t typedef typename TreeNodeType::IndexType IndexType; // std::size_t typedef Tvector<IndexType,Dimension> IndexArray; typedef Tvector<SizeType,Dimension> SizeArray; typedef Tvector<CoordinateType,Dimension> CoordinateArray; ///Contact Pair typedef typename Configure::ContainerContactType ContainerContactType; typedef typename Configure::IteratorContactType IteratorContactType; ///typedef TreeNodeType LeafType; typedef typename TreeNodeType::IteratorIteratorType IteratorIteratorType; typedef typename 
TreeNodeType::SearchStructureType SearchStructureType; // Graph coloring process type typedef typename GraphColoringProcess::GraphType GraphType; /// Pointer definition of BinsObjectDynamic KRATOS_CLASS_POINTER_DEFINITION(LloydParallelPartitioner); ///@} ///@name Life Cycle ///@{ /// Default constructor. LloydParallelPartitioner(IteratorType const& ObjectsBegin, IteratorType const& ObjectsEnd) : mNumberOfObjects(ObjectsEnd-ObjectsBegin), mObjectsBegin(ObjectsBegin), mObjectsEnd(ObjectsEnd) { MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); // std::cout << "Begining partitioning" << std::endl; mpPartitionBins = new BinsObjectDynamicMpi<TConfigure>(mObjectsBegin, mObjectsEnd); mNumberOfCells = mpPartitionBins->GetCellContainer().size(); if(mNumberOfCells < mpi_size) { KRATOS_ERROR << "Error: Number of cells in the bins must be at least equal to mpi_size. " << mNumberOfCells << std::endl; } if(mNumberOfCells % mpi_size) { // KRATOS_WARNING << "Warning: Number of cells is not multiple of mpi_size. Heavy imbalance may occur." << std::endl; std::cout << "Warning: Number of cells is not multiple of mpi_size. Heavy imbalance may occur. " << mNumberOfCells << std::endl; } if(mNumberOfCells < 10 * mpi_size) { // KRATOS_WARNING << "Warning: Number of cells is small. Partition Shape may be sub-optimal." << std::endl; std::cout << "Warning: Number of cells is small. Partition Shape may be sub-optimal. 
" << mNumberOfCells << std::endl; } } double ReduceMaxRadius(IteratorType const& ObjectsBegin, IteratorType const& ObjectsEnd) { // Max Radius Ugly fix double local_max_radius = 0.0f; double max_radius = 0.0f; for (IteratorType ObjectItr = ObjectsBegin; ObjectItr != ObjectsEnd; ObjectItr++) { const double Radius = TConfigure::GetObjectRadius(*ObjectItr, 0.0f); if(Radius > local_max_radius) local_max_radius = Radius; } MPI_Allreduce(&local_max_radius, &max_radius, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); return max_radius; } void SerialPartition() { std::vector<int> mpiSendObjectsPerCell(mNumberOfCells, 0); std::vector<int> mpiRecvObjectsPerCell(mNumberOfCells, 0); std::vector<int> CellPartition(mNumberOfCells, 0); std::vector<int> ObjectsPerPartition(mpi_size, 0); int mpiSendNumberOfObjects = mObjectsEnd - mObjectsBegin; int mpiRecvNumberOfObjects = 0; PointType ObjectCenter; PointType Low, High; SearchStructureType Box; // Calculate objects per cell for(std::size_t i = 0; i < (std::size_t)mNumberOfObjects; i++) { auto ObjectItr = mObjectsBegin + i; TConfigure::CalculateCenter(*ObjectItr, ObjectCenter); auto cellId = mpPartitionBins->CalculateIndex(ObjectCenter); mpiSendObjectsPerCell[cellId]++; } MPI_Allreduce(&mpiSendNumberOfObjects, &mpiRecvNumberOfObjects, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); MPI_Allreduce(&mpiSendObjectsPerCell[0], &mpiRecvObjectsPerCell[0], mNumberOfCells, MPI_INT, MPI_SUM, MPI_COMM_WORLD); //int MeanObjectsPerPartition = mpiRecvNumberOfObjects / mpi_size; // std::cout << "mpiRecvNumberOfObjects: " << mpiRecvNumberOfObjects << " MeanObjectsPerPartition: " << MeanObjectsPerPartition << std::endl; // Assing each cell to the closest partition center // TODO: this is currently very unbalanced for(std::size_t cellId = 0; cellId < (std::size_t) mNumberOfCells; cellId++) { ObjectsPerPartition[cellId] += mpiRecvObjectsPerCell[cellId]; CellPartition[cellId] = cellId; } std::cout << "Partititon " << mpi_rank << ": " << ObjectsPerPartition[mpi_rank] << 
std::endl; // Assign the partition to the objects based on their cell for(std::size_t i = 0; i < (std::size_t) mNumberOfObjects; i++) { auto ObjectItr = mObjectsBegin + i; TConfigure::CalculateCenter(*ObjectItr, ObjectCenter); auto cellId = mpPartitionBins->CalculateIndex(ObjectCenter); (*ObjectItr)->GetValue(PARTITION_INDEX) = CellPartition[cellId]; for (unsigned int j = 0; j < (*ObjectItr)->GetGeometry().PointsNumber(); j++) { ModelPart::NodeType::Pointer NodePtr = (*ObjectItr)->GetGeometry().pGetPoint(j); NodePtr->FastGetSolutionStepValue(PARTITION_INDEX) = CellPartition[cellId]; } } // std::cout << "Ending partitioning" << std::endl; } void VoronoiiPartition() { std::vector<int> mpiSendObjectsPerCell(mNumberOfCells, 0); std::vector<int> mpiRecvObjectsPerCell(mNumberOfCells, 0); std::vector<int> CellPartition(mNumberOfCells, 0); std::vector<double> CellDistances(mNumberOfCells, std::numeric_limits<double>::max()); std::vector<double> mpiSendCellCenter(mNumberOfCells * Dimension, 0.0f); std::vector<double> mpiRecvCellCenter(mNumberOfCells * Dimension, 0.0f); std::vector<int> CellsPerPartition(mpi_size, 0); std::vector<int> ObjectsPerPartition(mpi_size, 0); std::vector<PointType> PartitionCenters(mpi_size); std::vector<double> mpiSendPartCenter(mpi_size * Dimension, 0.0f); std::vector<double> mpiRecvPartCenter(mpi_size * Dimension, 0.0f); std::vector<int> mpiSendPartNum(mpi_size, 0); std::vector<int> mpiRecvPartNum(mpi_size, 0); PointType ObjectCenter; // 1 - Calculate the centers of the cells based on the objects inside // TODO: Parallelize this (non-trivial) for(std::size_t i = 0; i < mNumberOfObjects; i++) { auto ObjectItr = mObjectsBegin + i; TConfigure::CalculateCenter(*ObjectItr, ObjectCenter); auto CellIndex = mpPartitionBins->CalculateIndex(ObjectCenter); mpiSendObjectsPerCell[CellIndex]++; for(int d = 0; d < Dimension; d++) { mpiSendCellCenter[CellIndex*Dimension+d] += ObjectCenter[d]; } } // 1.1 - Communicate the number of objects per cell and the local 
sum of object coordinates MPI_Allreduce(&mpiSendObjectsPerCell[0], &mpiRecvObjectsPerCell[0], mNumberOfCells * Dimension, MPI_INT, MPI_SUM, MPI_COMM_WORLD); MPI_Allreduce(&mpiSendCellCenter[0], &mpiRecvCellCenter[0], mNumberOfCells * Dimension, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); // 1.2 - Obtain the wheighted center of each cell with the data of all processes #pragma omp parallel for for(std::size_t cellId = 0; cellId < mNumberOfCells; cellId++) { for(int d = 0; d < Dimension; d++) { mpiRecvCellCenter[cellId*Dimension+d] /= mpiRecvObjectsPerCell[cellId]; } } // 2 - Assign a random origin to each partition. auto minPoint = mpPartitionBins->GetMinPoint(); auto maxPoint = mpPartitionBins->GetMaxPoint(); auto boxSize = maxPoint - minPoint; // Change this if we want real random // !!!!!MAKE SURE THIS IS THE SAME ON EVERY PARTITION OR IT WON'T WORK!!!!! std::srand(256); for(int i = 0; i < mpi_size; i++) { for(int d = 0; d < Dimension; d++) { PartitionCenters[i][d] = minPoint[d] + ((double)std::rand() / (double)RAND_MAX) * boxSize[d]; } } // While not converged auto MaxIterations = 1e1; for(std::size_t iterations = 0; iterations < MaxIterations; iterations++ ) { // Assing each cell to the closest partition center for(std::size_t cellId = 0; cellId < mNumberOfCells; cellId++) { if(mpiRecvObjectsPerCell[cellId] != 0) { for(int i = 0; i < mpi_size; i++) { double cubeDistance = 0.0f; for(int d = 0; d < Dimension; d++) { // Manhattan distance shoudl prevent problems with the discretization of the space cubeDistance += std::abs(mpiRecvCellCenter[cellId*Dimension+d] - PartitionCenters[i][d]); } if(cubeDistance < CellDistances[cellId]) { CellDistances[cellId] = cubeDistance; CellPartition[cellId] = i; } } } } // At this point no synch should be needed // Update the center of the partitions for(int i = 0; i < mpi_size; i++) { CellsPerPartition[i] = 0; ObjectsPerPartition[i] = 0; for(int d = 0; d < Dimension; d++) { PartitionCenters[i][d] = 0.0f; } } // if(mpi_rank == 0) { // 
std::cout << mNumberOfCells << std::endl; // } for(std::size_t cellId = 0; cellId < mNumberOfCells; cellId++) { if(mpiRecvObjectsPerCell[cellId] != 0) { CellsPerPartition[CellPartition[cellId]]++; ObjectsPerPartition[CellPartition[cellId]] += mpiRecvObjectsPerCell[cellId]; for(int d = 0; d < Dimension; d++) { PartitionCenters[CellPartition[cellId]][d] += mpiRecvCellCenter[cellId*Dimension+d]; } } // if(mpi_rank == 0) { // for(int i = 0; i < mpi_size; i++) { // std::cout << "Iteration: " << cellId << " Partition " << i << " has " << CellsPerPartition[i] << " Cells" << std::endl; // } // } } for(std::size_t partId = 0; partId < mpi_size; partId++) { for(int d = 0; d < Dimension; d++) { PartitionCenters[partId][d] /= CellsPerPartition[partId]++; } } } if(mpi_rank == 0) { std::cout << mNumberOfCells << std::endl; for(int i = 0; i < mpi_size; i++) { std::cout << "Partition " << i << " has " << CellsPerPartition[i] << " Cells" << std::endl; } } // Assign the partition to the objects based on their cell for(std::size_t i = 0; i < mNumberOfObjects; i++) { auto ObjectItr = mObjectsBegin + i; TConfigure::CalculateCenter(*ObjectItr, ObjectCenter); auto CellIndex = mpPartitionBins->CalculateIndex(ObjectCenter); (*ObjectItr)->GetValue(PARTITION_INDEX) = CellPartition[CellIndex]; for (unsigned int i = 0; i < (*ObjectItr)->GetGeometry().PointsNumber(); i++) { ModelPart::NodeType::Pointer NodePtr = (*ObjectItr)->GetGeometry().pGetPoint(i); NodePtr->FastGetSolutionStepValue(PARTITION_INDEX) = CellPartition[CellIndex]; } } std::cout << "Ending partitioning" << std::endl; } void UpdateDomainGraph(IteratorType const& ObjectsBegin, IteratorType const& ObjectsEnd, GraphType & domainGraph) { PointType ObjectCenter; PointType Low, High; SearchStructureType Box; mObjectsBegin = ObjectsBegin; mObjectsEnd = ObjectsEnd; mNumberOfObjects = ObjectsEnd-ObjectsBegin; // Rebuild the bins free(mpPartitionBins); mpPartitionBins = new BinsObjectDynamicMpi<TConfigure>(mObjectsBegin, mObjectsEnd); // 
Assign the partition to the objects based on their cell double maxRadius = ReduceMaxRadius(mObjectsBegin, mObjectsEnd); for(std::size_t i = 0; i < (std::size_t) mNumberOfObjects; i++) { auto ObjectItr = mObjectsBegin + i; TConfigure::CalculateBoundingBox(*ObjectItr, Low, High); for(int i = 0; i < Dimension; i++) { Low[i] -= maxRadius; High[i] += maxRadius; } Box.Set( mpPartitionBins->CalculateCell(Low), mpPartitionBins->CalculateCell(High), mpPartitionBins->GetDivisions()); std::unordered_set<std::size_t> partitionSet; auto ObjectRadius = TConfigure::GetObjectRadius(*ObjectItr, 0.0f); mpPartitionBins->SearchPartitionInRadius(Box, *ObjectItr, partitionSet, ObjectRadius); std::vector<std::size_t> partitionList(partitionSet.begin(), partitionSet.end()); for(unsigned int i = 0; i < partitionList.size(); i++) { domainGraph(mpi_rank, mpi_rank) = 1; domainGraph(partitionList[i], mpi_rank) = 1; domainGraph(mpi_rank, partitionList[i]) = 1; } } } /// Destructor. virtual ~LloydParallelPartitioner() { delete mpPartitionBins; } ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ///@} ///@name MPI Variables ///@{ int mpi_rank; int mpi_size; int mNumberOfObjects; int mNumberOfCells; IteratorType mObjectsBegin; IteratorType mObjectsEnd; BinsObjectDynamicMpi<TConfigure> * mpPartitionBins; ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ inline void CreatePartition(SizeType number_of_threads, const SizeType number_of_rows, std::vector<SizeType>& partitions) { partitions.resize(number_of_threads+1); SizeType partition_size = number_of_rows / number_of_threads; partitions[0] = 0; partitions[number_of_threads] = number_of_rows; for(SizeType i = 1; i<number_of_threads; i++) { partitions[i] = partitions[i-1] + partition_size; } } ///@} ///@name Private Access 
///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} public: /// Assignment operator. LloydParallelPartitioner<TConfigure> & operator=(const LloydParallelPartitioner<TConfigure> & rOther) { mObjectsBegin = rOther.mObjectsBegin; mObjectsEnd = rOther.mObjectsEnd; mpPartitionBins = rOther.mpPartitionBins; return *this; } /// Copy constructor. LloydParallelPartitioner(const LloydParallelPartitioner& rOther) { *this = rOther; } }; // Class BinsObjectDynamic ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ /// input stream function template<class TConfigure> inline std::istream& operator >> (std::istream& rIStream, LloydParallelPartitioner<TConfigure>& rThis) { return rIStream; } /// output stream function template<class TConfigure> inline std::ostream& operator << (std::ostream& rOStream, const LloydParallelPartitioner<TConfigure> & rThis) { rThis.PrintInfo(rOStream); rOStream << std::endl; rThis.PrintData(rOStream); return rOStream; } ///@} } // namespace Kratos. #endif // KRATOS_LLOYD_PARALLEL_PARTITIONER_H_INCLUDED defined
Example_private.2.c
/*
* @@name:	private.2c
* @@type:	C
* @@compilable:	yes
* @@linkable:	no
* @@expect:	success
*/
/* OpenMP specification example: a variable named in a private() clause is
 * replaced by a per-thread copy inside the construct, but code called from
 * the region that refers to the *file-scope* variable of the same name is
 * outside the construct's lexical extent. */
int a;

void g(int k)
{
  a = k; /* Accessed in the region but outside of the construct;
          * therefore unspecified whether original or private list
          * item is modified. */
}

void f(int n)
{
  /* This local 'a' shadows the global; private(a) gives each thread its own
   * uninitialized copy of it inside the loop. */
  int a = 0;

  #pragma omp parallel for private(a)
  for (int i=1; i<n; i++) {
    a = i;
    g(a*2); /* Private copy of "a" */
  }
}
convert_ell_x_coo.c
#include "alphasparse/format.h"
#include <stdlib.h>
#include <alphasparse/opt.h>
#include <alphasparse/util.h>
#include <memory.h>
#include <stdio.h>
#include <inttypes.h>

/* Debug helper: dump a single-precision COO matrix to stdout. */
static void print_coo_s(const spmat_coo_s_t *mat)
{
    printf("nnz:%d, cols:%d, rows:%d\n", mat->nnz, mat->cols, mat->rows);
    for (ALPHA_INT i = 0; i < mat->nnz; i++)
    {
        printf("#%d, val:%f, row:%d, col:%d\n", i, mat->values[i], mat->row_indx[i], mat->col_indx[i]);
    }
    printf("=====================================\n\n");
}

/* Debug helper: dump a single-precision ELL matrix (column-major slots). */
static void print_ell_s(const spmat_ell_s_t *mat)
{
    printf("ld:%d, cols:%d, rows:%d\n", mat->ld, mat->cols, mat->rows);
    for (ALPHA_INT i = 0; i < mat->ld; i++)
    {
        for (ALPHA_INT j = 0; j < mat->rows; j++)
        {
            printf("%f ", mat->values[i * mat->rows + j]);
        }
        printf("\n");
    }
    printf("=====================================\n\n");
}

/*
 * Convert a COO matrix to ELL storage.  Slot i of row j is stored
 * column-major at values[i * rows + j]; unused slots are zero-filled.
 * Goes through an intermediate CSR matrix to get sorted per-row entries.
 */
alphasparse_status_t ONAME(const ALPHA_SPMAT_COO *source, ALPHA_SPMAT_ELL **dest)
{
    ALPHA_SPMAT_ELL *mat = alpha_malloc(sizeof(ALPHA_SPMAT_ELL));
    *dest = mat;

    ALPHA_SPMAT_CSR *csr;
    convert_csr_coo(source, &csr);
    /* NOTE(review): 'csr' is only a temporary here and does not appear to be
     * released anywhere in this function -- confirm whether a matching
     * destroy routine exists and should be called before returning. */

    ALPHA_INT m = csr->rows;
    ALPHA_INT n = csr->cols;
    mat->rows = m;
    mat->cols = n;

    /* ld = maximum number of nonzeros in any single row. */
    ALPHA_INT ld = 0;
    for (ALPHA_INT i = 0; i < m; i++)
    {
        ALPHA_INT row_nnz = csr->rows_end[i] - csr->rows_start[i];
        ld = ld > row_nnz ? ld : row_nnz;
    }
    mat->ld = ld;

    /* FIX: use an unsigned 64-bit literal for the limit ('1l<<31' overflows
     * a 32-bit long) and a matching PRIu64 conversion (the original passed
     * a uint64_t to "%ld", which is undefined behaviour). */
    if ((uint64_t)ld * m >= (1ULL << 31))
    {
        fprintf(stderr, "nnz nums overflow!!!:%" PRIu64 "\n", (uint64_t)ld * m);
        exit(EXIT_FAILURE);
    }

    ALPHA_Number *values = alpha_memalign((uint64_t)ld * m * sizeof(ALPHA_Number), DEFAULT_ALIGNMENT);
    ALPHA_INT *indices = alpha_memalign((uint64_t)ld * m * sizeof(ALPHA_INT), DEFAULT_ALIGNMENT);
    memset(values, 0, (uint64_t)ld * m * sizeof(ALPHA_Number));
    memset(indices, 0, (uint64_t)ld * m * sizeof(ALPHA_INT));

    const ALPHA_INT thread_num = alpha_get_thread_num();

    /* Fill slot i of every row j; iteration is column-major over the ELL
     * layout, and rows are independent within a slot. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < ld; i++)
    {
        for (ALPHA_INT j = 0; j < m; j++)
        {
            ALPHA_INT csr_rs = csr->rows_start[j];
            ALPHA_INT csr_re = csr->rows_end[j];
            if (csr_rs + i < csr_re)
            {
                values[i * m + j] = csr->values[csr_rs + i];
                indices[i * m + j] = csr->col_indx[csr_rs + i];
            }
        }
    }

    mat->values = values;
    mat->indices = indices;

    mat->d_values = NULL;
    mat->d_indices = NULL;
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
pregpu.h
#ifndef pregpu_h
#define pregpu_h
#include <omp.h>

// Per-OpenMP-thread device buffer bookkeeping: each host thread keeps its own
// device pointers and sizes (see the threadprivate pragmas below).
static size_t keysDevcSize = 0;                                 // Size of offsets for rangeHost
static size_t rangeDevcSize = 0;                                // Size of offsets for sourceHost
static size_t sourceDevcSize = 0;                               // Size of sources
static size_t targetDevcSize = 0;                               // Size of targets
static int *keysDevc;                                           // Keys on device
static int *rangeDevc;                                          // Ranges on device
static gpureal *sourceDevc;                                     // Sources on device
static gpureal *targetDevc;                                     // Targets on device
#pragma omp threadprivate(keysDevcSize,rangeDevcSize,sourceDevcSize,targetDevcSize)
#pragma omp threadprivate(keysDevc,rangeDevc,sourceDevc,targetDevc)
__device__ __constant__ gpureal constDevc[1];                   // Constants on device

namespace {                                                     // Limit scope of the following functions to nvcc

// Convert Cartesian offsets (dx,dy,dz) to spherical (r,theta,phi) on the GPU.
// EPS is added to r to avoid division by zero at the origin.
__device__ void cart2sph(gpureal& r, gpureal& theta, gpureal& phi,// Get r,theta,phi from x,y,z on GPU
                         gpureal dx, gpureal dy, gpureal dz) {
  r = sqrtf(dx * dx + dy * dy + dz * dz)+EPS;                   // r = sqrt(x^2 + y^2 + z^2) + eps
  theta = acosf(dz / r);                                        // theta = acos(z / r)
  if( fabs(dx) + fabs(dy) < EPS ) {                             // If |x| < eps & |y| < eps
    phi = 0;                                                    // phi can be anything so we set it to 0
  } else if( fabs(dx) < EPS ) {                                 // If |x| < eps
    phi = dy / fabs(dy) * M_PI * 0.5;                           // phi = sign(y) * pi / 2
  } else if( dx > 0 ) {                                         // If x > 0
    phi = atanf(dy / dx);                                       // phi = atan(y / x)
  } else {                                                      // If x < 0
    phi = atanf(dy / dx) + M_PI;                                // phi = atan(y / x) + pi
  }                                                             // End if for x,y cases
}

// Chain rule: transform a gradient given in spherical components
// (d/dr, d/dtheta, d/dphi) into Cartesian components on the GPU.
__device__ void sph2cart(gpureal r, gpureal theta, gpureal phi, // Spherical to cartesian coordinates on GPU
                         gpureal *spherical, gpureal *cartesian) {
  cartesian[0] = sinf(theta) * cosf(phi) * spherical[0]         // x component (not x itself)
               + cosf(theta) * cosf(phi) / r * spherical[1]
               - sinf(phi) / r / sinf(theta) * spherical[2];
  cartesian[1] = sinf(theta) * sinf(phi) * spherical[0]         // y component (not y itself)
               + cosf(theta) * sinf(phi) / r * spherical[1]
               + cosf(phi) / r / sinf(theta) * spherical[2];
  cartesian[2] = cosf(theta) * spherical[0]                     // z component (not z itself)
               - sinf(theta) / r * spherical[1];
}

// Evaluate all solid harmonics r^n * Ynm for n,m < P into shared memory,
// using the standard associated-Legendre recurrences (single thread's work).
__device__ void evalMultipole(gpureal *YnmShrd, gpureal rho,    // Evaluate solid harmonics r^n * Ynm on GPU
                              gpureal alpha, gpureal *factShrd) {
  gpureal x = cosf(alpha);                                      // x = cos(alpha)
  gpureal y = sinf(alpha);                                      // y = sin(alpha)
  gpureal fact = 1;                                             // Initialize 2 * m + 1
  gpureal pn = 1;                                               // Initialize Legendre polynomial Pn
  gpureal rhom = 1;                                             // Initialize rho^m
  for( int m=0; m<P; ++m ){                                     // Loop over m in Ynm
    gpureal p = pn;                                             // Associate Legendre polynomial Pnm
    int npn = m * m + 2 * m;                                    // Index of Ynm for m > 0
    int nmn = m * m;                                            // Index of Ynm for m < 0
    YnmShrd[npn] = rhom * p / factShrd[2*m];                    // rho^m * Ynm for m > 0
    YnmShrd[nmn] = YnmShrd[npn];                                // Use conjugate relation for m < 0
    gpureal p1 = p;                                             // Pnm-1
    p = x * (2 * m + 1) * p;                                    // Pnm using recurrence relation
    rhom *= -rho;                                               // rho^m
    gpureal rhon = rhom;                                        // rho^n
    for( int n=m+1; n<P; ++n ){                                 // Loop over n in Ynm
      int npm = n * n + n + m;                                  // Index of Ynm for m > 0
      int nmm = n * n + n - m;                                  // Index of Ynm for m < 0
      YnmShrd[npm] = rhon * p / factShrd[n+m];                  // rho^n * Ynm
      YnmShrd[nmm] = YnmShrd[npm];                              // Use conjugate relation for m < 0
      gpureal p2 = p1;                                          // Pnm-2
      p1 = p;                                                   // Pnm-1
      p = (x * (2 * n + 1) * p1 - (n + m) * p2) / (n - m + 1);  // Pnm using recurrence relation
      rhon *= -rho;                                             // Update rho^n
    }                                                           // End loop over n in Ynm
    pn = -pn * fact * y;                                        // Pn
    fact += 2;                                                  // 2 * m + 1
  }                                                             // End loop over m in Ynm
}

// Evaluate singular harmonics rho^(-n-1) * Ynm into shared memory.  Unlike
// evalMultipole, the (n,m) coefficients are distributed over the thread
// block: thread threadIdx.x handles indices l, l+THREADS, ... and rebuilds
// (n,m) from the linear index l, so each thread re-runs the recurrence only
// up to its own coefficient.
__device__ void evalLocal(gpureal *YnmShrd, gpureal rho,        // Evaluate singular harmonics r^(-n-1) * Ynm
                          gpureal alpha, gpureal *factShrd) {
  gpureal x = cosf(alpha);                                      // x = cos(alpha)
  gpureal y = sinf(alpha);                                      // y = sin(alpha)
  gpureal rho_1 = 1 / rho;                                      // 1 / rho
  for( int l=threadIdx.x; l<(2*P+1)*P; l+=THREADS ) {           // Loop over coefficients in Ynm
    gpureal fact = 1;                                           // Initialize 2 * m + 1
    gpureal pn = 1;                                             // Initialize Legendre polynomial Pn
    gpureal rhom = rho_1;                                       // Initialize rho^(-m-1)
    int nn = floor(sqrtf(2*l+0.25)-0.5);                        // Calculate index n of Ynm
    int mm = 0;                                                 // Initialize index m of Ynm
    gpureal Ynm;                                                // Define temporary Ynm
    for( int i=0; i<=nn; ++i ) mm += i;                         // Offset of m
    mm = l - mm;                                                // Calculate index m of Ynm
    int n;                                                      // Define temporary n
    for( int m=0; m<mm; ++m ){                                  // Loop up to m
      rhom *= rho_1;                                            // rho^(-m-1)
      pn = -pn * fact * y;                                      // Pn
      fact += 2;                                                // 2 * m + 1
    }                                                           // End loop up to m
    int m = mm;                                                 // Define temporary m
    gpureal p = pn;                                             // Associated Legendre polynomial Pnm
    if( mm == nn ) Ynm = rhom * p;                              // Ynm for n == m (loop below is then empty)
    gpureal p1 = p;                                             // Pnm-1
    p = x * (2 * m + 1) * p;                                    // Pnm
    rhom *= rho_1;                                              // rho^(-m-1)
    gpureal rhon = rhom;                                        // rho^(-n-1)
    for( n=m+1; n<nn; ++n ){                                    // Loop up to n
      gpureal p2 = p1;                                          // Pnm-2
      p1 = p;                                                   // Pnm-1
      p = (x * (2 * n + 1) * p1 - (n + m) * p2) / (n - m + 1);  // Pnm
      rhon *= rho_1;                                            // rho^(-n-1)
    }                                                           // End loop up to n
    if( n <= nn ) Ynm = rhon * p * factShrd[n-m];               // rho^(-n-1) * Ynm (covers all mm < nn cases)
    YnmShrd[l] = Ynm;                                           // Put Ynm in shared memory
  }                                                             // End loop over coefficients in Ynm
  __syncthreads();                                              // Syncronize threads
}

}                                                               // End anonymous namespace
#endif
GB_unaryop__abs_bool_int8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_bool_int8 // op(A') function: GB_tran__abs_bool_int8 // C type: bool // A type: int8_t // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ int8_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ bool z = (bool) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_BOOL || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_bool_int8 ( bool *Cx, // Cx and Ax may be aliased int8_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_bool_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_binop__div_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__div_fp64) // A.*B function (eWiseMult): GB (_AemultB_08__div_fp64) // A.*B function (eWiseMult): GB (_AemultB_02__div_fp64) // A.*B function (eWiseMult): GB (_AemultB_04__div_fp64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__div_fp64) // A*D function (colscale): GB (_AxD__div_fp64) // D*A function (rowscale): GB (_DxB__div_fp64) // C+=B function (dense accum): GB (_Cdense_accumB__div_fp64) // C+=b function (dense accum): GB (_Cdense_accumb__div_fp64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_fp64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_fp64) // C=scalar+B GB (_bind1st__div_fp64) // C=scalar+B' GB (_bind1st_tran__div_fp64) // C=A+scalar GB (_bind2nd__div_fp64) // C=A'+scalar GB (_bind2nd_tran__div_fp64) // C type: double // A type: double // A pattern? 0 // B type: double // B pattern? 
0 // BinaryOp: cij = (aij / bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ double aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ double bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x / y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_DIV || GxB_NO_FP64 || GxB_NO_DIV_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__div_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__div_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__div_fp64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__div_fp64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__div_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double 
*restrict Cx = (double *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__div_fp64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__div_fp64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; double alpha_scalar ; double beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((double *) alpha_scalar_in)) ; beta_scalar = (*((double *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__div_fp64) ( GrB_Matrix C, const int C_sparsity, const int 
ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__div_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__div_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__div_fp64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__div_fp64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 
; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; double bij = GBX (Bx, p, false) ; Cx [p] = (x / bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__div_fp64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; double aij = GBX (Ax, p, false) ; Cx [p] = (aij / y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x / aij) ; \ } GrB_Info GB (_bind1st_tran__div_fp64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij / y) ; \ } GrB_Info GB (_bind2nd_tran__div_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
schedule.c
#include <stdio.h> #include <stdlib.h> #include <omp.h> #include <unistd.h> #include <time.h> struct timespec start, end; void time_start(); double time_end(); int extra_work(int n); int main(int argc, char *argv[]) { const int NX = 1000000; int nx, low, high, bias; if (argc > 1) { nx = atoi(argv[1]); } else nx = NX; int nthreads = omp_get_num_threads(); bias = 82320; low = nx/10 + bias; high = nx/5 + bias; printf("%i < i and i < %i are slow indices out of %i:\n",low, high, nx); time_start(); #pragma omp parallel { int tid = omp_get_thread_num(); int b = 0; long long m = 0; #pragma omp for schedule(runtime) for (int i = 0; i < nx; i++) { b++; if (low < i & i < high) { m = m + extra_work(nx/100); } } printf("%i outer additions in thread %i, did %li extra work\n", b, tid, m); } time_end(); printf("\n%i is slow thread:\n", 0); time_start(); #pragma omp parallel { int tid = omp_get_thread_num(); int b = 0; long long m = 0; #pragma omp for schedule(runtime) for (int i = 0; i < nx; i++) { b++; if(tid == 0) { m = m + extra_work(nx/100); } } printf("%i outer additions in thread %i, did %li extra work\n", b, tid, m); } time_end(); return 0; } void time_start() { int ierr = clock_gettime(CLOCK_REALTIME, &start); } double time_end() { double time_used; int ierr = clock_gettime(CLOCK_REALTIME, &end); time_used = ((double) (end.tv_nsec-start.tv_nsec)) / 1e9 + (double) (end.tv_sec - start.tv_sec); printf("time: %f s\n", time_used); } void frobnicate(int m) { }
3d25pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 4; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=floord(Nt-1,2);t1++) { lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4)); ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(max(1,ceild(16*t2-Nz+9,4)),2*t1+1),4*t1-4*t2+2);t3<=min(min(min(floord(4*Nt+Ny-9,4),floord(8*t1+Ny+7,4)),floord(16*t2+Ny+3,4)),floord(16*t1-16*t2+Nz+Ny+5,4));t3++) { for (t4=max(max(max(0,ceild(t1-127,128)),ceild(16*t2-Nz-1011,1024)),ceild(4*t3-Ny-1011,1024));t4<=min(min(min(min(floord(4*Nt+Nx-9,1024),floord(8*t1+Nx+7,1024)),floord(16*t2+Nx+3,1024)),floord(4*t3+Nx-9,1024)),floord(16*t1-16*t2+Nz+Nx+5,1024));t4++) { for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(1024*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),Nt-1),2*t1+3),4*t2+2),t3-1),256*t4+254);t5++) { for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=4*t3;t7<=min(4*t3+3,4*t5+Ny-5);t7++) { lbv=max(1024*t4,4*t5+4); ubv=min(1024*t4+1023,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + 
(coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); 
free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
linalg_naive.h
/*
 * Copyright (c) 2018-2020, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

namespace MLCommon {
namespace LinAlg {
namespace Naive {

/**
 * @brief CPU reference implementation of the Kronecker product
 *        (OpenMP-parallel over the (i, j) element pairs of A)
 *
 * @note All the matrices are in column-major order.
 *       A is m x n, B is p x q, and the result K is (m*p) x (n*q).
 *
 * @tparam DataT Type of the data
 * @param[out] K Pointer to the result of the Kronecker product A (x) B
 * @param[in] A Matrix A
 * @param[in] B Matrix B
 * @param[in] m Rows of matrix A
 * @param[in] n Columns of matrix A
 * @param[in] p Rows of matrix B
 * @param[in] q Columns of matrix B
 */
template <typename DataT>
void kronecker(DataT *K, const DataT *A, const DataT *B, int m, int n, int p,
               int q) {
  int k_m = m * p;  // number of rows of the result K
#pragma omp parallel for collapse(2)
  for (int i = 0; i < m; i++) {
    for (int j = 0; j < n; j++) {
      DataT a_ij = A[i + m * j];
      // each a_ij scales a full p x q copy of B inside K
      for (int v = 0; v < p; v++) {
        for (int w = 0; w < q; w++) {
          DataT b_vw = B[v + p * w];
          K[i * p + v + (j * q + w) * k_m] = a_ij * b_vw;
        }
      }
    }
  }
}

/**
 * @brief CPU reference matrix multiplication out = alpha * A*B + beta * out
 *        (OpenMP-parallel over the elements of out)
 *
 * @note All the matrices are in column-major order.
 *       When beta != 0 the prior contents of out are read, so out must be
 *       initialized by the caller in that case.
 *
 * @tparam DataT Type of the data
 * @param[out] out Pointer to the result (m x n)
 * @param[in] A Matrix A
 * @param[in] B Matrix B
 * @param[in] m Rows of A
 * @param[in] k Columns of A / rows of B
 * @param[in] n Columns of B
 * @param[in] alpha Scalar alpha
 * @param[in] beta Scalar beta
 */
template <typename DataT>
void matMul(DataT *out, const DataT *A, const DataT *B, int m, int k, int n,
            DataT alpha = 1, DataT beta = 0) {
#pragma omp parallel for collapse(2)
  for (int j = 0; j < n; j++) {
    for (int i = 0; i < m; i++) {
      DataT s = 0.0;
      for (int r = 0; r < k; r++) {
        s += A[i + r * m] * B[r + j * k];
      }
      out[i + j * m] = alpha * s + beta * out[i + j * m];
    }
  }
}

/**
 * @brief CPU reference vector add (u + alpha * v), OpenMP-parallel
 *
 * @tparam DataT Type of the data
 * @param[out] out Pointer to the result
 * @param[in] u Vector u
 * @param[in] v Vector v
 * @param[in] len Length of the vectors to add
 * @param[in] alpha Coefficient to multiply the elements of v with
 */
template <typename DataT>
void add(DataT *out, const DataT *u, const DataT *v, int len,
         DataT alpha = 1.0) {
#pragma omp parallel for
  for (int i = 0; i < len; i++) {
    out[i] = u[i] + alpha * v[i];
  }
}

/**
 * @brief CPU lagged matrix: column (lag-1) of out holds the input shifted
 *        back by 'lag' positions (out has len-lags rows and lags columns,
 *        column-major)
 *
 * @tparam DataT Type of the data
 * @param[out] out Pointer to the result
 * @param[in] in Pointer to the input vector
 * @param[in] len Length of the vector
 * @param[in] lags Number of lags
 */
template <typename DataT>
void laggedMat(DataT *out, const DataT *in, int len, int lags) {
  int lagged_len = len - lags;
#pragma omp parallel for
  for (int lag = 1; lag <= lags; lag++) {
    DataT *out_ = out + (lag - 1) * lagged_len;
    const DataT *in_ = in + lags - lag;
    for (int i = 0; i < lagged_len; i++) {
      out_[i] = in_[i];
    }
  }
}

/**
 * @brief CPU matrix 2D copy: copy an out_rows x out_cols window of 'in',
 *        starting at (starting_row, starting_col), into 'out'
 *
 * @tparam DataT Type of the data
 * @param[out] out Pointer to the result
 * @param[in] in Pointer to the input matrix
 * @param[in] starting_row Starting row
 * @param[in] starting_col Starting column
 * @param[in] in_rows Number of rows in the input matrix
 * @param[in] out_rows Number of rows in the output matrix
 * @param[in] out_cols Number of columns in the output matrix
 */
template <typename DataT>
void copy2D(DataT *out, const DataT *in, int starting_row, int starting_col,
            int in_rows, int out_rows, int out_cols) {
#pragma omp parallel for collapse(2)
  for (int i = 0; i < out_rows; i++) {
    for (int j = 0; j < out_cols; j++) {
      out[i + j * out_rows] =
        in[starting_row + i + (starting_col + j) * in_rows];
    }
  }
}

/**
 * @brief CPU first difference of a vector: out[i] = in[i+1] - in[i]
 *        (out has len-1 elements)
 *
 * @tparam DataT Type of the data
 * @param[out] out Pointer to the result
 * @param[in] in Pointer to the input vector
 * @param[in] len Length of the input vector
 */
template <typename DataT>
void diff(DataT *out, const DataT *in, int len) {
#pragma omp parallel for
  for (int i = 0; i < len - 1; i++) {
    out[i] = in[i + 1] - in[i];
  }
}

}  // namespace Naive
}  // namespace LinAlg
}  // namespace MLCommon
sieveOfErastotenes.c
/*
 * Adapted from: https://ideone.com/JU5CfV and
 * https://github.com/stbrumme/eratosthenes
 *
 * --- Timing with clock():
 * Sequential Eratosthenes: 5761455 / Time: 2.04s
 * Parallel Eratosthenes:   5761455 / Time: 1.07s
 * Speed up: 1.9
 * --- Timing with the `time` command:
 * Sequential: 1.95s user 0.03s system  99% cpu 1.977 total
 * Parallel:   1.53s user 0.00s system 514% cpu 0.299 total
 * Speed up: 1.2
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include <omp.h>

const int blockSize = 128 * 1024;  /* cache-friendly segment size */
const int n = 100000000;           /* sieve up to this limit */
const bool useOpenMP = true;

/*
 * Count the primes in [from, to] with a segmented odd-only sieve.
 * Only odd numbers are stored (one byte per odd value) so the segment
 * stays small enough for cache.  2 is added when from <= 2.
 */
int eratosthenesOddSingleBlock(const int from, const int to)
{
  const int memorySize = (to - from + 1) / 2;

  /* mark everything prime initially */
  char *isPrime = malloc(memorySize * sizeof(char));
  if (isPrime == NULL) {  /* BUG FIX: malloc result was unchecked */
    fprintf(stderr, "out of memory\n");
    exit(EXIT_FAILURE);
  }
  for (int i = 0; i < memorySize; i++)
    isPrime[i] = 1;

  for (int i = 3; i * i <= to; i += 2) {
    /* cheap wheel: skip composite sieving candidates */
    if (i >= 3 * 3 && i % 3 == 0) continue;
    if (i >= 5 * 5 && i % 5 == 0) continue;
    if (i >= 7 * 7 && i % 7 == 0) continue;
    if (i >= 11 * 11 && i % 11 == 0) continue;
    if (i >= 13 * 13 && i % 13 == 0) continue;

    /* first multiple of i inside the segment, but at least i*i */
    int minJ = ((from + i - 1) / i) * i;
    if (minJ < i * i)
      minJ = i * i;

    /* start value must be odd */
    if ((minJ & 1) == 0)
      minJ += i;

    /* cross off all odd multiples of i within the segment */
    for (int j = minJ; j <= to; j += 2 * i) {
      int index = j - from;
      isPrime[index / 2] = 0;
    }
  }

  /* count the primes found in this segment */
  int found = 0;
  for (int i = 0; i < memorySize; i++)
    found += isPrime[i];

  /* 2 is the only even prime */
  if (from <= 2)
    found++;

  free(isPrime);
  return found;
}

/*
 * Split [2, fim] into segments and sieve them in parallel.
 * Returns the number of primes <= fim.
 */
int eratosthenesBlockwise(int fim)
{
  /* enable OpenMP (or force single-threaded when disabled) */
  omp_set_num_threads(useOpenMP ? omp_get_num_procs() : 1);

  /* BUG FIX: the accumulator was a double, silently converting exact
   * integer counts back and forth; an int reduction is exact and matches
   * the function's return type. */
  int found = 0;
#pragma omp parallel for reduction(+ : found)
  for (int from = 2; from <= fim; from += blockSize) {
    int to = from + blockSize;
    if (to > fim)
      to = fim;
    found += eratosthenesOddSingleBlock(from, to);
  }
  return found;
}

/*
 * Classic sequential sieve of Eratosthenes.
 * Returns the number of primes <= n.
 */
int sieveOfEratosthenes(int n)
{
  int primes = 0;
  bool *prime = (bool *)malloc((n + 1) * sizeof(bool));
  if (prime == NULL) {  /* BUG FIX: malloc result was unchecked */
    fprintf(stderr, "out of memory\n");
    exit(EXIT_FAILURE);
  }
  int sqrt_n = sqrt(n);
  memset(prime, true, (n + 1) * sizeof(bool));

  for (int p = 2; p <= sqrt_n; p++) {
    /* if prime[p] is still set, p is prime: cross off its multiples */
    if (prime[p] == true) {
      for (int i = p * 2; i <= n; i += p)
        prime[i] = false;
    }
  }

  /* count the surviving entries */
  for (int p = 2; p <= n; p++)
    if (prime[p])
      primes++;

  free(prime);  /* BUG FIX: the array was never freed (leak) */
  return primes;
}

int main()
{
  clock_t tStart = clock();
  int eratosthenes = sieveOfEratosthenes(n);
  printf("Erastothenes Sequencial: %d / Tempo: %.2fs\n", eratosthenes,
         (double)(clock() - tStart) / CLOCKS_PER_SEC);

  tStart = clock();
  int parallelratosthenes = eratosthenesBlockwise(n);
  printf("Erastothenes Paralelo: %d / Tempo: %.2fs\n", parallelratosthenes,
         (double)(clock() - tStart) / CLOCKS_PER_SEC);

  return 0;
}
GB_unop__one_int32_int32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__one_int32_int32 // op(A') function: GB_unop_tran__one_int32_int32 // C type: int32_t // A type: int32_t // cast: ; // unaryop: cij = 1 #define GB_ATYPE \ int32_t #define GB_CTYPE \ int32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = 1 ; // casting #define GB_CAST(z, aij) \ ; ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ ; ; \ /* Cx [pC] = op (cast (aij)) */ \ ; ; \ Cx [pC] = 1 ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ONE || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__one_int32_int32 ( int32_t *Cx, // Cx and Ax may be aliased const int32_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { ; ; ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, 
typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__one_int32_int32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
rawSHA1_fmt_plug.c
/*
 * This software is Copyright (c) 2004 bartavelle, <simon at banquise.net>, and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without modification, are permitted.
 *
 * Optimised set_key() and reduced binary size by magnum, 2012
 *
 * OMP added May 2013, JimF
 */

/*
 * Raw-SHA1 (and AxCrypt truncated-SHA1) format plugin for John the Ripper.
 * Standard JtR three-way plugin stanza: extern declarations, registration,
 * or the full implementation, selected by the preprocessor.
 */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_rawSHA1;
extern struct fmt_main fmt_rawSHA1_axcrypt;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rawSHA1);
john_register_one(&fmt_rawSHA1_axcrypt);
#else

#include <string.h>

#include "arch.h"
#include "sha.h"
#include "common.h"
#include "formats.h"
#include "base64_convert.h"
#include "rawSHA1_common.h"
#include "johnswap.h"

#if !FAST_FORMATS_OMP
#undef _OPENMP
#endif

//#undef SIMD_COEF_32
//#undef SIMD_PARA_SHA1

/*
 * Only effective for SIMD.
 * Undef to disable reversing steps for benchmarking.
 */
#define REVERSE_STEPS

#ifdef _OPENMP
#ifdef SIMD_COEF_32
#ifndef OMP_SCALE
#define OMP_SCALE 1024
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#endif
#include <omp.h>
#endif

#include "simd-intrinsics.h"
#include "memdbg.h"

/* Runtime selector for which of the two registered formats is active. */
#define AX_FORMAT 1
#define RAW_FORMAT 2

#define AX_FORMAT_LABEL "Raw-SHA1-AxCrypt"
#define FORMAT_LABEL "Raw-SHA1"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "SHA1 " SHA1_ALGORITHM_NAME

#ifdef SIMD_COEF_32
/* Keys processed per SIMDSHA1body() call. */
#define NBKEYS (SIMD_COEF_32 * SIMD_PARA_SHA1)
#endif

#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE DIGEST_SIZE
#define BINARY_ALIGN 4

#ifdef SIMD_COEF_32
#define PLAINTEXT_LENGTH 55
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
/* Byte offset of plaintext byte i of key `index` inside the interleaved,
 * big-endian SIMD key buffer. */
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*4*SIMD_COEF_32 ) //for endianity conversion
#else
#define PLAINTEXT_LENGTH 125
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif

#ifdef SIMD_COEF_32
/* Interleaved SIMD buffers: one SHA_BUF_SIZ-word block per key, NBKEYS per row. */
static uint32_t (*saved_key)[SHA_BUF_SIZ*NBKEYS];
static uint32_t (*crypt_key)[DIGEST_SIZE/4*NBKEYS];
#else
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_key)[DIGEST_SIZE / 4];
#endif

static unsigned algo;          /* AX_FORMAT or RAW_FORMAT (set by init_raw/init_ax) */
static unsigned digest_size;   /* bytes compared in cmp_one/cmp_exact */
static unsigned pos;           /* which 32-bit hash word is used for hash-table lookups */
static unsigned SSEi_flags;    /* flags passed through to SIMDSHA1body() */

/* Shared initialisation: scale key counts for OMP and allocate key buffers. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t;

	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
#ifdef SIMD_COEF_32
	saved_key = mem_calloc_align(self->params.max_keys_per_crypt/NBKEYS,
	                             sizeof(*saved_key), MEM_ALIGN_SIMD);
	crypt_key = mem_calloc_align(self->params.max_keys_per_crypt/NBKEYS,
	                             sizeof(*crypt_key), MEM_ALIGN_SIMD);
#else
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_key));
#endif
}

#ifndef REVERSE_STEPS
#undef SSEi_REVERSE_STEPS
#define SSEi_REVERSE_STEPS 0
#undef SSEi_REVERSE_3STEPS
#define SSEi_REVERSE_3STEPS 0
#endif

/* Raw-SHA1: full digest, lookups keyed on (reversed) word 4. */
static void init_raw(struct fmt_main *self)
{
	algo = RAW_FORMAT;
	digest_size = DIGEST_SIZE;
	pos = 4;
	SSEi_flags = SSEi_REVERSE_STEPS | SSEi_MIXED_IN;
	init(self);
}

/* AxCrypt: truncated digest, lookups keyed on (reversed) word 3. */
static void init_ax(struct fmt_main *self)
{
	algo = AX_FORMAT;
	digest_size = AX_DIGEST_SIZE;
	pos = 3;
	SSEi_flags = SSEi_REVERSE_3STEPS | SSEi_MIXED_IN;
	init(self);
}

static void done(void)
{
	MEM_FREE(crypt_key);
	MEM_FREE(saved_key);
}

#ifdef SIMD_COEF_32
/* Word offset of hash word `pos` for key `index` in the interleaved SIMD output. */
#define HASH_OFFSET (index&(SIMD_COEF_32-1))+(((unsigned int)index%NBKEYS)/SIMD_COEF_32)*SIMD_COEF_32*5+pos*SIMD_COEF_32
static int get_hash_0(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_6; }
#else
static int get_hash_0(int index) { return crypt_key[index][pos] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_key[index][pos] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_key[index][pos] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_key[index][pos] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_key[index][pos] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_key[index][pos] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_key[index][pos] & PH_MASK_6; }
#endif

static int binary_hash_0(void *binary) { return ((uint32_t*)binary)[pos] & PH_MASK_0; }
static int binary_hash_1(void *binary) { return ((uint32_t*)binary)[pos] & PH_MASK_1; }
static int binary_hash_2(void *binary) { return ((uint32_t*)binary)[pos] & PH_MASK_2; }
static int binary_hash_3(void *binary) { return ((uint32_t*)binary)[pos] & PH_MASK_3; }
static int binary_hash_4(void *binary) { return ((uint32_t*)binary)[pos] & PH_MASK_4; }
static int binary_hash_5(void *binary) { return ((uint32_t*)binary)[pos] & PH_MASK_5; }
static int binary_hash_6(void *binary) { return ((uint32_t*)binary)[pos] & PH_MASK_6; }

#ifdef SIMD_COEF_32
/*
 * Copy a key into the interleaved SIMD buffer a 32-bit word at a time,
 * byte-swapping to big-endian, appending the 0x80 padding byte at the
 * first NUL, clearing stale tail words from a previous (longer) key, and
 * storing the bit length in word 15 of the SHA-1 block.
 */
static void set_key(char *key, int index)
{
#if ARCH_ALLOWS_UNALIGNED
	const uint32_t *wkey = (uint32_t*)key;
#else
	/* Word-wise reads require alignment on this arch; bounce via a copy. */
	char buf_aligned[PLAINTEXT_LENGTH + 1] JTR_ALIGN(sizeof(uint32_t));
	const uint32_t *wkey = (uint32_t*)(is_aligned(key, sizeof(uint32_t)) ?
	                                   key : strcpy(buf_aligned, key));
#endif
	uint32_t *keybuffer = &((uint32_t*)saved_key)[(index&(SIMD_COEF_32-1)) +
	    (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32];
	uint32_t *keybuf_word = keybuffer;
	unsigned int len;
	uint32_t temp;

	len = 0;
	while((unsigned char)(temp = *wkey++)) {
		if (!(temp & 0xff00)) {
			/* NUL in byte 1: keep 1 byte, pad. */
			*keybuf_word = JOHNSWAP((temp & 0xff) | (0x80 << 8));
			len++;
			goto key_cleaning;
		}
		if (!(temp & 0xff0000)) {
			/* NUL in byte 2: keep 2 bytes, pad. */
			*keybuf_word = JOHNSWAP((temp & 0xffff) | (0x80 << 16));
			len+=2;
			goto key_cleaning;
		}
		if (!(temp & 0xff000000)) {
			/* NUL in byte 3: keep 3 bytes, pad. */
			*keybuf_word = JOHNSWAP(temp | (0x80U << 24));
			len+=3;
			goto key_cleaning;
		}
		*keybuf_word = JOHNSWAP(temp);
		len += 4;
		keybuf_word += SIMD_COEF_32;
	}
	/* Length was a multiple of 4: padding byte starts a fresh word. */
	*keybuf_word = 0x80000000;

key_cleaning:
	/* Zero leftover words from a previous, longer key in this lane. */
	keybuf_word += SIMD_COEF_32;
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += SIMD_COEF_32;
	}
	/* SHA-1 message length in bits, in block word 15. */
	keybuffer[15*SIMD_COEF_32] = len << 3;
}
#else
static void set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH+1);
}
#endif

#ifdef SIMD_COEF_32
/* Reconstruct the plaintext from the interleaved SIMD buffer. */
static char *get_key(int index)
{
	static char out[PLAINTEXT_LENGTH + 1];
	unsigned int i;
	/* Stored bit length >> 3 gives byte length. */
	uint32_t len = ((uint32_t*)saved_key)[15*SIMD_COEF_32 +
	    (index&(SIMD_COEF_32-1)) +
	    (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32] >> 3;

	for (i=0;i<len;i++)
		out[i] = ((char*)saved_key)[GETPOS(i, index)];
	out[i] = 0;

	return (char*)out;
}
#else
static char *get_key(int index)
{
	return saved_key[index];
}
#endif

/*
 * Decode the hex ciphertext (after the tag) into binary, then apply the
 * same endian swap / step reversal that crypt_all's output uses, so the
 * two compare directly.
 */
static void *get_binary(char *ciphertext)
{
	static uint32_t full[DIGEST_SIZE / 4];
	unsigned char *realcipher = (unsigned char*)full;

	memset(full, 0, sizeof(full)); // since ax-crypt 'may' be short.
	ciphertext += TAG_LENGTH;
	base64_convert(ciphertext, e_b64_hex, HASH_LENGTH, realcipher,
	               e_b64_raw, sizeof(full), flg_Base64_MIME_TRAIL_EQ, 0);
#ifdef SIMD_COEF_32
	alter_endianity(realcipher, DIGEST_SIZE);
#ifdef REVERSE_STEPS
	if (algo == RAW_FORMAT)
		sha1_reverse(full);
	else
		sha1_reverse3(full);
#endif
#endif
	return (void*)realcipher;
}

/*
 * Inverse of get_binary(): turn a stored (possibly reversed/swapped)
 * binary back into the canonical tagged hex ciphertext.
 * NOTE: the parameter deliberately shadows the function name, per the
 * fmt_main `source` method signature.
 */
static char *source(char *source, void *binary)
{
	static char hex[CIPHERTEXT_LENGTH + 1] = FORMAT_TAG;
	uint32_t hash[DIGEST_SIZE / 4];
	char *p;
	int i, j;

	memcpy(hash, binary, DIGEST_SIZE);

	/* Un-reverse binary */
#ifdef SIMD_COEF_32
#ifdef REVERSE_STEPS
	if (algo == RAW_FORMAT)
		sha1_unreverse(hash);
	else {
		hash[4] = 0;  /* AxCrypt digests are truncated; word 4 is unknown */
		sha1_unreverse3(hash);
	}
#endif
	alter_endianity(hash, DIGEST_SIZE);
#else
	if (algo == AX_FORMAT)
		hash[4] = 0;
#endif
#if ARCH_LITTLE_ENDIAN==0
	alter_endianity(hash, DIGEST_SIZE);
#endif

	/* Convert to hex string */
	p = hex + TAG_LENGTH;
	for (i = 0; i < 5; i++)
		for (j = 0; j < 8; j++)
			*p++ = itoa16[(hash[i] >> ((j ^ 1) * 4)) & 0xf];
	*p = 0;

	return hex;
}

/* Hash all queued keys; one SIMD batch (or one key) per loop iteration. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
	int loops = (count + MAX_KEYS_PER_CRYPT - 1) / MAX_KEYS_PER_CRYPT;

#pragma omp parallel for
	for (index = 0; index < loops; ++index)
#endif
	{
#if SIMD_COEF_32
		SIMDSHA1body(saved_key[index], crypt_key[index], NULL, SSEi_flags);
#else
		SHA_CTX ctx;

		SHA1_Init( &ctx );
		SHA1_Update( &ctx, (unsigned char*) saved_key[index],
		             strlen( saved_key[index] ) );
		SHA1_Final( (unsigned char*) crypt_key[index], &ctx);
#endif
	}
	return count;
}

/* Quick scan: does any computed hash match `binary` on the lookup word? */
static int cmp_all(void *binary, int count)
{
	int index;

	for (index = 0; index < count; index++)
#ifdef SIMD_COEF_32
		if (((uint32_t*)binary)[pos] ==
		    ((uint32_t*)crypt_key)[(index&(SIMD_COEF_32-1)) +
		        (unsigned int)index/SIMD_COEF_32*5*SIMD_COEF_32 +
		        pos*SIMD_COEF_32])
#else
		if ( ((uint32_t*)binary)[0] == crypt_key[index][0] )
#endif
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_32
	return (((uint32_t *) binary)[pos] ==
	        ((uint32_t*)crypt_key)[(index&(SIMD_COEF_32-1)) +
	            (unsigned int)index/SIMD_COEF_32*5*SIMD_COEF_32 +
	            pos*SIMD_COEF_32]);
#else
	return !memcmp(binary, crypt_key[index], digest_size);
#endif
}

/*
 * Full confirmation: recompute the SHA-1 scalar-wise (SIMD builds only;
 * scalar builds already compared the full digest in cmp_one).
 */
static int cmp_exact(char *source, int index)
{
#ifdef SIMD_COEF_32
	uint32_t crypt_key[DIGEST_SIZE / 4];  /* intentionally shadows the global */
	SHA_CTX ctx;
	char *key = get_key(index);

	SHA1_Init(&ctx);
	SHA1_Update(&ctx, key, strlen(key));
	SHA1_Final((void*)crypt_key, &ctx);
	alter_endianity(crypt_key, DIGEST_SIZE);
#ifdef REVERSE_STEPS
	if (algo == RAW_FORMAT)
		sha1_reverse(crypt_key);
	else
		sha1_reverse3(crypt_key);
#endif
	return !memcmp(get_binary(source), crypt_key, digest_size);
#else
	return 1;
#endif
}

struct fmt_main fmt_rawSHA1 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
		FMT_OMP | FMT_OMP_BAD |
#endif
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE,
		{ NULL },
		{ FORMAT_TAG, FORMAT_TAG_OLD },
		rawsha1_common_tests
	}, {
		init_raw,
		done,
		fmt_default_reset,
		rawsha1_common_prepare,
		rawsha1_common_valid,
		rawsha1_common_split,
		get_binary,
		fmt_default_salt,
		{ NULL },
		source,
		{
			binary_hash_0,
			binary_hash_1,
			binary_hash_2,
			binary_hash_3,
			binary_hash_4,
			binary_hash_5,
			binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

struct fmt_main fmt_rawSHA1_axcrypt = {
	{
		AX_FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		DIGEST_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
		FMT_OMP | FMT_OMP_BAD |
#endif
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE,
		{ NULL },
		{ NULL },
		axcrypt_common_tests
	}, {
		init_ax,
		done,
		fmt_default_reset,
		rawsha1_common_prepare,
		rawsha1_axcrypt_valid,
		rawsha1_axcrypt_split,
		get_binary,
		fmt_default_salt,
		{ NULL },
		source,
		{
			binary_hash_0,
			binary_hash_1,
			binary_hash_2,
			binary_hash_3,
			binary_hash_4,
			binary_hash_5,
			binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
3d25pt.c
/* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 16; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*( coef0* A[t%2][i ][j ][k ] + coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] + A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] + A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) + coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] + A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] + A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) + coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] + A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] + A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) + coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] + A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] + A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) ); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
deprecate.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD EEEEE PPPP RRRR EEEEE CCCC AAA TTTTT EEEEE % % D D E P P R R E C A A T E % % D D EEE PPPPP RRRR EEE C AAAAA T EEE % % D D E P R R E C A A T E % % DDDD EEEEE P R R EEEEE CCCC A A T EEEEE % % % % % % MagickWand Deprecated Methods % % % % Software Design % % John Cristy % % October 2002 % % % % % % Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "wand/studio.h" #include "wand/MagickWand.h" #include "wand/magick-wand-private.h" #include "wand/wand.h" #include "magick/monitor-private.h" #include "magick/thread-private.h" /* Define declarations. */ #define PixelViewId "PixelView" #define ThrowWandException(severity,tag,context) \ { \ (void) ThrowMagickException(wand->exception,GetMagickModule(),severity, \ tag,"`%s'",context); \ return(MagickFalse); \ } /* Typedef declarations. 
*/ struct _PixelView { size_t id; char name[MaxTextExtent]; ExceptionInfo *exception; MagickWand *wand; CacheView *view; RectangleInfo region; size_t number_threads; PixelWand ***pixel_wands; MagickBooleanType debug; size_t signature; }; #if !defined(MAGICKCORE_EXCLUDE_DEPRECATED) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k A v e r a g e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickAverageImages() average a set of images. % % The format of the MagickAverageImages method is: % % MagickWand *MagickAverageImages(MagickWand *wand) % % A description of each parameter follows: % % o wand: the magick wand. % */ static MagickWand *CloneMagickWandFromImages(const MagickWand *wand, Image *images) { MagickWand *clone_wand; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); clone_wand=(MagickWand *) AcquireMagickMemory(sizeof(*clone_wand)); if (clone_wand == (MagickWand *) NULL) ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed", images->filename); (void) ResetMagickMemory(clone_wand,0,sizeof(*clone_wand)); clone_wand->id=AcquireWandId(); (void) FormatMagickString(clone_wand->name,MaxTextExtent,"%s-%.20g", MagickWandId,(double) clone_wand->id); clone_wand->exception=AcquireExceptionInfo(); InheritException(clone_wand->exception,wand->exception); clone_wand->image_info=CloneImageInfo(wand->image_info); clone_wand->quantize_info=CloneQuantizeInfo(wand->quantize_info); clone_wand->images=images; clone_wand->debug=IsEventLogging(); if (clone_wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_wand->name); clone_wand->signature=WandSignature; return(clone_wand); } WandExport MagickWand *MagickAverageImages(MagickWand *wand) { Image *average_image; assert(wand != 
(MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) return((MagickWand *) NULL); average_image=EvaluateImages(wand->images,MeanEvaluateOperator, wand->exception); if (average_image == (Image *) NULL) return((MagickWand *) NULL); return(CloneMagickWandFromImages(wand,average_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e P i x e l V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelView() makes a copy of the specified pixel view. % % The format of the ClonePixelView method is: % % PixelView *ClonePixelView(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. % */ WandExport PixelView *ClonePixelView(const PixelView *pixel_view) { PixelView *clone_view; register ssize_t i; assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); if (pixel_view->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name); clone_view=(PixelView *) AcquireMagickMemory(sizeof(*clone_view)); if (clone_view == (PixelView *) NULL) ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed", pixel_view->name); (void) ResetMagickMemory(clone_view,0,sizeof(*clone_view)); clone_view->id=AcquireWandId(); (void) FormatMagickString(clone_view->name,MaxTextExtent,"%s-%.20g", PixelViewId,(double) clone_view->id); clone_view->exception=AcquireExceptionInfo(); InheritException(clone_view->exception,pixel_view->exception); clone_view->view=CloneCacheView(pixel_view->view); clone_view->region=pixel_view->region; clone_view->number_threads=pixel_view->number_threads; for (i=0; i < (ssize_t) pixel_view->number_threads; i++) clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **) 
pixel_view->pixel_wands[i],pixel_view->region.width); clone_view->debug=pixel_view->debug; if (clone_view->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name); clone_view->signature=WandSignature; return(clone_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y P i x e l V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyPixelView() deallocates memory associated with a pixel view. % % The format of the DestroyPixelView method is: % % PixelView *DestroyPixelView(PixelView *pixel_view, % const size_t number_wands,const size_t number_threads) % % A description of each parameter follows: % % o pixel_view: the pixel view. % % o number_wand: the number of pixel wands. % % o number_threads: number of threads. % */ static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands, const size_t number_wands,const size_t number_threads) { register ssize_t i; assert(pixel_wands != (PixelWand ***) NULL); for (i=0; i < (ssize_t) number_threads; i++) if (pixel_wands[i] != (PixelWand **) NULL) pixel_wands[i]=DestroyPixelWands(pixel_wands[i],number_wands); pixel_wands=(PixelWand ***) RelinquishMagickMemory(pixel_wands); return(pixel_wands); } WandExport PixelView *DestroyPixelView(PixelView *pixel_view) { assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); pixel_view->pixel_wands=DestroyPixelsThreadSet(pixel_view->pixel_wands, pixel_view->region.width,pixel_view->number_threads); pixel_view->view=DestroyCacheView(pixel_view->view); pixel_view->exception=DestroyExceptionInfo(pixel_view->exception); pixel_view->signature=(~WandSignature); RelinquishWandId(pixel_view->id); pixel_view=(PixelView *) RelinquishMagickMemory(pixel_view); return(pixel_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D u p l e x T r a n s f e 
r P i x e l V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DuplexTransferPixelViewIterator() iterates over three pixel views in % parallel and calls your transfer method for each scanline of the view. The % source and duplex pixel region is not confined to the image canvas-- that is % you can include negative offsets or widths or heights that exceed the image % dimension. However, the destination pixel view is confined to the image % canvas-- that is no negative offsets or widths or heights that exceed the % image dimension are permitted. % % Use this pragma: % % #pragma omp critical % % to define a section of code in your callback transfer method that must be % executed by a single thread at a time. % % The format of the DuplexTransferPixelViewIterator method is: % % MagickBooleanType DuplexTransferPixelViewIterator(PixelView *source, % PixelView *duplex,PixelView *destination, % DuplexTransferPixelViewMethod transfer,void *context) % % A description of each parameter follows: % % o source: the source pixel view. % % o duplex: the duplex pixel view. % % o destination: the destination pixel view. % % o transfer: the transfer callback method. % % o context: the user defined context. 
% */ WandExport MagickBooleanType DuplexTransferPixelViewIterator( PixelView *source,PixelView *duplex,PixelView *destination, DuplexTransferPixelViewMethod transfer,void *context) { #define DuplexTransferPixelViewTag "PixelView/DuplexTransfer" ExceptionInfo *exception; Image *destination_image, *duplex_image, *source_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(source != (PixelView *) NULL); assert(source->signature == WandSignature); if (transfer == (DuplexTransferPixelViewMethod) NULL) return(MagickFalse); source_image=source->wand->images; duplex_image=duplex->wand->images; destination_image=destination->wand->images; if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; exception=destination->exception; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,1) shared(progress,status) #endif for (y=source->region.y; y < (ssize_t) source->region.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const IndexPacket *restrict duplex_indexes, *restrict indexes; register const PixelPacket *restrict duplex_pixels, *restrict pixels; register IndexPacket *restrict destination_indexes; register ssize_t x; register PixelPacket *restrict destination_pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y, source->region.width,1,source->exception); if (pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(source->view); for (x=0; x < (ssize_t) source->region.width; x++) PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x); if (source_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) source->region.width; x++) PixelSetBlackQuantum(source->pixel_wands[id][x],indexes[x]); if (source_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) source->region.width; x++) 
PixelSetIndex(source->pixel_wands[id][x],indexes[x]); duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->region.x,y, duplex->region.width,1,duplex->exception); if (duplex_pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } duplex_indexes=GetCacheViewVirtualIndexQueue(duplex->view); for (x=0; x < (ssize_t) duplex->region.width; x++) PixelSetQuantumColor(duplex->pixel_wands[id][x],duplex_pixels+x); if (duplex_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) duplex->region.width; x++) PixelSetBlackQuantum(duplex->pixel_wands[id][x],duplex_indexes[x]); if (duplex_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) duplex->region.width; x++) PixelSetIndex(duplex->pixel_wands[id][x],duplex_indexes[x]); destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->region.x,y,destination->region.width,1,exception); if (destination_pixels == (PixelPacket *) NULL) { status=MagickFalse; continue; } destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view); for (x=0; x < (ssize_t) destination->region.width; x++) PixelSetQuantumColor(destination->pixel_wands[id][x], destination_pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) destination->region.width; x++) PixelSetBlackQuantum(destination->pixel_wands[id][x], destination_indexes[x]); if (destination_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) destination->region.width; x++) PixelSetIndex(destination->pixel_wands[id][x],destination_indexes[x]); if (transfer(source,duplex,destination,context) == MagickFalse) status=MagickFalse; for (x=0; x < (ssize_t) destination->region.width; x++) PixelGetQuantumColor(destination->pixel_wands[id][x], destination_pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) destination->region.width; x++) destination_indexes[x]=PixelGetBlackQuantum( destination->pixel_wands[id][x]); 
sync=SyncCacheViewAuthenticPixels(destination->view,exception); if (sync == MagickFalse) { InheritException(destination->exception,GetCacheViewException( source->view)); status=MagickFalse; } if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickWand_DuplexTransferPixelViewIterator) #endif proceed=SetImageProgress(source_image,DuplexTransferPixelViewTag, progress++,source->region.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w E x c e p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewException() returns the severity, reason, and description of any % error that occurs when utilizing a pixel view. % % The format of the GetPixelViewException method is: % % char *GetPixelViewException(const PixelWand *pixel_view, % ExceptionType *severity) % % A description of each parameter follows: % % o pixel_view: the pixel pixel_view. % % o severity: the severity of the error is returned here. 
% */ WandExport char *GetPixelViewException(const PixelView *pixel_view, ExceptionType *severity) { char *description; assert(pixel_view != (const PixelView *) NULL); assert(pixel_view->signature == WandSignature); if (pixel_view->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name); assert(severity != (ExceptionType *) NULL); *severity=pixel_view->exception->severity; description=(char *) AcquireQuantumMemory(2UL*MaxTextExtent, sizeof(*description)); if (description == (char *) NULL) ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed", pixel_view->name); *description='\0'; if (pixel_view->exception->reason != (char *) NULL) (void) CopyMagickString(description,GetLocaleExceptionMessage( pixel_view->exception->severity,pixel_view->exception->reason), MaxTextExtent); if (pixel_view->exception->description != (char *) NULL) { (void) ConcatenateMagickString(description," (",MaxTextExtent); (void) ConcatenateMagickString(description,GetLocaleExceptionMessage( pixel_view->exception->severity,pixel_view->exception->description), MaxTextExtent); (void) ConcatenateMagickString(description,")",MaxTextExtent); } return(description); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w H e i g h t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewHeight() returns the pixel view height. % % The format of the GetPixelViewHeight method is: % % size_t GetPixelViewHeight(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. 
% */ WandExport size_t GetPixelViewHeight(const PixelView *pixel_view) { assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); return(pixel_view->region.height); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewIterator() iterates over the pixel view in parallel and calls % your get method for each scanline of the view. The pixel region is % not confined to the image canvas-- that is you can include negative offsets % or widths or heights that exceed the image dimension. Any updates to % the pixels in your callback are ignored. % % Use this pragma: % % #pragma omp critical % % to define a section of code in your callback get method that must be % executed by a single thread at a time. % % The format of the GetPixelViewIterator method is: % % MagickBooleanType GetPixelViewIterator(PixelView *source, % GetPixelViewMethod get,void *context) % % A description of each parameter follows: % % o source: the source pixel view. % % o get: the get callback method. % % o context: the user defined context. 
% */ WandExport MagickBooleanType GetPixelViewIterator(PixelView *source, GetPixelViewMethod get,void *context) { #define GetPixelViewTag "PixelView/Get" Image *source_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(source != (PixelView *) NULL); assert(source->signature == WandSignature); if (get == (GetPixelViewMethod) NULL) return(MagickFalse); source_image=source->wand->images; status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,1) shared(progress,status) #endif for (y=source->region.y; y < (ssize_t) source->region.height; y++) { const int id = GetOpenMPThreadId(); register const IndexPacket *indexes; register const PixelPacket *pixels; register ssize_t x; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y, source->region.width,1,source->exception); if (pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(source->view); for (x=0; x < (ssize_t) source->region.width; x++) PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x); if (source_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) source->region.width; x++) PixelSetBlackQuantum(source->pixel_wands[id][x],indexes[x]); if (source_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) source->region.width; x++) PixelSetIndex(source->pixel_wands[id][x],indexes[x]); if (get(source,context) == MagickFalse) status=MagickFalse; if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickWand_GetPixelViewIterator) #endif proceed=SetImageProgress(source_image,GetPixelViewTag,progress++, source->region.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w P i 
x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewPixels() returns the pixel view pixel_wands. % % The format of the GetPixelViewPixels method is: % % PixelWand *GetPixelViewPixels(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. % */ WandExport PixelWand **GetPixelViewPixels(const PixelView *pixel_view) { const int id = GetOpenMPThreadId(); assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); return(pixel_view->pixel_wands[id]); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w W a n d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewWand() returns the magick wand associated with the pixel view. % % The format of the GetPixelViewWand method is: % % MagickWand *GetPixelViewWand(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. % */ WandExport MagickWand *GetPixelViewWand(const PixelView *pixel_view) { assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); return(pixel_view->wand); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w W i d t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewWidth() returns the pixel view width. % % The format of the GetPixelViewWidth method is: % % size_t GetPixelViewWidth(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. 
% */ WandExport size_t GetPixelViewWidth(const PixelView *pixel_view) { assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); return(pixel_view->region.width); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w X % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewX() returns the pixel view x offset. % % The format of the GetPixelViewX method is: % % ssize_t GetPixelViewX(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. % */ WandExport ssize_t GetPixelViewX(const PixelView *pixel_view) { assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); return(pixel_view->region.x); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w Y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewY() returns the pixel view y offset. % % The format of the GetPixelViewY method is: % % ssize_t GetPixelViewY(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. % */ WandExport ssize_t GetPixelViewY(const PixelView *pixel_view) { assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); return(pixel_view->region.y); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s P i x e l V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsPixelView() returns MagickTrue if the the parameter is verified as a pixel % view container. % % The format of the IsPixelView method is: % % MagickBooleanType IsPixelView(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. 
% */ WandExport MagickBooleanType IsPixelView(const PixelView *pixel_view) { size_t length; if (pixel_view == (const PixelView *) NULL) return(MagickFalse); if (pixel_view->signature != WandSignature) return(MagickFalse); length=strlen(PixelViewId); if (LocaleNCompare(pixel_view->name,PixelViewId,length) != 0) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k C l i p P a t h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickClipPathImage() clips along the named paths from the 8BIM profile, if % present. Later operations take effect inside the path. Id may be a number % if preceded with #, to work on a numbered path, e.g., "#1" to use the first % path. % % The format of the MagickClipPathImage method is: % % MagickBooleanType MagickClipPathImage(MagickWand *wand, % const char *pathname,const MagickBooleanType inside) % % A description of each parameter follows: % % o wand: the magick wand. % % o pathname: name of clipping path resource. If name is preceded by #, use % clipping path numbered by name. % % o inside: if non-zero, later operations take effect inside clipping path. % Otherwise later operations take effect outside clipping path. % */ WandExport MagickBooleanType MagickClipPathImage(MagickWand *wand, const char *pathname,const MagickBooleanType inside) { return(MagickClipImagePath(wand,pathname,inside)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w G e t F i l l A l p h a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawGetFillAlpha() returns the alpha used when drawing using the fill % color or fill texture. Fully opaque is 1.0. 
% % The format of the DrawGetFillAlpha method is: % % double DrawGetFillAlpha(const DrawingWand *wand) % % A description of each parameter follows: % % o wand: the drawing wand. % */ WandExport double DrawGetFillAlpha(const DrawingWand *wand) { return(DrawGetFillOpacity(wand)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w G e t S t r o k e A l p h a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawGetStrokeAlpha() returns the alpha of stroked object outlines. % % The format of the DrawGetStrokeAlpha method is: % % double DrawGetStrokeAlpha(const DrawingWand *wand) % % A description of each parameter follows: % % o wand: the drawing wand. */ WandExport double DrawGetStrokeAlpha(const DrawingWand *wand) { return(DrawGetStrokeOpacity(wand)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w P e e k G r a p h i c W a n d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPeekGraphicWand() returns the current drawing wand. % % The format of the PeekDrawingWand method is: % % DrawInfo *DrawPeekGraphicWand(const DrawingWand *wand) % % A description of each parameter follows: % % o wand: the drawing wand. % */ WandExport DrawInfo *DrawPeekGraphicWand(const DrawingWand *wand) { return(PeekDrawingWand(wand)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w P o p G r a p h i c C o n t e x t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPopGraphicContext() destroys the current drawing wand and returns to the % previously pushed drawing wand. Multiple drawing wands may exist. It is an % error to attempt to pop more drawing wands than have been pushed, and it is % proper form to pop all drawing wands which have been pushed. 
% % The format of the DrawPopGraphicContext method is: % % MagickBooleanType DrawPopGraphicContext(DrawingWand *wand) % % A description of each parameter follows: % % o wand: the drawing wand. % */ WandExport void DrawPopGraphicContext(DrawingWand *wand) { (void) PopDrawingWand(wand); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w P u s h G r a p h i c C o n t e x t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPushGraphicContext() clones the current drawing wand to create a new % drawing wand. The original drawing wand(s) may be returned to by % invoking PopDrawingWand(). The drawing wands are stored on a drawing wand % stack. For every Pop there must have already been an equivalent Push. % % The format of the DrawPushGraphicContext method is: % % MagickBooleanType DrawPushGraphicContext(DrawingWand *wand) % % A description of each parameter follows: % % o wand: the drawing wand. % */ WandExport void DrawPushGraphicContext(DrawingWand *wand) { (void) PushDrawingWand(wand); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w S e t F i l l A l p h a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawSetFillAlpha() sets the alpha to use when drawing using the fill % color or fill texture. Fully opaque is 1.0. % % The format of the DrawSetFillAlpha method is: % % void DrawSetFillAlpha(DrawingWand *wand,const double fill_alpha) % % A description of each parameter follows: % % o wand: the drawing wand. 
% % o fill_alpha: fill alpha % */ WandExport void DrawSetFillAlpha(DrawingWand *wand,const double fill_alpha) { DrawSetFillOpacity(wand,fill_alpha); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w S e t S t r o k e A l p h a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawSetStrokeAlpha() specifies the alpha of stroked object outlines. % % The format of the DrawSetStrokeAlpha method is: % % void DrawSetStrokeAlpha(DrawingWand *wand,const double stroke_alpha) % % A description of each parameter follows: % % o wand: the drawing wand. % % o stroke_alpha: stroke alpha. The value 1.0 is opaque. % */ WandExport void DrawSetStrokeAlpha(DrawingWand *wand,const double stroke_alpha) { DrawSetStrokeOpacity(wand,stroke_alpha); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k C o l o r F l o o d f i l l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickColorFloodfillImage() changes the color value of any pixel that matches % target and is an immediate neighbor. If the method FillToBorderMethod is % specified, the color value is changed for any neighbor pixel that does not % match the bordercolor member of image. % % The format of the MagickColorFloodfillImage method is: % % MagickBooleanType MagickColorFloodfillImage(MagickWand *wand, % const PixelWand *fill,const double fuzz,const PixelWand *bordercolor, % const ssize_t x,const ssize_t y) % % A description of each parameter follows: % % o wand: the magick wand. % % o fill: the floodfill color pixel wand. % % o fuzz: By default target must match a particular pixel color % exactly. However, in many cases two colors may differ by a small amount. % The fuzz member of image defines how much tolerance is acceptable to % consider two colors as the same. 
For example, set fuzz to 10 and the
%      color red at intensities of 100 and 102 respectively are now
%      interpreted as the same color for the purposes of the floodfill.
%
%    o bordercolor: the border color pixel wand.
%
%    o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickColorFloodfillImage(MagickWand *wand,
  const PixelWand *fill,const double fuzz,const PixelWand *bordercolor,
  const ssize_t x,const ssize_t y)
{
  DrawInfo
    *draw_info;

  MagickBooleanType
    status;

  PixelPacket
    target;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /* The fill color travels to ColorFloodfillImage() inside a DrawInfo. */
  draw_info=CloneDrawInfo(wand->image_info,(DrawInfo *) NULL);
  PixelGetQuantumColor(fill,&draw_info->fill);
  /* Seed the match target from the seed pixel; a border color overrides
     it and switches the fill method to FillToBorderMethod below. */
  (void) GetOneVirtualPixel(wand->images,x % wand->images->columns,
    y % wand->images->rows,&target,wand->exception);
  if (bordercolor != (PixelWand *) NULL)
    PixelGetQuantumColor(bordercolor,&target);
  wand->images->fuzz=fuzz;
  status=ColorFloodfillImage(wand->images,draw_info,target,x,y,
    bordercolor != (PixelWand *) NULL ? FillToBorderMethod : FloodfillMethod);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  draw_info=DestroyDrawInfo(draw_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k D e s c r i b e I m a g e                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickDescribeImage() identifies an image by printing its attributes to
%  the file.  Attributes include the image width, height, size, and others.
%
%  The format of the MagickDescribeImage method is:
%
%      char *MagickDescribeImage(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
% */ WandExport char *MagickDescribeImage(MagickWand *wand) { return(MagickIdentifyImage(wand)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k F l a t t e n I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickFlattenImages() merges a sequence of images. This useful for % combining Photoshop layers into a single image. % % The format of the MagickFlattenImages method is: % % MagickWand *MagickFlattenImages(MagickWand *wand) % % A description of each parameter follows: % % o wand: the magick wand. % */ WandExport MagickWand *MagickFlattenImages(MagickWand *wand) { Image *flatten_image; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) return((MagickWand *) NULL); flatten_image=FlattenImages(wand->images,wand->exception); if (flatten_image == (Image *) NULL) return((MagickWand *) NULL); return(CloneMagickWandFromImages(wand,flatten_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k G e t I m a g e A t t r i b u t e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickGetImageAttribute() returns a value associated with the specified % property. Use MagickRelinquishMemory() to free the value when you are % finished with it. % % The format of the MagickGetImageAttribute method is: % % char *MagickGetImageAttribute(MagickWand *wand,const char *property) % % A description of each parameter follows: % % o wand: the magick wand. % % o property: the property. 
% */ WandExport char *MagickGetImageAttribute(MagickWand *wand,const char *property) { return(MagickGetImageProperty(wand,property)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M a g i c k G e t I m a g e I n d e x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickGetImageIndex() returns the index of the current image. % % The format of the MagickGetImageIndex method is: % % ssize_t MagickGetImageIndex(MagickWand *wand) % % A description of each parameter follows: % % o wand: the magick wand. % */ WandExport ssize_t MagickGetImageIndex(MagickWand *wand) { return(MagickGetIteratorIndex(wand)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M a g i c k G e t I m a g e C h a n n e l E x t r e m a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickGetImageChannelExtrema() gets the extrema for one or more image % channels. % % The format of the MagickGetImageChannelExtrema method is: % % MagickBooleanType MagickGetImageChannelExtrema(MagickWand *wand, % const ChannelType channel,size_t *minima,size_t *maxima) % % A description of each parameter follows: % % o wand: the magick wand. % % o channel: the image channel(s). % % o minima: The minimum pixel value for the specified channel(s). % % o maxima: The maximum pixel value for the specified channel(s). 
% */ WandExport MagickBooleanType MagickGetImageChannelExtrema(MagickWand *wand, const ChannelType channel,size_t *minima,size_t *maxima) { MagickBooleanType status; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) ThrowWandException(WandError,"ContainsNoImages",wand->name); status=GetImageChannelExtrema(wand->images,channel,minima,maxima, wand->exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M a g i c k G e t I m a g e E x t r e m a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickGetImageExtrema() gets the extrema for the image. % % The format of the MagickGetImageExtrema method is: % % MagickBooleanType MagickGetImageExtrema(MagickWand *wand, % size_t *minima,size_t *maxima) % % A description of each parameter follows: % % o wand: the magick wand. % % o minima: The minimum pixel value for the specified channel(s). % % o maxima: The maximum pixel value for the specified channel(s). 
% */ WandExport MagickBooleanType MagickGetImageExtrema(MagickWand *wand, size_t *minima,size_t *maxima) { MagickBooleanType status; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) ThrowWandException(WandError,"ContainsNoImages",wand->name); status=GetImageExtrema(wand->images,minima,maxima,wand->exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k G e t I m a g e M a t t e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickGetImageMatte() returns MagickTrue if the image has a matte channel % otherwise MagickFalse. % % The format of the MagickGetImageMatte method is: % % size_t MagickGetImageMatte(MagickWand *wand) % % A description of each parameter follows: % % o wand: the magick wand. % */ WandExport MagickBooleanType MagickGetImageMatte(MagickWand *wand) { assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) ThrowWandException(WandError,"ContainsNoImages",wand->name); return(wand->images->matte); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k G e t I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickGetImagePixels() extracts pixel data from an image and returns it to % you. The method returns MagickTrue on success otherwise MagickFalse if an % error is encountered. The data is returned as char, short int, int, ssize_t, % float, or double in the order specified by map. 
% % Suppose you want to extract the first scanline of a 640x480 image as % character data in red-green-blue order: % % MagickGetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels); % % The format of the MagickGetImagePixels method is: % % MagickBooleanType MagickGetImagePixels(MagickWand *wand, % const ssize_t x,const ssize_t y,const size_t columns, % const size_t rows,const char *map,const StorageType storage, % void *pixels) % % A description of each parameter follows: % % o wand: the magick wand. % % o x, y, columns, rows: These values define the perimeter % of a region of pixels you want to extract. % % o map: This string reflects the expected ordering of the pixel array. % It can be any combination or order of R = red, G = green, B = blue, % A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan, % Y = yellow, M = magenta, K = black, I = intensity (for grayscale), % P = pad. % % o storage: Define the data type of the pixels. Float and double types are % expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose from % these types: CharPixel, DoublePixel, FloatPixel, IntegerPixel, % LongPixel, QuantumPixel, or ShortPixel. % % o pixels: This array of values contain the pixel components as defined by % map and type. You must preallocate this array where the expected % length varies depending on the values of width, height, map, and type. % */ WandExport MagickBooleanType MagickGetImagePixels(MagickWand *wand, const ssize_t x,const ssize_t y,const size_t columns, const size_t rows,const char *map,const StorageType storage, void *pixels) { return(MagickExportImagePixels(wand,x,y,columns,rows,map,storage,pixels)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k G e t I m a g e S i z e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickGetImageSize() returns the image length in bytes. 
% % The format of the MagickGetImageSize method is: % % MagickBooleanType MagickGetImageSize(MagickWand *wand, % MagickSizeType *length) % % A description of each parameter follows: % % o wand: the magick wand. % % o length: the image length in bytes. % */ WandExport MagickSizeType MagickGetImageSize(MagickWand *wand) { assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) ThrowWandException(WandError,"ContainsNoImages",wand->name); return(GetBlobSize(wand->images)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k M a p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickMapImage() replaces the colors of an image with the closest color % from a reference image. % % The format of the MagickMapImage method is: % % MagickBooleanType MagickMapImage(MagickWand *wand, % const MagickWand *map_wand,const MagickBooleanType dither) % % A description of each parameter follows: % % o wand: the magick wand. % % o map: the map wand. % % o dither: Set this integer value to something other than zero to dither % the mapped image. 
%
*/
WandExport MagickBooleanType MagickMapImage(MagickWand *wand,
  const MagickWand *map_wand,const MagickBooleanType dither)
{
  MagickBooleanType
    status;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  /* Both the target wand and the map wand must hold at least one image. */
  if ((wand->images == (Image *) NULL) ||
      (map_wand->images == (Image *) NULL))
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  status=MapImage(wand->images,map_wand->images,dither);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k M a t t e F l o o d f i l l I m a g e                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMatteFloodfillImage() changes the transparency value of any pixel
%  that matches target and is an immediate neighbor.  If the method
%  FillToBorderMethod is specified, the transparency value is changed for
%  any neighbor pixel that does not match the bordercolor member of image.
%
%  The format of the MagickMatteFloodfillImage method is:
%
%      MagickBooleanType MagickMatteFloodfillImage(MagickWand *wand,
%        const double alpha,const double fuzz,const PixelWand *bordercolor,
%        const ssize_t x,const ssize_t y)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is
%      fully transparent.
%
%    o fuzz: By default target must match a particular pixel color exactly.
%      However, in many cases two colors may differ by a small amount.  The
%      fuzz member of image defines how much tolerance is acceptable to
%      consider two colors as the same.  For example, set fuzz to 10 and
%      the color red at intensities of 100 and 102 respectively are now
%      interpreted as the same color for the purposes of the floodfill.
%
%    o bordercolor: the border color pixel wand.
%
%    o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickMatteFloodfillImage(MagickWand *wand,
  const double alpha,const double fuzz,const PixelWand *bordercolor,
  const ssize_t x,const ssize_t y)
{
  MagickBooleanType
    status;

  PixelPacket
    target;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /*
    NOTE(review): the previous implementation cloned a DrawInfo here and
    destroyed it without ever using it; the dead allocation has been
    removed.  MatteFloodfillImage() needs only the target color and the
    opacity quantum.
  */
  (void) GetOneVirtualPixel(wand->images,x % wand->images->columns,
    y % wand->images->rows,&target,wand->exception);
  if (bordercolor != (PixelWand *) NULL)
    PixelGetQuantumColor(bordercolor,&target);
  wand->images->fuzz=fuzz;
  /* alpha is caller-facing (1.0 = opaque); convert to an opacity quantum. */
  status=MatteFloodfillImage(wand->images,target,ClampToQuantum(
    (MagickRealType) QuantumRange-QuantumRange*alpha),x,y,bordercolor !=
    (PixelWand *) NULL ? FillToBorderMethod : FloodfillMethod);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k M e d i a n F i l t e r I m a g e                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMedianFilterImage() applies a digital filter that improves the
%  quality of a noisy image.  Each pixel is replaced by the median in a set
%  of neighboring pixels as defined by radius.
%
%  The format of the MagickMedianFilterImage method is:
%
%      MagickBooleanType MagickMedianFilterImage(MagickWand *wand,
%        const double radius)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o radius: the radius of the pixel neighborhood.
% */ WandExport MagickBooleanType MagickMedianFilterImage(MagickWand *wand, const double radius) { Image *median_image; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) ThrowWandException(WandError,"ContainsNoImages",wand->name); median_image=MedianFilterImage(wand->images,radius,wand->exception); if (median_image == (Image *) NULL) return(MagickFalse); ReplaceImageInList(&wand->images,median_image); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k M i n i m u m I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickMinimumImages() returns the minimum intensity of an image sequence. % % The format of the MagickMinimumImages method is: % % MagickWand *MagickMinimumImages(MagickWand *wand) % % A description of each parameter follows: % % o wand: the magick wand. % */ WandExport MagickWand *MagickMinimumImages(MagickWand *wand) { Image *minimum_image; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) return((MagickWand *) NULL); minimum_image=EvaluateImages(wand->images,MinEvaluateOperator, wand->exception); if (minimum_image == (Image *) NULL) return((MagickWand *) NULL); return(CloneMagickWandFromImages(wand,minimum_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k M o d e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickModeImage() makes each pixel the 'predominate color' of the % neighborhood of the specified radius. 
% % The format of the MagickModeImage method is: % % MagickBooleanType MagickModeImage(MagickWand *wand, % const double radius) % % A description of each parameter follows: % % o wand: the magick wand. % % o radius: the radius of the pixel neighborhood. % */ WandExport MagickBooleanType MagickModeImage(MagickWand *wand, const double radius) { Image *mode_image; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) ThrowWandException(WandError,"ContainsNoImages",wand->name); mode_image=ModeImage(wand->images,radius,wand->exception); if (mode_image == (Image *) NULL) return(MagickFalse); ReplaceImageInList(&wand->images,mode_image); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k M o s a i c I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickMosaicImages() inlays an image sequence to form a single coherent % picture. It returns a wand with each image in the sequence composited at % the location defined by the page offset of the image. % % The format of the MagickMosaicImages method is: % % MagickWand *MagickMosaicImages(MagickWand *wand) % % A description of each parameter follows: % % o wand: the magick wand. 
% */ WandExport MagickWand *MagickMosaicImages(MagickWand *wand) { Image *mosaic_image; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) return((MagickWand *) NULL); mosaic_image=MosaicImages(wand->images,wand->exception); if (mosaic_image == (Image *) NULL) return((MagickWand *) NULL); return(CloneMagickWandFromImages(wand,mosaic_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k O p a q u e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickOpaqueImage() changes any pixel that matches color with the color % defined by fill. % % The format of the MagickOpaqueImage method is: % % MagickBooleanType MagickOpaqueImage(MagickWand *wand, % const PixelWand *target,const PixelWand *fill,const double fuzz) % % A description of each parameter follows: % % o wand: the magick wand. % % o channel: the channel(s). % % o target: Change this target color to the fill color within the image. % % o fill: the fill pixel wand. % % o fuzz: By default target must match a particular pixel color % exactly. However, in many cases two colors may differ by a small amount. % The fuzz member of image defines how much tolerance is acceptable to % consider two colors as the same. For example, set fuzz to 10 and the % color red at intensities of 100 and 102 respectively are now interpreted % as the same color for the purposes of the floodfill. 
% */ WandExport MagickBooleanType MagickOpaqueImage(MagickWand *wand, const PixelWand *target,const PixelWand *fill,const double fuzz) { return(MagickPaintOpaqueImage(wand,target,fill,fuzz)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k P a i n t F l o o d f i l l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickPaintFloodfillImage() changes the color value of any pixel that matches % target and is an immediate neighbor. If the method FillToBorderMethod is % specified, the color value is changed for any neighbor pixel that does not % match the bordercolor member of image. % % The format of the MagickPaintFloodfillImage method is: % % MagickBooleanType MagickPaintFloodfillImage(MagickWand *wand, % const ChannelType channel,const PixelWand *fill,const double fuzz, % const PixelWand *bordercolor,const ssize_t x,const ssize_t y) % % A description of each parameter follows: % % o wand: the magick wand. % % o channel: the channel(s). % % o fill: the floodfill color pixel wand. % % o fuzz: By default target must match a particular pixel color % exactly. However, in many cases two colors may differ by a small amount. % The fuzz member of image defines how much tolerance is acceptable to % consider two colors as the same. For example, set fuzz to 10 and the % color red at intensities of 100 and 102 respectively are now interpreted % as the same color for the purposes of the floodfill. % % o bordercolor: the border color pixel wand. % % o x,y: the starting location of the operation. 
%
*/
/* Deprecated: forwards to MagickFloodfillPaintImage() with invert=MagickFalse
   (fill pixels that MATCH the target, not those that differ). */
WandExport MagickBooleanType MagickPaintFloodfillImage(MagickWand *wand,
  const ChannelType channel,const PixelWand *fill,const double fuzz,
  const PixelWand *bordercolor,const ssize_t x,const ssize_t y)
{
  return(MagickFloodfillPaintImage(wand,channel,fill,fuzz,bordercolor,x,y,
    MagickFalse));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k P a i n t O p a q u e I m a g e                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickPaintOpaqueImage() changes any pixel that matches color with the
%  color defined by fill.
%
%  The format of the MagickPaintOpaqueImage method is:
%
%      MagickBooleanType MagickPaintOpaqueImage(MagickWand *wand,
%        const PixelWand *target,const PixelWand *fill,const double fuzz)
%      MagickBooleanType MagickPaintOpaqueImageChannel(MagickWand *wand,
%        const ChannelType channel,const PixelWand *target,
%        const PixelWand *fill,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o channel: the channel(s).
%
%    o target: Change this target color to the fill color within the image.
%
%    o fill: the fill pixel wand.
%
%    o fuzz: color-match tolerance (see MagickOpaqueImage).
%
*/
/* Deprecated: applies the opaque paint to the default channels. */
WandExport MagickBooleanType MagickPaintOpaqueImage(MagickWand *wand,
  const PixelWand *target,const PixelWand *fill,const double fuzz)
{
  MagickBooleanType
    status;

  status=MagickPaintOpaqueImageChannel(wand,DefaultChannels,target,fill,fuzz);
  return(status);
}

/* Deprecated: forwards to MagickOpaquePaintImageChannel() with
   invert=MagickFalse (paint pixels that MATCH the target). */
WandExport MagickBooleanType MagickPaintOpaqueImageChannel(MagickWand *wand,
  const ChannelType channel,const PixelWand *target,const PixelWand *fill,
  const double fuzz)
{
  return(MagickOpaquePaintImageChannel(wand,channel,target,fill,fuzz,
    MagickFalse));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k P a i n t T r a n s p a r e n t I m a g e                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickPaintTransparentImage() changes any pixel that matches color with
%  the color defined by fill.
%
%  The format of the MagickPaintTransparentImage method is:
%
%      MagickBooleanType MagickPaintTransparentImage(MagickWand *wand,
%        const PixelWand *target,const double alpha,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o target: Change this target color to specified opacity value within
%      the image.
%
%    o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is
%      fully transparent.
%
%    o fuzz: color-match tolerance (see MagickOpaqueImage).
%
*/
/* Deprecated: forwards to MagickTransparentPaintImage() with
   invert=MagickFalse. */
WandExport MagickBooleanType MagickPaintTransparentImage(MagickWand *wand,
  const PixelWand *target,const double alpha,const double fuzz)
{
  MagickBooleanType
    status;

  status=MagickTransparentPaintImage(wand,target,alpha,fuzz,MagickFalse);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k R e c o l o r I m a g e                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickRecolorImage() applies a color transformation to an image.  The
%  method permits saturation changes, hue rotation, luminance to alpha, and
%  various other effects.  Although variable-sized transformation matrices
%  can be used, typically one uses a 5x5 matrix for an RGBA image and a 6x6
%  for CMYKA (or RGBA with offsets).  The matrix is similar to those used by
%  Adobe Flash except offsets are in column 6 rather than 5 (in support of
%  CMYKA images) and offsets are normalized (divide Flash offset by 255).
%
%  The format of the MagickRecolorImage method is:
%
%      MagickBooleanType MagickRecolorImage(MagickWand *wand,
%        const size_t order,const double *color_matrix)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o order: the number of columns and rows in the color matrix.
%
%    o color_matrix: An array of doubles representing the color matrix.
%
*/
/* Deprecated: recolors the current image in place via RecolorImage() and
   splices the result into the wand's image list.  A NULL color_matrix is
   rejected quietly (MagickFalse, no exception); an empty wand throws. */
WandExport MagickBooleanType MagickRecolorImage(MagickWand *wand,
  const size_t order,const double *color_matrix)
{
  Image
    *transform_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (color_matrix == (const double *) NULL)
    return(MagickFalse);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  transform_image=RecolorImage(wand->images,order,color_matrix,
    wand->exception);
  if (transform_image == (Image *) NULL)
    return(MagickFalse);
  /* Replace the current image with the transformed one; the old image is
     destroyed by ReplaceImageInList. */
  ReplaceImageInList(&wand->images,transform_image);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k R e d u c e N o i s e I m a g e                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickReduceNoiseImage() smooths the contours of an image while still
%  preserving edge information.  The algorithm works by replacing each pixel
%  with its neighbor closest in value.  A neighbor is defined by radius.
%  Use a radius of 0 and ReduceNoise() selects a suitable radius for you.
%
%  The format of the MagickReduceNoiseImage method is:
%
%      MagickBooleanType MagickReduceNoiseImage(MagickWand *wand,
%        const double radius)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o radius: the radius of the pixel neighborhood.
%
*/
/* Deprecated: noise reduction via ReduceNoiseImage(); replaces the current
   image in the wand's list on success. */
WandExport MagickBooleanType MagickReduceNoiseImage(MagickWand *wand,
  const double radius)
{
  Image
    *noise_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  noise_image=ReduceNoiseImage(wand->images,radius,wand->exception);
  if (noise_image == (Image *) NULL)
    return(MagickFalse);
  ReplaceImageInList(&wand->images,noise_image);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k M a x i m u m I m a g e s                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMaximumImages() returns the maximum intensity of an image sequence.
%
%  The format of the MagickMaximumImages method is:
%
%      MagickWand *MagickMaximumImages(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
/* Deprecated: implemented as an EvaluateImages() reduction with the
   MaxEvaluateOperator; returns NULL on an empty wand or on failure. */
WandExport MagickWand *MagickMaximumImages(MagickWand *wand)
{
  Image
    *maximum_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  maximum_image=EvaluateImages(wand->images,MaxEvaluateOperator,
    wand->exception);
  if (maximum_image == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,maximum_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k S e t I m a g e A t t r i b u t e                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickSetImageAttribute() associates a property with an image.
% % The format of the MagickSetImageAttribute method is: % % MagickBooleanType MagickSetImageAttribute(MagickWand *wand, % const char *property,const char *value) % % A description of each parameter follows: % % o wand: the magick wand. % % o property: the property. % % o value: the value. % */ WandExport MagickBooleanType MagickSetImageAttribute(MagickWand *wand, const char *property,const char *value) { return(SetImageProperty(wand->images,property,value)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k S e t I m a g e I n d e x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickSetImageIndex() set the current image to the position of the list % specified with the index parameter. % % The format of the MagickSetImageIndex method is: % % MagickBooleanType MagickSetImageIndex(MagickWand *wand, % const ssize_t index) % % A description of each parameter follows: % % o wand: the magick wand. % % o index: the scene number. % */ WandExport MagickBooleanType MagickSetImageIndex(MagickWand *wand, const ssize_t index) { return(MagickSetIteratorIndex(wand,index)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M a g i c k S e t I m a g e O p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickSetImageOption() associates one or options with a particular image % format (.e.g MagickSetImageOption(wand,"jpeg","perserve","yes"). % % The format of the MagickSetImageOption method is: % % MagickBooleanType MagickSetImageOption(MagickWand *wand, % const char *format,const char *key,const char *value) % % A description of each parameter follows: % % o wand: the magick wand. % % o format: the image format. % % o key: The key. % % o value: The value. 
%
*/
/* Deprecated: builds a "format:key=value" definition string and registers it
   on the wand's image_info via DefineImageOption(). */
WandExport MagickBooleanType MagickSetImageOption(MagickWand *wand,
  const char *format,const char *key,const char *value)
{
  char
    option[MaxTextExtent];

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  (void) FormatMagickString(option,MaxTextExtent,"%s:%s=%s",format,key,value);
  return(DefineImageOption(wand->image_info,option));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k T r a n s p a r e n t I m a g e                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickTransparentImage() changes any pixel that matches color with the
%  color defined by fill.
%
%  The format of the MagickTransparentImage method is:
%
%      MagickBooleanType MagickTransparentImage(MagickWand *wand,
%        const PixelWand *target,const double alpha,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o target: Change this target color to specified opacity value within
%      the image.
%
%    o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is
%      fully transparent.
%
%    o fuzz: color-match tolerance (see MagickOpaqueImage).
%
*/
/* Deprecated: forwards to MagickPaintTransparentImage(). */
WandExport MagickBooleanType MagickTransparentImage(MagickWand *wand,
  const PixelWand *target,const double alpha,const double fuzz)
{
  MagickBooleanType
    status;

  status=MagickPaintTransparentImage(wand,target,alpha,fuzz);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k R e g i o n O f I n t e r e s t I m a g e                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickRegionOfInterestImage() extracts a region of the image and returns
%  it as a new wand.
%
%  The format of the MagickRegionOfInterestImage method is:
%
%      MagickWand *MagickRegionOfInterestImage(MagickWand *wand,
%        const size_t width,const size_t height,const ssize_t x,
%        const ssize_t y)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o width: the region width.
%
%    o height: the region height.
%
%    o x: the region x offset.
%
%    o y: the region y offset.
%
*/
/* Deprecated: renamed to MagickGetImageRegion(). */
WandExport MagickWand *MagickRegionOfInterestImage(MagickWand *wand,
  const size_t width,const size_t height,const ssize_t x,
  const ssize_t y)
{
  MagickWand
    *region_wand;

  region_wand=MagickGetImageRegion(wand,width,height,x,y);
  return(region_wand);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k S e t I m a g e P i x e l s                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickSetImagePixels() accepts pixel data and stores it in the image at
%  the location you specify.  The method returns MagickTrue on success,
%  otherwise MagickFalse if an error is encountered.  The pixel data can be
%  either char, short int, int, ssize_t, float, or double in the order
%  specified by map.
% % Suppose your want to upload the first scanline of a 640x480 image from % character data in red-green-blue order: % % MagickSetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels); % % The format of the MagickSetImagePixels method is: % % MagickBooleanType MagickSetImagePixels(MagickWand *wand, % const ssize_t x,const ssize_t y,const size_t columns, % const size_t rows,const char *map,const StorageType storage, % const void *pixels) % % A description of each parameter follows: % % o wand: the magick wand. % % o x, y, columns, rows: These values define the perimeter of a region % of pixels you want to define. % % o map: This string reflects the expected ordering of the pixel array. % It can be any combination or order of R = red, G = green, B = blue, % A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan, % Y = yellow, M = magenta, K = black, I = intensity (for grayscale), % P = pad. % % o storage: Define the data type of the pixels. Float and double types are % expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose from % these types: CharPixel, ShortPixel, IntegerPixel, LongPixel, FloatPixel, % or DoublePixel. % % o pixels: This array of values contain the pixel components as defined by % map and type. You must preallocate this array where the expected % length varies depending on the values of width, height, map, and type. % */ WandExport MagickBooleanType MagickSetImagePixels(MagickWand *wand, const ssize_t x,const ssize_t y,const size_t columns, const size_t rows,const char *map,const StorageType storage, const void *pixels) { return(MagickImportImagePixels(wand,x,y,columns,rows,map,storage,pixels)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k W r i t e I m a g e B l o b % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickWriteImageBlob() implements direct to memory image formats. 
It
%  returns the image as a blob and its length.  Use MagickSetFormat() to
%  set the format of the returned blob (GIF, JPEG, PNG, etc.).
%
%  Use MagickRelinquishMemory() to free the blob when you are done with it.
%
%  The format of the MagickWriteImageBlob method is:
%
%      unsigned char *MagickWriteImageBlob(MagickWand *wand,size_t *length)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o length: the length of the blob.
%
*/
/* Deprecated: renamed to MagickGetImageBlob(); caller owns the returned
   buffer and must release it with MagickRelinquishMemory(). */
WandExport unsigned char *MagickWriteImageBlob(MagickWand *wand,size_t *length)
{
  unsigned char
    *blob;

  blob=MagickGetImageBlob(wand,length);
  return(blob);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   N e w P i x e l V i e w                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewPixelView() returns a pixel view required for all other methods in the
%  Pixel View API.
%
%  The format of the NewPixelView method is:
%
%      PixelView *NewPixelView(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the wand.
%
*/
/* Allocate number_threads arrays of number_wands pixel wands (one array per
   OpenMP thread).  On partial failure the already-allocated wands are torn
   down via DestroyPixelsThreadSet() and NULL is returned. */
static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands,
  const size_t number_threads)
{
  PixelWand
    ***pixel_wands;

  register ssize_t
    i;

  pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_wands));
  if (pixel_wands == (PixelWand ***) NULL)
    return((PixelWand ***) NULL);
  /* Zero the table first so a partial teardown never frees garbage. */
  (void) ResetMagickMemory(pixel_wands,0,number_threads*sizeof(*pixel_wands));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixel_wands[i]=NewPixelWands(number_wands);
    if (pixel_wands[i] == (PixelWand **) NULL)
      return(DestroyPixelsThreadSet(pixel_wands,number_wands,number_threads));
  }
  return(pixel_wands);
}

/* Create a pixel view covering the wand's entire current image.  Each
   thread gets one wand per image column.
   NOTE(review): this function asserts MagickSignature while most of this
   file asserts WandSignature -- presumably equivalent aliases; confirm. */
WandExport PixelView *NewPixelView(MagickWand *wand)
{
  PixelView
    *pixel_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickSignature);
  pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
  if (pixel_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(pixel_view,0,sizeof(*pixel_view));
  pixel_view->id=AcquireWandId();
  (void) FormatMagickString(pixel_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) pixel_view->id);
  pixel_view->exception=AcquireExceptionInfo();
  pixel_view->wand=wand;
  pixel_view->view=AcquireCacheView(pixel_view->wand->images);
  pixel_view->region.width=wand->images->columns;
  pixel_view->region.height=wand->images->rows;
  pixel_view->number_threads=GetOpenMPMaximumThreads();
  pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
    pixel_view->number_threads);
  if (pixel_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  pixel_view->debug=IsEventLogging();
  pixel_view->signature=WandSignature;
  return(pixel_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   N e w P i x e l V i e w R e g i o n                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewPixelViewRegion() returns a pixel view required for all other methods
%  in the Pixel View API.
%
%  The format of the NewPixelViewRegion method is:
%
%      PixelView *NewPixelViewRegion(MagickWand *wand,const ssize_t x,
%        const ssize_t y,const size_t width,const size_t height)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixel_wands view.
%
*/
/* Create a pixel view limited to the given region of the wand's current
   image.  Each thread gets one wand per region column. */
WandExport PixelView *NewPixelViewRegion(MagickWand *wand,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height)
{
  PixelView
    *pixel_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickSignature);
  pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
  if (pixel_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(pixel_view,0,sizeof(*pixel_view));
  pixel_view->id=AcquireWandId();
  (void) FormatMagickString(pixel_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) pixel_view->id);
  pixel_view->exception=AcquireExceptionInfo();
  /*
    Bug fix: assign the wand BEFORE acquiring the cache view.  The structure
    was just zeroed by ResetMagickMemory(), so the previous order
    (view first, wand second) dereferenced a NULL pixel_view->wand inside
    AcquireCacheView().  NewPixelView() already uses this corrected order.
  */
  pixel_view->wand=wand;
  pixel_view->view=AcquireCacheView(pixel_view->wand->images);
  pixel_view->region.width=width;
  pixel_view->region.height=height;
  pixel_view->region.x=x;
  pixel_view->region.y=y;
  pixel_view->number_threads=GetOpenMPMaximumThreads();
  pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
    pixel_view->number_threads);
  if (pixel_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  pixel_view->debug=IsEventLogging();
  pixel_view->signature=WandSignature;
  return(pixel_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   P i x e l G e t N e x t R o w                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PixelGetNextRow() returns the next row as an array of pixel wands from
%  the pixel iterator.
%
%  The format of the PixelGetNextRow method is:
%
%      PixelWand **PixelGetNextRow(PixelIterator *iterator,
%        size_t *number_wands)
%
%  A description of each parameter follows:
%
%    o iterator: the pixel iterator.
%
%    o number_wands: the number of pixel wands.
%
*/
/* Deprecated: forwards to PixelGetNextIteratorRow().  The row length is
   retrieved into a local and discarded -- this legacy signature has no way
   to return it to the caller. */
WandExport PixelWand **PixelGetNextRow(PixelIterator *iterator)
{
  size_t
    number_wands;

  return(PixelGetNextIteratorRow(iterator,&number_wands));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   P i x e l I t e r a t o r G e t E x c e p t i o n                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PixelIteratorGetException() returns the severity, reason, and description
%  of any error that occurs when using other methods in this API.
%
%  The format of the PixelIteratorGetException method is:
%
%      char *PixelIteratorGetException(const PixelIterator *iterator,
%        ExceptionType *severity)
%
%  A description of each parameter follows:
%
%    o iterator: the pixel iterator.
%
%    o severity: the severity of the error is returned here.
%
*/
/* Deprecated: renamed to PixelGetIteratorException(). */
WandExport char *PixelIteratorGetException(const PixelIterator *iterator,
  ExceptionType *severity)
{
  return(PixelGetIteratorException(iterator,severity));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S e t P i x e l V i e w I t e r a t o r                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelViewIterator() iterates over the pixel view in parallel and calls
%  your set method for each scanline of the view.  The pixel region is
%  confined to the image canvas-- that is no negative offsets or widths or
%  heights that exceed the image dimension.
The pixels are initially
%  undefined and any settings you make in the callback method are
%  automagically synced back to your image.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback set method that must be
%  executed by a single thread at a time.
%
%  The format of the SetPixelViewIterator method is:
%
%      MagickBooleanType SetPixelViewIterator(PixelView *destination,
%        SetPixelViewMethod set,void *context)
%
%  A description of each parameter follows:
%
%    o destination: the pixel view.
%
%    o set: the set callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType SetPixelViewIterator(PixelView *destination,
  SetPixelViewMethod set,void *context)
{
#define SetPixelViewTag  "PixelView/Set"

  ExceptionInfo
    *exception;

  Image
    *destination_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(destination != (PixelView *) NULL);
  assert(destination->signature == WandSignature);
  if (set == (SetPixelViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->wand->images;
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,1) shared(progress,status)
#endif
  /* NOTE(review): the loop runs y from region.y up to region.height (not
     region.y+height); for a region with a non-zero y offset that shortens
     the scan -- confirm against the Pixel View contract. */
  for (y=destination->region.y; y < (ssize_t) destination->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict pixels;

    /* A failure on any row poisons status; later rows become no-ops. */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(destination->view,destination->region.x,
      y,destination->region.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    /* Let the user callback fill this thread's wand row, then copy the
       wands back into the authentic pixel queue. */
    if (set(destination,context) == MagickFalse)
      status=MagickFalse;
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        indexes[x]=PixelGetBlackQuantum(destination->pixel_wands[id][x]);
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_SetPixelViewIterator)
#endif
        proceed=SetImageProgress(destination_image,SetPixelViewTag,progress++,
          destination->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   T r a n s f e r P i x e l V i e w I t e r a t o r                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransferPixelViewIterator() iterates over two pixel views in parallel and
%  calls your transfer method for each scanline of the view.  The source
%  pixel region is not confined to the image canvas-- that is you can
%  include negative offsets or widths or heights that exceed the image
%  dimension.  However, the destination pixel view is confined to the image
%  canvas-- that is no negative offsets or widths or heights that exceed the
%  image dimension are permitted.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
%
%  The format of the TransferPixelViewIterator method is:
%
%      MagickBooleanType TransferPixelViewIterator(PixelView *source,
%        PixelView *destination,TransferPixelViewMethod transfer,
%        void *context)
%
%  A description of each parameter follows:
%
%    o source: the source pixel view.
%
%    o destination: the destination pixel view.
%
%    o transfer: the transfer callback method.
%
%    o context: the user defined context.
%
*/
/* NOTE(review): only `source` is asserted; `destination` is dereferenced
   without a NULL/signature check -- verify callers always pass a valid
   destination view. */
WandExport MagickBooleanType TransferPixelViewIterator(PixelView *source,
  PixelView *destination,TransferPixelViewMethod transfer,void *context)
{
#define TransferPixelViewTag  "PixelView/Transfer"

  ExceptionInfo
    *exception;

  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (transfer == (TransferPixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  destination_image=destination->wand->images;
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,1) shared(progress,status)
#endif
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict pixels;

    register IndexPacket
      *restrict destination_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict destination_pixels;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
      source->region.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    /* Seed this thread's source wand row from the virtual pixels. */
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],indexes[x]);
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],indexes[x]);
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->region.x,y,destination->region.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    /* NOTE(review): the destination wands are seeded from the SOURCE
       `pixels`/`indexes` buffers rather than destination_pixels /
       destination_indexes -- presumably intentional (pre-populate the
       destination with source content before the callback), but confirm. */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelSetQuantumColor(destination->pixel_wands[id][x],pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetBlackQuantum(destination->pixel_wands[id][x],indexes[x]);
    if (destination_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetIndex(destination->pixel_wands[id][x],indexes[x]);
    if (transfer(source,destination,context) == MagickFalse)
      status=MagickFalse;
    /* Copy the (possibly modified) destination wands back to the image. */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        destination_indexes[x]=PixelGetBlackQuantum(
          destination->pixel_wands[id][x]);
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        /* NOTE(review): on a destination sync failure this inherits the
           exception from source->view, not destination->view -- verify
           which view's exception is intended here. */
        InheritException(destination->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_TransferPixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,TransferPixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   U p d a t e P i x e l V i e w I t e r a t o r                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UpdatePixelViewIterator() iterates over the pixel view in parallel and
%  calls your update method for each scanline of the view.  The pixel region
%  is confined to the image canvas-- that is no negative offsets or widths
%  or heights that exceed the image dimension are permitted.  Updates to
%  pixels in your callback are automagically synced back to the image.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback update method that must be
%  executed by a single thread at a time.
%
%  The format of the UpdatePixelViewIterator method is:
%
%      MagickBooleanType UpdatePixelViewIterator(PixelView *source,
%        UpdatePixelViewMethod update,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source pixel view.
%
%    o update: the update callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType UpdatePixelViewIterator(PixelView *source,
  UpdatePixelViewMethod update,void *context)
{
#define UpdatePixelViewTag  "PixelView/Update"

  ExceptionInfo
    *exception;

  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (update == (UpdatePixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  if (SetImageStorageClass(source_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=source->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,1) shared(progress,status)
#endif
  /* NOTE(review): as in SetPixelViewIterator, the bound is region.height
     rather than region.y+height -- confirm for offset regions. */
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict pixels;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(source->view,source->region.x,y,
      source->region.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(source->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(source->view);
    /* Load the row into this thread's wands, run the user callback, then
       store the wands back so edits are synced to the image. */
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],indexes[x]);
    if (update(source,context) == MagickFalse)
      status=MagickFalse;
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelGetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        indexes[x]=PixelGetBlackQuantum(source->pixel_wands[id][x]);
    if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse)
      {
        InheritException(source->exception,GetCacheViewException(source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_UpdatePixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,UpdatePixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
#endif
convolution_3x3.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #if __ARM_NEON #include <arm_neon.h> #endif // __ARM_NEON static void conv3x3s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* kernel = _kernel; const float* bias = _bias; int nn_outch = outch >> 1; int remain_outch_start = nn_outch << 1; #pragma omp parallel for for (int pp=0; pp<nn_outch; pp++) { int p = pp * 2; Mat out0 = top_blob.channel(p); Mat out1 = top_blob.channel(p+1); const float bias0 = bias ? bias[p] : 0.f; const float bias1 = bias ? 
bias[p+1] : 0.f; out0.fill(bias0); out1.fill(bias1); const float* k0 = kernel + p*inch*9; const float* k1 = kernel + (p+1)*inch*9; for (int q=0; q<inch; q++) { float* outptr0 = out0; float* outptr1 = out1; float* outptr0n = outptr0 + outw; float* outptr1n = outptr1 + outw; const float* img0 = bottom_blob.channel(q); const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w*2; const float* r3 = img0 + w*3; #if __ARM_NEON float32x4_t _k00 = vld1q_f32(k0); float32x4_t _k03 = vld1q_f32(k0+3); float32x4_t _k06 = vld1q_f32(k0+6); float32x4_t _k10 = vld1q_f32(k1); float32x4_t _k13 = vld1q_f32(k1+3); float32x4_t _k16 = vld1q_f32(k1+6); #endif // __ARM_NEON int i = 0; for (; i+1 < outh; i+=2) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _sum0 = vld1q_f32(outptr0); float32x4_t _sum1 = vld1q_f32(outptr1); float32x4_t _sum0n = vld1q_f32(outptr0n); float32x4_t _sum1n = vld1q_f32(outptr1n); float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r00n = vld1q_f32(r0 + 4); float32x4_t _r01 = vextq_f32(_r00, _r00n, 1); float32x4_t _r02 = vextq_f32(_r00, _r00n, 2); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r10n = vld1q_f32(r1 + 4); float32x4_t _r11 = vextq_f32(_r10, _r10n, 1); float32x4_t _r12 = vextq_f32(_r10, _r10n, 2); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r20n = vld1q_f32(r2 + 4); float32x4_t _r21 = vextq_f32(_r20, _r20n, 1); float32x4_t _r22 = vextq_f32(_r20, _r20n, 2); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _r30n = vld1q_f32(r3 + 4); float32x4_t _r31 = vextq_f32(_r30, _r30n, 1); float32x4_t _r32 = vextq_f32(_r30, _r30n, 2); _sum0 = vfmaq_laneq_f32(_sum0, _r00, _k00, 0); _sum0 = vfmaq_laneq_f32(_sum0, _r01, _k00, 1); _sum0 = vfmaq_laneq_f32(_sum0, _r02, _k00, 2); _sum0 = vfmaq_laneq_f32(_sum0, _r10, _k03, 0); _sum0 = vfmaq_laneq_f32(_sum0, _r11, _k03, 1); _sum0 = vfmaq_laneq_f32(_sum0, _r12, _k03, 2); _sum0 = 
vfmaq_laneq_f32(_sum0, _r20, _k06, 0); _sum0 = vfmaq_laneq_f32(_sum0, _r21, _k06, 1); _sum0 = vfmaq_laneq_f32(_sum0, _r22, _k06, 2); _sum1 = vfmaq_laneq_f32(_sum1, _r00, _k10, 0); _sum1 = vfmaq_laneq_f32(_sum1, _r01, _k10, 1); _sum1 = vfmaq_laneq_f32(_sum1, _r02, _k10, 2); _sum1 = vfmaq_laneq_f32(_sum1, _r10, _k13, 0); _sum1 = vfmaq_laneq_f32(_sum1, _r11, _k13, 1); _sum1 = vfmaq_laneq_f32(_sum1, _r12, _k13, 2); _sum1 = vfmaq_laneq_f32(_sum1, _r20, _k16, 0); _sum1 = vfmaq_laneq_f32(_sum1, _r21, _k16, 1); _sum1 = vfmaq_laneq_f32(_sum1, _r22, _k16, 2); _sum0n = vfmaq_laneq_f32(_sum0n, _r10, _k00, 0); _sum0n = vfmaq_laneq_f32(_sum0n, _r11, _k00, 1); _sum0n = vfmaq_laneq_f32(_sum0n, _r12, _k00, 2); _sum0n = vfmaq_laneq_f32(_sum0n, _r20, _k03, 0); _sum0n = vfmaq_laneq_f32(_sum0n, _r21, _k03, 1); _sum0n = vfmaq_laneq_f32(_sum0n, _r22, _k03, 2); _sum0n = vfmaq_laneq_f32(_sum0n, _r30, _k06, 0); _sum0n = vfmaq_laneq_f32(_sum0n, _r31, _k06, 1); _sum0n = vfmaq_laneq_f32(_sum0n, _r32, _k06, 2); _sum1n = vfmaq_laneq_f32(_sum1n, _r10, _k10, 0); _sum1n = vfmaq_laneq_f32(_sum1n, _r11, _k10, 1); _sum1n = vfmaq_laneq_f32(_sum1n, _r12, _k10, 2); _sum1n = vfmaq_laneq_f32(_sum1n, _r20, _k13, 0); _sum1n = vfmaq_laneq_f32(_sum1n, _r21, _k13, 1); _sum1n = vfmaq_laneq_f32(_sum1n, _r22, _k13, 2); _sum1n = vfmaq_laneq_f32(_sum1n, _r30, _k16, 0); _sum1n = vfmaq_laneq_f32(_sum1n, _r31, _k16, 1); _sum1n = vfmaq_laneq_f32(_sum1n, _r32, _k16, 2); vst1q_f32(outptr0, _sum0); vst1q_f32(outptr1, _sum1); vst1q_f32(outptr0n, _sum0n); vst1q_f32(outptr1n, _sum1n); r0 += 4; r1 += 4; r2 += 4; r3 += 4; outptr0 += 4; outptr1 += 4; outptr0n += 4; outptr1n += 4; } #else if (nn > 0) { asm volatile( "pld [%5, #192] \n" "vld1.f32 {d16-d18}, [%5 :64] \n"// r0 "add %5, #16 \n" "pld [%8, #192] \n" "vld1.f32 {d28-d30}, [%8] \n"// r3 "add %8, #16 \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q14, q15, #2 \n" "0: \n" "pld [%1, #128] \n" "vld1.f32 {d12-d13}, [%1 :64] \n"// _sum0 "pld [%2, #128] \n" "vld1.f32 {d14-d15}, 
[%2 :64] \n"// _sum1 "vmla.f32 q6, q8, %e18[0] \n" "vmla.f32 q7, q8, %e21[0] \n" "pld [%3, #128] \n" "vld1.f32 {d24-d25}, [%3] \n"// _sum0n "pld [%4, #128] \n" "vld1.f32 {d26-d27}, [%4] \n"// _sum1n "vmla.f32 q12, q14, %e20[0] \n" "vmla.f32 q13, q14, %e23[0] \n" "vext.32 q8, q8, q9, #2 \n" "vext.32 q9, q14, q15, #1 \n" "vmla.f32 q6, q10, %e18[1] \n" "vmla.f32 q7, q10, %e21[1] \n" "vmla.f32 q12, q11, %f20[0] \n" "vmla.f32 q13, q11, %f23[0] \n" "pld [%6, #192] \n" "vld1.f32 {d28-d30}, [%6] \n"// r1 "add %6, #16 \n" "vmla.f32 q6, q8, %f18[0] \n" "vmla.f32 q7, q8, %f21[0] \n" "vmla.f32 q12, q9, %e20[1] \n" "vmla.f32 q13, q9, %e23[1] \n" "vext.32 q10, q14, q15, #1 \n" "vmla.f32 q6, q14, %e19[0] \n" "vmla.f32 q7, q14, %e22[0] \n" "vmla.f32 q12, q14, %e18[0] \n" "vmla.f32 q13, q14, %e21[0] \n" "vext.32 q11, q14, q15, #2 \n" "vmla.f32 q6, q10, %e19[1] \n" "vmla.f32 q7, q10, %e22[1] \n" "vmla.f32 q12, q10, %e18[1] \n" "vmla.f32 q13, q10, %e21[1] \n" "pld [%7, #192] \n" "vld1.f32 {d16-d18}, [%7 :64] \n"// r2 "add %7, #16 \n" "vmla.f32 q6, q11, %f19[0] \n" "vmla.f32 q7, q11, %f22[0] \n" "vmla.f32 q12, q11, %f18[0] \n" "vmla.f32 q13, q11, %f21[0] \n" "vext.32 q10, q8, q9, #1 \n" "vmla.f32 q6, q8, %e20[0] \n" "vmla.f32 q7, q8, %e23[0] \n" "vmla.f32 q12, q8, %e19[0] \n" "vmla.f32 q13, q8, %e22[0] \n" "vext.32 q11, q8, q9, #2 \n" "vmla.f32 q6, q10, %e20[1] \n" "vmla.f32 q7, q10, %e23[1] \n" "vmla.f32 q12, q10, %e19[1] \n" "vmla.f32 q13, q10, %e22[1] \n" "pld [%5, #192] \n" "vld1.f32 {d16-d18}, [%5 :64] \n"// r0 "add %5, #16 \n" "vmla.f32 q6, q11, %f20[0] \n" "vmla.f32 q7, q11, %f23[0] \n" "vmla.f32 q12, q11, %f19[0] \n" "vmla.f32 q13, q11, %f22[0] \n" "pld [%8, #192] \n" "vld1.f32 {d28-d30}, [%8] \n"// r3 "add %8, #16 \n" "vext.32 q10, q8, q9, #1 \n" "vst1.f32 {d12-d13}, [%1 : 64]!\n" "vst1.f32 {d14-d15}, [%2 : 64]!\n" "vext.32 q11, q14, q15, #2 \n" "vst1.f32 {d24-d25}, [%3]! \n" "vst1.f32 {d26-d27}, [%4]! 
\n" "subs %0, #1 \n" "bne 0b \n" "sub %5, #16 \n" "sub %8, #16 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr0n), // %3 "=r"(outptr1n), // %4 "=r"(r0), // %5 "=r"(r1), // %6 "=r"(r2), // %7 "=r"(r3) // %8 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr0n), "4"(outptr1n), "5"(r0), "6"(r1), "7"(r2), "8"(r3), "w"(_k00), // %18 "w"(_k03), // %19 "w"(_k06), // %20 "w"(_k10), // %21 "w"(_k13), // %22 "w"(_k16) // %23 : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _sum0 = vmulq_f32(_r00, _k00); float32x4_t _sum1 = vmulq_f32(_r00, _k10); _sum0 = vmlaq_f32(_sum0, _r10, _k03); _sum1 = vmlaq_f32(_sum1, _r10, _k13); _sum0 = vmlaq_f32(_sum0, _r20, _k06); _sum1 = vmlaq_f32(_sum1, _r20, _k16); float32x4_t _sum0n = vmulq_f32(_r10, _k00); float32x4_t _sum1n = vmulq_f32(_r10, _k10); _sum0n = vmlaq_f32(_sum0n, _r20, _k03); _sum1n = vmlaq_f32(_sum1n, _r20, _k13); _sum0n = vmlaq_f32(_sum0n, _r30, _k06); _sum1n = vmlaq_f32(_sum1n, _r30, _k16); _sum0 = vsetq_lane_f32(*outptr0, _sum0, 3); _sum1 = vsetq_lane_f32(*outptr1, _sum1, 3); _sum0n = vsetq_lane_f32(*outptr0n, _sum0n, 3); _sum1n = vsetq_lane_f32(*outptr1n, _sum1n, 3); #if __aarch64__ *outptr0 = vaddvq_f32(_sum0); *outptr1 = vaddvq_f32(_sum1); *outptr0n = vaddvq_f32(_sum0n); *outptr1n = vaddvq_f32(_sum1n); #else float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0)); float32x2_t _ss1 = vadd_f32(vget_low_f32(_sum1), vget_high_f32(_sum1)); float32x2_t _ss0n = vadd_f32(vget_low_f32(_sum0n), vget_high_f32(_sum0n)); float32x2_t _ss1n = vadd_f32(vget_low_f32(_sum1n), vget_high_f32(_sum1n)); float32x2_t _ss01 = vpadd_f32(_ss0, _ss1); float32x2_t _ss01n = vpadd_f32(_ss0n, _ss1n); *outptr0 = vget_lane_f32(_ss01, 0); *outptr1 
= vget_lane_f32(_ss01, 1); *outptr0n = vget_lane_f32(_ss01n, 0); *outptr1n = vget_lane_f32(_ss01n, 1); #endif // __aarch64__ #else float sum0 = 0.f; float sum0n = 0.f; float sum1 = 0.f; float sum1n = 0.f; sum0 += r0[0] * k0[0]; sum0 += r0[1] * k0[1]; sum0 += r0[2] * k0[2]; sum0 += r1[0] * k0[3]; sum0 += r1[1] * k0[4]; sum0 += r1[2] * k0[5]; sum0 += r2[0] * k0[6]; sum0 += r2[1] * k0[7]; sum0 += r2[2] * k0[8]; sum1 += r0[0] * k1[0]; sum1 += r0[1] * k1[1]; sum1 += r0[2] * k1[2]; sum1 += r1[0] * k1[3]; sum1 += r1[1] * k1[4]; sum1 += r1[2] * k1[5]; sum1 += r2[0] * k1[6]; sum1 += r2[1] * k1[7]; sum1 += r2[2] * k1[8]; sum0n += r1[0] * k0[0]; sum0n += r1[1] * k0[1]; sum0n += r1[2] * k0[2]; sum0n += r2[0] * k0[3]; sum0n += r2[1] * k0[4]; sum0n += r2[2] * k0[5]; sum0n += r3[0] * k0[6]; sum0n += r3[1] * k0[7]; sum0n += r3[2] * k0[8]; sum1n += r1[0] * k1[0]; sum1n += r1[1] * k1[1]; sum1n += r1[2] * k1[2]; sum1n += r2[0] * k1[3]; sum1n += r2[1] * k1[4]; sum1n += r2[2] * k1[5]; sum1n += r3[0] * k1[6]; sum1n += r3[1] * k1[7]; sum1n += r3[2] * k1[8]; *outptr0 += sum0; *outptr1 += sum1; *outptr0n += sum0n; *outptr1n += sum1n; #endif // __ARM_NEON r0++; r1++; r2++; r3++; outptr0++; outptr1++; outptr0n++; outptr1n++; } r0 += 2 + w; r1 += 2 + w; r2 += 2 + w; r3 += 2 + w; outptr0 += outw; outptr1 += outw; outptr0n += outw; outptr1n += outw; } for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _sum0 = vld1q_f32(outptr0); float32x4_t _sum1 = vld1q_f32(outptr1); float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r00n = vld1q_f32(r0 + 4); float32x4_t _r01 = vextq_f32(_r00, _r00n, 1); float32x4_t _r02 = vextq_f32(_r00, _r00n, 2); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r10n = vld1q_f32(r1 + 4); float32x4_t _r11 = vextq_f32(_r10, _r10n, 1); float32x4_t _r12 = vextq_f32(_r10, _r10n, 2); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r20n = 
vld1q_f32(r2 + 4); float32x4_t _r21 = vextq_f32(_r20, _r20n, 1); float32x4_t _r22 = vextq_f32(_r20, _r20n, 2); _sum0 = vfmaq_laneq_f32(_sum0, _r00, _k00, 0); _sum0 = vfmaq_laneq_f32(_sum0, _r01, _k00, 1); _sum0 = vfmaq_laneq_f32(_sum0, _r02, _k00, 2); _sum0 = vfmaq_laneq_f32(_sum0, _r10, _k03, 0); _sum0 = vfmaq_laneq_f32(_sum0, _r11, _k03, 1); _sum0 = vfmaq_laneq_f32(_sum0, _r12, _k03, 2); _sum0 = vfmaq_laneq_f32(_sum0, _r20, _k06, 0); _sum0 = vfmaq_laneq_f32(_sum0, _r21, _k06, 1); _sum0 = vfmaq_laneq_f32(_sum0, _r22, _k06, 2); _sum1 = vfmaq_laneq_f32(_sum1, _r00, _k10, 0); _sum1 = vfmaq_laneq_f32(_sum1, _r01, _k10, 1); _sum1 = vfmaq_laneq_f32(_sum1, _r02, _k10, 2); _sum1 = vfmaq_laneq_f32(_sum1, _r10, _k13, 0); _sum1 = vfmaq_laneq_f32(_sum1, _r11, _k13, 1); _sum1 = vfmaq_laneq_f32(_sum1, _r12, _k13, 2); _sum1 = vfmaq_laneq_f32(_sum1, _r20, _k16, 0); _sum1 = vfmaq_laneq_f32(_sum1, _r21, _k16, 1); _sum1 = vfmaq_laneq_f32(_sum1, _r22, _k16, 2); vst1q_f32(outptr0, _sum0); vst1q_f32(outptr1, _sum1); r0 += 4; r1 += 4; r2 += 4; outptr0 += 4; outptr1 += 4; } #else if (nn > 0) { asm volatile( "0: \n" "pld [%3, #192] \n" "vld1.f32 {d16-d18}, [%3] \n"// r0 "add %3, #16 \n" "pld [%1, #128] \n" "vld1.f32 {d12-d13}, [%1] \n"// _sum0 "pld [%2, #128] \n" "vld1.f32 {d14-d15}, [%2] \n"// _sum1 "vmul.f32 q14, q8, %e12[0] \n" "vmul.f32 q15, q8, %e15[0] \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vmla.f32 q6, q10, %e12[1] \n" "vmla.f32 q7, q10, %e15[1] \n" "pld [%4, #192] \n" "vld1.f32 {d16-d18}, [%4] \n"// r1 "add %4, #16 \n" "vmla.f32 q14, q11, %f12[0] \n" "vmla.f32 q15, q11, %f15[0] \n" "vmla.f32 q6, q8, %e13[0] \n" "vmla.f32 q7, q8, %e16[0] \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vmla.f32 q14, q10, %e13[1] \n" "vmla.f32 q15, q10, %e16[1] \n" "pld [%5, #192] \n" "vld1.f32 {d16-d18}, [%5] \n"// r2 "add %5, #16 \n" "vmla.f32 q6, q11, %f13[0] \n" "vmla.f32 q7, q11, %f16[0] \n" "vmla.f32 q14, q8, %e14[0] \n" "vmla.f32 q15, q8, %e17[0] \n" 
"vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vmla.f32 q6, q10, %e14[1] \n" "vmla.f32 q7, q10, %e17[1] \n" "vmla.f32 q14, q11, %f14[0] \n" "vmla.f32 q15, q11, %f17[0] \n" "vadd.f32 q6, q6, q14 \n" "vadd.f32 q7, q7, q15 \n" "vst1.f32 {d12-d13}, [%1]! \n" "vst1.f32 {d14-d15}, [%2]! \n" "subs %0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2) // %5 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(r0), "4"(r1), "5"(r2), "w"(_k00), // %12 "w"(_k03), // %13 "w"(_k06), // %14 "w"(_k10), // %15 "w"(_k13), // %16 "w"(_k16) // %17 : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _sum0 = vmulq_f32(_r00, _k00); float32x4_t _sum1 = vmulq_f32(_r00, _k10); _sum0 = vmlaq_f32(_sum0, _r10, _k03); _sum1 = vmlaq_f32(_sum1, _r10, _k13); _sum0 = vmlaq_f32(_sum0, _r20, _k06); _sum1 = vmlaq_f32(_sum1, _r20, _k16); _sum0 = vsetq_lane_f32(*outptr0, _sum0, 3); _sum1 = vsetq_lane_f32(*outptr1, _sum1, 3); #if __aarch64__ *outptr0 = vaddvq_f32(_sum0); *outptr1 = vaddvq_f32(_sum1); #else float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0)); float32x2_t _ss1 = vadd_f32(vget_low_f32(_sum1), vget_high_f32(_sum1)); float32x2_t _ss01 = vpadd_f32(_ss0, _ss1); *outptr0 = vget_lane_f32(_ss01, 0); *outptr1 = vget_lane_f32(_ss01, 1); #endif // __aarch64__ #else float sum0 = 0.f; float sum1 = 0.f; sum0 += r0[0] * k0[0]; sum0 += r0[1] * k0[1]; sum0 += r0[2] * k0[2]; sum0 += r1[0] * k0[3]; sum0 += r1[1] * k0[4]; sum0 += r1[2] * k0[5]; sum0 += r2[0] * k0[6]; sum0 += r2[1] * k0[7]; sum0 += r2[2] * k0[8]; sum1 += r0[0] * k1[0]; sum1 += r0[1] * k1[1]; sum1 += r0[2] * k1[2]; sum1 += r1[0] * k1[3]; sum1 += r1[1] * k1[4]; sum1 += r1[2] * k1[5]; sum1 += r2[0] * k1[6]; sum1 += r2[1] * 
k1[7]; sum1 += r2[2] * k1[8]; *outptr0 += sum0; *outptr1 += sum1; #endif // __ARM_NEON r0++; r1++; r2++; outptr0++; outptr1++; } r0 += 2; r1 += 2; r2 += 2; } k0 += 9; k1 += 9; } } #pragma omp parallel for for (int p=remain_outch_start; p<outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0.f; out.fill(bias0); const float* kernel0 = kernel + p*inch*9; for (int q=0; q<inch; q++) { float* outptr = out; float* outptr2 = outptr + outw; const float* img0 = bottom_blob.channel(q); const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w*2; const float* r3 = img0 + w*3; #if __ARM_NEON float32x4_t _k0123 = vld1q_f32(kernel0); float32x4_t _k3456 = vld1q_f32(kernel0+3); float32x4_t _k6789 = vld1q_f32(kernel0+6); #else const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; #endif // __ARM_NEON int i = 0; for (; i+1 < outh; i+=2) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _sum1 = vld1q_f32(outptr); float32x4_t _sum3 = vld1q_f32(outptr2); float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r00n = vld1q_f32(r0 + 4); float32x4_t _r01 = vextq_f32(_r00, _r00n, 1); float32x4_t _r02 = vextq_f32(_r00, _r00n, 2); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r10n = vld1q_f32(r1 + 4); float32x4_t _r11 = vextq_f32(_r10, _r10n, 1); float32x4_t _r12 = vextq_f32(_r10, _r10n, 2); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r20n = vld1q_f32(r2 + 4); float32x4_t _r21 = vextq_f32(_r20, _r20n, 1); float32x4_t _r22 = vextq_f32(_r20, _r20n, 2); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _r30n = vld1q_f32(r3 + 4); float32x4_t _r31 = vextq_f32(_r30, _r30n, 1); float32x4_t _r32 = vextq_f32(_r30, _r30n, 2); _sum1 = vfmaq_laneq_f32(_sum1, _r00, _k0123, 0); float32x4_t _sum2 = vmulq_laneq_f32(_r01, _k0123, 1); _sum1 = vfmaq_laneq_f32(_sum1, _r02, _k0123, 2); _sum2 = vfmaq_laneq_f32(_sum2, 
_r10, _k3456, 0); _sum1 = vfmaq_laneq_f32(_sum1, _r11, _k3456, 1); _sum2 = vfmaq_laneq_f32(_sum2, _r12, _k3456, 2); _sum1 = vfmaq_laneq_f32(_sum1, _r20, _k6789, 0); _sum2 = vfmaq_laneq_f32(_sum2, _r21, _k6789, 1); _sum1 = vfmaq_laneq_f32(_sum1, _r22, _k6789, 2); _sum3 = vfmaq_laneq_f32(_sum3, _r10, _k0123, 0); float32x4_t _sum4 = vmulq_laneq_f32(_r11, _k0123, 1); _sum3 = vfmaq_laneq_f32(_sum3, _r12, _k0123, 2); _sum4 = vfmaq_laneq_f32(_sum4, _r20, _k3456, 0); _sum3 = vfmaq_laneq_f32(_sum3, _r21, _k3456, 1); _sum4 = vfmaq_laneq_f32(_sum4, _r22, _k3456, 2); _sum3 = vfmaq_laneq_f32(_sum3, _r30, _k6789, 0); _sum4 = vfmaq_laneq_f32(_sum4, _r31, _k6789, 1); _sum3 = vfmaq_laneq_f32(_sum3, _r32, _k6789, 2); _sum1 = vaddq_f32(_sum1, _sum2); _sum3 = vaddq_f32(_sum3, _sum4); vst1q_f32(outptr, _sum1); vst1q_f32(outptr2, _sum3); r0 += 4; r1 += 4; r2 += 4; r3 += 4; outptr += 4; outptr2 += 4; } #else if (nn > 0) { asm volatile( "pld [%3, #192] \n" "vld1.f32 {d18-d20}, [%3 :64] \n"// r0 "add %3, #16 \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "0: \n" "pld [%1, #128] \n" "vld1.f32 {d14-d15}, [%1 :64] \n"// _sum "vmla.f32 q7, q9, %e14[0] \n" "vmul.f32 q6, q11, %e14[1] \n" "vmul.f32 q13, q12, %f14[0] \n" "pld [%4, #192] \n" "vld1.f32 {d18-d20}, [%4] \n"// r1 "add %4, #16 \n" "vmla.f32 q7, q9, %e15[0] \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "vmla.f32 q6, q11, %e15[1] \n" "vmla.f32 q13, q12, %f15[0] \n" "pld [%2, #128] \n" "vld1.f32 {d16-d17}, [%2] \n"// _sum2 "vmla.f32 q8, q9, %e14[0] \n" "vmul.f32 q14, q11, %e14[1] \n" "vmul.f32 q15, q12, %f14[0] \n" "pld [%5, #192] \n" "vld1.f32 {d18-d20}, [%5 :64] \n"// r2 "add %5, #16 \n" "vmla.f32 q7, q9, %e16[0] \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "vmla.f32 q6, q11, %e16[1] \n" "vmla.f32 q13, q12, %f16[0] \n" "vmla.f32 q8, q9, %e15[0] \n" "vmla.f32 q14, q11, %e15[1] \n" "vmla.f32 q15, q12, %f15[0] \n" "pld [%6, #192] \n" "vld1.f32 {d18-d20}, [%6] \n"// r3 "add %6, #16 \n" 
"vmla.f32 q8, q9, %e16[0] \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "vmla.f32 q14, q11, %e16[1] \n" "vmla.f32 q15, q12, %f16[0] \n" "vadd.f32 q7, q7, q6 \n" "pld [%3, #192] \n" "vld1.f32 {d18-d20}, [%3 :64] \n"// r0 "vadd.f32 q8, q8, q14 \n" "vadd.f32 q7, q7, q13 \n" "vadd.f32 q8, q8, q15 \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "add %3, #16 \n" "vst1.f32 {d14-d15}, [%1]! \n" "vst1.f32 {d16-d17}, [%2]! \n" "subs %0, #1 \n" "bne 0b \n" "sub %3, #16 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(outptr2), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2), // %5 "=r"(r3) // %6 : "0"(nn), "1"(outptr), "2"(outptr2), "3"(r0), "4"(r1), "5"(r2), "6"(r3), "w"(_k0123), // %14 "w"(_k3456), // %15 "w"(_k6789) // %16 : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _sum = vmulq_f32(_r00, _k0123); _sum = vmlaq_f32(_sum, _r10, _k3456); _sum = vmlaq_f32(_sum, _r20, _k6789); float32x4_t _sum2 = vmulq_f32(_r10, _k0123); _sum2 = vmlaq_f32(_sum2, _r20, _k3456); _sum2 = vmlaq_f32(_sum2, _r30, _k6789); _sum = vsetq_lane_f32(*outptr, _sum, 3); _sum2 = vsetq_lane_f32(*outptr2, _sum2, 3); #if __aarch64__ *outptr = vaddvq_f32(_sum); *outptr2 = vaddvq_f32(_sum2); #else float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); float32x2_t _ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2)); float32x2_t _sss2 = vpadd_f32(_ss, _ss2); *outptr = vget_lane_f32(_sss2, 0); *outptr2 = vget_lane_f32(_sss2, 1); #endif // __aarch64__ #else float sum = 0; float sum2 = 0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum2 += 
r1[0] * k0[0]; sum2 += r1[1] * k0[1]; sum2 += r1[2] * k0[2]; sum2 += r2[0] * k1[0]; sum2 += r2[1] * k1[1]; sum2 += r2[2] * k1[2]; sum2 += r3[0] * k2[0]; sum2 += r3[1] * k2[1]; sum2 += r3[2] * k2[2]; *outptr += sum; *outptr2 += sum2; #endif r0++; r1++; r2++; r3++; outptr++; outptr2++; } r0 += 2 + w; r1 += 2 + w; r2 += 2 + w; r3 += 2 + w; outptr += outw; outptr2 += outw; } for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _sum1 = vld1q_f32(outptr); float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r00n = vld1q_f32(r0 + 4); float32x4_t _r01 = vextq_f32(_r00, _r00n, 1); float32x4_t _r02 = vextq_f32(_r00, _r00n, 2); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r10n = vld1q_f32(r1 + 4); float32x4_t _r11 = vextq_f32(_r10, _r10n, 1); float32x4_t _r12 = vextq_f32(_r10, _r10n, 2); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r20n = vld1q_f32(r2 + 4); float32x4_t _r21 = vextq_f32(_r20, _r20n, 1); float32x4_t _r22 = vextq_f32(_r20, _r20n, 2); _sum1 = vfmaq_laneq_f32(_sum1, _r00, _k0123, 0); float32x4_t _sum2 = vmulq_laneq_f32(_r01, _k0123, 1); _sum1 = vfmaq_laneq_f32(_sum1, _r02, _k0123, 2); _sum2 = vfmaq_laneq_f32(_sum2, _r10, _k3456, 0); _sum1 = vfmaq_laneq_f32(_sum1, _r11, _k3456, 1); _sum2 = vfmaq_laneq_f32(_sum2, _r12, _k3456, 2); _sum1 = vfmaq_laneq_f32(_sum1, _r20, _k6789, 0); _sum2 = vfmaq_laneq_f32(_sum2, _r21, _k6789, 1); _sum1 = vfmaq_laneq_f32(_sum1, _r22, _k6789, 2); _sum1 = vaddq_f32(_sum1, _sum2); vst1q_f32(outptr, _sum1); r0 += 4; r1 += 4; r2 += 4; outptr += 4; } #else if (nn > 0) { asm volatile( "pld [%2, #192] \n" "vld1.f32 {d16-d18}, [%2] \n"// r0 "add %2, #16 \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "0: \n" "pld [%1, #128] \n" "vld1.f32 {d14-d15}, [%1] \n"// _sum "vmla.f32 q7, q8, %e10[0] \n" "vmul.f32 q13, q10, %e10[1] \n" "vmul.f32 q14, q11, %f10[0] \n" "pld [%3, #192] \n" "vld1.f32 {d16-d18}, 
[%3] \n"// r1 "add %3, #16 \n" "vmla.f32 q7, q8, %e11[0] \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vmla.f32 q13, q10, %e11[1] \n" "vmla.f32 q14, q11, %f11[0] \n" "pld [%4, #192] \n" "vld1.f32 {d16-d18}, [%4] \n"// r2 "add %4, #16 \n" "vmla.f32 q7, q8, %e12[0] \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vmla.f32 q13, q10, %e12[1] \n" "vmla.f32 q14, q11, %f12[0] \n" "pld [%2, #192] \n" "vld1.f32 {d16-d18}, [%2] \n"// r0 "add %2, #16 \n" "vadd.f32 q7, q7, q13 \n" "vadd.f32 q7, q7, q14 \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vst1.f32 {d14-d15}, [%1]! \n" "subs %0, #1 \n" "bne 0b \n" "sub %2, #16 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "w"(_k0123), // %10 "w"(_k3456), // %11 "w"(_k6789) // %12 : "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _sum = vmulq_f32(_r00, _k0123); _sum = vmlaq_f32(_sum, _r10, _k3456); _sum = vmlaq_f32(_sum, _r20, _k6789); _sum = vsetq_lane_f32(*outptr, _sum, 3); #if __aarch64__ *outptr = vaddvq_f32(_sum); #else float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); _ss = vpadd_f32(_ss, _ss); *outptr = vget_lane_f32(_ss, 0); #endif // __aarch64__ #else float sum = 0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; *outptr += sum; #endif r0++; r1++; r2++; outptr++; } r0 += 2; r1 += 2; r2 += 2; } kernel0 += 9; } } } static void conv3x3s1_winograd64_transform_kernel_neon(const Mat& kernel, Mat& kernel_tm, int inch, int outch) { kernel_tm.create(8*8, inch, outch); const float ktm[8][3] = { { 1.0f, 
0.0f, 0.0f}, {-2.0f/9, -2.0f/9, -2.0f/9}, {-2.0f/9, 2.0f/9, -2.0f/9}, {1.0f/90, 1.0f/45, 2.0f/45}, {1.0f/90, -1.0f/45, 2.0f/45}, {1.0f/45, 1.0f/90, 1.0f/180}, {1.0f/45, -1.0f/90, 1.0f/180}, { 0.0f, 0.0f, 1.0f} }; #pragma omp parallel for for (int p = 0; p<outch; p++) { for (int q = 0; q<inch; q++) { const float* kernel0 = (const float*)kernel + p*inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel, transposed const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[8][3]; for (int i=0; i<8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // v for (int j=0; j<8; j++) { float* tmpp = &tmp[j][0]; for (int i=0; i<8; i++) { kernel_tm0[j*8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // optimized layout for winograd4 // interleave weights int nn_outch = outch >> 2; int remain_outch_start = nn_outch << 2; Mat kernel_tm2(8*8 * inch * 4, 1, nn_outch + (outch % 4 + 3) / 4); #pragma omp parallel for for (int pp=0; pp<nn_outch; pp++) { int p = pp * 4; float* ktm2 = kernel_tm2.channel(pp); const Mat kernel0_tm = kernel_tm.channel(p); const Mat kernel1_tm = kernel_tm.channel(p+1); const Mat kernel2_tm = kernel_tm.channel(p+2); const Mat kernel3_tm = kernel_tm.channel(p+3); int q=0; #if __ARM_NEON && __aarch64__ for (; q+3<inch; q+=4) { const float* k00 = kernel0_tm.row(q); const float* k01 = kernel0_tm.row(q+1); const float* k02 = kernel0_tm.row(q+2); const float* k03 = kernel0_tm.row(q+3); const float* k10 = kernel1_tm.row(q); const float* k11 = kernel1_tm.row(q+1); const float* k12 = kernel1_tm.row(q+2); const float* k13 = kernel1_tm.row(q+3); const float* k20 = kernel2_tm.row(q); const float* k21 = kernel2_tm.row(q+1); const float* k22 = kernel2_tm.row(q+2); const float* k23 = 
kernel2_tm.row(q+3); const float* k30 = kernel3_tm.row(q); const float* k31 = kernel3_tm.row(q+1); const float* k32 = kernel3_tm.row(q+2); const float* k33 = kernel3_tm.row(q+3); for (int r=0; r<16; r++) { // split into two asm blocks for gcc reject over 30 oprands :( asm volatile( "ld1 {v0.4s}, [%1], #16 \n" "ld1 {v1.4s}, [%2], #16 \n" "ld1 {v2.4s}, [%3], #16 \n" "ld1 {v3.4s}, [%4], #16 \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "ld1 {v0.4s}, [%5], #16 \n" "ld1 {v1.4s}, [%6], #16 \n" "ld1 {v2.4s}, [%7], #16 \n" "ld1 {v3.4s}, [%8], #16 \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" : "=r"(ktm2), // %0 "=r"(k00), // %1 "=r"(k01), // %2 "=r"(k02), // %3 "=r"(k03), // %4 "=r"(k10), // %5 "=r"(k11), // %6 "=r"(k12), // %7 "=r"(k13) // %8 : "0"(ktm2), "1"(k00), "2"(k01), "3"(k02), "4"(k03), "5"(k10), "6"(k11), "7"(k12), "8"(k13) : "cc", "memory", "v0", "v1", "v2", "v3" ); asm volatile( "ld1 {v0.4s}, [%1], #16 \n" "ld1 {v1.4s}, [%2], #16 \n" "ld1 {v2.4s}, [%3], #16 \n" "ld1 {v3.4s}, [%4], #16 \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "ld1 {v0.4s}, [%5], #16 \n" "ld1 {v1.4s}, [%6], #16 \n" "ld1 {v2.4s}, [%7], #16 \n" "ld1 {v3.4s}, [%8], #16 \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" : "=r"(ktm2), // %0 "=r"(k20), // %1 "=r"(k21), // %2 "=r"(k22), // %3 "=r"(k23), // %4 "=r"(k30), // %5 "=r"(k31), // %6 "=r"(k32), // %7 "=r"(k33) // %8 : "0"(ktm2), "1"(k20), "2"(k21), "3"(k22), "4"(k23), "5"(k30), "6"(k31), "7"(k32), "8"(k33) : "cc", "memory", "v0", "v1", "v2", "v3" ); } } #endif // __ARM_NEON && __aarch64__ for (; q+1<inch; q+=2) { const float* k00 = kernel0_tm.row(q); const float* k01 = kernel0_tm.row(q+1); const float* k10 = kernel1_tm.row(q); const float* k11 = kernel1_tm.row(q+1); const float* k20 = kernel2_tm.row(q); const float* k21 = kernel2_tm.row(q+1); const float* k30 = kernel3_tm.row(q); const float* k31 = kernel3_tm.row(q+1); for (int r=0; r<16; r++) { #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%1], #16 \n" 
"ld1 {v1.4s}, [%2], #16 \n" "st1 {v0.4s, v1.4s}, [%0], #32 \n" "ld1 {v0.4s}, [%3], #16 \n" "ld1 {v1.4s}, [%4], #16 \n" "st1 {v0.4s, v1.4s}, [%0], #32 \n" "ld1 {v0.4s}, [%5], #16 \n" "ld1 {v1.4s}, [%6], #16 \n" "st1 {v0.4s, v1.4s}, [%0], #32 \n" "ld1 {v0.4s}, [%7], #16 \n" "ld1 {v1.4s}, [%8], #16 \n" "st1 {v0.4s, v1.4s}, [%0], #32 \n" : "=r"(ktm2), // %0 "=r"(k00), // %1 "=r"(k01), // %2 "=r"(k10), // %3 "=r"(k11), // %4 "=r"(k20), // %5 "=r"(k21), // %6 "=r"(k30), // %7 "=r"(k31) // %8 : "0"(ktm2), "1"(k00), "2"(k01), "3"(k10), "4"(k11), "5"(k20), "6"(k21), "7"(k30), "8"(k31) : "cc", "memory", "v0", "v1" ); #else asm volatile( "vld1.f32 {d0-d1}, [%1 :128]! \n" "vld1.f32 {d2-d3}, [%2 :128]! \n" "vst1.f32 {d0-d3}, [%0 :128]! \n" "vld1.f32 {d0-d1}, [%3 :128]! \n" "vld1.f32 {d2-d3}, [%4 :128]! \n" "vst1.f32 {d0-d3}, [%0 :128]! \n" "vld1.f32 {d0-d1}, [%5 :128]! \n" "vld1.f32 {d2-d3}, [%6 :128]! \n" "vst1.f32 {d0-d3}, [%0 :128]! \n" "vld1.f32 {d0-d1}, [%7 :128]! \n" "vld1.f32 {d2-d3}, [%8 :128]! \n" "vst1.f32 {d0-d3}, [%0 :128]! 
\n" : "=r"(ktm2), // %0 "=r"(k00), // %1 "=r"(k01), // %2 "=r"(k10), // %3 "=r"(k11), // %4 "=r"(k20), // %5 "=r"(k21), // %6 "=r"(k30), // %7 "=r"(k31) // %8 : "0"(ktm2), "1"(k00), "2"(k01), "3"(k10), "4"(k11), "5"(k20), "6"(k21), "7"(k30), "8"(k31) : "cc", "memory", "q0", "q1" ); #endif // __aarch64__ #else for (int m=0; m<4; m++) { ktm2[0 +m] = k00[m]; ktm2[4 +m] = k01[m]; ktm2[8 +m] = k10[m]; ktm2[12+m] = k11[m]; ktm2[16+m] = k20[m]; ktm2[20+m] = k21[m]; ktm2[24+m] = k30[m]; ktm2[28+m] = k31[m]; } k00 += 4; k01 += 4; k10 += 4; k11 += 4; k20 += 4; k21 += 4; k30 += 4; k31 += 4; ktm2 += 32; #endif // __ARM_NEON } } for (; q<inch; q++) { const float* k00 = kernel0_tm.row(q); const float* k10 = kernel1_tm.row(q); const float* k20 = kernel2_tm.row(q); const float* k30 = kernel3_tm.row(q); for (int r=0; r<16; r++) { #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%1], #16 \n" "ld1 {v1.4s}, [%2], #16 \n" "st1 {v0.4s, v1.4s}, [%0], #32 \n" "ld1 {v0.4s}, [%3], #16 \n" "ld1 {v1.4s}, [%4], #16 \n" "st1 {v0.4s, v1.4s}, [%0], #32 \n" : "=r"(ktm2), // %0 "=r"(k00), // %1 "=r"(k10), // %2 "=r"(k20), // %3 "=r"(k30) // %4 : "0"(ktm2), "1"(k00), "2"(k10), "3"(k20), "4"(k30) : "cc", "memory", "v0", "v1" ); #else asm volatile( "vld1.f32 {d0-d1}, [%1 :128]! \n" "vld1.f32 {d2-d3}, [%2 :128]! \n" "vst1.f32 {d0-d3}, [%0 :128]! \n" "vld1.f32 {d0-d1}, [%3 :128]! \n" "vld1.f32 {d2-d3}, [%4 :128]! \n" "vst1.f32 {d0-d3}, [%0 :128]! 
\n" : "=r"(ktm2), // %0 "=r"(k00), // %1 "=r"(k10), // %2 "=r"(k20), // %3 "=r"(k30) // %4 : "0"(ktm2), "1"(k00), "2"(k10), "3"(k20), "4"(k30) : "cc", "memory", "q0", "q1" ); #endif // __aarch64__ #else for (int m=0; m<4; m++) { ktm2[0 +m] = k00[m]; ktm2[4 +m] = k10[m]; ktm2[8 +m] = k20[m]; ktm2[12+m] = k30[m]; } k00 += 4; k10 += 4; k20 += 4; k30 += 4; ktm2 += 16; #endif // __ARM_NEON } } } #pragma omp parallel for for (int p = remain_outch_start; p<outch; p++) { float* ktm2 = (float*)kernel_tm2.channel(nn_outch) + 8*8 * inch * (p-remain_outch_start); const Mat kernel0_tm = kernel_tm.channel(p); int q = 0; for (; q<inch; q++) { const float* k00 = kernel0_tm.row(q); for (int r=0; r<16; r++) { #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%1], #16 \n" "st1 {v0.4s}, [%0], #16 \n" : "=r"(ktm2), // %0 "=r"(k00) // %1 : "0"(ktm2), "1"(k00) : "cc", "memory", "v0" ); #else asm volatile( "vld1.f32 {d0-d1}, [%1 :128]! \n" "vst1.f32 {d0-d1}, [%0 :128]! \n" : "=r"(ktm2), // %0 "=r"(k00) // %1 : "0"(ktm2), "1"(k00) : "cc", "memory", "q0" ); #endif // __aarch64__ #else for (int m=0; m<4; m++) { ktm2[m] = k00[m]; } k00 += 4; ktm2 += 4; #endif // __ARM_NEON } } } kernel_tm = kernel_tm2; } static void conv3x3s1_winograd64_transform_kernel_neon5(const Mat& kernel, Mat& kernel_tm, int inch, int outch) { kernel_tm.create(8*8, inch, outch); const float ktm[8][3] = { { 1.0f, 0.0f, 0.0f}, {-2.0f/9, -2.0f/9, -2.0f/9}, {-2.0f/9, 2.0f/9, -2.0f/9}, {1.0f/90, 1.0f/45, 2.0f/45}, {1.0f/90, -1.0f/45, 2.0f/45}, {1.0f/45, 1.0f/90, 1.0f/180}, {1.0f/45, -1.0f/90, 1.0f/180}, { 0.0f, 0.0f, 1.0f} }; #pragma omp parallel for for (int p = 0; p<outch; p++) { for (int q = 0; q<inch; q++) { const float* kernel0 = (const float*)kernel + p*inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel, transposed const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[8][3]; for (int i=0; i<8; i++) { tmp[i][0] = k0[0] * 
ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // v
            // second pass: apply the same 8x3 transform along the other
            // axis, storing the full 8x8 tile row-major into kernel_tm0
            for (int j=0; j<8; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i=0; i<8; i++)
                {
                    kernel_tm0[j*8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // optimized layout for winograd5
    // interleave weights
//     Mat kernel_tm2(8*8, inch, outch);
//     Mat kernel_tm2(inch, 64, outch);
//     Mat kernel_tm2(4*4*(inch/4), 64, outch/4);
    // one kernel_tm2 channel per block of 8, 4 or 1 output channels;
    // row r of a channel holds the r-th transformed coefficient of every
    // (outch-in-block, inch) pair, interleaved in groups of 4 input channels
    Mat kernel_tm2(8*4*(inch/4) + 8*(inch%4), 64, outch/8 + (outch%8)/4 + outch%4);

    // blocks of 8 output channels
    int p=0;
    for (; p+7<outch; p+=8)
    {
        const Mat kernel0_tm = kernel_tm.channel(p);
        const Mat kernel1_tm = kernel_tm.channel(p+1);
        const Mat kernel2_tm = kernel_tm.channel(p+2);
        const Mat kernel3_tm = kernel_tm.channel(p+3);
        const Mat kernel4_tm = kernel_tm.channel(p+4);
        const Mat kernel5_tm = kernel_tm.channel(p+5);
        const Mat kernel6_tm = kernel_tm.channel(p+6);
        const Mat kernel7_tm = kernel_tm.channel(p+7);

        Mat ktm2 = kernel_tm2.channel(p/8);

        for (int r=0; r<64; r++)
        {
            float* ktm2p = ktm2.row(r);

            int q=0;
            for (; q+3<inch; q+=4)
            {
                const float* ktm0_0 = kernel0_tm.row(q);
                const float* ktm0_1 = kernel0_tm.row(q+1);
                const float* ktm0_2 = kernel0_tm.row(q+2);
                const float* ktm0_3 = kernel0_tm.row(q+3);

                ktm2p[0] = ktm0_0[r];
                ktm2p[1] = ktm0_1[r];
                ktm2p[2] = ktm0_2[r];
                ktm2p[3] = ktm0_3[r];

                ktm2p += 4;

                const float* ktm1_0 = kernel1_tm.row(q);
                const float* ktm1_1 = kernel1_tm.row(q+1);
                const float* ktm1_2 = kernel1_tm.row(q+2);
                const float* ktm1_3 = kernel1_tm.row(q+3);

                ktm2p[0] = ktm1_0[r];
                ktm2p[1] = ktm1_1[r];
                ktm2p[2] = ktm1_2[r];
                ktm2p[3] = ktm1_3[r];

                ktm2p += 4;

                const float* ktm2_0 = kernel2_tm.row(q);
                const float* ktm2_1 = kernel2_tm.row(q+1);
                const float* ktm2_2 = kernel2_tm.row(q+2);
                const float* ktm2_3 = kernel2_tm.row(q+3);

                ktm2p[0] = ktm2_0[r];
                ktm2p[1] = ktm2_1[r];
                ktm2p[2] = ktm2_2[r];
                ktm2p[3] = ktm2_3[r];

                ktm2p += 4;

                const float* ktm3_0 =
kernel3_tm.row(q);
                const float* ktm3_1 = kernel3_tm.row(q+1);
                const float* ktm3_2 = kernel3_tm.row(q+2);
                const float* ktm3_3 = kernel3_tm.row(q+3);

                ktm2p[0] = ktm3_0[r];
                ktm2p[1] = ktm3_1[r];
                ktm2p[2] = ktm3_2[r];
                ktm2p[3] = ktm3_3[r];

                ktm2p += 4;

                const float* ktm4_0 = kernel4_tm.row(q);
                const float* ktm4_1 = kernel4_tm.row(q+1);
                const float* ktm4_2 = kernel4_tm.row(q+2);
                const float* ktm4_3 = kernel4_tm.row(q+3);

                ktm2p[0] = ktm4_0[r];
                ktm2p[1] = ktm4_1[r];
                ktm2p[2] = ktm4_2[r];
                ktm2p[3] = ktm4_3[r];

                ktm2p += 4;

                const float* ktm5_0 = kernel5_tm.row(q);
                const float* ktm5_1 = kernel5_tm.row(q+1);
                const float* ktm5_2 = kernel5_tm.row(q+2);
                const float* ktm5_3 = kernel5_tm.row(q+3);

                ktm2p[0] = ktm5_0[r];
                ktm2p[1] = ktm5_1[r];
                ktm2p[2] = ktm5_2[r];
                ktm2p[3] = ktm5_3[r];

                ktm2p += 4;

                const float* ktm6_0 = kernel6_tm.row(q);
                const float* ktm6_1 = kernel6_tm.row(q+1);
                const float* ktm6_2 = kernel6_tm.row(q+2);
                const float* ktm6_3 = kernel6_tm.row(q+3);

                ktm2p[0] = ktm6_0[r];
                ktm2p[1] = ktm6_1[r];
                ktm2p[2] = ktm6_2[r];
                ktm2p[3] = ktm6_3[r];

                ktm2p += 4;

                const float* ktm7_0 = kernel7_tm.row(q);
                const float* ktm7_1 = kernel7_tm.row(q+1);
                const float* ktm7_2 = kernel7_tm.row(q+2);
                const float* ktm7_3 = kernel7_tm.row(q+3);

                ktm2p[0] = ktm7_0[r];
                ktm2p[1] = ktm7_1[r];
                ktm2p[2] = ktm7_2[r];
                ktm2p[3] = ktm7_3[r];

                ktm2p += 4;
            }
            // leftover input channels (inch % 4): store the 8 output
            // channels' coefficients contiguously instead
            for (; q<inch; q++)
            {
                const float* ktm0_0 = kernel0_tm.row(q);
                const float* ktm1_0 = kernel1_tm.row(q);
                const float* ktm2_0 = kernel2_tm.row(q);
                const float* ktm3_0 = kernel3_tm.row(q);
                const float* ktm4_0 = kernel4_tm.row(q);
                const float* ktm5_0 = kernel5_tm.row(q);
                const float* ktm6_0 = kernel6_tm.row(q);
                const float* ktm7_0 = kernel7_tm.row(q);

                ktm2p[0] = ktm0_0[r];
                ktm2p[1] = ktm1_0[r];
                ktm2p[2] = ktm2_0[r];
                ktm2p[3] = ktm3_0[r];
                ktm2p[4] = ktm4_0[r];
                ktm2p[5] = ktm5_0[r];
                ktm2p[6] = ktm6_0[r];
                ktm2p[7] = ktm7_0[r];

                ktm2p += 8;
            }
        }
    }

    // blocks of 4 output channels
    for (; p+3<outch; p+=4)
    {
        const Mat kernel0_tm = kernel_tm.channel(p);
        const Mat kernel1_tm = kernel_tm.channel(p+1);
        const
Mat kernel2_tm = kernel_tm.channel(p+2);
        const Mat kernel3_tm = kernel_tm.channel(p+3);

        // 4-wide blocks are stored after all the 8-wide blocks
        Mat ktm2 = kernel_tm2.channel(p/8+(p%8)/4);

        for (int r=0; r<64; r++)
        {
            float* ktm2p = ktm2.row(r);

            int q=0;
            for (; q+3<inch; q+=4)
            {
                const float* ktm0_0 = kernel0_tm.row(q);
                const float* ktm0_1 = kernel0_tm.row(q+1);
                const float* ktm0_2 = kernel0_tm.row(q+2);
                const float* ktm0_3 = kernel0_tm.row(q+3);

                ktm2p[0] = ktm0_0[r];
                ktm2p[1] = ktm0_1[r];
                ktm2p[2] = ktm0_2[r];
                ktm2p[3] = ktm0_3[r];

                ktm2p += 4;

                const float* ktm1_0 = kernel1_tm.row(q);
                const float* ktm1_1 = kernel1_tm.row(q+1);
                const float* ktm1_2 = kernel1_tm.row(q+2);
                const float* ktm1_3 = kernel1_tm.row(q+3);

                ktm2p[0] = ktm1_0[r];
                ktm2p[1] = ktm1_1[r];
                ktm2p[2] = ktm1_2[r];
                ktm2p[3] = ktm1_3[r];

                ktm2p += 4;

                const float* ktm2_0 = kernel2_tm.row(q);
                const float* ktm2_1 = kernel2_tm.row(q+1);
                const float* ktm2_2 = kernel2_tm.row(q+2);
                const float* ktm2_3 = kernel2_tm.row(q+3);

                ktm2p[0] = ktm2_0[r];
                ktm2p[1] = ktm2_1[r];
                ktm2p[2] = ktm2_2[r];
                ktm2p[3] = ktm2_3[r];

                ktm2p += 4;

                const float* ktm3_0 = kernel3_tm.row(q);
                const float* ktm3_1 = kernel3_tm.row(q+1);
                const float* ktm3_2 = kernel3_tm.row(q+2);
                const float* ktm3_3 = kernel3_tm.row(q+3);

                ktm2p[0] = ktm3_0[r];
                ktm2p[1] = ktm3_1[r];
                ktm2p[2] = ktm3_2[r];
                ktm2p[3] = ktm3_3[r];

                ktm2p += 4;
            }
            // leftover input channels for the 4-wide block
            for (; q<inch; q++)
            {
                const float* ktm0_0 = kernel0_tm.row(q);
                const float* ktm1_0 = kernel1_tm.row(q);
                const float* ktm2_0 = kernel2_tm.row(q);
                const float* ktm3_0 = kernel3_tm.row(q);

                ktm2p[0] = ktm0_0[r];
                ktm2p[1] = ktm1_0[r];
                ktm2p[2] = ktm2_0[r];
                ktm2p[3] = ktm3_0[r];

                ktm2p += 4;
            }
        }
    }

    // remaining single output channels
    for (; p<outch; p++)
    {
        const Mat kernel0_tm = kernel_tm.channel(p);

        // singles are stored after the 8-wide and 4-wide blocks
        Mat ktm2 = kernel_tm2.channel(p/8+(p%8)/4+p%4);

        for (int r=0; r<64; r++)
        {
            float* ktm2p = ktm2.row(r);

            int q=0;
            for (; q+3<inch; q+=4)
            {
                const float* ktm0_0 = kernel0_tm.row(q);
                const float* ktm0_1 = kernel0_tm.row(q+1);
                const float* ktm0_2 = kernel0_tm.row(q+2);
                const float* ktm0_3 = kernel0_tm.row(q+3);

                ktm2p[0] = ktm0_0[r];
ktm2p[1] = ktm0_1[r];
                ktm2p[2] = ktm0_2[r];
                ktm2p[3] = ktm0_3[r];

                ktm2p += 4;
            }
            // leftover input channels for a single output channel
            for (; q<inch; q++)
            {
                const float* ktm0_0 = kernel0_tm.row(q);

                ktm2p[0] = ktm0_0[r];

                ktm2p += 1;
            }
        }
    }

    // publish the interleaved layout to the caller
    kernel_tm = kernel_tm2;
}

// NOTE(review): the legacy winograd implementations below are compiled out
#if 0//TODO remove old code sometime later
static void conv3x3s1_winograd64_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 6n+2
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 5) / 6 * 6;
    outh = (outh + 5) / 6 * 6;

    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f);

    const float* bias = _bias;

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;
        bottom_blob_tm.create(8*8, w_tm/8 * h_tm/8, inch);

//         const float itm[8][8] = {
//             {1.0f,  0.0f, -5.25f,  0.00f,  5.25f,  0.00f, -1.0f, 0.0f},
//
//             {0.0f,  1.0f,  1.00f, -4.25f, -4.25f,  1.00f,  1.0f, 0.0f},
//             {0.0f, -1.0f,  1.00f,  4.25f, -4.25f, -1.00f,  1.0f, 0.0f},
//
//             {0.0f,  0.5f,  0.25f, -2.50f, -1.25f,  2.00f,  1.0f, 0.0f},
//             {0.0f, -0.5f,  0.25f,  2.50f, -1.25f, -2.00f,  1.0f, 0.0f},
//
//             {0.0f,  2.0f,  4.00f, -2.50f, -5.00f,  0.50f,  1.0f, 0.0f},
//             {0.0f, -2.0f,  4.00f,  2.50f, -5.00f, -0.50f,  1.0f, 0.0f},
//
//             {0.0f, -1.0f,  0.00f,  5.25f,  0.00f, -5.25f,  0.0f, 1.0f}
//         };

        // 0 = r00 - r06 + (r04 - r02) * 5.25
        // 7 = r07 - r01 + (r03 - r05) * 5.25
        // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
        // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
        // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
        // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
        // reuse r04 * 1.25
        // reuse r03 * 2.5
        // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
        // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)

        #pragma omp
parallel for for (int q = 0; q<inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[8][8]; // tile for (int i=0; i<h_tm/8; i++) { for (int j=0; j<w_tm/8; j++) { const float* r0 = img0.row(i * 6) + j * 6; float* r0_tm = img0_tm.row(i * w_tm/8 + j); // TODO neon optimize for (int m=0; m<8; m++) { tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25; tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25; float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25); float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25); tmp[1][m] = tmp12a + tmp12b; tmp[2][m] = tmp12a - tmp12b; float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25); float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2); tmp[3][m] = tmp34a + tmp34b; tmp[4][m] = tmp34a - tmp34b; float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4); float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5); tmp[5][m] = tmp56a + tmp56b; tmp[6][m] = tmp56a - tmp56b; r0 += w; } for (int m=0; m<8; m++) { const float* tmp0 = tmp[m]; r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25; r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25; float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25); float tmp12b = (tmp0[1] - tmp0[3] * 4.25 + tmp0[5]); r0_tm[1] = tmp12a + tmp12b; r0_tm[2] = tmp12a - tmp12b; float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25); float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2); r0_tm[3] = tmp34a + tmp34b; r0_tm[4] = tmp34a - tmp34b; float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4); float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5); r0_tm[5] = tmp56a + tmp56b; r0_tm[6] = tmp56a - tmp56b; r0_tm += 8; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; top_blob_tm.create(8*8, w_tm/8 * h_tm/8, outch); int nn_outch = outch >> 2; int remain_outch_start = nn_outch << 2; #pragma omp parallel for for (int pp=0; pp<nn_outch; pp++) { int p = pp * 4; 
Mat out0_tm = top_blob_tm.channel(p); Mat out1_tm = top_blob_tm.channel(p+1); Mat out2_tm = top_blob_tm.channel(p+2); Mat out3_tm = top_blob_tm.channel(p+3); const Mat kernel0_tm = kernel_tm.channel(p); const Mat kernel1_tm = kernel_tm.channel(p+1); const Mat kernel2_tm = kernel_tm.channel(p+2); const Mat kernel3_tm = kernel_tm.channel(p+3); out0_tm.fill(0.f); out1_tm.fill(0.f); out2_tm.fill(0.f); out3_tm.fill(0.f); int q = 0; for (; q+3<inch; q+=4) { const float* r0 = bottom_blob_tm.channel(q); const float* r1 = bottom_blob_tm.channel(q+1); const float* r2 = bottom_blob_tm.channel(q+2); const float* r3 = bottom_blob_tm.channel(q+3); const float* k00 = kernel0_tm.row(q); const float* k10 = kernel1_tm.row(q); const float* k20 = kernel2_tm.row(q); const float* k30 = kernel3_tm.row(q); float* output0_tm = out0_tm; float* output1_tm = out1_tm; float* output2_tm = out2_tm; float* output3_tm = out3_tm; // tile for (int i=0; i<h_tm/8 * w_tm/8; i++) { #if __ARM_NEON #if __aarch64__ for (int m=0; m+7<64; m+=8) { float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output1_tm = vld1q_f32(output1_tm); float32x4_t _output2_tm = vld1q_f32(output2_tm); float32x4_t _output3_tm = vld1q_f32(output3_tm); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r1 = vld1q_f32(r1); float32x4_t _r2 = vld1q_f32(r2); float32x4_t _r3 = vld1q_f32(r3); float32x4_t _k00 = vld1q_f32(k00); k00 += 64; float32x4_t _k01 = vld1q_f32(k00); k00 += 64; float32x4_t _k02 = vld1q_f32(k00); k00 += 64; float32x4_t _k03 = vld1q_f32(k00); k00 += 64; k00 -= 64*4; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tm = vmlaq_f32(_output0_tm, _r2, _k02); _output0_tm = vmlaq_f32(_output0_tm, _r3, _k03); float32x4_t _k10 = vld1q_f32(k10); k10 += 64; float32x4_t _k11 = vld1q_f32(k10); k10 += 64; float32x4_t _k12 = vld1q_f32(k10); k10 += 64; float32x4_t _k13 = vld1q_f32(k10); k10 += 64; k10 -= 64*4; _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tm = 
vmlaq_f32(_output1_tm, _r1, _k11); _output1_tm = vmlaq_f32(_output1_tm, _r2, _k12); _output1_tm = vmlaq_f32(_output1_tm, _r3, _k13); float32x4_t _k20 = vld1q_f32(k20); k20 += 64; float32x4_t _k21 = vld1q_f32(k20); k20 += 64; float32x4_t _k22 = vld1q_f32(k20); k20 += 64; float32x4_t _k23 = vld1q_f32(k20); k20 += 64; k20 -= 64*4; _output2_tm = vmlaq_f32(_output2_tm, _r0, _k20); _output2_tm = vmlaq_f32(_output2_tm, _r1, _k21); _output2_tm = vmlaq_f32(_output2_tm, _r2, _k22); _output2_tm = vmlaq_f32(_output2_tm, _r3, _k23); float32x4_t _k30 = vld1q_f32(k30); k30 += 64; float32x4_t _k31 = vld1q_f32(k30); k30 += 64; float32x4_t _k32 = vld1q_f32(k30); k30 += 64; float32x4_t _k33 = vld1q_f32(k30); k30 += 64; k30 -= 64*4; _output3_tm = vmlaq_f32(_output3_tm, _r0, _k30); _output3_tm = vmlaq_f32(_output3_tm, _r1, _k31); _output3_tm = vmlaq_f32(_output3_tm, _r2, _k32); _output3_tm = vmlaq_f32(_output3_tm, _r3, _k33); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output2_tm, _output2_tm); vst1q_f32(output3_tm, _output3_tm); output0_tm += 4; output1_tm += 4; output2_tm += 4; output3_tm += 4; r0 += 4; r1 += 4; r2 += 4; r3 += 4; k00 += 4; k10 += 4; k20 += 4; k30 += 4; float32x4_t _output0_tmn = vld1q_f32(output0_tm); float32x4_t _output1_tmn = vld1q_f32(output1_tm); float32x4_t _output2_tmn = vld1q_f32(output2_tm); float32x4_t _output3_tmn = vld1q_f32(output3_tm); float32x4_t _r0n = vld1q_f32(r0); float32x4_t _r1n = vld1q_f32(r1); float32x4_t _r2n = vld1q_f32(r2); float32x4_t _r3n = vld1q_f32(r3); float32x4_t _k00n = vld1q_f32(k00); k00 += 64; float32x4_t _k01n = vld1q_f32(k00); k00 += 64; float32x4_t _k02n = vld1q_f32(k00); k00 += 64; float32x4_t _k03n = vld1q_f32(k00); k00 += 64; k00 -= 64*4; _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); _output0_tmn = vmlaq_f32(_output0_tmn, _r2n, _k02n); _output0_tmn = vmlaq_f32(_output0_tmn, _r3n, _k03n); float32x4_t _k10n = vld1q_f32(k10); k10 += 
64; float32x4_t _k11n = vld1q_f32(k10); k10 += 64; float32x4_t _k12n = vld1q_f32(k10); k10 += 64; float32x4_t _k13n = vld1q_f32(k10); k10 += 64; k10 -= 64*4; _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); _output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n); _output1_tmn = vmlaq_f32(_output1_tmn, _r2n, _k12n); _output1_tmn = vmlaq_f32(_output1_tmn, _r3n, _k13n); float32x4_t _k20n = vld1q_f32(k20); k20 += 64; float32x4_t _k21n = vld1q_f32(k20); k20 += 64; float32x4_t _k22n = vld1q_f32(k20); k20 += 64; float32x4_t _k23n = vld1q_f32(k20); k20 += 64; k20 -= 64*4; _output2_tmn = vmlaq_f32(_output2_tmn, _r0n, _k20n); _output2_tmn = vmlaq_f32(_output2_tmn, _r1n, _k21n); _output2_tmn = vmlaq_f32(_output2_tmn, _r2n, _k22n); _output2_tmn = vmlaq_f32(_output2_tmn, _r3n, _k23n); float32x4_t _k30n = vld1q_f32(k30); k30 += 64; float32x4_t _k31n = vld1q_f32(k30); k30 += 64; float32x4_t _k32n = vld1q_f32(k30); k30 += 64; float32x4_t _k33n = vld1q_f32(k30); k30 += 64; k30 -= 64*4; _output3_tmn = vmlaq_f32(_output3_tmn, _r0n, _k30n); _output3_tmn = vmlaq_f32(_output3_tmn, _r1n, _k31n); _output3_tmn = vmlaq_f32(_output3_tmn, _r2n, _k32n); _output3_tmn = vmlaq_f32(_output3_tmn, _r3n, _k33n); vst1q_f32(output0_tm, _output0_tmn); vst1q_f32(output1_tm, _output1_tmn); vst1q_f32(output2_tm, _output2_tmn); vst1q_f32(output3_tm, _output3_tmn); output0_tm += 4; output1_tm += 4; output2_tm += 4; output3_tm += 4; r0 += 4; r1 += 4; r2 += 4; r3 += 4; k00 += 4; k10 += 4; k20 += 4; k30 += 4; } #else // __aarch64__ asm volatile( "mov r4, #8 \n" "pld [%0, #256] \n" "vld1.f32 {d16-d19}, [%0 :128]\n"//q8 q9 = _output0_tm "0: \n" "pld [%4, #256] \n" "vld1.f32 {d0-d3}, [%4 :128]! 
\n"//q0 q1 = _r0 "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]\n"//q10 q11 = _k00 "add %8, %8, #256 \n" "vmla.f32 q8, q0, q10 \n" "vmla.f32 q9, q1, q11 \n" "pld [%1, #256] \n" "vld1.f32 {d24-d27}, [%1 :128]\n"//q12 q13 = _output1_tm "pld [%9, #256] \n" "vld1.f32 {d28-d31}, [%9 :128]\n"//q14 q15 = _k10 "add %9, %9, #256 \n" "vmla.f32 q12, q0, q14 \n" "vmla.f32 q13, q1, q15 \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n"//q2 q3 = _r1 "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]\n"//q10 q11 = _k01 "add %8, %8, #256 \n" "vmla.f32 q8, q2, q10 \n" "vmla.f32 q9, q3, q11 \n" "pld [%9, #256] \n" "vld1.f32 {d28-d31}, [%9 :128]\n"//q14 q15 = _k11 "add %9, %9, #256 \n" "vmla.f32 q12, q2, q14 \n" "vmla.f32 q13, q3, q15 \n" "pld [%6, #256] \n" "vld1.f32 {d8-d11}, [%6 :128]!\n"//q4 q5 = _r2 "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]\n"//q10 q11 = _k02 "add %8, %8, #256 \n" "vmla.f32 q8, q4, q10 \n" "vmla.f32 q9, q5, q11 \n" "pld [%9, #256] \n" "vld1.f32 {d28-d31}, [%9 :128]\n"//q14 q15 = _k12 "add %9, %9, #256 \n" "vmla.f32 q12, q4, q14 \n" "vmla.f32 q13, q5, q15 \n" "pld [%7, #256] \n" "vld1.f32 {d12-d15}, [%7 :128]!\n"//q6 q7 = _r3 "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]\n"//q10 q11 = _k03 "sub %8, %8, #736 \n" "vmla.f32 q8, q6, q10 \n" "vmla.f32 q9, q7, q11 \n" "pld [%9, #256] \n" "vld1.f32 {d28-d31}, [%9 :128]\n"//q14 q15 = _k13 "sub %9, %9, #736 \n" "vmla.f32 q12, q6, q14 \n" "vmla.f32 q13, q7, q15 \n" "vst1.f32 {d16-d19}, [%0 :128]!\n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]\n"//q8 q9 = _output2_tm "pld [%10, #256] \n" "vld1.f32 {d20-d23}, [%10 :128]\n"//q10 q11 = _k20 "add %10, %10, #256 \n" "vmla.f32 q8, q0, q10 \n" "vmla.f32 q9, q1, q11 \n" "vst1.f32 {d24-d27}, [%1 :128]!\n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]\n"//q12 q13 = _output3_tm "pld [%11, #256] \n" "vld1.f32 {d28-d31}, [%11 :128]\n"//q14 q15 = _k30 "add %11, %11, #256 \n" "vmla.f32 q12, q0, q14 \n" "vmla.f32 q13, q1, q15 \n" "pld [%10, #256] \n" 
"vld1.f32 {d20-d23}, [%10 :128]\n"//q10 q11 = _k21 "add %10, %10, #256 \n" "vmla.f32 q8, q2, q10 \n" "vmla.f32 q9, q3, q11 \n" "pld [%11, #256] \n" "vld1.f32 {d28-d31}, [%11 :128]\n"//q14 q15 = _k31 "add %11, %11, #256 \n" "vmla.f32 q12, q2, q14 \n" "vmla.f32 q13, q3, q15 \n" "pld [%10, #256] \n" "vld1.f32 {d20-d23}, [%10 :128]\n"//q10 q11 = _k22 "add %10, %10, #256 \n" "vmla.f32 q8, q4, q10 \n" "vmla.f32 q9, q5, q11 \n" "pld [%11, #256] \n" "vld1.f32 {d28-d31}, [%11 :128]\n"//q14 q15 = _k32 "add %11, %11, #256 \n" "vmla.f32 q12, q4, q14 \n" "vmla.f32 q13, q5, q15 \n" "pld [%10, #256] \n" "vld1.f32 {d20-d23}, [%10 :128]\n"//q10 q11 = _k23 "sub %10, %10, #736 \n" "vmla.f32 q8, q6, q10 \n" "vmla.f32 q9, q7, q11 \n" "pld [%11, #256] \n" "vld1.f32 {d28-d31}, [%11 :128]\n"//q14 q15 = _k33 "sub %11, %11, #736 \n" "vmla.f32 q12, q6, q14 \n" "vmla.f32 q13, q7, q15 \n" "vst1.f32 {d16-d19}, [%2 :128]!\n" "pld [%0, #256] \n" "vld1.f32 {d16-d19}, [%0 :128]\n"//q8 q9 = _output0_tm "subs r4, r4, #1 \n" "vst1.f32 {d24-d27}, [%3 :128]!\n" "bne 0b \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(r1), // %5 "=r"(r2), // %6 "=r"(r3), // %7 "=r"(k00), // %8 "=r"(k10), // %9 "=r"(k20), // %10 "=r"(k30) // %11 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(r1), "6"(r2), "7"(r3), "8"(k00), "9"(k10), "10"(k20), "11"(k30) : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ k00 -= 64; k10 -= 64; k20 -= 64; k30 -= 64; #else for (int m=0; m<64; m++) { output0_tm[m] += r0[m] * k00[m]; k00 += 64; output0_tm[m] += r1[m] * k00[m]; k00 += 64; output0_tm[m] += r2[m] * k00[m]; k00 += 64; output0_tm[m] += r3[m] * k00[m]; k00 += 64; k00 -= 64 * 4; output1_tm[m] += r0[m] * k10[m]; k10 += 64; output1_tm[m] += r1[m] * k10[m]; k10 += 64; output1_tm[m] += r2[m] * k10[m]; k10 += 64; output1_tm[m] += 
r3[m] * k10[m]; k10 += 64; k10 -= 64 * 4; output2_tm[m] += r0[m] * k20[m]; k20 += 64; output2_tm[m] += r1[m] * k20[m]; k20 += 64; output2_tm[m] += r2[m] * k20[m]; k20 += 64; output2_tm[m] += r3[m] * k20[m]; k20 += 64; k20 -= 64 * 4; output3_tm[m] += r0[m] * k30[m]; k30 += 64; output3_tm[m] += r1[m] * k30[m]; k30 += 64; output3_tm[m] += r2[m] * k30[m]; k30 += 64; output3_tm[m] += r3[m] * k30[m]; k30 += 64; k30 -= 64 * 4; } r0 += 64; r1 += 64; r2 += 64; r3 += 64; output0_tm += 64; output1_tm += 64; output2_tm += 64; output3_tm += 64; #endif // __ARM_NEON } } for (; q<inch; q++) { const float* r0 = bottom_blob_tm.channel(q); const float* k0 = kernel0_tm.row(q); const float* k1 = kernel1_tm.row(q); const float* k2 = kernel2_tm.row(q); const float* k3 = kernel3_tm.row(q); float* output0_tm = out0_tm; float* output1_tm = out1_tm; float* output2_tm = out2_tm; float* output3_tm = out3_tm; // tile for (int i=0; i<h_tm/8 * w_tm/8; i++) { // TODO neon optimize for (int m=0; m<64; m++) { output0_tm[m] += r0[m] * k0[m]; output1_tm[m] += r0[m] * k1[m]; output2_tm[m] += r0[m] * k2[m]; output3_tm[m] += r0[m] * k3[m]; } r0 += 64; output0_tm += 64; output1_tm += 64; output2_tm += 64; output3_tm += 64; } } } #pragma omp parallel for for (int p=remain_outch_start; p<outch; p++) { Mat out0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); out0_tm.fill(0.f); int q = 0; for (; q+3<inch; q+=4) { const float* r0 = bottom_blob_tm.channel(q); const float* r1 = bottom_blob_tm.channel(q+1); const float* r2 = bottom_blob_tm.channel(q+2); const float* r3 = bottom_blob_tm.channel(q+3); const float* k0 = kernel0_tm.row(q); const float* k1 = kernel0_tm.row(q+1); const float* k2 = kernel0_tm.row(q+2); const float* k3 = kernel0_tm.row(q+3); float* output0_tm = out0_tm; // tile for (int i=0; i<h_tm/8 * w_tm/8; i++) { #if __ARM_NEON #if __aarch64__ for (int m=0; m+7<64; m+=8) { float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r1 = 
vld1q_f32(r1); float32x4_t _r2 = vld1q_f32(r2); float32x4_t _r3 = vld1q_f32(r3); float32x4_t _k0 = vld1q_f32(k0); float32x4_t _k1 = vld1q_f32(k1); float32x4_t _k2 = vld1q_f32(k2); float32x4_t _k3 = vld1q_f32(k3); _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1); _output0_tm = vmlaq_f32(_output0_tm, _r2, _k2); _output0_tm = vmlaq_f32(_output0_tm, _r3, _k3); vst1q_f32(output0_tm, _output0_tm); output0_tm += 4; r0 += 4; r1 += 4; r2 += 4; r3 += 4; k0 += 4; k1 += 4; k2 += 4; k3 += 4; float32x4_t _output0_tmn = vld1q_f32(output0_tm); float32x4_t _r0n = vld1q_f32(r0); float32x4_t _r1n = vld1q_f32(r1); float32x4_t _r2n = vld1q_f32(r2); float32x4_t _r3n = vld1q_f32(r3); float32x4_t _k0n = vld1q_f32(k0); float32x4_t _k1n = vld1q_f32(k1); float32x4_t _k2n = vld1q_f32(k2); float32x4_t _k3n = vld1q_f32(k3); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n); _output0_tmn = vmlaq_f32(_output0_tmn, _r2n, _k2n); _output0_tmn = vmlaq_f32(_output0_tmn, _r3n, _k3n); vst1q_f32(output0_tm, _output0_tmn); output0_tm += 4; r0 += 4; r1 += 4; r2 += 4; r3 += 4; k0 += 4; k1 += 4; k2 += 4; k3 += 4; } #else asm volatile( "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" "mov r4, %0 \n" "pld [%0, #256] \n" "vld1.f32 {d24-d27}, [%0 :128]!\n"//q12 q13 = output0_tm "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q12, q0, q2 \n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]!\n" "vmla.f32 q13, q1, q3 \n" "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6 :128]!\n" "vmla.f32 q12, q8, q10 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" "vmla.f32 q13, q9, q11 \n" "pld [%7, #256] \n" "vld1.f32 {d4-d7}, [%7 :128]! 
\n" "vmla.f32 q12, q0, q2 \n" "pld [%4, #256] \n" "vld1.f32 {d16-d19}, [%4 :128]!\n" "vmla.f32 q13, q1, q3 \n" "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]!\n" "vmla.f32 q12, q8, q10 \n" "pld [%0, #256] \n" "vld1.f32 {d28-d31}, [%0 :128]!\n"//q14 q15 = output0_tm "vmla.f32 q13, q9, q11 \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q14, q0, q2 \n" "vst1.f32 {d24-d27}, [r4 :128]!\n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]!\n" "vmla.f32 q15, q1, q3 \n" "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6 :128]!\n" "vmla.f32 q14, q8, q10 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" "vmla.f32 q15, q9, q11 \n" "pld [%7, #256] \n" "vld1.f32 {d4-d7}, [%7 :128]! \n" "vmla.f32 q14, q0, q2 \n" "pld [%4, #256] \n" "vld1.f32 {d16-d19}, [%4 :128]!\n" "vmla.f32 q15, q1, q3 \n" "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]!\n" "vmla.f32 q14, q8, q10 \n" "pld [%0, #256] \n" "vld1.f32 {d24-d27}, [%0 :128]!\n"//q12 q13 = output0_tm "vmla.f32 q15, q9, q11 \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q12, q0, q2 \n" "vst1.f32 {d28-d31}, [r4 :128]!\n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]!\n" "vmla.f32 q13, q1, q3 \n" "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6 :128]!\n" "vmla.f32 q12, q8, q10 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" "vmla.f32 q13, q9, q11 \n" "pld [%7, #256] \n" "vld1.f32 {d4-d7}, [%7 :128]! \n" "vmla.f32 q12, q0, q2 \n" "pld [%4, #256] \n" "vld1.f32 {d16-d19}, [%4 :128]!\n" "vmla.f32 q13, q1, q3 \n" "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]!\n" "vmla.f32 q12, q8, q10 \n" "pld [%0, #256] \n" "vld1.f32 {d28-d31}, [%0 :128]!\n"//q14 q15 = output0_tm "vmla.f32 q13, q9, q11 \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! 
\n" "vmla.f32 q14, q0, q2 \n" "vst1.f32 {d24-d27}, [r4 :128]!\n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]!\n" "vmla.f32 q15, q1, q3 \n" "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6 :128]!\n" "vmla.f32 q14, q8, q10 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" "vmla.f32 q15, q9, q11 \n" "pld [%7, #256] \n" "vld1.f32 {d4-d7}, [%7 :128]! \n" "vmla.f32 q14, q0, q2 \n" "pld [%4, #256] \n" "vld1.f32 {d16-d19}, [%4 :128]!\n" "vmla.f32 q15, q1, q3 \n" "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]!\n" "vmla.f32 q14, q8, q10 \n" "pld [%0, #256] \n" "vld1.f32 {d24-d27}, [%0 :128]!\n"//q12 q13 = output0_tm "vmla.f32 q15, q9, q11 \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q12, q0, q2 \n" "vst1.f32 {d28-d31}, [r4 :128]!\n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]!\n" "vmla.f32 q13, q1, q3 \n" "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6 :128]!\n" "vmla.f32 q12, q8, q10 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" "vmla.f32 q13, q9, q11 \n" "pld [%7, #256] \n" "vld1.f32 {d4-d7}, [%7 :128]! \n" "vmla.f32 q12, q0, q2 \n" "pld [%4, #256] \n" "vld1.f32 {d16-d19}, [%4 :128]!\n" "vmla.f32 q13, q1, q3 \n" "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]!\n" "vmla.f32 q12, q8, q10 \n" "pld [%0, #256] \n" "vld1.f32 {d28-d31}, [%0 :128]!\n"//q14 q15 = output0_tm "vmla.f32 q13, q9, q11 \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q14, q0, q2 \n" "vst1.f32 {d24-d27}, [r4 :128]!\n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]!\n" "vmla.f32 q15, q1, q3 \n" "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6 :128]!\n" "vmla.f32 q14, q8, q10 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" "vmla.f32 q15, q9, q11 \n" "pld [%7, #256] \n" "vld1.f32 {d4-d7}, [%7 :128]! 
\n" "vmla.f32 q14, q0, q2 \n" "pld [%4, #256] \n" "vld1.f32 {d16-d19}, [%4 :128]!\n" "vmla.f32 q15, q1, q3 \n" "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]!\n" "vmla.f32 q14, q8, q10 \n" "pld [%0, #256] \n" "vld1.f32 {d24-d27}, [%0 :128]!\n"//q12 q13 = output0_tm "vmla.f32 q15, q9, q11 \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q12, q0, q2 \n" "vst1.f32 {d28-d31}, [r4 :128]!\n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]!\n" "vmla.f32 q13, q1, q3 \n" "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6 :128]!\n" "vmla.f32 q12, q8, q10 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" "vmla.f32 q13, q9, q11 \n" "pld [%7, #256] \n" "vld1.f32 {d4-d7}, [%7 :128]! \n" "vmla.f32 q12, q0, q2 \n" "pld [%4, #256] \n" "vld1.f32 {d16-d19}, [%4 :128]!\n" "vmla.f32 q13, q1, q3 \n" "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]!\n" "vmla.f32 q12, q8, q10 \n" "pld [%0, #256] \n" "vld1.f32 {d28-d31}, [%0 :128]!\n"//q14 q15 = output0_tm "vmla.f32 q13, q9, q11 \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q14, q0, q2 \n" "vst1.f32 {d24-d27}, [r4 :128]!\n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]!\n" "vmla.f32 q15, q1, q3 \n" "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6 :128]!\n" "vmla.f32 q14, q8, q10 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" "vmla.f32 q15, q9, q11 \n" "pld [%7, #256] \n" "vld1.f32 {d4-d7}, [%7 :128]! 
\n" "vmla.f32 q14, q0, q2 \n" "pld [%4, #256] \n" "vld1.f32 {d16-d19}, [%4 :128]!\n" "vmla.f32 q15, q1, q3 \n" "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]!\n" "vmla.f32 q14, q8, q10 \n" "vmla.f32 q15, q9, q11 \n" "vst1.f32 {d28-d31}, [r4 :128]!\n" : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(k0), // %5 "=r"(k1), // %6 "=r"(k2), // %7 "=r"(k3) // %8 : "0"(output0_tm), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(k0), "6"(k1), "7"(k2), "8"(k3) : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ k0 -= 64; k1 -= 64; k2 -= 64; k3 -= 64; #else for (int m=0; m<64; m++) { output0_tm[m] += r0[m] * k0[m]; output0_tm[m] += r1[m] * k1[m]; output0_tm[m] += r2[m] * k2[m]; output0_tm[m] += r3[m] * k3[m]; } r0 += 64; r1 += 64; r2 += 64; r3 += 64; output0_tm += 64; #endif // __ARM_NEON } } for (; q<inch; q++) { const float* r0 = bottom_blob_tm.channel(q); const float* k0 = kernel0_tm.row(q); float* output0_tm = out0_tm; // tile for (int i=0; i<h_tm/8 * w_tm/8; i++) { // TODO neon optimize for (int m=0; m<64; m++) { output0_tm[m] += r0[m] * k0[m]; } r0 += 64; output0_tm += 64; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch); { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ 
(r5 - r6) int w_tm = outw / 6 * 8; #pragma omp parallel for for (int p = 0; p<outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); const float bias0 = bias ? bias[p] : 0.f; float tmp[6][8]; // tile for (int i=0; i<outh/6; i++) { for (int j=0; j<outw/6; j++) { const float* output0_tm = out0_tm.row(i * w_tm/8 + j); float* output0 = out0.row(i * 6) + j * 6; // TODO neon optimize for (int m=0; m<8; m++) { float tmp024a = output0_tm[1] + output0_tm[2]; float tmp135a = output0_tm[1] - output0_tm[2]; float tmp024b = output0_tm[3] + output0_tm[4]; float tmp135b = output0_tm[3] - output0_tm[4]; float tmp024c = output0_tm[5] + output0_tm[6]; float tmp135c = output0_tm[5] - output0_tm[6]; tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32; tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c; output0_tm += 8; } for (int m=0; m<6; m++) { const float* tmp0 = tmp[m]; float tmp024a = tmp0[1] + tmp0[2]; float tmp135a = tmp0[1] - tmp0[2]; float tmp024b = tmp0[3] + tmp0[4]; float tmp135b = tmp0[3] - tmp0[4]; float tmp024c = tmp0[5] + tmp0[6]; float tmp135c = tmp0[5] - tmp0[6]; output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32; output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8; output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c; output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16; output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4; output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c; output0 += outw; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w); } static void conv3x3s1_winograd64_neon2(const Mat& bottom_blob, Mat& top_blob, 
const Mat& kernel_tm, const Mat& _bias)
{
    // Winograd F(6x6, 3x3) convolution, stride 1.
    // Pipeline: pad input -> 8x8 input tile transform -> element-wise
    // multiply-accumulate in the transform domain ("dot") -> 6x6 output
    // tile transform with bias -> crop back to the requested output size.
    // NOTE(review): assumes kernel_tm already holds the kernels transformed
    // into the 8x8 Winograd domain by the matching setup routine, with 16
    // floats per (outch, inch) pair per row group — confirm at the caller.
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 6n+2
    // Each 8x8 input tile produces a 6x6 output tile with 2 pixels of
    // overlap, so the padded input must be (6n+2) x (6m+2).
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 5) / 6 * 6;
    outh = (outh + 5) / 6 * 6;

    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f);

    const float* bias = _bias;

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;
        // Layout: one channel per input channel; 4 row groups of `tiles`
        // rows, each row holding 16 floats (two 8-float transform rows).
        bottom_blob_tm.create(2*8, 4 * w_tm/8 * h_tm/8, inch);
        const int tiles = w_tm/8 * h_tm/8;

        // Input transform matrix B^T for F(6x6, 3x3):
        // const float itm[8][8] = {
        //     {1.0f,  0.0f, -5.25f,  0.00f,  5.25f,  0.00f, -1.0f, 0.0f},
        //
        //     {0.0f,  1.0f,  1.00f, -4.25f, -4.25f,  1.00f,  1.0f, 0.0f},
        //     {0.0f, -1.0f,  1.00f,  4.25f, -4.25f, -1.00f,  1.0f, 0.0f},
        //
        //     {0.0f,  0.5f,  0.25f, -2.50f, -1.25f,  2.00f,  1.0f, 0.0f},
        //     {0.0f, -0.5f,  0.25f,  2.50f, -1.25f, -2.00f,  1.0f, 0.0f},
        //
        //     {0.0f,  2.0f,  4.00f, -2.50f, -5.00f,  0.50f,  1.0f, 0.0f},
        //     {0.0f, -2.0f,  4.00f,  2.50f, -5.00f, -0.50f,  1.0f, 0.0f},
        //
        //     {0.0f, -1.0f,  0.00f,  5.25f,  0.00f, -5.25f,  0.0f, 1.0f}
        // };

        // Factored evaluation of the rows above (shared subexpressions):
        // 0 = r00 - r06 + (r04 - r02) * 5.25
        // 7 = r07 - r01 + (r03 - r05) * 5.25

        // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
        // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)

        // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
        // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)

        // reuse r04 * 1.25
        // reuse r03 * 2.5
        // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
        // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)

        #pragma omp parallel for
        for (int q = 0; q<inch; q++)
        {
            const Mat img0 = bottom_blob_bordered.channel(q);
            Mat img0_tm = bottom_blob_tm.channel(q);

            // tmp is per-iteration (hence private to each OpenMP thread);
            // holds the tile after the row pass, before the column pass.
            float tmp[8][8];

            // tile
            for (int i=0; i<h_tm/8; i++)
            {
                for (int j=0; j<w_tm/8; j++)
                {
                    // Tiles advance by 6 pixels (2-pixel overlap between 8x8 reads).
                    const float* r0 = img0.row(i * 6) + j * 6;

                    // Four destination rows: row group g holds transform rows
                    // 2g and 2g+1 (8 floats each) for this tile.
                    float* r0_tm01 = img0_tm.row(i * w_tm/8 + j);
                    float* r0_tm23 = img0_tm.row(tiles + i * w_tm/8 + j);
                    float* r0_tm45 = img0_tm.row(tiles * 2 + i * w_tm/8 + j);
                    float* r0_tm67 = img0_tm.row(tiles * 3 + i * w_tm/8 + j);

                    // Row pass: apply B^T to each of the 8 input rows.
                    for (int m=0; m<8; m++)
                    {
                        tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25;
                        tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25;

                        float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25);
                        float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25);

                        tmp[1][m] = tmp12a + tmp12b;
                        tmp[2][m] = tmp12a - tmp12b;

                        float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25);
                        float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2);

                        tmp[3][m] = tmp34a + tmp34b;
                        tmp[4][m] = tmp34a - tmp34b;

                        float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4);
                        float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5);

                        tmp[5][m] = tmp56a + tmp56b;
                        tmp[6][m] = tmp56a - tmp56b;

                        r0 += w;
                    }

                    float* r0_tms[4] = { r0_tm01, r0_tm23, r0_tm45, r0_tm67 };

                    // Column pass: apply B^T across the intermediate rows and
                    // scatter transform row m into its row group (m/2) at
                    // offset 0 or 8 (m%2).
                    for (int m=0; m<8; m++)
                    {
                        const float* tmp0 = tmp[m];
                        float* r0_tm = r0_tms[m/2] + (m%2) * 8;

                        r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25;
                        r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25;

                        float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25);
                        float tmp12b = (tmp0[1] - tmp0[3] * 4.25 + tmp0[5]);

                        r0_tm[1] = tmp12a + tmp12b;
                        r0_tm[2] = tmp12a - tmp12b;

                        float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25);
                        float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2);

                        r0_tm[3] = tmp34a + tmp34b;
                        r0_tm[4] = tmp34a - tmp34b;

                        float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4);
                        float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5);

                        r0_tm[5] = tmp56a + tmp56b;
                        r0_tm[6] = tmp56a - tmp56b;
                    }
                }
            }
        }
    }
    bottom_blob_bordered = Mat(); // release padded input early

    // END transform input

    // BEGIN dot
    // Element-wise multiply-accumulate in the transform domain:
    // out[p] += sum_q in_tm[q] * kernel_tm[p][q], done per 16-float row
    // group (4 groups per tile), with input channels processed two at a time.
    Mat top_blob_tm;
    {
        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;
        top_blob_tm.create(2*8, 4 * w_tm/8 * h_tm/8, outch);

        const int tiles = h_tm/8 * w_tm/8;

        #pragma omp parallel for
        for (int p = 0; p<outch; p++)
        {
            Mat out0_tm = top_blob_tm.channel(p);
            const Mat kernel0_tm = kernel_tm.channel(p);

            out0_tm.fill(0.f);

            int q = 0;
            // Main path: two input channels per pass to improve reuse.
            for (; q+1<inch; q+=2)
            {
                const float* r0 = bottom_blob_tm.channel(q);
                const float* r1 = bottom_blob_tm.channel(q+1);

                const float* k0 = kernel0_tm.row(q);
                const float* k1 = kernel0_tm.row(q+1);

                float* output0_tm = out0_tm;

                // One pass per row group; each group uses a fresh 16-float
                // kernel slice per input channel.
                for (int r=0; r<4; r++)
                {
#if __ARM_NEON
#if __aarch64__
                    float32x4_t _k0 = vld1q_f32(k0);
                    float32x4_t _k0n = vld1q_f32(k0+4);
                    float32x4_t _k0nn = vld1q_f32(k0+8);
                    float32x4_t _k0nnn = vld1q_f32(k0+12);
                    float32x4_t _k1 = vld1q_f32(k1);
                    float32x4_t _k1n = vld1q_f32(k1+4);
                    float32x4_t _k1nn = vld1q_f32(k1+8);
                    float32x4_t _k1nnn = vld1q_f32(k1+12);
#else
                    // ARMv7: load both 16-float kernel slices with
                    // post-incremented NEON loads (k0/k1 advance by 64 bytes).
                    float32x4_t _k0;
                    float32x4_t _k0n;
                    float32x4_t _k0nn;
                    float32x4_t _k0nnn;
                    float32x4_t _k1;
                    float32x4_t _k1n;
                    float32x4_t _k1nn;
                    float32x4_t _k1nnn;
                    asm volatile(
                        "pld [%0, #512] \n"
                        "vld1.f32 {%e2-%f2}, [%0 :128]! \n"
                        "pld [%1, #512] \n"
                        "vld1.f32 {%e4-%f4}, [%1 :128]! \n"
                        "vld1.f32 {%e3-%f3}, [%0 :128]! \n"
                        "vld1.f32 {%e5-%f5}, [%1 :128]! \n"
                        "vld1.f32 {%e6-%f6}, [%0 :128]! \n"
                        "vld1.f32 {%e8-%f8}, [%1 :128]! \n"
                        "vld1.f32 {%e7-%f7}, [%0 :128]! \n"
                        "vld1.f32 {%e9-%f9}, [%1 :128]! \n"
                        : "=r"(k0), // %0
                        "=r"(k1), // %1
                        "=w"(_k0), // %2
                        "=w"(_k0n), // %3
                        "=w"(_k1), // %4
                        "=w"(_k1n), // %5
                        "=w"(_k0nn), // %6
                        "=w"(_k0nnn), // %7
                        "=w"(_k1nn), // %8
                        "=w"(_k1nnn) // %9
                        : "0"(k0),
                        "1"(k1)
                        : "cc", "memory"
                    );
#endif // __aarch64__
#endif // __ARM_NEON

                    // tile
#if __ARM_NEON
                    int nn = tiles >> 2; // 4 tiles (8 stanzas of 8 floats) per iteration
                    int remain = tiles & 3;
#else
                    int remain = tiles;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                    for (; nn>0; nn--)
                    {
                        // 8 unrolled stanzas: each handles 8 floats, alternating
                        // kernel halves (_k0/_k0n/_k1/_k1n vs _k0nn/_k0nnn/_k1nn/_k1nnn)
                        // so that two stanzas consume one full 16-float tile row.
                        float32x4_t _output0_tm = vld1q_f32(output0_tm);
                        float32x4_t _output0_tmn = vld1q_f32(output0_tm+4);
                        float32x4_t _r0 = vld1q_f32(r0);
                        float32x4_t _r0n = vld1q_f32(r0+4);
                        float32x4_t _r1 = vld1q_f32(r1);
                        float32x4_t _r1n = vld1q_f32(r1+4);

                        r0 += 8;
                        r1 += 8;

                        _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0);
                        _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n);
                        _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1);
                        _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n);

                        vst1q_f32(output0_tm, _output0_tm);
                        vst1q_f32(output0_tm+4, _output0_tmn);

                        output0_tm += 8;

                        _output0_tm = vld1q_f32(output0_tm);
                        _output0_tmn = vld1q_f32(output0_tm+4);
                        _r0 = vld1q_f32(r0);
                        _r0n = vld1q_f32(r0+4);
                        _r1 = vld1q_f32(r1);
                        _r1n = vld1q_f32(r1+4);

                        r0 += 8;
                        r1 += 8;

                        _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn);
                        _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn);
                        _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1nn);
                        _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1nnn);

                        vst1q_f32(output0_tm, _output0_tm);
                        vst1q_f32(output0_tm+4, _output0_tmn);

                        output0_tm += 8;

                        _output0_tm = vld1q_f32(output0_tm);
                        _output0_tmn = vld1q_f32(output0_tm+4);
                        _r0 = vld1q_f32(r0);
                        _r0n = vld1q_f32(r0+4);
                        _r1 = vld1q_f32(r1);
                        _r1n = vld1q_f32(r1+4);

                        r0 += 8;
                        r1 += 8;

                        _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0);
                        _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n);
                        _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1);
                        _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n);

                        vst1q_f32(output0_tm, _output0_tm);
                        vst1q_f32(output0_tm+4, _output0_tmn);

                        output0_tm += 8;

                        _output0_tm = vld1q_f32(output0_tm);
                        _output0_tmn = vld1q_f32(output0_tm+4);
                        _r0 = vld1q_f32(r0);
                        _r0n = vld1q_f32(r0+4);
                        _r1 = vld1q_f32(r1);
                        _r1n = vld1q_f32(r1+4);

                        r0 += 8;
                        r1 += 8;

                        _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn);
                        _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn);
                        _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1nn);
                        _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1nnn);

                        vst1q_f32(output0_tm, _output0_tm);
                        vst1q_f32(output0_tm+4, _output0_tmn);

                        output0_tm += 8;

                        _output0_tm = vld1q_f32(output0_tm);
                        _output0_tmn = vld1q_f32(output0_tm+4);
                        _r0 = vld1q_f32(r0);
                        _r0n = vld1q_f32(r0+4);
                        _r1 = vld1q_f32(r1);
                        _r1n = vld1q_f32(r1+4);

                        r0 += 8;
                        r1 += 8;

                        _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0);
                        _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n);
                        _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1);
                        _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n);

                        vst1q_f32(output0_tm, _output0_tm);
                        vst1q_f32(output0_tm+4, _output0_tmn);

                        output0_tm += 8;

                        _output0_tm = vld1q_f32(output0_tm);
                        _output0_tmn = vld1q_f32(output0_tm+4);
                        _r0 = vld1q_f32(r0);
                        _r0n = vld1q_f32(r0+4);
                        _r1 = vld1q_f32(r1);
                        _r1n = vld1q_f32(r1+4);

                        r0 += 8;
                        r1 += 8;

                        _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn);
                        _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn);
                        _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1nn);
                        _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1nnn);

                        vst1q_f32(output0_tm, _output0_tm);
                        vst1q_f32(output0_tm+4, _output0_tmn);

                        output0_tm += 8;

                        _output0_tm = vld1q_f32(output0_tm);
                        _output0_tmn = vld1q_f32(output0_tm+4);
                        _r0 = vld1q_f32(r0);
                        _r0n = vld1q_f32(r0+4);
                        _r1 = vld1q_f32(r1);
                        _r1n = vld1q_f32(r1+4);

                        r0 += 8;
                        r1 += 8;

                        _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0);
                        _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n);
                        _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1);
                        _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n);

                        vst1q_f32(output0_tm, _output0_tm);
                        vst1q_f32(output0_tm+4, _output0_tmn);

                        output0_tm += 8;

                        _output0_tm = vld1q_f32(output0_tm);
                        _output0_tmn = vld1q_f32(output0_tm+4);
                        _r0 = vld1q_f32(r0);
                        _r0n = vld1q_f32(r0+4);
                        _r1 = vld1q_f32(r1);
                        _r1n = vld1q_f32(r1+4);

                        r0 += 8;
                        r1 += 8;

                        _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn);
                        _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn);
                        _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1nn);
                        _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1nnn);

                        vst1q_f32(output0_tm, _output0_tm);
                        vst1q_f32(output0_tm+4, _output0_tmn);

                        output0_tm += 8;
                    }
#else
                    if (nn > 0)
                    {
                        // ARMv7 hand-scheduled 4-tile loop. Loads of the next
                        // vectors are interleaved with MLAs of the current ones;
                        // r4 trails %1 as the store pointer, hence the final
                        // "sub %1, #32" / "sub %2, #64" to undo over-advance.
                        asm volatile(
                            "mov r4, %1 \n"
                            "pld [%2, #256] \n"
                            "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
                            "pld [%1, #256] \n"
                            "vld1.f32 {d16-d19}, [%1 :128]! \n"// q8 q9 = _output0_tm
                            "vmla.f32 q8, q12, %q8 \n"
                            "vmla.f32 q9, q13, %q9 \n"
                            "pld [%2, #256] \n"
                            "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
                            "0: \n"
                            "pld [%1, #256] \n"
                            "vld1.f32 {d20-d23}, [%1 :128]! \n"// q10 q11 = _output0_tm
                            "vmla.f32 q10, q12, %q12 \n"
                            "vmla.f32 q11, q13, %q13 \n"
                            "pld [%3, #256] \n"
                            "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
                            "vmla.f32 q8, q14, %q10 \n"
                            "vmla.f32 q9, q15, %q11 \n"
                            "pld [%3, #256] \n"
                            "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
                            "pld [%2, #256] \n"
                            "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
                            "vmla.f32 q10, q14, %q14 \n"
                            "vmla.f32 q11, q15, %q15 \n"
                            "vst1.f32 {d16-d19}, [r4 :128]! \n"
                            "pld [%1, #256] \n"
                            "vld1.f32 {d16-d19}, [%1 :128]! \n"// q8 q9 = _output0_tm
                            "vmla.f32 q8, q12, %q8 \n"
                            "vmla.f32 q9, q13, %q9 \n"
                            "pld [%2, #256] \n"
                            "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
                            "vst1.f32 {d20-d23}, [r4 :128]! \n"
                            "pld [%1, #256] \n"
                            "vld1.f32 {d20-d23}, [%1 :128]! \n"// q10 q11 = _output0_tm
                            "vmla.f32 q10, q12, %q12 \n"
                            "vmla.f32 q11, q13, %q13 \n"
                            "pld [%3, #256] \n"
                            "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
                            "vmla.f32 q8, q14, %q10 \n"
                            "vmla.f32 q9, q15, %q11 \n"
                            "pld [%3, #256] \n"
                            "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
                            "pld [%2, #256] \n"
                            "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
                            "vmla.f32 q10, q14, %q14 \n"
                            "vmla.f32 q11, q15, %q15 \n"
                            "vst1.f32 {d16-d19}, [r4 :128]! \n"
                            "pld [%1, #256] \n"
                            "vld1.f32 {d16-d19}, [%1 :128]! \n"// q8 q9 = _output0_tm
                            "vmla.f32 q8, q12, %q8 \n"
                            "vmla.f32 q9, q13, %q9 \n"
                            "pld [%2, #256] \n"
                            "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
                            "vst1.f32 {d20-d23}, [r4 :128]! \n"
                            "pld [%1, #256] \n"
                            "vld1.f32 {d20-d23}, [%1 :128]! \n"// q10 q11 = _output0_tm
                            "vmla.f32 q10, q12, %q12 \n"
                            "vmla.f32 q11, q13, %q13 \n"
                            "pld [%3, #256] \n"
                            "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
                            "vmla.f32 q8, q14, %q10 \n"
                            "vmla.f32 q9, q15, %q11 \n"
                            "pld [%3, #256] \n"
                            "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
                            "pld [%2, #256] \n"
                            "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
                            "vmla.f32 q10, q14, %q14 \n"
                            "vmla.f32 q11, q15, %q15 \n"
                            "vst1.f32 {d16-d19}, [r4 :128]! \n"
                            "pld [%1, #256] \n"
                            "vld1.f32 {d16-d19}, [%1 :128]! \n"// q8 q9 = _output0_tm
                            "vmla.f32 q8, q12, %q8 \n"
                            "vmla.f32 q9, q13, %q9 \n"
                            "pld [%2, #256] \n"
                            "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
                            "vst1.f32 {d20-d23}, [r4 :128]! \n"
                            "pld [%1, #256] \n"
                            "vld1.f32 {d20-d23}, [%1 :128]! \n"// q10 q11 = _output0_tm
                            "vmla.f32 q10, q12, %q12 \n"
                            "vmla.f32 q11, q13, %q13 \n"
                            "pld [%3, #256] \n"
                            "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
                            "vmla.f32 q8, q14, %q10 \n"
                            "vmla.f32 q9, q15, %q11 \n"
                            "pld [%3, #256] \n"
                            "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
                            "pld [%2, #256] \n"
                            "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
                            "vmla.f32 q10, q14, %q14 \n"
                            "vmla.f32 q11, q15, %q15 \n"
                            "vst1.f32 {d16-d19}, [r4 :128]! \n"
                            "pld [%1, #256] \n"
                            "vld1.f32 {d16-d19}, [%1 :128]! \n"// q8 q9 = _output0_tm
                            "vmla.f32 q8, q12, %q8 \n"
                            "vmla.f32 q9, q13, %q9 \n"
                            "pld [%2, #256] \n"
                            "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
                            "subs %0, #1 \n"
                            "vst1.f32 {d20-d23}, [r4 :128]! \n"
                            "bne 0b \n"
                            "sub %1, #32 \n"
                            "sub %2, #64 \n"
                            : "=r"(nn), // %0
                            "=r"(output0_tm), // %1
                            "=r"(r0), // %2
                            "=r"(r1) // %3
                            : "0"(nn),
                            "1"(output0_tm),
                            "2"(r0),
                            "3"(r1),
                            "w"(_k0), // %8
                            "w"(_k0n), // %9
                            "w"(_k1), // %10
                            "w"(_k1n), // %11
                            "w"(_k0nn), // %12
                            "w"(_k0nnn), // %13
                            "w"(_k1nn), // %14
                            "w"(_k1nnn) // %15
                            : "cc", "memory", "r4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                        );
                    }
#endif // __aarch64__
#endif // __ARM_NEON
                    // Leftover tiles (0-3) after the 4x unrolled loop,
                    // or all tiles on non-NEON builds.
                    for (; remain>0; remain--)
                    {
#if __ARM_NEON
#if __aarch64__
                        float32x4_t _output0_tm = vld1q_f32(output0_tm);
                        float32x4_t _output0_tmn = vld1q_f32(output0_tm+4);
                        float32x4_t _r0 = vld1q_f32(r0);
                        float32x4_t _r0n = vld1q_f32(r0+4);
                        float32x4_t _r1 = vld1q_f32(r1);
                        float32x4_t _r1n = vld1q_f32(r1+4);

                        r0 += 8;
                        r1 += 8;

                        _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0);
                        _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n);
                        _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1);
                        _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n);

                        vst1q_f32(output0_tm, _output0_tm);
                        vst1q_f32(output0_tm+4, _output0_tmn);

                        output0_tm += 8;

                        _output0_tm = vld1q_f32(output0_tm);
                        _output0_tmn = vld1q_f32(output0_tm+4);
                        _r0 = vld1q_f32(r0);
                        _r0n = vld1q_f32(r0+4);
                        _r1 = vld1q_f32(r1);
                        _r1n = vld1q_f32(r1+4);

                        r0 += 8;
                        r1 += 8;

                        _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn);
                        _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn);
                        _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1nn);
                        _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1nnn);

                        vst1q_f32(output0_tm, _output0_tm);
                        vst1q_f32(output0_tm+4, _output0_tmn);

                        output0_tm += 8;
#else
                        asm volatile(
                            "mov r4, %0 \n"
                            "pld [%1, #256] \n"
                            "vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0
                            "pld [%0, #256] \n"
                            "vld1.f32 {d16-d19}, [%0 :128]! \n"// q8 q9 = _output0_tm
                            "vmla.f32 q8, q12, %q6 \n"
                            "pld [%2, #256] \n"
                            "vld1.f32 {d28-d31}, [%2 :128]! \n"// q14 q15 = _r1
                            "vmla.f32 q9, q13, %q7 \n"
                            "pld [%1, #256] \n"
                            "vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0
                            "vmla.f32 q8, q14, %q8 \n"
                            "pld [%0, #256] \n"
                            "vld1.f32 {d20-d23}, [%0 :128] \n"// q10 q11 = _output0_tm
                            "vmla.f32 q9, q15, %q9 \n"
                            "vmla.f32 q10, q12, %q10 \n"
                            "vmla.f32 q11, q13, %q11 \n"
                            "vst1.f32 {d16-d19}, [r4 :128] \n"
                            "pld [%2, #256] \n"
                            "vld1.f32 {d28-d31}, [%2 :128]! \n"// q14 q15 = _r1
                            "vmla.f32 q10, q14, %q12 \n"
                            "vmla.f32 q11, q15, %q13 \n"
                            "vst1.f32 {d20-d23}, [%0 :128]! \n"
                            : "=r"(output0_tm), // %0
                            "=r"(r0), // %1
                            "=r"(r1) // %2
                            : "0"(output0_tm),
                            "1"(r0),
                            "2"(r1),
                            "w"(_k0), // %6
                            "w"(_k0n), // %7
                            "w"(_k1), // %8
                            "w"(_k1n), // %9
                            "w"(_k0nn), // %10
                            "w"(_k0nnn), // %11
                            "w"(_k1nn), // %12
                            "w"(_k1nnn) // %13
                            : "cc", "memory", "r4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                        );
#endif // __aarch64__
#else
                        // Scalar fallback: 16 floats per tile row group.
                        for (int m=0; m<16; m++)
                        {
                            output0_tm[m] += r0[m] * k0[m];
                            output0_tm[m] += r1[m] * k1[m];
                        }

                        r0 += 16;
                        r1 += 16;
                        output0_tm += 16;
#endif // __ARM_NEON
                    }

                    // Advance kernel pointers to the next row group's slice.
                    // On ARMv7 NEON the inline asm above already post-incremented
                    // k0/k1, so only the aarch64 and scalar paths step here.
#if __ARM_NEON
#if __aarch64__
                    k0 += 16;
                    k1 += 16;
#endif // __aarch64__
#else
                    k0 += 16;
                    k1 += 16;
#endif // __ARM_NEON
                }
            }

            // Tail: odd remaining input channel, single-channel accumulate.
            for (; q<inch; q++)
            {
                const float* r0 = bottom_blob_tm.channel(q);
                const float* k0 = kernel0_tm.row(q);

                float* output0_tm = out0_tm;

                for (int r=0; r<4; r++)
                {
#if __ARM_NEON
#if __aarch64__
                    float32x4_t _k0 = vld1q_f32(k0);
                    float32x4_t _k0n = vld1q_f32(k0+4);
                    float32x4_t _k0nn = vld1q_f32(k0+8);
                    float32x4_t _k0nnn = vld1q_f32(k0+12);
#else
                    float32x4_t _k0;
                    float32x4_t _k0n;
                    float32x4_t _k0nn;
                    float32x4_t _k0nnn;
                    asm volatile(
                        "pld [%0, #512] \n"
                        "vld1.f32 {%e1-%f1}, [%0 :128]! \n"
                        "vld1.f32 {%e2-%f2}, [%0 :128]! \n"
                        "vld1.f32 {%e3-%f3}, [%0 :128]! \n"
                        "vld1.f32 {%e4-%f4}, [%0 :128]! \n"
                        : "=r"(k0), // %0
                        "=w"(_k0), // %1
                        "=w"(_k0n), // %2
                        "=w"(_k0nn), // %3
                        "=w"(_k0nnn) // %4
                        : "0"(k0)
                        : "cc", "memory"
                    );
#endif // __aarch64__
#endif // __ARM_NEON

                    // tile
                    for (int i=0; i<tiles; i++)
                    {
#if __ARM_NEON
#if __aarch64__
                        float32x4_t _output0_tm = vld1q_f32(output0_tm);
                        float32x4_t _output0_tmn = vld1q_f32(output0_tm+4);
                        float32x4_t _r0 = vld1q_f32(r0);
                        float32x4_t _r0n = vld1q_f32(r0+4);

                        r0 += 8;

                        _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0);
                        _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n);

                        vst1q_f32(output0_tm, _output0_tm);
                        vst1q_f32(output0_tm+4, _output0_tmn);

                        output0_tm += 8;

                        _output0_tm = vld1q_f32(output0_tm);
                        _output0_tmn = vld1q_f32(output0_tm+4);
                        _r0 = vld1q_f32(r0);
                        _r0n = vld1q_f32(r0+4);

                        r0 += 8;

                        _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn);
                        _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn);

                        vst1q_f32(output0_tm, _output0_tm);
                        vst1q_f32(output0_tm+4, _output0_tmn);

                        output0_tm += 8;
#else
                        asm volatile(
                            "mov r4, %0 \n"
                            "pld [%1, #256] \n"
                            "vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0
                            "pld [%0, #256] \n"
                            "vld1.f32 {d16-d19}, [%0 :128]! \n"// q8 q9 = _output0_tm
                            "vmla.f32 q8, q12, %q4 \n"
                            "vmla.f32 q9, q13, %q5 \n"
                            "pld [%1, #256] \n"
                            "vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0
                            "pld [%0, #256] \n"
                            "vld1.f32 {d20-d23}, [%0 :128] \n"// q10 q11 = _output0_tm
                            "vmla.f32 q10, q12, %q6 \n"
                            "vst1.f32 {d16-d19}, [r4 :128] \n"
                            "vmla.f32 q11, q13, %q7 \n"
                            "vst1.f32 {d20-d23}, [%0 :128]! \n"
                            : "=r"(output0_tm), // %0
                            "=r"(r0) // %1
                            : "0"(output0_tm),
                            "1"(r0),
                            "w"(_k0), // %4
                            "w"(_k0n), // %5
                            "w"(_k0nn), // %6
                            "w"(_k0nnn) // %7
                            : "cc", "memory", "r4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                        );
#endif // __aarch64__
#else
                        for (int m=0; m<16; m++)
                        {
                            output0_tm[m] += r0[m] * k0[m];
                        }

                        r0 += 16;
                        output0_tm += 16;
#endif // __ARM_NEON
                    }

#if __ARM_NEON
#if __aarch64__
                    k0 += 16;
#endif // __aarch64__
#else
                    k0 += 16;
#endif // __ARM_NEON
                }
            }
        }
    }
    bottom_blob_tm = Mat(); // release transformed input early

    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    top_blob_bordered.create(outw, outh, outch);
    {
        // Output transform matrix A^T for F(6x6, 3x3):
        // const float otm[6][8] = {
        //     {1.0f, 1.0f,  1.0f,  1.0f,  1.0f, 32.0f, 32.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f,  2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
        //     {0.0f, 1.0f,  1.0f,  4.0f,  4.0f,  8.0f,  8.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f,  8.0f, -8.0f,  4.0f, -4.0f, 0.0f},
        //     {0.0f, 1.0f,  1.0f, 16.0f, 16.0f,  2.0f,  2.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 32.0f,-32.0f,  1.0f, -1.0f, 1.0f}
        // };

        // Factored evaluation (shared even/odd subexpressions):
        // 0 = r0 + (r1 + r2) + (r3 + r4)     + (r5 + r6) * 32
        // 1 =      (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
        // 2 =      (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
        // 3 =      (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
        // 4 =      (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
        // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)

        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;
        const int tiles = w_tm/8 * h_tm/8;

        #pragma omp parallel for
        for (int p = 0; p<outch; p++)
        {
            const Mat out0_tm = top_blob_tm.channel(p);
            Mat out0 = top_blob_bordered.channel(p);

            const float bias0 = bias ? bias[p] : 0.f;

            // Per-thread scratch: 8x8 tile reduced to 6x8 after the row pass.
            float tmp[6][8];

            // tile
            for (int i=0; i<outh/6; i++)
            {
                for (int j=0; j<outw/6; j++)
                {
                    // Gather the tile's four row groups (transform rows 2g, 2g+1).
                    const float* output0_tm01 = out0_tm.row(i * w_tm/8 + j);
                    const float* output0_tm23 = out0_tm.row(tiles + i * w_tm/8 + j);
                    const float* output0_tm45 = out0_tm.row(tiles * 2 + i * w_tm/8 + j);
                    const float* output0_tm67 = out0_tm.row(tiles * 3 + i * w_tm/8 + j);

                    float* output0 = out0.row(i * 6) + j * 6;

                    const float* output0_tms[4] = { output0_tm01, output0_tm23, output0_tm45, output0_tm67 };

                    // Row pass: apply A^T to each of the 8 transform rows.
                    for (int m=0; m<8; m++)
                    {
                        const float* output0_tm = output0_tms[m/2] + (m%2) * 8;

                        float tmp024a = output0_tm[1] + output0_tm[2];
                        float tmp135a = output0_tm[1] - output0_tm[2];

                        float tmp024b = output0_tm[3] + output0_tm[4];
                        float tmp135b = output0_tm[3] - output0_tm[4];

                        float tmp024c = output0_tm[5] + output0_tm[6];
                        float tmp135c = output0_tm[5] - output0_tm[6];

                        tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32;
                        tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
                        tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c;

                        tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16;
                        tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
                        tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c;
                    }

                    // Column pass: apply A^T across rows, add bias, and write
                    // the 6x6 output tile.
                    for (int m=0; m<6; m++)
                    {
                        const float* tmp0 = tmp[m];

                        float tmp024a = tmp0[1] + tmp0[2];
                        float tmp135a = tmp0[1] - tmp0[2];

                        float tmp024b = tmp0[3] + tmp0[4];
                        float tmp135b = tmp0[3] - tmp0[4];

                        float tmp024c = tmp0[5] + tmp0[6];
                        float tmp135c = tmp0[5] - tmp0[6];

                        output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
                        output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
                        output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c;

                        output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16;
                        output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
                        output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;

                        output0 += outw;
                    }
                }
            }
        }
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0,
top_blob_bordered.w - top_blob.w); } static void conv3x3s1_winograd64_neon3(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f); const float* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; bottom_blob_tm.create(8, 8 * w_tm/8 * h_tm/8, inch); const int tiles = w_tm/8 * h_tm/8; // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for for (int q = 0; q<inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[8][8]; // tile for (int i=0; 
i<h_tm/8; i++) { for (int j=0; j<w_tm/8; j++) { const float* r0 = img0.row(i * 6) + j * 6; float* r0_tm0 = img0_tm.row(i * w_tm/8 + j); float* r0_tm1 = img0_tm.row(i * w_tm/8 + j + tiles); float* r0_tm2 = img0_tm.row(i * w_tm/8 + j + tiles * 2); float* r0_tm3 = img0_tm.row(i * w_tm/8 + j + tiles * 3); float* r0_tm4 = img0_tm.row(i * w_tm/8 + j + tiles * 4); float* r0_tm5 = img0_tm.row(i * w_tm/8 + j + tiles * 5); float* r0_tm6 = img0_tm.row(i * w_tm/8 + j + tiles * 6); float* r0_tm7 = img0_tm.row(i * w_tm/8 + j + tiles * 7); for (int m=0; m<8; m++) { tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25; tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25; float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25); float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25); tmp[1][m] = tmp12a + tmp12b; tmp[2][m] = tmp12a - tmp12b; float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25); float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2); tmp[3][m] = tmp34a + tmp34b; tmp[4][m] = tmp34a - tmp34b; float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4); float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5); tmp[5][m] = tmp56a + tmp56b; tmp[6][m] = tmp56a - tmp56b; r0 += w; } float* r0_tms[8] = { r0_tm0, r0_tm1, r0_tm2, r0_tm3, r0_tm4, r0_tm5, r0_tm6, r0_tm7 }; for (int m=0; m<8; m++) { const float* tmp0 = tmp[m]; float* r0_tm = r0_tms[m]; r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25; r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25; float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25); float tmp12b = (tmp0[1] - tmp0[3] * 4.25 + tmp0[5]); r0_tm[1] = tmp12a + tmp12b; r0_tm[2] = tmp12a - tmp12b; float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25); float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2); r0_tm[3] = tmp34a + tmp34b; r0_tm[4] = tmp34a - tmp34b; float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4); float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5); r0_tm[5] = tmp56a + tmp56b; r0_tm[6] = tmp56a - tmp56b; } } } } } bottom_blob_bordered = 
Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; top_blob_tm.create(8, 8 * w_tm/8 * h_tm/8, outch); const int tiles = h_tm/8 * w_tm/8; int nn_outch = outch >> 1; int remain_outch_start = nn_outch << 1; #pragma omp parallel for for (int pp=0; pp<nn_outch; pp++) { int p = pp * 2; Mat out0_tm = top_blob_tm.channel(p); Mat out1_tm = top_blob_tm.channel(p+1); const Mat kernel0_tm = kernel_tm.channel(p); const Mat kernel1_tm = kernel_tm.channel(p+1); out0_tm.fill(0.f); out1_tm.fill(0.f); int q = 0; for (; q+1<inch; q+=2) { const float* r0 = bottom_blob_tm.channel(q); const float* r1 = bottom_blob_tm.channel(q+1); const float* k00 = kernel0_tm.row(q); const float* k01 = kernel0_tm.row(q+1); const float* k10 = kernel1_tm.row(q); const float* k11 = kernel1_tm.row(q+1); float* output0_tm = out0_tm; float* output1_tm = out1_tm; for (int r=0; r<8; r++) { #if __ARM_NEON #if __aarch64__ float32x4_t _k00 = vld1q_f32(k00); float32x4_t _k00n = vld1q_f32(k00+4); float32x4_t _k01 = vld1q_f32(k01); float32x4_t _k01n = vld1q_f32(k01+4); float32x4_t _k10 = vld1q_f32(k10); float32x4_t _k10n = vld1q_f32(k10+4); float32x4_t _k11 = vld1q_f32(k11); float32x4_t _k11n = vld1q_f32(k11+4); #else float32x4_t _k00; float32x4_t _k00n; float32x4_t _k01; float32x4_t _k01n; float32x4_t _k10; float32x4_t _k10n; float32x4_t _k11; float32x4_t _k11n; asm volatile( "pld [%0, #256] \n" "vld1.f32 {%e4-%f4}, [%0 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {%e6-%f6}, [%1 :128]! \n" "pld [%2, #256] \n" "vld1.f32 {%e8-%f8}, [%2 :128]! \n" "pld [%3, #256] \n" "vld1.f32 {%e10-%f10}, [%3 :128]! \n" "vld1.f32 {%e5-%f5}, [%0 :128]! \n" "vld1.f32 {%e7-%f7}, [%1 :128]! \n" "vld1.f32 {%e9-%f9}, [%2 :128]! \n" "vld1.f32 {%e11-%f11}, [%3 :128]! 
\n" : "=r"(k00), // %0 "=r"(k01), // %1 "=r"(k10), // %2 "=r"(k11), // %3 "=w"(_k00), // %4 "=w"(_k00n), // %5 "=w"(_k01), // %6 "=w"(_k01n), // %7 "=w"(_k10), // %8 "=w"(_k10n), // %9 "=w"(_k11), // %10 "=w"(_k11n) // %11 : "0"(k00), "1"(k01), "2"(k10), "3"(k11) : "cc", "memory" ); #endif // __aarch64__ #endif // __ARM_NEON // tile #if __ARM_NEON int nn = tiles >> 2; int remain = tiles & 3; #else int remain = tiles; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output0_tmn = vld1q_f32(output0_tm+4); float32x4_t _output1_tm = vld1q_f32(output1_tm); float32x4_t _output1_tmn = vld1q_f32(output1_tm+4); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0+4); float32x4_t _r1 = vld1q_f32(r1); float32x4_t _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); _output1_tm = vmlaq_f32(_output1_tm, _r1, _k11); _output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); output0_tm += 8; output1_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _output1_tm = vld1q_f32(output1_tm); _output1_tmn = vld1q_f32(output1_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, 
_r0n, _k10n); _output1_tm = vmlaq_f32(_output1_tm, _r1, _k11); _output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); output0_tm += 8; output1_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _output1_tm = vld1q_f32(output1_tm); _output1_tmn = vld1q_f32(output1_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); _output1_tm = vmlaq_f32(_output1_tm, _r1, _k11); _output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); output0_tm += 8; output1_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _output1_tm = vld1q_f32(output1_tm); _output1_tmn = vld1q_f32(output1_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); _output1_tm = vmlaq_f32(_output1_tm, _r1, _k11); _output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); output0_tm += 8; 
output1_tm += 8; } #else if (nn > 0) { asm volatile( "0: \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0 "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q10 \n" "vmla.f32 q9, q13, %q11 \n" "pld [%4, #256] \n" "vld1.f32 {d28-d31}, [%4 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q12 \n" "vmla.f32 q9, q15, %q13 \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q14 \n" "vmla.f32 q11, q13, %q15 \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0 "vmla.f32 q10, q14, %q16 \n" "vmla.f32 q11, q15, %q17 \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q10 \n" "vmla.f32 q9, q13, %q11 \n" "pld [%4, #256] \n" "vld1.f32 {d28-d31}, [%4 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q12 \n" "vmla.f32 q9, q15, %q13 \n" "vst1.f32 {d20-d23}, [%2 :128]! \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q14 \n" "vmla.f32 q11, q13, %q15 \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0 "vmla.f32 q10, q14, %q16 \n" "vmla.f32 q11, q15, %q17 \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q10 \n" "vmla.f32 q9, q13, %q11 \n" "pld [%4, #256] \n" "vld1.f32 {d28-d31}, [%4 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q12 \n" "vmla.f32 q9, q15, %q13 \n" "vst1.f32 {d20-d23}, [%2 :128]! \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q14 \n" "vmla.f32 q11, q13, %q15 \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0 "vmla.f32 q10, q14, %q16 \n" "vmla.f32 q11, q15, %q17 \n" "vst1.f32 {d16-d19}, [%1 :128]! 
\n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q10 \n" "vmla.f32 q9, q13, %q11 \n" "pld [%4, #256] \n" "vld1.f32 {d28-d31}, [%4 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q12 \n" "vmla.f32 q9, q15, %q13 \n" "vst1.f32 {d20-d23}, [%2 :128]! \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q14 \n" "vmla.f32 q11, q13, %q15 \n" "vmla.f32 q10, q14, %q16 \n" "vmla.f32 q11, q15, %q17 \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "vst1.f32 {d20-d23}, [%2 :128]! \n" "subs %0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(r1) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(r1), "w"(_k00), // %10 "w"(_k00n), // %11 "w"(_k01), // %12 "w"(_k01n), // %13 "w"(_k10), // %14 "w"(_k10n), // %15 "w"(_k11), // %16 "w"(_k11n) // %17 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON #if __aarch64__ float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output0_tmn = vld1q_f32(output0_tm+4); float32x4_t _output1_tm = vld1q_f32(output1_tm); float32x4_t _output1_tmn = vld1q_f32(output1_tm+4); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0+4); float32x4_t _r1 = vld1q_f32(r1); float32x4_t _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); _output1_tm = vmlaq_f32(_output1_tm, _r1, _k11); _output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); 
output0_tm += 8; output1_tm += 8; #else asm volatile( "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "pld [%0, #256] \n" "vld1.f32 {d16-d19}, [%0 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q10 \n" "vmla.f32 q9, q15, %q11 \n" "pld [%1, #256] \n" "vld1.f32 {d20-d23}, [%1 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q12 \n" "vmla.f32 q11, q13, %q13 \n" "vmla.f32 q10, q14, %q14 \n" "vmla.f32 q11, q15, %q15 \n" "vst1.f32 {d16-d19}, [%0 :128]! \n" "vst1.f32 {d20-d23}, [%1 :128]! \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(r0), // %2 "=r"(r1) // %3 : "0"(output0_tm), "1"(output1_tm), "2"(r0), "3"(r1), "w"(_k00), // %8 "w"(_k00n), // %9 "w"(_k01), // %10 "w"(_k01n), // %11 "w"(_k10), // %12 "w"(_k10n), // %13 "w"(_k11), // %14 "w"(_k11n) // %15 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else for (int m=0; m<8; m++) { output0_tm[m] += r0[m] * k00[m]; output0_tm[m] += r1[m] * k01[m]; output1_tm[m] += r0[m] * k10[m]; output1_tm[m] += r1[m] * k11[m]; } r0 += 8; r1 += 8; output0_tm += 8; output1_tm += 8; #endif // __ARM_NEON } #if __ARM_NEON #if __aarch64__ k00 += 8; k01 += 8; k10 += 8; k11 += 8; #endif // __aarch64__ #else k00 += 8; k01 += 8; k10 += 8; k11 += 8; #endif // __ARM_NEON } } for (; q<inch; q++) { const float* r0 = bottom_blob_tm.channel(q); const float* k00 = kernel0_tm.row(q); const float* k10 = kernel1_tm.row(q); float* output0_tm = out0_tm; float* output1_tm = out1_tm; for (int r=0; r<8; r++) { #if __ARM_NEON #if __aarch64__ float32x4_t _k00 = vld1q_f32(k00); float32x4_t _k00n = vld1q_f32(k00+4); float32x4_t _k10 = vld1q_f32(k10); float32x4_t _k10n = vld1q_f32(k10+4); #else float32x4_t _k00; float32x4_t _k00n; float32x4_t _k10; float32x4_t _k10n; asm volatile( "pld [%0, #256] \n" "vld1.f32 {%e2-%f2}, [%0 :128]! 
\n" "pld [%1, #256] \n" "vld1.f32 {%e4-%f4}, [%1 :128]! \n" "vld1.f32 {%e3-%f3}, [%0 :128]! \n" "vld1.f32 {%e5-%f5}, [%1 :128]! \n" : "=r"(k00), // %0 "=r"(k10), // %1 "=w"(_k00), // %2 "=w"(_k00n), // %3 "=w"(_k10), // %4 "=w"(_k10n) // %5 : "0"(k00), "1"(k10) : "cc", "memory" ); #endif // __aarch64__ #endif // __ARM_NEON // tile #if __ARM_NEON int nn = tiles >> 2; int remain = tiles & 3; #else int remain = tiles; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output0_tmn = vld1q_f32(output0_tm+4); float32x4_t _output1_tm = vld1q_f32(output1_tm); float32x4_t _output1_tmn = vld1q_f32(output1_tm+4); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0+4); r0 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); output0_tm += 8; output1_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _output1_tm = vld1q_f32(output1_tm); _output1_tmn = vld1q_f32(output1_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); r0 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); output0_tm += 8; output1_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _output1_tm = vld1q_f32(output1_tm); _output1_tmn = vld1q_f32(output1_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); r0 += 8; _output0_tm = 
vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); output0_tm += 8; output1_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _output1_tm = vld1q_f32(output1_tm); _output1_tmn = vld1q_f32(output1_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); r0 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); output0_tm += 8; output1_tm += 8; } #else if (nn > 0) { asm volatile( "0: \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0 "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q10 \n" "vmla.f32 q11, q13, %q11 \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0 "vst1.f32 {d16-d19}, [%1 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "vst1.f32 {d20-d23}, [%2 :128]! \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q10 \n" "vmla.f32 q11, q13, %q11 \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0 "vst1.f32 {d16-d19}, [%1 :128]! 
\n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "vst1.f32 {d20-d23}, [%2 :128]! \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q10 \n" "vmla.f32 q11, q13, %q11 \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0 "vst1.f32 {d16-d19}, [%1 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "vst1.f32 {d20-d23}, [%2 :128]! \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q10 \n" "vmla.f32 q11, q13, %q11 \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "vst1.f32 {d20-d23}, [%2 :128]! \n" "subs %0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0) // %3 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "w"(_k00), // %8 "w"(_k00n), // %9 "w"(_k10), // %10 "w"(_k10n) // %11 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON #if __aarch64__ float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output0_tmn = vld1q_f32(output0_tm+4); float32x4_t _output1_tm = vld1q_f32(output1_tm); float32x4_t _output1_tmn = vld1q_f32(output1_tm+4); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0+4); r0 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); output0_tm += 8; output1_tm += 8; #else asm volatile( "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! 
\n"// q12 q13 = _r0 "pld [%0, #256] \n" "vld1.f32 {d16-d19}, [%0 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q6 \n" "vmla.f32 q9, q13, %q7 \n" "pld [%1, #256] \n" "vld1.f32 {d20-d23}, [%1 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q8 \n" "vmla.f32 q11, q13, %q9 \n" "vst1.f32 {d16-d19}, [%0 :128]! \n" "vst1.f32 {d20-d23}, [%1 :128]! \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(r0) // %2 : "0"(output0_tm), "1"(output1_tm), "2"(r0), "w"(_k00), // %6 "w"(_k00n), // %7 "w"(_k10), // %8 "w"(_k10n) // %9 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13" ); #endif // __aarch64__ #else for (int m=0; m<8; m++) { output0_tm[m] += r0[m] * k00[m]; output1_tm[m] += r0[m] * k10[m]; } r0 += 8; output0_tm += 8; output1_tm += 8; #endif // __ARM_NEON } #if __ARM_NEON #if __aarch64__ k00 += 8; k10 += 8; #endif // __aarch64__ #else k00 += 8; k10 += 8; #endif // __ARM_NEON } } } #pragma omp parallel for for (int p = remain_outch_start; p<outch; p++) { Mat out0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); out0_tm.fill(0.f); int q = 0; for (; q+1<inch; q+=2) { const float* r0 = bottom_blob_tm.channel(q); const float* r1 = bottom_blob_tm.channel(q+1); const float* k00 = kernel0_tm.row(q); const float* k01 = kernel0_tm.row(q+1); float* output0_tm = out0_tm; for (int r=0; r<8; r++) { #if __ARM_NEON #if __aarch64__ float32x4_t _k00 = vld1q_f32(k00); float32x4_t _k00n = vld1q_f32(k00+4); float32x4_t _k01 = vld1q_f32(k01); float32x4_t _k01n = vld1q_f32(k01+4); #else float32x4_t _k00; float32x4_t _k00n; float32x4_t _k01; float32x4_t _k01n; asm volatile( "pld [%0, #256] \n" "vld1.f32 {%e2-%f2}, [%0 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {%e4-%f4}, [%1 :128]! \n" "vld1.f32 {%e3-%f3}, [%0 :128]! \n" "vld1.f32 {%e5-%f5}, [%1 :128]! 
\n" : "=r"(k00), // %0 "=r"(k01), // %1 "=w"(_k00), // %2 "=w"(_k00n), // %3 "=w"(_k01), // %4 "=w"(_k01n) // %5 : "0"(k00), "1"(k01) : "cc", "memory" ); #endif // __aarch64__ #endif // __ARM_NEON // tile #if __ARM_NEON int nn = tiles >> 2; int remain = tiles & 3; #else int remain = tiles; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output0_tmn = vld1q_f32(output0_tm+4); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0+4); float32x4_t _r1 = vld1q_f32(r1); float32x4_t _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _r0 = vld1q_f32(r0); _r0n = 
vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; } #else if (nn > 0) { asm volatile( "0: \n" "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q10 \n" "vmla.f32 q9, q15, %q11 \n" "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "vst1.f32 {d16-d19}, [%1 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q10 \n" "vmla.f32 q9, q15, %q11 \n" "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "vst1.f32 {d16-d19}, [%1 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q10 \n" "vmla.f32 q9, q15, %q11 \n" "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "vst1.f32 {d16-d19}, [%1 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q10 \n" "vmla.f32 q9, q15, %q11 \n" "vst1.f32 {d16-d19}, [%1 :128]! 
\n" "subs %0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(r1) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(r1), "w"(_k00), // %8 "w"(_k00n), // %9 "w"(_k01), // %10 "w"(_k01n) // %11 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON #if __aarch64__ float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output0_tmn = vld1q_f32(output0_tm+4); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0+4); float32x4_t _r1 = vld1q_f32(r1); float32x4_t _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; #else asm volatile( "pld [%1, #256] \n" "vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0 "pld [%0, #256] \n" "vld1.f32 {d16-d19}, [%0 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q6 \n" "vmla.f32 q9, q13, %q7 \n" "pld [%2, #256] \n" "vld1.f32 {d28-d31}, [%2 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q8 \n" "vmla.f32 q9, q15, %q9 \n" "vst1.f32 {d16-d19}, [%0 :128]! 
\n" : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(r1) // %2 : "0"(output0_tm), "1"(r0), "2"(r1), "w"(_k00), // %6 "w"(_k00n), // %7 "w"(_k01), // %8 "w"(_k01n) // %9 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else for (int m=0; m<8; m++) { output0_tm[m] += r0[m] * k00[m]; output0_tm[m] += r1[m] * k01[m]; } r0 += 8; r1 += 8; output0_tm += 8; #endif // __ARM_NEON } #if __ARM_NEON #if __aarch64__ k00 += 8; k01 += 8; #endif // __aarch64__ #else k00 += 8; k01 += 8; #endif // __ARM_NEON } } for (; q<inch; q++) { const float* r0 = bottom_blob_tm.channel(q); const float* k00 = kernel0_tm.row(q); float* output0_tm = out0_tm; for (int r=0; r<8; r++) { #if __ARM_NEON #if __aarch64__ float32x4_t _k00 = vld1q_f32(k00); float32x4_t _k00n = vld1q_f32(k00+4); #else float32x4_t _k00; float32x4_t _k00n; asm volatile( "pld [%0, #256] \n" "vld1.f32 {%e1-%f1}, [%0 :128]! \n" "vld1.f32 {%e2-%f2}, [%0 :128]! \n" : "=r"(k00), // %0 "=w"(_k00), // %1 "=w"(_k00n) // %2 : "0"(k00) : "cc", "memory" ); #endif // __aarch64__ #endif // __ARM_NEON // tile for (int i=0; i<tiles; i++) { #if __ARM_NEON #if __aarch64__ float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output0_tmn = vld1q_f32(output0_tm+4); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0+4); r0 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; #else asm volatile( "pld [%1, #256] \n" "vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0 "pld [%0, #256] \n" "vld1.f32 {d16-d19}, [%0 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q4 \n" "vmla.f32 q9, q13, %q5 \n" "vst1.f32 {d16-d19}, [%0 :128]! 
\n" : "=r"(output0_tm), // %0 "=r"(r0) // %1 : "0"(output0_tm), "1"(r0), "w"(_k00), // %4 "w"(_k00n) // %5 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else for (int m=0; m<8; m++) { output0_tm[m] += r0[m] * k00[m]; } r0 += 8; output0_tm += 8; #endif // __ARM_NEON } #if __ARM_NEON #if __aarch64__ k00 += 8; #endif // __aarch64__ #else k00 += 8; #endif // __ARM_NEON } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch); { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm/8 * h_tm/8; #pragma omp parallel for for (int p = 0; p<outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; float tmp[6][8]; // tile for (int i=0; i<outh/6; i++) { for (int j=0; j<outw/6; j++) { const float* output0_tm0 = out0_tm.row(i * w_tm/8 + j); const float* output0_tm1 = out0_tm.row(i * w_tm/8 + j + tiles); const float* output0_tm2 = out0_tm.row(i * w_tm/8 + j + tiles * 2); const float* output0_tm3 = out0_tm.row(i * w_tm/8 + j + tiles * 3); const float* output0_tm4 = out0_tm.row(i * w_tm/8 + j + tiles * 4); const float* output0_tm5 = out0_tm.row(i * w_tm/8 + j + tiles * 5); const float* output0_tm6 = out0_tm.row(i * w_tm/8 + j + tiles * 6); const float* output0_tm7 = out0_tm.row(i * w_tm/8 + j + tiles * 7); float* output0 = out0.row(i * 6) + j * 6; const float* output0_tms[8] = { output0_tm0, output0_tm1, output0_tm2, output0_tm3, output0_tm4, output0_tm5, output0_tm6, output0_tm7 }; for (int m=0; m<8; m++) { const float* output0_tm = output0_tms[m]; float tmp024a = output0_tm[1] + output0_tm[2]; float tmp135a = output0_tm[1] - output0_tm[2]; float tmp024b = output0_tm[3] + output0_tm[4]; float tmp135b = output0_tm[3] - output0_tm[4]; float tmp024c = output0_tm[5] + output0_tm[6]; float tmp135c = output0_tm[5] - output0_tm[6]; tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32; tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c; } for (int m=0; m<6; m++) { const float* tmp0 = tmp[m]; float tmp024a = tmp0[1] + tmp0[2]; float tmp135a = tmp0[1] - tmp0[2]; float tmp024b = tmp0[3] + tmp0[4]; float tmp135b = tmp0[3] - tmp0[4]; float tmp024c = tmp0[5] + tmp0[6]; float tmp135c = tmp0[5] - tmp0[6]; output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32; output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8; output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c; output0[1] = bias0 + tmp135a + tmp135b + tmp135b + 
tmp135c * 16; output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4; output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c; output0 += outw; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w); } #endif static void conv3x3s1_winograd64_neon4(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f); const float* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; bottom_blob_tm.create(4, 16 * w_tm/8 * h_tm/8, inch); const int tiles = w_tm/8 * h_tm/8; // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + 
(r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #if __ARM_NEON const float coeff[8] = { 0.25f, 0.5f, -1.25f, 2.f, -2.5f, 4.f, 4.25f, 5.25f }; float32x4_t _coeff0 = vld1q_f32(coeff); float32x4_t _coeff1 = vld1q_f32(coeff+4); #endif // __ARM_NEON #pragma omp parallel for for (int q = 0; q<inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[8][8]; // tile for (int i=0; i<h_tm/8; i++) { for (int j=0; j<w_tm/8; j++) { #if __ARM_NEON const float* r0 = img0.row(i * 6) + j * 6; const float* r1 = r0 + w; const float* r2 = r0 + w*2; const float* r3 = r0 + w*3; #if __aarch64__ for (int m=0; m+3<8; m+=4) { float32x4_t _r0_0123 = vld1q_f32(r0); float32x4_t _r0_4567 = vld1q_f32(r0+4); float32x4_t _r1_0123 = vld1q_f32(r1); float32x4_t _r1_4567 = vld1q_f32(r1+4); float32x4_t _r2_0123 = vld1q_f32(r2); float32x4_t _r2_4567 = vld1q_f32(r2+4); float32x4_t _r3_0123 = vld1q_f32(r3); float32x4_t _r3_4567 = vld1q_f32(r3+4); float32x4x2_t _r01_00221133 = vtrnq_f32(_r0_0123, _r1_0123); float32x4x2_t _r01_44665577 = vtrnq_f32(_r0_4567, _r1_4567); float32x4x2_t _r23_00221133 = vtrnq_f32(_r2_0123, _r3_0123); float32x4x2_t _r23_44665577 = vtrnq_f32(_r2_4567, _r3_4567); // no vswp intrinsic :( float32x4_t _r_00 = vcombine_f32(vget_low_f32(_r01_00221133.val[0]), vget_low_f32(_r23_00221133.val[0])); float32x4_t _r_11 = vcombine_f32(vget_low_f32(_r01_00221133.val[1]), vget_low_f32(_r23_00221133.val[1])); float32x4_t _r_22 = vcombine_f32(vget_high_f32(_r01_00221133.val[0]), vget_high_f32(_r23_00221133.val[0])); float32x4_t _r_33 = vcombine_f32(vget_high_f32(_r01_00221133.val[1]), vget_high_f32(_r23_00221133.val[1])); float32x4_t _r_44 = vcombine_f32(vget_low_f32(_r01_44665577.val[0]), vget_low_f32(_r23_44665577.val[0])); float32x4_t _r_55 = vcombine_f32(vget_low_f32(_r01_44665577.val[1]), vget_low_f32(_r23_44665577.val[1])); float32x4_t _r_66 = 
vcombine_f32(vget_high_f32(_r01_44665577.val[0]), vget_high_f32(_r23_44665577.val[0])); float32x4_t _r_77 = vcombine_f32(vget_high_f32(_r01_44665577.val[1]), vget_high_f32(_r23_44665577.val[1])); float32x4_t _r_0_m_6 = vsubq_f32(_r_00, _r_66); float32x4_t _r_7_m_1 = vsubq_f32(_r_77, _r_11); float32x4_t _r_4_m_2 = vsubq_f32(_r_44, _r_22); float32x4_t _r_3_m_5 = vsubq_f32(_r_33, _r_55); float32x4_t _tmp0 = vmlaq_lane_f32(_r_0_m_6, _r_4_m_2, vget_high_f32(_coeff1), 1); float32x4_t _tmp7 = vmlaq_lane_f32(_r_7_m_1, _r_3_m_5, vget_high_f32(_coeff1), 1); vst1q_f32(&tmp[0][m], _tmp0); vst1q_f32(&tmp[7][m], _tmp7); float32x4_t _r_2_a_6 = vaddq_f32(_r_22, _r_66); float32x4_t _r_1_a_5 = vaddq_f32(_r_11, _r_55); float32x4_t _tmp12a = vmlsq_lane_f32(_r_2_a_6, _r_44, vget_high_f32(_coeff1), 0); float32x4_t _tmp12b = vmlsq_lane_f32(_r_1_a_5, _r_33, vget_high_f32(_coeff1), 0); float32x4_t _tmp1 = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _tmp2 = vsubq_f32(_tmp12a, _tmp12b); vst1q_f32(&tmp[1][m], _tmp1); vst1q_f32(&tmp[2][m], _tmp2); float32x4_t _r_4_x_c = vmulq_lane_f32(_r_44, vget_high_f32(_coeff0), 0); float32x4_t _r_3_x_c = vmulq_lane_f32(_r_33, vget_low_f32(_coeff1), 0); float32x4_t _tmp34a = vaddq_f32(_r_66, _r_4_x_c); _tmp34a = vmlaq_lane_f32(_tmp34a, _r_22, vget_low_f32(_coeff0), 0); float32x4_t _tmp34b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_low_f32(_coeff0), 1); _tmp34b = vmlaq_lane_f32(_tmp34b, _r_55, vget_high_f32(_coeff0), 1); float32x4_t _tmp3 = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _tmp4 = vsubq_f32(_tmp34a, _tmp34b); vst1q_f32(&tmp[3][m], _tmp3); vst1q_f32(&tmp[4][m], _tmp4); // reuse r04 * 1.25 // reuse r03 * 2.5 float32x4_t _r_2_a_4c = vaddq_f32(_r_22, _r_4_x_c); float32x4_t _tmp56a = vmlaq_lane_f32(_r_66, _r_2_a_4c, vget_low_f32(_coeff1), 1); float32x4_t _tmp56b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_high_f32(_coeff0), 1); _tmp56b = vmlaq_lane_f32(_tmp56b, _r_55, vget_low_f32(_coeff0), 1); float32x4_t _tmp5 = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _tmp6 = 
vsubq_f32(_tmp56a, _tmp56b); vst1q_f32(&tmp[5][m], _tmp5); vst1q_f32(&tmp[6][m], _tmp6); r0 += w*4; r1 += w*4; r2 += w*4; r3 += w*4; } const float* t0 = tmp[0]; const float* t1 = tmp[1]; const float* t2 = tmp[2]; const float* t3 = tmp[3]; float* r0_tm0_0 = img0_tm.row(i * w_tm/8 + j); float* r0_tm0_4 = img0_tm.row(i * w_tm/8 + j + tiles); float* r0_tm1_0 = img0_tm.row(i * w_tm/8 + j + tiles*2); float* r0_tm1_4 = img0_tm.row(i * w_tm/8 + j + tiles*3); float* r0_tm2_0 = img0_tm.row(i * w_tm/8 + j + tiles*4); float* r0_tm2_4 = img0_tm.row(i * w_tm/8 + j + tiles*5); float* r0_tm3_0 = img0_tm.row(i * w_tm/8 + j + tiles*6); float* r0_tm3_4 = img0_tm.row(i * w_tm/8 + j + tiles*7); for (int m=0; m+3<8; m+=4) { float32x4_t _t0_0123 = vld1q_f32(t0); float32x4_t _t0_4567 = vld1q_f32(t0+4); float32x4_t _t1_0123 = vld1q_f32(t1); float32x4_t _t1_4567 = vld1q_f32(t1+4); float32x4_t _t2_0123 = vld1q_f32(t2); float32x4_t _t2_4567 = vld1q_f32(t2+4); float32x4_t _t3_0123 = vld1q_f32(t3); float32x4_t _t3_4567 = vld1q_f32(t3+4); float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123); float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567); float32x4x2_t _t23_00221133 = vtrnq_f32(_t2_0123, _t3_0123); float32x4x2_t _t23_44665577 = vtrnq_f32(_t2_4567, _t3_4567); // no vswp intrinsic :( float32x4_t _t_00 = vcombine_f32(vget_low_f32(_t01_00221133.val[0]), vget_low_f32(_t23_00221133.val[0])); float32x4_t _t_11 = vcombine_f32(vget_low_f32(_t01_00221133.val[1]), vget_low_f32(_t23_00221133.val[1])); float32x4_t _t_22 = vcombine_f32(vget_high_f32(_t01_00221133.val[0]), vget_high_f32(_t23_00221133.val[0])); float32x4_t _t_33 = vcombine_f32(vget_high_f32(_t01_00221133.val[1]), vget_high_f32(_t23_00221133.val[1])); float32x4_t _t_44 = vcombine_f32(vget_low_f32(_t01_44665577.val[0]), vget_low_f32(_t23_44665577.val[0])); float32x4_t _t_55 = vcombine_f32(vget_low_f32(_t01_44665577.val[1]), vget_low_f32(_t23_44665577.val[1])); float32x4_t _t_66 = 
vcombine_f32(vget_high_f32(_t01_44665577.val[0]), vget_high_f32(_t23_44665577.val[0])); float32x4_t _t_77 = vcombine_f32(vget_high_f32(_t01_44665577.val[1]), vget_high_f32(_t23_44665577.val[1])); float32x4_t _t_0_m_6 = vsubq_f32(_t_00, _t_66); float32x4_t _t_7_m_1 = vsubq_f32(_t_77, _t_11); float32x4_t _t_4_m_2 = vsubq_f32(_t_44, _t_22); float32x4_t _t_3_m_5 = vsubq_f32(_t_33, _t_55); float32x4_t _r0_tm_0_0 = vmlaq_lane_f32(_t_0_m_6, _t_4_m_2, vget_high_f32(_coeff1), 1); float32x4_t _r0_tm_4_3 = vmlaq_lane_f32(_t_7_m_1, _t_3_m_5, vget_high_f32(_coeff1), 1); r0_tm0_0[0] = vgetq_lane_f32(_r0_tm_0_0, 0); r0_tm1_0[0] = vgetq_lane_f32(_r0_tm_0_0, 1); r0_tm2_0[0] = vgetq_lane_f32(_r0_tm_0_0, 2); r0_tm3_0[0] = vgetq_lane_f32(_r0_tm_0_0, 3); r0_tm0_4[3] = vgetq_lane_f32(_r0_tm_4_3, 0); r0_tm1_4[3] = vgetq_lane_f32(_r0_tm_4_3, 1); r0_tm2_4[3] = vgetq_lane_f32(_r0_tm_4_3, 2); r0_tm3_4[3] = vgetq_lane_f32(_r0_tm_4_3, 3); float32x4_t _t_2_m_6 = vaddq_f32(_t_22, _t_66); float32x4_t _t_1_m_5 = vaddq_f32(_t_11, _t_55); float32x4_t _tmp12a = vmlsq_lane_f32(_t_2_m_6, _t_44, vget_high_f32(_coeff1), 0); float32x4_t _tmp12b = vmlsq_lane_f32(_t_1_m_5, _t_33, vget_high_f32(_coeff1), 0); float32x4_t _r0_tm_0_1 = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _r0_tm_0_2 = vsubq_f32(_tmp12a, _tmp12b); r0_tm0_0[1] = vgetq_lane_f32(_r0_tm_0_1, 0); r0_tm1_0[1] = vgetq_lane_f32(_r0_tm_0_1, 1); r0_tm2_0[1] = vgetq_lane_f32(_r0_tm_0_1, 2); r0_tm3_0[1] = vgetq_lane_f32(_r0_tm_0_1, 3); r0_tm0_0[2] = vgetq_lane_f32(_r0_tm_0_2, 0); r0_tm1_0[2] = vgetq_lane_f32(_r0_tm_0_2, 1); r0_tm2_0[2] = vgetq_lane_f32(_r0_tm_0_2, 2); r0_tm3_0[2] = vgetq_lane_f32(_r0_tm_0_2, 3); float32x4_t _t_4_x_c = vmulq_lane_f32(_t_44, vget_high_f32(_coeff0), 0); float32x4_t _t_3_x_c = vmulq_lane_f32(_t_33, vget_low_f32(_coeff1), 0); float32x4_t _tmp34a = vaddq_f32(_t_66, _t_4_x_c); _tmp34a = vmlaq_lane_f32(_tmp34a, _t_22, vget_low_f32(_coeff0), 0); float32x4_t _tmp34b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_low_f32(_coeff0), 1); 
_tmp34b = vmlaq_lane_f32(_tmp34b, _t_55, vget_high_f32(_coeff0), 1); float32x4_t _r0_tm_0_3 = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _r0_tm_4_0 = vsubq_f32(_tmp34a, _tmp34b); r0_tm0_0[3] = vgetq_lane_f32(_r0_tm_0_3, 0); r0_tm1_0[3] = vgetq_lane_f32(_r0_tm_0_3, 1); r0_tm2_0[3] = vgetq_lane_f32(_r0_tm_0_3, 2); r0_tm3_0[3] = vgetq_lane_f32(_r0_tm_0_3, 3); r0_tm0_4[0] = vgetq_lane_f32(_r0_tm_4_0, 0); r0_tm1_4[0] = vgetq_lane_f32(_r0_tm_4_0, 1); r0_tm2_4[0] = vgetq_lane_f32(_r0_tm_4_0, 2); r0_tm3_4[0] = vgetq_lane_f32(_r0_tm_4_0, 3); float32x4_t _t_2_a_4c = vaddq_f32(_t_22, _t_4_x_c); float32x4_t _tmp56a = vmlaq_lane_f32(_t_66, _t_2_a_4c, vget_low_f32(_coeff1), 1); float32x4_t _tmp56b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_high_f32(_coeff0), 1); _tmp56b = vmlaq_lane_f32(_tmp56b, _t_55, vget_low_f32(_coeff0), 1); float32x4_t _r0_tm_4_1 = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _r0_tm_4_2 = vsubq_f32(_tmp56a, _tmp56b); r0_tm0_4[1] = vgetq_lane_f32(_r0_tm_4_1, 0); r0_tm1_4[1] = vgetq_lane_f32(_r0_tm_4_1, 1); r0_tm2_4[1] = vgetq_lane_f32(_r0_tm_4_1, 2); r0_tm3_4[1] = vgetq_lane_f32(_r0_tm_4_1, 3); r0_tm0_4[2] = vgetq_lane_f32(_r0_tm_4_2, 0); r0_tm1_4[2] = vgetq_lane_f32(_r0_tm_4_2, 1); r0_tm2_4[2] = vgetq_lane_f32(_r0_tm_4_2, 2); r0_tm3_4[2] = vgetq_lane_f32(_r0_tm_4_2, 3); t0 += 8*4; t1 += 8*4; t2 += 8*4; t3 += 8*4; r0_tm0_0 += img0_tm.w*tiles*2*4; r0_tm0_4 += img0_tm.w*tiles*2*4; r0_tm1_0 += img0_tm.w*tiles*2*4; r0_tm1_4 += img0_tm.w*tiles*2*4; r0_tm2_0 += img0_tm.w*tiles*2*4; r0_tm2_4 += img0_tm.w*tiles*2*4; r0_tm3_0 += img0_tm.w*tiles*2*4; r0_tm3_4 += img0_tm.w*tiles*2*4; } #else // __aarch64__ float* t0 = tmp[0]; float* t1 = tmp[1]; float* t2 = tmp[2]; float* t3 = tmp[3]; float* t4 = tmp[4]; float* t5 = tmp[5]; float* t6 = tmp[6]; float* t7 = tmp[7]; int stepw = w*4*4; asm volatile( // loop0 "vld1.f32 {d16-d19}, [%8], %26 \n" "vld1.f32 {d20-d23}, [%9], %26 \n" "vld1.f32 {d24-d27}, [%10], %26 \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%11], %26 \n" "vtrn.32 q9, 
q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f25[1] \n" "vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c "vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c "vmls.f32 q4, q9, %f25[0] \n" "vmls.f32 q5, q14, %f25[0] \n" "vst1.f32 {d4-d5}, [%0]! \n"// tmp[0][m] "vmov q3, q7 \n"// use q7 "vadd.f32 q2, q13, q6 \n"// use q6 "vmla.f32 q3, q10, %e24[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n"// use q7 "vadd.f32 q6, q12, q6 \n"// use q6 "vmla.f32 q5, q10, %f24[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e24[0] \n" "vmla.f32 q3, q11, %f24[1] \n" "vst1.f32 {d16-d17}, [%1]! \n"// tmp[1][m] "vmla.f32 q4, q6, %e25[1] \n" "vmla.f32 q5, q11, %e24[1] \n" "vst1.f32 {d18-d19}, [%2]! \n"// tmp[2][m] "vadd.f32 q8, q2, q3 \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vadd.f32 q2, q4, q5 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d16-d17}, [%3]! \n"// tmp[3][m] "vst1.f32 {d18-d19}, [%4]! \n"// tmp[4][m] "vmla.f32 q6, q7, %f25[1] \n" "vst1.f32 {d4-d5}, [%5]! \n"// tmp[5][m] "vst1.f32 {d6-d7}, [%6]! \n"// tmp[6][m] "vst1.f32 {d12-d13}, [%7]! 
\n"// tmp[7][m] // loop1 "vld1.f32 {d16-d19}, [%8] \n" "vld1.f32 {d20-d23}, [%9] \n" "vld1.f32 {d24-d27}, [%10] \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%11] \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f25[1] \n" "vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c "vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c "vmls.f32 q4, q9, %f25[0] \n" "vmls.f32 q5, q14, %f25[0] \n" "vst1.f32 {d4-d5}, [%0]! \n"// tmp[0][m] "vmov q3, q7 \n"// use q7 "vadd.f32 q2, q13, q6 \n"// use q6 "vmla.f32 q3, q10, %e24[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n"// use q7 "vadd.f32 q6, q12, q6 \n"// use q6 "vmla.f32 q5, q10, %f24[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e24[0] \n" "vmla.f32 q3, q11, %f24[1] \n" "vst1.f32 {d16-d17}, [%1]! \n"// tmp[1][m] "vmla.f32 q4, q6, %e25[1] \n" "vmla.f32 q5, q11, %e24[1] \n" "vst1.f32 {d18-d19}, [%2]! \n"// tmp[2][m] "vadd.f32 q8, q2, q3 \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vadd.f32 q2, q4, q5 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d16-d17}, [%3]! \n"// tmp[3][m] "vst1.f32 {d18-d19}, [%4]! \n"// tmp[4][m] "vmla.f32 q6, q7, %f25[1] \n" "vst1.f32 {d4-d5}, [%5]! \n"// tmp[5][m] "vst1.f32 {d6-d7}, [%6]! \n"// tmp[6][m] "vst1.f32 {d12-d13}, [%7]! 
\n"// tmp[7][m] : "=r"(t0), // %0 "=r"(t1), // %1 "=r"(t2), // %2 "=r"(t3), // %3 "=r"(t4), // %4 "=r"(t5), // %5 "=r"(t6), // %6 "=r"(t7), // %7 "=r"(r0), // %8 "=r"(r1), // %9 "=r"(r2), // %10 "=r"(r3) // %11 : "0"(t0), "1"(t1), "2"(t2), "3"(t3), "4"(t4), "5"(t5), "6"(t6), "7"(t7), "8"(r0), "9"(r1), "10"(r2), "11"(r3), "w"(_coeff0), // %24 "w"(_coeff1), // %25 "r"(stepw) // %26 : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); t0 = tmp[0]; t1 = tmp[1]; t2 = tmp[2]; t3 = tmp[3]; float* r0_tm0_0 = img0_tm.row(i * w_tm/8 + j); float* r0_tm0_4 = img0_tm.row(i * w_tm/8 + j + tiles); float* r0_tm1_0 = img0_tm.row(i * w_tm/8 + j + tiles*2); float* r0_tm1_4 = img0_tm.row(i * w_tm/8 + j + tiles*3); float* r0_tm2_0 = img0_tm.row(i * w_tm/8 + j + tiles*4); float* r0_tm2_4 = img0_tm.row(i * w_tm/8 + j + tiles*5); float* r0_tm3_0 = img0_tm.row(i * w_tm/8 + j + tiles*6); float* r0_tm3_4 = img0_tm.row(i * w_tm/8 + j + tiles*7); int step = img0_tm.w*tiles*2*4*4; asm volatile( // loop0 "vld1.f32 {d16-d19}, [%8] \n" "add %8, %8, #128 \n" "vld1.f32 {d20-d23}, [%9] \n" "add %9, %9, #128 \n" "vld1.f32 {d24-d27}, [%10] \n" "add %10, %10, #128 \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%11] \n" "add %11, %11, #128 \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f25[1] \n" "vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c "vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c "vmls.f32 q4, q9, %f25[0] \n" "vmls.f32 q5, q14, %f25[0] \n" "vst1.f32 {d4[0]}, [%0]! \n" "vst1.f32 {d4[1]}, [%2]! \n" "vmov q3, q7 \n"// use q7 "vst1.f32 {d5[0]}, [%4]! \n" "vst1.f32 {d5[1]}, [%6]! 
\n" "vadd.f32 q2, q13, q6 \n"// use q6 "vmla.f32 q3, q10, %e24[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n"// use q7 "vadd.f32 q6, q12, q6 \n"// use q6 "vmla.f32 q5, q10, %f24[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e24[0] \n" "vmla.f32 q3, q11, %f24[1] \n" "vst1.f32 {d16[0]}, [%0]! \n" "vst1.f32 {d16[1]}, [%2]! \n" "vmla.f32 q4, q6, %e25[1] \n" "vst1.f32 {d17[0]}, [%4]! \n" "vst1.f32 {d17[1]}, [%6]! \n" "vmla.f32 q5, q11, %e24[1] \n" "vst1.f32 {d18[0]}, [%0]! \n" "vst1.f32 {d18[1]}, [%2]! \n" "vadd.f32 q8, q2, q3 \n" "vst1.f32 {d19[0]}, [%4]! \n" "vst1.f32 {d19[1]}, [%6]! \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vadd.f32 q2, q4, q5 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d16[0]}, [%0], %26 \n" "vst1.f32 {d16[1]}, [%2], %26 \n" "vmla.f32 q6, q7, %f25[1] \n" "vst1.f32 {d17[0]}, [%4], %26 \n" "vst1.f32 {d17[1]}, [%6], %26 \n" "vtrn.32 q9, q2 \n" "vtrn.32 q3, q6 \n" "sub %0, %0, #12 \n" "sub %2, %2, #12 \n" "sub %4, %4, #12 \n" "sub %6, %6, #12 \n" "vswp d19, d6 \n" "vswp d5, d12 \n" "vst1.f32 {d18-d19}, [%1], %26 \n" "vst1.f32 {d4-d5}, [%3], %26 \n" "vst1.f32 {d6-d7}, [%5], %26 \n" "vst1.f32 {d12-d13}, [%7], %26 \n" // loop1 "vld1.f32 {d16-d19}, [%8] \n" "vld1.f32 {d20-d23}, [%9] \n" "vld1.f32 {d24-d27}, [%10] \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%11] \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f25[1] \n" "vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c "vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c "vmls.f32 q4, q9, %f25[0] \n" "vmls.f32 q5, q14, %f25[0] \n" "vst1.f32 {d4[0]}, [%0]! \n" "vst1.f32 {d4[1]}, [%2]! \n" "vmov q3, q7 \n"// use q7 "vst1.f32 {d5[0]}, [%4]! 
\n" "vst1.f32 {d5[1]}, [%6]! \n" "vadd.f32 q2, q13, q6 \n"// use q6 "vmla.f32 q3, q10, %e24[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n"// use q7 "vadd.f32 q6, q12, q6 \n"// use q6 "vmla.f32 q5, q10, %f24[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e24[0] \n" "vmla.f32 q3, q11, %f24[1] \n" "vst1.f32 {d16[0]}, [%0]! \n" "vst1.f32 {d16[1]}, [%2]! \n" "vmla.f32 q4, q6, %e25[1] \n" "vst1.f32 {d17[0]}, [%4]! \n" "vst1.f32 {d17[1]}, [%6]! \n" "vmla.f32 q5, q11, %e24[1] \n" "vst1.f32 {d18[0]}, [%0]! \n" "vst1.f32 {d18[1]}, [%2]! \n" "vadd.f32 q8, q2, q3 \n" "vst1.f32 {d19[0]}, [%4]! \n" "vst1.f32 {d19[1]}, [%6]! \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vadd.f32 q2, q4, q5 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d16[0]}, [%0] \n" "vst1.f32 {d16[1]}, [%2] \n" "vmla.f32 q6, q7, %f25[1] \n" "vst1.f32 {d17[0]}, [%4] \n" "vst1.f32 {d17[1]}, [%6] \n" "vtrn.32 q9, q2 \n" "vtrn.32 q3, q6 \n" "vswp d19, d6 \n" "vswp d5, d12 \n" "vst1.f32 {d18-d19}, [%1] \n" "vst1.f32 {d4-d5}, [%3] \n" "vst1.f32 {d6-d7}, [%5] \n" "vst1.f32 {d12-d13}, [%7] \n" : "=r"(r0_tm0_0), // %0 "=r"(r0_tm0_4), // %1 "=r"(r0_tm1_0), // %2 "=r"(r0_tm1_4), // %3 "=r"(r0_tm2_0), // %4 "=r"(r0_tm2_4), // %5 "=r"(r0_tm3_0), // %6 "=r"(r0_tm3_4), // %7 "=r"(t0), // %8 "=r"(t1), // %9 "=r"(t2), // %10 "=r"(t3) // %11 : "0"(r0_tm0_0), "1"(r0_tm0_4), "2"(r0_tm1_0), "3"(r0_tm1_4), "4"(r0_tm2_0), "5"(r0_tm2_4), "6"(r0_tm3_0), "7"(r0_tm3_4), "8"(t0), "9"(t1), "10"(t2), "11"(t3), "w"(_coeff0), // %24 "w"(_coeff1), // %25 "r"(step) // %26 : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else const float* r0 = img0.row(i * 6) + j * 6; for (int m=0; m<8; m++) { tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25f; tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25f; float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25f); float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25f); tmp[1][m] = tmp12a + 
tmp12b; tmp[2][m] = tmp12a - tmp12b; float tmp34a = (r0[6] + r0[2] * 0.25f - r0[4] * 1.25f); float tmp34b = (r0[1] * 0.5f - r0[3] * 2.5f + r0[5] * 2.f); tmp[3][m] = tmp34a + tmp34b; tmp[4][m] = tmp34a - tmp34b; float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25f) * 4.f); float tmp56b = (r0[1] * 2.f - r0[3] * 2.5f + r0[5] * 0.5f); tmp[5][m] = tmp56a + tmp56b; tmp[6][m] = tmp56a - tmp56b; r0 += w; } float* r0_tm_0 = img0_tm.row(i * w_tm/8 + j); float* r0_tm_4 = img0_tm.row(i * w_tm/8 + j + tiles); for (int m=0; m<8; m++) { const float* tmp0 = tmp[m]; r0_tm_0[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25f; r0_tm_4[3] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25f; float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25f); float tmp12b = (tmp0[1] - tmp0[3] * 4.25f + tmp0[5]); r0_tm_0[1] = tmp12a + tmp12b; r0_tm_0[2] = tmp12a - tmp12b; float tmp34a = (tmp0[6] + tmp0[2] * 0.25f - tmp0[4] * 1.25f); float tmp34b = (tmp0[1] * 0.5f - tmp0[3] * 2.5f + tmp0[5] * 2.f); r0_tm_0[3] = tmp34a + tmp34b; r0_tm_4[0] = tmp34a - tmp34b; float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25f) * 4.f); float tmp56b = (tmp0[1] * 2.f - tmp0[3] * 2.5f + tmp0[5] * 0.5f); r0_tm_4[1] = tmp56a + tmp56b; r0_tm_4[2] = tmp56a - tmp56b; r0_tm_0 += img0_tm.w * tiles * 2; r0_tm_4 += img0_tm.w * tiles * 2; } #endif // __ARM_NEON } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; top_blob_tm.create(4, 16 * w_tm/8 * h_tm/8, outch); const int tiles = h_tm/8 * w_tm/8; int nn_outch = outch >> 2; int remain_outch_start = nn_outch << 2; #pragma omp parallel for for (int pp=0; pp<nn_outch; pp++) { int p = pp * 4; Mat out0_tm = top_blob_tm.channel(p); Mat out1_tm = top_blob_tm.channel(p+1); Mat out2_tm = top_blob_tm.channel(p+2); Mat out3_tm = top_blob_tm.channel(p+3); const float* ktm = kernel_tm.channel(pp); out0_tm.fill(0.f); out1_tm.fill(0.f); out2_tm.fill(0.f); out3_tm.fill(0.f); int q = 0; #if __ARM_NEON && __aarch64__ 
for (; q+3<inch; q+=4) { const float* r0 = bottom_blob_tm.channel(q); const float* r1 = bottom_blob_tm.channel(q+1); const float* r2 = bottom_blob_tm.channel(q+2); const float* r3 = bottom_blob_tm.channel(q+3); float* output0_tm = out0_tm; float* output1_tm = out1_tm; float* output2_tm = out2_tm; float* output3_tm = out3_tm; asm volatile( "mov w0, #16 \n"// w0 = r = 16 "0: \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%8], #64 \n"// v0 v1 v2 v3 = _k00 _k01 _k02 _k03 "prfm pldl1keep, [%8, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%8], #64 \n"// v4 v5 v6 v7 = _k10 _k11 _k12 _k13 "prfm pldl1keep, [%8, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n"// v8 v9 v10 v11 = _k20 _k21 _k22 _k23 "prfm pldl1keep, [%8, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%8], #64 \n"// v12 v13 v14 v15 = _k30 _k31 _k32 _k33 // tile loop "lsr w1, %w18, #2 \n"// w1 = nn = tiles >> 2 "cmp w1, #0 \n" "beq 2f \n" //BEGIN tile loop "prfm pldl1keep, [%4, #128] \n"// "ld1 {v16.4s}, [%4], #16 \n" "1: \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v20.4s}, [%0] \n" "add x4, %0, #16 \n"// x4 = %0 next "fmla v20.4s, v16.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v21.4s}, [%1] \n" "add x5, %1, #16 \n"// x5 = %1 next "fmla v21.4s, v16.4s, v4.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v22.4s}, [%2] \n" "add x6, %2, #16 \n"// x6 = %2 next "fmla v22.4s, v16.4s, v8.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v23.4s}, [%3] \n" "add x7, %3, #16 \n"// x7 = %3 next "prfm pldl1keep, [%5, #128] \n" "ld1 {v17.4s}, [%5], #16 \n" "fmla v23.4s, v16.4s, v12.4s \n" "prfm pldl1keep, [x4, #128] \n" "ld1 {v24.4s}, [x4] \n" "fmla v20.4s, v17.4s, v1.4s \n" "fmla v21.4s, v17.4s, v5.4s \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v18.4s}, [%6], #16 \n" "fmla v22.4s, v17.4s, v9.4s \n" "fmla v23.4s, v17.4s, v13.4s \n" "prfm pldl1keep, [x5, #128] \n" "ld1 {v25.4s}, [x5] \n" "fmla v20.4s, v18.4s, v2.4s \n" "fmla v21.4s, v18.4s, v6.4s \n" "prfm pldl1keep, [%7, #128] \n" "ld1 
{v19.4s}, [%7], #16 \n" "fmla v22.4s, v18.4s, v10.4s \n" "fmla v23.4s, v18.4s, v14.4s \n" "prfm pldl1keep, [x6, #128] \n" "ld1 {v26.4s}, [x6] \n" "fmla v20.4s, v19.4s, v3.4s \n" "fmla v21.4s, v19.4s, v7.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v16.4s}, [%4], #16 \n" "fmla v22.4s, v19.4s, v11.4s \n" "fmla v23.4s, v19.4s, v15.4s \n" /////// "prfm pldl1keep, [x7, #128] \n" "ld1 {v27.4s}, [x7] \n" "st1 {v20.4s}, [%0] \n" "add %0, %0, #32 \n" "fmla v24.4s, v16.4s, v0.4s \n" "fmla v25.4s, v16.4s, v4.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v17.4s}, [%5], #16 \n" "fmla v26.4s, v16.4s, v8.4s \n" "fmla v27.4s, v16.4s, v12.4s \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v20.4s}, [%0] \n" "st1 {v21.4s}, [%1] \n" "add %1, %1, #32 \n" "fmla v24.4s, v17.4s, v1.4s \n" "fmla v25.4s, v17.4s, v5.4s \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v18.4s}, [%6], #16 \n" "fmla v26.4s, v17.4s, v9.4s \n" "fmla v27.4s, v17.4s, v13.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v21.4s}, [%1] \n" "st1 {v22.4s}, [%2] \n" "add %2, %2, #32 \n" "fmla v24.4s, v18.4s, v2.4s \n" "fmla v25.4s, v18.4s, v6.4s \n" "prfm pldl1keep, [%7, #128] \n" "ld1 {v19.4s}, [%7], #16 \n" "fmla v26.4s, v18.4s, v10.4s \n" "fmla v27.4s, v18.4s, v14.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v22.4s}, [%2] \n" "st1 {v23.4s}, [%3] \n" "add %3, %3, #32 \n" "fmla v24.4s, v19.4s, v3.4s \n" "fmla v25.4s, v19.4s, v7.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v16.4s}, [%4], #16 \n" "fmla v26.4s, v19.4s, v11.4s \n" "fmla v27.4s, v19.4s, v15.4s \n" /////// "prfm pldl1keep, [%3, #128] \n" "ld1 {v23.4s}, [%3] \n" "st1 {v24.4s}, [x4] \n" "add x4, x4, #32 \n" "fmla v20.4s, v16.4s, v0.4s \n" "fmla v21.4s, v16.4s, v4.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v17.4s}, [%5], #16 \n" "fmla v22.4s, v16.4s, v8.4s \n" "fmla v23.4s, v16.4s, v12.4s \n" "prfm pldl1keep, [x4, #128] \n" "ld1 {v24.4s}, [x4] \n" "st1 {v25.4s}, [x5] \n" "add x5, x5, #32 \n" "fmla v20.4s, v17.4s, v1.4s \n" "fmla v21.4s, v17.4s, v5.4s \n" "prfm pldl1keep, [%6, 
#128] \n" "ld1 {v18.4s}, [%6], #16 \n" "fmla v22.4s, v17.4s, v9.4s \n" "fmla v23.4s, v17.4s, v13.4s \n" "prfm pldl1keep, [x5, #128] \n" "ld1 {v25.4s}, [x5] \n" "st1 {v26.4s}, [x6] \n" "add x6, x6, #32 \n" "fmla v20.4s, v18.4s, v2.4s \n" "fmla v21.4s, v18.4s, v6.4s \n" "prfm pldl1keep, [%7, #128] \n" "ld1 {v19.4s}, [%7], #16 \n" "fmla v22.4s, v18.4s, v10.4s \n" "fmla v23.4s, v18.4s, v14.4s \n" "prfm pldl1keep, [x6, #128] \n" "ld1 {v26.4s}, [x6] \n" "st1 {v27.4s}, [x7] \n" "add x7, x7, #32 \n" "fmla v20.4s, v19.4s, v3.4s \n" "fmla v21.4s, v19.4s, v7.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v16.4s}, [%4], #16 \n" "fmla v22.4s, v19.4s, v11.4s \n" "fmla v23.4s, v19.4s, v15.4s \n" /////// "prfm pldl1keep, [x7, #128] \n" "ld1 {v27.4s}, [x7] \n" "st1 {v20.4s}, [%0] \n" "fmla v24.4s, v16.4s, v0.4s \n" "fmla v25.4s, v16.4s, v4.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v17.4s}, [%5], #16 \n" "fmla v26.4s, v16.4s, v8.4s \n" "fmla v27.4s, v16.4s, v12.4s \n" "st1 {v21.4s}, [%1] \n" "fmla v24.4s, v17.4s, v1.4s \n" "fmla v25.4s, v17.4s, v5.4s \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v18.4s}, [%6], #16 \n" "fmla v26.4s, v17.4s, v9.4s \n" "fmla v27.4s, v17.4s, v13.4s \n" "st1 {v22.4s}, [%2] \n" "fmla v24.4s, v18.4s, v2.4s \n" "fmla v25.4s, v18.4s, v6.4s \n" "prfm pldl1keep, [%7, #128] \n" "ld1 {v19.4s}, [%7], #16 \n" "fmla v26.4s, v18.4s, v10.4s \n" "fmla v27.4s, v18.4s, v14.4s \n" "st1 {v23.4s}, [%3] \n" "fmla v24.4s, v19.4s, v3.4s \n" "fmla v25.4s, v19.4s, v7.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v16.4s}, [%4], #16 \n" "fmla v26.4s, v19.4s, v11.4s \n" "fmla v27.4s, v19.4s, v15.4s \n" "st1 {v24.4s}, [x4], #16 \n" "mov %0, x4 \n" "st1 {v25.4s}, [x5], #16 \n" "mov %1, x5 \n" "subs w1, w1, #1 \n" "st1 {v26.4s}, [x6], #16 \n" "mov %2, x6 \n" "st1 {v27.4s}, [x7], #16 \n" "mov %3, x7 \n" "bne 1b \n" "sub %4, %4, #16 \n" //END tile loop "2: \n" // remain loop "and w1, %w18, #3 \n"// w1 = remain = tiles & 3; "cmp w1, #0 \n" "beq 4f \n" //BEGIN remain loop "3: \n" "prfm 
pldl1keep, [%4, #128] \n" "ld1 {v16.4s}, [%4], #16 \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v20.4s}, [%0] \n" "fmla v20.4s, v16.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v21.4s}, [%1] \n" "fmla v21.4s, v16.4s, v4.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v22.4s}, [%2] \n" "fmla v22.4s, v16.4s, v8.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v23.4s}, [%3] \n" "fmla v23.4s, v16.4s, v12.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v17.4s}, [%5], #16 \n" "fmla v20.4s, v17.4s, v1.4s \n" "fmla v21.4s, v17.4s, v5.4s \n" "fmla v22.4s, v17.4s, v9.4s \n" "fmla v23.4s, v17.4s, v13.4s \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v18.4s}, [%6], #16 \n" "fmla v20.4s, v18.4s, v2.4s \n" "fmla v21.4s, v18.4s, v6.4s \n" "fmla v22.4s, v18.4s, v10.4s \n" "fmla v23.4s, v18.4s, v14.4s \n" "prfm pldl1keep, [%7, #128] \n" "ld1 {v19.4s}, [%7], #16 \n" "fmla v20.4s, v19.4s, v3.4s \n" "fmla v21.4s, v19.4s, v7.4s \n" "fmla v22.4s, v19.4s, v11.4s \n" "fmla v23.4s, v19.4s, v15.4s \n" "st1 {v20.4s}, [%0], #16 \n" "st1 {v21.4s}, [%1], #16 \n" "subs w1, w1, #1 \n" "st1 {v22.4s}, [%2], #16 \n" "st1 {v23.4s}, [%3], #16 \n" "bne 3b \n" //END remain loop "4: \n" "subs w0, w0, #1 \n" "bne 0b \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(r1), // %5 "=r"(r2), // %6 "=r"(r3), // %7 "=r"(ktm) // %8 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(r1), "6"(r2), "7"(r3), "8"(ktm), "r"(tiles) // %18 : "cc", "memory", "x0", "x1", "x4", "x5", "x6", "x7", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27" ); } #endif // __ARM_NEON && __aarch64__ for (; q+1<inch; q+=2) { const float* r0 = bottom_blob_tm.channel(q); const float* r1 = bottom_blob_tm.channel(q+1); float* output0_tm = out0_tm; float* output1_tm = out1_tm; float* output2_tm = out2_tm; float* 
output3_tm = out3_tm; #if __ARM_NEON #if __aarch64__ asm volatile( "mov w0, #16 \n"// w0 = r = 16 "0: \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v0.4s, v1.4s}, [%6], #32 \n"// v0 v1 = _k00 _k01 "prfm pldl1keep, [%6, #256] \n" "ld1 {v2.4s, v3.4s}, [%6], #32 \n"// v2 v3 = _k10 _k11 "prfm pldl1keep, [%6, #256] \n" "ld1 {v4.4s, v5.4s}, [%6], #32 \n"// v4 v5 = _k20 _k21 "prfm pldl1keep, [%6, #256] \n" "ld1 {v6.4s, v7.4s}, [%6], #32 \n"// v6 v7 = _k30 _k31 // tile loop "lsr w1, %w14, #2 \n"// w1 = nn = tiles >> 2 "cmp w1, #0 \n" "beq 2f \n" //BEGIN tile loop "prfm pldl1keep, [%4, #128] \n" "ld1 {v20.4s}, [%4], #16 \n" "1: \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v16.4s}, [%0] \n" "fmla v16.4s, v20.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v17.4s}, [%1] \n" "fmla v17.4s, v20.4s, v2.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v18.4s}, [%2] \n" "fmla v18.4s, v20.4s, v4.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v19.4s}, [%3] \n" "fmla v19.4s, v20.4s, v6.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v21.4s}, [%5], #16 \n" "fmla v16.4s, v21.4s, v1.4s \n" "fmla v17.4s, v21.4s, v3.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v20.4s}, [%4], #16 \n" "fmla v18.4s, v21.4s, v5.4s \n" "fmla v19.4s, v21.4s, v7.4s \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, [%1], #16 \n" //// "prfm pldl1keep, [%0, #128] \n" "ld1 {v16.4s}, [%0] \n" "fmla v16.4s, v20.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v17.4s}, [%1] \n" "fmla v17.4s, v20.4s, v2.4s \n" "st1 {v18.4s}, [%2], #16 \n" "st1 {v19.4s}, [%3], #16 \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v18.4s}, [%2] \n" "fmla v18.4s, v20.4s, v4.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v19.4s}, [%3] \n" "fmla v19.4s, v20.4s, v6.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v21.4s}, [%5], #16 \n" "fmla v16.4s, v21.4s, v1.4s \n" "fmla v17.4s, v21.4s, v3.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v20.4s}, [%4], #16 \n" "fmla v18.4s, v21.4s, v5.4s \n" "fmla v19.4s, v21.4s, v7.4s \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, 
[%1], #16 \n" //// "prfm pldl1keep, [%0, #128] \n" "ld1 {v16.4s}, [%0] \n" "fmla v16.4s, v20.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v17.4s}, [%1] \n" "fmla v17.4s, v20.4s, v2.4s \n" "st1 {v18.4s}, [%2], #16 \n" "st1 {v19.4s}, [%3], #16 \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v18.4s}, [%2] \n" "fmla v18.4s, v20.4s, v4.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v19.4s}, [%3] \n" "fmla v19.4s, v20.4s, v6.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v21.4s}, [%5], #16 \n" "fmla v16.4s, v21.4s, v1.4s \n" "fmla v17.4s, v21.4s, v3.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v20.4s}, [%4], #16 \n" "fmla v18.4s, v21.4s, v5.4s \n" "fmla v19.4s, v21.4s, v7.4s \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, [%1], #16 \n" //// "prfm pldl1keep, [%0, #128] \n" "ld1 {v16.4s}, [%0] \n" "fmla v16.4s, v20.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v17.4s}, [%1] \n" "fmla v17.4s, v20.4s, v2.4s \n" "st1 {v18.4s}, [%2], #16 \n" "st1 {v19.4s}, [%3], #16 \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v18.4s}, [%2] \n" "fmla v18.4s, v20.4s, v4.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v19.4s}, [%3] \n" "fmla v19.4s, v20.4s, v6.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v21.4s}, [%5], #16 \n" "fmla v16.4s, v21.4s, v1.4s \n" "fmla v17.4s, v21.4s, v3.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v20.4s}, [%4], #16 \n" "fmla v18.4s, v21.4s, v5.4s \n" "fmla v19.4s, v21.4s, v7.4s \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, [%1], #16 \n" "subs w1, w1, #1 \n" "st1 {v18.4s}, [%2], #16 \n" "st1 {v19.4s}, [%3], #16 \n" "bne 1b \n" "sub %4, %4, #16 \n" //END tile loop "2: \n" // remain loop "and w1, %w14, #3 \n"// w1 = remain = tiles & 3; "cmp w1, #0 \n" "beq 4f \n" //BEGIN remain loop "3: \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v20.4s}, [%4], #16 \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v16.4s}, [%0] \n" "fmla v16.4s, v20.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v17.4s}, [%1] \n" "fmla v17.4s, v20.4s, v2.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v18.4s}, 
[%2] \n" "fmla v18.4s, v20.4s, v4.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v19.4s}, [%3] \n" "fmla v19.4s, v20.4s, v6.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v21.4s}, [%5], #16 \n" "fmla v16.4s, v21.4s, v1.4s \n" "fmla v17.4s, v21.4s, v3.4s \n" "fmla v18.4s, v21.4s, v5.4s \n" "fmla v19.4s, v21.4s, v7.4s \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, [%1], #16 \n" "subs w1, w1, #1 \n" "st1 {v18.4s}, [%2], #16 \n" "st1 {v19.4s}, [%3], #16 \n" "bne 3b \n" //END remain loop "4: \n" "subs w0, w0, #1 \n" "bne 0b \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(r1), // %5 "=r"(ktm) // %6 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(r1), "6"(ktm), "r"(tiles) // %14 : "cc", "memory", "x0", "x1", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21" ); #else asm volatile( "mov r0, #16 \n"// r0 = r = 16 "0: \n" "pld [%6, #256] \n" "vld1.f32 {d0-d3}, [%6 :128]! \n"// q0 q1 = _k00 _k01 "pld [%6, #256] \n" "vld1.f32 {d4-d7}, [%6 :128]! \n"// q2 q3 = _k10 _k11 "pld [%6, #256] \n" "vld1.f32 {d8-d11}, [%6 :128]! \n"// q4 q5 = _k20 _k21 "pld [%6, #256] \n" "vld1.f32 {d12-d15}, [%6 :128]! \n"// q6 q7 = _k30 _k31 // tile loop "lsr r1, %14, #2 \n"// r1 = nn = tiles >> 2 "cmp r1, #0 \n" "beq 2f \n" //BEGIN tile loop "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0 "1: \n" "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm "vmla.f32 q8, q12, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128] \n"// q9 = _output1_tm "vmla.f32 q9, q12, q2 \n" "pld [%2, #128] \n" "vld1.f32 {d20-d21}, [%2 :128] \n"// q10 = _output2_tm "vmla.f32 q10, q12, q4 \n" "pld [%3, #128] \n" "vld1.f32 {d22-d23}, [%3 :128] \n"// q11 = _output3_tm "vmla.f32 q11, q12, q6 \n" "pld [%5, #128] \n" "vld1.f32 {d26-d27}, [%5 :128]! 
\n"// q13 = _r1 "vmla.f32 q8, q13, q1 \n" "vmla.f32 q9, q13, q3 \n" "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0 "vmla.f32 q10, q13, q5 \n" "vmla.f32 q11, q13, q7 \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" //// "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm "vmla.f32 q8, q12, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128] \n"// q9 = _output1_tm "vmla.f32 q9, q12, q2 \n" "vst1.f32 {d20-d21}, [%2 :128]! \n" "vst1.f32 {d22-d23}, [%3 :128]! \n" "pld [%2, #128] \n" "vld1.f32 {d20-d21}, [%2 :128] \n"// q10 = _output2_tm "vmla.f32 q10, q12, q4 \n" "pld [%3, #128] \n" "vld1.f32 {d22-d23}, [%3 :128] \n"// q11 = _output3_tm "vmla.f32 q11, q12, q6 \n" "pld [%5, #128] \n" "vld1.f32 {d26-d27}, [%5 :128]! \n"// q13 = _r1 "vmla.f32 q8, q13, q1 \n" "vmla.f32 q9, q13, q3 \n" "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0 "vmla.f32 q10, q13, q5 \n" "vmla.f32 q11, q13, q7 \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" //// "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm "vmla.f32 q8, q12, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128] \n"// q9 = _output1_tm "vmla.f32 q9, q12, q2 \n" "vst1.f32 {d20-d21}, [%2 :128]! \n" "vst1.f32 {d22-d23}, [%3 :128]! \n" "pld [%2, #128] \n" "vld1.f32 {d20-d21}, [%2 :128] \n"// q10 = _output2_tm "vmla.f32 q10, q12, q4 \n" "pld [%3, #128] \n" "vld1.f32 {d22-d23}, [%3 :128] \n"// q11 = _output3_tm "vmla.f32 q11, q12, q6 \n" "pld [%5, #128] \n" "vld1.f32 {d26-d27}, [%5 :128]! \n"// q13 = _r1 "vmla.f32 q8, q13, q1 \n" "vmla.f32 q9, q13, q3 \n" "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0 "vmla.f32 q10, q13, q5 \n" "vmla.f32 q11, q13, q7 \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! 
\n" //// "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm "vmla.f32 q8, q12, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128] \n"// q9 = _output1_tm "vmla.f32 q9, q12, q2 \n" "vst1.f32 {d20-d21}, [%2 :128]! \n" "vst1.f32 {d22-d23}, [%3 :128]! \n" "pld [%2, #128] \n" "vld1.f32 {d20-d21}, [%2 :128] \n"// q10 = _output2_tm "vmla.f32 q10, q12, q4 \n" "pld [%3, #128] \n" "vld1.f32 {d22-d23}, [%3 :128] \n"// q11 = _output3_tm "vmla.f32 q11, q12, q6 \n" "pld [%5, #128] \n" "vld1.f32 {d26-d27}, [%5 :128]! \n"// q13 = _r1 "vmla.f32 q8, q13, q1 \n" "vmla.f32 q9, q13, q3 \n" "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0 "vmla.f32 q10, q13, q5 \n" "vmla.f32 q11, q13, q7 \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" "subs r1, #1 \n" "vst1.f32 {d20-d21}, [%2 :128]! \n" "vst1.f32 {d22-d23}, [%3 :128]! \n" "bne 1b \n" "sub %4, %4, #16 \n" //END tile loop "2: \n" // remain loop "and r1, %14, #3 \n"// r1 = remain = tiles & 3; "cmp r1, #0 \n" "beq 4f \n" //BEGIN remain loop "3: \n" "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0 "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm "vmla.f32 q8, q12, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128] \n"// q9 = _output1_tm "vmla.f32 q9, q12, q2 \n" "pld [%2, #128] \n" "vld1.f32 {d20-d21}, [%2 :128] \n"// q10 = _output2_tm "vmla.f32 q10, q12, q4 \n" "pld [%3, #128] \n" "vld1.f32 {d22-d23}, [%3 :128] \n"// q11 = _output3_tm "vmla.f32 q11, q12, q6 \n" "pld [%5, #128] \n" "vld1.f32 {d26-d27}, [%5 :128]! \n"// q13 = _r1 "vmla.f32 q8, q13, q1 \n" "vmla.f32 q9, q13, q3 \n" "vmla.f32 q10, q13, q5 \n" "vmla.f32 q11, q13, q7 \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" "subs r1, #1 \n" "vst1.f32 {d20-d21}, [%2 :128]! \n" "vst1.f32 {d22-d23}, [%3 :128]! 
\n" "bne 3b \n" //END remain loop "4: \n" "subs r0, #1 \n" "bne 0b \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(r1), // %5 "=r"(ktm) // %6 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(r1), "6"(ktm), "r"(tiles) // %14 : "cc", "memory", "r0", "r1", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13" ); #endif // __aarch64__ #else for (int r=0; r<16; r++) { for (int t=0; t<tiles; t++) { for (int m=0; m<4; m++) { output0_tm[m] += r0[m] * ktm[0 +m]; output0_tm[m] += r1[m] * ktm[4 +m]; output1_tm[m] += r0[m] * ktm[8 +m]; output1_tm[m] += r1[m] * ktm[12+m]; output2_tm[m] += r0[m] * ktm[16+m]; output2_tm[m] += r1[m] * ktm[20+m]; output3_tm[m] += r0[m] * ktm[24+m]; output3_tm[m] += r1[m] * ktm[28+m]; } r0 += 4; r1 += 4; output0_tm += 4; output1_tm += 4; output2_tm += 4; output3_tm += 4; } ktm += 32; } #endif // __ARM_NEON } for (; q<inch; q++) { const float* r0 = bottom_blob_tm.channel(q); float* output0_tm = out0_tm; float* output1_tm = out1_tm; float* output2_tm = out2_tm; float* output3_tm = out3_tm; #if __ARM_NEON #if __aarch64__ asm volatile( "mov w0, #16 \n"// w0 = r = 16 "0: \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.4s, v1.4s}, [%5], #32 \n"// v0 v1 = _k00 _k10 "prfm pldl1keep, [%5, #256] \n" "ld1 {v2.4s, v3.4s}, [%5], #32 \n"// v2 v3 = _k20 _k30 // tile loop "mov w1, %w12 \n"// w1 = tiles "cmp w1, #0 \n" "beq 2f \n" //BEGIN tile loop "1: \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v16.4s}, [%4], #16 \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v17.4s}, [%0] \n" "fmla v17.4s, v16.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v18.4s}, [%1] \n" "fmla v18.4s, v16.4s, v1.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v19.4s}, [%2] \n" "fmla v19.4s, v16.4s, v2.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v20.4s}, [%3] \n" "fmla v20.4s, v16.4s, v3.4s \n" "st1 {v17.4s}, [%0], #16 \n" "st1 {v18.4s}, [%1], #16 \n" "subs w1, 
w1, #1 \n" "st1 {v19.4s}, [%2], #16 \n" "st1 {v20.4s}, [%3], #16 \n" "bne 1b \n" //END tile loop "2: \n" "subs w0, w0, #1 \n" "bne 0b \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(ktm) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(ktm), "r"(tiles) // %12 : "cc", "memory", "x0", "x1", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20" ); #else asm volatile( "mov r0, #16 \n"// r0 = r = 16 "0: \n" "pld [%5, #256] \n" "vld1.f32 {d0-d3}, [%5 :128]! \n"// q0 q1 = _k00 _k10 "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n"// q2 q3 = _k20 _k30 // tile loop "mov r1, %12 \n"// r1 = tiles "cmp r1, #0 \n" "beq 2f \n" //BEGIN tile loop "1: \n" "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0 "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm "vmla.f32 q8, q12, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128] \n"// q9 = _output1_tm "vmla.f32 q9, q12, q1 \n" "pld [%2, #128] \n" "vld1.f32 {d20-d21}, [%2 :128] \n"// q10 = _output2_tm "vmla.f32 q10, q12, q2 \n" "pld [%3, #128] \n" "vld1.f32 {d22-d23}, [%3 :128] \n"// q11 = _output3_tm "vmla.f32 q11, q12, q3 \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" "subs r1, #1 \n" "vst1.f32 {d20-d21}, [%2 :128]! \n" "vst1.f32 {d22-d23}, [%3 :128]! 
\n" "bne 1b \n" //END tile loop "2: \n" "subs r0, #1 \n" "bne 0b \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(ktm) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(ktm), "r"(tiles) // %12 : "cc", "memory", "r0", "r1", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13" ); #endif // __aarch64__ #else for (int r=0; r<16; r++) { for (int t=0; t<tiles; t++) { for (int m=0; m<4; m++) { output0_tm[m] += r0[m] * ktm[0 +m]; output1_tm[m] += r0[m] * ktm[4 +m]; output2_tm[m] += r0[m] * ktm[8 +m]; output3_tm[m] += r0[m] * ktm[12+m]; } r0 += 4; output0_tm += 4; output1_tm += 4; output2_tm += 4; output3_tm += 4; } ktm += 16; } #endif // __ARM_NEON } } #pragma omp parallel for for (int p = remain_outch_start; p<outch; p++) { Mat out0_tm = top_blob_tm.channel(p); const float* ktm = (const float*)kernel_tm.channel(nn_outch) + 8*8 * inch * (p-remain_outch_start); out0_tm.fill(0.f); int q = 0; for (; q<inch; q++) { const float* r0 = bottom_blob_tm.channel(q); float* output0_tm = out0_tm; for (int r=0; r<16; r++) { #if __ARM_NEON float32x4_t _k00 = vld1q_f32(ktm); ktm += 4; #endif // __ARM_NEON // tile for (int i=0; i<tiles; i++) { #if __ARM_NEON #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #128] \n" "ld1 {v17.4s}, [%1], #16 \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v16.4s}, [%0] \n" "fmla v16.4s, v17.4s, %4.4s \n" "st1 {v16.4s}, [%0], #16 \n" : "=r"(output0_tm), // %0 "=r"(r0) // %1 : "0"(output0_tm), "1"(r0), "w"(_k00) // %4 : "cc", "memory", "v16", "v17" ); #else asm volatile( "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128]! \n"// q9 = _r0 "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm "vmla.f32 q8, q9, %q4 \n" "vst1.f32 {d16-d17}, [%0 :128]! 
\n" : "=r"(output0_tm), // %0 "=r"(r0) // %1 : "0"(output0_tm), "1"(r0), "w"(_k00) // %4 : "cc", "memory", "q8", "q9" ); #endif // __aarch64__ #else for (int m=0; m<4; m++) { output0_tm[m] += r0[m] * ktm[m]; } r0 += 4; output0_tm += 4; #endif // __ARM_NEON } #if !__ARM_NEON ktm += 4; #endif // __ARM_NEON } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch); { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) #if __ARM_NEON const float coeff[4] = { 4.f, 8.f, 16.f, 32.f }; float32x4_t _coeff = vld1q_f32(coeff); #endif // __ARM_NEON int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm/8 * h_tm/8; #pragma omp parallel for for (int p = 0; p<outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; #if __ARM_NEON float32x2_t _bias0 = vdup_n_f32(bias0); #endif // __ARM_NEON float tmp[6][8]; // tile for (int i=0; i<outh/6; i++) { for (int j=0; j<outw/6; j++) { #if __ARM_NEON const float* output0_tm0_0 = out0_tm.row(i * w_tm/8 + j); const float* output0_tm0_4 = out0_tm.row(i * w_tm/8 + j + tiles); const float* output0_tm1_0 = out0_tm.row(i * w_tm/8 + j + tiles*2); const float* output0_tm1_4 = out0_tm.row(i * w_tm/8 + j + tiles*3); const float* output0_tm2_0 = out0_tm.row(i * w_tm/8 + j + tiles*4); const float* output0_tm2_4 = out0_tm.row(i * w_tm/8 + j + tiles*5); const float* output0_tm3_0 = out0_tm.row(i * w_tm/8 + j + tiles*6); const float* output0_tm3_4 = out0_tm.row(i * w_tm/8 + j + tiles*7); #if __aarch64__ for (int m=0; m+3<8; m+=4) { float32x4_t _output0_tm0_0123 = vld1q_f32(output0_tm0_0); float32x4_t _output0_tm0_4567 = vld1q_f32(output0_tm0_4); float32x4_t _output0_tm1_0123 = vld1q_f32(output0_tm1_0); float32x4_t _output0_tm1_4567 = vld1q_f32(output0_tm1_4); float32x4_t _output0_tm2_0123 = vld1q_f32(output0_tm2_0); float32x4_t _output0_tm2_4567 = vld1q_f32(output0_tm2_4); float32x4_t _output0_tm3_0123 = vld1q_f32(output0_tm3_0); float32x4_t _output0_tm3_4567 = vld1q_f32(output0_tm3_4); float32x4x2_t _output0_tm01_00221133 = vtrnq_f32(_output0_tm0_0123, _output0_tm1_0123); float32x4x2_t _output0_tm01_44665577 = vtrnq_f32(_output0_tm0_4567, _output0_tm1_4567); float32x4x2_t _output0_tm23_00221133 = vtrnq_f32(_output0_tm2_0123, _output0_tm3_0123); float32x4x2_t _output0_tm23_44665577 = vtrnq_f32(_output0_tm2_4567, _output0_tm3_4567); // no vswp intrinsic :( float32x4_t _output0_tm_00 = vcombine_f32(vget_low_f32(_output0_tm01_00221133.val[0]), vget_low_f32(_output0_tm23_00221133.val[0])); float32x4_t _output0_tm_11 = vcombine_f32(vget_low_f32(_output0_tm01_00221133.val[1]), vget_low_f32(_output0_tm23_00221133.val[1])); float32x4_t _output0_tm_22 = vcombine_f32(vget_high_f32(_output0_tm01_00221133.val[0]), 
vget_high_f32(_output0_tm23_00221133.val[0])); float32x4_t _output0_tm_33 = vcombine_f32(vget_high_f32(_output0_tm01_00221133.val[1]), vget_high_f32(_output0_tm23_00221133.val[1])); float32x4_t _output0_tm_44 = vcombine_f32(vget_low_f32(_output0_tm01_44665577.val[0]), vget_low_f32(_output0_tm23_44665577.val[0])); float32x4_t _output0_tm_55 = vcombine_f32(vget_low_f32(_output0_tm01_44665577.val[1]), vget_low_f32(_output0_tm23_44665577.val[1])); float32x4_t _output0_tm_66 = vcombine_f32(vget_high_f32(_output0_tm01_44665577.val[0]), vget_high_f32(_output0_tm23_44665577.val[0])); float32x4_t _output0_tm_77 = vcombine_f32(vget_high_f32(_output0_tm01_44665577.val[1]), vget_high_f32(_output0_tm23_44665577.val[1])); float32x4_t _tmp024a = vaddq_f32(_output0_tm_11, _output0_tm_22); float32x4_t _tmp135a = vsubq_f32(_output0_tm_11, _output0_tm_22); float32x4_t _tmp024b = vaddq_f32(_output0_tm_33, _output0_tm_44); float32x4_t _tmp135b = vsubq_f32(_output0_tm_33, _output0_tm_44); float32x4_t _tmp024c = vaddq_f32(_output0_tm_55, _output0_tm_66); float32x4_t _tmp135c = vsubq_f32(_output0_tm_55, _output0_tm_66); float32x4_t _tmp0 = vaddq_f32(_output0_tm_00, _tmp024a); _tmp0 = vmlaq_lane_f32(_tmp0, _tmp024c, vget_high_f32(_coeff), 1); _tmp0 = vaddq_f32(_tmp0, _tmp024b); float32x4_t _tmp2 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_low_f32(_coeff), 0); _tmp2 = vmlaq_lane_f32(_tmp2, _tmp024c, vget_low_f32(_coeff), 1); float32x4_t _tmp4 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0); _tmp4 = vaddq_f32(_tmp4, _tmp024c); _tmp4 = vaddq_f32(_tmp4, _tmp024c); vst1q_f32(&tmp[0][m], _tmp0); vst1q_f32(&tmp[2][m], _tmp2); vst1q_f32(&tmp[4][m], _tmp4); float32x4_t _tmp1 = vmlaq_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0); _tmp1 = vaddq_f32(_tmp1, _tmp135b); _tmp1 = vaddq_f32(_tmp1, _tmp135b); float32x4_t _tmp3 = vmlaq_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1); _tmp3 = vmlaq_lane_f32(_tmp3, _tmp135c, vget_low_f32(_coeff), 0); float32x4_t _tmp5 = 
vaddq_f32(_output0_tm_77, _tmp135a); _tmp5 = vmlaq_lane_f32(_tmp5, _tmp135b, vget_high_f32(_coeff), 1); _tmp5 = vaddq_f32(_tmp5, _tmp135c); vst1q_f32(&tmp[1][m], _tmp1); vst1q_f32(&tmp[3][m], _tmp3); vst1q_f32(&tmp[5][m], _tmp5); output0_tm0_0 += out0_tm.w * tiles * 2*4; output0_tm0_4 += out0_tm.w * tiles * 2*4; output0_tm1_0 += out0_tm.w * tiles * 2*4; output0_tm1_4 += out0_tm.w * tiles * 2*4; output0_tm2_0 += out0_tm.w * tiles * 2*4; output0_tm2_4 += out0_tm.w * tiles * 2*4; output0_tm3_0 += out0_tm.w * tiles * 2*4; output0_tm3_4 += out0_tm.w * tiles * 2*4; } const float* t0 = tmp[0]; const float* t1 = tmp[1]; float* output0 = out0.row(i * 6) + j * 6; float* output1 = output0 + outw; for (int m=0; m+1<6; m+=2) { float32x4_t _t0_0123 = vld1q_f32(t0); float32x4_t _t0_4567 = vld1q_f32(t0+4); float32x4_t _t1_0123 = vld1q_f32(t1); float32x4_t _t1_4567 = vld1q_f32(t1+4); float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123); float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567); float32x2_t _t_00 = vget_low_f32(_t01_00221133.val[0]); float32x2_t _t_11 = vget_low_f32(_t01_00221133.val[1]); float32x2_t _t_22 = vget_high_f32(_t01_00221133.val[0]); float32x2_t _t_33 = vget_high_f32(_t01_00221133.val[1]); float32x2_t _t_44 = vget_low_f32(_t01_44665577.val[0]); float32x2_t _t_55 = vget_low_f32(_t01_44665577.val[1]); float32x2_t _t_66 = vget_high_f32(_t01_44665577.val[0]); float32x2_t _t_77 = vget_high_f32(_t01_44665577.val[1]); float32x2_t _tmp024a = vadd_f32(_t_11, _t_22); float32x2_t _tmp135a = vsub_f32(_t_11, _t_22); float32x2_t _tmp024b = vadd_f32(_t_33, _t_44); float32x2_t _tmp135b = vsub_f32(_t_33, _t_44); float32x2_t _tmp024c = vadd_f32(_t_55, _t_66); float32x2_t _tmp135c = vsub_f32(_t_55, _t_66); float32x2_t _output_0 = vadd_f32(_t_00, _tmp024a); _output_0 = vmla_lane_f32(_output_0, _tmp024c, vget_high_f32(_coeff), 1); _output_0 = vadd_f32(_output_0, _tmp024b); _output_0 = vadd_f32(_output_0, _bias0); float32x2_t _output_2 = vmla_lane_f32(_tmp024a, 
_tmp024b, vget_low_f32(_coeff), 0); _output_2 = vmla_lane_f32(_output_2, _tmp024c, vget_low_f32(_coeff), 1); _output_2 = vadd_f32(_output_2, _bias0); float32x2_t _output_4 = vmla_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0); _output_4 = vadd_f32(_output_4, _tmp024c); _output_4 = vadd_f32(_output_4, _tmp024c); _output_4 = vadd_f32(_output_4, _bias0); output0[0] = vget_lane_f32(_output_0, 0); output1[0] = vget_lane_f32(_output_0, 1); output0[2] = vget_lane_f32(_output_2, 0); output1[2] = vget_lane_f32(_output_2, 1); output0[4] = vget_lane_f32(_output_4, 0); output1[4] = vget_lane_f32(_output_4, 1); float32x2_t _output_1 = vmla_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0); _output_1 = vadd_f32(_output_1, _tmp135b); _output_1 = vadd_f32(_output_1, _tmp135b); _output_1 = vadd_f32(_output_1, _bias0); float32x2_t _output_3 = vmla_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1); _output_3 = vmla_lane_f32(_output_3, _tmp135c, vget_low_f32(_coeff), 0); _output_3 = vadd_f32(_output_3, _bias0); float32x2_t _output_5 = vadd_f32(_t_77, _tmp135a); _output_5 = vmla_lane_f32(_output_5, _tmp135b, vget_high_f32(_coeff), 1); _output_5 = vadd_f32(_output_5, _tmp135c); _output_5 = vadd_f32(_output_5, _bias0); output0[1] = vget_lane_f32(_output_1, 0); output1[1] = vget_lane_f32(_output_1, 1); output0[3] = vget_lane_f32(_output_3, 0); output1[3] = vget_lane_f32(_output_3, 1); output0[5] = vget_lane_f32(_output_5, 0); output1[5] = vget_lane_f32(_output_5, 1); t0 += 8*2; t1 += 8*2; output0 += outw*2; output1 += outw*2; } #else // __aarch64__ float* t0 = tmp[0]; float* t1 = tmp[1]; int step = out0_tm.w * tiles * 2*4 *4; asm volatile( // loop0 "vld1.f32 {d16-d17}, [%2], %21 \n" "vld1.f32 {d18-d19}, [%3], %21 \n" "vld1.f32 {d20-d21}, [%4], %21 \n" "vld1.f32 {d22-d23}, [%5], %21 \n" "vld1.f32 {d24-d25}, [%6], %21 \n" "vld1.f32 {d26-d27}, [%7], %21 \n" "vld1.f32 {d28-d29}, [%8], %21 \n" "vld1.f32 {d30-d31}, [%9], %21 \n" "vtrn.32 q8, q10 \n" "vtrn.32 q9, q11 \n" 
"vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vadd.f32 q2, q10, q12 \n" "vsub.f32 q3, q10, q12 \n" "vadd.f32 q4, q14, q9 \n" "vsub.f32 q5, q14, q9 \n" "vadd.f32 q6, q11, q13 \n" "vsub.f32 q7, q11, q13 \n"// spare q9 q10 q11 q12 q13 q14 "vmov q9, q3 \n" "vadd.f32 q8, q8, q2 \n" "vmla.f32 q9, q7, %f20[0] \n" "vmov q12, q2 \n" "vmov q10, q2 \n" "vmov q11, q3 \n" "vmla.f32 q12, q4, %f20[0] \n" "vadd.f32 q15, q15, q3 \n" "vmla.f32 q8, q6, %f20[1] \n" "vadd.f32 q9, q9, q5 \n" "vmla.f32 q10, q4, %e20[0] \n" "vmla.f32 q11, q5, %e20[1] \n" "vadd.f32 q12, q12, q6 \n" "vmla.f32 q15, q5, %f20[1] \n" "vadd.f32 q8, q8, q4 \n" "vadd.f32 q9, q9, q5 \n" "vmla.f32 q10, q6, %e20[1] \n" "vmla.f32 q11, q7, %e20[0] \n" "vadd.f32 q12, q12, q6 \n" "vadd.f32 q15, q15, q7 \n" "vst1.f32 {d16-d17}, [%0] \n" "add %0, %0, #64 \n" "vst1.f32 {d18-d19}, [%1] \n" "add %1, %1, #64 \n" "vst1.f32 {d20-d21}, [%0] \n" "add %0, %0, #64 \n" "vst1.f32 {d22-d23}, [%1] \n" "add %1, %1, #64 \n" "vst1.f32 {d24-d25}, [%0] \n" "sub %0, %0, #112 \n" "vst1.f32 {d30-d31}, [%1] \n" "sub %1, %1, #112 \n" // loop1 "vld1.f32 {d16-d17}, [%2] \n" "vld1.f32 {d18-d19}, [%3] \n" "vld1.f32 {d20-d21}, [%4] \n" "vld1.f32 {d22-d23}, [%5] \n" "vld1.f32 {d24-d25}, [%6] \n" "vld1.f32 {d26-d27}, [%7] \n" "vld1.f32 {d28-d29}, [%8] \n" "vld1.f32 {d30-d31}, [%9] \n" "vtrn.32 q8, q10 \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vadd.f32 q2, q10, q12 \n" "vsub.f32 q3, q10, q12 \n" "vadd.f32 q4, q14, q9 \n" "vsub.f32 q5, q14, q9 \n" "vadd.f32 q6, q11, q13 \n" "vsub.f32 q7, q11, q13 \n"// spare q9 q10 q11 q12 q13 q14 "vmov q9, q3 \n" "vadd.f32 q8, q8, q2 \n" "vmla.f32 q9, q7, %f20[0] \n" "vmov q12, q2 \n" "vmov q10, q2 
\n" "vmov q11, q3 \n" "vmla.f32 q12, q4, %f20[0] \n" "vadd.f32 q15, q15, q3 \n" "vmla.f32 q8, q6, %f20[1] \n" "vadd.f32 q9, q9, q5 \n" "vmla.f32 q10, q4, %e20[0] \n" "vmla.f32 q11, q5, %e20[1] \n" "vadd.f32 q12, q12, q6 \n" "vmla.f32 q15, q5, %f20[1] \n" "vadd.f32 q8, q8, q4 \n" "vadd.f32 q9, q9, q5 \n" "vmla.f32 q10, q6, %e20[1] \n" "vmla.f32 q11, q7, %e20[0] \n" "vadd.f32 q12, q12, q6 \n" "vadd.f32 q15, q15, q7 \n" "vst1.f32 {d16-d17}, [%0] \n" "add %0, %0, #64 \n" "vst1.f32 {d18-d19}, [%1] \n" "add %1, %1, #64 \n" "vst1.f32 {d20-d21}, [%0] \n" "add %0, %0, #64 \n" "vst1.f32 {d22-d23}, [%1] \n" "add %1, %1, #64 \n" "vst1.f32 {d24-d25}, [%0] \n" "vst1.f32 {d30-d31}, [%1] \n" : "=r"(t0), // %0 "=r"(t1), // %1 "=r"(output0_tm0_0), // %2 "=r"(output0_tm0_4), // %3 "=r"(output0_tm1_0), // %4 "=r"(output0_tm1_4), // %5 "=r"(output0_tm2_0), // %6 "=r"(output0_tm2_4), // %7 "=r"(output0_tm3_0), // %8 "=r"(output0_tm3_4) // %9 : "0"(t0), "1"(t1), "2"(output0_tm0_0), "3"(output0_tm0_4), "4"(output0_tm1_0), "5"(output0_tm1_4), "6"(output0_tm2_0), "7"(output0_tm2_4), "8"(output0_tm3_0), "9"(output0_tm3_4), "w"(_coeff), // %20 "r"(step) // %21 : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); t0 = tmp[0]; t1 = tmp[1]; float* output0 = out0.row(i * 6) + j * 6; float* output1 = output0 + outw; int stepw = outw*2 * 4; asm volatile( // loop0 "vld1.f32 {d16-d19}, [%2] \n" "vld1.f32 {d20-d23}, [%3] \n" "add %2, %2, #64 \n" "add %3, %3, #64 \n" "vtrn.32 q8, q10 \n"// q8 = 0 2 q10 = 1 3 "vtrn.32 q9, q11 \n"// q9 = 4 6 q11 = 5 7 "vadd.f32 d4, d20, d17 \n" "vsub.f32 d5, d20, d17 \n" "vadd.f32 d6, d21, d18 \n" "vsub.f32 d7, d21, d18 \n" "vadd.f32 d8, d22, d19 \n" "vsub.f32 d9, d22, d19 \n"// spare d17 ~ d22 "vmov d20, d5 \n" "vmov d18, d4 \n" "vadd.f32 d16, d16, d4 \n" "vmla.f32 d20, d9, %f8[0] \n" "vmov d17, d4 \n" "vmov d21, d5 \n" "vmla.f32 d18, d6, %f8[0] \n" "vadd.f32 d22, d23, d5 \n" "vmla.f32 d16, d8, %f8[1] \n" "vadd.f32 d20, 
d20, d7 \n" "vmla.f32 d17, d6, %e8[0] \n" "vmla.f32 d21, d7, %e8[1] \n" "vadd.f32 d18, d18, d8 \n" "vmla.f32 d22, d7, %f8[1] \n" "vadd.f32 d16, d16, d6 \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d8, %e8[1] \n" "vmla.f32 d21, d9, %e8[0] \n" "vadd.f32 d18, d18, d8 \n" "vadd.f32 d22, d22, d9 \n" "vadd.f32 d16, d16, %P9 \n"// _bias0 "vadd.f32 d20, d20, %P9 \n"// _bias0 "vadd.f32 d17, d17, %P9 \n"// _bias0 "vadd.f32 d21, d21, %P9 \n"// _bias0 "vadd.f32 d18, d18, %P9 \n"// _bias0 "vadd.f32 d22, d22, %P9 \n"// _bias0 "vtrn.f32 q8, q10 \n" "vtrn.f32 d18, d22 \n" "vst1.f32 {d16-d18}, [%0], %10 \n" "vst1.f32 {d20-d22}, [%1], %10 \n" // loop1 "vld1.f32 {d16-d19}, [%2] \n" "vld1.f32 {d20-d23}, [%3] \n" "add %2, %2, #64 \n" "add %3, %3, #64 \n" "vtrn.32 q8, q10 \n"// q8 = 0 2 q10 = 1 3 "vtrn.32 q9, q11 \n"// q9 = 4 6 q11 = 5 7 "vadd.f32 d4, d20, d17 \n" "vsub.f32 d5, d20, d17 \n" "vadd.f32 d6, d21, d18 \n" "vsub.f32 d7, d21, d18 \n" "vadd.f32 d8, d22, d19 \n" "vsub.f32 d9, d22, d19 \n"// spare d17 ~ d22 "vmov d20, d5 \n" "vmov d18, d4 \n" "vadd.f32 d16, d16, d4 \n" "vmla.f32 d20, d9, %f8[0] \n" "vmov d17, d4 \n" "vmov d21, d5 \n" "vmla.f32 d18, d6, %f8[0] \n" "vadd.f32 d22, d23, d5 \n" "vmla.f32 d16, d8, %f8[1] \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d6, %e8[0] \n" "vmla.f32 d21, d7, %e8[1] \n" "vadd.f32 d18, d18, d8 \n" "vmla.f32 d22, d7, %f8[1] \n" "vadd.f32 d16, d16, d6 \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d8, %e8[1] \n" "vmla.f32 d21, d9, %e8[0] \n" "vadd.f32 d18, d18, d8 \n" "vadd.f32 d22, d22, d9 \n" "vadd.f32 d16, d16, %P9 \n"// _bias0 "vadd.f32 d20, d20, %P9 \n"// _bias0 "vadd.f32 d17, d17, %P9 \n"// _bias0 "vadd.f32 d21, d21, %P9 \n"// _bias0 "vadd.f32 d18, d18, %P9 \n"// _bias0 "vadd.f32 d22, d22, %P9 \n"// _bias0 "vtrn.f32 q8, q10 \n" "vtrn.f32 d18, d22 \n" "vst1.f32 {d16-d18}, [%0], %10 \n" "vst1.f32 {d20-d22}, [%1], %10 \n" // loop2 "vld1.f32 {d16-d19}, [%2] \n" "vld1.f32 {d20-d23}, [%3] \n" "add %2, %2, #64 \n" "add %3, %3, #64 \n" "vtrn.32 q8, 
q10 \n"// q8 = 0 2 q10 = 1 3 "vtrn.32 q9, q11 \n"// q9 = 4 6 q11 = 5 7 "vadd.f32 d4, d20, d17 \n" "vsub.f32 d5, d20, d17 \n" "vadd.f32 d6, d21, d18 \n" "vsub.f32 d7, d21, d18 \n" "vadd.f32 d8, d22, d19 \n" "vsub.f32 d9, d22, d19 \n"// spare d17 ~ d22 "vmov d20, d5 \n" "vmov d18, d4 \n" "vadd.f32 d16, d16, d4 \n" "vmla.f32 d20, d9, %f8[0] \n" "vmov d17, d4 \n" "vmov d21, d5 \n" "vmla.f32 d18, d6, %f8[0] \n" "vadd.f32 d22, d23, d5 \n" "vmla.f32 d16, d8, %f8[1] \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d6, %e8[0] \n" "vmla.f32 d21, d7, %e8[1] \n" "vadd.f32 d18, d18, d8 \n" "vmla.f32 d22, d7, %f8[1] \n" "vadd.f32 d16, d16, d6 \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d8, %e8[1] \n" "vmla.f32 d21, d9, %e8[0] \n" "vadd.f32 d18, d18, d8 \n" "vadd.f32 d22, d22, d9 \n" "vadd.f32 d16, d16, %P9 \n"// _bias0 "vadd.f32 d20, d20, %P9 \n"// _bias0 "vadd.f32 d17, d17, %P9 \n"// _bias0 "vadd.f32 d21, d21, %P9 \n"// _bias0 "vadd.f32 d18, d18, %P9 \n"// _bias0 "vadd.f32 d22, d22, %P9 \n"// _bias0 "vtrn.f32 q8, q10 \n" "vtrn.f32 d18, d22 \n" "vst1.f32 {d16-d18}, [%0], %10 \n" "vst1.f32 {d20-d22}, [%1], %10 \n" : "=r"(output0), // %0 "=r"(output1), // %1 "=r"(t0), // %2 "=r"(t1) // %3 : "0"(output0), "1"(output1), "2"(t0), "3"(t1), "w"(_coeff), // %8 "w"(_bias0), // %9 "r"(stepw) // %10 : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else const float* output0_tm_0 = out0_tm.row(i * w_tm/8 + j); const float* output0_tm_4 = out0_tm.row(i * w_tm/8 + j + tiles); for (int m=0; m<8; m++) { float tmp024a = output0_tm_0[1] + output0_tm_0[2]; float tmp135a = output0_tm_0[1] - output0_tm_0[2]; float tmp024b = output0_tm_0[3] + output0_tm_4[0]; float tmp135b = output0_tm_0[3] - output0_tm_4[0]; float tmp024c = output0_tm_4[1] + output0_tm_4[2]; float tmp135c = output0_tm_4[1] - output0_tm_4[2]; tmp[0][m] = output0_tm_0[0] + tmp024a + tmp024b + tmp024c * 32; tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; 
tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; tmp[5][m] = output0_tm_4[3] + tmp135a + tmp135b * 32 + tmp135c; output0_tm_0 += out0_tm.w * tiles * 2; output0_tm_4 += out0_tm.w * tiles * 2; } float* output0 = out0.row(i * 6) + j * 6; for (int m=0; m<6; m++) { const float* tmp0 = tmp[m]; float tmp024a = tmp0[1] + tmp0[2]; float tmp135a = tmp0[1] - tmp0[2]; float tmp024b = tmp0[3] + tmp0[4]; float tmp135b = tmp0[3] - tmp0[4]; float tmp024c = tmp0[5] + tmp0[6]; float tmp135c = tmp0[5] - tmp0[6]; output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32; output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8; output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c; output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16; output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4; output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c; output0 += outw; } #endif // __ARM_NEON } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w); } #if __aarch64__ static void conv3x3s1_winograd64_neon5(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f); const float* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm/8 * h_tm/8; bottom_blob_tm.create(1, 64 * tiles, inch); // bottom_blob_tm.create(inch, tiles, 64); // const float itm[8][8] = { // 
{1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #if __ARM_NEON const float coeff[8] = { 0.25f, 0.5f, -1.25f, 2.f, -2.5f, 4.f, 4.25f, 5.25f }; float32x4_t _coeff0 = vld1q_f32(coeff); float32x4_t _coeff1 = vld1q_f32(coeff+4); #endif // __ARM_NEON #pragma omp parallel for for (int q = 0; q<inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[8][8]; // tile for (int i=0; i<h_tm/8; i++) { for (int j=0; j<w_tm/8; j++) { #if __ARM_NEON const float* r0 = img0.row(i * 6) + j * 6; const float* r1 = r0 + w; const float* r2 = r0 + w*2; const float* r3 = r0 + w*3; for (int m=0; m+3<8; m+=4) { float32x4_t _r0_0123 = vld1q_f32(r0); float32x4_t _r0_4567 = vld1q_f32(r0+4); float32x4_t _r1_0123 = vld1q_f32(r1); float32x4_t _r1_4567 = vld1q_f32(r1+4); float32x4_t _r2_0123 = vld1q_f32(r2); float32x4_t _r2_4567 = vld1q_f32(r2+4); float32x4_t _r3_0123 = vld1q_f32(r3); float32x4_t _r3_4567 = vld1q_f32(r3+4); float32x4x2_t _r01_00221133 = vtrnq_f32(_r0_0123, _r1_0123); 
float32x4x2_t _r01_44665577 = vtrnq_f32(_r0_4567, _r1_4567); float32x4x2_t _r23_00221133 = vtrnq_f32(_r2_0123, _r3_0123); float32x4x2_t _r23_44665577 = vtrnq_f32(_r2_4567, _r3_4567); // no vswp intrinsic :( float32x4_t _r_00 = vcombine_f32(vget_low_f32(_r01_00221133.val[0]), vget_low_f32(_r23_00221133.val[0])); float32x4_t _r_11 = vcombine_f32(vget_low_f32(_r01_00221133.val[1]), vget_low_f32(_r23_00221133.val[1])); float32x4_t _r_22 = vcombine_f32(vget_high_f32(_r01_00221133.val[0]), vget_high_f32(_r23_00221133.val[0])); float32x4_t _r_33 = vcombine_f32(vget_high_f32(_r01_00221133.val[1]), vget_high_f32(_r23_00221133.val[1])); float32x4_t _r_44 = vcombine_f32(vget_low_f32(_r01_44665577.val[0]), vget_low_f32(_r23_44665577.val[0])); float32x4_t _r_55 = vcombine_f32(vget_low_f32(_r01_44665577.val[1]), vget_low_f32(_r23_44665577.val[1])); float32x4_t _r_66 = vcombine_f32(vget_high_f32(_r01_44665577.val[0]), vget_high_f32(_r23_44665577.val[0])); float32x4_t _r_77 = vcombine_f32(vget_high_f32(_r01_44665577.val[1]), vget_high_f32(_r23_44665577.val[1])); float32x4_t _r_0_m_6 = vsubq_f32(_r_00, _r_66); float32x4_t _r_7_m_1 = vsubq_f32(_r_77, _r_11); float32x4_t _r_4_m_2 = vsubq_f32(_r_44, _r_22); float32x4_t _r_3_m_5 = vsubq_f32(_r_33, _r_55); float32x4_t _tmp0 = vmlaq_lane_f32(_r_0_m_6, _r_4_m_2, vget_high_f32(_coeff1), 1); float32x4_t _tmp7 = vmlaq_lane_f32(_r_7_m_1, _r_3_m_5, vget_high_f32(_coeff1), 1); vst1q_f32(&tmp[0][m], _tmp0); vst1q_f32(&tmp[7][m], _tmp7); float32x4_t _r_2_a_6 = vaddq_f32(_r_22, _r_66); float32x4_t _r_1_a_5 = vaddq_f32(_r_11, _r_55); float32x4_t _tmp12a = vmlsq_lane_f32(_r_2_a_6, _r_44, vget_high_f32(_coeff1), 0); float32x4_t _tmp12b = vmlsq_lane_f32(_r_1_a_5, _r_33, vget_high_f32(_coeff1), 0); float32x4_t _tmp1 = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _tmp2 = vsubq_f32(_tmp12a, _tmp12b); vst1q_f32(&tmp[1][m], _tmp1); vst1q_f32(&tmp[2][m], _tmp2); float32x4_t _r_4_x_c = vmulq_lane_f32(_r_44, vget_high_f32(_coeff0), 0); float32x4_t _r_3_x_c = 
vmulq_lane_f32(_r_33, vget_low_f32(_coeff1), 0); float32x4_t _tmp34a = vaddq_f32(_r_66, _r_4_x_c); _tmp34a = vmlaq_lane_f32(_tmp34a, _r_22, vget_low_f32(_coeff0), 0); float32x4_t _tmp34b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_low_f32(_coeff0), 1); _tmp34b = vmlaq_lane_f32(_tmp34b, _r_55, vget_high_f32(_coeff0), 1); float32x4_t _tmp3 = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _tmp4 = vsubq_f32(_tmp34a, _tmp34b); vst1q_f32(&tmp[3][m], _tmp3); vst1q_f32(&tmp[4][m], _tmp4); // reuse r04 * 1.25 // reuse r03 * 2.5 float32x4_t _r_2_a_4c = vaddq_f32(_r_22, _r_4_x_c); float32x4_t _tmp56a = vmlaq_lane_f32(_r_66, _r_2_a_4c, vget_low_f32(_coeff1), 1); float32x4_t _tmp56b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_high_f32(_coeff0), 1); _tmp56b = vmlaq_lane_f32(_tmp56b, _r_55, vget_low_f32(_coeff0), 1); float32x4_t _tmp5 = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _tmp6 = vsubq_f32(_tmp56a, _tmp56b); vst1q_f32(&tmp[5][m], _tmp5); vst1q_f32(&tmp[6][m], _tmp6); r0 += w*4; r1 += w*4; r2 += w*4; r3 += w*4; } const float* t0 = tmp[0]; const float* t1 = tmp[1]; const float* t2 = tmp[2]; const float* t3 = tmp[3]; float* r0_tm0 = img0_tm.row(i * w_tm/8 + j); float* r0_tm1 = img0_tm.row(i * w_tm/8 + j + tiles*8); float* r0_tm2 = img0_tm.row(i * w_tm/8 + j + tiles*16); float* r0_tm3 = img0_tm.row(i * w_tm/8 + j + tiles*24); for (int m=0; m+3<8; m+=4) { float32x4_t _t0_0123 = vld1q_f32(t0); float32x4_t _t0_4567 = vld1q_f32(t0+4); float32x4_t _t1_0123 = vld1q_f32(t1); float32x4_t _t1_4567 = vld1q_f32(t1+4); float32x4_t _t2_0123 = vld1q_f32(t2); float32x4_t _t2_4567 = vld1q_f32(t2+4); float32x4_t _t3_0123 = vld1q_f32(t3); float32x4_t _t3_4567 = vld1q_f32(t3+4); float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123); float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567); float32x4x2_t _t23_00221133 = vtrnq_f32(_t2_0123, _t3_0123); float32x4x2_t _t23_44665577 = vtrnq_f32(_t2_4567, _t3_4567); // no vswp intrinsic :( float32x4_t _t_00 = vcombine_f32(vget_low_f32(_t01_00221133.val[0]), 
vget_low_f32(_t23_00221133.val[0])); float32x4_t _t_11 = vcombine_f32(vget_low_f32(_t01_00221133.val[1]), vget_low_f32(_t23_00221133.val[1])); float32x4_t _t_22 = vcombine_f32(vget_high_f32(_t01_00221133.val[0]), vget_high_f32(_t23_00221133.val[0])); float32x4_t _t_33 = vcombine_f32(vget_high_f32(_t01_00221133.val[1]), vget_high_f32(_t23_00221133.val[1])); float32x4_t _t_44 = vcombine_f32(vget_low_f32(_t01_44665577.val[0]), vget_low_f32(_t23_44665577.val[0])); float32x4_t _t_55 = vcombine_f32(vget_low_f32(_t01_44665577.val[1]), vget_low_f32(_t23_44665577.val[1])); float32x4_t _t_66 = vcombine_f32(vget_high_f32(_t01_44665577.val[0]), vget_high_f32(_t23_44665577.val[0])); float32x4_t _t_77 = vcombine_f32(vget_high_f32(_t01_44665577.val[1]), vget_high_f32(_t23_44665577.val[1])); float32x4_t _t_0_m_6 = vsubq_f32(_t_00, _t_66); float32x4_t _t_7_m_1 = vsubq_f32(_t_77, _t_11); float32x4_t _t_4_m_2 = vsubq_f32(_t_44, _t_22); float32x4_t _t_3_m_5 = vsubq_f32(_t_33, _t_55); float32x4_t _r0_tm_0_0 = vmlaq_lane_f32(_t_0_m_6, _t_4_m_2, vget_high_f32(_coeff1), 1); float32x4_t _r0_tm_4_3 = vmlaq_lane_f32(_t_7_m_1, _t_3_m_5, vget_high_f32(_coeff1), 1); r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_0, 0); r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_0, 1); r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_0, 2); r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_0, 3); r0_tm0 += img0_tm.w*tiles; r0_tm1 += img0_tm.w*tiles; r0_tm2 += img0_tm.w*tiles; r0_tm3 += img0_tm.w*tiles; float32x4_t _t_2_m_6 = vaddq_f32(_t_22, _t_66); float32x4_t _t_1_m_5 = vaddq_f32(_t_11, _t_55); float32x4_t _tmp12a = vmlsq_lane_f32(_t_2_m_6, _t_44, vget_high_f32(_coeff1), 0); float32x4_t _tmp12b = vmlsq_lane_f32(_t_1_m_5, _t_33, vget_high_f32(_coeff1), 0); float32x4_t _r0_tm_0_1 = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _r0_tm_0_2 = vsubq_f32(_tmp12a, _tmp12b); r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_1, 0); r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_1, 1); r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_1, 2); r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_1, 3); r0_tm0 += 
img0_tm.w*tiles; r0_tm1 += img0_tm.w*tiles; r0_tm2 += img0_tm.w*tiles; r0_tm3 += img0_tm.w*tiles; r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_2, 0); r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_2, 1); r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_2, 2); r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_2, 3); r0_tm0 += img0_tm.w*tiles; r0_tm1 += img0_tm.w*tiles; r0_tm2 += img0_tm.w*tiles; r0_tm3 += img0_tm.w*tiles; float32x4_t _t_4_x_c = vmulq_lane_f32(_t_44, vget_high_f32(_coeff0), 0); float32x4_t _t_3_x_c = vmulq_lane_f32(_t_33, vget_low_f32(_coeff1), 0); float32x4_t _tmp34a = vaddq_f32(_t_66, _t_4_x_c); _tmp34a = vmlaq_lane_f32(_tmp34a, _t_22, vget_low_f32(_coeff0), 0); float32x4_t _tmp34b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_low_f32(_coeff0), 1); _tmp34b = vmlaq_lane_f32(_tmp34b, _t_55, vget_high_f32(_coeff0), 1); float32x4_t _r0_tm_0_3 = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _r0_tm_4_0 = vsubq_f32(_tmp34a, _tmp34b); r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_3, 0); r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_3, 1); r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_3, 2); r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_3, 3); r0_tm0 += img0_tm.w*tiles; r0_tm1 += img0_tm.w*tiles; r0_tm2 += img0_tm.w*tiles; r0_tm3 += img0_tm.w*tiles; r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_0, 0); r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_0, 1); r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_0, 2); r0_tm3[0] = vgetq_lane_f32(_r0_tm_4_0, 3); r0_tm0 += img0_tm.w*tiles; r0_tm1 += img0_tm.w*tiles; r0_tm2 += img0_tm.w*tiles; r0_tm3 += img0_tm.w*tiles; float32x4_t _t_2_a_4c = vaddq_f32(_t_22, _t_4_x_c); float32x4_t _tmp56a = vmlaq_lane_f32(_t_66, _t_2_a_4c, vget_low_f32(_coeff1), 1); float32x4_t _tmp56b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_high_f32(_coeff0), 1); _tmp56b = vmlaq_lane_f32(_tmp56b, _t_55, vget_low_f32(_coeff0), 1); float32x4_t _r0_tm_4_1 = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _r0_tm_4_2 = vsubq_f32(_tmp56a, _tmp56b); r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_1, 0); r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_1, 1); r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_1, 2); r0_tm3[0] = 
vgetq_lane_f32(_r0_tm_4_1, 3); r0_tm0 += img0_tm.w*tiles; r0_tm1 += img0_tm.w*tiles; r0_tm2 += img0_tm.w*tiles; r0_tm3 += img0_tm.w*tiles; r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_2, 0); r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_2, 1); r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_2, 2); r0_tm3[0] = vgetq_lane_f32(_r0_tm_4_2, 3); r0_tm0 += img0_tm.w*tiles; r0_tm1 += img0_tm.w*tiles; r0_tm2 += img0_tm.w*tiles; r0_tm3 += img0_tm.w*tiles; r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_3, 0); r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_3, 1); r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_3, 2); r0_tm3[0] = vgetq_lane_f32(_r0_tm_4_3, 3); t0 += 8*4; t1 += 8*4; t2 += 8*4; t3 += 8*4; r0_tm0 += img0_tm.w*tiles*25; r0_tm1 += img0_tm.w*tiles*25; r0_tm2 += img0_tm.w*tiles*25; r0_tm3 += img0_tm.w*tiles*25; } #else const float* r0 = img0.row(i * 6) + j * 6; for (int m=0; m<8; m++) { tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25f; tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25f; float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25f); float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25f); tmp[1][m] = tmp12a + tmp12b; tmp[2][m] = tmp12a - tmp12b; float tmp34a = (r0[6] + r0[2] * 0.25f - r0[4] * 1.25f); float tmp34b = (r0[1] * 0.5f - r0[3] * 2.5f + r0[5] * 2.f); tmp[3][m] = tmp34a + tmp34b; tmp[4][m] = tmp34a - tmp34b; float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25f) * 4.f); float tmp56b = (r0[1] * 2.f - r0[3] * 2.5f + r0[5] * 0.5f); tmp[5][m] = tmp56a + tmp56b; tmp[6][m] = tmp56a - tmp56b; r0 += w; } float* r0_tm_0 = img0_tm.row(i * w_tm/8 + j); float* r0_tm_1 = img0_tm.row(i * w_tm/8 + j + tiles); float* r0_tm_2 = img0_tm.row(i * w_tm/8 + j + tiles*2); float* r0_tm_3 = img0_tm.row(i * w_tm/8 + j + tiles*3); float* r0_tm_4 = img0_tm.row(i * w_tm/8 + j + tiles*4); float* r0_tm_5 = img0_tm.row(i * w_tm/8 + j + tiles*5); float* r0_tm_6 = img0_tm.row(i * w_tm/8 + j + tiles*6); float* r0_tm_7 = img0_tm.row(i * w_tm/8 + j + tiles*7); for (int m=0; m<8; m++) { const float* tmp0 = tmp[m]; r0_tm_0[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 
5.25f; r0_tm_7[0] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25f; float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25f); float tmp12b = (tmp0[1] - tmp0[3] * 4.25f + tmp0[5]); r0_tm_1[0] = tmp12a + tmp12b; r0_tm_2[0] = tmp12a - tmp12b; float tmp34a = (tmp0[6] + tmp0[2] * 0.25f - tmp0[4] * 1.25f); float tmp34b = (tmp0[1] * 0.5f - tmp0[3] * 2.5f + tmp0[5] * 2.f); r0_tm_3[0] = tmp34a + tmp34b; r0_tm_4[0] = tmp34a - tmp34b; float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25f) * 4.f); float tmp56b = (tmp0[1] * 2.f - tmp0[3] * 2.5f + tmp0[5] * 0.5f); r0_tm_5[0] = tmp56a + tmp56b; r0_tm_6[0] = tmp56a - tmp56b; r0_tm_0 += img0_tm.w * tiles * 8; r0_tm_1 += img0_tm.w * tiles * 8; r0_tm_2 += img0_tm.w * tiles * 8; r0_tm_3 += img0_tm.w * tiles * 8; r0_tm_4 += img0_tm.w * tiles * 8; r0_tm_5 += img0_tm.w * tiles * 8; r0_tm_6 += img0_tm.w * tiles * 8; r0_tm_7 += img0_tm.w * tiles * 8; } #endif // __ARM_NEON } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm/8 * h_tm/8; top_blob_tm.create(1, 64 * tiles, outch); // permute // bottom_blob_tm.create(1, 64 * tiles, inch); // Mat bottom_blob_tm2(inch, tiles, 64); Mat bottom_blob_tm2(8*inch, tiles/8 + (tiles%8)/4 + tiles%4, 64); #pragma omp parallel for for (int r=0; r<64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i=0; for (; i+7<tiles; i+=8) { float* tm2p = tm2.row(i/8); const float* r0 = bottom_blob_tm; r0 += r*tiles + i; int q=0; for (; q<inch; q++) { asm volatile("prfm pldl1keep, [%0, #256] \n" : :"r"(r0) :); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0+4); vst1q_f32(tm2p, _r0); vst1q_f32(tm2p+4, _r0n); // tm2p[0] = r0[0]; // tm2p[1] = r0[1]; // tm2p[2] = r0[2]; // tm2p[3] = r0[3]; // tm2p[4] = r0[4]; // tm2p[5] = r0[5]; // tm2p[6] = r0[6]; // tm2p[7] = r0[7]; r0 += bottom_blob_tm.cstep; tm2p += 8; } } for (; i+3<tiles; i+=4) { float* tm2p = tm2.row(i/8+(i%8)/4); const float* r0 = 
bottom_blob_tm; r0 += r*tiles + i; int q=0; for (; q<inch; q++) { asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(r0) :); float32x4_t _r0 = vld1q_f32(r0); vst1q_f32(tm2p, _r0); // tm2p[0] = r0[0]; // tm2p[1] = r0[1]; // tm2p[2] = r0[2]; // tm2p[3] = r0[3]; r0 += bottom_blob_tm.cstep; tm2p += 4; } } for (; i<tiles; i++) { float* tm2p = tm2.row(i/8+(i%8)/4+i%4); const float* r0 = bottom_blob_tm; r0 += r*tiles + i; int q=0; for (; q<inch; q++) { tm2p[0] = r0[0]; r0 += bottom_blob_tm.cstep; tm2p += 1; } } } bottom_blob_tm = Mat(); // permute end #pragma omp parallel for for (int pp=0; pp<outch/8; pp++) { int p = pp * 8; const Mat kernel_tm0 = kernel_tm.channel(p/8); Mat out0_tm = top_blob_tm.channel(p); Mat out1_tm = top_blob_tm.channel(p+1); Mat out2_tm = top_blob_tm.channel(p+2); Mat out3_tm = top_blob_tm.channel(p+3); Mat out4_tm = top_blob_tm.channel(p+4); Mat out5_tm = top_blob_tm.channel(p+5); Mat out6_tm = top_blob_tm.channel(p+6); Mat out7_tm = top_blob_tm.channel(p+7); float* output0_tm = out0_tm; float* output1_tm = out1_tm; float* output2_tm = out2_tm; float* output3_tm = out3_tm; float* output4_tm = out4_tm; float* output5_tm = out5_tm; float* output6_tm = out6_tm; float* output7_tm = out7_tm; for (int r=0; r<64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); // tile int i=0; for (; i+7<tiles; i+=8) { const float* bb2p0 = bb2.row(i/8); const float* ktm0 = kernel_tm0.row(r); float32x4_t _sum0 = vdupq_n_f32(0.f); float32x4_t _sum0n = vdupq_n_f32(0.f); float32x4_t _sum1 = vdupq_n_f32(0.f); float32x4_t _sum1n = vdupq_n_f32(0.f); float32x4_t _sum2 = vdupq_n_f32(0.f); float32x4_t _sum2n = vdupq_n_f32(0.f); float32x4_t _sum3 = vdupq_n_f32(0.f); float32x4_t _sum3n = vdupq_n_f32(0.f); float32x4_t _sum4 = vdupq_n_f32(0.f); float32x4_t _sum4n = vdupq_n_f32(0.f); float32x4_t _sum5 = vdupq_n_f32(0.f); float32x4_t _sum5n = vdupq_n_f32(0.f); float32x4_t _sum6 = vdupq_n_f32(0.f); float32x4_t _sum6n = vdupq_n_f32(0.f); float32x4_t _sum7 = vdupq_n_f32(0.f); 
float32x4_t _sum7n = vdupq_n_f32(0.f); int q=0; for (; q+3<inch; q+=4) { asm volatile("prfm pldl1keep, [%0, #256] \n" : :"r"(bb2p0) :); float32x4_t _bb2p0 = vld1q_f32(bb2p0); float32x4_t _bb2p0n = vld1q_f32(bb2p0+4); bb2p0 += 8; asm volatile("prfm pldl1keep, [%0, #512] \n" : :"r"(ktm0) :); float32x4_t _ktm0 = vld1q_f32(ktm0 + 0); float32x4_t _ktm1 = vld1q_f32(ktm0 + 4); float32x4_t _ktm2 = vld1q_f32(ktm0 + 8); float32x4_t _ktm3 = vld1q_f32(ktm0 + 12); ktm0 += 16; _sum0 = vmlaq_laneq_f32(_sum0, _bb2p0, _ktm0, 0); _sum0n = vmlaq_laneq_f32(_sum0n, _bb2p0n, _ktm0, 0); _sum1 = vmlaq_laneq_f32(_sum1, _bb2p0, _ktm1, 0); _sum1n = vmlaq_laneq_f32(_sum1n, _bb2p0n, _ktm1, 0); _sum2 = vmlaq_laneq_f32(_sum2, _bb2p0, _ktm2, 0); _sum2n = vmlaq_laneq_f32(_sum2n, _bb2p0n, _ktm2, 0); _sum3 = vmlaq_laneq_f32(_sum3, _bb2p0, _ktm3, 0); _sum3n = vmlaq_laneq_f32(_sum3n, _bb2p0n, _ktm3, 0); asm volatile("prfm pldl1keep, [%0, #256] \n" : :"r"(bb2p0) :); float32x4_t _bb2p1 = vld1q_f32(bb2p0); float32x4_t _bb2p1n = vld1q_f32(bb2p0+4); bb2p0 += 8; _sum0 = vmlaq_laneq_f32(_sum0, _bb2p1, _ktm0, 1); _sum0n = vmlaq_laneq_f32(_sum0n, _bb2p1n, _ktm0, 1); _sum1 = vmlaq_laneq_f32(_sum1, _bb2p1, _ktm1, 1); _sum1n = vmlaq_laneq_f32(_sum1n, _bb2p1n, _ktm1, 1); _sum2 = vmlaq_laneq_f32(_sum2, _bb2p1, _ktm2, 1); _sum2n = vmlaq_laneq_f32(_sum2n, _bb2p1n, _ktm2, 1); _sum3 = vmlaq_laneq_f32(_sum3, _bb2p1, _ktm3, 1); _sum3n = vmlaq_laneq_f32(_sum3n, _bb2p1n, _ktm3, 1); asm volatile("prfm pldl1keep, [%0, #256] \n" : :"r"(bb2p0) :); float32x4_t _bb2p2 = vld1q_f32(bb2p0); float32x4_t _bb2p2n = vld1q_f32(bb2p0+4); bb2p0 += 8; _sum0 = vmlaq_laneq_f32(_sum0, _bb2p2, _ktm0, 2); _sum0n = vmlaq_laneq_f32(_sum0n, _bb2p2n, _ktm0, 2); _sum1 = vmlaq_laneq_f32(_sum1, _bb2p2, _ktm1, 2); _sum1n = vmlaq_laneq_f32(_sum1n, _bb2p2n, _ktm1, 2); _sum2 = vmlaq_laneq_f32(_sum2, _bb2p2, _ktm2, 2); _sum2n = vmlaq_laneq_f32(_sum2n, _bb2p2n, _ktm2, 2); _sum3 = vmlaq_laneq_f32(_sum3, _bb2p2, _ktm3, 2); _sum3n = vmlaq_laneq_f32(_sum3n, 
_bb2p2n, _ktm3, 2); asm volatile("prfm pldl1keep, [%0, #256] \n" : :"r"(bb2p0) :); float32x4_t _bb2p3 = vld1q_f32(bb2p0); float32x4_t _bb2p3n = vld1q_f32(bb2p0+4); bb2p0 += 8; _sum0 = vmlaq_laneq_f32(_sum0, _bb2p3, _ktm0, 3); _sum0n = vmlaq_laneq_f32(_sum0n, _bb2p3n, _ktm0, 3); _sum1 = vmlaq_laneq_f32(_sum1, _bb2p3, _ktm1, 3); _sum1n = vmlaq_laneq_f32(_sum1n, _bb2p3n, _ktm1, 3); _sum2 = vmlaq_laneq_f32(_sum2, _bb2p3, _ktm2, 3); _sum2n = vmlaq_laneq_f32(_sum2n, _bb2p3n, _ktm2, 3); _sum3 = vmlaq_laneq_f32(_sum3, _bb2p3, _ktm3, 3); _sum3n = vmlaq_laneq_f32(_sum3n, _bb2p3n, _ktm3, 3); asm volatile("prfm pldl1keep, [%0, #512] \n" : :"r"(ktm0) :); float32x4_t _ktm4 = vld1q_f32(ktm0 + 0); float32x4_t _ktm5 = vld1q_f32(ktm0 + 4); float32x4_t _ktm6 = vld1q_f32(ktm0 + 8); float32x4_t _ktm7 = vld1q_f32(ktm0 + 12); ktm0 += 16; _sum4 = vmlaq_laneq_f32(_sum4, _bb2p0, _ktm4, 0); _sum4n = vmlaq_laneq_f32(_sum4n, _bb2p0n, _ktm4, 0); _sum5 = vmlaq_laneq_f32(_sum5, _bb2p0, _ktm5, 0); _sum5n = vmlaq_laneq_f32(_sum5n, _bb2p0n, _ktm5, 0); _sum6 = vmlaq_laneq_f32(_sum6, _bb2p0, _ktm6, 0); _sum6n = vmlaq_laneq_f32(_sum6n, _bb2p0n, _ktm6, 0); _sum7 = vmlaq_laneq_f32(_sum7, _bb2p0, _ktm7, 0); _sum7n = vmlaq_laneq_f32(_sum7n, _bb2p0n, _ktm7, 0); _sum4 = vmlaq_laneq_f32(_sum4, _bb2p1, _ktm4, 1); _sum4n = vmlaq_laneq_f32(_sum4n, _bb2p1n, _ktm4, 1); _sum5 = vmlaq_laneq_f32(_sum5, _bb2p1, _ktm5, 1); _sum5n = vmlaq_laneq_f32(_sum5n, _bb2p1n, _ktm5, 1); _sum6 = vmlaq_laneq_f32(_sum6, _bb2p1, _ktm6, 1); _sum6n = vmlaq_laneq_f32(_sum6n, _bb2p1n, _ktm6, 1); _sum7 = vmlaq_laneq_f32(_sum7, _bb2p1, _ktm7, 1); _sum7n = vmlaq_laneq_f32(_sum7n, _bb2p1n, _ktm7, 1); _sum4 = vmlaq_laneq_f32(_sum4, _bb2p2, _ktm4, 2); _sum4n = vmlaq_laneq_f32(_sum4n, _bb2p2n, _ktm4, 2); _sum5 = vmlaq_laneq_f32(_sum5, _bb2p2, _ktm5, 2); _sum5n = vmlaq_laneq_f32(_sum5n, _bb2p2n, _ktm5, 2); _sum6 = vmlaq_laneq_f32(_sum6, _bb2p2, _ktm6, 2); _sum6n = vmlaq_laneq_f32(_sum6n, _bb2p2n, _ktm6, 2); _sum7 = vmlaq_laneq_f32(_sum7, _bb2p2, 
_ktm7, 2); _sum7n = vmlaq_laneq_f32(_sum7n, _bb2p2n, _ktm7, 2); _sum4 = vmlaq_laneq_f32(_sum4, _bb2p3, _ktm4, 3); _sum4n = vmlaq_laneq_f32(_sum4n, _bb2p3n, _ktm4, 3); _sum5 = vmlaq_laneq_f32(_sum5, _bb2p3, _ktm5, 3); _sum5n = vmlaq_laneq_f32(_sum5n, _bb2p3n, _ktm5, 3); _sum6 = vmlaq_laneq_f32(_sum6, _bb2p3, _ktm6, 3); _sum6n = vmlaq_laneq_f32(_sum6n, _bb2p3n, _ktm6, 3); _sum7 = vmlaq_laneq_f32(_sum7, _bb2p3, _ktm7, 3); _sum7n = vmlaq_laneq_f32(_sum7n, _bb2p3n, _ktm7, 3); } for (; q<inch; q++) { asm volatile("prfm pldl1keep, [%0, #256] \n" : :"r"(bb2p0) :); float32x4_t _bb2p0 = vld1q_f32(bb2p0); float32x4_t _bb2p0n = vld1q_f32(bb2p0+4); bb2p0 += 8; asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(ktm0) :); float32x4_t _ktm0 = vld1q_f32(ktm0); ktm0 += 4; _sum0 = vmlaq_laneq_f32(_sum0, _bb2p0, _ktm0, 0); _sum0n = vmlaq_laneq_f32(_sum0n, _bb2p0n, _ktm0, 0); _sum1 = vmlaq_laneq_f32(_sum1, _bb2p0, _ktm0, 1); _sum1n = vmlaq_laneq_f32(_sum1n, _bb2p0n, _ktm0, 1); _sum2 = vmlaq_laneq_f32(_sum2, _bb2p0, _ktm0, 2); _sum2n = vmlaq_laneq_f32(_sum2n, _bb2p0n, _ktm0, 2); _sum3 = vmlaq_laneq_f32(_sum3, _bb2p0, _ktm0, 3); _sum3n = vmlaq_laneq_f32(_sum3n, _bb2p0n, _ktm0, 3); asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(ktm0) :); float32x4_t _ktm1 = vld1q_f32(ktm0); ktm0 += 4; _sum4 = vmlaq_laneq_f32(_sum4, _bb2p0, _ktm1, 0); _sum4n = vmlaq_laneq_f32(_sum4n, _bb2p0n, _ktm1, 0); _sum5 = vmlaq_laneq_f32(_sum5, _bb2p0, _ktm1, 1); _sum5n = vmlaq_laneq_f32(_sum5n, _bb2p0n, _ktm1, 1); _sum6 = vmlaq_laneq_f32(_sum6, _bb2p0, _ktm1, 2); _sum6n = vmlaq_laneq_f32(_sum6n, _bb2p0n, _ktm1, 2); _sum7 = vmlaq_laneq_f32(_sum7, _bb2p0, _ktm1, 3); _sum7n = vmlaq_laneq_f32(_sum7n, _bb2p0n, _ktm1, 3); } vst1q_f32(output0_tm, _sum0); vst1q_f32(output0_tm+4, _sum0n); vst1q_f32(output1_tm, _sum1); vst1q_f32(output1_tm+4, _sum1n); vst1q_f32(output2_tm, _sum2); vst1q_f32(output2_tm+4, _sum2n); vst1q_f32(output3_tm, _sum3); vst1q_f32(output3_tm+4, _sum3n); vst1q_f32(output4_tm, _sum4); 
vst1q_f32(output4_tm+4, _sum4n); vst1q_f32(output5_tm, _sum5); vst1q_f32(output5_tm+4, _sum5n); vst1q_f32(output6_tm, _sum6); vst1q_f32(output6_tm+4, _sum6n); vst1q_f32(output7_tm, _sum7); vst1q_f32(output7_tm+4, _sum7n); output0_tm += 8; output1_tm += 8; output2_tm += 8; output3_tm += 8; output4_tm += 8; output5_tm += 8; output6_tm += 8; output7_tm += 8; } for (; i+3<tiles; i+=4) { const float* bb2p0 = bb2.row(i/8+(i%8)/4); const float* ktm0 = kernel_tm0.row(r); float32x4_t _sum0 = vdupq_n_f32(0.f); float32x4_t _sum1 = vdupq_n_f32(0.f); float32x4_t _sum2 = vdupq_n_f32(0.f); float32x4_t _sum3 = vdupq_n_f32(0.f); float32x4_t _sum4 = vdupq_n_f32(0.f); float32x4_t _sum5 = vdupq_n_f32(0.f); float32x4_t _sum6 = vdupq_n_f32(0.f); float32x4_t _sum7 = vdupq_n_f32(0.f); int q=0; for (; q+3<inch; q+=4) { asm volatile("prfm pldl1keep, [%0, #256] \n" : :"r"(bb2p0) :); float32x4_t _bb2p0 = vld1q_f32(bb2p0 + 0); float32x4_t _bb2p1 = vld1q_f32(bb2p0 + 4); bb2p0 += 8; asm volatile("prfm pldl1keep, [%0, #512] \n" : :"r"(ktm0) :); float32x4_t _ktm0 = vld1q_f32(ktm0 + 0); float32x4_t _ktm1 = vld1q_f32(ktm0 + 4); float32x4_t _ktm2 = vld1q_f32(ktm0 + 8); float32x4_t _ktm3 = vld1q_f32(ktm0 + 12); ktm0 += 16; _sum0 = vmlaq_laneq_f32(_sum0, _bb2p0, _ktm0, 0); _sum1 = vmlaq_laneq_f32(_sum1, _bb2p0, _ktm1, 0); _sum2 = vmlaq_laneq_f32(_sum2, _bb2p0, _ktm2, 0); _sum3 = vmlaq_laneq_f32(_sum3, _bb2p0, _ktm3, 0); _sum0 = vmlaq_laneq_f32(_sum0, _bb2p1, _ktm0, 1); _sum1 = vmlaq_laneq_f32(_sum1, _bb2p1, _ktm1, 1); _sum2 = vmlaq_laneq_f32(_sum2, _bb2p1, _ktm2, 1); _sum3 = vmlaq_laneq_f32(_sum3, _bb2p1, _ktm3, 1); asm volatile("prfm pldl1keep, [%0, #256] \n" : :"r"(bb2p0) :); float32x4_t _bb2p2 = vld1q_f32(bb2p0 + 0); float32x4_t _bb2p3 = vld1q_f32(bb2p0 + 4); bb2p0 += 8; _sum0 = vmlaq_laneq_f32(_sum0, _bb2p2, _ktm0, 2); _sum1 = vmlaq_laneq_f32(_sum1, _bb2p2, _ktm1, 2); _sum2 = vmlaq_laneq_f32(_sum2, _bb2p2, _ktm2, 2); _sum3 = vmlaq_laneq_f32(_sum3, _bb2p2, _ktm3, 2); _sum0 = vmlaq_laneq_f32(_sum0, 
_bb2p3, _ktm0, 3); _sum1 = vmlaq_laneq_f32(_sum1, _bb2p3, _ktm1, 3); _sum2 = vmlaq_laneq_f32(_sum2, _bb2p3, _ktm2, 3); _sum3 = vmlaq_laneq_f32(_sum3, _bb2p3, _ktm3, 3); asm volatile("prfm pldl1keep, [%0, #512] \n" : :"r"(ktm0) :); float32x4_t _ktm4 = vld1q_f32(ktm0 + 0); float32x4_t _ktm5 = vld1q_f32(ktm0 + 4); float32x4_t _ktm6 = vld1q_f32(ktm0 + 8); float32x4_t _ktm7 = vld1q_f32(ktm0 + 12); ktm0 += 16; _sum4 = vmlaq_laneq_f32(_sum4, _bb2p0, _ktm4, 0); _sum5 = vmlaq_laneq_f32(_sum5, _bb2p0, _ktm5, 0); _sum6 = vmlaq_laneq_f32(_sum6, _bb2p0, _ktm6, 0); _sum7 = vmlaq_laneq_f32(_sum7, _bb2p0, _ktm7, 0); _sum4 = vmlaq_laneq_f32(_sum4, _bb2p1, _ktm4, 1); _sum5 = vmlaq_laneq_f32(_sum5, _bb2p1, _ktm5, 1); _sum6 = vmlaq_laneq_f32(_sum6, _bb2p1, _ktm6, 1); _sum7 = vmlaq_laneq_f32(_sum7, _bb2p1, _ktm7, 1); _sum4 = vmlaq_laneq_f32(_sum4, _bb2p2, _ktm4, 2); _sum5 = vmlaq_laneq_f32(_sum5, _bb2p2, _ktm5, 2); _sum6 = vmlaq_laneq_f32(_sum6, _bb2p2, _ktm6, 2); _sum7 = vmlaq_laneq_f32(_sum7, _bb2p2, _ktm7, 2); _sum4 = vmlaq_laneq_f32(_sum4, _bb2p3, _ktm4, 3); _sum5 = vmlaq_laneq_f32(_sum5, _bb2p3, _ktm5, 3); _sum6 = vmlaq_laneq_f32(_sum6, _bb2p3, _ktm6, 3); _sum7 = vmlaq_laneq_f32(_sum7, _bb2p3, _ktm7, 3); } for (; q<inch; q++) { asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(bb2p0) :); float32x4_t _bb2p0 = vld1q_f32(bb2p0); bb2p0 += 4; asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(ktm0) :); float32x4_t _ktm0 = vld1q_f32(ktm0); ktm0 += 4; _sum0 = vmlaq_laneq_f32(_sum0, _bb2p0, _ktm0, 0); _sum1 = vmlaq_laneq_f32(_sum1, _bb2p0, _ktm0, 1); _sum2 = vmlaq_laneq_f32(_sum2, _bb2p0, _ktm0, 2); _sum3 = vmlaq_laneq_f32(_sum3, _bb2p0, _ktm0, 3); asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(ktm0) :); float32x4_t _ktm1 = vld1q_f32(ktm0); ktm0 += 4; _sum4 = vmlaq_laneq_f32(_sum4, _bb2p0, _ktm1, 0); _sum5 = vmlaq_laneq_f32(_sum5, _bb2p0, _ktm1, 1); _sum6 = vmlaq_laneq_f32(_sum6, _bb2p0, _ktm1, 2); _sum7 = vmlaq_laneq_f32(_sum7, _bb2p0, _ktm1, 3); } vst1q_f32(output0_tm, _sum0); 
vst1q_f32(output1_tm, _sum1); vst1q_f32(output2_tm, _sum2); vst1q_f32(output3_tm, _sum3); vst1q_f32(output4_tm, _sum4); vst1q_f32(output5_tm, _sum5); vst1q_f32(output6_tm, _sum6); vst1q_f32(output7_tm, _sum7); output0_tm += 4; output1_tm += 4; output2_tm += 4; output3_tm += 4; output4_tm += 4; output5_tm += 4; output6_tm += 4; output7_tm += 4; } for (; i<tiles; i++) { const float* bb2p0 = bb2.row(i/8+(i%8)/4+i%4); const float* ktm0 = kernel_tm0.row(r); float32x4_t _sum0 = vdupq_n_f32(0.f); float32x4_t _sum1 = vdupq_n_f32(0.f); float32x4_t _sum2 = vdupq_n_f32(0.f); float32x4_t _sum3 = vdupq_n_f32(0.f); float32x4_t _sum4 = vdupq_n_f32(0.f); float32x4_t _sum5 = vdupq_n_f32(0.f); float32x4_t _sum6 = vdupq_n_f32(0.f); float32x4_t _sum7 = vdupq_n_f32(0.f); int q=0; for (; q+3<inch; q+=4) { asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(bb2p0) :); float32x4_t _bb2p0 = vld1q_f32(bb2p0); bb2p0 += 4; asm volatile("prfm pldl1keep, [%0, #512] \n" : :"r"(ktm0) :); float32x4_t _ktm0 = vld1q_f32(ktm0 + 0); float32x4_t _ktm1 = vld1q_f32(ktm0 + 4); float32x4_t _ktm2 = vld1q_f32(ktm0 + 8); float32x4_t _ktm3 = vld1q_f32(ktm0 + 12); ktm0 += 16; _sum0 = vmlaq_f32(_sum0, _bb2p0, _ktm0); _sum1 = vmlaq_f32(_sum1, _bb2p0, _ktm1); _sum2 = vmlaq_f32(_sum2, _bb2p0, _ktm2); _sum3 = vmlaq_f32(_sum3, _bb2p0, _ktm3); asm volatile("prfm pldl1keep, [%0, #512] \n" : :"r"(ktm0) :); float32x4_t _ktm4 = vld1q_f32(ktm0 + 0); float32x4_t _ktm5 = vld1q_f32(ktm0 + 4); float32x4_t _ktm6 = vld1q_f32(ktm0 + 8); float32x4_t _ktm7 = vld1q_f32(ktm0 + 12); ktm0 += 16; _sum4 = vmlaq_f32(_sum4, _bb2p0, _ktm4); _sum5 = vmlaq_f32(_sum5, _bb2p0, _ktm5); _sum6 = vmlaq_f32(_sum6, _bb2p0, _ktm6); _sum7 = vmlaq_f32(_sum7, _bb2p0, _ktm7); } float sum0 = vaddvq_f32(_sum0); float sum1 = vaddvq_f32(_sum1); float sum2 = vaddvq_f32(_sum2); float sum3 = vaddvq_f32(_sum3); float sum4 = vaddvq_f32(_sum4); float sum5 = vaddvq_f32(_sum5); float sum6 = vaddvq_f32(_sum6); float sum7 = vaddvq_f32(_sum7); for (; q<inch; q++) { sum0 
+= bb2p0[0] * ktm0[0]; sum1 += bb2p0[0] * ktm0[1]; sum2 += bb2p0[0] * ktm0[2]; sum3 += bb2p0[0] * ktm0[3]; sum4 += bb2p0[0] * ktm0[4]; sum5 += bb2p0[0] * ktm0[5]; sum6 += bb2p0[0] * ktm0[6]; sum7 += bb2p0[0] * ktm0[7]; bb2p0 += 1; ktm0 += 8; } output0_tm[0] = sum0; output1_tm[0] = sum1; output2_tm[0] = sum2; output3_tm[0] = sum3; output4_tm[0] = sum4; output5_tm[0] = sum5; output6_tm[0] = sum6; output7_tm[0] = sum7; output0_tm += 1; output1_tm += 1; output2_tm += 1; output3_tm += 1; output4_tm += 1; output5_tm += 1; output6_tm += 1; output7_tm += 1; } } } int p = outch/8*8; for (; p+3<outch; p+=4) { const Mat kernel_tm0 = kernel_tm.channel(p/8+(p%8)/4); Mat out0_tm = top_blob_tm.channel(p); Mat out1_tm = top_blob_tm.channel(p+1); Mat out2_tm = top_blob_tm.channel(p+2); Mat out3_tm = top_blob_tm.channel(p+3); float* output0_tm = out0_tm; float* output1_tm = out1_tm; float* output2_tm = out2_tm; float* output3_tm = out3_tm; for (int r=0; r<64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); // tile int i=0; for (; i+7<tiles; i+=8) { const float* bb2p0 = bb2.row(i/8); const float* ktm0 = kernel_tm0.row(r); float32x4_t _sum0 = vdupq_n_f32(0.f); float32x4_t _sum0n = vdupq_n_f32(0.f); float32x4_t _sum1 = vdupq_n_f32(0.f); float32x4_t _sum1n = vdupq_n_f32(0.f); float32x4_t _sum2 = vdupq_n_f32(0.f); float32x4_t _sum2n = vdupq_n_f32(0.f); float32x4_t _sum3 = vdupq_n_f32(0.f); float32x4_t _sum3n = vdupq_n_f32(0.f); int q=0; for (; q+3<inch; q+=4) { asm volatile("prfm pldl1keep, [%0, #256] \n" : :"r"(bb2p0) :); float32x4_t _bb2p0 = vld1q_f32(bb2p0); float32x4_t _bb2p0n = vld1q_f32(bb2p0+4); bb2p0 += 8; asm volatile("prfm pldl1keep, [%0, #512] \n" : :"r"(ktm0) :); float32x4_t _ktm0 = vld1q_f32(ktm0 + 0); float32x4_t _ktm1 = vld1q_f32(ktm0 + 4); float32x4_t _ktm2 = vld1q_f32(ktm0 + 8); float32x4_t _ktm3 = vld1q_f32(ktm0 + 12); ktm0 += 16; _sum0 = vmlaq_laneq_f32(_sum0, _bb2p0, _ktm0, 0); _sum0n = vmlaq_laneq_f32(_sum0n, _bb2p0n, _ktm0, 0); _sum1 = vmlaq_laneq_f32(_sum1, 
_bb2p0, _ktm1, 0); _sum1n = vmlaq_laneq_f32(_sum1n, _bb2p0n, _ktm1, 0); _sum2 = vmlaq_laneq_f32(_sum2, _bb2p0, _ktm2, 0); _sum2n = vmlaq_laneq_f32(_sum2n, _bb2p0n, _ktm2, 0); _sum3 = vmlaq_laneq_f32(_sum3, _bb2p0, _ktm3, 0); _sum3n = vmlaq_laneq_f32(_sum3n, _bb2p0n, _ktm3, 0); asm volatile("prfm pldl1keep, [%0, #256] \n" : :"r"(bb2p0) :); float32x4_t _bb2p1 = vld1q_f32(bb2p0); float32x4_t _bb2p1n = vld1q_f32(bb2p0+4); bb2p0 += 8; _sum0 = vmlaq_laneq_f32(_sum0, _bb2p1, _ktm0, 1); _sum0n = vmlaq_laneq_f32(_sum0n, _bb2p1n, _ktm0, 1); _sum1 = vmlaq_laneq_f32(_sum1, _bb2p1, _ktm1, 1); _sum1n = vmlaq_laneq_f32(_sum1n, _bb2p1n, _ktm1, 1); _sum2 = vmlaq_laneq_f32(_sum2, _bb2p1, _ktm2, 1); _sum2n = vmlaq_laneq_f32(_sum2n, _bb2p1n, _ktm2, 1); _sum3 = vmlaq_laneq_f32(_sum3, _bb2p1, _ktm3, 1); _sum3n = vmlaq_laneq_f32(_sum3n, _bb2p1n, _ktm3, 1); asm volatile("prfm pldl1keep, [%0, #256] \n" : :"r"(bb2p0) :); float32x4_t _bb2p2 = vld1q_f32(bb2p0); float32x4_t _bb2p2n = vld1q_f32(bb2p0+4); bb2p0 += 8; _sum0 = vmlaq_laneq_f32(_sum0, _bb2p2, _ktm0, 2); _sum0n = vmlaq_laneq_f32(_sum0n, _bb2p2n, _ktm0, 2); _sum1 = vmlaq_laneq_f32(_sum1, _bb2p2, _ktm1, 2); _sum1n = vmlaq_laneq_f32(_sum1n, _bb2p2n, _ktm1, 2); _sum2 = vmlaq_laneq_f32(_sum2, _bb2p2, _ktm2, 2); _sum2n = vmlaq_laneq_f32(_sum2n, _bb2p2n, _ktm2, 2); _sum3 = vmlaq_laneq_f32(_sum3, _bb2p2, _ktm3, 2); _sum3n = vmlaq_laneq_f32(_sum3n, _bb2p2n, _ktm3, 2); asm volatile("prfm pldl1keep, [%0, #256] \n" : :"r"(bb2p0) :); float32x4_t _bb2p3 = vld1q_f32(bb2p0); float32x4_t _bb2p3n = vld1q_f32(bb2p0+4); bb2p0 += 8; _sum0 = vmlaq_laneq_f32(_sum0, _bb2p3, _ktm0, 3); _sum0n = vmlaq_laneq_f32(_sum0n, _bb2p3n, _ktm0, 3); _sum1 = vmlaq_laneq_f32(_sum1, _bb2p3, _ktm1, 3); _sum1n = vmlaq_laneq_f32(_sum1n, _bb2p3n, _ktm1, 3); _sum2 = vmlaq_laneq_f32(_sum2, _bb2p3, _ktm2, 3); _sum2n = vmlaq_laneq_f32(_sum2n, _bb2p3n, _ktm2, 3); _sum3 = vmlaq_laneq_f32(_sum3, _bb2p3, _ktm3, 3); _sum3n = vmlaq_laneq_f32(_sum3n, _bb2p3n, _ktm3, 3); } for (; q<inch; 
q++) { asm volatile("prfm pldl1keep, [%0, #256] \n" : :"r"(bb2p0) :); float32x4_t _bb2p0 = vld1q_f32(bb2p0); float32x4_t _bb2p0n = vld1q_f32(bb2p0+4); bb2p0 += 8; asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(ktm0) :); float32x4_t _ktm0 = vld1q_f32(ktm0); ktm0 += 4; _sum0 = vmlaq_laneq_f32(_sum0, _bb2p0, _ktm0, 0); _sum0n = vmlaq_laneq_f32(_sum0n, _bb2p0n, _ktm0, 0); _sum1 = vmlaq_laneq_f32(_sum1, _bb2p0, _ktm0, 1); _sum1n = vmlaq_laneq_f32(_sum1n, _bb2p0n, _ktm0, 1); _sum2 = vmlaq_laneq_f32(_sum2, _bb2p0, _ktm0, 2); _sum2n = vmlaq_laneq_f32(_sum2n, _bb2p0n, _ktm0, 2); _sum3 = vmlaq_laneq_f32(_sum3, _bb2p0, _ktm0, 3); _sum3n = vmlaq_laneq_f32(_sum3n, _bb2p0n, _ktm0, 3); } vst1q_f32(output0_tm, _sum0); vst1q_f32(output0_tm+4, _sum0n); vst1q_f32(output1_tm, _sum1); vst1q_f32(output1_tm+4, _sum1n); vst1q_f32(output2_tm, _sum2); vst1q_f32(output2_tm+4, _sum2n); vst1q_f32(output3_tm, _sum3); vst1q_f32(output3_tm+4, _sum3n); output0_tm += 8; output1_tm += 8; output2_tm += 8; output3_tm += 8; } for (; i+3<tiles; i+=4) { const float* bb2p0 = bb2.row(i/8+(i%8)/4); const float* ktm0 = kernel_tm0.row(r); float32x4_t _sum0 = vdupq_n_f32(0.f); float32x4_t _sum1 = vdupq_n_f32(0.f); float32x4_t _sum2 = vdupq_n_f32(0.f); float32x4_t _sum3 = vdupq_n_f32(0.f); int q=0; for (; q+3<inch; q+=4) { asm volatile("prfm pldl1keep, [%0, #256] \n" : :"r"(bb2p0) :); float32x4_t _bb2p0 = vld1q_f32(bb2p0 + 0); float32x4_t _bb2p1 = vld1q_f32(bb2p0 + 4); bb2p0 += 8; asm volatile("prfm pldl1keep, [%0, #512] \n" : :"r"(ktm0) :); float32x4_t _ktm0 = vld1q_f32(ktm0 + 0); float32x4_t _ktm1 = vld1q_f32(ktm0 + 4); float32x4_t _ktm2 = vld1q_f32(ktm0 + 8); float32x4_t _ktm3 = vld1q_f32(ktm0 + 12); ktm0 += 16; _sum0 = vmlaq_laneq_f32(_sum0, _bb2p0, _ktm0, 0); _sum1 = vmlaq_laneq_f32(_sum1, _bb2p0, _ktm1, 0); _sum2 = vmlaq_laneq_f32(_sum2, _bb2p0, _ktm2, 0); _sum3 = vmlaq_laneq_f32(_sum3, _bb2p0, _ktm3, 0); _sum0 = vmlaq_laneq_f32(_sum0, _bb2p1, _ktm0, 1); _sum1 = vmlaq_laneq_f32(_sum1, _bb2p1, _ktm1, 
1); _sum2 = vmlaq_laneq_f32(_sum2, _bb2p1, _ktm2, 1); _sum3 = vmlaq_laneq_f32(_sum3, _bb2p1, _ktm3, 1); asm volatile("prfm pldl1keep, [%0, #256] \n" : :"r"(bb2p0) :); float32x4_t _bb2p2 = vld1q_f32(bb2p0 + 0); float32x4_t _bb2p3 = vld1q_f32(bb2p0 + 4); bb2p0 += 8; _sum0 = vmlaq_laneq_f32(_sum0, _bb2p2, _ktm0, 2); _sum1 = vmlaq_laneq_f32(_sum1, _bb2p2, _ktm1, 2); _sum2 = vmlaq_laneq_f32(_sum2, _bb2p2, _ktm2, 2); _sum3 = vmlaq_laneq_f32(_sum3, _bb2p2, _ktm3, 2); _sum0 = vmlaq_laneq_f32(_sum0, _bb2p3, _ktm0, 3); _sum1 = vmlaq_laneq_f32(_sum1, _bb2p3, _ktm1, 3); _sum2 = vmlaq_laneq_f32(_sum2, _bb2p3, _ktm2, 3); _sum3 = vmlaq_laneq_f32(_sum3, _bb2p3, _ktm3, 3); } for (; q<inch; q++) { asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(bb2p0) :); float32x4_t _bb2p0 = vld1q_f32(bb2p0); bb2p0 += 4; asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(ktm0) :); float32x4_t _ktm0 = vld1q_f32(ktm0); ktm0 += 4; _sum0 = vmlaq_laneq_f32(_sum0, _bb2p0, _ktm0, 0); _sum1 = vmlaq_laneq_f32(_sum1, _bb2p0, _ktm0, 1); _sum2 = vmlaq_laneq_f32(_sum2, _bb2p0, _ktm0, 2); _sum3 = vmlaq_laneq_f32(_sum3, _bb2p0, _ktm0, 3); } vst1q_f32(output0_tm, _sum0); vst1q_f32(output1_tm, _sum1); vst1q_f32(output2_tm, _sum2); vst1q_f32(output3_tm, _sum3); output0_tm += 4; output1_tm += 4; output2_tm += 4; output3_tm += 4; } for (; i<tiles; i++) { const float* bb2p0 = bb2.row(i/8+(i%8)/4+i%4); const float* ktm0 = kernel_tm0.row(r); float32x4_t _sum0 = vdupq_n_f32(0.f); float32x4_t _sum1 = vdupq_n_f32(0.f); float32x4_t _sum2 = vdupq_n_f32(0.f); float32x4_t _sum3 = vdupq_n_f32(0.f); int q=0; for (; q+3<inch; q+=4) { asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(bb2p0) :); float32x4_t _bb2p0 = vld1q_f32(bb2p0); bb2p0 += 4; asm volatile("prfm pldl1keep, [%0, #512] \n" : :"r"(ktm0) :); float32x4_t _ktm0 = vld1q_f32(ktm0 + 0); float32x4_t _ktm1 = vld1q_f32(ktm0 + 4); float32x4_t _ktm2 = vld1q_f32(ktm0 + 8); float32x4_t _ktm3 = vld1q_f32(ktm0 + 12); ktm0 += 16; _sum0 = vmlaq_f32(_sum0, _bb2p0, _ktm0); _sum1 
= vmlaq_f32(_sum1, _bb2p0, _ktm1); _sum2 = vmlaq_f32(_sum2, _bb2p0, _ktm2); _sum3 = vmlaq_f32(_sum3, _bb2p0, _ktm3); } float sum0 = vaddvq_f32(_sum0); float sum1 = vaddvq_f32(_sum1); float sum2 = vaddvq_f32(_sum2); float sum3 = vaddvq_f32(_sum3); for (; q<inch; q++) { sum0 += bb2p0[0] * ktm0[0]; sum1 += bb2p0[0] * ktm0[1]; sum2 += bb2p0[0] * ktm0[2]; sum3 += bb2p0[0] * ktm0[3]; bb2p0 += 1; ktm0 += 4; } output0_tm[0] = sum0; output1_tm[0] = sum1; output2_tm[0] = sum2; output3_tm[0] = sum3; output0_tm += 1; output1_tm += 1; output2_tm += 1; output3_tm += 1; } } } for (; p<outch; p++) { const Mat kernel_tm0 = kernel_tm.channel(p/8+(p%8)/4+p%4); Mat out0_tm = top_blob_tm.channel(p); float* output0_tm = out0_tm; for (int r=0; r<64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); // tile int i=0; for (; i+7<tiles; i+=8) { const float* bb2p0 = bb2.row(i/8); const float* ktm0 = kernel_tm0.row(r); float32x4_t _sum0 = vdupq_n_f32(0.f); float32x4_t _sum0n = vdupq_n_f32(0.f); int q=0; for (; q+3<inch; q+=4) { asm volatile("prfm pldl1keep, [%0, #256] \n" : :"r"(bb2p0) :); float32x4_t _bb2p0 = vld1q_f32(bb2p0); float32x4_t _bb2p0n = vld1q_f32(bb2p0+4); bb2p0 += 8; asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(ktm0) :); float32x4_t _ktm0 = vld1q_f32(ktm0); ktm0 += 4; _sum0 = vmlaq_laneq_f32(_sum0, _bb2p0, _ktm0, 0); _sum0n = vmlaq_laneq_f32(_sum0n, _bb2p0n, _ktm0, 0); asm volatile("prfm pldl1keep, [%0, #256] \n" : :"r"(bb2p0) :); float32x4_t _bb2p1 = vld1q_f32(bb2p0); float32x4_t _bb2p1n = vld1q_f32(bb2p0+4); bb2p0 += 8; _sum0 = vmlaq_laneq_f32(_sum0, _bb2p1, _ktm0, 1); _sum0n = vmlaq_laneq_f32(_sum0n, _bb2p1n, _ktm0, 1); asm volatile("prfm pldl1keep, [%0, #256] \n" : :"r"(bb2p0) :); float32x4_t _bb2p2 = vld1q_f32(bb2p0); float32x4_t _bb2p2n = vld1q_f32(bb2p0+4); bb2p0 += 8; _sum0 = vmlaq_laneq_f32(_sum0, _bb2p2, _ktm0, 2); _sum0n = vmlaq_laneq_f32(_sum0n, _bb2p2n, _ktm0, 2); asm volatile("prfm pldl1keep, [%0, #256] \n" : :"r"(bb2p0) :); float32x4_t _bb2p3 = 
vld1q_f32(bb2p0); float32x4_t _bb2p3n = vld1q_f32(bb2p0+4); bb2p0 += 8; _sum0 = vmlaq_laneq_f32(_sum0, _bb2p3, _ktm0, 3); _sum0n = vmlaq_laneq_f32(_sum0n, _bb2p3n, _ktm0, 3); } for (; q<inch; q++) { asm volatile("prfm pldl1keep, [%0, #256] \n" : :"r"(bb2p0) :); float32x4_t _bb2p0 = vld1q_f32(bb2p0); float32x4_t _bb2p0n = vld1q_f32(bb2p0+4); bb2p0 += 8; float32x4_t _ktm0 = vdupq_n_f32(ktm0[0]); ktm0 += 1; _sum0 = vmlaq_f32(_sum0, _bb2p0, _ktm0); _sum0n = vmlaq_f32(_sum0n, _bb2p0n, _ktm0); } vst1q_f32(output0_tm, _sum0); vst1q_f32(output0_tm+4, _sum0n); output0_tm += 8; } for (; i+3<tiles; i+=4) { const float* bb2p0 = bb2.row(i/8+(i%8)/4); const float* ktm0 = kernel_tm0.row(r); float32x4_t _sum0 = vdupq_n_f32(0.f); int q=0; for (; q+3<inch; q+=4) { asm volatile("prfm pldl1keep, [%0, #256] \n" : :"r"(bb2p0) :); float32x4_t _bb2p0 = vld1q_f32(bb2p0 + 0); float32x4_t _bb2p1 = vld1q_f32(bb2p0 + 4); bb2p0 += 8; asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(ktm0) :); float32x4_t _ktm0 = vld1q_f32(ktm0); ktm0 += 4; _sum0 = vmlaq_laneq_f32(_sum0, _bb2p0, _ktm0, 0); _sum0 = vmlaq_laneq_f32(_sum0, _bb2p1, _ktm0, 1); asm volatile("prfm pldl1keep, [%0, #256] \n" : :"r"(bb2p0) :); float32x4_t _bb2p2 = vld1q_f32(bb2p0 + 0); float32x4_t _bb2p3 = vld1q_f32(bb2p0 + 4); bb2p0 += 8; _sum0 = vmlaq_laneq_f32(_sum0, _bb2p2, _ktm0, 2); _sum0 = vmlaq_laneq_f32(_sum0, _bb2p3, _ktm0, 3); } for (; q<inch; q++) { asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(bb2p0) :); float32x4_t _bb2p0 = vld1q_f32(bb2p0); bb2p0 += 4; float32x4_t _ktm0 = vdupq_n_f32(ktm0[0]); ktm0 += 1; _sum0 = vmlaq_f32(_sum0, _bb2p0, _ktm0); } vst1q_f32(output0_tm, _sum0); output0_tm += 4; } for (; i<tiles; i++) { const float* bb2p0 = bb2.row(i/8+(i%8)/4+i%4); const float* ktm0 = kernel_tm0.row(r); float32x4_t _sum0 = vdupq_n_f32(0.f); int q=0; for (; q+3<inch; q+=4) { asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(bb2p0) :); float32x4_t _bb2p0 = vld1q_f32(bb2p0); bb2p0 += 4; float32x4_t _ktm0 = 
vld1q_f32(ktm0); ktm0 += 4; _sum0 = vmlaq_f32(_sum0, _bb2p0, _ktm0); } float sum0 = vaddvq_f32(_sum0); for (; q<inch; q++) { sum0 += bb2p0[0] * ktm0[0]; bb2p0 += 1; ktm0 += 1; } output0_tm[0] = sum0; output0_tm += 1; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch); { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) #if __ARM_NEON const float coeff[4] = { 4.f, 8.f, 16.f, 32.f }; float32x4_t _coeff = vld1q_f32(coeff); #endif // __ARM_NEON int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm/8 * h_tm/8; #pragma omp parallel for for (int p = 0; p<outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; #if __ARM_NEON float32x2_t _bias0 = vdup_n_f32(bias0); #endif // __ARM_NEON float tmp[6][8]; // tile for (int i=0; i<outh/6; i++) { for (int j=0; j<outw/6; j++) { #if __ARM_NEON const float* output0_tm0 = out0_tm.row(i * w_tm/8 + j); const float* output0_tm1 = out0_tm.row(i * w_tm/8 + j + tiles*8); const float* output0_tm2 = out0_tm.row(i * w_tm/8 + j + tiles*16); const float* output0_tm3 = out0_tm.row(i * w_tm/8 + j + tiles*24); for (int m=0; m+3<8; m+=4) { float32x4_t _output0_tm_00; float32x4_t _output0_tm_11; float32x4_t _output0_tm_22; float32x4_t _output0_tm_33; float32x4_t _output0_tm_44; float32x4_t _output0_tm_55; float32x4_t _output0_tm_66; float32x4_t _output0_tm_77; _output0_tm_00 = vsetq_lane_f32(output0_tm0[0], _output0_tm_00, 0); output0_tm0 += out0_tm.w * tiles; _output0_tm_00 = vsetq_lane_f32(output0_tm1[0], _output0_tm_00, 1); output0_tm1 += out0_tm.w * tiles; _output0_tm_00 = vsetq_lane_f32(output0_tm2[0], _output0_tm_00, 2); output0_tm2 += out0_tm.w * tiles; _output0_tm_00 = vsetq_lane_f32(output0_tm3[0], _output0_tm_00, 3); output0_tm3 += out0_tm.w * tiles; _output0_tm_11 = vsetq_lane_f32(output0_tm0[0], _output0_tm_11, 0); output0_tm0 += out0_tm.w * tiles; _output0_tm_11 = vsetq_lane_f32(output0_tm1[0], _output0_tm_11, 1); output0_tm1 += out0_tm.w * tiles; _output0_tm_11 = vsetq_lane_f32(output0_tm2[0], _output0_tm_11, 2); output0_tm2 += out0_tm.w * tiles; _output0_tm_11 = vsetq_lane_f32(output0_tm3[0], _output0_tm_11, 3); output0_tm3 += out0_tm.w * tiles; _output0_tm_22 = vsetq_lane_f32(output0_tm0[0], _output0_tm_22, 0); output0_tm0 += out0_tm.w * tiles; _output0_tm_22 = vsetq_lane_f32(output0_tm1[0], _output0_tm_22, 1); output0_tm1 += out0_tm.w * tiles; _output0_tm_22 = vsetq_lane_f32(output0_tm2[0], _output0_tm_22, 2); output0_tm2 += out0_tm.w * tiles; _output0_tm_22 = vsetq_lane_f32(output0_tm3[0], _output0_tm_22, 3); output0_tm3 += out0_tm.w * tiles; _output0_tm_33 = vsetq_lane_f32(output0_tm0[0], _output0_tm_33, 0); 
output0_tm0 += out0_tm.w * tiles; _output0_tm_33 = vsetq_lane_f32(output0_tm1[0], _output0_tm_33, 1); output0_tm1 += out0_tm.w * tiles; _output0_tm_33 = vsetq_lane_f32(output0_tm2[0], _output0_tm_33, 2); output0_tm2 += out0_tm.w * tiles; _output0_tm_33 = vsetq_lane_f32(output0_tm3[0], _output0_tm_33, 3); output0_tm3 += out0_tm.w * tiles; _output0_tm_44 = vsetq_lane_f32(output0_tm0[0], _output0_tm_44, 0); output0_tm0 += out0_tm.w * tiles; _output0_tm_44 = vsetq_lane_f32(output0_tm1[0], _output0_tm_44, 1); output0_tm1 += out0_tm.w * tiles; _output0_tm_44 = vsetq_lane_f32(output0_tm2[0], _output0_tm_44, 2); output0_tm2 += out0_tm.w * tiles; _output0_tm_44 = vsetq_lane_f32(output0_tm3[0], _output0_tm_44, 3); output0_tm3 += out0_tm.w * tiles; _output0_tm_55 = vsetq_lane_f32(output0_tm0[0], _output0_tm_55, 0); output0_tm0 += out0_tm.w * tiles; _output0_tm_55 = vsetq_lane_f32(output0_tm1[0], _output0_tm_55, 1); output0_tm1 += out0_tm.w * tiles; _output0_tm_55 = vsetq_lane_f32(output0_tm2[0], _output0_tm_55, 2); output0_tm2 += out0_tm.w * tiles; _output0_tm_55 = vsetq_lane_f32(output0_tm3[0], _output0_tm_55, 3); output0_tm3 += out0_tm.w * tiles; _output0_tm_66 = vsetq_lane_f32(output0_tm0[0], _output0_tm_66, 0); output0_tm0 += out0_tm.w * tiles; _output0_tm_66 = vsetq_lane_f32(output0_tm1[0], _output0_tm_66, 1); output0_tm1 += out0_tm.w * tiles; _output0_tm_66 = vsetq_lane_f32(output0_tm2[0], _output0_tm_66, 2); output0_tm2 += out0_tm.w * tiles; _output0_tm_66 = vsetq_lane_f32(output0_tm3[0], _output0_tm_66, 3); output0_tm3 += out0_tm.w * tiles; _output0_tm_77 = vsetq_lane_f32(output0_tm0[0], _output0_tm_77, 0); _output0_tm_77 = vsetq_lane_f32(output0_tm1[0], _output0_tm_77, 1); _output0_tm_77 = vsetq_lane_f32(output0_tm2[0], _output0_tm_77, 2); _output0_tm_77 = vsetq_lane_f32(output0_tm3[0], _output0_tm_77, 3); float32x4_t _tmp024a = vaddq_f32(_output0_tm_11, _output0_tm_22); float32x4_t _tmp135a = vsubq_f32(_output0_tm_11, _output0_tm_22); float32x4_t _tmp024b = 
vaddq_f32(_output0_tm_33, _output0_tm_44); float32x4_t _tmp135b = vsubq_f32(_output0_tm_33, _output0_tm_44); float32x4_t _tmp024c = vaddq_f32(_output0_tm_55, _output0_tm_66); float32x4_t _tmp135c = vsubq_f32(_output0_tm_55, _output0_tm_66); float32x4_t _tmp0 = vaddq_f32(_output0_tm_00, _tmp024a); _tmp0 = vmlaq_lane_f32(_tmp0, _tmp024c, vget_high_f32(_coeff), 1); _tmp0 = vaddq_f32(_tmp0, _tmp024b); float32x4_t _tmp2 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_low_f32(_coeff), 0); _tmp2 = vmlaq_lane_f32(_tmp2, _tmp024c, vget_low_f32(_coeff), 1); float32x4_t _tmp4 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0); _tmp4 = vaddq_f32(_tmp4, _tmp024c); _tmp4 = vaddq_f32(_tmp4, _tmp024c); vst1q_f32(&tmp[0][m], _tmp0); vst1q_f32(&tmp[2][m], _tmp2); vst1q_f32(&tmp[4][m], _tmp4); float32x4_t _tmp1 = vmlaq_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0); _tmp1 = vaddq_f32(_tmp1, _tmp135b); _tmp1 = vaddq_f32(_tmp1, _tmp135b); float32x4_t _tmp3 = vmlaq_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1); _tmp3 = vmlaq_lane_f32(_tmp3, _tmp135c, vget_low_f32(_coeff), 0); float32x4_t _tmp5 = vaddq_f32(_output0_tm_77, _tmp135a); _tmp5 = vmlaq_lane_f32(_tmp5, _tmp135b, vget_high_f32(_coeff), 1); _tmp5 = vaddq_f32(_tmp5, _tmp135c); vst1q_f32(&tmp[1][m], _tmp1); vst1q_f32(&tmp[3][m], _tmp3); vst1q_f32(&tmp[5][m], _tmp5); output0_tm0 += out0_tm.w*tiles*25; output0_tm1 += out0_tm.w*tiles*25; output0_tm2 += out0_tm.w*tiles*25; output0_tm3 += out0_tm.w*tiles*25; } const float* t0 = tmp[0]; const float* t1 = tmp[1]; float* output0 = out0.row(i * 6) + j * 6; float* output1 = output0 + outw; for (int m=0; m+1<6; m+=2) { float32x4_t _t0_0123 = vld1q_f32(t0); float32x4_t _t0_4567 = vld1q_f32(t0+4); float32x4_t _t1_0123 = vld1q_f32(t1); float32x4_t _t1_4567 = vld1q_f32(t1+4); float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123); float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567); float32x2_t _t_00 = vget_low_f32(_t01_00221133.val[0]); float32x2_t _t_11 = 
vget_low_f32(_t01_00221133.val[1]); float32x2_t _t_22 = vget_high_f32(_t01_00221133.val[0]); float32x2_t _t_33 = vget_high_f32(_t01_00221133.val[1]); float32x2_t _t_44 = vget_low_f32(_t01_44665577.val[0]); float32x2_t _t_55 = vget_low_f32(_t01_44665577.val[1]); float32x2_t _t_66 = vget_high_f32(_t01_44665577.val[0]); float32x2_t _t_77 = vget_high_f32(_t01_44665577.val[1]); float32x2_t _tmp024a = vadd_f32(_t_11, _t_22); float32x2_t _tmp135a = vsub_f32(_t_11, _t_22); float32x2_t _tmp024b = vadd_f32(_t_33, _t_44); float32x2_t _tmp135b = vsub_f32(_t_33, _t_44); float32x2_t _tmp024c = vadd_f32(_t_55, _t_66); float32x2_t _tmp135c = vsub_f32(_t_55, _t_66); float32x2_t _output_0 = vadd_f32(_t_00, _tmp024a); _output_0 = vmla_lane_f32(_output_0, _tmp024c, vget_high_f32(_coeff), 1); _output_0 = vadd_f32(_output_0, _tmp024b); _output_0 = vadd_f32(_output_0, _bias0); float32x2_t _output_2 = vmla_lane_f32(_tmp024a, _tmp024b, vget_low_f32(_coeff), 0); _output_2 = vmla_lane_f32(_output_2, _tmp024c, vget_low_f32(_coeff), 1); _output_2 = vadd_f32(_output_2, _bias0); float32x2_t _output_4 = vmla_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0); _output_4 = vadd_f32(_output_4, _tmp024c); _output_4 = vadd_f32(_output_4, _tmp024c); _output_4 = vadd_f32(_output_4, _bias0); output0[0] = vget_lane_f32(_output_0, 0); output1[0] = vget_lane_f32(_output_0, 1); output0[2] = vget_lane_f32(_output_2, 0); output1[2] = vget_lane_f32(_output_2, 1); output0[4] = vget_lane_f32(_output_4, 0); output1[4] = vget_lane_f32(_output_4, 1); float32x2_t _output_1 = vmla_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0); _output_1 = vadd_f32(_output_1, _tmp135b); _output_1 = vadd_f32(_output_1, _tmp135b); _output_1 = vadd_f32(_output_1, _bias0); float32x2_t _output_3 = vmla_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1); _output_3 = vmla_lane_f32(_output_3, _tmp135c, vget_low_f32(_coeff), 0); _output_3 = vadd_f32(_output_3, _bias0); float32x2_t _output_5 = vadd_f32(_t_77, _tmp135a); _output_5 
= vmla_lane_f32(_output_5, _tmp135b, vget_high_f32(_coeff), 1); _output_5 = vadd_f32(_output_5, _tmp135c); _output_5 = vadd_f32(_output_5, _bias0); output0[1] = vget_lane_f32(_output_1, 0); output1[1] = vget_lane_f32(_output_1, 1); output0[3] = vget_lane_f32(_output_3, 0); output1[3] = vget_lane_f32(_output_3, 1); output0[5] = vget_lane_f32(_output_5, 0); output1[5] = vget_lane_f32(_output_5, 1); t0 += 8*2; t1 += 8*2; output0 += outw*2; output1 += outw*2; } #else const float* output0_tm_0 = out0_tm.row(i * w_tm/8 + j); const float* output0_tm_1 = out0_tm.row(i * w_tm/8 + j + tiles); const float* output0_tm_2 = out0_tm.row(i * w_tm/8 + j + tiles*2); const float* output0_tm_3 = out0_tm.row(i * w_tm/8 + j + tiles*3); const float* output0_tm_4 = out0_tm.row(i * w_tm/8 + j + tiles*4); const float* output0_tm_5 = out0_tm.row(i * w_tm/8 + j + tiles*5); const float* output0_tm_6 = out0_tm.row(i * w_tm/8 + j + tiles*6); const float* output0_tm_7 = out0_tm.row(i * w_tm/8 + j + tiles*7); for (int m=0; m<8; m++) { float tmp024a = output0_tm_1[0] + output0_tm_2[0]; float tmp135a = output0_tm_1[0] - output0_tm_2[0]; float tmp024b = output0_tm_3[0] + output0_tm_4[0]; float tmp135b = output0_tm_3[0] - output0_tm_4[0]; float tmp024c = output0_tm_5[0] + output0_tm_6[0]; float tmp135c = output0_tm_5[0] - output0_tm_6[0]; tmp[0][m] = output0_tm_0[0] + tmp024a + tmp024b + tmp024c * 32; tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; tmp[5][m] = output0_tm_7[0] + tmp135a + tmp135b * 32 + tmp135c; output0_tm_0 += out0_tm.w * tiles * 8; output0_tm_1 += out0_tm.w * tiles * 8; output0_tm_2 += out0_tm.w * tiles * 8; output0_tm_3 += out0_tm.w * tiles * 8; output0_tm_4 += out0_tm.w * tiles * 8; output0_tm_5 += out0_tm.w * tiles * 8; output0_tm_6 += out0_tm.w * tiles * 8; output0_tm_7 += out0_tm.w * tiles * 8; } float* output0 = 
out0.row(i * 6) + j * 6;

        // Inverse Winograd F(6,3) column pass (scalar path): collapse the 8
        // intermediate values of each row of tmp[][] into 6 final outputs,
        // adding the per-channel bias here.
        for (int m=0; m<6; m++)
        {
            const float* tmp0 = tmp[m];

            float tmp024a = tmp0[1] + tmp0[2];
            float tmp135a = tmp0[1] - tmp0[2];
            float tmp024b = tmp0[3] + tmp0[4];
            float tmp135b = tmp0[3] - tmp0[4];
            float tmp024c = tmp0[5] + tmp0[6];
            float tmp135c = tmp0[5] - tmp0[6];

            output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
            output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
            output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c;

            output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16;
            output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
            output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;

            output0 += outw;
        }
#endif // __ARM_NEON
                }
            }
        }
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w);
}
#endif

/*
 * 3x3 convolution, horizontal and vertical stride 2.
 * Weight layout: 9 consecutive floats per (output channel, input channel)
 * pair, starting at kernel + p*inch*9.
 * The first parallel loop produces two output channels per iteration so the
 * three input rows loaded per step are reused for both kernels; the second
 * parallel loop handles the remaining odd output channel.
 */
static void conv3x3s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // From the end of one output row, skip the unconsumed tail of the current
    // input row plus one full input row (vertical stride 2).
    const int tailstep = w - 2*outw + w;

    const float* kernel = _kernel;
    const float* bias = _bias;

    int nn_outch = outch >> 1;
    int remain_outch_start = nn_outch << 1;

    // Two output channels per iteration.
    #pragma omp parallel for
    for (int pp=0; pp<nn_outch; pp++)
    {
        int p = pp * 2;

        Mat out0 = top_blob.channel(p);
        Mat out1 = top_blob.channel(p+1);

        const float bias0 = bias ? bias[p] : 0.f;
        const float bias1 = bias ? bias[p+1] : 0.f;

        // Outputs start at the bias and are accumulated over input channels.
        out0.fill(bias0);
        out1.fill(bias1);

        const float* k0 = kernel + p*inch*9;
        const float* k1 = kernel + (p+1)*inch*9;

        for (int q=0; q<inch; q++)
        {
            float* outptr0 = out0;
            float* outptr1 = out1;

            const float* img0 = bottom_blob.channel(q);

            // Three consecutive input rows feeding one output row.
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w*2;

#if __ARM_NEON
            // Each vld1q_f32 loads 4 floats; only the first 3 taps of each
            // kernel row are used by the computation below.
            float32x4_t _k00 = vld1q_f32(k0);
            float32x4_t _k03 = vld1q_f32(k0+3);
            float32x4_t _k06 = vld1q_f32(k0+6);
            float32x4_t _k10 = vld1q_f32(k1);
            float32x4_t _k13 = vld1q_f32(k1+3);
            float32x4_t _k16 = vld1q_f32(k1+6);
#endif // __ARM_NEON

            int i = 0;

            for (; i < outh; i++)
            {
#if __ARM_NEON
                // 4 output pixels per asm iteration, scalar tail below.
                int nn = outw >> 2;
                int remain = outw & 3;
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                // ld2 de-interleaves even/odd input columns (stride-2 access);
                // ext rebuilds the +2 shifted vector for the third tap.
                if (nn > 0)
                {
                asm volatile(
                    "prfm pldl1keep, [%3, #256] \n"
                    "ld2 {v8.4s, v9.4s}, [%3], #32 \n"// v8 v9 = r0
                    "0: \n"
                    "prfm pldl1keep, [%1, #128] \n"
                    "ld1 {v6.4s}, [%1] \n"// v6 = _sum0
                    "fmul v12.4s, v8.4s, %12.s[0] \n"
                    "prfm pldl1keep, [%2, #128] \n"
                    "ld1 {v7.4s}, [%2] \n"// v7 = _sum1
                    "fmul v13.4s, v8.4s, %15.s[0] \n"
                    "prfm pldl1keep, [%3, #128] \n"
                    "ld2 {v10.4s, v11.4s}, [%3] \n"// v10
                    "fmla v6.4s, v9.4s, %12.s[1] \n"
                    "ext v14.16b, v8.16b, v10.16b, #4\n"
                    "fmla v7.4s, v9.4s, %15.s[1] \n"
                    "prfm pldl1keep, [%4, #256] \n"
                    "ld2 {v8.4s, v9.4s}, [%4], #32 \n"// r1
                    "fmla v12.4s, v14.4s, %12.s[2] \n"
                    "fmla v13.4s, v14.4s, %15.s[2] \n"
                    "prfm pldl1keep, [%4, #128] \n"
                    "ld2 {v10.4s, v11.4s}, [%4] \n"
                    "fmla v6.4s, v8.4s, %13.s[0] \n"
                    "fmla v7.4s, v8.4s, %16.s[0] \n"
                    "ext v14.16b, v8.16b, v10.16b, #4\n"
                    "fmla v12.4s, v9.4s, %13.s[1] \n"
                    "fmla v13.4s, v9.4s, %16.s[1] \n"
                    "prfm pldl1keep, [%5, #256] \n"
                    "ld2 {v8.4s, v9.4s}, [%5], #32 \n"// r2
                    "fmla v6.4s, v14.4s, %13.s[2] \n"
                    "fmla v7.4s, v14.4s, %16.s[2] \n"
                    "prfm pldl1keep, [%5, #128] \n"
                    "ld2 {v10.4s, v11.4s}, [%5] \n"
                    "fmla v12.4s, v8.4s, %14.s[0] \n"
                    "fmla v13.4s, v8.4s, %17.s[0] \n"
                    "ext v14.16b, v8.16b, v10.16b, #4\n"
                    "fmla v6.4s, v9.4s, %14.s[1] \n"
                    "fmla v7.4s, v9.4s, %17.s[1] \n"
                    "fmla v12.4s, v14.4s, %14.s[2] \n"
                    "fmla v13.4s, v14.4s, %17.s[2] \n"
                    "prfm pldl1keep, [%3, #256] \n"
                    "ld2 {v8.4s, v9.4s}, [%3], #32 \n"// v8 v9 = r0
                    "fadd v6.4s, v6.4s, v12.4s \n"
                    "fadd v7.4s, v7.4s, v13.4s \n"
                    "subs %w0, %w0, #1 \n"
                    "st1 {v6.4s}, [%1], #16 \n"
                    "st1 {v7.4s}, [%2], #16 \n"
                    "bne 0b \n"
                    "sub %3, %3, #32 \n"
                    : "=r"(nn), // %0
                      "=r"(outptr0), // %1
                      "=r"(outptr1), // %2
                      "=r"(r0), // %3
                      "=r"(r1), // %4
                      "=r"(r2) // %5
                    : "0"(nn),
                      "1"(outptr0),
                      "2"(outptr1),
                      "3"(r0),
                      "4"(r1),
                      "5"(r2),
                      "w"(_k00), // %12
                      "w"(_k03), // %13
                      "w"(_k06), // %14
                      "w"(_k10), // %15
                      "w"(_k13), // %16
                      "w"(_k16) // %17
                    : "cc", "memory", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
                );
                }
#else
                // armv7 NEON: same algorithm; vld2 de-interleaves columns,
                // vext builds the +2 shifted vector.
                if (nn > 0)
                {
                asm volatile(
                    "pld [%3, #256] \n"
                    "vld2.f32 {d16-d19}, [%3]! \n"// q8 q9 = r0
                    "0: \n"
                    "pld [%1, #128] \n"
                    "vld1.f32 {d12-d13}, [%1] \n"// q6 = _sum0
                    "vmul.f32 q12, q8, %e12[0] \n"
                    "pld [%2, #128] \n"
                    "vld1.f32 {d14-d15}, [%2] \n"// q7 = _sum1
                    "vmul.f32 q13, q8, %e15[0] \n"
                    "pld [%3, #128] \n"
                    "vld2.f32 {d20-d21}, [%3] \n"// q10
                    "vmla.f32 q6, q9, %e12[1] \n"
                    "vext.32 q11, q8, q10, #1 \n"
                    "vmla.f32 q7, q9, %e15[1] \n"
                    "pld [%4, #256] \n"
                    "vld2.f32 {d16-d19}, [%4]! \n"// r1
                    "vmla.f32 q12, q11, %f12[0] \n"
                    "vmla.f32 q13, q11, %f15[0] \n"
                    "pld [%4, #128] \n"
                    "vld2.f32 {d20-d21}, [%4] \n"
                    "vmla.f32 q6, q8, %e13[0] \n"
                    "vmla.f32 q7, q8, %e16[0] \n"
                    "vext.32 q11, q8, q10, #1 \n"
                    "vmla.f32 q12, q9, %e13[1] \n"
                    "vmla.f32 q13, q9, %e16[1] \n"
                    "pld [%5, #256] \n"
                    "vld2.f32 {d16-d19}, [%5]! \n"// r2
                    "vmla.f32 q6, q11, %f13[0] \n"
                    "vmla.f32 q7, q11, %f16[0] \n"
                    "pld [%5, #128] \n"
                    "vld2.f32 {d20-d21}, [%5] \n"
                    "vmla.f32 q12, q8, %e14[0] \n"
                    "vmla.f32 q13, q8, %e17[0] \n"
                    "vext.32 q11, q8, q10, #1 \n"
                    "vmla.f32 q6, q9, %e14[1] \n"
                    "vmla.f32 q7, q9, %e17[1] \n"
                    "vmla.f32 q12, q11, %f14[0] \n"
                    "vmla.f32 q13, q11, %f17[0] \n"
                    "pld [%3, #256] \n"
                    "vld2.f32 {d16-d19}, [%3]! \n"// q8 q9 = r0
                    "vadd.f32 q6, q6, q12 \n"
                    "vadd.f32 q7, q7, q13 \n"
                    "subs %0, #1 \n"
                    "vst1.f32 {d12-d13}, [%1]! \n"
                    "vst1.f32 {d14-d15}, [%2]! \n"
                    "bne 0b \n"
                    "sub %3, #32 \n"
                    : "=r"(nn), // %0
                      "=r"(outptr0), // %1
                      "=r"(outptr1), // %2
                      "=r"(r0), // %3
                      "=r"(r1), // %4
                      "=r"(r2) // %5
                    : "0"(nn),
                      "1"(outptr0),
                      "2"(outptr1),
                      "3"(r0),
                      "4"(r1),
                      "5"(r2),
                      "w"(_k00), // %12
                      "w"(_k03), // %13
                      "w"(_k06), // %14
                      "w"(_k10), // %15
                      "w"(_k13), // %16
                      "w"(_k16) // %17
                    : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                // Scalar/intrinsic tail: one output pixel per iteration.
                for (; remain>0; remain--)
                {
#if __ARM_NEON
                    // Loads 4 floats per row although only 3 taps are used;
                    // lane 3 of the product is overwritten with the current
                    // output value below, so the horizontal add computes
                    // out += conv. NOTE(review): this reads one float past
                    // the 3 needed per row -- assumes the Mat rows provide
                    // that slack; confirm against Mat's allocation.
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r10 = vld1q_f32(r1);
                    float32x4_t _r20 = vld1q_f32(r2);

                    float32x4_t _sum0 = vmulq_f32(_r00, _k00);
                    float32x4_t _sum1 = vmulq_f32(_r00, _k10);
                    _sum0 = vmlaq_f32(_sum0, _r10, _k03);
                    _sum1 = vmlaq_f32(_sum1, _r10, _k13);
                    _sum0 = vmlaq_f32(_sum0, _r20, _k06);
                    _sum1 = vmlaq_f32(_sum1, _r20, _k16);

                    _sum0 = vsetq_lane_f32(*outptr0, _sum0, 3);
                    _sum1 = vsetq_lane_f32(*outptr1, _sum1, 3);
#if __aarch64__
                    *outptr0 = vaddvq_f32(_sum0);
                    *outptr1 = vaddvq_f32(_sum1);
#else
                    float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
                    float32x2_t _ss1 = vadd_f32(vget_low_f32(_sum1), vget_high_f32(_sum1));
                    float32x2_t _ss01 = vpadd_f32(_ss0, _ss1);
                    *outptr0 = vget_lane_f32(_ss01, 0);
                    *outptr1 = vget_lane_f32(_ss01, 1);
#endif // __aarch64__
#else
                    float sum0 = 0.f;
                    float sum1 = 0.f;

                    sum0 += r0[0] * k0[0];
                    sum0 += r0[1] * k0[1];
                    sum0 += r0[2] * k0[2];
                    sum0 += r1[0] * k0[3];
                    sum0 += r1[1] * k0[4];
                    sum0 += r1[2] * k0[5];
                    sum0 += r2[0] * k0[6];
                    sum0 += r2[1] * k0[7];
                    sum0 += r2[2] * k0[8];

                    sum1 += r0[0] * k1[0];
                    sum1 += r0[1] * k1[1];
                    sum1 += r0[2] * k1[2];
                    sum1 += r1[0] * k1[3];
                    sum1 += r1[1] * k1[4];
                    sum1 += r1[2] * k1[5];
                    sum1 += r2[0] * k1[6];
                    sum1 += r2[1] * k1[7];
                    sum1 += r2[2] * k1[8];

                    *outptr0 += sum0;
                    *outptr1 += sum1;
#endif // __ARM_NEON

                    // Horizontal stride 2.
                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    outptr0++;
                    outptr1++;
                }

                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }

            // Next input channel's 3x3 weights.
            k0 += 9;
            k1 += 9;
        }
    }

    // Remaining single output channel (when outch is odd).
    #pragma omp parallel for
    for (int p=remain_outch_start; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        const float* kernel0 = kernel + p*inch*9;

        for (int q=0; q<inch; q++)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);

            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w*2;

            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

#if __ARM_NEON
            float32x4_t _k0123 = vld1q_f32(k0);
            float32x4_t _k3456 = vld1q_f32(k1);
            float32x4_t _k6789 = vld1q_f32(k2);
#endif // __ARM_NEON

            int i = 0;

            for (; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 2;
                int remain = outw & 3;
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                if (nn > 0)
                {
                asm volatile(
                    "prfm pldl1keep, [%2, #256] \n"
                    "ld2 {v2.4s, v3.4s}, [%2], #32 \n"
                    "0: \n"
                    "prfm pldl1keep, [%1, #128] \n"
                    "ld1 {v0.4s}, [%1] \n"
                    "fmla v0.4s, v2.4s, %10.s[0] \n"
                    "fmul v10.4s, v3.4s, %10.s[1] \n"
                    "prfm pldl1keep, [%2, #256] \n"
                    "ld2 {v8.4s, v9.4s}, [%2] \n"
                    "ext v1.16b, v2.16b, v8.16b, #4 \n"
                    "fmul v11.4s, v1.4s, %10.s[2] \n"
                    "prfm pldl1keep, [%3, #256] \n"
                    "ld2 {v2.4s, v3.4s}, [%3], #32 \n"
                    "fmla v0.4s, v2.4s, %11.s[0] \n"
                    "fmla v10.4s, v3.4s, %11.s[1] \n"
                    "prfm pldl1keep, [%3, #256] \n"
                    "ld2 {v8.4s, v9.4s}, [%3] \n"
                    "ext v1.16b, v2.16b, v8.16b, #4 \n"
                    "fmla v11.4s, v1.4s, %11.s[2] \n"
                    "prfm pldl1keep, [%4, #256] \n"
                    "ld2 {v2.4s, v3.4s}, [%4], #32 \n"
                    "fmla v0.4s, v2.4s, %12.s[0] \n"
                    "fmla v10.4s, v3.4s, %12.s[1] \n"
                    "prfm pldl1keep, [%4, #256] \n"
                    "ld2 {v8.4s, v9.4s}, [%4] \n"
                    "ext v1.16b, v2.16b, v8.16b, #4 \n"
                    "fmla v11.4s, v1.4s, %12.s[2] \n"
                    "prfm pldl1keep, [%2, #256] \n"
                    "ld2 {v2.4s, v3.4s}, [%2], #32 \n"
                    "fadd v0.4s, v0.4s, v10.4s \n"
                    "fadd v0.4s, v0.4s, v11.4s \n"
                    "subs %w0, %w0, #1 \n"
                    "st1 {v0.4s}, [%1], #16 \n"
                    "bne 0b \n"
                    "sub %2, %2, #32 \n"
                    : "=r"(nn), // %0
                      "=r"(outptr), // %1
                      "=r"(r0), // %2
                      "=r"(r1), // %3
                      "=r"(r2) // %4
                    : "0"(nn),
                      "1"(outptr),
                      "2"(r0),
                      "3"(r1),
                      "4"(r2),
                      "w"(_k0123), // %10
                      "w"(_k3456), // %11
                      "w"(_k6789) // %12
                    : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
                );
                }
#else
                if (nn > 0)
                {
                asm volatile(
                    "pld [%2, #256] \n"
                    "vld2.f32 {d4-d7}, [%2]! \n"
                    "0: \n"
                    "pld [%1, #128] \n"
                    "vld1.f32 {d0-d1}, [%1] \n"
                    "vmla.f32 q0, q2, %e10[0] \n"
                    "vmul.f32 q10, q3, %e10[1] \n"
                    "pld [%2, #128] \n"
                    "vld2.f32 {d16-d17}, [%2] \n"
                    "vext.32 q1, q2, q8, #1 \n"
                    "vmul.f32 q11, q1, %f10[0] \n"
                    "pld [%3, #256] \n"
                    "vld2.f32 {d4-d7}, [%3]! \n"
                    "vmla.f32 q0, q2, %e11[0] \n"
                    "vmla.f32 q10, q3, %e11[1] \n"
                    "pld [%3, #128] \n"
                    "vld2.f32 {d16-d17}, [%3] \n"
                    "vext.32 q1, q2, q8, #1 \n"
                    "vmla.f32 q11, q1, %f11[0] \n"
                    "pld [%4, #256] \n"
                    "vld2.f32 {d4-d7}, [%4]! \n"
                    "vmla.f32 q0, q2, %e12[0] \n"
                    "vmla.f32 q10, q3, %e12[1] \n"
                    "pld [%4, #128] \n"
                    "vld2.f32 {d16-d17}, [%4] \n"
                    "vext.32 q1, q2, q8, #1 \n"
                    "vmla.f32 q11, q1, %f12[0] \n"
                    "pld [%2, #256] \n"
                    "vld2.f32 {d4-d7}, [%2]! \n"
                    "vadd.f32 q0, q0, q10 \n"
                    "vadd.f32 q0, q0, q11 \n"
                    "subs %0, #1 \n"
                    "vst1.f32 {d0-d1}, [%1]! \n"
                    "bne 0b \n"
                    "sub %2, #32 \n"
                    : "=r"(nn), // %0
                      "=r"(outptr), // %1
                      "=r"(r0), // %2
                      "=r"(r1), // %3
                      "=r"(r2) // %4
                    : "0"(nn),
                      "1"(outptr),
                      "2"(r0),
                      "3"(r1),
                      "4"(r2),
                      "w"(_k0123), // %10
                      "w"(_k3456), // %11
                      "w"(_k6789) // %12
                    : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                for (; remain>0; remain--)
                {
#if __ARM_NEON
                    // Same lane-3 accumulator trick as the two-channel loop.
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r10 = vld1q_f32(r1);
                    float32x4_t _r20 = vld1q_f32(r2);

                    float32x4_t _sum = vmulq_f32(_r00, _k0123);
                    _sum = vmlaq_f32(_sum, _r10, _k3456);
                    _sum = vmlaq_f32(_sum, _r20, _k6789);

                    _sum = vsetq_lane_f32(*outptr, _sum, 3);
#if __aarch64__
                    *outptr = vaddvq_f32(_sum);
#else
                    float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
                    _ss = vpadd_f32(_ss, _ss);

                    *outptr = vget_lane_f32(_ss, 0);
#endif // __aarch64__
#else
                    float sum = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];

                    *outptr += sum;
#endif // __ARM_NEON

                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    outptr++;
                }

                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }

            kernel0 += 9;
        }
    }
}
/* ===== file: extra_data.c ===== */
// // Created by sachetto on 01/10/17. // #include "../monodomain/config/extra_data_config.h" #include "../libraries_common/config_helpers.h" SET_EXTRA_DATA(set_extra_data_for_fibrosis_sphere) { uint32_t num_active_cells = the_grid->num_active_cells; *extra_data_size = sizeof(float)*(num_active_cells+1); float *fibs = (float*)malloc(*extra_data_size); struct cell_node ** ac = the_grid->active_cells; real atpi = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, atpi, config, "atpi"); float plain_center = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, plain_center, config, "plain_center"); float border_zone_size = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, border_zone_size, config, "border_zone_size"); float sphere_radius = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, sphere_radius, config, "sphere_radius"); fibs[0] = atpi; int i; #pragma omp parallel for for (i = 0; i < num_active_cells; i++) { if(ac[i]->fibrotic) { fibs[i+1] = 0.0; } else if(ac[i]->border_zone) { float center_x = (float)ac[i]->center_x; float center_y = (float)ac[i]->center_y; //TODO: Maybe we want the distance from the Z as well //float center_z = (float)ac[i]->center_z; float distanceFromCenter = sqrtf((center_x - plain_center)*(center_x - plain_center) + (center_y - plain_center)*(center_y - plain_center)); distanceFromCenter = (distanceFromCenter - sphere_radius)/border_zone_size; fibs[i+1] = distanceFromCenter; } else { fibs[i+1] = 1.0; } } return (void*)fibs; } SET_EXTRA_DATA(set_extra_data_for_fibrosis_plain) { uint32_t num_active_cells = the_grid->num_active_cells; *extra_data_size = sizeof(float)*(num_active_cells+1); float *fibs = (float*)calloc(num_active_cells+1, sizeof(float)); real atpi = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, atpi, config, "atpi"); fibs[0] = atpi; return (void*)fibs; } SET_EXTRA_DATA(set_extra_data_for_no_fibrosis) { uint32_t num_active_cells = the_grid->num_active_cells; *extra_data_size = 
sizeof(float)*(num_active_cells+1); float *fibs = (float*)malloc(*extra_data_size); real atpi = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,atpi, config, "atpi"); fibs[0] = atpi; for(int i = 1; i < num_active_cells+1; i++) { fibs[i] = 1.0; } return (void*)fibs; } SET_EXTRA_DATA(set_extra_data_for_fibrosis) { uint32_t num_active_cells = the_grid->num_active_cells; struct cell_node ** ac = the_grid->active_cells; *extra_data_size = sizeof(real)*(num_active_cells+1); real *fibs = (real*)malloc(*extra_data_size); fibs[0] = 6.8; for(int i = 0; i < num_active_cells; i++) { if(ac[i]->fibrotic) { fibs[i+1] = 0.0; } else { fibs[i+1] = 1.0; } } return (void*)fibs; } SET_EXTRA_DATA(set_extra_data_for_human_full_mesh) { uint32_t num_active_cells = the_grid->num_active_cells; *extra_data_size = sizeof(float)*(num_active_cells+1); float *fibs = (float*)malloc(*extra_data_size); struct cell_node ** ac = the_grid->active_cells; real atpi = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,atpi, config, "atpi"); fibs[0] = atpi; double small_scar_center_x = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, small_scar_center_x, config, "small_scar_center_x"); double small_scar_center_y = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, small_scar_center_y, config, "small_scar_center_y"); double small_scar_center_z = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, small_scar_center_z, config, "small_scar_center_z"); double big_scar_center_x = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, big_scar_center_x, config, "big_scar_center_x"); double big_scar_center_y = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, big_scar_center_y, config, "big_scar_center_y"); double big_scar_center_z = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, big_scar_center_z, config, "big_scar_center_z"); double bz_size_big = 0; double bz_size_small = 0; double dist_big = 0; double dist_small = 0; int i; //#pragma omp parallel for private(dist_big, 
dist_small) reduction(max: bz_size_big, bz_size_small) #pragma omp parallel for private(dist_big, dist_small) for (i = 0; i < num_active_cells; i++) { if (ac[i]->active && ac[i]->border_zone) { double center_x = ac[i]->center_x; double center_y = ac[i]->center_y; double center_z = ac[i]->center_z; if(ac[i]->scar_type == 'b') { dist_big = sqrt((center_x - big_scar_center_x) * (center_x - big_scar_center_x) + (center_y - big_scar_center_y) * (center_y - big_scar_center_y) + (center_z - big_scar_center_z) * (center_z - big_scar_center_z)); #pragma omp critical(big) if (dist_big > bz_size_big) { bz_size_big = dist_big; } } else if(ac[i]->scar_type == 's') { dist_small = sqrt((center_x - small_scar_center_x) * (center_x - small_scar_center_x) + (center_y - small_scar_center_y) * (center_y - small_scar_center_y) + (center_z - small_scar_center_z) * (center_z - small_scar_center_z)); #pragma omp critical(small) if (dist_small > bz_size_small) { bz_size_small = dist_small; } } } } #pragma omp parallel for private(dist_big, dist_small) for (i = 0; i < num_active_cells; i++) { if (ac[i]->active) { if(ac[i]->fibrotic) { fibs[i+1] = 0.0f; } else if (ac[i]->border_zone) { double center_x = ac[i]->center_x; double center_y = ac[i]->center_y; double center_z = ac[i]->center_z; if(ac[i]->scar_type == 'b') { dist_big = sqrt((center_x - big_scar_center_x) * (center_x - big_scar_center_x) + (center_y - big_scar_center_y) * (center_y - big_scar_center_y) + (center_z - big_scar_center_z) * (center_z - big_scar_center_z)); fibs[i+1] = (real)(dist_big / bz_size_big); } else if(ac[i]->scar_type == 's') { dist_small = sqrt((center_x - small_scar_center_x) * (center_x - small_scar_center_x) + (center_y - small_scar_center_y) * (center_y - small_scar_center_y) + (center_z - small_scar_center_z) * (center_z - small_scar_center_z)); fibs[i+1] = (real)(dist_small / bz_size_small); } else { fibs[i+1] = 1.0f; } } } } return (void*)fibs; } SET_EXTRA_DATA(set_extra_data_for_scar_wedge) { uint32_t 
num_active_cells = the_grid->num_active_cells; *extra_data_size = sizeof(float)*(num_active_cells+1); float *fibs = (float*)malloc(*extra_data_size); struct cell_node ** ac = the_grid->active_cells; real atpi = 0.0; GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real,atpi, config, "atpi"); fibs[0] = atpi; char *scar_size; GET_PARAMETER_VALUE_CHAR_OR_REPORT_ERROR (scar_size, config, "scar_size"); uint8_t size_code; if(strcmp(scar_size, "big") == 0) { size_code = 0; } else if(strcmp(scar_size, "small") == 0) { size_code = 1; } else { printf("Function: set_extra_data_for_scar_edge, invalid scar size %s. Valid sizes are big or small. Exiting!\n", scar_size); exit(EXIT_FAILURE); } double scar_center_x; double scar_center_y; double scar_center_z; ////Fibrosis configuration //BIG SCAR if(size_code == 0) { scar_center_x = 95300; scar_center_y = 81600; scar_center_z = 36800; } else { scar_center_x = 52469; scar_center_y = 83225; scar_center_z = 24791; } double bz_size = 0.0; double dist; int i; // #pragma omp parallel for private(dist) reduction(max: bz_size) #pragma omp parallel for private(dist) for (i = 0; i < num_active_cells; i++) { if(ac[i]->active) { if(ac[i]->border_zone) { double center_x = ac[i]->center_x; double center_y = ac[i]->center_y; double center_z = ac[i]->center_z; dist = sqrt((center_x - scar_center_x)*(center_x - scar_center_x) + (center_y - scar_center_y)*(center_y - scar_center_y) + (center_z - scar_center_z)*(center_z - scar_center_z) ); #pragma omp critical if(dist > bz_size) { bz_size = dist; } } } } #pragma omp parallel for private(dist) for (i = 0; i < num_active_cells; i++) { if(ac[i]->active) { if(ac[i]->fibrotic) { fibs[i+1] = 0.0; } else if(ac[i]->border_zone) { double center_x = ac[i]->center_x; double center_y = ac[i]->center_y; double center_z = ac[i]->center_z; dist = sqrt((center_x - scar_center_x)*(center_x - scar_center_x) + (center_y - scar_center_y)*(center_y - scar_center_y) + (center_z - scar_center_z)*(center_z - scar_center_z) ); 
dist = dist/bz_size; fibs[i+1] = (real)dist; } else { fibs[i+1] = 1.0f; } } } return (void*)fibs; }
/* ===== file: GB_unaryop__identity_uint64_uint8.c ===== */
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__identity_uint64_uint8
// op(A') function: GB_tran__identity_uint64_uint8

// C type: uint64_t
// A type: uint8_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    uint64_t z = (uint64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__identity_uint64_uint8
(
    uint64_t *restrict Cx,
    const uint8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // one entry per iteration; entries are independent, so a static schedule
    // divides the work evenly across nthreads
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__identity_uint64_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose loops are instantiated from the included template file,
    // using the GB_* macros defined above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ===== file: gt_mm.c ===== */
/*
 * PROJECT: GEM-Tools library
 * FILE: gt_mm.c
 * DATE: 01/02/2013
 * AUTHOR(S): Santiago Marco-Sola <santiagomsola@gmail.com>
 * DESCRIPTION:
 *   Memory Manager provides memory allocation functions. Different types of memory are supported.
 *     - UnitMemory
 *         Allocate relative small chunks of memory relying on the regular memory manager,
 *         usually malloc/calloc using a BuddySystem (Helper functions)
 *     - BulkMemory
 *         Allocate big chunks of memory and resort to disk if memory is not enough
 *     - SlabMemory
 *         Relative big amounts of objects allocated all at once (like the LINUX slab allocator)
 *         Objects of a certain type are ready to go inside the slab, thus reducing
 *         the overhead of malloc/setup/free cycles along the program
 *     - PoolMemory
 *         Pool of Slabs as gather all slabs needed along a program
 *         The goal is to minimize all memory malloc/setup/free overhead
 *         Offers thread safe allocation of slabs as to balance memory consumption across threads
 */
// TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO
//   1.- TCMalloc : Thread-Caching Malloc
//   2.- nedmalloc()
//   4.- madvise() / readahead() / posix_fadvise()
// TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO

#include "gt_mm.h"

// In some environments MAP_HUGETLB can be undefined
#ifndef MAP_HUGETLB
#define MAP_HUGETLB 0
#endif
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS 0 // TODO: disable for mac compatibility
#endif
#ifndef MAP_POPULATE
#define MAP_POPULATE 0 // TODO: disable for mac compatibility
#endif

/*
 * Memory Alignment Utils
 */
const uint64_t gt_mm_mem_alignment_bits_mask[] = { // Check Memory Alignment Bits (Masks)
  0x0000000000000001lu, /* 16 bits aligned ( 2B / 2^4) */
  0x0000000000000003lu, /* 32 bits aligned ( 4B / 2^5) */
  0x0000000000000007lu, /* 64 bits aligned ( 8B / 2^6) */
  0x000000000000000Flu, /* 128 bits aligned ( 16B / 2^7) */
  0x000000000000001Flu, /* 256 bits aligned ( 32B / 2^8) */
  0x000000000000003Flu, /* 512 bits aligned ( 64B / 2^9) */
  0x000000000000007Flu, /* 1024 bits aligned ( 1KB / 2^10) */
  0x00000000000000FFlu, /* 2048 bits aligned ( 2KB / 2^11) */
  0x00000000000001FFlu, /* 4096 bits aligned ( 4KB / 2^12) RegularPage Size*/
  0x00000000000003FFlu, /* 8192 bits aligned ( 8KB / 2^13) */
  0x00000000000007FFlu, /* 16384 bits aligned (16KB / 2^14) */
  0x0000000000000FFFlu, /* 32768 bits aligned (32KB / 2^15) */
  0x000000000003FFFFlu, /* n/a bits aligned ( 2MB / 2^21) RegularPageHugeTLB Size */
  0x000000000007FFFFlu, /* n/a bits aligned ( 4MB / 2^22) */
};
/*
 * MMap Constants/Values
 */
// NOTE(review): presumably indexed by gt_mm_mode (read-only / write-only /
// read-write, matching the gt_mm_mmap_mode usage below) -- confirm in gt_mm.h.
int gt_mm_proc_flags[3] = { PROT_READ, PROT_READ|PROT_WRITE, PROT_READ|PROT_WRITE };
int gt_mm_mmap_mode[3] = { MAP_PRIVATE, MAP_SHARED, MAP_SHARED };
/*
 * Temporal folder path
 */
char* gt_mm_temp_folder_path = GT_MM_DEFAULT_TMP_FOLDER;

// Returns the folder used to back temporary (disk-based) bulk allocations.
GT_INLINE char* gt_mm_get_tmp_folder() {
  return gt_mm_temp_folder_path;
}
// Overrides the temporary folder; the caller keeps ownership of the string
// (only the pointer is stored, no copy is made).
GT_INLINE void gt_mm_set_tmp_folder(char* const tmp_folder_path) {
  GT_NULL_CHECK(tmp_folder_path);
  gt_mm_temp_folder_path = tmp_folder_path;
}
/*
 * UnitMemory
 *   Allocate relative small chunks of memory relying on the regular memory manager,
 *   usually malloc/calloc using a BuddySystem (Helper functions)
 */
// Allocates num_elements*size_element bytes; uses calloc when zero-init is
// requested (init_mem && init_value==0), otherwise malloc + optional memset.
// Aborts via gt_cond_fatal_error on allocation failure.
GT_INLINE void* gt_malloc_(uint64_t const num_elements,const uint64_t size_element,const bool init_mem,const int init_value) {
  const uint64_t total_memory = num_elements*size_element;
  void* allocated_mem;
  if (gt_expect_false(init_mem && init_value==0)) {
    allocated_mem = calloc(num_elements,size_element);
    gt_cond_fatal_error(!allocated_mem,MEM_CALLOC_INFO,num_elements,size_element);
  } else {
    allocated_mem = malloc(total_memory);
    gt_cond_fatal_error(!allocated_mem,MEM_ALLOC_INFO,total_memory);
  }
  if (gt_expect_false(init_mem && init_value!=0)) memset(allocated_mem,init_value,total_memory);
  //GT_MM_PRINT_MEM_ALIGMENT(allocated_mem); // Debug
  return allocated_mem;
}
// Same as gt_malloc_ but returns NULL on failure instead of aborting.
GT_INLINE void* gt_malloc_nothrow(uint64_t const num_elements,const uint64_t size_element,const bool init_mem,const int init_value) {
  const uint64_t total_memory = num_elements*size_element;
  void* const allocated_mem = (gt_expect_false(init_mem && init_value==0)) ?
      calloc(num_elements,size_element) : malloc(total_memory);
  if (!allocated_mem) return NULL;
  if (gt_expect_false(init_mem && init_value!=0)) memset(allocated_mem,init_value,total_memory);
  //GT_MM_PRINT_MEM_ALIGMENT(allocated_mem); // Debug
  return allocated_mem;
}
// Releases memory obtained from gt_malloc_/gt_malloc_nothrow.
GT_INLINE void gt_free(void* mem_addr) {
  free(mem_addr);
}
/*
 * BulkMemory
 *   Allocate big chunks of memory and resort to disk if memory is not enough
 */
// Tries a heap allocation first; when that fails, falls back to a temporary
// file-backed mmap (gt_mm_bulk_mmalloc_temp) and emits a warning.
GT_INLINE gt_mm* gt_mm_bulk_malloc(const uint64_t num_bytes,const bool init_mem) {
  GT_ZERO_CHECK(num_bytes);
  void* memory = gt_malloc_nothrow(num_bytes,1,init_mem,0);
  if (gt_expect_true(memory!=NULL)) { // Fits in HEAP
    gt_mm* const mm = gt_alloc(gt_mm);
    mm->memory = memory;
    mm->mem_type = GT_MM_HEAP;
    mm->mode = GT_MM_READ_WRITE;
    mm->allocated = num_bytes;
    mm->cursor = mm->memory;
    GT_MM_PRINT_MEM_ALIGMENT(mm->memory); // Debug
    return mm;
  } else { // Resort to MMAP in disk
    gt_warn(MEM_ALLOC_DISK,num_bytes);
    return gt_mm_bulk_mmalloc_temp(num_bytes);
  }
}
// Anonymous private mmap (optionally with huge pages); zero-filled by the
// kernel. Aborts on mmap failure.
GT_INLINE gt_mm* gt_mm_bulk_mmalloc(const uint64_t num_bytes,const bool use_huge_pages) {
  GT_ZERO_CHECK(num_bytes);
  // Allocate handler
  gt_mm* const mm = gt_alloc(gt_mm);
  /*
   * MMap memory (anonymous)
   *   - MAP_PRIVATE => Fits in RAM+SWAP
   *   - MAP_ANONYMOUS => The mapping is not backed by any file; its contents are initialized to zero.
   *       Map against /dev/zero (Allocate anonymous memory segment, without open)
   *   - MAP_NORESERVE to explicitly enable swap space overcommitting. (echo 1 > /proc/sys/vm/overcommit_memory)
   *       Useful when you wish to map a file larger than the amount of free memory
   *       available on your system (RAM+SWAP).
   *         In this case, the lazy swap space reservation may cause the program
   *         to consume all the free RAM and swap on the system, eventually
   *         triggering the OOM killer (Linux) or causing a SIGSEGV.
   */
  int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
  if (use_huge_pages) flags |= MAP_HUGETLB;
  mm->memory = mmap(0,num_bytes,PROT_READ|PROT_WRITE,flags,-1,0);
  gt_cond_fatal_error__perror(mm->memory==MAP_FAILED,MEM_ALLOC_MMAP_FAIL,num_bytes);
  mm->cursor = mm->memory;
  // Set MM
  mm->mem_type = GT_MM_MMAPPED;
  mm->mode = GT_MM_READ_WRITE;
  mm->allocated = num_bytes;
  mm->fd = -1; // no backing file descriptor for anonymous mappings
  mm->file_name = NULL;
  // GT_MM_PRINT_MEM_ALIGMENT(mm->memory); // Debug
  return mm;
}
// Maps an existing file into memory; the mapping mode (private/shared,
// protection bits) is derived from @mode. Aborts on stat/open/mmap failure.
GT_INLINE gt_mm* gt_mm_bulk_mmap_file(char* const file_name,const gt_mm_mode mode,const bool populate_page_tables) {
  GT_NULL_CHECK(file_name);
  // Allocate handler
  gt_mm* const mm = gt_alloc(gt_mm);
  // Retrieve input file info
  struct stat stat_info;
  gt_cond_fatal_error__perror(stat(file_name,&stat_info)==-1,FILE_STAT,file_name);
  // Open file descriptor
  mm->fd = open(file_name,gt_fm_oflags[mode],S_IRUSR);
  gt_cond_fatal_error__perror(mm->fd==-1,FILE_OPEN,file_name);
  /*
   * Mmap file
   *   - @mode::
   *       GT_MM_READ_ONLY => MAP_PRIVATE (no copy-on-write as it's not allowed)
   *       GT_MM_WRITE_ONLY or GT_MM_READ_WRITE => MAP_SHARED
   *   - MAP_POPULATE (since Linux 2.5.46)
   *       Populate (prefault) page tables for a mapping. For a file mapping, this causes
   *       read-ahead on the file. Later accesses to the mapping will not be blocked by page faults.
   *       MAP_POPULATE is only supported for private mappings since Linux 2.6.23.
   */
  int flags = gt_mm_mmap_mode[mode];
  if (populate_page_tables) flags |= MAP_POPULATE;
  mm->memory = mmap(0,stat_info.st_size,gt_mm_proc_flags[mode],flags,mm->fd,0);
  gt_cond_fatal_error__perror(mm->memory==MAP_FAILED,SYS_MMAP_FILE,file_name);
  mm->cursor = mm->memory;
  // Set MM
  mm->mem_type = GT_MM_MMAPPED;
  mm->mode = mode;
  mm->allocated = stat_info.st_size;
  mm->file_name = gt_strndup(file_name,gt_strlen(file_name)); // owned copy, kept for diagnostics
  // GT_MM_PRINT_MEM_ALIGMENT(mm->memory); // Debug
  return mm;
}
// Bulk allocation backed by an unlinked temporary file in the tmp folder;
// the file is pre-sized by seeking to num_bytes-1 and writing one byte.
GT_INLINE gt_mm* gt_mm_bulk_mmalloc_temp(const uint64_t num_bytes) {
  GT_ZERO_CHECK(num_bytes);
  // Allocate handler
  gt_mm* const mm = gt_alloc(gt_mm);
  // TemporalMemory (backed by a file)
  mm->file_name = gt_calloc(strlen(gt_mm_get_tmp_folder())+22,char,true);
  sprintf(mm->file_name,"%sgt_mmalloc_temp_XXXXXX",gt_mm_get_tmp_folder());
  // Create temporary file
  mm->fd = mkstemp(mm->file_name);
  gt_cond_fatal_error__perror(mm->fd==-1,SYS_MKSTEMP,mm->file_name);
  gt_cond_fatal_error__perror(unlink(mm->file_name),SYS_HANDLE_TMP); // Make it temporary
  // Set the size of the temporary file (disk allocation)
  gt_cond_fatal_error__perror(lseek(mm->fd,num_bytes-1,SEEK_SET)==-1,SYS_HANDLE_TMP);
  gt_cond_fatal_error__perror(write(mm->fd,"",1)<=0,SYS_HANDLE_TMP);
  gt_cond_fatal_error__perror(lseek(mm->fd,0,SEEK_SET)==-1,SYS_HANDLE_TMP);
  /*
   * Mmap file.
   *   - MAP_SHARED as we the mapping will be reflected on disk (no copy-on-write)
   *     As such, the kernel knows it can always free up memory by doing writeback.
   *   - MAP_NORESERVE to explicitly enable swap space overcommitting. (echo 1 > /proc/sys/vm/overcommit_memory)
   *     Useful when you wish to map a file larger than the amount of free memory
   *     available on your system (RAM+SWAP).
   *       In this case, the lazy swap space reservation may cause the program
   *       to consume all the free RAM and swap on the system, eventually
   *       triggering the OOM killer (Linux) or causing a SIGSEGV.
*/
  mm->memory = mmap(NULL,num_bytes,PROT_READ|PROT_WRITE,MAP_SHARED|MAP_NORESERVE,mm->fd,0);
  gt_cond_fatal_error__perror(mm->memory==MAP_FAILED,MEM_ALLOC_MMAP_DISK_FAIL,num_bytes,mm->file_name);
  mm->cursor = mm->memory;
  // Set MM
  mm->mem_type = GT_MM_MMAPPED;
  mm->mode = GT_MM_READ_WRITE;
  mm->allocated = num_bytes;
  // GT_MM_PRINT_MEM_ALIGMENT(mm->memory); // Debug
  return mm;
}
/* Resizing of bulk memory is not implemented yet; always aborts */
GT_INLINE void gt_mm_realloc(gt_mm* const mm,const uint64_t num_bytes) {
  GT_MM_CHECK(mm);
  gt_fatal_error(NOT_IMPLEMENTED); // TODO
  // const uint64_t current_cursor_pos = gt_mm_get_current_position(mm);
  // if (mm->mem_type==GT_MM_HEAP) { // Heap BulkMemory
  //   if (num_bytes > mm->allocated) {
  //     mm->memory = realloc(mm->memory);
  //     gt_cond_fatal_error(mm->memory==NULL,MEM_REALLOC);
  //     mm->cursor = mm->memory + current_cursor_pos;
  //     mm->allocated = num_bytes;
  //   }
  // } else { // MMapped BulkMemory
  //   if (mm->fd!=-1) {
  //     if (mm->tmp_file) { // TemporalMemory
  //       mremap(mm->memory,mm->allocated,num_bytes,MREMAP_MAYMOVE);
  //     } else { // File mapped
  //       //
  //     }
  //   } else { // Anonymous
  //     //
  //   }
  // }
}
/*
 * Releases a bulk-memory handler: frees heap memory, or unmaps the mmapped
 * region (closing the backing fd, if any), then frees the handler itself.
 */
GT_INLINE void gt_mm_free(gt_mm* const mm) {
  GT_MM_CHECK(mm);
  if (mm->mem_type==GT_MM_HEAP) { // Heap BulkMemory
    gt_free(mm->memory);
  } else { // MMapped BulkMemory
    gt_cond_fatal_error__perror(munmap(mm->memory,mm->allocated)==-1,SYS_UNMAP);
    if (mm->fd!=-1) {
      gt_cond_fatal_error__perror(close(mm->fd),SYS_HANDLE_TMP);
    }
  }
  gt_cfree(mm->file_name);
  gt_free(mm);
}
/*
 * Loads the whole of file_name into freshly heap-allocated memory.
 * The read is parallelized when num_threads>1 and the file is large enough.
 */
GT_INLINE gt_mm* gt_mm_bulk_load_file(char* const file_name,const uint64_t num_threads) {
  GT_NULL_CHECK(file_name);
  // Allocate handler
  gt_mm* const mm = gt_alloc(gt_mm);
  // Retrieve input file info
  struct stat stat_info;
  gt_cond_fatal_error__perror(stat(file_name,&stat_info)==-1,FILE_STAT,file_name);
  // Allocate memory to dump the content of the file
  mm->memory = gt_malloc(stat_info.st_size);
  gt_cond_fatal_error(!mm->memory,MEM_ALLOC_INFO,stat_info.st_size);
  mm->mem_type = GT_MM_HEAP;
  mm->mode = GT_MM_READ_ONLY;
  mm->allocated = stat_info.st_size;
  mm->cursor = mm->memory;
  GT_MM_PRINT_MEM_ALIGMENT(mm->memory); // Debug
  // Read the file and dump it into memory
  if (num_threads>1 && (stat_info.st_size > num_threads*8)) {
    gt_fm_bulk_read_file_parallel(file_name,mm->memory,0,0,num_threads);
  } else {
    gt_fm_bulk_read_file(file_name,mm->memory,0,0);
  }
  return mm;
}
/*
 * Same as gt_mm_bulk_load_file() but the destination buffer is an anonymous
 * mmap instead of heap memory.
 */
GT_INLINE gt_mm* gt_mm_bulk_mload_file(char* const file_name,const uint64_t num_threads) {
  GT_NULL_CHECK(file_name);
  // Retrieve input file info
  struct stat stat_info;
  gt_cond_fatal_error__perror(stat(file_name,&stat_info)==-1,FILE_STAT,file_name);
  // Allocate memory to dump the content of the file
  gt_mm* const mm = gt_mm_bulk_mmalloc(stat_info.st_size,false);
  // Read the file and dump it into memory
  if (num_threads>1 && (stat_info.st_size > num_threads*8)) {
    gt_fm_bulk_read_file_parallel(file_name,mm->memory,0,0,num_threads);
  } else {
    gt_fm_bulk_read_file(file_name,mm->memory,0,0);
  }
  return mm;
}
/*
 * Accessors
 */
/* Current read/write position (the cursor) */
GT_INLINE void* gt_mm_get_mem(gt_mm* const mm) {
  GT_MM_CHECK(mm);
  return mm->cursor;
}
/* Base address of the managed segment */
GT_INLINE void* gt_mm_get_base_mem(gt_mm* const mm) {
  GT_MM_CHECK(mm);
  return mm->memory;
}
GT_INLINE gt_mm_mode gt_mm_get_mode(gt_mm* const mm) {
  GT_MM_CHECK(mm);
  return mm->mode;
}
/* Changing the access mode after creation is not implemented; always aborts */
GT_INLINE void gt_mm_set_mode(gt_mm* const mm,const gt_mm_mode mode) {
  GT_MM_CHECK(mm);
  gt_fatal_error(NOT_IMPLEMENTED); // TODO
}
/*
 * Seek functions
 */
/* Offset of the cursor from the segment base */
GT_INLINE uint64_t gt_mm_get_current_position(gt_mm* const mm) {
  GT_MM_CHECK(mm);
  return (mm->cursor-mm->memory);
}
/* True when the cursor has reached (or passed) the end of the segment */
GT_INLINE bool gt_mm_eom(gt_mm* const mm) {
  GT_MM_CHECK(mm);
  return gt_mm_get_current_position(mm) >= mm->allocated;
}
/* Absolute seek; byte_position must fall strictly inside the segment */
GT_INLINE void gt_mm_seek(gt_mm* const mm,const uint64_t byte_position) {
  GT_MM_CHECK(mm);
  gt_fatal_check(byte_position>=mm->allocated,MEM_CURSOR_SEEK,byte_position);
  mm->cursor = mm->memory + byte_position;
}
GT_INLINE void gt_mm_skip_forward(gt_mm* const mm,const uint64_t num_bytes) {
  GT_MM_CHECK(mm);
  mm->cursor += num_bytes;
  GT_MM_CHECK_SEGMENT(mm);
}
GT_INLINE void gt_mm_skip_backward(gt_mm*
const mm,const uint64_t num_bytes) { GT_MM_CHECK(mm); mm->cursor -= num_bytes; GT_MM_CHECK_SEGMENT(mm); } GT_INLINE void gt_mm_skip_uint64(gt_mm* const mm) { GT_MM_CHECK(mm); mm->cursor += 8; GT_MM_CHECK_SEGMENT(mm); } GT_INLINE void gt_mm_skip_uint32(gt_mm* const mm) { GT_MM_CHECK(mm); mm->cursor += 4; GT_MM_CHECK_SEGMENT(mm); } GT_INLINE void gt_mm_skip_uint16(gt_mm* const mm) { GT_MM_CHECK(mm); mm->cursor += 2; GT_MM_CHECK_SEGMENT(mm); } GT_INLINE void gt_mm_skip_uint8(gt_mm* const mm) { GT_MM_CHECK(mm); mm->cursor += 1; GT_MM_CHECK_SEGMENT(mm); } GT_INLINE void gt_mm_skip_align(gt_mm* const mm,const uint64_t num_bytes) { GT_MM_CHECK(mm); GT_ZERO_CHECK(num_bytes); if (gt_expect_true(num_bytes > 1)) { mm->cursor = mm->cursor+(num_bytes-1); mm->cursor = mm->cursor-(GT_MM_CAST_ADDR(mm->cursor)%(num_bytes-1)); GT_MM_CHECK_SEGMENT(mm); gt_fatal_check(GT_MM_CAST_ADDR(mm->cursor)%(num_bytes-1)!=0,MEM_ALG_FAILED); } } GT_INLINE void gt_mm_skip_align_16(gt_mm* const mm) { GT_MM_CHECK(mm); mm->cursor = GT_MM_CAST_PTR( (GT_MM_CAST_ADDR(mm->cursor)+GT_MM_MEM_ALIGNED_MASK_16b) & (~GT_MM_MEM_ALIGNED_MASK_16b)); GT_MM_CHECK_SEGMENT(mm); GT_MM_CHECK_ALIGNMENT(mm,16b); } GT_INLINE void gt_mm_skip_align_32(gt_mm* const mm) { GT_MM_CHECK(mm); mm->cursor = GT_MM_CAST_PTR( (GT_MM_CAST_ADDR(mm->cursor)+GT_MM_MEM_ALIGNED_MASK_32b) & (~GT_MM_MEM_ALIGNED_MASK_32b)); GT_MM_CHECK_SEGMENT(mm); GT_MM_CHECK_ALIGNMENT(mm,32b); } GT_INLINE void gt_mm_skip_align_64(gt_mm* const mm) { GT_MM_CHECK(mm); mm->cursor = GT_MM_CAST_PTR( (GT_MM_CAST_ADDR(mm->cursor)+GT_MM_MEM_ALIGNED_MASK_64b) & (~GT_MM_MEM_ALIGNED_MASK_64b)); GT_MM_CHECK_SEGMENT(mm); GT_MM_CHECK_ALIGNMENT(mm,64b); } GT_INLINE void gt_mm_skip_align_128(gt_mm* const mm) { GT_MM_CHECK(mm); mm->cursor = GT_MM_CAST_PTR( (GT_MM_CAST_ADDR(mm->cursor)+GT_MM_MEM_ALIGNED_MASK_128b) & (~GT_MM_MEM_ALIGNED_MASK_128b)); GT_MM_CHECK_SEGMENT(mm); GT_MM_CHECK_ALIGNMENT(mm,128b); } GT_INLINE void gt_mm_skip_align_512(gt_mm* const mm) { GT_MM_CHECK(mm); 
mm->cursor = GT_MM_CAST_PTR( (GT_MM_CAST_ADDR(mm->cursor)+GT_MM_MEM_ALIGNED_MASK_512b) & (~GT_MM_MEM_ALIGNED_MASK_512b)); GT_MM_CHECK_SEGMENT(mm); GT_MM_CHECK_ALIGNMENT(mm,512b); } GT_INLINE void gt_mm_skip_align_1024(gt_mm* const mm) { GT_MM_CHECK(mm); mm->cursor = GT_MM_CAST_PTR( (GT_MM_CAST_ADDR(mm->cursor)+GT_MM_MEM_ALIGNED_MASK_1KB) & (~GT_MM_MEM_ALIGNED_MASK_1KB)); GT_MM_CHECK_SEGMENT(mm); GT_MM_CHECK_ALIGNMENT(mm,1KB); } GT_INLINE void gt_mm_skip_align_4KB(gt_mm* const mm) { GT_MM_CHECK(mm); mm->cursor = GT_MM_CAST_PTR( (GT_MM_CAST_ADDR(mm->cursor)+GT_MM_MEM_ALIGNED_MASK_4KB) & (~GT_MM_MEM_ALIGNED_MASK_4KB)); GT_MM_CHECK_SEGMENT(mm); GT_MM_CHECK_ALIGNMENT(mm,4KB); } GT_INLINE void gt_mm_skip_align_mempage(gt_mm* const mm) { GT_MM_CHECK(mm); uint64_t sz = sysconf(_SC_PAGESIZE); gt_mm_skip_align(mm,sz); } /* * Read functions */ GT_INLINE uint64_t gt_mm_read_uint64(gt_mm* const mm) { GT_MM_CHECK(mm); GT_MM_CHECK_SEGMENT(mm); const uint64_t data = *((uint64_t*)mm->cursor); mm->cursor += 8; return data; } GT_INLINE uint32_t gt_mm_read_uint32(gt_mm* const mm) { GT_MM_CHECK(mm); GT_MM_CHECK_SEGMENT(mm); const uint32_t data = *((uint32_t*)mm->cursor); mm->cursor += 4; return data; } GT_INLINE uint16_t gt_mm_read_uint16(gt_mm* const mm) { GT_MM_CHECK(mm); GT_MM_CHECK_SEGMENT(mm); const uint16_t data = *((uint16_t*)mm->cursor); mm->cursor += 2; return data; } GT_INLINE uint8_t gt_mm_read_uint8(gt_mm* const mm) { GT_MM_CHECK(mm); GT_MM_CHECK_SEGMENT(mm); const uint8_t data = *((uint8_t*)mm->cursor); mm->cursor += 1; return data; } GT_INLINE void* gt_mm_read_mem(gt_mm* const mm,const uint64_t num_bytes) { GT_MM_CHECK(mm); GT_MM_CHECK_SEGMENT(mm); void* const current_cursor = mm->cursor; mm->cursor += num_bytes; return current_cursor; } GT_INLINE void gt_mm_copy_mem(gt_mm* const mm,void* const dst,const uint64_t num_bytes) { GT_MM_CHECK(mm); GT_MM_CHECK_SEGMENT(mm); memcpy(dst,mm->cursor,num_bytes); mm->cursor += num_bytes; } GT_INLINE void gt_mm_copy_mem_parallel( 
gt_mm* const mm,void* const dst,const uint64_t num_bytes,const uint64_t num_threads) { GT_MM_CHECK(mm); GT_MM_CHECK_SEGMENT(mm); // Calculate size of each chunk const uint64_t chunk_size = num_bytes/num_threads; // num_bytes > num_threads #ifdef HAVE_OPENMP //#pragma omp parallel num_threads(num_threads) #endif { // Calculate offsets #ifdef HAVE_OPENMP const uint64_t tid = omp_get_thread_num(); #else const uint64_t tid = 0; #endif const uint64_t offset = tid*chunk_size; const uint64_t size = (tid < (num_threads-1)) ? chunk_size : num_bytes-chunk_size*tid; // Copy the chunk memcpy(dst+offset,mm->cursor+offset,size); } mm->cursor += num_bytes; } /* * Write functions */ GT_INLINE void gt_mm_write_uint64(gt_mm* const mm,const uint64_t data) { GT_MM_CHECK(mm); GT_MM_CHECK_SEGMENT(mm); *((uint64_t*)mm->cursor) = data; mm->cursor += 8; } GT_INLINE void gt_mm_write_uint32(gt_mm* const mm,const uint32_t data) { GT_MM_CHECK(mm); GT_MM_CHECK_SEGMENT(mm); *((uint32_t*)mm->cursor) = data; mm->cursor += 4; } GT_INLINE void gt_mm_write_uint16(gt_mm* const mm,const uint16_t data) { GT_MM_CHECK(mm); GT_MM_CHECK_SEGMENT(mm); *((uint16_t*)mm->cursor) = data; mm->cursor += 2; } GT_INLINE void gt_mm_write_uint8(gt_mm* const mm,const uint8_t data) { GT_MM_CHECK(mm); GT_MM_CHECK_SEGMENT(mm); *((uint8_t*)mm->cursor) = data; mm->cursor += 1; } GT_INLINE void gt_mm_write_mem(gt_mm* const mm,void* const src,const uint64_t num_bytes) { GT_MM_CHECK(mm); GT_MM_CHECK_SEGMENT(mm); // TODO } /* * SlabMemory * Relative big amounts of objects allocated all at once (like the LINUX slab allocator) * Objects of a certain type are ready to go inside the slab, thus reducing * the overhead of malloc/setup/free cycles along the program */ //GT_INLINE void* gt_mm_slab_mmalloc(gt_mm_slab* const slab,const uint64_t num_elements); // TODO //GT_INLINE void gt_mm_slab_mfree(gt_mm_slab* const slab,void* mem_addr,const uint64_t num_elements); // TODO
openmp_wrapper.h
/*!
 * Copyright (c) 2017 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */
#ifndef LIGHTGBM_OPENMP_WRAPPER_H_
#define LIGHTGBM_OPENMP_WRAPPER_H_
#ifdef _OPENMP
#include <LightGBM/utils/log.h>
#include <omp.h>
#include <exception>
#include <memory>
#include <mutex>
#include <stdexcept>
#include <vector>
// Returns the number of threads an OpenMP parallel region would use,
// measured from inside a throwaway parallel region.
inline int OMP_NUM_THREADS() {
  int ret = 1;
#pragma omp parallel
#pragma omp master
  { ret = omp_get_num_threads(); }
  return ret;
}
// Captures the first exception thrown inside an OpenMP worker so it can be
// re-thrown on the calling thread (letting an exception escape a parallel
// region is undefined behavior).
class ThreadExceptionHelper {
 public:
  ThreadExceptionHelper() { ex_ptr_ = nullptr; }
  // Re-throws any captured exception when the helper leaves scope.
  ~ThreadExceptionHelper() { ReThrow(); }
  void ReThrow() {
    if (ex_ptr_ != nullptr) { std::rethrow_exception(ex_ptr_); }
  }
  void CaptureException() {
    // only catch first exception.
    if (ex_ptr_ != nullptr) { return; }
    std::unique_lock<std::mutex> guard(lock_);
    if (ex_ptr_ != nullptr) { return; }
    ex_ptr_ = std::current_exception();
  }
 private:
  std::exception_ptr ex_ptr_;  // first exception seen by any worker
  std::mutex lock_;            // guards the write to ex_ptr_
};
// Wrap each OpenMP loop body with these to funnel exceptions to the master.
#define OMP_INIT_EX() ThreadExceptionHelper omp_except_helper
#define OMP_LOOP_EX_BEGIN() try {
#define OMP_LOOP_EX_END()                 \
  }                                       \
  catch (std::exception & ex) {           \
    Log::Warning(ex.what());              \
    omp_except_helper.CaptureException(); \
  }                                       \
  catch (...) {                           \
    omp_except_helper.CaptureException(); \
  }
#define OMP_THROW_EX() omp_except_helper.ReThrow()
#else
/*
 * To be compatible with OpenMP, define a nothrow macro which is used by gcc
 * openmp, but not by clang.
 * See also https://github.com/dmlc/dmlc-core/blob/3106c1cbdcc9fc9ef3a2c1d2196a7a6f6616c13d/include/dmlc/omp.h#L14
 */
#if defined(__clang__)
#undef __GOMP_NOTHROW
#define __GOMP_NOTHROW
#elif defined(__cplusplus)
#undef __GOMP_NOTHROW
#define __GOMP_NOTHROW throw()
#else
#undef __GOMP_NOTHROW
#define __GOMP_NOTHROW __attribute__((__nothrow__))
#endif
#ifdef _MSC_VER
#pragma warning(disable : 4068)  // disable unknown pragma warning
#endif
#ifdef __cplusplus
extern "C" {
#endif
/** Fall here if no OPENMP support, so just simulate a single thread running.
    All #pragma omp should be ignored by the compiler **/
inline void omp_set_num_threads(int) __GOMP_NOTHROW {}  // NOLINT (no cast done here)
inline int omp_get_num_threads() __GOMP_NOTHROW {return 1;}
inline int omp_get_max_threads() __GOMP_NOTHROW {return 1;}
inline int omp_get_thread_num() __GOMP_NOTHROW {return 0;}
inline int OMP_NUM_THREADS() __GOMP_NOTHROW { return 1; }
#ifdef __cplusplus
}  // extern "C"
#endif
// With no OpenMP the exception-capture machinery degenerates to no-ops.
#define OMP_INIT_EX()
#define OMP_LOOP_EX_BEGIN()
#define OMP_LOOP_EX_END()
#define OMP_THROW_EX()
#endif
#endif /* LIGHTGBM_OPENMP_WRAPPER_H_ */
decorate.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD EEEEE CCCC OOO RRRR AAA TTTTT EEEEE % % D D E C O O R R A A T E % % D D EEE C O O RRRR AAAAA T EEE % % D D E C O O R R A A T E % % DDDD EEEEE CCCC OOO R R A A T EEEEE % % % % % % MagickCore Image Decoration Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/cache-view.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/decorate.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" /* Define declarations. 
*/ #define AccentuateModulate ScaleCharToQuantum(80) #define HighlightModulate ScaleCharToQuantum(125) #define ShadowModulate ScaleCharToQuantum(135) #define DepthModulate ScaleCharToQuantum(185) #define TroughModulate ScaleCharToQuantum(110) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B o r d e r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BorderImage() surrounds the image with a border of the color defined by % the bordercolor member of the image structure. The width and height % of the border are defined by the corresponding members of the border_info % structure. % % The format of the BorderImage method is: % % Image *BorderImage(const Image *image,const RectangleInfo *border_info, % const CompositeOperator compose,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o border_info: define the width and height of the border. % % o compose: the composite operator. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *BorderImage(const Image *image, const RectangleInfo *border_info,const CompositeOperator compose, ExceptionInfo *exception) { Image *border_image, *clone_image; FrameInfo frame_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(border_info != (RectangleInfo *) NULL); frame_info.width=image->columns+(border_info->width << 1); frame_info.height=image->rows+(border_info->height << 1); frame_info.x=(ssize_t) border_info->width; frame_info.y=(ssize_t) border_info->height; frame_info.inner_bevel=0; frame_info.outer_bevel=0; clone_image=CloneImage(image,0,0,MagickTrue,exception); if (clone_image == (Image *) NULL) return((Image *) NULL); clone_image->alpha_color=image->border_color; border_image=FrameImage(clone_image,&frame_info,compose,exception); clone_image=DestroyImage(clone_image); if (border_image != (Image *) NULL) border_image->alpha_color=image->alpha_color; return(border_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F r a m e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FrameImage() adds a simulated three-dimensional border around the image. % The color of the border is defined by the alpha_color member of image. % Members width and height of frame_info specify the border width of the % vertical and horizontal sides of the frame. Members inner and outer % indicate the width of the inner and outer shadows of the frame. % % The format of the FrameImage method is: % % Image *FrameImage(const Image *image,const FrameInfo *frame_info, % const CompositeOperator compose,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o frame_info: Define the width and height of the frame and its bevels. 
% % o compose: the composite operator. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *FrameImage(const Image *image,const FrameInfo *frame_info, const CompositeOperator compose,ExceptionInfo *exception) { #define FrameImageTag "Frame/Image" CacheView *image_view, *frame_view; Image *frame_image; MagickBooleanType status; MagickOffsetType progress; PixelInfo accentuate, highlight, matte, shadow, trough; register ssize_t x; size_t bevel_width, height, width; ssize_t y; /* Check frame geometry. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(frame_info != (FrameInfo *) NULL); if ((frame_info->outer_bevel < 0) || (frame_info->inner_bevel < 0)) ThrowImageException(OptionError,"FrameIsLessThanImageSize"); bevel_width=(size_t) (frame_info->outer_bevel+frame_info->inner_bevel); x=(ssize_t) frame_info->width-frame_info->x-bevel_width; y=(ssize_t) frame_info->height-frame_info->y-bevel_width; if ((x < (ssize_t) image->columns) | (y < (ssize_t) image->rows)) ThrowImageException(OptionError,"FrameIsLessThanImageSize"); /* Initialize framed image attributes. 
*/
  frame_image=CloneImage(image,frame_info->width,frame_info->height,MagickTrue,
    exception);
  if (frame_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(frame_image,DirectClass,exception) == MagickFalse)
    {
      frame_image=DestroyImage(frame_image);
      return((Image *) NULL);
    }
  if ((IsPixelInfoGray(&frame_image->border_color) == MagickFalse) &&
      (IsGrayColorspace(frame_image->colorspace) != MagickFalse))
    (void) SetImageColorspace(frame_image,sRGBColorspace,exception);
  if ((frame_image->alpha_color.alpha_trait != UndefinedPixelTrait) &&
      (frame_image->alpha_trait == UndefinedPixelTrait))
    (void) SetImageAlpha(frame_image,OpaqueAlpha,exception);
  frame_image->page=image->page;
  if ((image->page.width != 0) && (image->page.height != 0))
    {
      frame_image->page.width+=frame_image->columns-image->columns;
      frame_image->page.height+=frame_image->rows-image->rows;
    }
  /*
    Initialize 3D effects color: each shade is the matte (frame) color
    blended toward white (accentuate/highlight) or toward black
    (shadow/trough) by the corresponding *Modulate weight.
  */
  matte=image->alpha_color;
  accentuate=matte;
  accentuate.red=(double) (QuantumScale*((QuantumRange-
    AccentuateModulate)*matte.red+(QuantumRange*AccentuateModulate)));
  accentuate.green=(double) (QuantumScale*((QuantumRange-
    AccentuateModulate)*matte.green+(QuantumRange*AccentuateModulate)));
  accentuate.blue=(double) (QuantumScale*((QuantumRange-
    AccentuateModulate)*matte.blue+(QuantumRange*AccentuateModulate)));
  accentuate.black=(double) (QuantumScale*((QuantumRange-
    AccentuateModulate)*matte.black+(QuantumRange*AccentuateModulate)));
  accentuate.alpha=matte.alpha;
  highlight=matte;
  highlight.red=(double) (QuantumScale*((QuantumRange-
    HighlightModulate)*matte.red+(QuantumRange*HighlightModulate)));
  highlight.green=(double) (QuantumScale*((QuantumRange-
    HighlightModulate)*matte.green+(QuantumRange*HighlightModulate)));
  highlight.blue=(double) (QuantumScale*((QuantumRange-
    HighlightModulate)*matte.blue+(QuantumRange*HighlightModulate)));
  highlight.black=(double) (QuantumScale*((QuantumRange-
    HighlightModulate)*matte.black+(QuantumRange*HighlightModulate)));
  highlight.alpha=matte.alpha;
  shadow=matte;
  shadow.red=QuantumScale*matte.red*ShadowModulate;
  shadow.green=QuantumScale*matte.green*ShadowModulate;
  shadow.blue=QuantumScale*matte.blue*ShadowModulate;
  shadow.black=QuantumScale*matte.black*ShadowModulate;
  shadow.alpha=matte.alpha;
  trough=matte;
  trough.red=QuantumScale*matte.red*TroughModulate;
  trough.green=QuantumScale*matte.green*TroughModulate;
  trough.blue=QuantumScale*matte.blue*TroughModulate;
  trough.black=QuantumScale*matte.black*TroughModulate;
  trough.alpha=matte.alpha;
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  frame_view=AcquireAuthenticCacheView(frame_image,exception);
  height=(size_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
    frame_info->inner_bevel);
  if (height != 0)
    {
      register ssize_t
        x;

      register Quantum
        *magick_restrict q;

      /*
        Draw top of ornamental border.
      */
      q=QueueCacheViewAuthenticPixels(frame_view,0,0,frame_image->columns,
        height,exception);
      if (q != (Quantum *) NULL)
        {
          /*
            Draw top of ornamental border: outer bevel, plain frame rows,
            then the inner bevel — pixels are written in raster order, so q
            only ever advances.
          */
          for (y=0; y < (ssize_t) frame_info->outer_bevel; y++)
          {
            for (x=0; x < (ssize_t) (frame_image->columns-y); x++)
            {
              if (x < y)
                SetPixelViaPixelInfo(frame_image,&highlight,q);
              else
                SetPixelViaPixelInfo(frame_image,&accentuate,q);
              q+=GetPixelChannels(frame_image);
            }
            for ( ; x < (ssize_t) frame_image->columns; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          for (y=0; y < (ssize_t) (frame_info->y-bevel_width); y++)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            width=frame_image->columns-2*frame_info->outer_bevel;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          for (y=0; y < (ssize_t) frame_info->inner_bevel; y++)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            width=image->columns+((size_t) frame_info->inner_bevel << 1)-
              y;
            for (x=0; x < (ssize_t) width; x++)
            {
              if (x < y)
                SetPixelViaPixelInfo(frame_image,&shadow,q);
              else
                SetPixelViaPixelInfo(frame_image,&trough,q);
              q+=GetPixelChannels(frame_image);
            }
            for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            width=frame_info->width-frame_info->x-image->columns-bevel_width;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          (void)
SyncCacheViewAuthenticPixels(frame_view,exception);
        }
    }
  /*
    Draw sides of ornamental border: one scanline of the framed image per
    iteration (left frame, interior copied from the source, right frame).
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,frame_image,1,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    size_t
      width;

    /*
      Initialize scanline with matte color.
    */
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(frame_view,0,frame_info->y+y,
      frame_image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
    {
      SetPixelViaPixelInfo(frame_image,&highlight,q);
      q+=GetPixelChannels(frame_image);
    }
    for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
    {
      SetPixelViaPixelInfo(frame_image,&matte,q);
      q+=GetPixelChannels(frame_image);
    }
    for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
    {
      SetPixelViaPixelInfo(frame_image,&shadow,q);
      q+=GetPixelChannels(frame_image);
    }
    /*
      Set frame interior pixels.
    */
    {
      register const Quantum
        *p;

      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        register ssize_t
          i;

        if (GetPixelWriteMask(image,q) == 0)
          {
            SetPixelBackgoundColor(frame_image,q);
            p+=GetPixelChannels(image);
            q+=GetPixelChannels(frame_image);
            continue;
          }
        /* Copy every channel defined in both images, then the core RGBA */
        for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
        {
          PixelChannel channel=GetPixelChannelChannel(image,i);
          PixelTrait traits=GetPixelChannelTraits(image,channel);
          PixelTrait frame_traits=GetPixelChannelTraits(frame_image,channel);
          if ((traits == UndefinedPixelTrait) ||
              (frame_traits == UndefinedPixelTrait))
            continue;
          SetPixelChannel(frame_image,channel,p[i],q);
        }
        SetPixelRed(frame_image,GetPixelRed(image,p),q);
        SetPixelGreen(frame_image,GetPixelGreen(image,p),q);
        SetPixelBlue(frame_image,GetPixelBlue(image,p),q);
        SetPixelAlpha(frame_image,GetPixelAlpha(image,p),q);
        p+=GetPixelChannels(image);
        q+=GetPixelChannels(frame_image);
      }
    }
    for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
    {
      SetPixelViaPixelInfo(frame_image,&highlight,q);
      q+=GetPixelChannels(frame_image);
    }
    width=frame_info->width-frame_info->x-image->columns-bevel_width;
    for (x=0; x < (ssize_t) width; x++)
    {
      SetPixelViaPixelInfo(frame_image,&matte,q);
      q+=GetPixelChannels(frame_image);
    }
    for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
    {
      SetPixelViaPixelInfo(frame_image,&shadow,q);
      q+=GetPixelChannels(frame_image);
    }
    if (SyncCacheViewAuthenticPixels(frame_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FrameImage)
#endif
        proceed=SetImageProgress(image,FrameImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  height=(size_t) (frame_info->inner_bevel+frame_info->height-
    frame_info->y-image->rows-bevel_width+frame_info->outer_bevel);
  if (height != 0)
    {
      register ssize_t
        x;

      register Quantum
        *magick_restrict q;

      /*
        Draw bottom of ornamental border.
      */
      q=QueueCacheViewAuthenticPixels(frame_view,0,(ssize_t) (frame_image->rows-
        height),frame_image->columns,height,exception);
      if (q != (Quantum *) NULL)
        {
          /*
            Draw bottom of ornamental border: inner bevel, plain frame rows,
            then the outer bevel (mirror image of the top).
          */
          for (y=frame_info->inner_bevel-1; y >= 0; y--)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < y; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
            for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
            {
              if (x >= (ssize_t) (image->columns+2*frame_info->inner_bevel-y))
                SetPixelViaPixelInfo(frame_image,&highlight,q);
              else
                SetPixelViaPixelInfo(frame_image,&accentuate,q);
              q+=GetPixelChannels(frame_image);
            }
            width=frame_info->width-frame_info->x-image->columns-bevel_width;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          height=frame_info->height-frame_info->y-image->rows-bevel_width;
          for (y=0; y < (ssize_t) height; y++)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            width=frame_image->columns-2*frame_info->outer_bevel;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          for (y=frame_info->outer_bevel-1; y >= 0; y--)
          {
            for (x=0; x < y; x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            for ( ; x < (ssize_t) frame_image->columns; x++)
            {
              if (x >= (ssize_t) (frame_image->columns-y))
                SetPixelViaPixelInfo(frame_image,&shadow,q);
              else
                SetPixelViaPixelInfo(frame_image,&trough,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          (void) SyncCacheViewAuthenticPixels(frame_view,exception);
        }
    }
  frame_view=DestroyCacheView(frame_view);
  image_view=DestroyCacheView(image_view);
  /* Finally composite the source image into the frame's interior */
  x=(ssize_t) (frame_info->outer_bevel+(frame_info->x-bevel_width)+
    frame_info->inner_bevel);
  y=(ssize_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
    frame_info->inner_bevel);
  if (status != MagickFalse)
    status=CompositeImage(frame_image,image,compose,MagickTrue,x,y,
      exception);
  if (status == MagickFalse)
    frame_image=DestroyImage(frame_image);
  return(frame_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R a i s e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RaiseImage() creates a simulated three-dimensional button-like effect
%  by lightening and darkening the edges of the image.  Members width and
%  height of raise_info define the width of the vertical and horizontal
%  edge of the effect.
%
%  The format of the RaiseImage method is:
%
%      MagickBooleanType RaiseImage(const Image *image,
%        const RectangleInfo *raise_info,const MagickBooleanType raise,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o raise_info: Define the width and height of the raise area.
%
%    o raise: A value other than zero creates a 3-D raise effect,
%      otherwise it has a lowered effect.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType RaiseImage(Image *image, const RectangleInfo *raise_info,const MagickBooleanType raise, ExceptionInfo *exception) { #define AccentuateFactor ScaleCharToQuantum(135) #define HighlightFactor ScaleCharToQuantum(190) #define ShadowFactor ScaleCharToQuantum(190) #define RaiseImageTag "Raise/Image" #define TroughFactor ScaleCharToQuantum(135) CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; Quantum foreground, background; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(raise_info != (RectangleInfo *) NULL); if ((image->columns <= (raise_info->width << 1)) || (image->rows <= (raise_info->height << 1))) ThrowBinaryException(OptionError,"ImageSizeMustExceedBevelWidth", image->filename); foreground=QuantumRange; background=(Quantum) 0; if (raise == MagickFalse) { foreground=(Quantum) 0; background=QuantumRange; } if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); /* Raise image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,1,1) #endif for (y=0; y < (ssize_t) raise_info->height; y++) { register ssize_t i, x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < y; x++) { if (GetPixelWriteMask(image,q) == 0) { q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampToQuantum(QuantumScale*((double) q[i]*HighlightFactor+(double) foreground*(QuantumRange-HighlightFactor))); } q+=GetPixelChannels(image); } for ( ; x < (ssize_t) (image->columns-y); x++) { if (GetPixelWriteMask(image,q) == 0) { q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampToQuantum(QuantumScale*((double) q[i]*AccentuateFactor+ (double) foreground*(QuantumRange-AccentuateFactor))); } q+=GetPixelChannels(image); } for ( ; x < (ssize_t) image->columns; x++) { if (GetPixelWriteMask(image,q) == 0) { q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampToQuantum(QuantumScale*((double) q[i]*ShadowFactor+(double) background*(QuantumRange-ShadowFactor))); } q+=GetPixelChannels(image); } if 
(SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RaiseImage) #endif proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,1,1) #endif for (y=(ssize_t) raise_info->height; y < (ssize_t) (image->rows-raise_info->height); y++) { register ssize_t i, x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) raise_info->width; x++) { if (GetPixelWriteMask(image,q) == 0) { q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampToQuantum(QuantumScale*((double) q[i]*HighlightFactor+(double) foreground*(QuantumRange-HighlightFactor))); } q+=GetPixelChannels(image); } for ( ; x < (ssize_t) (image->columns-raise_info->width); x++) q+=GetPixelChannels(image); for ( ; x < (ssize_t) image->columns; x++) { if (GetPixelWriteMask(image,q) == 0) { q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampToQuantum(QuantumScale*((double) q[i]*ShadowFactor+(double) background*(QuantumRange-ShadowFactor))); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) 
status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RaiseImage) #endif proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,1,1) #endif for (y=(ssize_t) (image->rows-raise_info->height); y < (ssize_t) image->rows; y++) { register ssize_t i, x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) (image->rows-y); x++) { if (GetPixelWriteMask(image,q) == 0) { q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampToQuantum(QuantumScale*((double) q[i]*HighlightFactor+(double) foreground*(QuantumRange-HighlightFactor))); } q+=GetPixelChannels(image); } for ( ; x < (ssize_t) (image->columns-(image->rows-y)); x++) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampToQuantum(QuantumScale*((double) q[i]*TroughFactor+ (double) background*(QuantumRange-TroughFactor))); } q+=GetPixelChannels(image); } for ( ; x < (ssize_t) image->columns; x++) { if (GetPixelWriteMask(image,q) == 0) { q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); if 
((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampToQuantum(QuantumScale*((double) q[i]*ShadowFactor+(double) background*(QuantumRange-ShadowFactor))); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RaiseImage) #endif proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); }
3d25pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 8; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=Nt-1;t1++) { lbp=ceild(t1+1,2); ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(ceild(t1,2),ceild(8*t2-Nz+5,8));t3<=min(floord(4*Nt+Ny-9,8),floord(4*t1+Ny-1,8));t3++) { for (t4=max(max(ceild(t1-510,512),ceild(8*t2-Nz-2035,2048)),ceild(8*t3-Ny-2035,2048));t4<=min(min(floord(4*Nt+Nx-9,2048),floord(4*t1+Nx-1,2048)),floord(8*t3+Nx-5,2048));t4++) { for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(2048*t4-Nx+5,4)),t1);t5<=min(min(min(2*t3,Nt-1),t1+1),512*t4+510);t5++) { for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) { lbv=max(2048*t4,4*t5+4); ubv=min(2048*t4+2047,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ 
(-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
LAGraphX_bc_batch3.c
//------------------------------------------------------------------------------
// LAGraphX_bc_batch: Brandes' algorithm for computing betweenness centrality
//------------------------------------------------------------------------------

/*
    LAGraph:  graph algorithms based on GraphBLAS

    Copyright 2019 LAGraph Contributors.

    (see Contributors.txt for a full list of Contributors; see
    ContributionInstructions.txt for information on how you can Contribute to
    this project).

    All Rights Reserved.

    NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH
    CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
    AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR
    PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF
    THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH
    RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.

    Released under a BSD license, please see the LICENSE file distributed with
    this Software or contact permission@sei.cmu.edu for full terms.

    Created, in part, with funding and support from the United States
    Government.  (see Acknowledgments.txt file).

    This program includes and/or can make use of certain third party source
    code, object code, documentation and other files ("Third Party Software").
    See LICENSE file for more details.
*/

//------------------------------------------------------------------------------

// LAGraph_bc_batch: Batch algorithm for computing betweenness centrality.
// Contributed by Scott Kolodziej and Tim Davis, Texas A&M University.
// Adapted from GraphBLAS C API Spec, Appendix B.4.

// LAGraph_bc_batch computes an approximation of the betweenness centrality of
// all nodes in a graph using a batched version of Brandes' algorithm.
//                                 ____
//                                 \         sigma(s,t | i)
//    Betweenness centrality  =     \       ----------------
//          of node i               /          sigma(s,t)
//                                 /___
//                               s != i != t
//
// Where sigma(s,t) is the total number of shortest paths from node s to
// node t, and sigma(s,t | i) is the total number of shortest paths from
// node s to node t that pass through node i.
//
// Note that the true betweenness centrality requires computing shortest paths
// from all nodes s to all nodes t (or all-pairs shortest paths), which can be
// expensive to compute. By using a reasonably sized subset of source nodes, an
// approximation can be made.
//
// LAGraph_bc_batch performs simultaneous breadth-first searches of the entire
// graph starting at a given set of source nodes. This pass discovers all
// shortest paths from the source nodes to all other nodes in the graph. After
// the BFS is complete, the number of shortest paths that pass through a given
// node is tallied by reversing the traversal. From this, the (approximate)
// betweenness centrality is computed.

// A_matrix represents the graph.  It must be square, and can be unsymmetric.
// Self-edges are OK.

#define DO_PULL 0

// If DO_PULL is #defined, the algorithm does each GrB_mxm twice: once with the
// "pull" (dot product method internally in GraphBLAS) and once with the "push"
// (the saxpy method in GraphBLAS).  Then it pretends to have a perfect
// heuristic by taking the min of both times to compute the "pushpull" time.
// This is of course unrealistic, but it's a lower bound on any heuristic that
// tries to select the correct method at each step.
//------------------------------------------------------------------------------ #include "LAGraph_internal.h" #define LAGRAPH_FREE_WORK \ { \ GrB_free(&frontier); \ GrB_free(&paths); \ LAGraph_free(paths_dense); \ LAGraph_free(bc_update_dense); \ GrB_free(&t1); \ GrB_free(&t2); \ GrB_free (&pull_descriptor) ; \ if (S_array != NULL) \ { \ for (int64_t i = 0; i < n ; i++) \ { \ if (S_array [i] == NULL) break ; \ GrB_free (&(S_array [i])) ; \ } \ free (S_array) ; \ } \ } #define LAGRAPH_FREE_ALL \ { \ LAGRAPH_FREE_WORK; \ GrB_free (centrality); \ } // TODO add LAGraph_PLUS_SECOND_FP* to LAGraph.h. #if 0 // select FP64 #define REAL_t double #define LAGr_REAL_TYPE GrB_FP64 #define LAGr_PLUS_SECOND_REAL GxB_PLUS_SECOND_FP64 #else // select FP32 #define REAL_t float #define LAGr_REAL_TYPE GrB_FP32 #define LAGr_PLUS_SECOND_REAL GxB_PLUS_SECOND_FP32 #endif GrB_Info LAGraphX_bc_batch3 // betweeness centrality, batch algorithm ( GrB_Vector *centrality, // centrality(i): betweeness centrality of node i const GrB_Matrix A_matrix, // input graph const GrB_Matrix AT_matrix, // A' const GrB_Index *sources, // source vertices for shortest paths int32_t num_sources, // number of source vertices (length of s) double timing [3] ) { GrB_Info info ; GrB_Descriptor pull_descriptor = NULL ; // Frontier matrix // Stores # of shortest paths to vertices at current BFS depth GrB_Matrix frontier = NULL; // Array of BFS search matrices // S_array[i] is a matrix that stores the depth at which each vertex is // first seen thus far in each BFS at the current depth i. Each column // corresponds to a BFS traversal starting from a source node. GrB_Matrix *S_array = NULL; // Paths matrix holds the number of shortest paths for each node and // starting node discovered so far. Starts out sparse and becomes denser. GrB_Matrix paths = NULL; REAL_t *paths_dense = NULL; // Update matrix for betweenness centrality, values for each node for // each starting node. Treated as dense for efficiency. 
REAL_t *bc_update_dense = NULL; GrB_Matrix t1 = NULL; GrB_Matrix t2 = NULL; GrB_Index n; // Number of nodes in the graph (*centrality) = NULL; //-------------------------------------------------------------------------- double tic [2]; LAGraph_tic (tic); GxB_Format_Value a_fmt, at_fmt ; LAGRAPH_OK (GxB_get (A_matrix, GxB_FORMAT, &a_fmt )) ; LAGRAPH_OK (GxB_get (AT_matrix, GxB_FORMAT, &at_fmt)) ; if (a_fmt != GxB_BY_ROW || at_fmt != GxB_BY_ROW) { LAGRAPH_ERROR ("A and AT must be stored by row", GrB_INVALID_VALUE) ; } int nthreads ; GxB_get (GxB_NTHREADS, &nthreads) ; GrB_Index* Sp = NULL; GrB_Index* Si = NULL; REAL_t *Sx = NULL; GrB_Index* Tp = NULL; GrB_Index* Ti = NULL; REAL_t *Tx = NULL; GrB_Index num_rows, num_cols, nnz, anz ; GrB_Type type ; LAGr_Matrix_nrows (&n, A_matrix) ; // # of nodes LAGr_Matrix_nvals (&anz, A_matrix) ; // # of edges double d = ((double) anz) / ((double) n) ; // average degree // descriptor for "pull" method: LAGraph_desc_oocr + dot GrB_Descriptor_new (&pull_descriptor) ; GrB_Descriptor_set (pull_descriptor, GrB_MASK, GrB_SCMP) ; GrB_Descriptor_set (pull_descriptor, GrB_OUTP, GrB_REPLACE) ; GrB_Descriptor_set (pull_descriptor, GxB_AxB_METHOD, GxB_AxB_DOT) ; // Initialize paths to source vertices with ones // paths[s[i],i]=1 for i=[0, ..., num_sources) if (sources == GrB_ALL) { num_sources = n; // TODO delete this option } const GrB_Index nnz_dense = n * num_sources ; double ns = num_sources ; LAGr_Matrix_new(&paths, LAGr_REAL_TYPE, n, num_sources); GxB_set(paths, GxB_FORMAT, GxB_BY_COL); // make paths dense LAGr_assign (paths, NULL, NULL, 0, GrB_ALL, n, GrB_ALL, num_sources, NULL) ; // Force resolution of pending tuples GrB_Index ignore; GrB_Matrix_nvals(&ignore, paths); if (sources == GrB_ALL) { // TODO: remove this option for (GrB_Index i = 0; i < num_sources; ++i) { // paths [i,i] = 1 LAGr_Matrix_setElement(paths, (REAL_t) 1, i, i); } } else { for (GrB_Index i = 0; i < num_sources; ++i) { // paths [s[i],i] = 1 
LAGr_Matrix_setElement(paths, (REAL_t) 1, sources[i], i); } } // Create frontier matrix and initialize to outgoing nodes from // all source nodes LAGr_Matrix_new(&frontier, LAGr_REAL_TYPE, n, num_sources); GxB_set(frontier, GxB_FORMAT, GxB_BY_COL); // AT = A' // frontier <!paths> = AT (:,sources) // TODO: use mxm, so A_matrix values are ignored. LAGr_extract(frontier, paths, GrB_NULL, A_matrix, GrB_ALL, n, sources, num_sources, LAGraph_desc_tocr); // Allocate memory for the array of S matrices S_array = (GrB_Matrix*) LAGraph_calloc (n, sizeof(GrB_Matrix)); if (S_array == NULL) { // out of memory LAGRAPH_FREE_ALL; return (GrB_OUT_OF_MEMORY); } //-------------------------------------------------------------------------- // Breadth-first search stage //-------------------------------------------------------------------------- GrB_Index frontier_size = 0 ; // size of current frontier LAGr_Matrix_nvals (&frontier_size, frontier) ; GrB_Index seen = 0 ; // total # of nodes seen * (# sources) double time_1 = LAGraph_toc (tic) ; // printf (" init: %g\n", time_1) ; double phase1_other_time = 0 ; double phase1_allpush_time = 0 ; double phase1_allpull_time = 0 ; double phase1_pushpull_time = 0 ; int nth = LAGRAPH_MIN (nthreads, num_sources) ; int64_t depth = 0; // Initial BFS depth do { LAGraph_tic (tic); // printf ("depth: %g\n", (double) depth) ; // Create the current search matrix - one column for each source/BFS LAGr_Matrix_new (&(S_array[depth]), GrB_BOOL, n, num_sources) ; GxB_set (S_array[depth], GxB_FORMAT, GxB_BY_COL) ; // Copy the current frontier to S LAGr_apply (S_array[depth], GrB_NULL, GrB_NULL, GrB_IDENTITY_BOOL, frontier, GrB_NULL) ; //=== Accumulate path counts: paths += frontier ======================== // Export paths int64_t paths_nonempty ; GxB_Matrix_export_CSC(&paths, &type, &num_rows, &num_cols, &nnz, &paths_nonempty, &Sp, &Si, (void **) &Sx, GrB_NULL); // Export frontier int64_t frontier_nonempty ; GxB_Matrix_export_CSC(&frontier, &type, &num_rows, 
&num_cols, &nnz, &frontier_nonempty, &Tp, &Ti, (void **) &Tx, GrB_NULL); // Use frontier pattern to update dense paths #pragma omp parallel for num_threads(nth) for (int64_t col = 0; col < num_sources; col++) { for (GrB_Index p = Tp[col]; p < Tp[col+1]; p++) { GrB_Index row = Ti[p]; Sx [col * n + row] += Tx [p]; } } // Import frontier GxB_Matrix_import_CSC(&frontier, LAGr_REAL_TYPE, n, num_sources, nnz, frontier_nonempty, &Tp, &Ti, (void **) &Tx, GrB_NULL); // Import paths GxB_Matrix_import_CSC(&paths, LAGr_REAL_TYPE, n, num_sources, nnz_dense, paths_nonempty, &Sp, &Si, (void **) &Sx, GrB_NULL); phase1_other_time += LAGraph_toc (tic) ; //=== Update frontier: frontier<!paths>=A’ +.∗ frontier ================ seen += frontier_size ; // # nonzeros in paths array /* double u = (n * ns - seen) ; // # zeros in paths array double f = frontier_size / ns ; double push_work_estimate = d * frontier_size ; double pull_work_estimate = u * fmin (d + f, d * log2 (f)) ; printf ("\n1: d: %g f: %g u: %g ns: %g " "push: %g pull: %g pull/push %g\n", d, f, u, ns, push_work_estimate, pull_work_estimate, pull_work_estimate / push_work_estimate) ; */ double pull_time = INFINITY ; #if DO_PULL GrB_Matrix frontier2 = NULL ; GrB_Matrix_dup (&frontier2, frontier) ; // uses the "pull" method (dot), because AT_matrix is stored by // row, and frontier is stored by column. 
LAGraph_tic (tic); LAGr_mxm(frontier2, paths, GrB_NULL, LAGr_PLUS_SECOND_REAL, AT_matrix, frontier2, pull_descriptor) ; pull_time = LAGraph_toc (tic) ; // printf ("1: pull_time: %g sec\n", pull_time) ; GrB_free (&frontier2) ; #endif phase1_allpull_time += pull_time ; // uses the "push" method (saxpy) LAGraph_tic (tic); LAGr_mxm(frontier, paths, GrB_NULL, LAGr_PLUS_SECOND_REAL, A_matrix, frontier, LAGraph_desc_tocr); double push_time = LAGraph_toc (tic) ; // printf ("1: push_time: %g sec, pull/push %g\n", // push_time, pull_time/push_time) ; phase1_allpush_time += push_time ; // assume a perfect pushpull heuristic double pushpull_time = fmin (pull_time, push_time) ; phase1_pushpull_time += pushpull_time ; //=== Find the new frontier size ======================================= LAGraph_tic (tic); LAGr_Matrix_nvals (&frontier_size, frontier) ; depth = depth + 1; phase1_other_time += LAGraph_toc (tic) ; } while (frontier_size > 0) ; // Repeat until the frontier is empty // printf (" 1st mxm allpush: %g\n", phase1_allpush_time) ; #if DO_PULL printf (" 1st mxm allpull: %g\n", phase1_allpull_time) ; printf (" 1st mxm pushpull: %g\n", phase1_pushpull_time) ; #endif // printf (" 1st other: %g\n", phase1_other_time) ; LAGraph_tic (tic); //-------------------------------------------------------------------------- // Betweenness centrality computation phase //-------------------------------------------------------------------------- // Create the dense update matrix and initialize it to 1 // We will store it column-wise (col * p + row) bc_update_dense = LAGraph_malloc(nnz_dense, sizeof(REAL_t)); #pragma omp parallel for num_threads(nthreads) for (GrB_Index nz = 0; nz < nnz_dense; nz++) { bc_update_dense[nz] = 1.0; } // By this point, paths is (mostly) dense. 
// Create a dense version of the GraphBLAS paths matrix int64_t paths_nonempty ; GxB_Matrix_export_CSC(&paths, &type, &num_rows, &num_cols, &nnz, &paths_nonempty, &Sp, &Si, (void **) &paths_dense, GrB_NULL); // Throw away the "sparse" version of paths LAGraph_free(Sp); LAGraph_free(Si); // Create temporary workspace matrix LAGr_Matrix_new(&t2, LAGr_REAL_TYPE, n, num_sources); GxB_set(t2, GxB_FORMAT, GxB_BY_COL); double time_3 = LAGraph_toc (tic) ; double phase2_other_time = 0 ; double phase2_allpush_time = 0 ; double phase2_allpull_time = 0 ; double phase2_pushpull_time = 0 ; // Backtrack through the BFS and compute centrality updates for each vertex for (int64_t i = depth - 1; i > 0; i--) { // Add contributions by successors and mask with that BFS level's // frontier LAGraph_tic (tic); // printf ("back: %g\n", (double) i) ; /* GrB_Index prior_size ; GrB_Matrix_nvals (&prior_size, S_array [i-1]) ; GrB_Matrix_nvals (&frontier_size, S_array [i]) ; double u = prior_size ; // # entries in the mask double f = frontier_size / ns ; double push_work_estimate = d * frontier_size ; double pull_work_estimate = u * fmin (d + f, d * log2 (f)) ; printf ("\n2: d: %g f: %g u: %g ns: %g " "push: %g pull: %g pull/push %g\n", d, f, u, ns, push_work_estimate, pull_work_estimate, pull_work_estimate / push_work_estimate) ; */ //=== temp<S_array[i]> = bc_update ./ paths ============================ // Export the pattern of S_array[i] void *Bx ; int64_t S_nonempty ; GxB_Matrix_export_CSC(&(S_array[i]), &type, &num_rows, &num_cols, &nnz, &S_nonempty, &Sp, &Si, &Bx, GrB_NULL); // Compute Tx = bc_update ./ paths_dense for all elements of S_array // Build the Tp and Ti vectors, too. 
Tp = LAGraph_malloc(num_sources+1, sizeof(GrB_Index)); Ti = LAGraph_malloc(nnz, sizeof(GrB_Index)); Tx = LAGraph_malloc(nnz, sizeof(REAL_t)); #pragma omp parallel for num_threads(nthreads) for (int64_t col = 0; col < num_sources; col++) { Tp[col] = Sp[col]; for (GrB_Index p = Sp[col]; p < Sp[col+1]; p++) { // Compute Tx by eWiseMult of dense matrices GrB_Index row = Ti[p] = Si[p]; Tx [p] = bc_update_dense [col * n + row] / paths_dense [col * n + row] ; } } Tp[num_sources] = Sp[num_sources]; // Restore S_array[i] by importing it GxB_Matrix_import_CSC(&(S_array[i]), GrB_BOOL, num_rows, num_cols, nnz, S_nonempty, &Sp, &Si, &Bx, GrB_NULL); // Create a GraphBLAS matrix t1 from Tp, Ti, Tx // The row/column indices are the pattern r/c from S_array[i] GxB_Matrix_import_CSC(&t1, LAGr_REAL_TYPE, n, num_sources, nnz, S_nonempty, &Tp, &Ti, (void **) &Tx, GrB_NULL); phase2_other_time += LAGraph_toc (tic) ; //=== t2<S_array[i−1]> = (A * t1) ====================================== double pull_time = INFINITY ; #if DO_PULL // uses the "pull" method (dot) LAGraph_tic (tic); GrB_free (&t2) ; LAGr_Matrix_new(&t2, LAGr_REAL_TYPE, n, num_sources); GxB_set(t2, GxB_FORMAT, GxB_BY_COL); LAGr_mxm(t2, S_array[i-1], GrB_NULL, LAGr_PLUS_SECOND_REAL, A_matrix, t1, LAGraph_desc_ooor); pull_time = LAGraph_toc (tic) ; printf ("2: pull_time: %g sec\n", pull_time) ; #endif phase2_allpull_time += pull_time ; // uses the "push" method (saxpy) LAGraph_tic (tic); GrB_free (&t2) ; LAGr_Matrix_new(&t2, LAGr_REAL_TYPE, n, num_sources); GxB_set(t2, GxB_FORMAT, GxB_BY_COL); LAGr_mxm(t2, S_array[i-1], GrB_NULL, LAGr_PLUS_SECOND_REAL, AT_matrix, t1, LAGraph_desc_toor); double push_time = LAGraph_toc (tic) ; // printf ("2: push_time: %g sec, pull/push %g\n", push_time, // pull_time/push_time) ; phase2_allpush_time += push_time ; // assume a perfect pushpull heuristic double pushpull_time = fmin (pull_time, push_time) ; phase2_pushpull_time += pushpull_time ; LAGraph_tic (tic); GrB_free(&t1); //=== bc_update += 
t2 .* paths ========================================= int64_t t2_nonempty ; GxB_Matrix_export_CSC(&t2, &type, &num_rows, &num_cols, &nnz, &t2_nonempty, &Tp, &Ti, (void **) &Tx, GrB_NULL); #pragma omp parallel for num_threads(nth) for (int64_t col = 0; col < num_sources; col++) { for (GrB_Index p = Tp[col]; p < Tp[col+1]; p++) { GrB_Index row = Ti[p]; bc_update_dense [col * n + row] += Tx [p] * paths_dense [col * n + row] ; } } // Re-import t2 GxB_Matrix_import_CSC(&t2, LAGr_REAL_TYPE, num_rows, num_cols, nnz, t2_nonempty, &Tp, &Ti, (void **) &Tx, GrB_NULL); phase2_other_time += LAGraph_toc (tic) ; } // printf (" 2nd mxm allpush: %g\n", phase2_allpush_time) ; #if DO_PULL printf (" 2nd mxm allpull: %g\n", phase2_allpull_time) ; printf (" 2nd mxm pushpull: %g\n", phase2_pushpull_time) ; #endif // printf (" 2nd other: %g\n", phase2_other_time + time_3) ; LAGraph_tic (tic); //-------------------------------------------------------------------------- // finalize centrality scores //-------------------------------------------------------------------------- //=== Initialize the centrality array with -(num_sources) to avoid counting // zero length paths ==================================================== REAL_t *centrality_dense = LAGraph_malloc(n, sizeof(REAL_t)); #pragma omp parallel for num_threads(nthreads) for (GrB_Index i = 0; i < n; i++) { centrality_dense[i] = -num_sources; } //=== centrality[i] += bc_update[i,:] ====================================== // Both are dense. We can also take care of the reduction. #pragma omp parallel for schedule(static) num_threads(nthreads) for (GrB_Index j = 0; j < n; j++) { for (int64_t i = 0; i < num_sources; i++) { centrality_dense[j] += bc_update_dense[n * i + j]; } } // Build the index vector. GrB_Index* I = LAGraph_malloc(n, sizeof(GrB_Index)); #pragma omp parallel for num_threads(nthreads) for (GrB_Index j = 0; j < n; j++) { I[j] = j; } // Import the dense vector into GraphBLAS and return it. 
GxB_Vector_import(centrality, LAGr_REAL_TYPE, n, n, &I, (void **) &centrality_dense, GrB_NULL); LAGRAPH_FREE_WORK; double time_5 = LAGraph_toc (tic) ; // printf (" wrapup: %g\n", time_5) ; timing [0] = time_1 + (phase1_pushpull_time + phase1_other_time) + time_3 + (phase2_pushpull_time + phase2_other_time) + time_5 ; timing [1] = time_1 + (phase1_allpush_time + phase1_other_time) + time_3 + (phase2_allpush_time + phase2_other_time) + time_5 ; timing [2] = time_1 + (phase1_allpull_time + phase1_other_time) + time_3 + (phase2_allpull_time + phase2_other_time) + time_5 ; #if DO_PULL printf ("Xbc total (pushpull): %g\n", timing [0]) ; #endif // printf ("Xbc total (allpush): %g\n", timing [1]) ; #if DO_PULL printf ("Xbc total (allpull): %g\n", timing [2]) ; #endif return GrB_SUCCESS; }
test.c
#include <stdio.h> #include "../utilities/check.h" #define N 100 int main() { check_offloading(); int a[N], aa[N]; int i, error = 0; // initialize for(i=0; i<N; i++) aa[i] = a[i] = -1; // offload #pragma omp target map(tofrom: a[0:100]) { int k, n; #pragma omp simd private(n) for(k=0; k<N; k++) { n = k; a[k] = n; } } // host for(i=0; i<N; i++) aa[i] = i; // check for(i=0; i<N; i++) { if (a[i] != aa[i]) printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error); if (error > 10) { printf("abort\n"); return 0; } } // report printf("done with %d errors\n", error); return error; }
simulation.c
#include "rsbench.h"

////////////////////////////////////////////////////////////////////////////////////
// BASELINE FUNCTIONS
////////////////////////////////////////////////////////////////////////////////////
// All "baseline" code is at the top of this file. The baseline code is a simple
// implementation of the algorithm, with only minor CPU optimizations in place.
// Following these functions are a number of optimized variants,
// which each deploy a different combination of optimizations strategies. By
// default, RSBench will only run the baseline implementation. Optimized variants
// must be specifically selected using the "-k <optimized variant ID>" command
// line argument.
////////////////////////////////////////////////////////////////////////////////////

// Runs the event-based macroscopic cross section lookup kernel, offloaded to an
// accelerator with OpenMP target. Each loop iteration is an independent "event":
// it derives its own RNG stream via fast_forward_LCG, samples an energy and a
// material, performs the macro XS lookup, and folds the index of the largest XS
// component into a verification checksum (an OMP reduction avoids contention).
// The checksum is returned through *vhash_result.
void run_event_based_simulation(Input input, SimulationData data, unsigned long * vhash_result )
{
	printf("Beginning baseline event based simulation on device...\n");
	unsigned long verification = 0;
	int offloaded_to_device = 0;

	// Main simulation loop over macroscopic cross section lookups
	//#pragma omp parallel for reduction(+:verification)
	#pragma omp target teams distribute parallel for\
	map(to:data.n_poles[:data.length_n_poles])\
	map(to:data.n_windows[:data.length_n_windows])\
	map(to:data.poles[:data.length_poles])\
	map(to:data.windows[:data.length_windows])\
	map(to:data.pseudo_K0RS[:data.length_pseudo_K0RS])\
	map(to:data.num_nucs[:data.length_num_nucs])\
	map(to:data.mats[:data.length_mats])\
	map(to:data.concs[:data.length_concs])\
	map(to:data.max_num_nucs)\
	map(to:data.max_num_poles)\
	map(to:data.max_num_windows)\
	map(tofrom:offloaded_to_device)\
	reduction(+:verification)
	for( int i = 0; i < input.lookups; i++ )
	{
		// Set the initial seed value
		uint64_t seed = STARTING_SEED;

		// Forward seed to lookup index (we need 2 samples per lookup)
		seed = fast_forward_LCG(seed, 2*i);

		// Randomly pick an energy and material for the particle
		double E = LCG_random_double(&seed);
		int mat = pick_mat(&seed);

		double macro_xs[4] = {0};

		calculate_macro_xs( macro_xs, mat, E, input, data.num_nucs, data.mats, data.max_num_nucs, data.concs, data.n_windows, data.pseudo_K0RS, data.windows, data.poles, data.max_num_windows, data.max_num_poles );

		// For verification, and to prevent the compiler from optimizing
		// all work out, we interrogate the returned macro_xs_vector array
		// to find its maximum value index, then increment the verification
		// value by that index. In this implementation, we prevent thread
		// contention by using an OMP reduction on it. For other accelerators,
		// a different approach might be required (e.g., atomics, reduction
		// of thread-specific values in large array via CUDA thrust, etc)
		double max = -DBL_MAX;
		int max_idx = 0;
		for(int x = 0; x < 4; x++ )
		{
			if( macro_xs[x] > max )
			{
				max = macro_xs[x];
				max_idx = x;
			}
		}
		verification += max_idx+1;

		// Check if we are currently running on the device or not
		if( i == 0 )
			offloaded_to_device = !omp_is_initial_device();
	}

	// Print if kernel actually ran on the device
	if( offloaded_to_device )
		printf( "Kernel ran accelerator device.\n" );
	else
		printf( "NOTE - Kernel ran on the host!\n" );

	*vhash_result = verification;
}

// Computes the macroscopic cross section vector (4 components) for material
// `mat` at energy E by summing concentration-weighted microscopic cross
// sections over every nuclide in the material. macro_xs must point to at
// least 4 doubles; it is zeroed here before accumulation.
void calculate_macro_xs( double * macro_xs, int mat, double E, Input input, int * num_nucs, int * mats, int max_num_nucs, double * concs, int * n_windows, double * pseudo_K0Rs, Window * windows, Pole * poles, int max_num_windows, int max_num_poles )
{
	// zero out macro vector
	for( int i = 0; i < 4; i++ )
		macro_xs[i] = 0;

	// for nuclide in mat
	for( int i = 0; i < num_nucs[mat]; i++ )
	{
		double micro_xs[4];
		int nuc = mats[mat * max_num_nucs + i];

		// Doppler (temperature-dependent) broadening is a runtime option
		if( input.doppler == 1 )
			calculate_micro_xs_doppler( micro_xs, nuc, E, input, n_windows, pseudo_K0Rs, windows, poles, max_num_windows, max_num_poles);
		else
			calculate_micro_xs( micro_xs, nuc, E, input, n_windows, pseudo_K0Rs, windows, poles, max_num_windows, max_num_poles);

		// Weight the nuclide's micro XS by its concentration in the material
		for( int j = 0; j < 4; j++ )
		{
			macro_xs[j] += micro_xs[j] * concs[mat * max_num_nucs + i];
		}
		// Debug
		/*
		printf("E = %.2lf, mat = %d, macro_xs[0] = %.2lf, macro_xs[1] = %.2lf, macro_xs[2] = %.2lf, macro_xs[3] = %.2lf\n",
		E, mat, macro_xs[0], macro_xs[1], macro_xs[2], macro_xs[3] );
		*/
	}

	// Debug
	/*
	printf("E = %.2lf, mat = %d, macro_xs[0] = %.2lf, macro_xs[1] = %.2lf, macro_xs[2] = %.2lf, macro_xs[3] = %.2lf\n",
	E, mat, macro_xs[0], macro_xs[1], macro_xs[2], macro_xs[3] );
	*/
}

// No Temperature dependence (i.e., 0K evaluation)
// Computes the 4 microscopic cross sections for nuclide `nuc` at energy E:
// [0]=total, [1]=absorption, [2]=fission, [3]=elastic (= total - absorption).
void calculate_micro_xs( double * micro_xs, int nuc, double E, Input input, int * n_windows, double * pseudo_K0RS, Window * windows, Pole * poles, int max_num_windows, int max_num_poles)
{
	// MicroScopic XS's to Calculate
	double sigT;
	double sigA;
	double sigF;
	double sigE;

	// Calculate Window Index
	double spacing = 1.0 / n_windows[nuc];
	int window = (int) ( E / spacing );
	if( window == n_windows[nuc] )
		window--; // clamp E == 1.0 into the last window

	// Calculate sigTfactors
	RSComplex sigTfactors[4]; // Of length input.numL, which is always 4
	calculate_sig_T(nuc, E, input, pseudo_K0RS, sigTfactors );

	// Calculate contributions from window "background" (i.e., poles outside window (pre-calculated)
	Window w = windows[nuc * max_num_windows + window];
	sigT = E * w.T;
	sigA = E * w.A;
	sigF = E * w.F;

	// Loop over Poles within window, add contributions
	for( int i = w.start; i < w.end; i++ )
	{
		RSComplex PSIIKI;
		RSComplex CDUM;
		Pole pole = poles[nuc * max_num_poles + i];
		RSComplex t1 = {0, 1};
		RSComplex t2 = {sqrt(E), 0 };
		PSIIKI = c_div( t1 , c_sub(pole.MP_EA,t2) );
		RSComplex E_c = {E, 0};
		CDUM = c_div(PSIIKI, E_c);
		sigT += (c_mul(pole.MP_RT, c_mul(CDUM, sigTfactors[pole.l_value])) ).r;
		sigA += (c_mul( pole.MP_RA, CDUM)).r;
		sigF += (c_mul(pole.MP_RF, CDUM)).r;
	}

	// Elastic scattering is derived rather than accumulated directly
	sigE = sigT - sigA;

	micro_xs[0] = sigT;
	micro_xs[1] = sigA;
	micro_xs[2] = sigF;
	micro_xs[3] = sigE;
}

// Temperature Dependent Variation of Kernel
// (This involves using the Complex Faddeeva function to
// Doppler broaden the poles within the window)
// Same output layout as calculate_micro_xs, but each pole contribution is
// broadened via the Faddeeva function (fast_nuclear_W).
void calculate_micro_xs_doppler( double * micro_xs, int nuc, double E, Input input, int * n_windows, double * pseudo_K0RS, Window * windows, Pole * poles, int max_num_windows, int max_num_poles )
{
	// MicroScopic XS's to Calculate
	double sigT;
	double sigA;
	double sigF;
	double sigE;

	// Calculate Window Index
	double spacing = 1.0 / n_windows[nuc];
	int window = (int) ( E / spacing );
	if( window == n_windows[nuc] )
		window--; // clamp E == 1.0 into the last window

	// Calculate sigTfactors
	RSComplex sigTfactors[4]; // Of length input.numL, which is always 4
	calculate_sig_T(nuc, E, input, pseudo_K0RS, sigTfactors );

	// Calculate contributions from window "background" (i.e., poles outside window (pre-calculated)
	Window w = windows[nuc * max_num_windows + window];
	sigT = E * w.T;
	sigA = E * w.A;
	sigF = E * w.F;

	double dopp = 0.5;

	// Loop over Poles within window, add contributions
	for( int i = w.start; i < w.end; i++ )
	{
		Pole pole = poles[nuc * max_num_poles + i];

		// Prep Z
		RSComplex E_c = {E, 0};
		RSComplex dopp_c = {dopp, 0};
		RSComplex Z = c_mul(c_sub(E_c, pole.MP_EA), dopp_c);

		// Evaluate Fadeeva Function
		RSComplex faddeeva = fast_nuclear_W( Z );

		// Update W
		sigT += (c_mul( pole.MP_RT, c_mul(faddeeva, sigTfactors[pole.l_value]) )).r;
		sigA += (c_mul( pole.MP_RA , faddeeva)).r;
		sigF += (c_mul( pole.MP_RF , faddeeva)).r;
	}

	// Elastic scattering is derived rather than accumulated directly
	sigE = sigT - sigA;

	micro_xs[0] = sigT;
	micro_xs[1] = sigA;
	micro_xs[2] = sigF;
	micro_xs[3] = sigE;
}

// picks a material based on a probabilistic distribution
int pick_mat( uint64_t * seed )
{
	// I have a nice spreadsheet supporting these numbers. They are
	// the fractions (by volume) of material in the core. Not a
	// *perfect* approximation of where XS lookups are going to occur,
	// but this will do a good job of biasing the system nonetheless.

	double dist[12];
	dist[0] = 0.140; // fuel
	dist[1] = 0.052; // cladding
	dist[2] = 0.275; // cold, borated water
	dist[3] = 0.134; // hot, borated water
	dist[4] = 0.154; // RPV
	dist[5] = 0.064; // Lower, radial reflector
	dist[6] = 0.066; // Upper reflector / top plate
	dist[7] = 0.055; // bottom plate
	dist[8] = 0.008; // bottom nozzle
	dist[9] = 0.015; // top nozzle
	dist[10] = 0.025; // top of fuel assemblies
	dist[11] = 0.013; // bottom of fuel assemblies

	double roll = LCG_random_double(seed);

	// makes a pick based on the distro
	// NOTE(review): running sum is rebuilt from scratch each i (summing
	// dist[i..1], excluding dist[0]) — quadratic and unusual, but it matches
	// the reference implementation's verification hashes; do not "fix"
	// without re-baselining verification.
	for( int i = 0; i < 12; i++ )
	{
		double running = 0;
		for( int j = i; j > 0; j-- )
			running += dist[j];
		if( roll < running )
			return i;
	}

	return 0;
}

// Fills sigTfactors[0..3] with the complex phase factors e^{-2i*phi_l} used
// to weight total-XS pole contributions, one per angular momentum level l.
void calculate_sig_T( int nuc, double E, Input input, double * pseudo_K0RS, RSComplex * sigTfactors )
{
	double phi;

	for( int i = 0; i < 4; i++ )
	{
		phi = pseudo_K0RS[nuc * input.numL + i] * sqrt(E);

		// Hard-sphere phase shift correction per level
		// NOTE(review): the double negative on the i == 1 branch effectively
		// ADDS atan(phi); this matches the upstream reference code — verify
		// intentional before changing, as it alters verification hashes.
		if( i == 1 )
			phi -= - atan( phi );
		else if( i == 2 )
			phi -= atan( 3.0 * phi / (3.0 - phi*phi));
		else if( i == 3 )
			phi -= atan(phi*(15.0-phi*phi)/(15.0-6.0*phi*phi));

		phi *= 2.0;

		sigTfactors[i].r = cos(phi);
		sigTfactors[i].i = -sin(phi);
	}
}

// This function uses a combination of the Abrarov Approximation
// and the QUICK_W three term asymptotic expansion.
// Only expected to use Abrarov ~0.5% of the time.
// Evaluates the complex Faddeeva function W(Z) used for Doppler broadening.
// Two regimes:
//   |Z| <  6 : Abrarov approximation (N = 10 terms, Tm = 12.0)
//   |Z| >= 6 : QUICK_W three-term asymptotic expansion (accurate to O(1e-6))
// Only expected to take the Abrarov branch ~0.5% of the time.
RSComplex fast_nuclear_W( RSComplex Z )
{
	// Abrarov
	if( c_abs(Z) < 6.0 )
	{
		// Precomputed parts for speeding things up
		// (N = 10, Tm = 12.0)
		RSComplex prefactor = {0, 8.124330e+01};
		double an[10] = {
			2.758402e-01,
			2.245740e-01,
			1.594149e-01,
			9.866577e-02,
			5.324414e-02,
			2.505215e-02,
			1.027747e-02,
			3.676164e-03,
			1.146494e-03,
			3.117570e-04 };
		double neg_1n[10] = {
			-1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0 };
		double denominator_left[10] = {
			9.869604e+00,
			3.947842e+01,
			8.882644e+01,
			1.579137e+02,
			2.467401e+02,
			3.553058e+02,
			4.836106e+02,
			6.316547e+02,
			7.994380e+02,
			9.869604e+02 };

		RSComplex t1 = {0, 12};
		RSComplex t2 = {12, 0};
		RSComplex i = {0, 1};
		RSComplex one = {1, 0};

		// PERF: exp(12iZ) and 144*Z^2 are loop-invariant; the original code
		// recomputed both inside every one of the 10 series iterations.
		// Hoisting them produces bit-identical results with ~10x fewer
		// fast_cexp calls in this branch.
		RSComplex exp_12iZ = fast_cexp(c_mul(t1, Z));
		RSComplex t5 = {144, 0};
		RSComplex Z2_scaled = c_mul(t5, c_mul(Z, Z));

		RSComplex W = c_div(c_mul(i, ( c_sub(one, exp_12iZ) )) , c_mul(t2, Z));
		RSComplex sum = {0, 0};
		for( int n = 0; n < 10; n++ )
		{
			RSComplex sign = {neg_1n[n], 0};
			RSComplex top = c_sub(c_mul(sign, exp_12iZ), one);
			RSComplex denom = {denominator_left[n], 0};
			RSComplex bot = c_sub(denom, Z2_scaled);
			RSComplex coeff = {an[n], 0};
			sum = c_add(sum, c_mul(coeff, c_div(top, bot)));
		}
		W = c_add(W, c_mul(prefactor, c_mul(Z, sum)));
		return W;
	}

	// QUICK_2 3 Term Asymptotic Expansion (Accurate to O(1e-6)).
	// Pre-computed parameters
	RSComplex a = {0.512424224754768462984202823134979415014943561548661637413182,0};
	RSComplex b = {0.275255128608410950901357962647054304017026259671664935783653, 0};
	RSComplex c = {0.051765358792987823963876628425793170829107067780337219430904, 0};
	RSComplex d = {2.724744871391589049098642037352945695982973740328335064216346, 0};
	RSComplex i = {0,1};
	RSComplex Z2 = c_mul(Z, Z);
	// Three Term Asymptotic Expansion
	RSComplex W = c_mul(c_mul(Z,i), (c_add(c_div(a,(c_sub(Z2, b))) , c_div(c,(c_sub(Z2, d))))));

	return W;
}

// 63-bit linear congruential generator; advances *seed in place and returns
// a uniform double in [0, 1).
double LCG_random_double(uint64_t * seed)
{
	const uint64_t m = 9223372036854775808ULL; // 2^63
	const uint64_t a = 2806196910506780709ULL;
	const uint64_t c = 1ULL;
	*seed = (a * (*seed) + c) % m;
	return (double) (*seed) / (double) m;
}

// Same LCG as LCG_random_double, but returns the raw 63-bit integer state.
uint64_t LCG_random_int(uint64_t * seed)
{
	const uint64_t m = 9223372036854775808ULL; // 2^63
	const uint64_t a = 2806196910506780709ULL;
	const uint64_t c = 1ULL;
	*seed = (a * (*seed) + c) % m;
	return *seed;
}

// Jumps the LCG ahead by n steps in O(log n) via modular exponentiation of
// the recurrence, letting each event derive an independent RNG stream.
uint64_t fast_forward_LCG(uint64_t seed, uint64_t n)
{
	const uint64_t m = 9223372036854775808ULL; // 2^63
	uint64_t a = 2806196910506780709ULL;
	uint64_t c = 1ULL;
	n = n % m;

	// a_new/c_new accumulate the composed n-step multiplier and increment
	uint64_t a_new = 1;
	uint64_t c_new = 0;

	while(n > 0)
	{
		if(n & 1)
		{
			a_new *= a;
			c_new = c_new * a + c;
		}
		c *= (a + 1);
		a *= a;

		n >>= 1;
	}

	return (a_new * seed + c_new) % m;
}

// Complex arithmetic helpers for RSComplex (.r real part, .i imaginary part).

RSComplex c_add( RSComplex A, RSComplex B)
{
	RSComplex C;
	C.r = A.r + B.r;
	C.i = A.i + B.i;
	return C;
}

RSComplex c_sub( RSComplex A, RSComplex B)
{
	RSComplex C;
	C.r = A.r - B.r;
	C.i = A.i - B.i;
	return C;
}

RSComplex c_mul( RSComplex A, RSComplex B)
{
	double a = A.r;
	double b = A.i;
	double c = B.r;
	double d = B.i;
	RSComplex C;
	C.r = (a*c) - (b*d);
	C.i = (a*d) + (b*c);
	return C;
}

RSComplex c_div( RSComplex A, RSComplex B)
{
	double a = A.r;
	double b = A.i;
	double c = B.r;
	double d = B.i;
	RSComplex C;
	double denom = c*c + d*d;
	C.r = ( (a*c) + (b*d) ) / denom;
	C.i = ( (b*c) - (a*d) ) / denom;
	return C;
}

// Complex magnitude |A|
double c_abs( RSComplex A)
{
	return sqrt(A.r*A.r + A.i * A.i);
}

// Fast (but inaccurate) exponential function
// Written By "ACMer":
// https://codingforspeed.com/using-faster-exponential-approximation/
// We use our own to avoid small differences in compiler specific
// exp() intrinsic implementations that make it difficult to verify
// if the code is working correctly or not.
// Approximates e^x as (1 + x/4096)^4096 via 12 repeated squarings.
double fast_exp(double x)
{
	x = 1.0 + x * 0.000244140625;
	x *= x; x *= x; x *= x; x *= x;
	x *= x; x *= x; x *= x; x *= x;
	x *= x; x *= x; x *= x; x *= x;
	return x;
}

// Implementation based on:
// z = x + iy
// cexp(z) = e^x * (cos(y) + i * sin(y))
RSComplex fast_cexp( RSComplex z )
{
	double x = z.r;
	double y = z.i;

	// For consistency across architectures, we
	// will use our own exponetial implementation
	//double t1 = exp(x);
	double t1 = fast_exp(x);
	double t2 = cos(y);
	double t3 = sin(y);
	RSComplex t4 = {t2, t3};
	RSComplex t5 = {t1, 0};
	RSComplex result = c_mul(t5, (t4));
	return result;
}
SeparableFilter.h
/* Copyright 2007 Niels Martin Hansen This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Contact: E-mail: <jiifurusu@gmail.com> IRC: jfs in #aegisub on irc.rizon.net */ #pragma once #ifdef _OPENMP #include <omp.h> #endif #include <math.h> // Filter an image in horizontal direction with a one-dimensional filter // PixelWidth is the distance in bytes between pixels template<ptrdiff_t PixelDist> void SeparableFilterX(unsigned char *src, unsigned char *dst, int width, int height, ptrdiff_t stride, int *kernel, int kernel_size, int divisor) { #pragma omp parallel for for (int y = 0; y < height; y++) { unsigned char *in = src + y*stride; unsigned char *out = dst + y*stride; for (int x = 0; x < width; x++) { int accum = 0; for (int k = 0; k < kernel_size; k++) { int xofs = k - kernel_size/2; if (x+xofs < 0) xofs += width; if (x+xofs >= width) xofs -= width; accum += (int)(in[xofs*PixelDist] * kernel[k]); } accum /= divisor; if (accum > 255) accum = 255; if (accum < 0) accum = 0; *out = (unsigned char)accum; in+=PixelDist; out+=PixelDist; } } } // Filter an image in vertical direction with a one-dimensional filter // This one templated with PixelWidth since the channel interlacing is horizontal only, // filtering once vertically will automatically catch all channels. // (Width must be multiplied by pixel width for that to happen though.) 
// Vertical counterpart of SeparableFilterX: filters each byte-column of the
// image with the 1-D kernel. Taps that fall outside the column wrap around;
// the weighted sum is divided by `divisor` and clamped to [0, 255].
template<ptrdiff_t PixelDist>
void SeparableFilterY(unsigned char *src, unsigned char *dst, int width, int height, ptrdiff_t stride, int *kernel, int kernel_size, int divisor)
{
	// Channel interlacing is horizontal, so a row spans width*PixelDist bytes.
	width *= PixelDist;
	// FIX: the "#pragma omp parallel for" was previously placed above
	// "width *= PixelDist;". An OpenMP loop directive must immediately
	// precede the for statement, so the old placement was a compile error
	// whenever OpenMP was enabled; the statement now executes before the
	// parallel loop, as intended.
#pragma omp parallel for
	for (int x = 0; x < width; x+=PixelDist) {
		// NOTE(review): stepping by PixelDist visits only every PixelDist-th
		// byte column, i.e. a single channel when PixelDist > 1, which seems
		// to contradict the "catch all channels" comment above — verify
		// against callers. Behaviour deliberately kept unchanged here.
		unsigned char *in = src + x;
		unsigned char *out = dst + x;
		for (int y = 0; y < height; y++) {
			int accum = 0;
			for (int k = 0; k < kernel_size; k++) {
				// Tap offset relative to the kernel centre; wrap at the edges
				int yofs = k - kernel_size/2;
				if (y+yofs < 0) yofs += height;
				if (y+yofs >= height) yofs -= height;
				accum += (int)(in[yofs*stride] * kernel[k]);
			}
			// Normalise and clamp to the valid byte range
			accum /= divisor;
			if (accum > 255) accum = 255;
			if (accum < 0) accum = 0;
			*out = (unsigned char)accum;
			in += stride;
			out += stride;
		}
	}
}

// Normal (Gaussian) density with standard deviation `sigma` evaluated at x.
// sigma <= 0 degenerates to a unit impulse at x == 0.
static inline double NormalDist(double sigma, double x)
{
	if (sigma <= 0 && x == 0) return 1;
	else if (sigma <= 0) return 0;
	else return exp(-(x*x)/(2*sigma*sigma)) / (sigma * sqrt(2*3.1415926535));
}

// Integer Gaussian convolution kernel: symmetric, odd width, taps scaled by
// 255. `divisor` is the sum of all taps, used to renormalise after filtering.
// NOTE(review): owns `kernel` but has no copy constructor/assignment —
// copying would double-free; confirm instances are never copied.
struct GaussianKernel {
	int *kernel;   // tap weights, length `width`
	int width;     // kernel size (always odd, >= 3)
	int divisor;   // sum of all taps
	inline GaussianKernel(double sigma)
	{
		width = (int)(sigma*3 + 0.5) | 1; // binary-or with 1 to make sure the number is odd
		if (width < 3) width = 3;
		kernel = new int[width];
		kernel[width/2] = (int)(NormalDist(sigma, 0) * 255);
		divisor = kernel[width/2];
		// Fill symmetric taps outward from the centre
		for (int x = width/2-1; x >= 0; x--) {
			int val = (int)(NormalDist(sigma, width/2-x) * 255 + 0.5);
			divisor += val*2;
			kernel[x] = val;
			kernel[width - x - 1] = val;
		}
	}
	inline ~GaussianKernel()
	{
		delete[] kernel;
	}
};
profile.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP RRRR OOO FFFFF IIIII L EEEEE % % P P R R O O F I L E % % PPPP RRRR O O FFF I L EEE % % P R R O O F I L E % % P R R OOO F IIIII LLLLL EEEEE % % % % % % MagickCore Image Profile Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/attribute.h" #include "magick/cache.h" #include "magick/color.h" #include "magick/colorspace-private.h" #include "magick/configure.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/hashmap.h" #include "magick/image.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/option-private.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/quantum.h" #include "magick/quantum-private.h" #include "magick/resource_.h" #include "magick/splay-tree.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/token.h" #include "magick/utility.h" #if defined(MAGICKCORE_LCMS_DELEGATE) #if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H) #include <wchar.h> #include <lcms/lcms2.h> #else #include <wchar.h> #include "lcms2.h" #endif #endif #if defined(MAGICKCORE_XML_DELEGATE) # if defined(MAGICKCORE_WINDOWS_SUPPORT) # if !defined(__MINGW32__) # include <win32config.h> # endif # endif # include <libxml/parser.h> # include <libxml/tree.h> #endif /* Forward declarations */ static MagickBooleanType SetImageProfileInternal(Image *,const char *,const StringInfo *, const MagickBooleanType); static void WriteTo8BimProfile(Image *,const char*,const StringInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e P r o f i l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImageProfiles() clones one or more image profiles. % % The format of the CloneImageProfiles method is: % % MagickBooleanType CloneImageProfiles(Image *image, % const Image *clone_image) % % A description of each parameter follows: % % o image: the image. % % o clone_image: the clone image. 
% */ MagickExport MagickBooleanType CloneImageProfiles(Image *image, const Image *clone_image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(clone_image != (const Image *) NULL); assert(clone_image->signature == MagickCoreSignature); image->color_profile.length=clone_image->color_profile.length; image->color_profile.info=clone_image->color_profile.info; image->iptc_profile.length=clone_image->iptc_profile.length; image->iptc_profile.info=clone_image->iptc_profile.info; if (clone_image->profiles != (void *) NULL) { if (image->profiles != (void *) NULL) DestroyImageProfiles(image); image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles, (void *(*)(void *)) ConstantString,(void *(*)(void *)) CloneStringInfo); } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e l e t e I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DeleteImageProfile() deletes a profile from the image by its name. % % The format of the DeleteImageProfile method is: % % MagickBooleanTyupe DeleteImageProfile(Image *image,const char *name) % % A description of each parameter follows: % % o image: the image. % % o name: the profile name. % */ MagickExport MagickBooleanType DeleteImageProfile(Image *image,const char *name) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return(MagickFalse); if (LocaleCompare(name,"icc") == 0) { /* Continue to support deprecated color profile for now. 
*/ image->color_profile.length=0; image->color_profile.info=(unsigned char *) NULL; } if (LocaleCompare(name,"iptc") == 0) { /* Continue to support deprecated IPTC profile for now. */ image->iptc_profile.length=0; image->iptc_profile.info=(unsigned char *) NULL; } WriteTo8BimProfile(image,name,(StringInfo *) NULL); return(DeleteNodeFromSplayTree((SplayTreeInfo *) image->profiles,name)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y I m a g e P r o f i l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImageProfiles() releases memory associated with an image profile map. % % The format of the DestroyProfiles method is: % % void DestroyImageProfiles(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport void DestroyImageProfiles(Image *image) { if (image->profiles != (SplayTreeInfo *) NULL) image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageProfile() gets a profile associated with an image by name. % % The format of the GetImageProfile method is: % % const StringInfo *GetImageProfile(const Image *image,const char *name) % % A description of each parameter follows: % % o image: the image. % % o name: the profile name. 
% */ MagickExport const StringInfo *GetImageProfile(const Image *image, const char *name) { const StringInfo *profile; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return((StringInfo *) NULL); profile=(const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *) image->profiles,name); return(profile); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t N e x t I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetNextImageProfile() gets the next profile name for an image. % % The format of the GetNextImageProfile method is: % % char *GetNextImageProfile(const Image *image) % % A description of each parameter follows: % % o hash_info: the hash info. % */ MagickExport char *GetNextImageProfile(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return((char *) NULL); return((char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P r o f i l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ProfileImage() associates, applies, or removes an ICM, IPTC, or generic % profile with / to / from an image. If the profile is NULL, it is removed % from the image otherwise added or applied. Use a name of '*' and a profile % of NULL to remove all profiles from the image. 
%
%  ICC and ICM profiles are handled as follows: If the image does not have
%  an associated color profile, the one you provide is associated with the
%  image and the image pixels are not transformed.  Otherwise, the colorspace
%  transform defined by the existing and new profile are applied to the image
%  pixels and the new profile is associated with the image.
%
%  The format of the ProfileImage method is:
%
%      MagickBooleanType ProfileImage(Image *image,const char *name,
%        const void *datum,const size_t length,const MagickBooleanType clone)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: Name of profile to add or remove: ICC, IPTC, or generic profile.
%
%    o datum: the profile data.
%
%    o length: the length of the profile.
%
%    o clone: should be MagickFalse.
%
*/
#if defined(MAGICKCORE_LCMS_DELEGATE)
/*
  State for one side (source or target) of an LCMS color transform: the
  colorspace, the LCMS pixel-layout type, the channel count, the open
  profile handle, the rendering intent, per-thread scanline buffers, and
  the linear mapping (scale/translate) between quantum samples and the
  double samples LCMS consumes.
*/
typedef struct _LCMSInfo
{
  ColorspaceType
    colorspace;

  cmsUInt32Number
    type;

  size_t
    channels;

  cmsHPROFILE
    profile;

  int
    intent;

  double
    **magick_restrict pixels,
    scale,
    translate;
} LCMSInfo;

#if LCMS_VERSION < 2060
/*
  Compatibility shims: emulate the LCMS 2.6 threaded-context API on older
  LCMS releases by smuggling the user-data pointer through the cmsContext
  handle itself.
*/
static void* cmsGetContextUserData(cmsContext ContextID)
{
  return(ContextID);
}

static cmsContext cmsCreateContext(void *magick_unused(Plugin),void *UserData)
{
  magick_unreferenced(Plugin);
  return((cmsContext) UserData);
}

static void cmsSetLogErrorHandlerTHR(cmsContext magick_unused(ContextID),
  cmsLogErrorHandlerFunction Fn)
{
  magick_unreferenced(ContextID);
  cmsSetLogErrorHandler(Fn);
}

static void cmsDeleteContext(cmsContext magick_unused(ContextID))
{
  magick_unreferenced(ContextID);
}
#endif

/*
  Release the per-thread scanline buffers allocated by
  AcquirePixelThreadSet(); safe on NULL and on partially-filled sets.
  Returns NULL for convenient assignment.
*/
static double **DestroyPixelThreadSet(double **pixels)
{
  register ssize_t
    i;

  if (pixels == (double **) NULL)
    return((double **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (pixels[i] != (double *) NULL)
      pixels[i]=(double *) RelinquishMagickMemory(pixels[i]);
  pixels=(double **) RelinquishMagickMemory(pixels);
  return(pixels);
}

/*
  Allocate one scanline buffer of columns*channels doubles per worker
  thread (ThreadResource limit).  On any allocation failure the partial
  set is destroyed and NULL is returned.
*/
static double **AcquirePixelThreadSet(const size_t columns, const size_t
  channels)
{
  double
    **pixels;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixels=(double **) AcquireQuantumMemory(number_threads,sizeof(*pixels));
  if (pixels == (double **) NULL)
    return((double **) NULL);
  /* zero first so DestroyPixelThreadSet() can free a partial set */
  (void) memset(pixels,0,number_threads*sizeof(*pixels));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixels[i]=(double *) AcquireQuantumMemory(columns,channels*
      sizeof(**pixels));
    if (pixels[i] == (double *) NULL)
      return(DestroyPixelThreadSet(pixels));
  }
  return(pixels);
}

/*
  Release the per-thread LCMS transform handles; safe on partially-filled
  sets.  Returns NULL for convenient assignment.
*/
static cmsHTRANSFORM *DestroyTransformThreadSet(cmsHTRANSFORM *transform)
{
  register ssize_t
    i;

  assert(transform != (cmsHTRANSFORM *) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (transform[i] != (cmsHTRANSFORM) NULL)
      cmsDeleteTransform(transform[i]);
  transform=(cmsHTRANSFORM *) RelinquishMagickMemory(transform);
  return(transform);
}

/*
  Create one LCMS transform per worker thread from the source profile to
  the target profile.  On any failure the partial set is destroyed and
  NULL is returned.
*/
static cmsHTRANSFORM *AcquireTransformThreadSet(const LCMSInfo *source_info,
  const LCMSInfo *target_info,const cmsUInt32Number flags,
  cmsContext cms_context)
{
  cmsHTRANSFORM
    *transform;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  transform=(cmsHTRANSFORM *) AcquireQuantumMemory(number_threads,
    sizeof(*transform));
  if (transform == (cmsHTRANSFORM *) NULL)
    return((cmsHTRANSFORM *) NULL);
  (void) memset(transform,0,number_threads*sizeof(*transform));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    transform[i]=cmsCreateTransformTHR(cms_context,source_info->profile,
      source_info->type,target_info->profile,target_info->type,
      target_info->intent,flags);
    if (transform[i] == (cmsHTRANSFORM) NULL)
      return(DestroyTransformThreadSet(transform));
  }
  return(transform);
}

/*
  LCMS error callback: log the failure and, when an Image is attached to
  the LCMS context as user data, record a transform warning on its
  exception.
*/
static void LCMSExceptionHandler(cmsContext context,cmsUInt32Number severity,
  const char *message)
{
  Image
    *image;

  (void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s",
    severity,message != (char *) NULL ?
message : "no message"); image=(Image *) cmsGetContextUserData(context); if (image != (Image *) NULL) (void) ThrowMagickException(&image->exception,GetMagickModule(), ImageWarning,"UnableToTransformColorspace","`%s'",image->filename); } #endif static MagickBooleanType SetsRGBImageProfile(Image *image) { static unsigned char sRGBProfile[] = { 0x00, 0x00, 0x0c, 0x8c, 0x61, 0x72, 0x67, 0x6c, 0x02, 0x20, 0x00, 0x00, 0x6d, 0x6e, 0x74, 0x72, 0x52, 0x47, 0x42, 0x20, 0x58, 0x59, 0x5a, 0x20, 0x07, 0xde, 0x00, 0x01, 0x00, 0x06, 0x00, 0x16, 0x00, 0x0f, 0x00, 0x3a, 0x61, 0x63, 0x73, 0x70, 0x4d, 0x53, 0x46, 0x54, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x43, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x61, 0x72, 0x67, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x01, 0x50, 0x00, 0x00, 0x00, 0x99, 0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0x01, 0xec, 0x00, 0x00, 0x00, 0x67, 0x64, 0x6d, 0x6e, 0x64, 0x00, 0x00, 0x02, 0x54, 0x00, 0x00, 0x00, 0x70, 0x64, 0x6d, 0x64, 0x64, 0x00, 0x00, 0x02, 0xc4, 0x00, 0x00, 0x00, 0x88, 0x74, 0x65, 0x63, 0x68, 0x00, 0x00, 0x03, 0x4c, 0x00, 0x00, 0x00, 0x0c, 0x76, 0x75, 0x65, 0x64, 0x00, 0x00, 0x03, 0x58, 0x00, 0x00, 0x00, 0x67, 0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x03, 0xc0, 0x00, 0x00, 0x00, 0x24, 0x6c, 0x75, 0x6d, 0x69, 0x00, 0x00, 0x03, 0xe4, 0x00, 0x00, 0x00, 0x14, 0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x03, 0xf8, 0x00, 0x00, 0x00, 0x24, 0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0x04, 0x1c, 0x00, 0x00, 0x00, 0x14, 0x62, 0x6b, 0x70, 0x74, 0x00, 0x00, 0x04, 0x30, 0x00, 0x00, 0x00, 0x14, 0x72, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x44, 0x00, 0x00, 0x00, 0x14, 
0x67, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x58, 0x00, 0x00, 0x00, 0x14, 0x62, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x6c, 0x00, 0x00, 0x00, 0x14, 0x72, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x67, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x62, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x47, 0x72, 0x61, 0x65, 0x6d, 0x65, 0x20, 0x57, 0x2e, 0x20, 0x47, 0x69, 0x6c, 0x6c, 0x2e, 0x20, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x20, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x20, 0x4e, 0x6f, 0x20, 0x57, 0x61, 0x72, 0x72, 0x61, 0x6e, 0x74, 0x79, 0x2c, 0x20, 0x55, 0x73, 0x65, 0x20, 0x61, 0x74, 0x20, 0x79, 0x6f, 0x75, 0x72, 0x20, 0x6f, 0x77, 0x6e, 0x20, 0x72, 0x69, 0x73, 0x6b, 0x2e, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 
0x45, 0x43, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0x69, 0x67, 0x20, 0x00, 0x00, 0x00, 0x00, 0x43, 0x52, 0x54, 0x20, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xa4, 0x7c, 0x00, 0x14, 0x5f, 0x30, 0x00, 0x10, 0xce, 0x02, 0x00, 0x03, 0xed, 0xb2, 0x00, 0x04, 0x13, 0x0a, 0x00, 0x03, 0x5c, 0x67, 0x00, 0x00, 0x00, 0x01, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x0a, 0x3d, 0x00, 0x50, 0x00, 0x00, 0x00, 0x57, 0x1e, 0xb8, 0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x8f, 0x00, 0x00, 0x00, 0x02, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x51, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x16, 0xcc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6f, 0xa0, 0x00, 0x00, 0x38, 0xf5, 0x00, 0x00, 0x03, 0x90, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x97, 0x00, 0x00, 0xb7, 0x87, 0x00, 0x00, 0x18, 0xd9, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x9f, 0x00, 0x00, 0x0f, 0x84, 0x00, 0x00, 0xb6, 0xc4, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x0a, 0x00, 0x0f, 0x00, 0x14, 0x00, 0x19, 0x00, 0x1e, 0x00, 0x23, 0x00, 0x28, 0x00, 0x2d, 0x00, 0x32, 0x00, 0x37, 0x00, 0x3b, 0x00, 0x40, 0x00, 0x45, 0x00, 0x4a, 0x00, 0x4f, 0x00, 0x54, 0x00, 0x59, 0x00, 0x5e, 0x00, 0x63, 0x00, 0x68, 0x00, 0x6d, 0x00, 0x72, 0x00, 0x77, 0x00, 0x7c, 0x00, 0x81, 0x00, 0x86, 0x00, 0x8b, 0x00, 0x90, 0x00, 0x95, 0x00, 0x9a, 0x00, 0x9f, 0x00, 0xa4, 0x00, 0xa9, 0x00, 0xae, 0x00, 0xb2, 0x00, 0xb7, 0x00, 0xbc, 0x00, 0xc1, 0x00, 0xc6, 0x00, 0xcb, 0x00, 0xd0, 0x00, 0xd5, 0x00, 0xdb, 0x00, 0xe0, 0x00, 0xe5, 0x00, 0xeb, 0x00, 0xf0, 0x00, 0xf6, 0x00, 0xfb, 0x01, 0x01, 0x01, 0x07, 0x01, 0x0d, 0x01, 0x13, 0x01, 
0x19, 0x01, 0x1f, 0x01, 0x25, 0x01, 0x2b, 0x01, 0x32, 0x01, 0x38, 0x01, 0x3e, 0x01, 0x45, 0x01, 0x4c, 0x01, 0x52, 0x01, 0x59, 0x01, 0x60, 0x01, 0x67, 0x01, 0x6e, 0x01, 0x75, 0x01, 0x7c, 0x01, 0x83, 0x01, 0x8b, 0x01, 0x92, 0x01, 0x9a, 0x01, 0xa1, 0x01, 0xa9, 0x01, 0xb1, 0x01, 0xb9, 0x01, 0xc1, 0x01, 0xc9, 0x01, 0xd1, 0x01, 0xd9, 0x01, 0xe1, 0x01, 0xe9, 0x01, 0xf2, 0x01, 0xfa, 0x02, 0x03, 0x02, 0x0c, 0x02, 0x14, 0x02, 0x1d, 0x02, 0x26, 0x02, 0x2f, 0x02, 0x38, 0x02, 0x41, 0x02, 0x4b, 0x02, 0x54, 0x02, 0x5d, 0x02, 0x67, 0x02, 0x71, 0x02, 0x7a, 0x02, 0x84, 0x02, 0x8e, 0x02, 0x98, 0x02, 0xa2, 0x02, 0xac, 0x02, 0xb6, 0x02, 0xc1, 0x02, 0xcb, 0x02, 0xd5, 0x02, 0xe0, 0x02, 0xeb, 0x02, 0xf5, 0x03, 0x00, 0x03, 0x0b, 0x03, 0x16, 0x03, 0x21, 0x03, 0x2d, 0x03, 0x38, 0x03, 0x43, 0x03, 0x4f, 0x03, 0x5a, 0x03, 0x66, 0x03, 0x72, 0x03, 0x7e, 0x03, 0x8a, 0x03, 0x96, 0x03, 0xa2, 0x03, 0xae, 0x03, 0xba, 0x03, 0xc7, 0x03, 0xd3, 0x03, 0xe0, 0x03, 0xec, 0x03, 0xf9, 0x04, 0x06, 0x04, 0x13, 0x04, 0x20, 0x04, 0x2d, 0x04, 0x3b, 0x04, 0x48, 0x04, 0x55, 0x04, 0x63, 0x04, 0x71, 0x04, 0x7e, 0x04, 0x8c, 0x04, 0x9a, 0x04, 0xa8, 0x04, 0xb6, 0x04, 0xc4, 0x04, 0xd3, 0x04, 0xe1, 0x04, 0xf0, 0x04, 0xfe, 0x05, 0x0d, 0x05, 0x1c, 0x05, 0x2b, 0x05, 0x3a, 0x05, 0x49, 0x05, 0x58, 0x05, 0x67, 0x05, 0x77, 0x05, 0x86, 0x05, 0x96, 0x05, 0xa6, 0x05, 0xb5, 0x05, 0xc5, 0x05, 0xd5, 0x05, 0xe5, 0x05, 0xf6, 0x06, 0x06, 0x06, 0x16, 0x06, 0x27, 0x06, 0x37, 0x06, 0x48, 0x06, 0x59, 0x06, 0x6a, 0x06, 0x7b, 0x06, 0x8c, 0x06, 0x9d, 0x06, 0xaf, 0x06, 0xc0, 0x06, 0xd1, 0x06, 0xe3, 0x06, 0xf5, 0x07, 0x07, 0x07, 0x19, 0x07, 0x2b, 0x07, 0x3d, 0x07, 0x4f, 0x07, 0x61, 0x07, 0x74, 0x07, 0x86, 0x07, 0x99, 0x07, 0xac, 0x07, 0xbf, 0x07, 0xd2, 0x07, 0xe5, 0x07, 0xf8, 0x08, 0x0b, 0x08, 0x1f, 0x08, 0x32, 0x08, 0x46, 0x08, 0x5a, 0x08, 0x6e, 0x08, 0x82, 0x08, 0x96, 0x08, 0xaa, 0x08, 0xbe, 0x08, 0xd2, 0x08, 0xe7, 0x08, 0xfb, 0x09, 0x10, 0x09, 0x25, 0x09, 0x3a, 0x09, 0x4f, 0x09, 0x64, 0x09, 0x79, 0x09, 0x8f, 0x09, 0xa4, 0x09, 0xba, 0x09, 0xcf, 
0x09, 0xe5, 0x09, 0xfb, 0x0a, 0x11, 0x0a, 0x27, 0x0a, 0x3d, 0x0a, 0x54, 0x0a, 0x6a, 0x0a, 0x81, 0x0a, 0x98, 0x0a, 0xae, 0x0a, 0xc5, 0x0a, 0xdc, 0x0a, 0xf3, 0x0b, 0x0b, 0x0b, 0x22, 0x0b, 0x39, 0x0b, 0x51, 0x0b, 0x69, 0x0b, 0x80, 0x0b, 0x98, 0x0b, 0xb0, 0x0b, 0xc8, 0x0b, 0xe1, 0x0b, 0xf9, 0x0c, 0x12, 0x0c, 0x2a, 0x0c, 0x43, 0x0c, 0x5c, 0x0c, 0x75, 0x0c, 0x8e, 0x0c, 0xa7, 0x0c, 0xc0, 0x0c, 0xd9, 0x0c, 0xf3, 0x0d, 0x0d, 0x0d, 0x26, 0x0d, 0x40, 0x0d, 0x5a, 0x0d, 0x74, 0x0d, 0x8e, 0x0d, 0xa9, 0x0d, 0xc3, 0x0d, 0xde, 0x0d, 0xf8, 0x0e, 0x13, 0x0e, 0x2e, 0x0e, 0x49, 0x0e, 0x64, 0x0e, 0x7f, 0x0e, 0x9b, 0x0e, 0xb6, 0x0e, 0xd2, 0x0e, 0xee, 0x0f, 0x09, 0x0f, 0x25, 0x0f, 0x41, 0x0f, 0x5e, 0x0f, 0x7a, 0x0f, 0x96, 0x0f, 0xb3, 0x0f, 0xcf, 0x0f, 0xec, 0x10, 0x09, 0x10, 0x26, 0x10, 0x43, 0x10, 0x61, 0x10, 0x7e, 0x10, 0x9b, 0x10, 0xb9, 0x10, 0xd7, 0x10, 0xf5, 0x11, 0x13, 0x11, 0x31, 0x11, 0x4f, 0x11, 0x6d, 0x11, 0x8c, 0x11, 0xaa, 0x11, 0xc9, 0x11, 0xe8, 0x12, 0x07, 0x12, 0x26, 0x12, 0x45, 0x12, 0x64, 0x12, 0x84, 0x12, 0xa3, 0x12, 0xc3, 0x12, 0xe3, 0x13, 0x03, 0x13, 0x23, 0x13, 0x43, 0x13, 0x63, 0x13, 0x83, 0x13, 0xa4, 0x13, 0xc5, 0x13, 0xe5, 0x14, 0x06, 0x14, 0x27, 0x14, 0x49, 0x14, 0x6a, 0x14, 0x8b, 0x14, 0xad, 0x14, 0xce, 0x14, 0xf0, 0x15, 0x12, 0x15, 0x34, 0x15, 0x56, 0x15, 0x78, 0x15, 0x9b, 0x15, 0xbd, 0x15, 0xe0, 0x16, 0x03, 0x16, 0x26, 0x16, 0x49, 0x16, 0x6c, 0x16, 0x8f, 0x16, 0xb2, 0x16, 0xd6, 0x16, 0xfa, 0x17, 0x1d, 0x17, 0x41, 0x17, 0x65, 0x17, 0x89, 0x17, 0xae, 0x17, 0xd2, 0x17, 0xf7, 0x18, 0x1b, 0x18, 0x40, 0x18, 0x65, 0x18, 0x8a, 0x18, 0xaf, 0x18, 0xd5, 0x18, 0xfa, 0x19, 0x20, 0x19, 0x45, 0x19, 0x6b, 0x19, 0x91, 0x19, 0xb7, 0x19, 0xdd, 0x1a, 0x04, 0x1a, 0x2a, 0x1a, 0x51, 0x1a, 0x77, 0x1a, 0x9e, 0x1a, 0xc5, 0x1a, 0xec, 0x1b, 0x14, 0x1b, 0x3b, 0x1b, 0x63, 0x1b, 0x8a, 0x1b, 0xb2, 0x1b, 0xda, 0x1c, 0x02, 0x1c, 0x2a, 0x1c, 0x52, 0x1c, 0x7b, 0x1c, 0xa3, 0x1c, 0xcc, 0x1c, 0xf5, 0x1d, 0x1e, 0x1d, 0x47, 0x1d, 0x70, 0x1d, 0x99, 0x1d, 0xc3, 0x1d, 0xec, 0x1e, 0x16, 0x1e, 0x40, 0x1e, 
0x6a, 0x1e, 0x94, 0x1e, 0xbe, 0x1e, 0xe9, 0x1f, 0x13, 0x1f, 0x3e, 0x1f, 0x69, 0x1f, 0x94, 0x1f, 0xbf, 0x1f, 0xea, 0x20, 0x15, 0x20, 0x41, 0x20, 0x6c, 0x20, 0x98, 0x20, 0xc4, 0x20, 0xf0, 0x21, 0x1c, 0x21, 0x48, 0x21, 0x75, 0x21, 0xa1, 0x21, 0xce, 0x21, 0xfb, 0x22, 0x27, 0x22, 0x55, 0x22, 0x82, 0x22, 0xaf, 0x22, 0xdd, 0x23, 0x0a, 0x23, 0x38, 0x23, 0x66, 0x23, 0x94, 0x23, 0xc2, 0x23, 0xf0, 0x24, 0x1f, 0x24, 0x4d, 0x24, 0x7c, 0x24, 0xab, 0x24, 0xda, 0x25, 0x09, 0x25, 0x38, 0x25, 0x68, 0x25, 0x97, 0x25, 0xc7, 0x25, 0xf7, 0x26, 0x27, 0x26, 0x57, 0x26, 0x87, 0x26, 0xb7, 0x26, 0xe8, 0x27, 0x18, 0x27, 0x49, 0x27, 0x7a, 0x27, 0xab, 0x27, 0xdc, 0x28, 0x0d, 0x28, 0x3f, 0x28, 0x71, 0x28, 0xa2, 0x28, 0xd4, 0x29, 0x06, 0x29, 0x38, 0x29, 0x6b, 0x29, 0x9d, 0x29, 0xd0, 0x2a, 0x02, 0x2a, 0x35, 0x2a, 0x68, 0x2a, 0x9b, 0x2a, 0xcf, 0x2b, 0x02, 0x2b, 0x36, 0x2b, 0x69, 0x2b, 0x9d, 0x2b, 0xd1, 0x2c, 0x05, 0x2c, 0x39, 0x2c, 0x6e, 0x2c, 0xa2, 0x2c, 0xd7, 0x2d, 0x0c, 0x2d, 0x41, 0x2d, 0x76, 0x2d, 0xab, 0x2d, 0xe1, 0x2e, 0x16, 0x2e, 0x4c, 0x2e, 0x82, 0x2e, 0xb7, 0x2e, 0xee, 0x2f, 0x24, 0x2f, 0x5a, 0x2f, 0x91, 0x2f, 0xc7, 0x2f, 0xfe, 0x30, 0x35, 0x30, 0x6c, 0x30, 0xa4, 0x30, 0xdb, 0x31, 0x12, 0x31, 0x4a, 0x31, 0x82, 0x31, 0xba, 0x31, 0xf2, 0x32, 0x2a, 0x32, 0x63, 0x32, 0x9b, 0x32, 0xd4, 0x33, 0x0d, 0x33, 0x46, 0x33, 0x7f, 0x33, 0xb8, 0x33, 0xf1, 0x34, 0x2b, 0x34, 0x65, 0x34, 0x9e, 0x34, 0xd8, 0x35, 0x13, 0x35, 0x4d, 0x35, 0x87, 0x35, 0xc2, 0x35, 0xfd, 0x36, 0x37, 0x36, 0x72, 0x36, 0xae, 0x36, 0xe9, 0x37, 0x24, 0x37, 0x60, 0x37, 0x9c, 0x37, 0xd7, 0x38, 0x14, 0x38, 0x50, 0x38, 0x8c, 0x38, 0xc8, 0x39, 0x05, 0x39, 0x42, 0x39, 0x7f, 0x39, 0xbc, 0x39, 0xf9, 0x3a, 0x36, 0x3a, 0x74, 0x3a, 0xb2, 0x3a, 0xef, 0x3b, 0x2d, 0x3b, 0x6b, 0x3b, 0xaa, 0x3b, 0xe8, 0x3c, 0x27, 0x3c, 0x65, 0x3c, 0xa4, 0x3c, 0xe3, 0x3d, 0x22, 0x3d, 0x61, 0x3d, 0xa1, 0x3d, 0xe0, 0x3e, 0x20, 0x3e, 0x60, 0x3e, 0xa0, 0x3e, 0xe0, 0x3f, 0x21, 0x3f, 0x61, 0x3f, 0xa2, 0x3f, 0xe2, 0x40, 0x23, 0x40, 0x64, 0x40, 0xa6, 0x40, 0xe7, 0x41, 0x29, 
0x41, 0x6a, 0x41, 0xac, 0x41, 0xee, 0x42, 0x30, 0x42, 0x72, 0x42, 0xb5, 0x42, 0xf7, 0x43, 0x3a, 0x43, 0x7d, 0x43, 0xc0, 0x44, 0x03, 0x44, 0x47, 0x44, 0x8a, 0x44, 0xce, 0x45, 0x12, 0x45, 0x55, 0x45, 0x9a, 0x45, 0xde, 0x46, 0x22, 0x46, 0x67, 0x46, 0xab, 0x46, 0xf0, 0x47, 0x35, 0x47, 0x7b, 0x47, 0xc0, 0x48, 0x05, 0x48, 0x4b, 0x48, 0x91, 0x48, 0xd7, 0x49, 0x1d, 0x49, 0x63, 0x49, 0xa9, 0x49, 0xf0, 0x4a, 0x37, 0x4a, 0x7d, 0x4a, 0xc4, 0x4b, 0x0c, 0x4b, 0x53, 0x4b, 0x9a, 0x4b, 0xe2, 0x4c, 0x2a, 0x4c, 0x72, 0x4c, 0xba, 0x4d, 0x02, 0x4d, 0x4a, 0x4d, 0x93, 0x4d, 0xdc, 0x4e, 0x25, 0x4e, 0x6e, 0x4e, 0xb7, 0x4f, 0x00, 0x4f, 0x49, 0x4f, 0x93, 0x4f, 0xdd, 0x50, 0x27, 0x50, 0x71, 0x50, 0xbb, 0x51, 0x06, 0x51, 0x50, 0x51, 0x9b, 0x51, 0xe6, 0x52, 0x31, 0x52, 0x7c, 0x52, 0xc7, 0x53, 0x13, 0x53, 0x5f, 0x53, 0xaa, 0x53, 0xf6, 0x54, 0x42, 0x54, 0x8f, 0x54, 0xdb, 0x55, 0x28, 0x55, 0x75, 0x55, 0xc2, 0x56, 0x0f, 0x56, 0x5c, 0x56, 0xa9, 0x56, 0xf7, 0x57, 0x44, 0x57, 0x92, 0x57, 0xe0, 0x58, 0x2f, 0x58, 0x7d, 0x58, 0xcb, 0x59, 0x1a, 0x59, 0x69, 0x59, 0xb8, 0x5a, 0x07, 0x5a, 0x56, 0x5a, 0xa6, 0x5a, 0xf5, 0x5b, 0x45, 0x5b, 0x95, 0x5b, 0xe5, 0x5c, 0x35, 0x5c, 0x86, 0x5c, 0xd6, 0x5d, 0x27, 0x5d, 0x78, 0x5d, 0xc9, 0x5e, 0x1a, 0x5e, 0x6c, 0x5e, 0xbd, 0x5f, 0x0f, 0x5f, 0x61, 0x5f, 0xb3, 0x60, 0x05, 0x60, 0x57, 0x60, 0xaa, 0x60, 0xfc, 0x61, 0x4f, 0x61, 0xa2, 0x61, 0xf5, 0x62, 0x49, 0x62, 0x9c, 0x62, 0xf0, 0x63, 0x43, 0x63, 0x97, 0x63, 0xeb, 0x64, 0x40, 0x64, 0x94, 0x64, 0xe9, 0x65, 0x3d, 0x65, 0x92, 0x65, 0xe7, 0x66, 0x3d, 0x66, 0x92, 0x66, 0xe8, 0x67, 0x3d, 0x67, 0x93, 0x67, 0xe9, 0x68, 0x3f, 0x68, 0x96, 0x68, 0xec, 0x69, 0x43, 0x69, 0x9a, 0x69, 0xf1, 0x6a, 0x48, 0x6a, 0x9f, 0x6a, 0xf7, 0x6b, 0x4f, 0x6b, 0xa7, 0x6b, 0xff, 0x6c, 0x57, 0x6c, 0xaf, 0x6d, 0x08, 0x6d, 0x60, 0x6d, 0xb9, 0x6e, 0x12, 0x6e, 0x6b, 0x6e, 0xc4, 0x6f, 0x1e, 0x6f, 0x78, 0x6f, 0xd1, 0x70, 0x2b, 0x70, 0x86, 0x70, 0xe0, 0x71, 0x3a, 0x71, 0x95, 0x71, 0xf0, 0x72, 0x4b, 0x72, 0xa6, 0x73, 0x01, 0x73, 0x5d, 0x73, 0xb8, 0x74, 0x14, 0x74, 
0x70, 0x74, 0xcc, 0x75, 0x28, 0x75, 0x85, 0x75, 0xe1, 0x76, 0x3e, 0x76, 0x9b, 0x76, 0xf8, 0x77, 0x56, 0x77, 0xb3, 0x78, 0x11, 0x78, 0x6e, 0x78, 0xcc, 0x79, 0x2a, 0x79, 0x89, 0x79, 0xe7, 0x7a, 0x46, 0x7a, 0xa5, 0x7b, 0x04, 0x7b, 0x63, 0x7b, 0xc2, 0x7c, 0x21, 0x7c, 0x81, 0x7c, 0xe1, 0x7d, 0x41, 0x7d, 0xa1, 0x7e, 0x01, 0x7e, 0x62, 0x7e, 0xc2, 0x7f, 0x23, 0x7f, 0x84, 0x7f, 0xe5, 0x80, 0x47, 0x80, 0xa8, 0x81, 0x0a, 0x81, 0x6b, 0x81, 0xcd, 0x82, 0x30, 0x82, 0x92, 0x82, 0xf4, 0x83, 0x57, 0x83, 0xba, 0x84, 0x1d, 0x84, 0x80, 0x84, 0xe3, 0x85, 0x47, 0x85, 0xab, 0x86, 0x0e, 0x86, 0x72, 0x86, 0xd7, 0x87, 0x3b, 0x87, 0x9f, 0x88, 0x04, 0x88, 0x69, 0x88, 0xce, 0x89, 0x33, 0x89, 0x99, 0x89, 0xfe, 0x8a, 0x64, 0x8a, 0xca, 0x8b, 0x30, 0x8b, 0x96, 0x8b, 0xfc, 0x8c, 0x63, 0x8c, 0xca, 0x8d, 0x31, 0x8d, 0x98, 0x8d, 0xff, 0x8e, 0x66, 0x8e, 0xce, 0x8f, 0x36, 0x8f, 0x9e, 0x90, 0x06, 0x90, 0x6e, 0x90, 0xd6, 0x91, 0x3f, 0x91, 0xa8, 0x92, 0x11, 0x92, 0x7a, 0x92, 0xe3, 0x93, 0x4d, 0x93, 0xb6, 0x94, 0x20, 0x94, 0x8a, 0x94, 0xf4, 0x95, 0x5f, 0x95, 0xc9, 0x96, 0x34, 0x96, 0x9f, 0x97, 0x0a, 0x97, 0x75, 0x97, 0xe0, 0x98, 0x4c, 0x98, 0xb8, 0x99, 0x24, 0x99, 0x90, 0x99, 0xfc, 0x9a, 0x68, 0x9a, 0xd5, 0x9b, 0x42, 0x9b, 0xaf, 0x9c, 0x1c, 0x9c, 0x89, 0x9c, 0xf7, 0x9d, 0x64, 0x9d, 0xd2, 0x9e, 0x40, 0x9e, 0xae, 0x9f, 0x1d, 0x9f, 0x8b, 0x9f, 0xfa, 0xa0, 0x69, 0xa0, 0xd8, 0xa1, 0x47, 0xa1, 0xb6, 0xa2, 0x26, 0xa2, 0x96, 0xa3, 0x06, 0xa3, 0x76, 0xa3, 0xe6, 0xa4, 0x56, 0xa4, 0xc7, 0xa5, 0x38, 0xa5, 0xa9, 0xa6, 0x1a, 0xa6, 0x8b, 0xa6, 0xfd, 0xa7, 0x6e, 0xa7, 0xe0, 0xa8, 0x52, 0xa8, 0xc4, 0xa9, 0x37, 0xa9, 0xa9, 0xaa, 0x1c, 0xaa, 0x8f, 0xab, 0x02, 0xab, 0x75, 0xab, 0xe9, 0xac, 0x5c, 0xac, 0xd0, 0xad, 0x44, 0xad, 0xb8, 0xae, 0x2d, 0xae, 0xa1, 0xaf, 0x16, 0xaf, 0x8b, 0xb0, 0x00, 0xb0, 0x75, 0xb0, 0xea, 0xb1, 0x60, 0xb1, 0xd6, 0xb2, 0x4b, 0xb2, 0xc2, 0xb3, 0x38, 0xb3, 0xae, 0xb4, 0x25, 0xb4, 0x9c, 0xb5, 0x13, 0xb5, 0x8a, 0xb6, 0x01, 0xb6, 0x79, 0xb6, 0xf0, 0xb7, 0x68, 0xb7, 0xe0, 0xb8, 0x59, 0xb8, 0xd1, 0xb9, 0x4a, 
0xb9, 0xc2, 0xba, 0x3b, 0xba, 0xb5, 0xbb, 0x2e, 0xbb, 0xa7, 0xbc, 0x21, 0xbc, 0x9b, 0xbd, 0x15, 0xbd, 0x8f, 0xbe, 0x0a, 0xbe, 0x84, 0xbe, 0xff, 0xbf, 0x7a, 0xbf, 0xf5, 0xc0, 0x70, 0xc0, 0xec, 0xc1, 0x67, 0xc1, 0xe3, 0xc2, 0x5f, 0xc2, 0xdb, 0xc3, 0x58, 0xc3, 0xd4, 0xc4, 0x51, 0xc4, 0xce, 0xc5, 0x4b, 0xc5, 0xc8, 0xc6, 0x46, 0xc6, 0xc3, 0xc7, 0x41, 0xc7, 0xbf, 0xc8, 0x3d, 0xc8, 0xbc, 0xc9, 0x3a, 0xc9, 0xb9, 0xca, 0x38, 0xca, 0xb7, 0xcb, 0x36, 0xcb, 0xb6, 0xcc, 0x35, 0xcc, 0xb5, 0xcd, 0x35, 0xcd, 0xb5, 0xce, 0x36, 0xce, 0xb6, 0xcf, 0x37, 0xcf, 0xb8, 0xd0, 0x39, 0xd0, 0xba, 0xd1, 0x3c, 0xd1, 0xbe, 0xd2, 0x3f, 0xd2, 0xc1, 0xd3, 0x44, 0xd3, 0xc6, 0xd4, 0x49, 0xd4, 0xcb, 0xd5, 0x4e, 0xd5, 0xd1, 0xd6, 0x55, 0xd6, 0xd8, 0xd7, 0x5c, 0xd7, 0xe0, 0xd8, 0x64, 0xd8, 0xe8, 0xd9, 0x6c, 0xd9, 0xf1, 0xda, 0x76, 0xda, 0xfb, 0xdb, 0x80, 0xdc, 0x05, 0xdc, 0x8a, 0xdd, 0x10, 0xdd, 0x96, 0xde, 0x1c, 0xde, 0xa2, 0xdf, 0x29, 0xdf, 0xaf, 0xe0, 0x36, 0xe0, 0xbd, 0xe1, 0x44, 0xe1, 0xcc, 0xe2, 0x53, 0xe2, 0xdb, 0xe3, 0x63, 0xe3, 0xeb, 0xe4, 0x73, 0xe4, 0xfc, 0xe5, 0x84, 0xe6, 0x0d, 0xe6, 0x96, 0xe7, 0x1f, 0xe7, 0xa9, 0xe8, 0x32, 0xe8, 0xbc, 0xe9, 0x46, 0xe9, 0xd0, 0xea, 0x5b, 0xea, 0xe5, 0xeb, 0x70, 0xeb, 0xfb, 0xec, 0x86, 0xed, 0x11, 0xed, 0x9c, 0xee, 0x28, 0xee, 0xb4, 0xef, 0x40, 0xef, 0xcc, 0xf0, 0x58, 0xf0, 0xe5, 0xf1, 0x72, 0xf1, 0xff, 0xf2, 0x8c, 0xf3, 0x19, 0xf3, 0xa7, 0xf4, 0x34, 0xf4, 0xc2, 0xf5, 0x50, 0xf5, 0xde, 0xf6, 0x6d, 0xf6, 0xfb, 0xf7, 0x8a, 0xf8, 0x19, 0xf8, 0xa8, 0xf9, 0x38, 0xf9, 0xc7, 0xfa, 0x57, 0xfa, 0xe7, 0xfb, 0x77, 0xfc, 0x07, 0xfc, 0x98, 0xfd, 0x29, 0xfd, 0xba, 0xfe, 0x4b, 0xfe, 0xdc, 0xff, 0x6d, 0xff, 0xff }; StringInfo *profile; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (GetImageProfile(image,"icc") != (const StringInfo *) NULL) return(MagickFalse); profile=AcquireStringInfo(sizeof(sRGBProfile)); SetStringInfoDatum(profile,sRGBProfile); status=SetImageProfile(image,"icc",profile); 
  profile=DestroyStringInfo(profile);
  return(status);
}

/*
  ProfileImage() associates, applies, or removes an ICM, IPTC, or generic
  profile (see the banner comment above).  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType ProfileImage(Image *image,const char *name,
  const void *datum,const size_t length,
  const MagickBooleanType magick_unused(clone))
{
/*
  Map a quantum sample into the double range LCMS expects, and back.
*/
#define GetLCMSPixel(source_info,pixel) \
  (source_info.scale*QuantumScale*(pixel)+source_info.translate)
#define ProfileImageTag  "Profile/Image"
#define SetLCMSPixel(target_info,pixel) \
  ClampToQuantum(target_info.scale*QuantumRange*(pixel)+target_info.translate)
/*
  Release every resource acquired so far (profile copy, LCMS context and
  profile handles), then throw.
*/
#define ThrowProfileException(severity,tag,context) \
{ \
  if (profile != (StringInfo *) NULL) \
    profile=DestroyStringInfo(profile); \
  if (cms_context != (cmsContext) NULL) \
    cmsDeleteContext(cms_context); \
  if (source_info.profile != (cmsHPROFILE) NULL) \
    (void) cmsCloseProfile(source_info.profile); \
  if (target_info.profile != (cmsHPROFILE) NULL) \
    (void) cmsCloseProfile(target_info.profile); \
  ThrowBinaryException(severity,tag,context); \
}

  MagickBooleanType
    status;

  StringInfo
    *profile;

  magick_unreferenced(clone);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(name != (const char *) NULL);
  if ((datum == (const void *) NULL) || (length == 0))
    {
      char
        *next;

      /*
        No payload given: delete every image profile whose name matches
        (name may be a glob such as '*').
      */
      ResetImageProfileIterator(image);
      for (next=GetNextImageProfile(image); next != (const char *) NULL; )
      {
        if (IsOptionMember(next,name) != MagickFalse)
          {
            /*
              Deleting a node invalidates the iterator, so restart it.
            */
            (void) DeleteImageProfile(image,next);
            ResetImageProfileIterator(image);
          }
        next=GetNextImageProfile(image);
      }
      return(MagickTrue);
    }
  /*
    Add a ICC, IPTC, or generic profile to the image.
  */
  status=MagickTrue;
  profile=AcquireStringInfo((size_t) length);
  SetStringInfoDatum(profile,(unsigned char *) datum);
  if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
    status=SetImageProfile(image,name,profile);
  else
    {
      const StringInfo
        *icc_profile;

      icc_profile=GetImageProfile(image,"icc");
      if ((icc_profile != (const StringInfo *) NULL) &&
          (CompareStringInfo(icc_profile,profile) == 0))
        {
          const char
            *value;

          /*
            The same ICC profile is being resubmitted: substitute the
            built-in sRGB profile unless EXIF metadata says otherwise.
            NOTE(review): value may be NULL here; presumably
            LocaleCompare() tolerates a NULL argument -- confirm.
          */
          value=GetImageProperty(image,"exif:ColorSpace");
          (void) value;
          if (LocaleCompare(value,"1") != 0)
            (void) SetsRGBImageProfile(image);
          value=GetImageProperty(image,"exif:InteroperabilityIndex");
          if (LocaleCompare(value,"R98.") != 0)
            (void) SetsRGBImageProfile(image);
          icc_profile=GetImageProfile(image,"icc");
        }
      if ((icc_profile != (const StringInfo *) NULL) &&
          (CompareStringInfo(icc_profile,profile) == 0))
        {
          /*
            The identical profile is already attached; nothing to do.
          */
          profile=DestroyStringInfo(profile);
          return(MagickTrue);
        }
#if !defined(MAGICKCORE_LCMS_DELEGATE)
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (LCMS)",
        image->filename);
#else
      {
        cmsContext
          cms_context;

        LCMSInfo
          source_info,
          target_info;

        /*
          Transform pixel colors as defined by the color profiles.
        */
        cms_context=cmsCreateContext(NULL,image);
        if (cms_context == (cmsContext) NULL)
          ThrowBinaryImageException(ResourceLimitError,
            "ColorspaceColorProfileMismatch",name);
        cmsSetLogErrorHandlerTHR(cms_context,LCMSExceptionHandler);
        source_info.profile=cmsOpenProfileFromMemTHR(cms_context,
          GetStringInfoDatum(profile),(cmsUInt32Number)
          GetStringInfoLength(profile));
        if (source_info.profile == (cmsHPROFILE) NULL)
          {
            cmsDeleteContext(cms_context);
            ThrowBinaryImageException(ResourceLimitError,
              "ColorspaceColorProfileMismatch",name);
          }
        if ((cmsGetDeviceClass(source_info.profile) != cmsSigLinkClass) &&
            (icc_profile == (StringInfo *) NULL))
          status=SetImageProfile(image,name,profile);
        else
          {
            CacheView
              *image_view;

            cmsColorSpaceSignature
              signature;

            cmsHTRANSFORM
              *magick_restrict transform;

            cmsUInt32Number
              flags;

            ExceptionInfo
              *exception;

            MagickOffsetType
              progress;

            ssize_t
              y;

            exception=(&image->exception);
            target_info.profile=(cmsHPROFILE) NULL;
            if (icc_profile != (StringInfo *) NULL)
              {
                /*
                  An ICC profile is already attached: transform from it
                  (the source) to the caller-supplied profile (the target).
                */
                target_info.profile=source_info.profile;
                source_info.profile=cmsOpenProfileFromMemTHR(cms_context,
                  GetStringInfoDatum(icc_profile),(cmsUInt32Number)
                  GetStringInfoLength(icc_profile));
                if (source_info.profile == (cmsHPROFILE) NULL)
                  ThrowProfileException(ResourceLimitError,
                    "ColorspaceColorProfileMismatch",name);
              }
            /*
              Configure the source side from the profile's colorspace;
              scale/translate map quantum samples into each space's
              numeric range (e.g. 0..100 for CMYK percent).
            */
            source_info.scale=1.0;
            source_info.translate=0.0;
            source_info.colorspace=sRGBColorspace;
            source_info.channels=3;
            switch (cmsGetColorSpace(source_info.profile))
            {
              case cmsSigCmykData:
              {
                source_info.colorspace=CMYKColorspace;
                source_info.channels=4;
                source_info.type=(cmsUInt32Number) TYPE_CMYK_DBL;
                source_info.scale=100.0;
                break;
              }
              case cmsSigGrayData:
              {
                source_info.colorspace=GRAYColorspace;
                source_info.channels=1;
                source_info.type=(cmsUInt32Number) TYPE_GRAY_DBL;
                break;
              }
              case cmsSigLabData:
              {
                source_info.colorspace=LabColorspace;
                source_info.type=(cmsUInt32Number) TYPE_Lab_DBL;
                source_info.scale=100.0;
                source_info.translate=(-0.5);
                break;
              }
              case cmsSigRgbData:
              {
                source_info.colorspace=sRGBColorspace;
                source_info.type=(cmsUInt32Number) TYPE_RGB_DBL;
                break;
              }
              case cmsSigXYZData:
              {
                source_info.colorspace=XYZColorspace;
                source_info.type=(cmsUInt32Number) TYPE_XYZ_DBL;
                break;
              }
              default:
                ThrowProfileException(ImageError,
                  "ColorspaceColorProfileMismatch",name);
            }
            /*
              Configure the target side; without a target profile the
              source profile's PCS decides the destination colorspace.
            */
            signature=cmsGetPCS(source_info.profile);
            if (target_info.profile != (cmsHPROFILE) NULL)
              signature=cmsGetColorSpace(target_info.profile);
            target_info.scale=1.0;
            target_info.translate=0.0;
            target_info.channels=3;
            switch (signature)
            {
              case cmsSigCmykData:
              {
                target_info.colorspace=CMYKColorspace;
                target_info.channels=4;
                target_info.type=(cmsUInt32Number) TYPE_CMYK_DBL;
                target_info.scale=0.01;
                break;
              }
              case cmsSigGrayData:
              {
                target_info.colorspace=GRAYColorspace;
                target_info.channels=1;
                target_info.type=(cmsUInt32Number) TYPE_GRAY_DBL;
                break;
              }
              case cmsSigLabData:
              {
                target_info.colorspace=LabColorspace;
                target_info.type=(cmsUInt32Number) TYPE_Lab_DBL;
                target_info.scale=0.01;
                target_info.translate=0.5;
                break;
              }
              case cmsSigRgbData:
              {
                target_info.colorspace=sRGBColorspace;
                target_info.type=(cmsUInt32Number) TYPE_RGB_DBL;
                break;
              }
              case cmsSigXYZData:
              {
                target_info.colorspace=XYZColorspace;
                target_info.type=(cmsUInt32Number) TYPE_XYZ_DBL;
                break;
              }
              default:
                ThrowProfileException(ImageError,
                  "ColorspaceColorProfileMismatch",name);
            }
            /*
              Map the image's rendering intent onto an LCMS intent.
            */
            switch (image->rendering_intent)
            {
              case AbsoluteIntent:
              {
                target_info.intent=INTENT_ABSOLUTE_COLORIMETRIC;
                break;
              }
              case PerceptualIntent:
              {
                target_info.intent=INTENT_PERCEPTUAL;
                break;
              }
              case RelativeIntent:
              {
                target_info.intent=INTENT_RELATIVE_COLORIMETRIC;
                break;
              }
              case SaturationIntent:
              {
                target_info.intent=INTENT_SATURATION;
                break;
              }
              default:
              {
                target_info.intent=INTENT_PERCEPTUAL;
                break;
              }
            }
            flags=cmsFLAGS_HIGHRESPRECALC;
#if defined(cmsFLAGS_BLACKPOINTCOMPENSATION)
            if (image->black_point_compensation != MagickFalse)
              flags|=cmsFLAGS_BLACKPOINTCOMPENSATION;
#endif
            transform=AcquireTransformThreadSet(&source_info,&target_info,
              flags,cms_context);
            if (transform == (cmsHTRANSFORM *) NULL)
              ThrowProfileException(ImageError,"UnableToCreateColorTransform",
                name);
            /*
              Transform image as dictated by the source & target image
              profiles.
            */
            source_info.pixels=AcquirePixelThreadSet(image->columns,
              source_info.channels);
            target_info.pixels=AcquirePixelThreadSet(image->columns,
              target_info.channels);
            if ((source_info.pixels == (double **) NULL) ||
                (target_info.pixels == (double **) NULL))
              {
                target_info.pixels=DestroyPixelThreadSet(target_info.pixels);
                source_info.pixels=DestroyPixelThreadSet(source_info.pixels);
                transform=DestroyTransformThreadSet(transform);
                ThrowProfileException(ResourceLimitError,
                  "MemoryAllocationFailed",image->filename);
              }
            if (SetImageStorageClass(image,DirectClass) == MagickFalse)
              {
                /*
                  Manual cleanup here: ThrowProfileException() would throw,
                  but this path returns MagickFalse instead.
                */
                target_info.pixels=DestroyPixelThreadSet(target_info.pixels);
                source_info.pixels=DestroyPixelThreadSet(source_info.pixels);
                transform=DestroyTransformThreadSet(transform);
                profile=DestroyStringInfo(profile);
                if (source_info.profile != (cmsHPROFILE) NULL)
                  (void) cmsCloseProfile(source_info.profile);
                if (target_info.profile != (cmsHPROFILE) NULL)
                  (void) cmsCloseProfile(target_info.profile);
                return(MagickFalse);
              }
            if (target_info.colorspace == CMYKColorspace)
              (void) SetImageColorspace(image,target_info.colorspace);
            progress=0;
            image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp parallel for schedule(static) shared(status) \
              magick_number_threads(image,image,image->rows,1)
#endif
            for (y=0; y < (ssize_t) image->rows; y++)
            {
              const int
                id = GetOpenMPThreadId();

              MagickBooleanType
                sync;

              register IndexPacket
                *magick_restrict indexes;

              register double
                *p;

              register PixelPacket
                *magick_restrict q;

              register ssize_t
                x;

              if (status == MagickFalse)
                continue;
              q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
                exception);
              if (q == (PixelPacket *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              indexes=GetCacheViewAuthenticIndexQueue(image_view);
              /*
                Pack this scanline into the thread's source sample buffer.
              */
              p=source_info.pixels[id];
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                *p++=GetLCMSPixel(source_info,GetPixelRed(q));
                if (source_info.channels > 1)
                  {
                    *p++=GetLCMSPixel(source_info,GetPixelGreen(q));
                    *p++=GetLCMSPixel(source_info,GetPixelBlue(q));
                  }
                if (source_info.channels > 3)
                  {
                    /* 4th channel (e.g. CMYK black) lives in the index
                       queue when one is present */
                    *p=GetLCMSPixel(source_info,0);
                    if (indexes != (IndexPacket *) NULL)
                      *p=GetLCMSPixel(source_info,GetPixelIndex(indexes+x));
                    p++;
                  }
                q++;
              }
              cmsDoTransform(transform[id],source_info.pixels[id],
                target_info.pixels[id],(unsigned int) image->columns);
              /*
                Unpack the transformed samples back into the pixel cache;
                a 1-channel target replicates gray into G and B.
              */
              p=target_info.pixels[id];
              q-=image->columns;
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                SetPixelRed(q,SetLCMSPixel(target_info,*p));
                SetPixelGreen(q,GetPixelRed(q));
                SetPixelBlue(q,GetPixelRed(q));
                p++;
                if (target_info.channels > 1)
                  {
                    SetPixelGreen(q,SetLCMSPixel(target_info,*p));
                    p++;
                    SetPixelBlue(q,SetLCMSPixel(target_info,*p));
                    p++;
                  }
                if (target_info.channels > 3)
                  {
                    if (indexes != (IndexPacket *) NULL)
                      SetPixelIndex(indexes+x,SetLCMSPixel(target_info,*p));
                    p++;
                  }
                q++;
              }
              sync=SyncCacheViewAuthenticPixels(image_view,exception);
              if (sync == MagickFalse)
                status=MagickFalse;
              if (image->progress_monitor != (MagickProgressMonitor) NULL)
                {
                  MagickBooleanType
                    proceed;

                  /*
                    NOTE(review): only the increment is atomic; the read of
                    progress below is unsynchronized -- presumably an
                    acceptable approximation for progress reporting.
                  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
                  #pragma omp atomic
#endif
                  progress++;
                  proceed=SetImageProgress(image,ProfileImageTag,progress,
                    image->rows);
                  if (proceed == MagickFalse)
                    status=MagickFalse;
                }
            }
            image_view=DestroyCacheView(image_view);
            (void) SetImageColorspace(image,target_info.colorspace);
            /*
              Set the image type to match the destination colorspace,
              preserving the matte (alpha) attribute.
            */
            switch (signature)
            {
              case cmsSigRgbData:
              {
                image->type=image->matte == MagickFalse ? TrueColorType :
                  TrueColorMatteType;
                break;
              }
              case cmsSigCmykData:
              {
                image->type=image->matte == MagickFalse ?
                  ColorSeparationType : ColorSeparationMatteType;
                break;
              }
              case cmsSigGrayData:
              {
                image->type=image->matte == MagickFalse ? GrayscaleType :
                  GrayscaleMatteType;
                break;
              }
              default:
                break;
            }
            target_info.pixels=DestroyPixelThreadSet(target_info.pixels);
            source_info.pixels=DestroyPixelThreadSet(source_info.pixels);
            transform=DestroyTransformThreadSet(transform);
            if ((status != MagickFalse) &&
                (cmsGetDeviceClass(source_info.profile) != cmsSigLinkClass))
              status=SetImageProfile(image,name,profile);
            if (target_info.profile != (cmsHPROFILE) NULL)
              (void) cmsCloseProfile(target_info.profile);
          }
        (void) cmsCloseProfile(source_info.profile);
        cmsDeleteContext(cms_context);
      }
#endif
    }
  profile=DestroyStringInfo(profile);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e m o v e I m a g e P r o f i l e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RemoveImageProfile() removes a named profile from the image and returns
%  its value.
%
%  The format of the RemoveImageProfile method is:
%
%      void *RemoveImageProfile(Image *image,const char *name)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name.
%
*/
MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name)
{
  StringInfo
    *profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  if (LocaleCompare(name,"icc") == 0)
    {
      /*
        Continue to support deprecated color profile for now.
      */
      image->color_profile.length=0;
      image->color_profile.info=(unsigned char *) NULL;
    }
  if (LocaleCompare(name,"iptc") == 0)
    {
      /*
        Continue to support deprecated IPTC profile for now.
*/
  image->iptc_profile.length=0;
  image->iptc_profile.info=(unsigned char *) NULL;
    }
  /* Purge any copy of this profile nested inside the 8BIM wrapper too. */
  WriteTo8BimProfile(image,name,(StringInfo *) NULL);
  /* Detach the profile from the splay-tree; ownership passes to the caller. */
  profile=(StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *)
    image->profiles,name);
  return(profile);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s e t P r o f i l e I t e r a t o r                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetImageProfileIterator() resets the image profile iterator.  Use it in
%  conjunction with GetNextImageProfile() to iterate over all the profiles
%  associated with an image.
%
%  The format of the ResetImageProfileIterator method is:
%
%      ResetImageProfileIterator(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void ResetImageProfileIterator(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* No profile map yet: nothing to reset. */
  if (image->profiles == (SplayTreeInfo *) NULL)
    return;
  ResetSplayTreeIterator((SplayTreeInfo *) image->profiles);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e P r o f i l e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageProfile() adds a named profile to the image.  If a profile with the
%  same name already exists, it is replaced.  This method differs from the
%  ProfileImage() method in that it does not apply CMS color profiles.
%
%  The format of the SetImageProfile method is:
%
%      MagickBooleanType SetImageProfile(Image *image,const char *name,
%        const StringInfo *profile)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name, for example icc, exif, and 8bim (8bim is the
%      Photoshop wrapper for iptc profiles).
%
%    o profile: A StringInfo structure that contains the named profile.
%
*/

/* Splay-tree value destructor: releases a StringInfo-held profile. */
static void *DestroyProfile(void *profile)
{
  return((void *) DestroyStringInfo((StringInfo *) profile));
}

/* Read one byte from a Photoshop resource block; returns the advanced
   pointer. */
static inline const unsigned char *ReadResourceByte(const unsigned char *p,
  unsigned char *quantum)
{
  *quantum=(*p++);
  return(p);
}

/* Read a 32-bit value stored MSB-first (the byte order used by Photoshop
   resource blocks, as the shift pattern below shows). */
static inline const unsigned char *ReadResourceLong(const unsigned char *p,
  unsigned int *quantum)
{
  *quantum=(unsigned int) (*p++) << 24;
  *quantum|=(unsigned int) (*p++) << 16;
  *quantum|=(unsigned int) (*p++) << 8;
  *quantum|=(unsigned int) (*p++);
  return(p);
}

/* Read a 16-bit value stored MSB-first. */
static inline const unsigned char *ReadResourceShort(const unsigned char *p,
  unsigned short *quantum)
{
  *quantum=(unsigned short) (*p++) << 8;
  *quantum|=(unsigned short) (*p++);
  return(p);
}

/* Store a 32-bit value MSB-first at p (p must have room for 4 bytes). */
static inline void WriteResourceLong(unsigned char *p,
  const unsigned int quantum)
{
  unsigned char
    buffer[4];

  buffer[0]=(unsigned char) (quantum >> 24);
  buffer[1]=(unsigned char) (quantum >> 16);
  buffer[2]=(unsigned char) (quantum >> 8);
  buffer[3]=(unsigned char) quantum;
  (void) memcpy(p,buffer,4);
}

/*
  Mirror the named profile (icc, iptc, or xmp) into the image's "8bim"
  wrapper profile: the matching 8BIM resource is replaced with the new
  payload, or removed entirely when profile is NULL.  Other profile names
  are ignored.
*/
static void WriteTo8BimProfile(Image *image,const char *name,
  const StringInfo *profile)
{
  const unsigned char
    *datum,
    *q;

  register const unsigned char
    *p;

  size_t
    length;

  StringInfo
    *profile_8bim;

  ssize_t
    count;

  unsigned char
    length_byte;

  unsigned int
    value;

  unsigned short
    id,
    profile_id;

  /* Map the profile name to its Photoshop resource id. */
  if (LocaleCompare(name,"icc") == 0)
    profile_id=0x040f;
  else
    if (LocaleCompare(name,"iptc") == 0)
      profile_id=0x0404;
    else
      if (LocaleCompare(name,"xmp") == 0)
        profile_id=0x0424;
      else
        return;
  profile_8bim=(StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
    image->profiles,"8bim");
  if (profile_8bim == (StringInfo *) NULL)
    return;
  datum=GetStringInfoDatum(profile_8bim);
  length=GetStringInfoLength(profile_8bim);
  /* Walk the sequence of "8BIM" resource records. */
  for (p=datum; p < (datum+length-16); )
  {
    q=p;  /* start of the current record */
    if (LocaleNCompare((char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=ReadResourceShort(p,&id);
    /* Pascal-style name: length byte + characters, padded to even size. */
    p=ReadResourceByte(p,&length_byte);
    p+=length_byte;
    if (((length_byte+1) & 0x01) != 0)
      p++;
    if (p > (datum+length-4))
      break;
    p=ReadResourceLong(p,&value);
    count=(ssize_t) value;
    if ((count & 0x01) != 0)
      count++;  /* payload is padded to an even byte count */
    if ((count < 0) || (p > (datum+length-count)) ||
        (count > (ssize_t) length))
      break;
    if (id != profile_id)
      p+=count;
    else
      {
        size_t
          extent,
          offset;

        ssize_t
          extract_extent;

        StringInfo
          *extract_profile;

        /* Found the matching resource: rebuild the 8bim profile around it. */
        extract_extent=0;
        extent=(datum+length)-(p+count);  /* bytes after this record */
        if (profile == (StringInfo *) NULL)
          {
            /* Remove: copy everything before the record... */
            offset=(q-datum);
            extract_profile=AcquireStringInfo(offset+extent);
            (void) memcpy(extract_profile->datum,datum,offset);
          }
        else
          {
            /* Replace: keep the header, patch the length, splice payload. */
            offset=(p-datum);
            extract_extent=profile->length;
            if ((extract_extent & 0x01) != 0)
              extract_extent++;
            extract_profile=AcquireStringInfo(offset+extract_extent+extent);
            (void) memcpy(extract_profile->datum,datum,offset-4);
            WriteResourceLong(extract_profile->datum+offset-4,(unsigned int)
              profile->length);
            (void) memcpy(extract_profile->datum+offset,
              profile->datum,profile->length);
          }
        /* ...then append everything after the record. */
        (void) memcpy(extract_profile->datum+offset+extract_extent,
          p+count,extent);
        (void) AddValueToSplayTree((SplayTreeInfo *) image->profiles,
          ConstantString("8bim"),CloneStringInfo(extract_profile));
        extract_profile=DestroyStringInfo(extract_profile);
        break;
      }
  }
}

/*
  Parse a Photoshop 8BIM resource block and register the embedded profiles
  (iptc, icc, exif, xmp) on the image; resource 0x03ed also updates the
  image resolution.  SetImageProfileInternal() is called recursively with
  recursive == MagickTrue so the extracted profiles are not written back
  into the 8bim wrapper.
*/
static void GetProfilesFromResourceBlock(Image *image,
  const StringInfo *resource_block)
{
  const unsigned char
    *datum;

  register const unsigned char
    *p;

  size_t
    length;

  ssize_t
    count;

  StringInfo
    *profile;

  unsigned char
    length_byte;

  unsigned int
    value;

  unsigned short
    id;

  datum=GetStringInfoDatum(resource_block);
  length=GetStringInfoLength(resource_block);
  for (p=datum; p < (datum+length-16); )
  {
    if (LocaleNCompare((char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=ReadResourceShort(p,&id);
    /* Skip the padded Pascal-style resource name. */
    p=ReadResourceByte(p,&length_byte);
    p+=length_byte;
    if (((length_byte+1) & 0x01) != 0)
      p++;
    if (p > (datum+length-4))
      break;
    p=ReadResourceLong(p,&value);
    count=(ssize_t) value;
    if ((p > (datum+length-count)) || (count > (ssize_t) length) ||
        (count < 0))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        unsigned int
          resolution;

        unsigned short
          units;

        /*
          Resolution.
        */
        if (count < 10)
          break;
        p=ReadResourceLong(p,&resolution);
        image->x_resolution=((double) resolution)/65536.0;
        p=ReadResourceShort(p,&units)+2;
        p=ReadResourceLong(p,&resolution)+4;
        image->y_resolution=((double) resolution)/65536.0;
        /*
          Values are always stored as pixels per inch.
        */
        if ((ResolutionType) units != PixelsPerCentimeterResolution)
          image->units=PixelsPerInchResolution;
        else
          {
            image->units=PixelsPerCentimeterResolution;
            image->x_resolution/=2.54;
            image->y_resolution/=2.54;
          }
        break;
      }
      case 0x0404:
      {
        /*
          IPTC Profile
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"iptc",profile,MagickTrue);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x040c:
      {
        /*
          Thumbnail.
        */
        p+=count;
        break;
      }
      case 0x040f:
      {
        /*
          ICC Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"icc",profile,MagickTrue);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0422:
      {
        /*
          EXIF Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"exif",profile,MagickTrue);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0424:
      {
        /*
          XMP Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"xmp",profile,MagickTrue);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      default:
      {
        p+=count;
        break;
      }
    }
    if ((count & 0x01) != 0)
      p++;  /* payloads are padded to an even byte count */
  }
}

#if defined(MAGICKCORE_XML_DELEGATE)
/* Validate an XMP profile by parsing it with libxml2; returns MagickFalse
   (with a warning raised) when the XML is malformed. */
static MagickBooleanType ValidateXMPProfile(Image *image,
  const StringInfo *profile)
{
  xmlDocPtr
    document;

  /*
    Parse XML profile.
  */
  document=xmlReadMemory((const char *) GetStringInfoDatum(profile),(int)
    GetStringInfoLength(profile),"xmp.xml",NULL,XML_PARSE_NOERROR |
    XML_PARSE_NOWARNING);
  if (document == (xmlDocPtr) NULL)
    {
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ImageWarning,"CorruptImageProfile","`%s' (XMP)",image->filename);
      return(MagickFalse);
    }
  xmlFreeDoc(document);
  return(MagickTrue);
}
#else
/* Without the XML delegate we cannot validate XMP; always report failure. */
static MagickBooleanType ValidateXMPProfile(Image *image,
  const StringInfo *profile)
{
  (void) ThrowMagickException(&image->exception,GetMagickModule(),
    MissingDelegateError,"DelegateLibrarySupportNotBuiltIn","'%s' (XML)",
    image->filename);
  return(MagickFalse);
}
#endif

/*
  Core profile setter.  Stores a clone of the profile under the lower-cased
  name, refreshes the deprecated color_profile/iptc_profile members, and
  keeps the 8bim wrapper in sync.  recursive == MagickTrue suppresses the
  write-back into the 8bim wrapper (used when extracting profiles FROM an
  8bim block, to avoid infinite recursion).
*/
static MagickBooleanType SetImageProfileInternal(Image *image,const char *name,
  const StringInfo *profile,const MagickBooleanType recursive)
{
  char
    key[MaxTextExtent];

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Note: an invalid XMP profile is silently skipped (returns MagickTrue). */
  if ((LocaleCompare(name,"xmp") == 0) &&
      (ValidateXMPProfile(image,profile) == MagickFalse))
    return(MagickTrue);
  if (image->profiles == (SplayTreeInfo *) NULL)
    image->profiles=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
      DestroyProfile);
  (void) CopyMagickString(key,name,MaxTextExtent);
  LocaleLower(key);
  status=AddValueToSplayTree((SplayTreeInfo *) image->profiles,
    ConstantString(key),CloneStringInfo(profile));
  if ((status != MagickFalse) &&
      ((LocaleCompare(name,"icc") == 0) || (LocaleCompare(name,"icm") == 0)))
    {
      const StringInfo
        *icc_profile;

      /*
        Continue to support deprecated color profile member.
      */
      icc_profile=GetImageProfile(image,name);
      if (icc_profile != (const StringInfo *) NULL)
        {
          image->color_profile.length=GetStringInfoLength(icc_profile);
          image->color_profile.info=GetStringInfoDatum(icc_profile);
        }
    }
  if ((status != MagickFalse) &&
      ((LocaleCompare(name,"iptc") == 0) || (LocaleCompare(name,"8bim") == 0)))
    {
      const StringInfo
        *iptc_profile;

      /*
        Continue to support deprecated IPTC profile member.
      */
      iptc_profile=GetImageProfile(image,name);
      if (iptc_profile != (const StringInfo *) NULL)
        {
          image->iptc_profile.length=GetStringInfoLength(iptc_profile);
          image->iptc_profile.info=GetStringInfoDatum(iptc_profile);
        }
    }
  if (status != MagickFalse)
    {
      if (LocaleCompare(name,"8bim") == 0)
        GetProfilesFromResourceBlock(image,profile);
      else
        if (recursive == MagickFalse)
          WriteTo8BimProfile(image,name,profile);
    }
  return(status);
}

MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name,
  const StringInfo *profile)
{
  return(SetImageProfileInternal(image,name,profile,MagickFalse));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S y n c I m a g e P r o f i l e s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImageProfiles() synchronizes image properties with the image profiles.
%  Currently we only support updating the EXIF resolution and orientation.
%
%  The format of the SyncImageProfiles method is:
%
%      MagickBooleanType SyncImageProfiles(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/

/* Consume one byte from the buffer tracked by (*p,*length); EOF when
   exhausted. */
static inline int ReadProfileByte(unsigned char **p,size_t *length)
{
  int
    c;

  if (*length < 1)
    return(EOF);
  c=(int) (*(*p)++);
  (*length)--;
  return(c);
}

/* Read a 16-bit value in the given byte order; the union round-trip
   reinterprets the unsigned bits as a signed result without UB. */
static inline signed short ReadProfileShort(const EndianType endian,
  unsigned char *buffer)
{
  union
  {
    unsigned int
      unsigned_value;

    signed int
      signed_value;
  } quantum;

  unsigned short
    value;

  if (endian == LSBEndian)
    {
      value=(unsigned short) buffer[1] << 8;
      value|=(unsigned short) buffer[0];
      quantum.unsigned_value=value & 0xffff;
      return(quantum.signed_value);
    }
  value=(unsigned short) buffer[0] << 8;
  value|=(unsigned short) buffer[1];
  quantum.unsigned_value=value & 0xffff;
  return(quantum.signed_value);
}

/* Read a 32-bit value in the given byte order (same union trick as above). */
static inline signed int ReadProfileLong(const EndianType endian,
  unsigned char *buffer)
{
  union
  {
    unsigned int
      unsigned_value;

    signed int
      signed_value;
  } quantum;

  unsigned int
    value;

  if (endian == LSBEndian)
    {
      value=(unsigned int) buffer[3] << 24;
      value|=(unsigned int) buffer[2] << 16;
      value|=(unsigned int) buffer[1] << 8;
      value|=(unsigned int) buffer[0];
      quantum.unsigned_value=value & 0xffffffff;
      return(quantum.signed_value);
    }
  value=(unsigned int) buffer[0] << 24;
  value|=(unsigned int) buffer[1] << 16;
  value|=(unsigned int) buffer[2] << 8;
  value|=(unsigned int) buffer[3];
  quantum.unsigned_value=value & 0xffffffff;
  return(quantum.signed_value);
}

/* Read a big-endian 32-bit value and advance the tracked cursor; returns 0
   when fewer than 4 bytes remain. */
static inline signed int ReadProfileMSBLong(unsigned char **p,size_t *length)
{
  signed int
    value;

  if (*length < 4)
    return(0);
  value=ReadProfileLong(MSBEndian,*p);
  (*length)-=4;
  *p+=4;
  return(value);
}

/* Read a big-endian 16-bit value and advance the tracked cursor; returns 0
   when fewer than 2 bytes remain. */
static inline signed short ReadProfileMSBShort(unsigned char **p,
  size_t *length)
{
  signed short
    value;

  if (*length < 2)
    return(0);
  value=ReadProfileShort(MSBEndian,*p);
  (*length)-=2;
  *p+=2;
  return(value);
}

/* Store a 32-bit value at p in the given byte order. */
static inline void WriteProfileLong(const EndianType endian,
  const size_t value,unsigned char *p)
{
  unsigned char
    buffer[4];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
      buffer[2]=(unsigned char) (value >> 16);
      buffer[3]=(unsigned char) (value >> 24);
      (void) memcpy(p,buffer,4);
      return;
    }
  buffer[0]=(unsigned char) (value >> 24);
  buffer[1]=(unsigned char) (value >> 16);
  buffer[2]=(unsigned char) (value >> 8);
  buffer[3]=(unsigned char) value;
  (void) memcpy(p,buffer,4);
}

/* Store a 16-bit value at p in the given byte order. */
static void WriteProfileShort(const EndianType endian,
  const unsigned short value,unsigned char *p)
{
  unsigned char
    buffer[2];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
      (void) memcpy(p,buffer,2);
      return;
    }
  buffer[0]=(unsigned char) (value >> 8);
  buffer[1]=(unsigned char) value;
  (void) memcpy(p,buffer,2);
}

/*
  Rewrite the resolution fields of the 0x03ED (resolution info) resource in
  the image's 8BIM profile so they match image->x_resolution/y_resolution
  and image->units.  Returns MagickFalse when the profile is truncated or
  a record length is inconsistent.
*/
static MagickBooleanType Sync8BimProfile(Image *image,StringInfo *profile)
{
  size_t
    length;

  ssize_t
    count;

  unsigned char
    *p;

  unsigned short
    id;

  length=GetStringInfoLength(profile);
  p=GetStringInfoDatum(profile);
  while (length != 0)
  {
    /* Scan byte-wise for the "8BIM" signature (0x38 0x42 0x49 0x4D). */
    if (ReadProfileByte(&p,&length) != 0x38)
      continue;
    if (ReadProfileByte(&p,&length) != 0x42)
      continue;
    if (ReadProfileByte(&p,&length) != 0x49)
      continue;
    if (ReadProfileByte(&p,&length) != 0x4D)
      continue;
    if (length < 7)
      return(MagickFalse);
    id=ReadProfileMSBShort(&p,&length);
    /* Skip the padded Pascal-style resource name. */
    count=(ssize_t) ReadProfileByte(&p,&length);
    if ((count >= (ssize_t) length) || (count < 0))
      return(MagickFalse);
    p+=count;
    length-=count;
    if ((*p & 0x01) == 0)
      (void) ReadProfileByte(&p,&length);
    count=(ssize_t) ReadProfileMSBLong(&p,&length);
    if ((count > (ssize_t) length) || (count < 0))
      return(MagickFalse);
    if ((id == 0x3ED) && (count == 16))
      {
        /* Resolutions are stored as 16.16 fixed point, pixels per inch. */
        if (image->units == PixelsPerCentimeterResolution)
          WriteProfileLong(MSBEndian,(unsigned int) (image->x_resolution*2.54*
            65536.0),p);
        else
          WriteProfileLong(MSBEndian,(unsigned int) (image->x_resolution*
            65536.0),p);
        WriteProfileShort(MSBEndian,(unsigned short) image->units,p+4);
        if (image->units == PixelsPerCentimeterResolution)
          WriteProfileLong(MSBEndian,(unsigned int) (image->y_resolution*2.54*
            65536.0),p+8);
        else
          WriteProfileLong(MSBEndian,(unsigned int) (image->y_resolution*
            65536.0),p+8);
        WriteProfileShort(MSBEndian,(unsigned short) image->units,p+12);
      }
    p+=count;
    length-=count;
  }
  return(MagickTrue);
}

/*
  Walk the EXIF IFD tree and rewrite the XResolution (0x011a), YResolution
  (0x011b), Orientation (0x0112) and ResolutionUnit (0x0128) tags in place
  from the image properties.  The exif_resources splay-tree records entries
  already visited so cyclic IFD offsets in corrupt files cannot loop forever.
*/
static MagickBooleanType SyncExifProfile(Image *image, StringInfo *profile)
{
#define MaxDirectoryStack  16
#define EXIF_DELIMITER  "\n"
#define EXIF_NUM_FORMATS  12
#define TAG_EXIF_OFFSET  0x8769
#define TAG_INTEROP_OFFSET  0xa005

  typedef struct _DirectoryInfo
  {
    unsigned char
      *directory;

    size_t
      entry;
  } DirectoryInfo;

  DirectoryInfo
    directory_stack[MaxDirectoryStack];

  EndianType
    endian;

  size_t
    entry,
    length,
    number_entries;

  SplayTreeInfo
    *exif_resources;

  ssize_t
    id,
    level,
    offset;

  /* Byte size of each EXIF value format code (index 0 unused). */
  static int
    format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8};

  unsigned char
    *directory,
    *exif;

  /*
    Set EXIF resolution tag.
  */
  length=GetStringInfoLength(profile);
  exif=GetStringInfoDatum(profile);
  if (length < 16)
    return(MagickFalse);
  id=(ssize_t) ReadProfileShort(LSBEndian,exif);
  if ((id != 0x4949) && (id != 0x4D4D))
    {
      /* No TIFF header at offset 0: scan for an "Exif\0\0" preamble. */
      while (length != 0)
      {
        if (ReadProfileByte(&exif,&length) != 0x45)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x78)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x69)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x66)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x00)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x00)
          continue;
        break;
      }
      if (length < 16)
        return(MagickFalse);
      id=(ssize_t) ReadProfileShort(LSBEndian,exif);
    }
  endian=LSBEndian;
  if (id == 0x4949)
    endian=LSBEndian;   /* "II": Intel, little-endian */
  else
    if (id == 0x4D4D)
      endian=MSBEndian; /* "MM": Motorola, big-endian */
    else
      return(MagickFalse);
  if (ReadProfileShort(endian,exif+2) != 0x002a)
    return(MagickFalse);
  /*
    This the offset to the first IFD.
  */
  offset=(ssize_t) ReadProfileLong(endian,exif+4);
  if ((offset < 0) || ((size_t) offset >= length))
    return(MagickFalse);
  directory=exif+offset;
  level=0;
  entry=0;
  exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL,
    (void *(*)(void *)) NULL,(void *(*)(void *)) NULL);
  do
  {
    if (level > 0)
      {
        level--;
        directory=directory_stack[level].directory;
        entry=directory_stack[level].entry;
      }
    if ((directory < exif) || (directory > (exif+length-2)))
      break;
    /*
      Determine how many entries there are in the current IFD.
    */
    number_entries=ReadProfileShort(endian,directory);
    for ( ; entry < number_entries; entry++)
    {
      int
        components;

      register unsigned char
        *p,
        *q;

      size_t
        number_bytes;

      ssize_t
        format,
        tag_value;

      q=(unsigned char *) (directory+2+(12*entry));
      if (q > (exif+length-12))
        break;  /* corrupt EXIF */
      if (GetValueFromSplayTree(exif_resources,q) == q)
        break;  /* entry already visited: cycle in IFD offsets */
      (void) AddValueToSplayTree(exif_resources,q,q);
      tag_value=(ssize_t) ReadProfileShort(endian,q);
      format=(ssize_t) ReadProfileShort(endian,q+2);
      if ((format < 0) || ((format-1) >= EXIF_NUM_FORMATS))
        break;
      components=(int) ReadProfileLong(endian,q+4);
      if (components < 0)
        break;  /* corrupt EXIF */
      number_bytes=(size_t) components*format_bytes[format];
      if ((ssize_t) number_bytes < components)
        break;  /* prevent overflow */
      if (number_bytes <= 4)
        p=q+8;  /* value stored inline in the entry */
      else
        {
          /*
            The directory entry contains an offset.
          */
          offset=(ssize_t) ReadProfileLong(endian,q+8);
          if ((offset < 0) || ((size_t) (offset+number_bytes) > length))
            continue;
          if (~length < number_bytes)
            continue;  /* prevent overflow */
          p=(unsigned char *) (exif+offset);
        }
      switch (tag_value)
      {
        case 0x011a:
        {
          /* XResolution (rational: numerator, denominator). */
          (void) WriteProfileLong(endian,(size_t) (image->x_resolution+0.5),p);
          if (number_bytes == 8)
            (void) WriteProfileLong(endian,1UL,p+4);
          break;
        }
        case 0x011b:
        {
          /* YResolution. */
          (void) WriteProfileLong(endian,(size_t) (image->y_resolution+0.5),p);
          if (number_bytes == 8)
            (void) WriteProfileLong(endian,1UL,p+4);
          break;
        }
        case 0x0112:
        {
          /* Orientation. */
          if (number_bytes == 4)
            {
              (void) WriteProfileLong(endian,(size_t) image->orientation,p);
              break;
            }
          (void) WriteProfileShort(endian,(unsigned short) image->orientation,
            p);
          break;
        }
        case 0x0128:
        {
          /* ResolutionUnit (EXIF value is MagickCore units enum + 1). */
          if (number_bytes == 4)
            {
              (void) WriteProfileLong(endian,(size_t) (image->units+1),p);
              break;
            }
          (void) WriteProfileShort(endian,(unsigned short) (image->units+1),p);
          break;
        }
        default:
          break;
      }
      if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET))
        {
          /* Descend into a sub-IFD; remember where to resume afterwards. */
          offset=(ssize_t) ReadProfileLong(endian,p);
          if (((size_t) offset < length) && (level < (MaxDirectoryStack-2)))
            {
              directory_stack[level].directory=directory;
              entry++;
              directory_stack[level].entry=entry;
              level++;
              directory_stack[level].directory=exif+offset;
              directory_stack[level].entry=0;
              level++;
              if ((directory+2+(12*number_entries)) > (exif+length))
                break;
              /* Chained next-IFD offset after the entry table. */
              offset=(ssize_t) ReadProfileLong(endian,directory+2+(12*
                number_entries));
              if ((offset != 0) && ((size_t) offset < length) &&
                  (level < (MaxDirectoryStack-2)))
                {
                  directory_stack[level].directory=exif+offset;
                  directory_stack[level].entry=0;
                  level++;
                }
            }
          break;
        }
    }
  } while (level > 0);
  exif_resources=DestroySplayTree(exif_resources);
  return(MagickTrue);
}

MagickExport MagickBooleanType SyncImageProfiles(Image *image)
{
  MagickBooleanType
    status;

  StringInfo
    *profile;

  status=MagickTrue;
  profile=(StringInfo *) GetImageProfile(image,"8BIM");
  if (profile != (StringInfo *) NULL)
    if (Sync8BimProfile(image,profile) == MagickFalse)
      status=MagickFalse;
  profile=(StringInfo *) GetImageProfile(image,"EXIF");
  if (profile != (StringInfo *) NULL)
    if (SyncExifProfile(image,profile) == MagickFalse)
      status=MagickFalse;
  return(status);
}
/* ==== deikstra.c ==== */
/*
 ============================================================================
 Name        : deikstra.c
 Author      : qwinpin
 Version     :
 Copyright   : Your copyright notice
 Description : Dijkstra shortest paths, serial and OpenMP-parallel variants
 ============================================================================
 */

#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

#define INF 1000000
#define TRUE 1
#define FALSE 0

void print_graph(int **a, int size);
void test(void);

/*
 * Serial Dijkstra over the dense adjacency matrix a (size x size), starting
 * from vertex 0.  Edge weight 0 means "no edge".  Distances are computed in
 * a local array and discarded (the function exists for timing comparison).
 * Returns 0 on success, -1 on allocation failure.
 */
int find_single(int **a, int size)
{
  int *v = malloc(size * sizeof *v);  /* TRUE while vertex is still unvisited */
  int *d = malloc(size * sizeof *d);  /* tentative distance from vertex 0 */

  if (v == NULL || d == NULL) {       /* BUG FIX: results were unchecked */
    free(v);
    free(d);
    return -1;
  }
  for (int i = 0; i < size; i++) {
    v[i] = TRUE;
    d[i] = INF;
  }
  d[0] = 0;
  for (;;) {
    /* Pick the unvisited vertex with the smallest tentative distance. */
    int min = INF;
    int minindex = INF;
    for (int i = 0; i < size; i++) {
      if (v[i] && d[i] < min) {
        min = d[i];
        minindex = i;
      }
    }
    if (minindex == INF)
      break;  /* all reachable vertices settled */
    /* Relax every edge leaving the chosen vertex. */
    for (int i = 0; i < size; i++) {
      if (a[minindex][i] > 0) {
        int tmp = a[minindex][i] + min;
        if (tmp < d[i])
          d[i] = tmp;
      }
    }
    v[minindex] = FALSE;
  }
  free(v);  /* BUG FIX: v and d leaked in the original */
  free(d);
  return 0;
}

/*
 * Parallel Dijkstra: the min-distance scan is split into one chunk per
 * thread; each thread records its chunk minimum in minArr/minInd and the
 * global minimum is reduced serially.  The relaxation step stays serial
 * because concurrent writes to d[i] would race.
 * Requires threads >= 1.  Returns 0 on success, -1 on allocation failure.
 */
int find_parallel(int **a, int size, int threads)
{
  int *v = malloc(size * sizeof *v);          /* TRUE while unvisited */
  int *d = malloc(size * sizeof *d);          /* tentative distances */
  int *minArr = malloc(threads * sizeof *minArr);  /* per-chunk min value */
  int *minInd = malloc(threads * sizeof *minInd);  /* per-chunk min index */

  if (v == NULL || d == NULL || minArr == NULL || minInd == NULL) {
    free(v);
    free(d);
    free(minArr);
    free(minInd);
    return -1;
  }
  int step = size / threads;  /* chunk length; remainder handled below */

  /*
   * BUG FIX: the original nested "#pragma omp parallel" around
   * "#pragma omp parallel for", creating nested parallel regions and
   * racing on a shared loop index.  A single combined directive with a
   * loop-local index is correct.
   */
  #pragma omp parallel for num_threads(threads)
  for (int i = 0; i < size; i++) {
    v[i] = TRUE;
    d[i] = INF;
  }
  d[0] = 0;
  for (int j = 0; j < threads; j++) {
    minArr[j] = INF;
    minInd[j] = INF;
  }

  int minindex = 0;
  while (minindex < INF) {
    minindex = INF;
    int min = INF;
    /* Each thread scans its own chunk for the local minimum. */
    #pragma omp parallel for num_threads(threads)
    for (int j = 0; j < threads; j++) {
      /* BUG FIX: the last chunk now extends to size, so the
         size % threads tail vertices are no longer skipped. */
      int hi = (j == threads - 1) ? size : (j + 1) * step;
      for (int k = j * step; k < hi; k++) {
        if (v[k] && d[k] < minArr[j]) {
          minArr[j] = d[k];
          minInd[j] = k;
        }
      }
    }
    /* Serial reduction of the per-chunk minima; reset for next round. */
    for (int j = 0; j < threads; j++) {
      if (minArr[j] < min) {
        min = minArr[j];
        minindex = minInd[j];
      }
      minArr[j] = INF;
      minInd[j] = INF;
    }
    /* Relax edges of the settled vertex (kept serial: d[i] updates race). */
    if (minindex != INF) {
      for (int i = 0; i < size; i++) {
        if (a[minindex][i] > 0) {
          int tmp = a[minindex][i] + min;
          if (tmp < d[i])
            d[i] = tmp;
        }
      }
      v[minindex] = FALSE;
    }
  }
  free(v);  /* BUG FIX: all four work arrays leaked in the original */
  free(d);
  free(minArr);
  free(minInd);
  return 0;
}

/* Print the top-left (at most 5x5) corner of the adjacency matrix. */
void print_graph(int **a, int size)
{
  printf("\nGraph\n");
  for (int i = 0; i < size && i < 5; i++) {
    for (int j = 0; j < size && j < 5; j++) {
      printf("%d ", a[i][j]);
    }
    printf("\n");
  }
}

int main(void)
{
  int evaluate;

  printf("Evaluate test or not? 1/0\n");
  if (scanf("%d", &evaluate) != 1)
    return 1;
  if (evaluate) {
    test();
    return 0;
  }
  int size;
  printf("Enter graph size\n");
  if (scanf("%d", &size) != 1)
    return 1;
  if (size < 0 || size > 30000)
    size = 10000;
  int th;
  printf("Threads number\n");
  if (scanf("%d", &th) != 1)
    return 1;
  if (th <= 0) {
    /* BUG FIX: original printed "Bye" but fell through and divided by
       zero (size / threads) inside find_parallel.  Exit instead. */
    printf("Bye");
    return 0;
  }
  /* Build a random symmetric weighted graph (weight 0 = no edge). */
  int **a = malloc(size * sizeof *a);
  if (a == NULL)
    return 1;
  srand(time(NULL));
  for (int i = 0; i < size; i++) {
    a[i] = malloc(size * sizeof **a);
    if (a[i] == NULL)
      return 1;
  }
  for (int i = 0; i < size; i++) {
    a[i][i] = 0;
    for (int j = i + 1; j < size; j++) {
      int w = rand() % 10;
      a[i][j] = w;
      a[j][i] = w;
    }
  }
  /* NOTE: clock() measures CPU time summed over threads, not wall time. */
  clock_t start = clock(), diff;
  if (th == 1)
    find_single(a, size);
  else
    find_parallel(a, size, th);
  diff = clock() - start;
  print_graph(a, size);
  printf("It took %li sec %li milisec", diff / CLOCKS_PER_SEC,
    diff * 1000 / CLOCKS_PER_SEC % 1000);
  for (int i = 0; i < size; i++)
    free(a[i]);
  free(a);
  return 0;
}

/*
 * Benchmark sweep: for 1..16 threads and sizes 1000..22000 in steps of
 * 1000, time the solver and append the timings to d_process.txt.
 */
void test(void)
{
  int size = 1000;
  int threads = 17;
  int size_step = 1000;
  int size_max = 22000;

  srand(time(NULL));
  FILE *f = fopen("d_process.txt", "w");
  if (f == NULL) {
    printf("Error opening file!\n");
    exit(1);
  }
  int **a = malloc(size_max * sizeof *a);
  if (a == NULL)
    exit(1);
  for (int i = 0; i < size_max; i++) {
    /* BUG FIX: original allocated size_max * sizeof(unsigned short int*)
       for rows used as int arrays. */
    a[i] = malloc(size_max * sizeof **a);
    if (a[i] == NULL)
      exit(1);
  }
  for (int i = 0; i < size_max; i++) {
    a[i][i] = 0;
    for (int j = i + 1; j < size_max; j++) {
      int w = rand() % 10;
      a[i][j] = w;
      a[j][i] = w;
    }
  }
  for (int th = 1; th < threads; th++) {
    printf("Threads num %d\n", th);
    fprintf(f, "\nThreads_num: %d\n ", th);
    size = 1000;
    while (size <= size_max) {
      printf("Size %d\n", size);
      clock_t start = clock(), diff;
      if (th == 1)
        find_single(a, size);
      else
        find_parallel(a, size, th);
      diff = clock() - start;
      fprintf(f, "%f, ", (double)diff / CLOCKS_PER_SEC * 1000);
      size = size + size_step;
    }
  }
  fclose(f);  /* BUG FIX: the results file was never closed */
  for (int i = 0; i < size_max; i++)
    free(a[i]);
  free(a);
}
// ==== omp_hash_map.h ====
#ifndef OMP_HASH_MAP_H_
#define OMP_HASH_MAP_H_

#include <array>
#include <functional>
#include <memory>
#include <stdexcept>  // std::invalid_argument (thrown by get_n_rehashing_buckets)
#include <vector>

#include "omp.h"

// A high performance concurrent hash map based on OpenMP.
template <class K, class V, class H = std::hash<K>>
class omp_hash_map {
 public:
  omp_hash_map();

  ~omp_hash_map();

  // Set the number of buckets in the container to be at least the specified value.
  void reserve(const size_t n_buckets) {
    const size_t n_rehashing_buckets = get_n_rehashing_buckets(n_buckets);
    rehash(n_rehashing_buckets);
  };

  // Return the number of buckets.
  size_t get_n_buckets() const { return n_buckets; };

  // Return the current load factor (the ratio between the number of keys and buckets).
  double get_load_factor() const { return static_cast<double>(n_keys) / n_buckets; }

  // Return the max load factor beyond which an automatic rehashing will occur.
  double get_max_load_factor() const { return max_load_factor; }

  // Set the max load factor beyond which an automatic rehashing will occur.
  void set_max_load_factor(const double max_load_factor) {
    this->max_load_factor = max_load_factor;
  }

  // Return the number of keys.
  size_t get_n_keys() const { return n_keys; }

  // Set the specified key to the specified value.
  void set(const K& key, const V& value);

  // Update the value of the specified key.
  // If the key does not exist, construct it with the default initializer first.
  void set(const K& key, const std::function<void(V&)>& setter);

  // Update the value of the specified key.
  // If the key does not exist, construct and set it to the default value passed in first.
  void set(const K& key, const std::function<void(V&)>& setter, const V& default_value);

  // Remove the specified key.
  void unset(const K& key);

  // Test if the specified key exists.
  bool has(const K& key);

  // Return a copy of the value of the specified key, or the default value if key does not exist.
  V get_copy_or_default(const K& key, const V& default_value);

  // Return the mapped value for the value of the specified key.
  // If the key does not exist, return the default value.
  template <class W>
  W map(const K& key, const std::function<W(const V&)>& mapper, const W& default_value);

  // Return the reduced value of the mapped values of all the keys.
  // If no key exists, return the default value.
  template <class W>
  W map_reduce(
      const std::function<W(const K&, const V&)>& mapper,
      const std::function<void(W&, const W&)>& reducer,
      const W& default_value);

  // Apply the handler to the value of the specific key, if it exists.
  void apply(const K& key, const std::function<void(const V&)>& handler);

  // Apply the handler to all the keys.
  void apply(const std::function<void(const K&, const V&)>& handler);

  // Clear all keys.
  void clear();

 private:
  size_t n_keys;

  size_t n_buckets;

  double max_load_factor;

  size_t n_threads;

  // The entire hash map is divided into several segments (depends on how many threads).
  // Each segment can be locked and accessed independently in parallel.
  size_t n_segments;

  H hasher;

  std::vector<omp_lock_t> segment_locks;

  // For parallel rehashing (Require omp_set_nested(1)).
  std::vector<omp_lock_t> rehashing_segment_locks;

  constexpr static size_t N_INITIAL_BUCKETS = 11;

  constexpr static size_t N_SEGMENTS_PER_THREAD = 7;

  constexpr static double DEFAULT_MAX_LOAD_FACTOR = 1.0;

  // Singly-linked collision-chain node owned via unique_ptr.
  struct hash_node {
    K key;
    V value;
    std::unique_ptr<hash_node> next;
    hash_node(const K& key, const V& value) : key(key), value(value){};
  };

  std::vector<std::unique_ptr<hash_node>> buckets;

  // Set the number of buckets to be at least the number of current keys times max load factor.
  void rehash() { reserve(n_keys / max_load_factor); }

  void rehash(const size_t n_rehashing_buckets);

  // Get the number of hash buckets to use.
  // This number shall be larger than or equal to the specified number.
  size_t get_n_rehashing_buckets(const size_t n_buckets) const;

  // Apply node_handler to the hash node which has the specific key.
  // If the key does not exist, apply to the unassociated node from the corresponding bucket.
  void hash_node_apply(
      const K& key, const std::function<void(std::unique_ptr<hash_node>&)>& node_handler);

  // Apply node_handler to all the hash nodes.
  void hash_node_apply(const std::function<void(std::unique_ptr<hash_node>&)>& node_handler);

  // Recursively find the node with the specified key on the list starting from the node specified.
  // Then apply the specified handler to that node.
  // If the key does not exist, apply the handler to the unassociated node at the end of the list.
  void hash_node_apply_recursive(
      std::unique_ptr<hash_node>& node,
      const K& key,
      const std::function<void(std::unique_ptr<hash_node>&)>& node_handler);

  // Recursively apply the handler to each node on the list from the node specified (post-order).
  void hash_node_apply_recursive(
      std::unique_ptr<hash_node>& node,
      const std::function<void(std::unique_ptr<hash_node>&)>& node_handler);

  void lock_all_segments();

  void unlock_all_segments();
};

template <class K, class V, class H>
omp_hash_map<K, V, H>::omp_hash_map() {
  n_keys = 0;
  n_buckets = N_INITIAL_BUCKETS;
  buckets.resize(n_buckets);
  max_load_factor = DEFAULT_MAX_LOAD_FACTOR;
  n_threads = omp_get_max_threads();
  n_segments = n_threads * N_SEGMENTS_PER_THREAD;
  segment_locks.resize(n_segments);
  rehashing_segment_locks.resize(n_segments);
  for (auto& lock : segment_locks) omp_init_lock(&lock);
  for (auto& lock : rehashing_segment_locks) omp_init_lock(&lock);
}

template <class K, class V, class H>
omp_hash_map<K, V, H>::~omp_hash_map() {
  clear();
  for (auto& lock : segment_locks) omp_destroy_lock(&lock);
  for (auto& lock : rehashing_segment_locks) omp_destroy_lock(&lock);
}

template <class K, class V, class H>
void omp_hash_map<K, V, H>::rehash(const size_t n_rehashing_buckets) {
  lock_all_segments();
  // No decrease in the number of buckets.
  if (n_buckets >= n_rehashing_buckets) {
    unlock_all_segments();
    return;
  }

  // Rehash: move every node into its new bucket, guarded by the
  // secondary (rehashing) segment locks so buckets can be filled in parallel.
  std::vector<std::unique_ptr<hash_node>> rehashing_buckets(n_rehashing_buckets);
  const auto& node_handler = [&](std::unique_ptr<hash_node>& node) {
    const auto& rehashing_node_handler = [&](std::unique_ptr<hash_node>& rehashing_node) {
      rehashing_node = std::move(node);
      rehashing_node->next.reset();
    };
    const K& key = node->key;
    const size_t hash_value = hasher(key);
    const size_t bucket_id = hash_value % n_rehashing_buckets;
    const size_t segment_id = bucket_id % n_segments;
    auto& lock = rehashing_segment_locks[segment_id];
    omp_set_lock(&lock);
    hash_node_apply_recursive(rehashing_buckets[bucket_id], key, rehashing_node_handler);
    omp_unset_lock(&lock);
  };
#pragma omp parallel for
  for (size_t i = 0; i < n_buckets; i++) {
    hash_node_apply_recursive(buckets[i], node_handler);
  }
  buckets = std::move(rehashing_buckets);
  n_buckets = n_rehashing_buckets;
  unlock_all_segments();
}

template <class K, class V, class H>
size_t omp_hash_map<K, V, H>::get_n_rehashing_buckets(const size_t n_buckets_in) const {
  // Returns a number that is greater than or equal to n_buckets_in.
  // That number is either a prime number itself, or a product of a few prime numbers.
  // NOTE: 15859 is prime; the previous table entry / division factor 15858 was
  // even and broke the prime-product contract.
  constexpr size_t PRIME_NUMBERS[] = {11,   17,   29,   47,   79,    127,   211,
                                      337,  547,  887,  1433, 2311,  3739,  6053,
                                      9791, 15859, 25667, 41539, 67213, 104729};
  constexpr size_t N_PRIME_NUMBERS = sizeof(PRIME_NUMBERS) / sizeof(size_t);
  constexpr size_t LAST_PRIME_NUMBER = PRIME_NUMBERS[N_PRIME_NUMBERS - 1];
  constexpr size_t DIVISION_FACTOR = 15859;
  size_t remaining_factor = n_buckets_in;
  size_t n_rehashing_buckets = 1;
  for (size_t i = 0; i < 3; i++) {
    if (remaining_factor > LAST_PRIME_NUMBER) {
      // Ceiling division: flooring here could make the final product
      // smaller than n_buckets_in (e.g. n_buckets_in = 15859 * 11 + 5).
      remaining_factor = (remaining_factor + DIVISION_FACTOR - 1) / DIVISION_FACTOR;
      n_rehashing_buckets *= DIVISION_FACTOR;
    }
  }
  if (remaining_factor > LAST_PRIME_NUMBER) throw std::invalid_argument("n_buckets too large");
  // Binary search for the first prime >= remaining_factor.
  size_t left = 0, right = N_PRIME_NUMBERS - 1;
  while (left < right) {
    size_t mid = (left + right) / 2;
    if (PRIME_NUMBERS[mid] < remaining_factor) {
      left = mid + 1;
    } else {
      right = mid;
    }
  }
  n_rehashing_buckets *= PRIME_NUMBERS[left];
  return n_rehashing_buckets;
}

template <class K, class V, class H>
void omp_hash_map<K, V, H>::set(const K& key, const V& value) {
  const auto& node_handler = [&](std::unique_ptr<hash_node>& node) {
    if (!node) {
      node.reset(new hash_node(key, value));
#pragma omp atomic
      n_keys++;
    } else {
      node->value = value;
    }
  };
  hash_node_apply(key, node_handler);
  if (n_keys >= n_buckets * max_load_factor) rehash();
}

template <class K, class V, class H>
void omp_hash_map<K, V, H>::set(const K& key, const std::function<void(V&)>& setter) {
  const auto& node_handler = [&](std::unique_ptr<hash_node>& node) {
    if (!node) {
      node.reset(new hash_node(key, V()));
      setter(node->value);
#pragma omp atomic
      n_keys++;
    } else {
      setter(node->value);
    }
  };
  hash_node_apply(key, node_handler);
  if (n_keys >= n_buckets * max_load_factor) rehash();
}

template <class K, class V, class H>
void omp_hash_map<K, V, H>::set(
    const K& key, const std::function<void(V&)>& setter, const V& default_value) {
  const auto& node_handler = [&](std::unique_ptr<hash_node>& node) {
    if (!node) {
      V value(default_value);
      setter(value);
      node.reset(new hash_node(key, value));
#pragma omp atomic
      n_keys++;
    } else {
      setter(node->value);
    }
  };
  hash_node_apply(key, node_handler);
  if (n_keys >= n_buckets * max_load_factor) rehash();
}

template <class K, class V, class H>
void omp_hash_map<K, V, H>::unset(const K& key) {
  const auto& node_handler = [&](std::unique_ptr<hash_node>& node) {
    if (node) {
      // Unlink the node; its chain tail is re-attached in one move.
      node = std::move(node->next);
#pragma omp atomic
      n_keys--;
    }
  };
  hash_node_apply(key, node_handler);
}

template <class K, class V, class H>
bool omp_hash_map<K, V, H>::has(const K& key) {
  bool has_key = false;
  const auto& node_handler = [&](const std::unique_ptr<hash_node>& node) {
    if (node) has_key = true;
  };
  hash_node_apply(key, node_handler);
  return has_key;
}

template <class K, class V, class H>
V omp_hash_map<K, V, H>::get_copy_or_default(const K& key, const V& default_value) {
  V value(default_value);
  const auto& node_handler = [&](const std::unique_ptr<hash_node>& node) {
    if (node) value = node->value;
  };
  hash_node_apply(key, node_handler);
  return value;
}

template <class K, class V, class H>
template <class W>
W omp_hash_map<K, V, H>::map(
    const K& key, const std::function<W(const V&)>& mapper, const W& default_value) {
  W mapped_value(default_value);
  const auto& node_handler = [&](const std::unique_ptr<hash_node>& node) {
    if (node) mapped_value = mapper(node->value);
  };
  hash_node_apply(key, node_handler);
  return mapped_value;
}

template <class K, class V, class H>
template <class W>
W omp_hash_map<K, V, H>::map_reduce(
    const std::function<W(const K&, const V&)>& mapper,
    const std::function<void(W&, const W&)>& reducer,
    const W& default_value) {
  // Per-thread partial reductions avoid contention; they are combined
  // serially at the end.
  std::vector<W> thread_reduced_values(n_threads, default_value);
  W reduced_value = default_value;
  const auto& node_handler = [&](std::unique_ptr<hash_node>& node) {
    const size_t thread_id = omp_get_thread_num();
    const W& mapped_value = mapper(node->key, node->value);
    reducer(thread_reduced_values[thread_id], mapped_value);
  };
  hash_node_apply(node_handler);
  for (const auto& value : thread_reduced_values) reducer(reduced_value, value);
  return reduced_value;
}

template <class K, class V, class H>
void omp_hash_map<K, V, H>::apply(const K& key, const std::function<void(const V&)>& handler) {
  const auto& node_handler = [&](std::unique_ptr<hash_node>& node) {
    if (node) handler(node->value);
  };
  hash_node_apply(key, node_handler);
}

template <class K, class V, class H>
void omp_hash_map<K, V, H>::apply(const std::function<void(const K&, const V&)>& handler) {
  const auto& node_handler = [&](std::unique_ptr<hash_node>& node) {
    handler(node->key, node->value);
  };
  hash_node_apply(node_handler);
}

template <class K, class V, class H>
void omp_hash_map<K, V, H>::clear() {
  lock_all_segments();
#pragma omp parallel for
  for (size_t i = 0; i < n_buckets; i++) {
    buckets[i].reset();
  }
  buckets.resize(N_INITIAL_BUCKETS);
  for (auto& bucket : buckets) bucket.reset();
  n_keys = 0;
  // Keep the bucket count in sync with the shrunken bucket vector.
  // Without this, hash_node_apply computes bucket_id modulo the OLD
  // n_buckets and indexes past the end of `buckets` after a clear().
  n_buckets = N_INITIAL_BUCKETS;
  unlock_all_segments();
}

template <class K, class V, class H>
void omp_hash_map<K, V, H>::hash_node_apply(
    const K& key, const std::function<void(std::unique_ptr<hash_node>&)>& node_handler) {
  const size_t hash_value = hasher(key);
  bool applied = false;
  while (!applied) {
    // Snapshot n_buckets; if a concurrent rehash changes it between the
    // snapshot and acquiring the lock, retry with the new bucket count.
    const size_t n_buckets_snapshot = n_buckets;
    const size_t bucket_id = hash_value % n_buckets_snapshot;
    const size_t segment_id = bucket_id % n_segments;
    auto& lock = segment_locks[segment_id];
    omp_set_lock(&lock);
    if (n_buckets_snapshot != n_buckets) {
      omp_unset_lock(&lock);
      continue;
    }
    hash_node_apply_recursive(buckets[bucket_id], key, node_handler);
    omp_unset_lock(&lock);
    applied = true;
  }
}

template <class K, class V, class H>
void omp_hash_map<K, V, H>::hash_node_apply(
    const std::function<void(std::unique_ptr<hash_node>&)>& node_handler) {
  lock_all_segments();
  // For a good hash function, a static schedule shall provide both a good balance and speed.
#pragma omp parallel for
  for (size_t i = 0; i < n_buckets; i++) {
    hash_node_apply_recursive(buckets[i], node_handler);
  }
  unlock_all_segments();
}

template <class K, class V, class H>
void omp_hash_map<K, V, H>::hash_node_apply_recursive(
    std::unique_ptr<hash_node>& node,
    const K& key,
    const std::function<void(std::unique_ptr<hash_node>&)>& node_handler) {
  if (node) {
    if (node->key == key) {
      node_handler(node);
    } else {
      hash_node_apply_recursive(node->next, key, node_handler);
    }
  } else {
    node_handler(node);
  }
}

template <class K, class V, class H>
void omp_hash_map<K, V, H>::hash_node_apply_recursive(
    std::unique_ptr<hash_node>& node,
    const std::function<void(std::unique_ptr<hash_node>&)>& node_handler) {
  if (node) {
    // Post-order traversal for rehashing.
    hash_node_apply_recursive(node->next, node_handler);
    node_handler(node);
  }
}

template <class K, class V, class H>
void omp_hash_map<K, V, H>::lock_all_segments() {
  for (auto& lock : segment_locks) omp_set_lock(&lock);
}

template <class K, class V, class H>
void omp_hash_map<K, V, H>::unlock_all_segments() {
  for (auto& lock : segment_locks) omp_unset_lock(&lock);
}

#endif
convolution_sgemm_pack4to1_bf16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void im2col_sgemm_pack4to1_bf16s_neon(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { // Mat bottom_im2col(size, maxk, inch, 8u, 4, opt.workspace_allocator); const int size = bottom_im2col.w; const int maxk = bottom_im2col.h; const int inch = bottom_im2col.c; const int outch = top_blob.c; const float* bias = _bias; // permute Mat tmp; #if __aarch64__ if (size >= 12) tmp.create(12 * maxk, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + size % 12 % 4, 8u, 4, opt.workspace_allocator); else if (size >= 8) tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + size % 4, 8u, 4, opt.workspace_allocator); else if (size >= 4) tmp.create(4 * maxk, inch, size / 4 + size % 4, 8u, 4, opt.workspace_allocator); else tmp.create(maxk, inch, size, 8u, 4, opt.workspace_allocator); #else if (size >= 8) tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + size % 4, 8u, 4, opt.workspace_allocator); else if (size >= 4) tmp.create(4 * maxk, inch, size / 4 + size % 4, 8u, 4, opt.workspace_allocator); else tmp.create(maxk, inch, size, 8u, 4, opt.workspace_allocator); #endif { #if __aarch64__ int nn_size = size / 12; int remain_size_start = 0; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; 
ii++) { int i = remain_size_start + ii * 12; unsigned short* tmpptr = tmp.channel(i / 12); for (int q = 0; q < inch; q++) { const unsigned short* img0 = (const unsigned short*)bottom_im2col.channel(q) + i * 4; for (int k = 0; k < maxk; k++) { // transpose 4x12 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "ld4 {v4.4h, v5.4h, v6.4h, v7.4h}, [%0] \n" "st1 {v0.8h}, [%1], #16 \n" "st1 {v4.4h}, [%1], #8 \n" "st1 {v1.8h}, [%1], #16 \n" "st1 {v5.4h}, [%1], #8 \n" "sub %0, %0, #64 \n" "st1 {v2.8h}, [%1], #16 \n" "st1 {v6.4h}, [%1], #8 \n" "st1 {v3.8h}, [%1], #16 \n" "st1 {v7.4h}, [%1], #8 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); img0 += size * 4; } } } remain_size_start += nn_size * 12; nn_size = (size - remain_size_start) >> 3; #else int nn_size = size >> 3; int remain_size_start = 0; #endif #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 8; #if __aarch64__ unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8); #else unsigned short* tmpptr = tmp.channel(i / 8); #endif for (int q = 0; q < inch; q++) { const unsigned short* img0 = (const unsigned short*)bottom_im2col.channel(q) + i * 4; for (int k = 0; k < maxk; k++) { // transpose 4x8 #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n" "st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3"); #else asm volatile( "pld [%0, #256] \n" "vld4.u16 {d0-d3}, [%0]! \n" "pld [%0, #256] \n" "vld4.u16 {d4-d7}, [%0] \n" "sub %0, %0, #32 \n" "vst1.u16 {d0}, [%1 :64]! \n" "vst1.u16 {d4}, [%1 :64]! \n" "vst1.u16 {d1}, [%1 :64]! \n" "vst1.u16 {d5}, [%1 :64]! \n" "vst1.u16 {d2}, [%1 :64]! \n" "vst1.u16 {d6}, [%1 :64]! \n" "vst1.u16 {d3}, [%1 :64]! \n" "vst1.u16 {d7}, [%1 :64]! 
\n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0", "q1", "q2", "q3"); #endif // __aarch64__ img0 += size * 4; } } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; #if __aarch64__ unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); #endif for (int q = 0; q < inch; q++) { const unsigned short* img0 = (const unsigned short*)bottom_im2col.channel(q) + i * 4; for (int k = 0; k < maxk; k++) { // transpose 4x4 #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld4 {v0.4h, v1.4h, v2.4h, v3.4h}, [%0] \n" "st1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%1], #32 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1"); #else asm volatile( "pld [%0, #256] \n" "vld4.u16 {d0-d3}, [%0 :64] \n" "vst1.u16 {d0-d3}, [%1 :64]! \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0", "q1"); #endif // __aarch64__ img0 += size * 4; } } } remain_size_start += nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { #if __aarch64__ unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4); #else unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); #endif for (int q = 0; q < inch; q++) { const unsigned short* img0 = (const unsigned short*)bottom_im2col.channel(q) + i * 4; for (int k = 0; k < maxk; k++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #64] \n" "ld1 {v0.4h}, [%0] \n" "st1 {v0.4h}, [%1], #8 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0"); #else asm volatile( "pld [%0, #64] \n" "vld1.u16 {d0}, [%0 :64] \n" "vst1.u16 {d0}, [%1 :64]! 
\n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0"); #endif // __aarch64__ img0 += size * 4; } } } } int nn_outch = 0; int remain_outch_start = 0; #if __aarch64__ nn_outch = outch >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 8; unsigned short* outptr0 = top_blob.channel(p); unsigned short* outptr1 = top_blob.channel(p + 1); unsigned short* outptr2 = top_blob.channel(p + 2); unsigned short* outptr3 = top_blob.channel(p + 3); unsigned short* outptr4 = top_blob.channel(p + 4); unsigned short* outptr5 = top_blob.channel(p + 5); unsigned short* outptr6 = top_blob.channel(p + 6); unsigned short* outptr7 = top_blob.channel(p + 7); const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? bias + p : zeros; int i = 0; for (; i + 11 < size; i += 12) { unsigned short* tmpptr = tmp.channel(i / 12); const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8); int nn = inch * maxk; // inch always > 0 asm volatile( "ld1 {v30.4s, v31.4s}, [%22] \n" "dup v8.4s, v30.s[0] \n" "dup v9.4s, v30.s[0] \n" "dup v10.4s, v30.s[0] \n" "dup v11.4s, v30.s[1] \n" "dup v12.4s, v30.s[1] \n" "dup v13.4s, v30.s[1] \n" "dup v14.4s, v30.s[2] \n" "dup v15.4s, v30.s[2] \n" "dup v16.4s, v30.s[2] \n" "dup v17.4s, v30.s[3] \n" "dup v18.4s, v30.s[3] \n" "dup v19.4s, v30.s[3] \n" "dup v20.4s, v31.s[0] \n" "dup v21.4s, v31.s[0] \n" "dup v22.4s, v31.s[0] \n" "dup v23.4s, v31.s[1] \n" "dup v24.4s, v31.s[1] \n" "dup v25.4s, v31.s[1] \n" "dup v26.4s, v31.s[2] \n" "dup v27.4s, v31.s[2] \n" "dup v28.4s, v31.s[2] \n" "dup v29.4s, v31.s[3] \n" "dup v30.4s, v31.s[3] \n" "dup v31.4s, v31.s[3] \n" "0: \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n" "prfm pldl1keep, [%10, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%10], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, 
#16 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v11.4s, v0.4s, v4.s[1] \n" "fmla v14.4s, v0.4s, v4.s[2] \n" "fmla v17.4s, v0.4s, v4.s[3] \n" "fmla v20.4s, v0.4s, v5.s[0] \n" "fmla v23.4s, v0.4s, v5.s[1] \n" "fmla v26.4s, v0.4s, v5.s[2] \n" "fmla v29.4s, v0.4s, v5.s[3] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v12.4s, v1.4s, v4.s[1] \n" "fmla v15.4s, v1.4s, v4.s[2] \n" "fmla v18.4s, v1.4s, v4.s[3] \n" "fmla v21.4s, v1.4s, v5.s[0] \n" "fmla v24.4s, v1.4s, v5.s[1] \n" "fmla v27.4s, v1.4s, v5.s[2] \n" "fmla v30.4s, v1.4s, v5.s[3] \n" "fmla v10.4s, v2.4s, v4.s[0] \n" "fmla v13.4s, v2.4s, v4.s[1] \n" "fmla v16.4s, v2.4s, v4.s[2] \n" "fmla v19.4s, v2.4s, v4.s[3] \n" "fmla v22.4s, v2.4s, v5.s[0] \n" "fmla v25.4s, v2.4s, v5.s[1] \n" "fmla v28.4s, v2.4s, v5.s[2] \n" "fmla v31.4s, v2.4s, v5.s[3] \n" "fmla v8.4s, v3.4s, v6.s[0] \n" "fmla v11.4s, v3.4s, v6.s[1] \n" "fmla v14.4s, v3.4s, v6.s[2] \n" "fmla v17.4s, v3.4s, v6.s[3] \n" "fmla v20.4s, v3.4s, v7.s[0] \n" "fmla v23.4s, v3.4s, v7.s[1] \n" "fmla v26.4s, v3.4s, v7.s[2] \n" "fmla v29.4s, v3.4s, v7.s[3] \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v9.4s, v0.4s, v6.s[0] \n" "fmla v12.4s, v0.4s, v6.s[1] \n" "fmla v15.4s, v0.4s, v6.s[2] \n" "fmla v18.4s, v0.4s, v6.s[3] \n" "fmla v21.4s, v0.4s, v7.s[0] \n" "fmla v24.4s, v0.4s, v7.s[1] \n" "fmla v27.4s, v0.4s, v7.s[2] \n" "fmla v30.4s, v0.4s, v7.s[3] \n" "fmla v10.4s, v1.4s, v6.s[0] \n" "fmla v13.4s, v1.4s, v6.s[1] \n" "fmla v16.4s, v1.4s, v6.s[2] \n" "fmla v19.4s, v1.4s, v6.s[3] \n" "fmla v22.4s, v1.4s, v7.s[0] \n" "fmla v25.4s, v1.4s, v7.s[1] \n" "fmla v28.4s, v1.4s, v7.s[2] \n" "fmla v31.4s, v1.4s, v7.s[3] \n" "prfm pldl1keep, [%10, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%10], #32 \n" "shll v4.4s, v4.4h, #16 
\n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v8.4s, v2.4s, v4.s[0] \n" "fmla v11.4s, v2.4s, v4.s[1] \n" "fmla v14.4s, v2.4s, v4.s[2] \n" "fmla v17.4s, v2.4s, v4.s[3] \n" "fmla v20.4s, v2.4s, v5.s[0] \n" "fmla v23.4s, v2.4s, v5.s[1] \n" "fmla v26.4s, v2.4s, v5.s[2] \n" "fmla v29.4s, v2.4s, v5.s[3] \n" "fmla v9.4s, v3.4s, v4.s[0] \n" "fmla v12.4s, v3.4s, v4.s[1] \n" "fmla v15.4s, v3.4s, v4.s[2] \n" "fmla v18.4s, v3.4s, v4.s[3] \n" "fmla v21.4s, v3.4s, v5.s[0] \n" "fmla v24.4s, v3.4s, v5.s[1] \n" "fmla v27.4s, v3.4s, v5.s[2] \n" "fmla v30.4s, v3.4s, v5.s[3] \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v10.4s, v0.4s, v4.s[0] \n" "fmla v13.4s, v0.4s, v4.s[1] \n" "fmla v16.4s, v0.4s, v4.s[2] \n" "fmla v19.4s, v0.4s, v4.s[3] \n" "fmla v22.4s, v0.4s, v5.s[0] \n" "fmla v25.4s, v0.4s, v5.s[1] \n" "fmla v28.4s, v0.4s, v5.s[2] \n" "fmla v31.4s, v0.4s, v5.s[3] \n" "fmla v8.4s, v1.4s, v6.s[0] \n" "fmla v11.4s, v1.4s, v6.s[1] \n" "fmla v14.4s, v1.4s, v6.s[2] \n" "fmla v17.4s, v1.4s, v6.s[3] \n" "fmla v20.4s, v1.4s, v7.s[0] \n" "fmla v23.4s, v1.4s, v7.s[1] \n" "fmla v26.4s, v1.4s, v7.s[2] \n" "fmla v29.4s, v1.4s, v7.s[3] \n" "fmla v9.4s, v2.4s, v6.s[0] \n" "fmla v12.4s, v2.4s, v6.s[1] \n" "fmla v15.4s, v2.4s, v6.s[2] \n" "fmla v18.4s, v2.4s, v6.s[3] \n" "fmla v21.4s, v2.4s, v7.s[0] \n" "fmla v24.4s, v2.4s, v7.s[1] \n" "fmla v27.4s, v2.4s, v7.s[2] \n" "fmla v30.4s, v2.4s, v7.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v10.4s, v3.4s, v6.s[0] \n" "fmla v13.4s, v3.4s, v6.s[1] \n" "fmla v16.4s, v3.4s, v6.s[2] \n" "fmla v19.4s, v3.4s, v6.s[3] \n" "fmla v22.4s, v3.4s, v7.s[0] \n" "fmla v25.4s, v3.4s, v7.s[1] \n" "fmla v28.4s, v3.4s, v7.s[2] \n" "fmla v31.4s, v3.4s, v7.s[3] \n" "bne 0b \n" "shrn v8.4h, v8.4s, #16 \n" "shrn v9.4h, v9.4s, #16 \n" "shrn v10.4h, v10.4s, #16 \n" "shrn v11.4h, 
v11.4s, #16 \n" "shrn v12.4h, v12.4s, #16 \n" "shrn v13.4h, v13.4s, #16 \n" "shrn v14.4h, v14.4s, #16 \n" "shrn v15.4h, v15.4s, #16 \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "shrn v18.4h, v18.4s, #16 \n" "shrn v19.4h, v19.4s, #16 \n" "shrn v20.4h, v20.4s, #16 \n" "shrn v21.4h, v21.4s, #16 \n" "shrn v22.4h, v22.4s, #16 \n" "shrn v23.4h, v23.4s, #16 \n" "shrn v24.4h, v24.4s, #16 \n" "shrn v25.4h, v25.4s, #16 \n" "shrn v26.4h, v26.4s, #16 \n" "shrn v27.4h, v27.4s, #16 \n" "shrn v28.4h, v28.4s, #16 \n" "shrn v29.4h, v29.4s, #16 \n" "shrn v30.4h, v30.4s, #16 \n" "shrn v31.4h, v31.4s, #16 \n" "st1 {v8.4h, v9.4h, v10.4h}, [%1], #24 \n" "st1 {v11.4h, v12.4h, v13.4h}, [%2], #24 \n" "st1 {v14.4h, v15.4h, v16.4h}, [%3], #24 \n" "st1 {v17.4h, v18.4h, v19.4h}, [%4], #24 \n" "st1 {v20.4h, v21.4h, v22.4h}, [%5], #24 \n" "st1 {v23.4h, v24.4h, v25.4h}, [%6], #24 \n" "st1 {v26.4h, v27.4h, v28.4h}, [%7], #24 \n" "st1 {v29.4h, v30.4h, v31.4h}, [%8], #24 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(outptr6), // %7 "=r"(outptr7), // %8 "=r"(tmpptr), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(tmpptr), "10"(kptr), "r"(biasptr) // %22 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < size; i += 8) { unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8); const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8); int nn = inch * maxk; // inch always > 0 asm volatile( "ld1 {v30.4s, v31.4s}, [%22] \n" "dup v16.4s, v30.s[0] \n" "dup v17.4s, v30.s[0] \n" "dup v18.4s, v30.s[1] \n" "dup v19.4s, v30.s[1] \n" "dup v20.4s, v30.s[2] \n" "dup v21.4s, 
v30.s[2] \n" "dup v22.4s, v30.s[3] \n" "dup v23.4s, v30.s[3] \n" "dup v24.4s, v31.s[0] \n" "dup v25.4s, v31.s[0] \n" "dup v26.4s, v31.s[1] \n" "dup v27.4s, v31.s[1] \n" "dup v28.4s, v31.s[2] \n" "dup v29.4s, v31.s[2] \n" "dup v30.4s, v31.s[3] \n" "dup v31.4s, v31.s[3] \n" "0: \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n" "prfm pldl1keep, [%10, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%10], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v16.4s, v0.4s, v4.s[0] \n" "fmla v18.4s, v0.4s, v4.s[1] \n" "fmla v20.4s, v0.4s, v4.s[2] \n" "fmla v22.4s, v0.4s, v4.s[3] \n" "fmla v24.4s, v0.4s, v5.s[0] \n" "fmla v26.4s, v0.4s, v5.s[1] \n" "fmla v28.4s, v0.4s, v5.s[2] \n" "fmla v30.4s, v0.4s, v5.s[3] \n" "fmla v17.4s, v1.4s, v4.s[0] \n" "fmla v19.4s, v1.4s, v4.s[1] \n" "fmla v21.4s, v1.4s, v4.s[2] \n" "fmla v23.4s, v1.4s, v4.s[3] \n" "fmla v25.4s, v1.4s, v5.s[0] \n" "fmla v27.4s, v1.4s, v5.s[1] \n" "fmla v29.4s, v1.4s, v5.s[2] \n" "fmla v31.4s, v1.4s, v5.s[3] \n" "fmla v16.4s, v2.4s, v6.s[0] \n" "fmla v18.4s, v2.4s, v6.s[1] \n" "fmla v20.4s, v2.4s, v6.s[2] \n" "fmla v22.4s, v2.4s, v6.s[3] \n" "fmla v24.4s, v2.4s, v7.s[0] \n" "fmla v26.4s, v2.4s, v7.s[1] \n" "fmla v28.4s, v2.4s, v7.s[2] \n" "fmla v30.4s, v2.4s, v7.s[3] \n" "fmla v17.4s, v3.4s, v6.s[0] \n" "fmla v19.4s, v3.4s, v6.s[1] \n" "fmla v21.4s, v3.4s, v6.s[2] \n" "fmla v23.4s, v3.4s, v6.s[3] \n" "fmla v25.4s, v3.4s, v7.s[0] \n" "fmla v27.4s, v3.4s, v7.s[1] \n" "fmla v29.4s, v3.4s, v7.s[2] \n" "fmla v31.4s, v3.4s, v7.s[3] \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%9], #32 \n" "prfm pldl1keep, [%10, #256] \n" "ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%10], #32 \n" "shll v12.4s, v12.4h, #16 \n" "shll v13.4s, v13.4h, #16 \n" "shll v14.4s, v14.4h, #16 \n" "shll v15.4s, 
v15.4h, #16 \n" "shll v8.4s, v8.4h, #16 \n" "shll v9.4s, v9.4h, #16 \n" "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "fmla v16.4s, v12.4s, v8.s[0] \n" "fmla v18.4s, v12.4s, v8.s[1] \n" "fmla v20.4s, v12.4s, v8.s[2] \n" "fmla v22.4s, v12.4s, v8.s[3] \n" "fmla v24.4s, v12.4s, v9.s[0] \n" "fmla v26.4s, v12.4s, v9.s[1] \n" "fmla v28.4s, v12.4s, v9.s[2] \n" "fmla v30.4s, v12.4s, v9.s[3] \n" "fmla v17.4s, v13.4s, v8.s[0] \n" "fmla v19.4s, v13.4s, v8.s[1] \n" "fmla v21.4s, v13.4s, v8.s[2] \n" "fmla v23.4s, v13.4s, v8.s[3] \n" "fmla v25.4s, v13.4s, v9.s[0] \n" "fmla v27.4s, v13.4s, v9.s[1] \n" "fmla v29.4s, v13.4s, v9.s[2] \n" "fmla v31.4s, v13.4s, v9.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v10.s[0] \n" "fmla v18.4s, v14.4s, v10.s[1] \n" "fmla v20.4s, v14.4s, v10.s[2] \n" "fmla v22.4s, v14.4s, v10.s[3] \n" "fmla v24.4s, v14.4s, v11.s[0] \n" "fmla v26.4s, v14.4s, v11.s[1] \n" "fmla v28.4s, v14.4s, v11.s[2] \n" "fmla v30.4s, v14.4s, v11.s[3] \n" "fmla v17.4s, v15.4s, v10.s[0] \n" "fmla v19.4s, v15.4s, v10.s[1] \n" "fmla v21.4s, v15.4s, v10.s[2] \n" "fmla v23.4s, v15.4s, v10.s[3] \n" "fmla v25.4s, v15.4s, v11.s[0] \n" "fmla v27.4s, v15.4s, v11.s[1] \n" "fmla v29.4s, v15.4s, v11.s[2] \n" "fmla v31.4s, v15.4s, v11.s[3] \n" "bne 0b \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "shrn v18.4h, v18.4s, #16 \n" "shrn v19.4h, v19.4s, #16 \n" "shrn v20.4h, v20.4s, #16 \n" "shrn v21.4h, v21.4s, #16 \n" "shrn v22.4h, v22.4s, #16 \n" "shrn v23.4h, v23.4s, #16 \n" "shrn v24.4h, v24.4s, #16 \n" "shrn v25.4h, v25.4s, #16 \n" "shrn v26.4h, v26.4s, #16 \n" "shrn v27.4h, v27.4s, #16 \n" "shrn v28.4h, v28.4s, #16 \n" "shrn v29.4h, v29.4s, #16 \n" "shrn v30.4h, v30.4s, #16 \n" "shrn v31.4h, v31.4s, #16 \n" "st1 {v16.4h, v17.4h}, [%1], #16 \n" "st1 {v18.4h, v19.4h}, [%2], #16 \n" "st1 {v20.4h, v21.4h}, [%3], #16 \n" "st1 {v22.4h, v23.4h}, [%4], #16 \n" "st1 {v24.4h, v25.4h}, [%5], #16 \n" "st1 {v26.4h, v27.4h}, [%6], #16 \n" "st1 {v28.4h, v29.4h}, 
[%7], #16 \n" "st1 {v30.4h, v31.4h}, [%8], #16 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(outptr6), // %7 "=r"(outptr7), // %8 "=r"(tmpptr), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(tmpptr), "10"(kptr), "r"(biasptr) // %22 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < size; i += 4) { unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8); int nn = inch * maxk; // inch always > 0 asm volatile( "ld1 {v22.4s, v23.4s}, [%22] \n" "dup v16.4s, v22.s[0] \n" "dup v17.4s, v22.s[1] \n" "dup v18.4s, v22.s[2] \n" "dup v19.4s, v22.s[3] \n" "dup v20.4s, v23.s[0] \n" "dup v21.4s, v23.s[1] \n" "dup v22.4s, v23.s[2] \n" "dup v23.4s, v23.s[3] \n" "0: \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n" "prfm pldl1keep, [%10, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%10], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v16.4s, v0.4s, v4.s[0] \n" "fmla v17.4s, v0.4s, v4.s[1] \n" "fmla v18.4s, v0.4s, v4.s[2] \n" "fmla v19.4s, v0.4s, v4.s[3] \n" "fmla v20.4s, v0.4s, v5.s[0] \n" "fmla v21.4s, v0.4s, v5.s[1] \n" "fmla v22.4s, v0.4s, v5.s[2] \n" "fmla v23.4s, v0.4s, v5.s[3] \n" "prfm pldl1keep, [%10, #256] \n" "ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%10], #32 \n" "shll v8.4s, v8.4h, #16 \n" "shll v9.4s, v9.4h, #16 \n" "shll v10.4s, v10.4h, #16 \n" "shll 
v11.4s, v11.4h, #16 \n" "fmla v16.4s, v1.4s, v6.s[0] \n" "fmla v17.4s, v1.4s, v6.s[1] \n" "fmla v18.4s, v1.4s, v6.s[2] \n" "fmla v19.4s, v1.4s, v6.s[3] \n" "fmla v20.4s, v1.4s, v7.s[0] \n" "fmla v21.4s, v1.4s, v7.s[1] \n" "fmla v22.4s, v1.4s, v7.s[2] \n" "fmla v23.4s, v1.4s, v7.s[3] \n" "fmla v16.4s, v2.4s, v8.s[0] \n" "fmla v17.4s, v2.4s, v8.s[1] \n" "fmla v18.4s, v2.4s, v8.s[2] \n" "fmla v19.4s, v2.4s, v8.s[3] \n" "fmla v20.4s, v2.4s, v9.s[0] \n" "fmla v21.4s, v2.4s, v9.s[1] \n" "fmla v22.4s, v2.4s, v9.s[2] \n" "fmla v23.4s, v2.4s, v9.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v3.4s, v10.s[0] \n" "fmla v17.4s, v3.4s, v10.s[1] \n" "fmla v18.4s, v3.4s, v10.s[2] \n" "fmla v19.4s, v3.4s, v10.s[3] \n" "fmla v20.4s, v3.4s, v11.s[0] \n" "fmla v21.4s, v3.4s, v11.s[1] \n" "fmla v22.4s, v3.4s, v11.s[2] \n" "fmla v23.4s, v3.4s, v11.s[3] \n" "bne 0b \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "shrn v18.4h, v18.4s, #16 \n" "shrn v19.4h, v19.4s, #16 \n" "shrn v20.4h, v20.4s, #16 \n" "shrn v21.4h, v21.4s, #16 \n" "shrn v22.4h, v22.4s, #16 \n" "shrn v23.4h, v23.4s, #16 \n" "st1 {v16.4h}, [%1], #8 \n" "st1 {v17.4h}, [%2], #8 \n" "st1 {v18.4h}, [%3], #8 \n" "st1 {v19.4h}, [%4], #8 \n" "st1 {v20.4h}, [%5], #8 \n" "st1 {v21.4h}, [%6], #8 \n" "st1 {v22.4h}, [%7], #8 \n" "st1 {v23.4h}, [%8], #8 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(outptr6), // %7 "=r"(outptr7), // %8 "=r"(tmpptr), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(tmpptr), "10"(kptr), "r"(biasptr) // %22 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } for (; i < size; i++) { unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4); const unsigned 
short* kptr = (const unsigned short*)kernel.channel(p / 8); int nn = inch * maxk; // inch always > 0 asm volatile( "ld1 {v16.4s, v17.4s}, [%22] \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%9, #64] \n" "ld1 {v0.4h}, [%9], #8 \n" "prfm pldl1keep, [%10, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%10], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v16.4s, v4.4s, v0.s[0] \n" "fmla v17.4s, v5.4s, v0.s[0] \n" "fmla v18.4s, v6.4s, v0.s[1] \n" "fmla v19.4s, v7.4s, v0.s[1] \n" "prfm pldl1keep, [%10, #256] \n" "ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%10], #32 \n" "shll v8.4s, v8.4h, #16 \n" "shll v9.4s, v9.4h, #16 \n" "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "fmla v16.4s, v8.4s, v0.s[2] \n" "fmla v17.4s, v9.4s, v0.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v18.4s, v10.4s, v0.s[3] \n" "fmla v19.4s, v11.4s, v0.s[3] \n" "bne 0b \n" "fadd v16.4s, v16.4s, v18.4s \n" "fadd v17.4s, v17.4s, v19.4s \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "st1 {v16.h}[0], [%1], #2 \n" "st1 {v16.h}[1], [%2], #2 \n" "st1 {v16.h}[2], [%3], #2 \n" "st1 {v16.h}[3], [%4], #2 \n" "st1 {v17.h}[0], [%5], #2 \n" "st1 {v17.h}[1], [%6], #2 \n" "st1 {v17.h}[2], [%7], #2 \n" "st1 {v17.h}[3], [%8], #2 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(outptr6), // %7 "=r"(outptr7), // %8 "=r"(tmpptr), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(tmpptr), "10"(kptr), "r"(biasptr) // %22 : "cc", "memory", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19"); } } remain_outch_start += nn_outch << 3; nn_outch = (outch - remain_outch_start) >> 2; #else // __aarch64__ nn_outch = outch >> 
2; #endif // __aarch64__ #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; unsigned short* outptr0 = top_blob.channel(p); unsigned short* outptr1 = top_blob.channel(p + 1); unsigned short* outptr2 = top_blob.channel(p + 2); unsigned short* outptr3 = top_blob.channel(p + 3); const float zeros[4] = {0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? bias + p : zeros; int i = 0; #if __aarch64__ for (; i + 11 < size; i += 12) { unsigned short* tmpptr = tmp.channel(i / 12); const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8 + (p % 8) / 4); int nn = inch * maxk; // inch always > 0 asm volatile( "ld1 {v19.4s}, [%14] \n" "dup v8.4s, v19.s[0] \n" "dup v9.4s, v19.s[0] \n" "dup v10.4s, v19.s[0] \n" "dup v11.4s, v19.s[1] \n" "dup v12.4s, v19.s[1] \n" "dup v13.4s, v19.s[1] \n" "dup v14.4s, v19.s[2] \n" "dup v15.4s, v19.s[2] \n" "dup v16.4s, v19.s[2] \n" "dup v17.4s, v19.s[3] \n" "dup v18.4s, v19.s[3] \n" "dup v19.4s, v19.s[3] \n" "0: \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%6], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v11.4s, v0.4s, v4.s[1] \n" "fmla v14.4s, v0.4s, v4.s[2] \n" "fmla v17.4s, v0.4s, v4.s[3] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v12.4s, v1.4s, v4.s[1] \n" "fmla v15.4s, v1.4s, v4.s[2] \n" "fmla v18.4s, v1.4s, v4.s[3] \n" "fmla v10.4s, v2.4s, v4.s[0] \n" "fmla v13.4s, v2.4s, v4.s[1] \n" "fmla v16.4s, v2.4s, v4.s[2] \n" "fmla v19.4s, v2.4s, v4.s[3] \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%5], #32 \n" "shll v20.4s, v20.4h, #16 \n" "shll v21.4s, v21.4h, #16 \n" "shll v22.4s, v22.4h, #16 \n" 
"shll v23.4s, v23.4h, #16 \n" "fmla v8.4s, v3.4s, v5.s[0] \n" "fmla v11.4s, v3.4s, v5.s[1] \n" "fmla v14.4s, v3.4s, v5.s[2] \n" "fmla v17.4s, v3.4s, v5.s[3] \n" "fmla v9.4s, v20.4s, v5.s[0] \n" "fmla v12.4s, v20.4s, v5.s[1] \n" "fmla v15.4s, v20.4s, v5.s[2] \n" "fmla v18.4s, v20.4s, v5.s[3] \n" "fmla v10.4s, v21.4s, v5.s[0] \n" "fmla v13.4s, v21.4s, v5.s[1] \n" "fmla v16.4s, v21.4s, v5.s[2] \n" "fmla v19.4s, v21.4s, v5.s[3] \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%5], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v8.4s, v22.4s, v6.s[0] \n" "fmla v11.4s, v22.4s, v6.s[1] \n" "fmla v14.4s, v22.4s, v6.s[2] \n" "fmla v17.4s, v22.4s, v6.s[3] \n" "fmla v9.4s, v23.4s, v6.s[0] \n" "fmla v12.4s, v23.4s, v6.s[1] \n" "fmla v15.4s, v23.4s, v6.s[2] \n" "fmla v18.4s, v23.4s, v6.s[3] \n" "fmla v10.4s, v24.4s, v6.s[0] \n" "fmla v13.4s, v24.4s, v6.s[1] \n" "fmla v16.4s, v24.4s, v6.s[2] \n" "fmla v19.4s, v24.4s, v6.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v25.4s, v7.s[0] \n" "fmla v11.4s, v25.4s, v7.s[1] \n" "fmla v14.4s, v25.4s, v7.s[2] \n" "fmla v17.4s, v25.4s, v7.s[3] \n" "fmla v9.4s, v26.4s, v7.s[0] \n" "fmla v12.4s, v26.4s, v7.s[1] \n" "fmla v15.4s, v26.4s, v7.s[2] \n" "fmla v18.4s, v26.4s, v7.s[3] \n" "fmla v10.4s, v27.4s, v7.s[0] \n" "fmla v13.4s, v27.4s, v7.s[1] \n" "fmla v16.4s, v27.4s, v7.s[2] \n" "fmla v19.4s, v27.4s, v7.s[3] \n" "bne 0b \n" "shrn v8.4h, v8.4s, #16 \n" "shrn v9.4h, v9.4s, #16 \n" "shrn v10.4h, v10.4s, #16 \n" "shrn v11.4h, v11.4s, #16 \n" "shrn v12.4h, v12.4s, #16 \n" "shrn v13.4h, v13.4s, #16 \n" "shrn v14.4h, v14.4s, #16 \n" "shrn v15.4h, v15.4s, #16 \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "shrn v18.4h, v18.4s, #16 \n" "shrn v19.4h, v19.4s, #16 \n" "st1 {v8.4h, v9.4h, v10.4h}, [%1], #24 \n" "st1 {v11.4h, v12.4h, v13.4h}, [%2], #24 \n" "st1 {v14.4h, v15.4h, v16.4h}, [%3], #24 \n" "st1 {v17.4h, v18.4h, 
v19.4h}, [%4], #24 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(tmpptr), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(tmpptr), "6"(kptr), "r"(biasptr) // %14 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } #endif // __aarch64__ for (; i + 7 < size; i += 8) { #if __aarch64__ unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8); const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8 + (p % 8) / 4); #else unsigned short* tmpptr = tmp.channel(i / 8); const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 4); #endif int nn = inch * maxk; // inch always > 0 #if __aarch64__ asm volatile( "ld1 {v15.4s}, [%14] \n" "dup v8.4s, v15.s[0] \n" "dup v9.4s, v15.s[0] \n" "dup v10.4s, v15.s[1] \n" "dup v11.4s, v15.s[1] \n" "dup v12.4s, v15.s[2] \n" "dup v13.4s, v15.s[2] \n" "dup v14.4s, v15.s[3] \n" "dup v15.4s, v15.s[3] \n" "0: \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%6], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v10.4s, v0.4s, v4.s[1] \n" "fmla v12.4s, v0.4s, v4.s[2] \n" "fmla v14.4s, v0.4s, v4.s[3] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v11.4s, v1.4s, v4.s[1] \n" "fmla v13.4s, v1.4s, v4.s[2] \n" "fmla v15.4s, v1.4s, v4.s[3] \n" "fmla v8.4s, v2.4s, v5.s[0] \n" "fmla v10.4s, v2.4s, v5.s[1] \n" "fmla v12.4s, v2.4s, v5.s[2] \n" "fmla v14.4s, v2.4s, v5.s[3] \n" "fmla v9.4s, v3.4s, v5.s[0] \n" "fmla v11.4s, v3.4s, v5.s[1] \n" "fmla v13.4s, 
v3.4s, v5.s[2] \n" "fmla v15.4s, v3.4s, v5.s[3] \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%5], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v8.4s, v16.4s, v6.s[0] \n" "fmla v10.4s, v16.4s, v6.s[1] \n" "fmla v12.4s, v16.4s, v6.s[2] \n" "fmla v14.4s, v16.4s, v6.s[3] \n" "fmla v9.4s, v17.4s, v6.s[0] \n" "fmla v11.4s, v17.4s, v6.s[1] \n" "fmla v13.4s, v17.4s, v6.s[2] \n" "fmla v15.4s, v17.4s, v6.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v18.4s, v7.s[0] \n" "fmla v10.4s, v18.4s, v7.s[1] \n" "fmla v12.4s, v18.4s, v7.s[2] \n" "fmla v14.4s, v18.4s, v7.s[3] \n" "fmla v9.4s, v19.4s, v7.s[0] \n" "fmla v11.4s, v19.4s, v7.s[1] \n" "fmla v13.4s, v19.4s, v7.s[2] \n" "fmla v15.4s, v19.4s, v7.s[3] \n" "bne 0b \n" "shrn v8.4h, v8.4s, #16 \n" "shrn v9.4h, v9.4s, #16 \n" "shrn v10.4h, v10.4s, #16 \n" "shrn v11.4h, v11.4s, #16 \n" "shrn v12.4h, v12.4s, #16 \n" "shrn v13.4h, v13.4s, #16 \n" "shrn v14.4h, v14.4s, #16 \n" "shrn v15.4h, v15.4s, #16 \n" "st1 {v8.4h, v9.4h}, [%1], #16 \n" "st1 {v10.4h, v11.4h}, [%2], #16 \n" "st1 {v12.4h, v13.4h}, [%3], #16 \n" "st1 {v14.4h, v15.4h}, [%4], #16 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(tmpptr), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(tmpptr), "6"(kptr), "r"(biasptr) // %14 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); #else // __aarch64__ asm volatile( "vld1.f32 {d30-d31}, [%14] \n" "vdup.f32 q8, d30[0] \n" "vdup.f32 q9, d30[0] \n" "vdup.f32 q10, d30[1] \n" "vdup.f32 q11, d30[1] \n" "vdup.f32 q12, d31[0] \n" "vdup.f32 q13, d31[0] \n" "vdup.f32 q14, d31[1] \n" "vdup.f32 q15, d31[1] \n" "0: \n" "pld [%5, #256] \n" "vld1.u16 {d4-d7}, [%5]! \n" "pld [%6, #256] \n" "vld1.u16 {d12-d15}, [%6]! 
\n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q8, q0, d8[0] \n" "vmla.f32 q10, q0, d8[1] \n" "vmla.f32 q12, q0, d9[0] \n" "vmla.f32 q14, q0, d9[1] \n" "vmla.f32 q9, q1, d8[0] \n" "vmla.f32 q11, q1, d8[1] \n" "vmla.f32 q13, q1, d9[0] \n" "vmla.f32 q15, q1, d9[1] \n" "vmla.f32 q8, q2, d10[0] \n" "vmla.f32 q10, q2, d10[1] \n" "vmla.f32 q12, q2, d11[0] \n" "vmla.f32 q14, q2, d11[1] \n" "vmla.f32 q9, q3, d10[0] \n" "vmla.f32 q11, q3, d10[1] \n" "vmla.f32 q13, q3, d11[0] \n" "vmla.f32 q15, q3, d11[1] \n" "pld [%5, #256] \n" "vld1.u16 {d4-d7}, [%5]! \n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q8, q0, d12[0] \n" "vmla.f32 q10, q0, d12[1] \n" "vmla.f32 q12, q0, d13[0] \n" "vmla.f32 q14, q0, d13[1] \n" "vmla.f32 q9, q1, d12[0] \n" "vmla.f32 q11, q1, d12[1] \n" "vmla.f32 q13, q1, d13[0] \n" "vmla.f32 q15, q1, d13[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q2, d14[0] \n" "vmla.f32 q10, q2, d14[1] \n" "vmla.f32 q12, q2, d15[0] \n" "vmla.f32 q14, q2, d15[1] \n" "vmla.f32 q9, q3, d14[0] \n" "vmla.f32 q11, q3, d14[1] \n" "vmla.f32 q13, q3, d15[0] \n" "vmla.f32 q15, q3, d15[1] \n" "bne 0b \n" "vshrn.u32 d16, q8, #16 \n" "vshrn.u32 d17, q9, #16 \n" "vshrn.u32 d20, q10, #16 \n" "vshrn.u32 d21, q11, #16 \n" "vshrn.u32 d24, q12, #16 \n" "vshrn.u32 d25, q13, #16 \n" "vshrn.u32 d28, q14, #16 \n" "vshrn.u32 d29, q15, #16 \n" "vst1.u16 {d16-d17}, [%1 :64]! \n" "vst1.u16 {d20-d21}, [%2 :64]! \n" "vst1.u16 {d24-d25}, [%3 :64]! \n" "vst1.u16 {d28-d29}, [%4 :64]! 
\n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(tmpptr), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(tmpptr), "6"(kptr), "r"(biasptr) // %14 : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; i + 3 < size; i += 4) { #if __aarch64__ unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8 + (p % 8) / 4); #else unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 4); #endif int nn = inch * maxk; // inch always > 0 #if __aarch64__ asm volatile( "ld1 {v11.4s}, [%14] \n" "dup v8.4s, v11.s[0] \n" "dup v9.4s, v11.s[1] \n" "dup v10.4s, v11.s[2] \n" "dup v11.4s, v11.s[3] \n" "0: \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%6], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v9.4s, v0.4s, v4.s[1] \n" "fmla v10.4s, v0.4s, v4.s[2] \n" "fmla v11.4s, v0.4s, v4.s[3] \n" "fmla v8.4s, v1.4s, v5.s[0] \n" "fmla v9.4s, v1.4s, v5.s[1] \n" "fmla v10.4s, v1.4s, v5.s[2] \n" "fmla v11.4s, v1.4s, v5.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v2.4s, v6.s[0] \n" "fmla v9.4s, v2.4s, v6.s[1] \n" "fmla v10.4s, v2.4s, v6.s[2] \n" "fmla v11.4s, v2.4s, v6.s[3] \n" "fmla v8.4s, v3.4s, v7.s[0] \n" "fmla v9.4s, v3.4s, v7.s[1] \n" "fmla v10.4s, v3.4s, v7.s[2] \n" "fmla v11.4s, v3.4s, v7.s[3] \n" "bne 0b \n" "shrn v8.4h, v8.4s, #16 \n" "shrn v9.4h, v9.4s, #16 \n" "shrn v10.4h, v10.4s, #16 \n" 
"shrn v11.4h, v11.4s, #16 \n" "st1 {v8.4h}, [%1], #8 \n" "st1 {v9.4h}, [%2], #8 \n" "st1 {v10.4h}, [%3], #8 \n" "st1 {v11.4h}, [%4], #8 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(tmpptr), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(tmpptr), "6"(kptr), "r"(biasptr) // %14 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"); #else // __aarch64__ asm volatile( "vld1.f32 {d22-d23}, [%14] \n" "vdup.f32 q8, d22[0] \n" "vdup.f32 q9, d22[1] \n" "vdup.f32 q10, d23[0] \n" "vdup.f32 q11, d23[1] \n" "0: \n" "pld [%5, #256] \n" "vld1.u16 {d4-d7}, [%5]! \n" "pld [%6, #256] \n" "vld1.u16 {d12-d15}, [%6]! \n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q8, q0, d8[0] \n" "vmla.f32 q9, q0, d8[1] \n" "vmla.f32 q10, q0, d9[0] \n" "vmla.f32 q11, q0, d9[1] \n" "vmla.f32 q8, q1, d10[0] \n" "vmla.f32 q9, q1, d10[1] \n" "vmla.f32 q10, q1, d11[0] \n" "vmla.f32 q11, q1, d11[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q2, d12[0] \n" "vmla.f32 q9, q2, d12[1] \n" "vmla.f32 q10, q2, d13[0] \n" "vmla.f32 q11, q2, d13[1] \n" "vmla.f32 q8, q3, d14[0] \n" "vmla.f32 q9, q3, d14[1] \n" "vmla.f32 q10, q3, d15[0] \n" "vmla.f32 q11, q3, d15[1] \n" "bne 0b \n" "vshrn.u32 d16, q8, #16 \n" "vshrn.u32 d18, q9, #16 \n" "vshrn.u32 d20, q10, #16 \n" "vshrn.u32 d22, q11, #16 \n" "vst1.u16 {d16}, [%1 :64]! \n" "vst1.u16 {d18}, [%2 :64]! \n" "vst1.u16 {d20}, [%3 :64]! \n" "vst1.u16 {d22}, [%4 :64]! 
\n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(tmpptr), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(tmpptr), "6"(kptr), "r"(biasptr) // %14 : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"); #endif // __aarch64__ } for (; i < size; i++) { #if __aarch64__ unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4); const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8 + (p % 8) / 4); #else unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 4); #endif int nn = inch * maxk; // inch always > 0 #if __aarch64__ asm volatile( "ld1 {v8.4s}, [%14] \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "0: \n" "prfm pldl1keep, [%5, #64] \n" "ld1 {v0.4h}, [%5], #8 \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%6], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v5.4s, v0.s[1] \n" "subs %w0, %w0, #1 \n" "fmla v10.4s, v6.4s, v0.s[2] \n" "fmla v11.4s, v7.4s, v0.s[3] \n" "bne 0b \n" "fadd v8.4s, v8.4s, v9.4s \n" "fadd v10.4s, v10.4s, v11.4s \n" "fadd v8.4s, v8.4s, v10.4s \n" "shrn v8.4h, v8.4s, #16 \n" "st1 {v8.h}[0], [%1], #2 \n" "st1 {v8.h}[1], [%2], #2 \n" "st1 {v8.h}[2], [%3], #2 \n" "st1 {v8.h}[3], [%4], #2 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(tmpptr), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(tmpptr), "6"(kptr), "r"(biasptr) // %14 : "cc", "memory", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"); #else // __aarch64__ asm volatile( 
"vld1.f32 {d16-d17}, [%14] \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%5, #64] \n" "vld1.u16 {d1}, [%5]! \n" "pld [%6, #256] \n" "vld1.u16 {d12-d15}, [%6]! \n" "vshll.u16 q0, d1, #16 \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q5, d0[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q10, q6, d1[0] \n" "vmla.f32 q11, q7, d1[1] \n" "bne 0b \n" "vadd.f32 q8, q8, q9 \n" "vadd.f32 q10, q10, q11 \n" "vadd.f32 q8, q8, q10 \n" "vshrn.u32 d16, q8, #16 \n" "vst1.u16 {d16[0]}, [%1]! \n" "vst1.u16 {d16[1]}, [%2]! \n" "vst1.u16 {d16[2]}, [%3]! \n" "vst1.u16 {d16[3]}, [%4]! \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(tmpptr), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(tmpptr), "6"(kptr), "r"(biasptr) // %14 : "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"); #endif // __aarch64__ } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { unsigned short* outptr0 = top_blob.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; int i = 0; #if __aarch64__ for (; i + 11 < size; i += 12) { unsigned short* tmpptr = tmp.channel(i / 12); const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8 + (p % 8) / 4 + p % 4); int nn = inch * maxk; // inch always > 0 asm volatile( "dup v8.4s, %w8 \n" "dup v9.4s, %w8 \n" "dup v10.4s, %w8 \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v4.4h}, [%3], #8 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v10.4s, v2.4s, v4.s[0] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%2], #32 \n" "shll v12.4s, v12.4h, #16 \n" "shll v13.4s, v13.4h, #16 \n" "shll v14.4s, v14.4h, #16 \n" "shll v15.4s, v15.4h, #16 \n" "fmla v5.4s, v3.4s, v4.s[1] \n" "fmla v6.4s, v12.4s, v4.s[1] \n" "fmla v7.4s, v13.4s, v4.s[1] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v8.4s, v14.4s, v4.s[2] \n" "fmla v9.4s, v15.4s, v4.s[2] \n" "fmla v10.4s, v16.4s, v4.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v5.4s, v17.4s, v4.s[3] \n" "fmla v6.4s, v18.4s, v4.s[3] \n" "fmla v7.4s, v19.4s, v4.s[3] \n" "bne 0b \n" "fadd v8.4s, v8.4s, v5.4s \n" "fadd v9.4s, v9.4s, v6.4s \n" "fadd v10.4s, v10.4s, v7.4s \n" "shrn v8.4h, v8.4s, #16 \n" "shrn v9.4h, v9.4s, #16 \n" "shrn v10.4h, v10.4s, #16 \n" "st1 {v8.4h, v9.4h, v10.4h}, [%1], #24 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr), "r"(bias0) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", 
"v10", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); } #endif // __aarch64__ for (; i + 7 < size; i += 8) { #if __aarch64__ unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8); const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8 + (p % 8) / 4 + p % 4); #else unsigned short* tmpptr = tmp.channel(i / 8); const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 4 + p % 4); #endif int nn = inch * maxk; // inch always > 0 #if __aarch64__ asm volatile( "dup v8.4s, %w8 \n" "dup v9.4s, %w8 \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v4.4h}, [%3], #8 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v10.4s, v2.4s, v4.s[1] \n" "fmla v11.4s, v3.4s, v4.s[1] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%2], #32 \n" "shll v12.4s, v12.4h, #16 \n" "shll v13.4s, v13.4h, #16 \n" "shll v14.4s, v14.4h, #16 \n" "shll v15.4s, v15.4h, #16 \n" "fmla v8.4s, v12.4s, v4.s[2] \n" "fmla v9.4s, v13.4s, v4.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v10.4s, v14.4s, v4.s[3] \n" "fmla v11.4s, v15.4s, v4.s[3] \n" "bne 0b \n" "fadd v8.4s, v8.4s, v10.4s \n" "fadd v9.4s, v9.4s, v11.4s \n" "shrn v8.4h, v8.4s, #16 \n" "shrn v9.4h, v9.4s, #16 \n" "st1 {v8.4h, v9.4h}, [%1], #16 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr), "r"(bias0) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"); #else // __aarch64__ asm volatile( "vdup.f32 q8, %8 \n" "vdup.f32 q9, %8 \n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%2, #256] \n" "vld1.u16 {d4-d7}, [%2]! 
\n" "pld [%3, #64] \n" "vld1.u16 {d9}, [%3]! \n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q4, d9, #16 \n" "vmla.f32 q8, q0, d8[0] \n" "vmla.f32 q9, q1, d8[0] \n" "vmla.f32 q10, q2, d8[1] \n" "vmla.f32 q11, q3, d8[1] \n" "pld [%2, #256] \n" "vld1.u16 {d28-d31}, [%2]! \n" "vshll.u16 q12, d28, #16 \n" "vshll.u16 q13, d29, #16 \n" "vshll.u16 q14, d30, #16 \n" "vshll.u16 q15, d31, #16 \n" "vmla.f32 q8, q12, d9[0] \n" "vmla.f32 q9, q13, d9[0] \n" "subs %0, %0, #1 \n" "vmla.f32 q10, q14, d9[1] \n" "vmla.f32 q11, q15, d9[1] \n" "bne 0b \n" "vadd.f32 q8, q8, q10 \n" "vadd.f32 q9, q9, q11 \n" "vshrn.u32 d16, q8, #16 \n" "vshrn.u32 d17, q9, #16 \n" "vst1.u16 {d16-d17}, [%1 :64]! \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr), "r"(bias0) // %8 : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; i + 3 < size; i += 4) { #if __aarch64__ unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8 + (p % 8) / 4 + p % 4); #else unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 4 + p % 4); #endif int nn = inch * maxk; // inch always > 0 #if __aarch64__ asm volatile( "dup v8.4s, %w8 \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v4.4h}, [%3], #8 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v9.4s, v1.4s, v4.s[1] \n" "subs %w0, %w0, #1 \n" "fmla v10.4s, v2.4s, 
v4.s[2] \n" "fmla v11.4s, v3.4s, v4.s[3] \n" "bne 0b \n" "fadd v8.4s, v8.4s, v9.4s \n" "fadd v10.4s, v10.4s, v11.4s \n" "fadd v8.4s, v8.4s, v10.4s \n" "shrn v8.4h, v8.4s, #16 \n" "st1 {v8.4h}, [%1], #8 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr), "r"(bias0) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11"); #else // __aarch64__ asm volatile( "vdup.f32 q8, %8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%2, #256] \n" "vld1.u16 {d4-d7}, [%2]! \n" "pld [%3, #64] \n" "vld1.u16 {d9}, [%3]! \n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q4, d9, #16 \n" "vmla.f32 q8, q0, d8[0] \n" "vmla.f32 q9, q1, d8[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q10, q2, d9[0] \n" "vmla.f32 q11, q3, d9[1] \n" "bne 0b \n" "vadd.f32 q8, q8, q9 \n" "vadd.f32 q10, q10, q11 \n" "vadd.f32 q8, q8, q10 \n" "vshrn.u32 d16, q8, #16 \n" "vst1.u16 {d16}, [%1]! 
\n"
            : "=r"(nn),      // %0
            "=r"(outptr0),   // %1
            "=r"(tmpptr),    // %2
            "=r"(kptr)       // %3
            : "0"(nn),
            "1"(outptr0),
            "2"(tmpptr),
            "3"(kptr),
            "r"(bias0) // %8
            : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11");
#endif // __aarch64__
        }
        // Tail: remaining single output positions, handled with plain NEON
        // intrinsics — one bf16x4 row value times one bf16x4 kernel value per
        // step, accumulated in fp32.
        for (; i < size; i++)
        {
#if __aarch64__
            unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
            const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
            unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
            const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 4 + p % 4);
#endif

            int nn = inch * maxk; // inch always > 0

            float32x4_t _sum0 = vdupq_n_f32(0.f);

            for (int q = 0; q < nn; q++)
            {
                // widen bf16 -> fp32 and fused multiply-accumulate
                float32x4_t _r0 = vcvt_f32_bf16(vld1_u16(tmpptr));
                float32x4_t _k0 = vcvt_f32_bf16(vld1_u16(kptr));
                _sum0 = vmlaq_f32(_sum0, _r0, _k0);

                kptr += 4;
                tmpptr += 4;
            }

            // horizontal reduction of the 4 fp32 lanes to a scalar
#if __aarch64__
            float sum0 = vaddvq_f32(_sum0);
#else
            float32x2_t _ss = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
            float32x2_t _ss2 = vpadd_f32(_ss, _ss);
            float sum0 = vget_lane_f32(_ss2, 0);
#endif

            // add bias, narrow back to bf16 and store
            outptr0[0] = float32_to_bfloat16(bias0 + sum0);

            outptr0++;
        }
    }
}

// Repack fp32 convolution weights into the bf16 layout consumed by the
// pack4-to-1 im2col SGEMM kernels above: output channels are grouped by
// 8 (aarch64 only), then 4, then 1, and within each group 4 input channels
// are interleaved per spatial tap so the inner loops can load them with a
// single vector load.
static void convolution_im2col_sgemm_transform_kernel_pack4to1_bf16s_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;

    // interleave
    // src = maxk-inch-outch
    // dst = 4b-4a-maxk-inch/4a-outch/4b
    Mat kernel = _kernel.reshape(maxk, inch, outch);
#if __aarch64__
    // one channel per 8-group, per 4-group, per leftover channel; 2 bytes/bf16
    kernel_tm.create(32 * maxk, inch / 4, outch / 8 + (outch % 8) / 4 + outch % 4, (size_t)2u);
#else
    kernel_tm.create(16 * maxk, inch / 4, outch / 4 + outch % 4, (size_t)2u);
#endif

    int q = 0;
#if __aarch64__
    // groups of 8 output channels
    for (; q + 7 < outch; q += 8)
    {
        const Mat k0 = kernel.channel(q);
        const Mat k1 = kernel.channel(q + 1);
        const Mat k2 = kernel.channel(q + 2);
        const Mat k3 = kernel.channel(q + 3);
        const Mat k4 = kernel.channel(q + 4);
        const Mat k5 = kernel.channel(q + 5);
        const Mat k6 = kernel.channel(q + 6);
        const Mat k7 = kernel.channel(q + 7);

        unsigned short* g00 = kernel_tm.channel(q / 8);

        // 4 input channels at a time
        for (int p = 0; p + 3 < inch; p += 4)
        {
            const float* k00 = k0.row(p);
            const float* k01 = k0.row(p + 1);
            const float* k02 = k0.row(p + 2);
            const float* k03 = k0.row(p + 3);

            const float* k10 = k1.row(p);
            const float* k11 = k1.row(p + 1);
            const float* k12 = k1.row(p + 2);
            const float* k13 = k1.row(p + 3);

            const float* k20 = k2.row(p);
            const float* k21 = k2.row(p + 1);
            const float* k22 = k2.row(p + 2);
            const float* k23 = k2.row(p + 3);

            const float* k30 = k3.row(p);
            const float* k31 = k3.row(p + 1);
            const float* k32 = k3.row(p + 2);
            const float* k33 = k3.row(p + 3);

            const float* k40 = k4.row(p);
            const float* k41 = k4.row(p + 1);
            const float* k42 = k4.row(p + 2);
            const float* k43 = k4.row(p + 3);

            const float* k50 = k5.row(p);
            const float* k51 = k5.row(p + 1);
            const float* k52 = k5.row(p + 2);
            const float* k53 = k5.row(p + 3);

            const float* k60 = k6.row(p);
            const float* k61 = k6.row(p + 1);
            const float* k62 = k6.row(p + 2);
            const float* k63 = k6.row(p + 3);

            const float* k70 = k7.row(p);
            const float* k71 = k7.row(p + 1);
            const float* k72 = k7.row(p + 2);
            const float* k73 = k7.row(p + 3);

            for (int k = 0; k < maxk; k++)
            {
                // 8 output channels x 4 input channels, outch-major within
                // each input channel, converted fp32 -> bf16
                g00[0] = float32_to_bfloat16(k00[k]);
                g00[1] = float32_to_bfloat16(k10[k]);
                g00[2] = float32_to_bfloat16(k20[k]);
                g00[3] = float32_to_bfloat16(k30[k]);
                g00[4] = float32_to_bfloat16(k40[k]);
                g00[5] = float32_to_bfloat16(k50[k]);
                g00[6] = float32_to_bfloat16(k60[k]);
                g00[7] = float32_to_bfloat16(k70[k]);
                g00[8] = float32_to_bfloat16(k01[k]);
                g00[9] = float32_to_bfloat16(k11[k]);
                g00[10] = float32_to_bfloat16(k21[k]);
                g00[11] = float32_to_bfloat16(k31[k]);
                g00[12] = float32_to_bfloat16(k41[k]);
                g00[13] = float32_to_bfloat16(k51[k]);
                g00[14] = float32_to_bfloat16(k61[k]);
                g00[15] = float32_to_bfloat16(k71[k]);
                g00[16] = float32_to_bfloat16(k02[k]);
                g00[17] = float32_to_bfloat16(k12[k]);
                g00[18] = float32_to_bfloat16(k22[k]);
                g00[19] = float32_to_bfloat16(k32[k]);
                g00[20] = float32_to_bfloat16(k42[k]);
                g00[21] = float32_to_bfloat16(k52[k]);
                g00[22] = float32_to_bfloat16(k62[k]);
                g00[23] = float32_to_bfloat16(k72[k]);
                g00[24] = float32_to_bfloat16(k03[k]);
                g00[25] = float32_to_bfloat16(k13[k]);
                g00[26] = float32_to_bfloat16(k23[k]);
                g00[27] = float32_to_bfloat16(k33[k]);
                g00[28] = float32_to_bfloat16(k43[k]);
                g00[29] = float32_to_bfloat16(k53[k]);
                g00[30] = float32_to_bfloat16(k63[k]);
                g00[31] = float32_to_bfloat16(k73[k]);

                g00 += 32;
            }
        }
    }
#endif // __aarch64__
    // groups of 4 output channels
    for (; q + 3 < outch; q += 4)
    {
        const Mat k0 = kernel.channel(q);
        const Mat k1 = kernel.channel(q + 1);
        const Mat k2 = kernel.channel(q + 2);
        const Mat k3 = kernel.channel(q + 3);

#if __aarch64__
        unsigned short* g00 = kernel_tm.channel(q / 8 + (q % 8) / 4);
#else
        unsigned short* g00 = kernel_tm.channel(q / 4);
#endif

        for (int p = 0; p + 3 < inch; p += 4)
        {
            const float* k00 = k0.row(p);
            const float* k01 = k0.row(p + 1);
            const float* k02 = k0.row(p + 2);
            const float* k03 = k0.row(p + 3);

            const float* k10 = k1.row(p);
            const float* k11 = k1.row(p + 1);
            const float* k12 = k1.row(p + 2);
            const float* k13 = k1.row(p + 3);

            const float* k20 = k2.row(p);
            const float* k21 = k2.row(p + 1);
            const float* k22 = k2.row(p + 2);
            const float* k23 = k2.row(p + 3);

            const float* k30 = k3.row(p);
            const float* k31 = k3.row(p + 1);
            const float* k32 = k3.row(p + 2);
            const float* k33 = k3.row(p + 3);

            for (int k = 0; k < maxk; k++)
            {
                // 4 output channels x 4 input channels per tap
                g00[0] = float32_to_bfloat16(k00[k]);
                g00[1] = float32_to_bfloat16(k10[k]);
                g00[2] = float32_to_bfloat16(k20[k]);
                g00[3] = float32_to_bfloat16(k30[k]);
                g00[4] = float32_to_bfloat16(k01[k]);
                g00[5] = float32_to_bfloat16(k11[k]);
                g00[6] = float32_to_bfloat16(k21[k]);
                g00[7] = float32_to_bfloat16(k31[k]);
                g00[8] = float32_to_bfloat16(k02[k]);
                g00[9] = float32_to_bfloat16(k12[k]);
                g00[10] = float32_to_bfloat16(k22[k]);
                g00[11] = float32_to_bfloat16(k32[k]);
                g00[12] = float32_to_bfloat16(k03[k]);
                g00[13] = float32_to_bfloat16(k13[k]);
                g00[14] = float32_to_bfloat16(k23[k]);
                g00[15] = float32_to_bfloat16(k33[k]);

                g00 += 16;
            }
        }
    }
    // leftover single output channels
    for (; q < outch; q++)
    {
        const Mat k0 = kernel.channel(q);

#if __aarch64__
        unsigned short* g00 = kernel_tm.channel(q / 8 + (q % 8) / 4 + q % 4);
#else
        unsigned short* g00 = kernel_tm.channel(q / 4 + q % 4);
#endif

        for (int p = 0; p + 3 < inch; p += 4)
        {
            const float* k00 = k0.row(p);
            const float* k01 = k0.row(p + 1);
            const float* k02 = k0.row(p + 2);
            const float* k03 = k0.row(p + 3);

            for (int k = 0; k < maxk; k++)
            {
                // 1 output channel x 4 input channels per tap
                g00[0] = float32_to_bfloat16(k00[k]);
                g00[1] = float32_to_bfloat16(k01[k]);
                g00[2] = float32_to_bfloat16(k02[k]);
                g00[3] = float32_to_bfloat16(k03[k]);

                g00 += 4;
            }
        }
    }
}

// bf16 convolution via im2col + SGEMM, input elempack 4 -> output elempack 1.
// Unfolds the (already pack4) bottom blob into a size x maxk x inch matrix,
// then hands it to im2col_sgemm_pack4to1_bf16s_neon together with the
// pre-transformed kernel.
static void convolution_im2col_sgemm_pack4to1_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    const int size = outw * outh;

    const int maxk = kernel_w * kernel_h;

    // im2col: elemsize 8u = 4 bf16 lanes x 2 bytes, elempack 4
    Mat bottom_im2col(size, maxk, inch, 8u, 4, opt.workspace_allocator);
    {
        // seek (in bf16 elements, 4-packed) from the end of one output row
        // of samples to the start of the next input row
        const int gap = (w * stride_h - outw * stride_w) * 4;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            unsigned short* ptr = bottom_im2col.channel(p);

            // one row of the im2col matrix per kernel tap (u, v)
            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    const unsigned short* sptr = img.row<const unsigned short>(dilation_h * u) + dilation_w * v * 4;

                    for (int i = 0; i < outh; i++)
                    {
                        int j = 0;
                        // copy 4 output positions (4 packed lanes each) per pass
                        for (; j + 3 < outw; j += 4)
                        {
                            uint16x4_t _val0 = vld1_u16(sptr);
                            uint16x4_t _val1 = vld1_u16(sptr + stride_w * 4);
                            uint16x4_t _val2 = vld1_u16(sptr + stride_w * 8);
                            uint16x4_t _val3 = vld1_u16(sptr + stride_w * 12);
                            vst1_u16(ptr, _val0);
                            vst1_u16(ptr + 4, _val1);
                            vst1_u16(ptr + 8, _val2);
                            vst1_u16(ptr + 12, _val3);

                            sptr += stride_w * 16;
                            ptr += 16;
                        }
                        for (; j + 1 < outw; j += 2)
                        {
                            uint16x4_t _val0 = vld1_u16(sptr);
                            uint16x4_t _val1 = vld1_u16(sptr + stride_w * 4);
                            vst1_u16(ptr, _val0);
                            vst1_u16(ptr + 4, _val1);

                            sptr += stride_w * 8;
                            ptr += 8;
                        }
                        for (; j < outw; j++)
                        {
                            uint16x4_t _val = vld1_u16(sptr);
                            vst1_u16(ptr, _val);

                            sptr += stride_w * 4;
                            ptr += 4;
                        }

                        sptr += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_pack4to1_bf16s_neon(bottom_im2col, top_blob, kernel, _bias, opt);
}
GB_binop__iseq_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__iseq_uint32) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__iseq_uint32) // A.*B function (eWiseMult): GB (_AemultB_03__iseq_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_uint32) // A*D function (colscale): GB (_AxD__iseq_uint32) // D*A function (rowscale): GB (_DxB__iseq_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__iseq_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__iseq_uint32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_uint32) // C=scalar+B GB (_bind1st__iseq_uint32) // C=scalar+B' GB (_bind1st_tran__iseq_uint32) // C=A+scalar GB (_bind2nd__iseq_uint32) // C=A'+scalar GB (_bind2nd_tran__iseq_uint32) // C type: uint32_t // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define 
GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x == y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISEQ || GxB_NO_UINT32 || GxB_NO_ISEQ_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__iseq_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__iseq_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__iseq_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__iseq_uint32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const 
int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__iseq_uint32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__iseq_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__iseq_uint32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t 
*restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__iseq_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__iseq_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__iseq_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__iseq_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = Bx [p] ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__iseq_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = Ax [p] ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB (_bind1st_tran__iseq_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB (_bind2nd_tran__iseq_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
task_target_device_codegen.c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[ .].+[.|,]" --prefix-filecheck-ir-name _ // RUN: %clang_cc1 -no-opaque-pointers -verify -triple x86_64-apple-darwin10 -fopenmp -fopenmp-version=50 -x c -emit-llvm %s -o - | FileCheck %s // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=50 -x c -triple x86_64-apple-darwin10 -emit-pch -o %t %s // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=50 -x c -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s // RUN: %clang_cc1 -no-opaque-pointers -verify -triple x86_64-apple-darwin10 -fopenmp-simd -fopenmp-version=50 -x c -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=50 -x c -triple x86_64-apple-darwin10 -emit-pch -o %t %s // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=50 -x c -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s // SIMD-ONLY0-NOT: {{__kmpc|__tgt}} // expected-no-diagnostics #ifndef HEADER #define HEADER void test_task_affinity(void) { int t; #pragma omp task { #pragma omp target device(t) ; } } #endif // CHECK-LABEL: define {{[^@]+}}@test_task_affinity // CHECK-SAME: () #[[ATTR0:[0-9]+]] { // CHECK-NEXT: entry: // CHECK-NEXT: [[T:%.*]] = alloca i32, align 4 // CHECK-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1 // CHECK-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]) // CHECK-NEXT: [[TMP1:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 1, i64 48, i64 0, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. 
to i32 (i32, i8*)*)) // CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to %struct.kmp_task_t_with_privates* // CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP2]], i32 0, i32 0 // CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP2]], i32 0, i32 1 // CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP4]], i32 0, i32 0 // CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[T]], align 4 // CHECK-NEXT: store i32 [[TMP6]], i32* [[TMP5]], align 8 // CHECK-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i8* [[TMP1]]) // CHECK-NEXT: ret void // // // CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_test_task_affinity_l18 // CHECK-SAME: () #[[ATTR1:[0-9]+]] { // CHECK-NEXT: entry: // CHECK-NEXT: ret void // // // CHECK-LABEL: define {{[^@]+}}@.omp_task_privates_map. // CHECK-SAME: (%struct..kmp_privates.t* noalias noundef [[TMP0:%.*]], i32** noalias noundef [[TMP1:%.*]]) #[[ATTR2:[0-9]+]] { // CHECK-NEXT: entry: // CHECK-NEXT: [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t*, align 8 // CHECK-NEXT: [[DOTADDR1:%.*]] = alloca i32**, align 8 // CHECK-NEXT: store %struct..kmp_privates.t* [[TMP0]], %struct..kmp_privates.t** [[DOTADDR]], align 8 // CHECK-NEXT: store i32** [[TMP1]], i32*** [[DOTADDR1]], align 8 // CHECK-NEXT: [[TMP2:%.*]] = load %struct..kmp_privates.t*, %struct..kmp_privates.t** [[DOTADDR]], align 8 // CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP2]], i32 0, i32 0 // CHECK-NEXT: [[TMP4:%.*]] = load i32**, i32*** [[DOTADDR1]], align 8 // CHECK-NEXT: store i32* [[TMP3]], i32** [[TMP4]], align 8 // CHECK-NEXT: ret void // // // CHECK-LABEL: define {{[^@]+}}@.omp_task_entry. 
// CHECK-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias noundef [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] { // CHECK-NEXT: entry: // CHECK-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4 // CHECK-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8 // CHECK-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8 // CHECK-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8 // CHECK-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8 // CHECK-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8 // CHECK-NEXT: [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i32*, align 8 // CHECK-NEXT: [[DOTCAPTURE_EXPR__I:%.*]] = alloca i32, align 4 // CHECK-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4 // CHECK-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8 // CHECK-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4 // CHECK-NEXT: store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8 // CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4 // CHECK-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8 // CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0 // CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2 // CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0 // CHECK-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 // CHECK-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon* // CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1 // CHECK-NEXT: [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8* // CHECK-NEXT: [[TMP11:%.*]] = bitcast 
%struct.kmp_task_t_with_privates* [[TMP3]] to i8* // CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) // CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) // CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) // CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) // CHECK-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 // CHECK-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !12 // CHECK-NEXT: store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 // CHECK-NEXT: store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, i32**)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 // CHECK-NEXT: store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !12 // CHECK-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !12 // CHECK-NEXT: [[TMP12:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !12 // CHECK-NEXT: [[TMP13:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 // CHECK-NEXT: [[TMP14:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 // CHECK-NEXT: [[TMP15:%.*]] = bitcast void (i8*, ...)* [[TMP13]] to void (i8*, i32**)* // CHECK-NEXT: call void [[TMP15]](i8* [[TMP14]], i32** [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR4:[0-9]+]] // CHECK-NEXT: [[TMP16:%.*]] = load i32*, i32** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !12 // CHECK-NEXT: [[TMP17:%.*]] = load i32, i32* [[TMP16]], align 4 // CHECK-NEXT: store i32 [[TMP17]], i32* [[DOTCAPTURE_EXPR__I]], align 4, !noalias !12 // CHECK-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_test_task_affinity_l18() #[[ATTR4]] // CHECK-NEXT: ret i32 0 //
local_pcon_basis_core.h
#ifndef _LOCAL_PCON_BASIS_CORE_OP_H #define _LOCAL_PCON_BASIS_CORE_OP_H #include <complex> #include "general_basis_core.h" #include "numpy/ndarraytypes.h" template<class I> I inline local_pcon_map_bits(I s,const int map[],const int N){ I ss = 0; for(int i=2*N;i>=0;i--){ int j = map[i]; ss ^= ( j<0 ? ((s&1)^1)<<(2*N+j) : (s&1)<<(2*N-j-1) ); s >>= 1; } return ss; } template<class I> class local_pcon_basis_core : public general_basis_core<I> { public: local_pcon_basis_core(const int _N) : \ general_basis_core<I>::general_basis_core(_N) {} local_pcon_basis_core(const int _N,const int _nt,const int _maps[], \ const int _pers[], const int _qs[]) : \ general_basis_core<I>::general_basis_core(_N,_nt,_maps,_pers,_qs) {} ~local_pcon_basis_core() {} I inline map_state(I s,int n_map,int &sign){ if(general_basis_core<I>::nt<=0){ return s; } const int n = general_basis_core<I>::N; return local_pcon_map_bits(s,&general_basis_core<I>::maps[2*n_map*n],n); } void map_state(I s[],npy_intp M,int n_map,signed char sign[]){ if(general_basis_core<I>::nt<=0){ return; } const int n = general_basis_core<I>::N; const int * map = &general_basis_core<I>::maps[2*n_map*n]; #pragma omp for schedule(static,1) for(npy_intp i=0;i<M;i++){ s[i] = local_pcon_map_bits(s[i],map,n); sign[i] *= 1; } } // void print(I s){ // I s_left,s_right; // split_state(s,s_left,s_right); // std::cout << "|"; // for(int i=0;i<general_basis_core<I>::N;i++){ // std::cout << (s_left&1) << " "; // s_left>>=1; // } // std::cout << ">"; // std::cout << "|"; // for(int i=0;i<general_basis_core<I>::N;i++){ // std::cout << (s_right&1) << " "; // s_right>>=1; // } // std::cout << ">"; // } void split_state(I s,I &s_left,I &s_right){ s_right = ((I(1) << general_basis_core<I>::N) - 1)&s; s_left = (s >> general_basis_core<I>::N); } I comb_state(I s_left,I s_right){ return (s_left<<general_basis_core<I>::N)+s_right; } I next_state_pcon(I s){ I s_left = 0; I s_right = 0; const I one = 1; int n_right=0; split_state(s,s_left,s_right); 
for(int i=0;i<general_basis_core<I>::N;i++){ n_right += (s&1); s >>= 1; } I max_right = 0; for(int i=0;i<n_right;i++){max_right ^= one << (general_basis_core<I>::N-i-1);} if(s_right<max_right){ I t = (s_right | (s_right - 1)) + 1; s_right = t | ((((t & -t) / (s_right & -s_right)) >> 1) - 1); } else{ s_right = 0; for(int i=0;i<n_right;i++){s_right ^= one<<i;} if(s_left>0){ I t = (s_left | (s_left - 1)) + 1; s_left = t | ((((t & -t) / (s_left & -s_left)) >> 1) - 1); } } return comb_state(s_left,s_right); } int op(I&,std::complex<double>&,const int,const char[],const int[]) = 0; }; #endif
ranksort.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>

/* Print the first n elements of array to stdout, space separated. */
void print_array(int* array, int n)
{
    for (int i = 0; i < n; i++)
        printf("%d ", array[i]);
    printf("\n");
}

/*
 * Shuffle random_array (length n) in place and return it.
 *
 * Fix: the swap index must be drawn from [0, last_index] inclusive,
 * i.e. rand() % (last_index + 1).  The original drew from
 * [0, last_index - 1], which is Sattolo's algorithm: it can only produce
 * cyclic permutations, so the shuffle was biased.
 */
int* fisher_yates_shuffle(int* random_array, int n)
{
    int last_index = n;
    while (last_index-- > 1) {
        int swap_index = rand() % (last_index + 1);
        int last_value = random_array[last_index];
        random_array[last_index] = random_array[swap_index];
        random_array[swap_index] = last_value;
    }
    return random_array;
}

/*
 * Allocate and return an array holding a random permutation of 0..n-1.
 * Aborts on allocation failure.  Caller owns the returned array (free()).
 */
int* gen_random_unsorted_array(int n)
{
    int* random_array = malloc(sizeof *random_array * n);
    if (random_array == NULL) {            /* fix: malloc was unchecked */
        perror("malloc");
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < n; i++)
        random_array[i] = i;
    return fisher_yates_shuffle(random_array, n);
}

/*
 * Rank of x in array: the number of elements strictly smaller than x.
 * With all-distinct input (a permutation), the rank is x's final sorted
 * position.
 */
int find_rank(int* array, int n, int x)
{
    int rank = 0;
    for (int i = 0; i < n; i++)
        if (array[i] < x)
            rank++;
    return rank;
}

/*
 * Rank sort: each element is written directly at its rank.  Requires all
 * elements to be distinct (holds here: input is a permutation of 0..n-1).
 * O(n^2) comparisons, parallelized over elements.  Caller frees the result.
 */
int* ranksort(int* array, int n)
{
    int* sorted_array = malloc(sizeof *sorted_array * n);
    if (sorted_array == NULL) {            /* fix: malloc was unchecked */
        perror("malloc");
        exit(EXIT_FAILURE);
    }
    #pragma omp parallel for schedule(dynamic)
    for (int i = 0; i < n; i++) {
        int value = array[i];
        int rank = find_rank(array, n, value);
        sorted_array[rank] = value;
    }
    return sorted_array;
}

int main(int argc, char** argv)
{
    srand(time(NULL));

    if (argc < 2) {
        puts("Passe o valor de n como argumento da linha de comando");
        printf("%s <n>\n", argv[0]);
        /* fix: original fell through and called atoi(argv[1]) with
         * argv[1] == NULL -- undefined behavior */
        return 1;
    }
    int n = atoi(argv[1]);
    if (n <= 0) {                          /* reject non-numeric / non-positive n */
        printf("%s <n>\n", argv[0]);
        return 1;
    }

    int *random_array = gen_random_unsorted_array(n);

    double start_time = omp_get_wtime();
    int* sorted_array = ranksort(random_array, n);
    double end_time = omp_get_wtime();

    printf("Demorou %.4f segundos para ordenar %d números\n",
           end_time - start_time, n);

    /* fix: both arrays leaked in the original */
    free(random_array);
    free(sorted_array);
    return 0;
}
fill_nr_3c.c
/*
 * Author: Qiming Sun <osirpt.sun@gmail.com>
 *
 * Fill routines for 3-center 2-electron integrals (i j | k): for each
 * (ish, jsh) shell pair, loop over all k shells, evaluate the integral
 * block with `intor`, and scatter it into the output array.  The _s1
 * variant stores the full (naoi, naoj) block; the _s2ij variant packs the
 * lower triangle of the i,j dimensions.
 */

#include <stdlib.h>
#include "config.h"
#include "cint.h"

/* Scratch-buffer sizing: upper bound on AO functions per shell, cubed for
 * the three shell dimensions.  TODO(review): confirm 72 bounds all bases
 * used with this driver. */
#define NCTRMAX 72

/*
 * Copy one (di, dj, dk, comp) integral tile from the contiguous buffer
 * `in` into the strided output `out`.
 *   ni   - leading dimension (row stride) of out
 *   nij  - stride between k-planes of out
 *   nijk - stride between components of out
 */
static void dcopy_s1(double *out, double *in, int comp,
                     int ni, int nij, int nijk, int di, int dj, int dk)
{
        const size_t dij = di * dj;
        int i, j, k, ic;
        double *pout, *pin;
        for (ic = 0; ic < comp; ic++) {
                for (k = 0; k < dk; k++) {
                        pout = out + k * nij;
                        pin = in + k * dij;
                        for (j = 0; j < dj; j++) {
                                for (i = 0; i < di; i++) {
                                        pout[j*ni+i] = pin[j*di+i];
                                }
                        }
                }
                out += nijk;
                in += dij * dk;
        }
}

/*
 * out[naoi,naoj,naok,comp] in F-order
 *
 * Fill the (ish, jsh) slice of the full (non-symmetric) output: evaluates
 * (ish jsh | ksh) for every ksh in the slice and copies each block into
 * place.  ish/jsh are 0-based offsets within shls_slice; buf must hold at
 * least di*dj*max(dk)*comp doubles.
 */
void GTOnr3c_fill_s1(int (*intor)(), double *out, int comp, int ish, int jsh,
                     double *buf, int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                     int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int ksh0 = shls_slice[4];
        const int ksh1 = shls_slice[5];
        const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
        const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
        const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
        const size_t nij = naoi * naoj;
        const size_t nijk = nij * naok;

        /* translate slice-relative shell indices to absolute ones */
        ish += ish0;
        jsh += jsh0;
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const int ip = ao_loc[ish] - ao_loc[ish0];
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        /* advance out to the (ip, jp) corner of this shell-pair's block */
        out += jp * naoi + ip;

        int ksh, dk, k0;
        int shls[3];
        shls[0] = ish;
        shls[1] = jsh;
        for (ksh = ksh0; ksh < ksh1; ksh++) {
                shls[2] = ksh;
                k0 = ao_loc[ksh ] - ao_loc[ksh0];
                dk = ao_loc[ksh+1] - ao_loc[ksh];
                (*intor)(buf, shls, atm, natm, bas, nbas, env, cintopt);
                dcopy_s1(out+k0*nij, buf, comp, naoi, nij, nijk, di, dj, dk);
        }
}

/*
 * Triangular-pack copy for the strictly i > j case: row i of the packed
 * lower triangle starts ip+i elements after row i-1 (hence the growing
 * `pout += ip1 + i` stride).  Full dj columns are stored for every i.
 */
static void dcopy_s2_igtj(double *out, double *in, int comp, int ip,
                          int nij, int nijk, int di, int dj, int dk)
{
        const size_t dij = di * dj;
        const size_t ip1 = ip + 1;
        int i, j, k, ic;
        double *pout, *pin;
        for (ic = 0; ic < comp; ic++) {
                for (k = 0; k < dk; k++) {
                        pout = out + k * nij;
                        pin = in + k * dij;
                        for (i = 0; i < di; i++) {
                                for (j = 0; j < dj; j++) {
                                        pout[j] = pin[j*di+i];
                                }
                                pout += ip1 + i;
                        }
                }
                out += nijk;
                in += dij * dk;
        }
}

/*
 * Triangular-pack copy for the diagonal shell pair (ish == jsh): only the
 * j <= i part of each tile is stored (j runs to i inclusive).
 */
static void dcopy_s2_ieqj(double *out, double *in, int comp, int ip,
                          int nij, int nijk, int di, int dj, int dk)
{
        const size_t dij = di * dj;
        const size_t ip1 = ip + 1;
        int i, j, k, ic;
        double *pout, *pin;
        for (ic = 0; ic < comp; ic++) {
                for (k = 0; k < dk; k++) {
                        pout = out + k * nij;
                        pin = in + k * dij;
                        for (i = 0; i < di; i++) {
                                for (j = 0; j <= i; j++) {
                                        pout[j] = pin[j*di+i];
                                }
                                pout += ip1 + i;
                        }
                }
                out += nijk;
                in += dij * dk;
        }
}

/*
 * out[comp,naok,nij] in C-order
 * nij = i1*(i1+1)/2 - i0*(i0+1)/2
 * [  \          ]
 * [****         ]
 * [*****        ]
 * [*****.       ] <= . may not be filled, if jsh-upper-bound < ish-upper-bound
 * [        \    ]
 */
void GTOnr3c_fill_s2ij(int (*intor)(), double *out, int comp, int ish, int jsh,
                       double *buf, int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        ish += ish0;
        jsh += jsh0;
        const int ip = ao_loc[ish];
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        /* only the lower triangle (ip >= jp) is stored; skip the rest */
        if (ip < jp) {
                return;
        }

        const int ksh0 = shls_slice[4];
        const int ksh1 = shls_slice[5];
        const int i0 = ao_loc[ish0];
        const int i1 = ao_loc[ish1];
        const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
        /* off: packed-triangle elements preceding row i0 */
        const size_t off = i0 * (i0 + 1) / 2;
        const size_t nij = i1 * (i1 + 1) / 2 - off;
        const size_t nijk = nij * naok;
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        /* start of row ip within the packed triangle, plus column jp */
        out += ip * (ip + 1) / 2 - off + jp;

        int ksh, dk, k0;
        int shls[3];
        shls[0] = ish;
        shls[1] = jsh;
        for (ksh = ksh0; ksh < ksh1; ksh++) {
                shls[2] = ksh;
                dk = ao_loc[ksh+1] - ao_loc[ksh];
                k0 = ao_loc[ksh ] - ao_loc[ksh0];
                (*intor)(buf, shls, atm, natm, bas, nbas, env, cintopt);
                if (ip != jp) {
                        dcopy_s2_igtj(out+k0*nij, buf, comp, ip, nij, nijk, di, dj, dk);
                } else {
                        dcopy_s2_ieqj(out+k0*nij, buf, comp, ip, nij, nijk, di, dj, dk);
                }
        }
}

/* Not implemented: j,k-symmetric fill variant. */
void GTOnr3c_fill_s2jk(int (*intor)(), double *out, int comp, int ish, int jsh,
                       double *buf, int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        // TODO;
}

/*
 * Driver: iterate all (ish, jsh) pairs of the slice in parallel, calling
 * `fill` (one of the GTOnr3c_fill_* routines) for each.  Each thread owns a
 * private scratch buffer sized by NCTRMAX^3 * comp.
 * NOTE(review): nish/njsh are const-qualified, hence predetermined shared
 * under default(none); malloc is unchecked here -- upstream convention.
 */
void GTOnr3c_drv(int (*intor)(), void (*fill)(), double *eri, int comp,
                 int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                 int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int nish = ish1 - ish0;
        const int njsh = jsh1 - jsh0;

#pragma omp parallel default(none) \
        shared(intor, fill, eri, comp, shls_slice, ao_loc, cintopt, \
               atm, natm, bas, nbas, env)
{
        int ish, jsh, ij;
        double *buf = (double *)malloc(sizeof(double)*NCTRMAX*NCTRMAX*NCTRMAX*comp);
#pragma omp for schedule(dynamic)
        /* flattened (ish, jsh) pair loop for dynamic load balancing */
        for (ij = 0; ij < nish*njsh; ij++) {
                ish = ij / njsh;
                jsh = ij % njsh;
                (*fill)(intor, eri, comp, ish, jsh, buf, shls_slice, ao_loc,
                        cintopt, atm, natm, bas, nbas, env);
        }
        free(buf);
}
}
composite.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO M M PPPP OOO SSSSS IIIII TTTTT EEEEE % % C O O MM MM P P O O SS I T E % % C O O M M M PPPP O O SSS I T EEE % % C O O M M P O O SS I T E % % CCCC OOO M M P OOO SSSSS IIIII T EEEEE % % % % % % MagickCore Image Composite Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/memory_.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/resample.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p o s i t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompositeImage() returns the second image composited onto the first % at the specified offset, using the specified composite method. 
% % The format of the CompositeImage method is: % % MagickBooleanType CompositeImage(Image *image, % const Image *source_image,const CompositeOperator compose, % const MagickBooleanType clip_to_self,const ssize_t x_offset, % const ssize_t y_offset,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the canvas image, modified by he composition % % o source_image: the source image. % % o compose: This operator affects how the composite is applied to % the image. The operators and how they are utilized are listed here % http://www.w3.org/TR/SVG12/#compositing. % % o clip_to_self: set to MagickTrue to limit composition to area composed. % % o x_offset: the column offset of the composited image. % % o y_offset: the row offset of the composited image. % % Extra Controls from Image meta-data in 'image' (artifacts) % % o "compose:args" % A string containing extra numerical arguments for specific compose % methods, generally expressed as a 'geometry' or a comma separated list % of numbers. % % Compose methods needing such arguments include "BlendCompositeOp" and % "DisplaceCompositeOp". % % o exception: return any errors or warnings in this structure. % */ /* Composition based on the SVG specification: A Composition is defined by... Color Function : f(Sc,Dc) where Sc and Dc are the normizalized colors Blending areas : X = 1 for area of overlap, ie: f(Sc,Dc) Y = 1 for source preserved Z = 1 for canvas preserved Conversion to transparency (then optimized) Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa) Da' = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa) Where... Sca = Sc*Sa normalized Source color divided by Source alpha Dca = Dc*Da normalized Dest color divided by Dest alpha Dc' = Dca'/Da' the desired color value for this channel. Da' in in the follow formula as 'gamma' The resulting alpla value. Most functions use a blending mode of over (X=1,Y=1,Z=1) this results in the following optimizations... 
gamma = Sa+Da-Sa*Da;
     gamma = 1 - QuantumScale*alpha * QuantumScale*beta;
     opacity = QuantumScale*alpha*beta;  // over blend, optimized 1-Gamma

  The above SVG definitions also define that Mathematical Composition
  methods should use a 'Over' blending mode for Alpha Channel.
  It however was not applied for composition modes of 'Plus', 'Minus',
  the modulus versions of 'Add' and 'Subtract'.

  Mathematical operator changes to be applied from IM v6.7...

    1) Modulus modes 'Add' and 'Subtract' are obsoleted and renamed
       'ModulusAdd' and 'ModulusSubtract' for clarity.

    2) All mathematical compositions work as per the SVG specification
       with regard to blending.  This now includes 'ModulusAdd' and
       'ModulusSubtract'.

    3) When the special channel flag 'sync' (synchronize channel updates)
       is turned off (enabled by default) then mathematical compositions are
       only performed on the channels specified, and are applied
       independently of each other.  In other words the mathematics is
       performed as 'pure' mathematical operations, rather than as image
       operations.
*/

/*
  HCLComposite() converts a hue/chroma/luma triplet (each normalized to
  [0,1]) to red/green/blue values scaled by QuantumRange.  Uses the same
  (approximately Rec. 601) luma weights as CompositeHCL() below.
*/
static void HCLComposite(const MagickRealType hue,const MagickRealType chroma,
  const MagickRealType luma,MagickRealType *red,MagickRealType *green,
  MagickRealType *blue)
{
  MagickRealType
    b,
    c,
    g,
    h,
    m,
    r,
    x;

  /*
    Convert HCL to RGB colorspace.
  */
  assert(red != (MagickRealType *) NULL);
  assert(green != (MagickRealType *) NULL);
  assert(blue != (MagickRealType *) NULL);
  h=6.0*hue;  /* hue sector in [0,6) */
  c=chroma;
  x=c*(1.0-fabs(fmod(h,2.0)-1.0));  /* second-largest RGB component */
  r=0.0;
  g=0.0;
  b=0.0;
  if ((0.0 <= h) && (h < 1.0))
    {
      r=c;
      g=x;
    }
  else if ((1.0 <= h) && (h < 2.0))
    {
      r=x;
      g=c;
    }
  else if ((2.0 <= h) && (h < 3.0))
    {
      g=c;
      b=x;
    }
  else if ((3.0 <= h) && (h < 4.0))
    {
      g=x;
      b=c;
    }
  else if ((4.0 <= h) && (h < 5.0))
    {
      r=x;
      b=c;
    }
  else if ((5.0 <= h) && (h < 6.0))
    {
      r=c;
      b=x;
    }
  /* m shifts all components so the weighted sum reproduces the luma */
  m=luma-(0.298839*r+0.586811*g+0.114350*b);
  *red=QuantumRange*(r+m);
  *green=QuantumRange*(g+m);
  *blue=QuantumRange*(b+m);
}

/*
  CompositeHCL() converts quantum-scaled red/green/blue to hue/chroma/luma,
  each normalized to [0,1]; the inverse of HCLComposite() above.
*/
static void CompositeHCL(const MagickRealType red,const MagickRealType green,
  const MagickRealType blue,MagickRealType *hue,MagickRealType *chroma,
  MagickRealType *luma)
{
  MagickRealType
    b,
    c,
    g,
    h,
    max,
    r;

  /*
    Convert RGB to HCL colorspace.
  */
  assert(hue != (MagickRealType *) NULL);
  assert(chroma != (MagickRealType *) NULL);
  assert(luma != (MagickRealType *) NULL);
  r=red;
  g=green;
  b=blue;
  max=MagickMax(r,MagickMax(g,b));
  c=max-(MagickRealType) MagickMin(r,MagickMin(g,b));
  h=0.0;
  if (c == 0)
    h=0.0;
  else if (red == max)
    h=fmod((g-b)/c+6.0,6.0);  /* wrap sector into [0,6) */
  else if (green == max)
    h=((b-r)/c)+2.0;
  else if (blue == max)
    h=((r-g)/c)+4.0;
  *hue=(h/6.0);
  *chroma=QuantumScale*c;
  *luma=QuantumScale*(0.298839*r+0.586811*g+0.114350*b);
}

/*
  CompositeOverImage() is the dedicated fast path for the Over/SrcOver
  operators: the source image is composited over the canvas with the
  Porter-Duff 'over' blend at the given offset.
*/
static MagickBooleanType CompositeOverImage(Image *image,
  const Image *source_image,const MagickBooleanType clip_to_self,
  const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
{
#define CompositeImageTag "Composite/Image"

  CacheView
    *image_view,
    *source_view;

  const char
    *value;

  MagickBooleanType
    clamp,
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Composite image.
  */
  status=MagickTrue;
  progress=0;
  clamp=MagickTrue;
  value=GetImageArtifact(image,"compose:clamp");
  if (value != (const char *) NULL)
    clamp=IsStringTrue(value);
  /* NOTE(review): status/progress are initialized twice; redundant but
     harmless. */
  status=MagickTrue;
  progress=0;
  source_view=AcquireVirtualCacheView(source_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *pixels;

    PixelInfo
      canvas_pixel,
      source_pixel;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    if (clip_to_self != MagickFalse)
      {
        if (y < y_offset)
          continue;
        if ((y-y_offset) >= (ssize_t) source_image->rows)
          continue;
      }
    /*
      If pixels is NULL, y is outside overlay region.
    */
    pixels=(Quantum *) NULL;
    p=(Quantum *) NULL;
    if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows))
      {
        p=GetCacheViewVirtualPixels(source_view,0,y-y_offset,
          source_image->columns,1,exception);
        if (p == (const Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        pixels=p;
        /* for negative x_offset, pre-bias p so p+x*channels lands on x=0 of
           the source row; the wrap test at loop bottom keeps it in range */
        if (x_offset < 0)
          p-=x_offset*(ssize_t) GetPixelChannels(source_image);
      }
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&canvas_pixel);
    GetPixelInfo(source_image,&source_pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        gamma;

      MagickRealType
        alpha,
        Da,
        Dc,
        Dca,
        Sa,
        Sc,
        Sca;

      register ssize_t
        i;

      size_t
        channels;

      if (clip_to_self != MagickFalse)
        {
          if (x < x_offset)
            {
              q+=GetPixelChannels(image);
              continue;
            }
          if ((x-x_offset) >= (ssize_t) source_image->columns)
            break;
        }
      if ((pixels == (Quantum *) NULL) || (x < x_offset) ||
          ((x-x_offset) >= (ssize_t) source_image->columns))
        {
          Quantum
            source[MaxPixelChannels];

          /*
            Virtual composite:
              Sc: source color.
              Dc: canvas color.
          */
          (void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source,
            exception);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            MagickRealType
              pixel;

            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait source_traits=GetPixelChannelTraits(source_image,
              channel);
            if ((traits == UndefinedPixelTrait) ||
                (source_traits == UndefinedPixelTrait))
              continue;
            /* outside the overlay, alpha becomes transparent and color
               channels keep their canvas value */
            if (channel == AlphaPixelChannel)
              pixel=(MagickRealType) TransparentAlpha;
            else
              pixel=(MagickRealType) q[i];
            q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
              ClampToQuantum(pixel);
          }
          q+=GetPixelChannels(image);
          continue;
        }
      /*
        Authentic composite:
          Sa: normalized source alpha.
          Da: normalized canvas alpha.
      */
      Sa=QuantumScale*GetPixelAlpha(source_image,p);
      Da=QuantumScale*GetPixelAlpha(image,q);
      alpha=Sa+Da-Sa*Da;  /* Porter-Duff 'over' result alpha */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        MagickRealType
          pixel;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait source_traits=GetPixelChannelTraits(source_image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((source_traits == UndefinedPixelTrait) &&
            (channel != AlphaPixelChannel))
          continue;
        if (channel == AlphaPixelChannel)
          {
            /*
              Set alpha channel.
            */
            pixel=QuantumRange*alpha;
            q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
              ClampToQuantum(pixel);
            continue;
          }
        /*
          Sc: source color.
          Dc: canvas color.
        */
        Sc=(MagickRealType) GetPixelChannel(source_image,channel,p);
        Dc=(MagickRealType) q[i];
        if ((traits & CopyPixelTrait) != 0)
          {
            /*
              Copy channel.
            */
            q[i]=ClampToQuantum(Sc);
            continue;
          }
        /*
          Porter-Duff compositions:
            Sca: source normalized color multiplied by alpha.
            Dca: normalized canvas color multiplied by alpha.
        */
        Sca=QuantumScale*Sa*Sc;
        Dca=QuantumScale*Da*Dc;
        gamma=PerceptibleReciprocal(alpha);
        pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa));
        q[i]=clamp != MagickFalse ?
          ClampPixel(pixel) : ClampToQuantum(pixel);
      }
      p+=GetPixelChannels(source_image);
      channels=GetPixelChannels(source_image);
      /* wrap p back to the start of the source row (tiling behavior) */
      if (p >= (pixels+channels*source_image->columns))
        p=pixels;
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CompositeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
  CompositeImage() composites 'composite' onto 'image' at the given offset
  using the requested operator; see the documentation block above for the
  full parameter description and the "compose:args" artifact controls.
  (Definition continues beyond this chunk.)
*/
MagickExport MagickBooleanType CompositeImage(Image *image,
  const Image *composite,const CompositeOperator compose,
  const MagickBooleanType clip_to_self,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define CompositeImageTag "Composite/Image"

  CacheView
    *source_view,
    *image_view;

  const char
    *value;

  GeometryInfo
    geometry_info;

  Image
    *canvas_image,
    *source_image;

  MagickBooleanType
    clamp,
    status;

  MagickOffsetType
    progress;

  MagickRealType
    amount,
    canvas_dissolve,
    midpoint,
    percent_luma,
    percent_chroma,
    source_dissolve,
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(composite != (Image *) NULL);
  assert(composite->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* work on a clone converted to the canvas colorspace */
  source_image=CloneImage(composite,0,0,MagickTrue,exception);
  if (source_image == (const Image *) NULL)
    return(MagickFalse);
  (void) SetImageColorspace(source_image,image->colorspace,exception);
  if ((compose == OverCompositeOp) || (compose == SrcOverCompositeOp))
    {
      /* fast path for the common 'over' operators */
      status=CompositeOverImage(image,source_image,clip_to_self,x_offset,
y_offset,exception); source_image=DestroyImage(source_image); return(status); } amount=0.5; canvas_image=(Image *) NULL; canvas_dissolve=1.0; clamp=MagickTrue; value=GetImageArtifact(image,"compose:clamp"); if (value != (const char *) NULL) clamp=IsStringTrue(value); SetGeometryInfo(&geometry_info); percent_luma=100.0; percent_chroma=100.0; source_dissolve=1.0; threshold=0.05f; switch (compose) { case CopyCompositeOp: { if ((x_offset < 0) || (y_offset < 0)) break; if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns) break; if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows) break; if ((source_image->alpha_trait == UndefinedPixelTrait) && (image->alpha_trait != UndefinedPixelTrait)) (void) SetImageAlphaChannel(source_image,OpaqueAlphaChannel,exception); status=MagickTrue; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(source_image,image,source_image->rows,1) #endif for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const Quantum *p; register Quantum *q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset, source_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) source_image->columns; x++) { register ssize_t i; if (GetPixelReadMask(source_image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(source_image); i++) { PixelChannel channel = GetPixelChannelChannel(source_image,i); PixelTrait source_traits = GetPixelChannelTraits(source_image, channel); PixelTrait traits = 
GetPixelChannelTraits(image,channel); if ((source_traits == UndefinedPixelTrait) || (traits == UndefinedPixelTrait)) continue; SetPixelChannel(image,channel,p[i],q); } p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,CompositeImageTag,(MagickOffsetType) y,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); return(status); } case IntensityCompositeOp: { if ((x_offset < 0) || (y_offset < 0)) break; if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns) break; if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows) break; status=MagickTrue; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(source_image,image,source_image->rows,1) #endif for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const Quantum *p; register Quantum *q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset, source_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) source_image->columns; x++) { if (GetPixelReadMask(source_image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); continue; } SetPixelAlpha(image,clamp != MagickFalse ? 
ClampPixel(GetPixelIntensity(source_image,p)) : ClampToQuantum(GetPixelIntensity(source_image,p)),q); p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,CompositeImageTag,(MagickOffsetType) y,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); return(status); } case CopyAlphaCompositeOp: case ChangeMaskCompositeOp: { /* Modify canvas outside the overlaid region and require an alpha channel to exist, to add transparency. */ if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); break; } case BlurCompositeOp: { CacheView *canvas_view; double angle_range, angle_start, height, width; PixelInfo pixel; ResampleFilter *resample_filter; SegmentInfo blur; /* Blur Image by resampling dictated by an overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,angle]]. */ canvas_image=CloneImage(image,0,0,MagickTrue,exception); if (canvas_image == (Image *) NULL) { source_image=DestroyImage(source_image); return(MagickFalse); } /* Gather the maximum blur sigma values from user. */ flags=NoValue; value=GetImageArtifact(image,"compose:args"); if (value != (const char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & WidthValue) == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "InvalidSetting","'%s' '%s'","compose:args",value); source_image=DestroyImage(source_image); canvas_image=DestroyImage(canvas_image); return(MagickFalse); } /* Users input sigma now needs to be converted to the EWA ellipse size. 
The filter defaults to a sigma of 0.5 so to make this match the users input the ellipse size needs to be doubled. */ width=2.0*geometry_info.rho; height=width; if ((flags & HeightValue) != 0) height=2.0*geometry_info.sigma; /* Default the unrotated ellipse width and height axis vectors. */ blur.x1=width; blur.x2=0.0; blur.y1=0.0; blur.y2=height; if ((flags & XValue) != 0 ) { MagickRealType angle; /* Rotate vectors if a rotation angle is given. */ angle=DegreesToRadians(geometry_info.xi); blur.x1=width*cos(angle); blur.x2=width*sin(angle); blur.y1=(-height*sin(angle)); blur.y2=height*cos(angle); } angle_start=0.0; angle_range=0.0; if ((flags & YValue) != 0 ) { /* Lets set a angle range and calculate in the loop. */ angle_start=DegreesToRadians(geometry_info.xi); angle_range=DegreesToRadians(geometry_info.psi)-angle_start; } /* Set up a gaussian cylindrical filter for EWA Bluring. As the minimum ellipse radius of support*1.0 the EWA algorithm can only produce a minimum blur of 0.5 for Gaussian (support=2.0) This means that even 'No Blur' will be still a little blurry! The solution (as well as the problem of preventing any user expert filter settings, is to set our own user settings, restore them afterwards. */ resample_filter=AcquireResampleFilter(image,exception); SetResampleFilter(resample_filter,GaussianFilter); /* Perform the variable blurring of each pixel in image. 
*/ GetPixelInfo(image,&pixel); source_view=AcquireVirtualCacheView(source_image,exception); canvas_view=AcquireAuthenticCacheView(canvas_image,exception); for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) source_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p+=GetPixelChannels(source_image); continue; } if (fabs(angle_range) > MagickEpsilon) { MagickRealType angle; angle=angle_start+angle_range*QuantumScale* GetPixelBlue(source_image,p); blur.x1=width*cos(angle); blur.x2=width*sin(angle); blur.y1=(-height*sin(angle)); blur.y2=height*cos(angle); } ScaleResampleFilter(resample_filter, blur.x1*QuantumScale*GetPixelRed(source_image,p), blur.y1*QuantumScale*GetPixelGreen(source_image,p), blur.x2*QuantumScale*GetPixelRed(source_image,p), blur.y2*QuantumScale*GetPixelGreen(source_image,p) ); (void) ResamplePixelColor(resample_filter,(double) x_offset+x, (double) y_offset+y,&pixel,exception); SetPixelViaPixelInfo(canvas_image,&pixel,q); p+=GetPixelChannels(source_image); q+=GetPixelChannels(canvas_image); } sync=SyncCacheViewAuthenticPixels(canvas_view,exception); if (sync == MagickFalse) break; } resample_filter=DestroyResampleFilter(resample_filter); source_view=DestroyCacheView(source_view); canvas_view=DestroyCacheView(canvas_view); source_image=DestroyImage(source_image); source_image=canvas_image; break; } case DisplaceCompositeOp: case DistortCompositeOp: { CacheView *canvas_view; MagickRealType horizontal_scale, vertical_scale; PixelInfo pixel; PointInfo center, 
offset; /* Displace/Distort based on overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,center.x,center.y]] */ canvas_image=CloneImage(image,0,0,MagickTrue,exception); if (canvas_image == (Image *) NULL) { source_image=DestroyImage(source_image); return(MagickFalse); } SetGeometryInfo(&geometry_info); flags=NoValue; value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & (WidthValue | HeightValue)) == 0 ) { if ((flags & AspectValue) == 0) { horizontal_scale=(MagickRealType) (source_image->columns-1)/2.0; vertical_scale=(MagickRealType) (source_image->rows-1)/2.0; } else { horizontal_scale=(MagickRealType) (image->columns-1)/2.0; vertical_scale=(MagickRealType) (image->rows-1)/2.0; } } else { horizontal_scale=geometry_info.rho; vertical_scale=geometry_info.sigma; if ((flags & PercentValue) != 0) { if ((flags & AspectValue) == 0) { horizontal_scale*=(source_image->columns-1)/200.0; vertical_scale*=(source_image->rows-1)/200.0; } else { horizontal_scale*=(image->columns-1)/200.0; vertical_scale*=(image->rows-1)/200.0; } } if ((flags & HeightValue) == 0) vertical_scale=horizontal_scale; } /* Determine fixed center point for absolute distortion map Absolute distort == Displace offset relative to a fixed absolute point Select that point according to +X+Y user inputs. default = center of overlay image arg flag '!' 
= locations/percentage relative to background image */ center.x=(MagickRealType) x_offset; center.y=(MagickRealType) y_offset; if (compose == DistortCompositeOp) { if ((flags & XValue) == 0) if ((flags & AspectValue) != 0) center.x=(MagickRealType) ((image->columns-1)/2.0); else center.x=(MagickRealType) (x_offset+(source_image->columns-1)/ 2.0); else if ((flags & AspectValue) != 0) center.x=geometry_info.xi; else center.x=(MagickRealType) (x_offset+geometry_info.xi); if ((flags & YValue) == 0) if ((flags & AspectValue) != 0) center.y=(MagickRealType) ((image->rows-1)/2.0); else center.y=(MagickRealType) (y_offset+(source_image->rows-1)/2.0); else if ((flags & AspectValue) != 0) center.y=geometry_info.psi; else center.y=(MagickRealType) (y_offset+geometry_info.psi); } /* Shift the pixel offset point as defined by the provided, displacement/distortion map. -- Like a lens... */ GetPixelInfo(image,&pixel); image_view=AcquireVirtualCacheView(image,exception); source_view=AcquireVirtualCacheView(source_image,exception); canvas_view=AcquireAuthenticCacheView(canvas_image,exception); for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) source_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p+=GetPixelChannels(source_image); continue; } /* Displace the offset. */ offset.x=(double) (horizontal_scale*(GetPixelRed(source_image,p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ? 
x : 0); offset.y=(double) (vertical_scale*(GetPixelGreen(source_image,p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ? y : 0); status=InterpolatePixelInfo(image,image_view, UndefinedInterpolatePixel,(double) offset.x,(double) offset.y, &pixel,exception); if (status == MagickFalse) break; /* Mask with the 'invalid pixel mask' in alpha channel. */ pixel.alpha=(MagickRealType) QuantumRange*(QuantumScale*pixel.alpha)* (QuantumScale*GetPixelAlpha(source_image,p)); SetPixelViaPixelInfo(canvas_image,&pixel,q); p+=GetPixelChannels(source_image); q+=GetPixelChannels(canvas_image); } if (x < (ssize_t) source_image->columns) break; sync=SyncCacheViewAuthenticPixels(canvas_view,exception); if (sync == MagickFalse) break; } canvas_view=DestroyCacheView(canvas_view); source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); source_image=canvas_image; break; } case DissolveCompositeOp: { /* Geometry arguments to dissolve factors. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; canvas_dissolve=1.0; if ((source_dissolve-MagickEpsilon) < 0.0) source_dissolve=0.0; if ((source_dissolve+MagickEpsilon) > 1.0) { canvas_dissolve=2.0-source_dissolve; source_dissolve=1.0; } if ((flags & SigmaValue) != 0) canvas_dissolve=geometry_info.sigma/100.0; if ((canvas_dissolve-MagickEpsilon) < 0.0) canvas_dissolve=0.0; } break; } case BlendCompositeOp: { value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; canvas_dissolve=1.0-source_dissolve; if ((flags & SigmaValue) != 0) canvas_dissolve=geometry_info.sigma/100.0; } break; } case MathematicsCompositeOp: { /* Just collect the values from "compose:args", setting. 
Unused values are set to zero automagically. Arguments are normally a comma separated list, so this probably should be changed to some 'general comma list' parser, (with a minimum number of values) */ SetGeometryInfo(&geometry_info); value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) (void) ParseGeometry(value,&geometry_info); break; } case ModulateCompositeOp: { /* Determine the luma and chroma scale. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); percent_luma=geometry_info.rho; if ((flags & SigmaValue) != 0) percent_chroma=geometry_info.sigma; } break; } case ThresholdCompositeOp: { /* Determine the amount and threshold. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); amount=geometry_info.rho; threshold=geometry_info.sigma; if ((flags & SigmaValue) == 0) threshold=0.05f; } threshold*=QuantumRange; break; } default: break; } /* Composite image. */ status=MagickTrue; progress=0; midpoint=((MagickRealType) QuantumRange+1.0)/2; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *pixels; MagickRealType blue, chroma, green, hue, luma, red; PixelInfo canvas_pixel, source_pixel; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; if (clip_to_self != MagickFalse) { if (y < y_offset) continue; if ((y-y_offset) >= (ssize_t) source_image->rows) continue; } /* If pixels is NULL, y is outside overlay region. 
*/ pixels=(Quantum *) NULL; p=(Quantum *) NULL; if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows)) { p=GetCacheViewVirtualPixels(source_view,0,y-y_offset, source_image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } pixels=p; if (x_offset < 0) p-=x_offset*(ssize_t) GetPixelChannels(source_image); } q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } hue=0.0; chroma=0.0; luma=0.0; GetPixelInfo(image,&canvas_pixel); GetPixelInfo(source_image,&source_pixel); for (x=0; x < (ssize_t) image->columns; x++) { double gamma; MagickRealType alpha, Da, Dc, Dca, DcaDa, Sa, SaSca, Sc, Sca; register ssize_t i; size_t channels; if (clip_to_self != MagickFalse) { if (x < x_offset) { q+=GetPixelChannels(image); continue; } if ((x-x_offset) >= (ssize_t) source_image->columns) break; } if ((pixels == (Quantum *) NULL) || (x < x_offset) || ((x-x_offset) >= (ssize_t) source_image->columns)) { Quantum source[MaxPixelChannels]; /* Virtual composite: Sc: source color. Dc: canvas color. 
*/ (void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source, exception); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { MagickRealType pixel; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image, channel); if ((traits == UndefinedPixelTrait) || (source_traits == UndefinedPixelTrait)) continue; switch (compose) { case AlphaCompositeOp: case ChangeMaskCompositeOp: case CopyAlphaCompositeOp: case DstAtopCompositeOp: case DstInCompositeOp: case InCompositeOp: case OutCompositeOp: case SrcInCompositeOp: case SrcOutCompositeOp: { if (channel == AlphaPixelChannel) pixel=(MagickRealType) TransparentAlpha; else pixel=(MagickRealType) q[i]; break; } case ClearCompositeOp: case CopyCompositeOp: case ReplaceCompositeOp: case SrcCompositeOp: { if (channel == AlphaPixelChannel) pixel=(MagickRealType) TransparentAlpha; else pixel=0.0; break; } case BlendCompositeOp: case DissolveCompositeOp: { if (channel == AlphaPixelChannel) pixel=canvas_dissolve*GetPixelAlpha(source_image,source); else pixel=(MagickRealType) source[channel]; break; } default: { pixel=(MagickRealType) source[channel]; break; } } q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); } q+=GetPixelChannels(image); continue; } /* Authentic composite: Sa: normalized source alpha. Da: normalized canvas alpha. 
*/ Sa=QuantumScale*GetPixelAlpha(source_image,p); Da=QuantumScale*GetPixelAlpha(image,q); switch (compose) { case BumpmapCompositeOp: { alpha=GetPixelIntensity(source_image,p)*Sa; break; } case ColorBurnCompositeOp: case ColorDodgeCompositeOp: case DarkenCompositeOp: case DifferenceCompositeOp: case DivideDstCompositeOp: case DivideSrcCompositeOp: case ExclusionCompositeOp: case FreezeCompositeOp: case HardLightCompositeOp: case HardMixCompositeOp: case InterpolateCompositeOp: case LightenCompositeOp: case LinearBurnCompositeOp: case LinearDodgeCompositeOp: case LinearLightCompositeOp: case MathematicsCompositeOp: case MinusDstCompositeOp: case MinusSrcCompositeOp: case MultiplyCompositeOp: case NegateCompositeOp: case OverlayCompositeOp: case PegtopLightCompositeOp: case PinLightCompositeOp: case ReflectCompositeOp: case ScreenCompositeOp: case SoftBurnCompositeOp: case SoftDodgeCompositeOp: case SoftLightCompositeOp: case StampCompositeOp: case VividLightCompositeOp: { alpha=RoundToUnity(Sa+Da-Sa*Da); break; } case DstAtopCompositeOp: case DstInCompositeOp: case InCompositeOp: case SrcInCompositeOp: { alpha=Sa*Da; break; } case DissolveCompositeOp: { alpha=source_dissolve*Sa*(-canvas_dissolve*Da)+source_dissolve*Sa+ canvas_dissolve*Da; break; } case DstOverCompositeOp: case OverCompositeOp: case SrcOverCompositeOp: { alpha=Sa+Da-Sa*Da; break; } case DstOutCompositeOp: { alpha=Da*(1.0-Sa); break; } case OutCompositeOp: case SrcOutCompositeOp: { alpha=Sa*(1.0-Da); break; } case BlendCompositeOp: case PlusCompositeOp: { alpha=RoundToUnity(source_dissolve*Sa+canvas_dissolve*Da); break; } case XorCompositeOp: { alpha=Sa+Da-2.0*Sa*Da; break; } case ModulusAddCompositeOp: { if ((Sa+Da) <= 1.0) { alpha=(Sa+Da); break; } alpha=((Sa+Da)-1.0); break; } case ModulusSubtractCompositeOp: { if ((Sa-Da) >= 0.0) { alpha=(Sa-Da); break; } alpha=((Sa-Da)+1.0); break; } default: { alpha=1.0; break; } } switch (compose) { case ColorizeCompositeOp: case HueCompositeOp: case 
LuminizeCompositeOp: case ModulateCompositeOp: case RMSECompositeOp: case SaturateCompositeOp: { GetPixelInfoPixel(source_image,p,&source_pixel); GetPixelInfoPixel(image,q,&canvas_pixel); break; } default: break; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { MagickRealType pixel, sans; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits = GetPixelChannelTraits(source_image,channel); if (traits == UndefinedPixelTrait) continue; if ((channel == AlphaPixelChannel) && ((traits & UpdatePixelTrait) != 0)) { /* Set alpha channel. */ switch (compose) { case AlphaCompositeOp: { pixel=QuantumRange*Sa; break; } case AtopCompositeOp: case CopyBlackCompositeOp: case CopyBlueCompositeOp: case CopyCyanCompositeOp: case CopyGreenCompositeOp: case CopyMagentaCompositeOp: case CopyRedCompositeOp: case CopyYellowCompositeOp: case SrcAtopCompositeOp: case DstCompositeOp: case NoCompositeOp: { pixel=QuantumRange*Da; break; } case ChangeMaskCompositeOp: { MagickBooleanType equivalent; if (Da < 0.5) { pixel=(MagickRealType) TransparentAlpha; break; } equivalent=IsFuzzyEquivalencePixel(source_image,p,image,q); if (equivalent != MagickFalse) pixel=(MagickRealType) TransparentAlpha; else pixel=(MagickRealType) OpaqueAlpha; break; } case ClearCompositeOp: { pixel=(MagickRealType) TransparentAlpha; break; } case ColorizeCompositeOp: case HueCompositeOp: case LuminizeCompositeOp: case RMSECompositeOp: case SaturateCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=QuantumRange*Da; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=QuantumRange*Sa; break; } if (Sa < Da) { pixel=QuantumRange*Da; break; } pixel=QuantumRange*Sa; break; } case CopyAlphaCompositeOp: { if (source_image->alpha_trait == UndefinedPixelTrait) pixel=GetPixelIntensity(source_image,p); else pixel=QuantumRange*Sa; break; } case BlurCompositeOp: case 
CopyCompositeOp: case DisplaceCompositeOp: case DistortCompositeOp: case DstAtopCompositeOp: case ReplaceCompositeOp: case SrcCompositeOp: { pixel=QuantumRange*Sa; break; } case DarkenIntensityCompositeOp: { pixel=Sa*GetPixelIntensity(source_image,p) < Da*GetPixelIntensity(image,q) ? Sa : Da; break; } case DifferenceCompositeOp: { pixel=QuantumRange*fabs(Sa-Da); break; } case FreezeCompositeOp: { pixel=QuantumRange*(1.0-(1.0-Sa)*(1.0-Sa)* PerceptibleReciprocal(Da)); if (pixel < 0.0) pixel=0.0; break; } case InterpolateCompositeOp: { pixel=QuantumRange*(0.5-0.25*cos(MagickPI*Sa)-0.25* cos(MagickPI*Da)); break; } case LightenIntensityCompositeOp: { pixel=Sa*GetPixelIntensity(source_image,p) > Da*GetPixelIntensity(image,q) ? Sa : Da; break; } case ModulateCompositeOp: { pixel=QuantumRange*Da; break; } case MultiplyCompositeOp: { pixel=QuantumRange*Sa*Da; break; } case NegateCompositeOp: { pixel=QuantumRange*((1.0-Sa-Da)); break; } case ReflectCompositeOp: { pixel=QuantumRange*(Sa*Sa*PerceptibleReciprocal(1.0-Da)); if (pixel > QuantumRange) pixel=QuantumRange; break; } case StampCompositeOp: { pixel=QuantumRange*(Sa+Da*Da-1.0); break; } case StereoCompositeOp: { pixel=QuantumRange*(Sa+Da)/2; break; } default: { pixel=QuantumRange*alpha; break; } } q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); continue; } if (source_traits == UndefinedPixelTrait) continue; /* Sc: source color. Dc: canvas color. */ Sc=(MagickRealType) GetPixelChannel(source_image,channel,p); Dc=(MagickRealType) q[i]; if ((traits & CopyPixelTrait) != 0) { /* Copy channel. */ q[i]=ClampToQuantum(Dc); continue; } /* Porter-Duff compositions: Sca: source normalized color multiplied by alpha. Dca: normalized canvas color multiplied by alpha. 
*/ Sca=QuantumScale*Sa*Sc; Dca=QuantumScale*Da*Dc; SaSca=Sa*PerceptibleReciprocal(Sca); DcaDa=Dca*PerceptibleReciprocal(Da); switch (compose) { case DarkenCompositeOp: case LightenCompositeOp: case ModulusSubtractCompositeOp: { gamma=PerceptibleReciprocal(1.0-alpha); break; } default: { gamma=PerceptibleReciprocal(alpha); break; } } pixel=Dc; switch (compose) { case AlphaCompositeOp: { pixel=QuantumRange*Sa; break; } case AtopCompositeOp: case SrcAtopCompositeOp: { pixel=QuantumRange*(Sca*Da+Dca*(1.0-Sa)); break; } case BlendCompositeOp: { pixel=gamma*(source_dissolve*Sa*Sc+canvas_dissolve*Da*Dc); break; } case CopyCompositeOp: case ReplaceCompositeOp: case SrcCompositeOp: { pixel=QuantumRange*Sca; break; } case BlurCompositeOp: case DisplaceCompositeOp: case DistortCompositeOp: { pixel=Sc; break; } case BumpmapCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } pixel=QuantumScale*GetPixelIntensity(source_image,p)*Dc; break; } case ChangeMaskCompositeOp: { pixel=Dc; break; } case ClearCompositeOp: { pixel=0.0; break; } case ColorBurnCompositeOp: { if ((Sca == 0.0) && (Dca == Da)) { pixel=QuantumRange*gamma*(Sa*Da+Dca*(1.0-Sa)); break; } if (Sca == 0.0) { pixel=QuantumRange*gamma*(Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Sa*Da-Sa*Da*MagickMin(1.0,(1.0-DcaDa)* SaSca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case ColorDodgeCompositeOp: { if ((Sca*Da+Dca*Sa) >= Sa*Da) pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); else pixel=QuantumRange*gamma*(Dca*Sa*Sa*PerceptibleReciprocal(Sa-Sca)+ Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case ColorizeCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &sans,&sans,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, 
&hue,&chroma,&sans); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case CopyAlphaCompositeOp: { pixel=Dc; break; } case CopyBlackCompositeOp: { if (channel == BlackPixelChannel) pixel=(MagickRealType) GetPixelBlack(source_image,p); break; } case CopyBlueCompositeOp: case CopyYellowCompositeOp: { if (channel == BluePixelChannel) pixel=(MagickRealType) GetPixelBlue(source_image,p); break; } case CopyGreenCompositeOp: case CopyMagentaCompositeOp: { if (channel == GreenPixelChannel) pixel=(MagickRealType) GetPixelGreen(source_image,p); break; } case CopyRedCompositeOp: case CopyCyanCompositeOp: { if (channel == RedPixelChannel) pixel=(MagickRealType) GetPixelRed(source_image,p); break; } case DarkenCompositeOp: { /* Darken is equivalent to a 'Minimum' method OR a greyscale version of a binary 'Or' OR the 'Intersection' of pixel sets. */ if ((Sca*Da) < (Dca*Sa)) { pixel=QuantumRange*(Sca+Dca*(1.0-Sa)); break; } pixel=QuantumRange*(Dca+Sca*(1.0-Da)); break; } case DarkenIntensityCompositeOp: { pixel=Sa*GetPixelIntensity(source_image,p) < Da*GetPixelIntensity(image,q) ? 
Sc : Dc; break; } case DifferenceCompositeOp: { pixel=QuantumRange*gamma*(Sca+Dca-2.0*MagickMin(Sca*Da,Dca*Sa)); break; } case DissolveCompositeOp: { pixel=gamma*(source_dissolve*Sa*Sc-source_dissolve*Sa* canvas_dissolve*Da*Dc+canvas_dissolve*Da*Dc); break; } case DivideDstCompositeOp: { if ((fabs((double) Sca) < MagickEpsilon) && (fabs((double) Dca) < MagickEpsilon)) { pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } if (fabs((double) Dca) < MagickEpsilon) { pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Sca*Da*Da/Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case DivideSrcCompositeOp: { if ((fabs((double) Dca) < MagickEpsilon) && (fabs((double) Sca) < MagickEpsilon)) { pixel=QuantumRange*gamma*(Dca*(1.0-Sa)+Sca*(1.0-Da)); break; } if (fabs((double) Sca) < MagickEpsilon) { pixel=QuantumRange*gamma*(Da*Sa+Dca*(1.0-Sa)+Sca*(1.0-Da)); break; } pixel=QuantumRange*gamma*(Dca*Sa*SaSca+Dca*(1.0-Sa)+Sca*(1.0-Da)); break; } case DstAtopCompositeOp: { pixel=QuantumRange*(Dca*Sa+Sca*(1.0-Da)); break; } case DstCompositeOp: case NoCompositeOp: { pixel=QuantumRange*Dca; break; } case DstInCompositeOp: { pixel=QuantumRange*gamma*(Dca*Sa); break; } case DstOutCompositeOp: { pixel=QuantumRange*gamma*(Dca*(1.0-Sa)); break; } case DstOverCompositeOp: { pixel=QuantumRange*gamma*(Dca+Sca*(1.0-Da)); break; } case ExclusionCompositeOp: { pixel=QuantumRange*gamma*(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+ Dca*(1.0-Sa)); break; } case FreezeCompositeOp: { pixel=QuantumRange*gamma*(1.0-(1.0-Sca)*(1.0-Sca)* PerceptibleReciprocal(Dca)); if (pixel < 0.0) pixel=0.0; break; } case HardLightCompositeOp: { if ((2.0*Sca) < Sa) { pixel=QuantumRange*gamma*(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0- Sa)); break; } pixel=QuantumRange*gamma*(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+ Dca*(1.0-Sa)); break; } case HardMixCompositeOp: { pixel=gamma*(((Sca+Dca) < 1.0) ? 
0.0 : QuantumRange); break; } case HueCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &hue,&sans,&sans); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case InCompositeOp: case SrcInCompositeOp: { pixel=QuantumRange*(Sca*Da); break; } case InterpolateCompositeOp: { pixel=QuantumRange*(0.5-0.25*cos(MagickPI*Sca)-0.25* cos(MagickPI*Dca)); break; } case LinearBurnCompositeOp: { /* LinearBurn: as defined by Abode Photoshop, according to http://www.simplefilter.de/en/basics/mixmods.html is: f(Sc,Dc) = Sc + Dc - 1 */ pixel=QuantumRange*gamma*(Sca+Dca-Sa*Da); break; } case LinearDodgeCompositeOp: { pixel=gamma*(Sa*Sc+Da*Dc); break; } case LinearLightCompositeOp: { /* LinearLight: as defined by Abode Photoshop, according to http://www.simplefilter.de/en/basics/mixmods.html is: f(Sc,Dc) = Dc + 2*Sc - 1 */ pixel=QuantumRange*gamma*((Sca-Sa)*Da+Sca+Dca); break; } case LightenCompositeOp: { if ((Sca*Da) > (Dca*Sa)) { pixel=QuantumRange*(Sca+Dca*(1.0-Sa)); break; } pixel=QuantumRange*(Dca+Sca*(1.0-Da)); break; } case LightenIntensityCompositeOp: { /* Lighten is equivalent to a 'Maximum' method OR a greyscale version of a binary 'And' OR the 'Union' of pixel sets. */ pixel=Sa*GetPixelIntensity(source_image,p) > Da*GetPixelIntensity(image,q) ? 
Sc : Dc; break; } case LuminizeCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &sans,&sans,&luma); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case MathematicsCompositeOp: { /* 'Mathematics' a free form user control mathematical composition is defined as... f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D Where the arguments A,B,C,D are (currently) passed to composite as a command separated 'geometry' string in "compose:args" image artifact. A = a->rho, B = a->sigma, C = a->xi, D = a->psi Applying the SVG transparency formula (see above), we get... Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa) Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) + Dca*(1.0-Sa) */ pixel=QuantumRange*gamma*(geometry_info.rho*Sca*Dca+ geometry_info.sigma*Sca*Da+geometry_info.xi*Dca*Sa+ geometry_info.psi*Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case MinusDstCompositeOp: { pixel=gamma*(Sa*Sc+Da*Dc-2.0*Da*Dc*Sa); break; } case MinusSrcCompositeOp: { /* Minus source from canvas. 
f(Sc,Dc) = Sc - Dc */ pixel=gamma*(Da*Dc+Sa*Sc-2.0*Sa*Sc*Da); break; } case ModulateCompositeOp: { ssize_t offset; if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } offset=(ssize_t) (GetPixelIntensity(source_image,p)-midpoint); if (offset == 0) { pixel=Dc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); luma+=(0.01*percent_luma*offset)/midpoint; chroma*=0.01*percent_chroma; HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case ModulusAddCompositeOp: { if ((Sca+Dca) <= 1.0) { pixel=QuantumRange*(Sca+Dca); break; } pixel=QuantumRange*((Sca+Dca)-1.0); break; } case ModulusSubtractCompositeOp: { if ((Sca-Dca) >= 0.0) { pixel=QuantumRange*(Sca-Dca); break; } pixel=QuantumRange*((Sca-Dca)+1.0); break; } case MultiplyCompositeOp: { pixel=QuantumRange*gamma*(Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case NegateCompositeOp: { pixel=QuantumRange*(1.0-fabs(1.0-Sca-Dca)); break; } case OutCompositeOp: case SrcOutCompositeOp: { pixel=QuantumRange*(Sca*(1.0-Da)); break; } case OverCompositeOp: case SrcOverCompositeOp: { pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa)); break; } case OverlayCompositeOp: { if ((2.0*Dca) < Da) { pixel=QuantumRange*gamma*(2.0*Dca*Sca+Dca*(1.0-Sa)+Sca*(1.0- Da)); break; } pixel=QuantumRange*gamma*(Da*Sa-2.0*(Sa-Sca)*(Da-Dca)+Dca*(1.0-Sa)+ Sca*(1.0-Da)); break; } case PegtopLightCompositeOp: { /* PegTop: A Soft-Light alternative: A continuous version of the Softlight function, producing very similar results. f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm. 
*/ if (fabs((double) Da) < MagickEpsilon) { pixel=QuantumRange*gamma*Sca; break; } pixel=QuantumRange*gamma*(Dca*Dca*(Sa-2.0*Sca)/Da+Sca*(2.0*Dca+1.0- Da)+Dca*(1.0-Sa)); break; } case PinLightCompositeOp: { /* PinLight: A Photoshop 7 composition method http://www.simplefilter.de/en/basics/mixmods.html f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ? 2*Sc : Dc */ if ((Dca*Sa) < (Da*(2.0*Sca-Sa))) { pixel=QuantumRange*gamma*(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa)); break; } if ((Dca*Sa) > (2.0*Sca*Da)) { pixel=QuantumRange*gamma*(Sca*Da+Sca+Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca); break; } case PlusCompositeOp: { pixel=QuantumRange*(Sca+Dca); break; } case ReflectCompositeOp: { pixel=QuantumRange*gamma*(Sca*Sca*PerceptibleReciprocal(1.0-Dca)); if (pixel > QuantumRange) pixel=QuantumRange; break; } case RMSECompositeOp: { double gray; if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } gray=sqrt( (canvas_pixel.red-source_pixel.red)* (canvas_pixel.red-source_pixel.red)+ (canvas_pixel.green-source_pixel.green)* (canvas_pixel.green-source_pixel.green)+ (canvas_pixel.blue-source_pixel.blue)* (canvas_pixel.blue-source_pixel.blue)/3.0); switch (channel) { case RedPixelChannel: pixel=gray; break; case GreenPixelChannel: pixel=gray; break; case BluePixelChannel: pixel=gray; break; default: pixel=Dc; break; } break; } case SaturateCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &sans,&chroma,&sans); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: 
pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case ScreenCompositeOp: { /* Screen: a negated multiply: f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc) */ pixel=QuantumRange*gamma*(Sca+Dca-Sca*Dca); break; } case SoftBurnCompositeOp: { if ((Sca+Dca) < 1.0) pixel=QuantumRange*gamma*(0.5*Dca*PerceptibleReciprocal(1.0-Sca)); else pixel=QuantumRange*gamma*(1.0-0.5*(1.0-Sca)* PerceptibleReciprocal(Dca)); break; } case SoftDodgeCompositeOp: { if ((Sca+Dca) < 1.0) pixel=QuantumRange*gamma*(0.5*Sca*PerceptibleReciprocal(1.0-Dca)); else pixel=QuantumRange*gamma*(1.0-0.5*(1.0-Dca)* PerceptibleReciprocal(Sca)); break; } case SoftLightCompositeOp: { if ((2.0*Sca) < Sa) { pixel=QuantumRange*gamma*(Dca*(Sa+(2.0*Sca-Sa)*(1.0-DcaDa))+ Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da)) { pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*DcaDa* (4.0*DcaDa+1.0)*(DcaDa-1.0)+7.0*DcaDa)+Sca*(1.0-Da)+ Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(pow(DcaDa,0.5)- DcaDa)+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case StampCompositeOp: { pixel=QuantumRange*(Sca+Dca*Dca-1.0); break; } case StereoCompositeOp: { if (channel == RedPixelChannel) pixel=(MagickRealType) GetPixelRed(source_image,p); break; } case ThresholdCompositeOp: { MagickRealType delta; delta=Sc-Dc; if ((MagickRealType) fabs((double) (2.0*delta)) < threshold) { pixel=gamma*Dc; break; } pixel=gamma*(Dc+delta*amount); break; } case VividLightCompositeOp: { /* VividLight: A Photoshop 7 composition method. See http://www.simplefilter.de/en/basics/mixmods.html. f(Sc,Dc) = (2*Sc < 1) ? 
1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc)) */ if ((fabs((double) Sa) < MagickEpsilon) || (fabs((double) (Sca-Sa)) < MagickEpsilon)) { pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } if ((2.0*Sca) <= Sa) { pixel=QuantumRange*gamma*(Sa*(Da+Sa*(Dca-Da)* PerceptibleReciprocal(2.0*Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Dca*Sa*Sa*PerceptibleReciprocal(2.0* (Sa-Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case XorCompositeOp: { pixel=QuantumRange*(Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } default: { pixel=Sc; break; } } q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); } p+=GetPixelChannels(source_image); channels=GetPixelChannels(source_image); if (p >= (pixels+channels*source_image->columns)) p=pixels; q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,CompositeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); if (canvas_image != (Image * ) NULL) canvas_image=DestroyImage(canvas_image); else source_image=DestroyImage(source_image); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T e x t u r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TextureImage() repeatedly tiles the texture image across and down the image % canvas. % % The format of the TextureImage method is: % % MagickBooleanType TextureImage(Image *image,const Image *texture, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o texture_image: This image is the texture to layer on the background. 
%
*/
MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture,
  ExceptionInfo *exception)
{
#define TextureImageTag  "Texture/Image"

  CacheView
    *image_view,
    *texture_view;

  Image
    *texture_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (texture == (const Image *) NULL)
    return(MagickFalse);
  /*
    The canvas must be DirectClass so its pixels can be written in place.
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Work on a private clone of the texture so the caller's image is never
    modified; the clone is converted to the canvas colorspace and given the
    tile virtual-pixel method so out-of-bounds reads wrap around.
  */
  texture_image=CloneImage(texture,0,0,MagickTrue,exception);
  if (texture_image == (const Image *) NULL)
    return(MagickFalse);
  (void) TransformImageColorspace(texture_image,image->colorspace,exception);
  (void) SetImageVirtualPixelMethod(texture_image,TileVirtualPixelMethod,
    exception);
  status=MagickTrue;
  /*
    Choose between the two tiling strategies: the general (slow) path below
    is taken whenever the compose operator can blend with the canvas; the
    optimized path further down only applies to a plain opaque copy
    (CopyCompositeOp, or OverCompositeOp with no alpha on either image).
  */
  if ((image->compose != CopyCompositeOp) &&
      ((image->compose != OverCompositeOp) ||
       (image->alpha_trait != UndefinedPixelTrait) ||
       (texture_image->alpha_trait != UndefinedPixelTrait)))
    {
      /*
        Tile texture onto the image background.
      */
      for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture_image->rows)
      {
        register ssize_t
          x;

        /* A prior tile failed; skip the remaining rows. */
        if (status == MagickFalse)
          continue;
        for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
        {
          MagickBooleanType
            thread_status;

          /*
            Composite one full texture tile at (x,y), honoring the canvas
            compose operator and the texture's tile offset.
          */
          thread_status=CompositeImage(image,texture_image,image->compose,
            MagickTrue,x+texture_image->tile_offset.x,y+
            texture_image->tile_offset.y,exception);
          if (thread_status == MagickFalse)
            {
              status=thread_status;
              break;
            }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,TextureImageTag,(MagickOffsetType)
        image->rows,image->rows);
      texture_image=DestroyImage(texture_image);
      return(status);
    }
  /*
    Tile texture onto the image background (optimized).
  */
  status=MagickTrue;
  texture_view=AcquireVirtualCacheView(texture_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(texture_image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const Quantum
      *p,
      *pixels;

    register ssize_t
      x;

    register Quantum
      *q;

    size_t
      width;

    /*
      An OpenMP loop cannot break early, so iterations after a failure
      just fall through.
    */
    if (status == MagickFalse)
      continue;
    /*
      Fetch one texture row, wrapping vertically with the modulus so the
      tile repeats down the canvas.
    */
    pixels=GetCacheViewVirtualPixels(texture_view,texture_image->tile_offset.x,
      (y+texture_image->tile_offset.y) % texture_image->rows,
      texture_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((pixels == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
    {
      register ssize_t
        j;

      p=pixels;
      width=texture_image->columns;
      /* Clamp the last tile so it does not run past the right edge. */
      if ((x+(ssize_t) width) > (ssize_t) image->columns)
        width=image->columns-x;
      for (j=0; j < (ssize_t) width; j++)
      {
        register ssize_t
          i;

        /*
          Copy each channel the two images have in common; channels that
          are undefined on either side are left untouched.
        */
        for (i=0; i < (ssize_t) GetPixelChannels(texture_image); i++)
        {
          PixelChannel channel = GetPixelChannelChannel(texture_image,i);
          PixelTrait traits = GetPixelChannelTraits(image,channel);
          PixelTrait texture_traits=GetPixelChannelTraits(texture_image,
            channel);
          if ((traits == UndefinedPixelTrait) ||
              (texture_traits == UndefinedPixelTrait))
            continue;
          SetPixelChannel(image,channel,p[i],q);
        }
        p+=GetPixelChannels(texture_image);
        q+=GetPixelChannels(image);
      }
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  texture_view=DestroyCacheView(texture_view);
  image_view=DestroyCacheView(image_view);
  texture_image=DestroyImage(texture_image);
  return(status);
}
GB_unop__identity_uint16_int16.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint16_int16) // op(A') function: GB (_unop_tran__identity_uint16_int16) // C type: uint16_t // A type: int16_t // cast: uint16_t cij = (uint16_t) aij // unaryop: cij = aij #define GB_ATYPE \ int16_t #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint16_t z = (uint16_t) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int16_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint16_t z = (uint16_t) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint16_int16) ( uint16_t *Cx, // Cx and Ax may be aliased const int16_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { 
#pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int16_t aij = Ax [p] ; uint16_t z = (uint16_t) aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int16_t aij = Ax [p] ; uint16_t z = (uint16_t) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint16_int16) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
trmv_x_csr_u_hi.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif

/*
 * Triangular matrix-vector product, CSR storage, upper triangle, unit
 * diagonal:
 *
 *     y := alpha * (I + strict_upper(A)) * x + beta * y
 *
 * Entries of A on or below the diagonal are ignored; the diagonal is taken
 * to be 1 implicitly.  Rows are processed independently, so the row loop is
 * parallelized with OpenMP.
 *
 * Returns ALPHA_SPARSE_STATUS_INVALID_VALUE when A is not square, otherwise
 * ALPHA_SPARSE_STATUS_SUCCESS.
 */
static alphasparse_status_t trmv_x_csr_u_hi_omp(const ALPHA_Number alpha,
                                                const ALPHA_SPMAT_CSR *A,
                                                const ALPHA_Number *x,
                                                const ALPHA_Number beta,
                                                ALPHA_Number *y)
{
    const ALPHA_INT rows = A->rows;
    const ALPHA_INT cols = A->cols;

    /* A triangular operator must be square. */
    if (rows != cols)
        return ALPHA_SPARSE_STATUS_INVALID_VALUE;

    ALPHA_INT num_threads = alpha_get_thread_num();

#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT row = 0; row < rows; ++row)
    {
        /* Implicit unit diagonal: seed the row product with x[row]. */
        ALPHA_Number acc = x[row];
        const ALPHA_INT begin = A->rows_start[row];
        const ALPHA_INT end = A->rows_end[row];

        /* Accumulate only strictly-upper entries (col > row). */
        for (ALPHA_INT ai = begin; ai < end; ++ai)
        {
            const ALPHA_INT col = A->col_indx[ai];
            if (col > row)
                alpha_madde(acc, A->values[ai], x[col]);
        }

        /* y[row] = alpha * acc + beta * y[row]. */
        alpha_mule(acc, alpha);
        alpha_mule(y[row], beta);
        alpha_adde(y[row], acc);
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}

/* Public entry point: forwards to the OpenMP implementation. */
alphasparse_status_t ONAME(const ALPHA_Number alpha,
                           const ALPHA_SPMAT_CSR *A,
                           const ALPHA_Number *x,
                           const ALPHA_Number beta,
                           ALPHA_Number *y)
{
    return trmv_x_csr_u_hi_omp(alpha, A, x, beta, y);
}
primitives.h
#pragma once

#include <vector>
#include <cstdint>

#include <omp.h>

#include "local_buffer.h"
#include "../timer.h"

using namespace std;

// Entries per cache line; per-thread histogram slots are spaced this far
// apart to avoid false sharing between neighboring threads.
#define CACHE_LINE_ENTRY (16)
// Per-thread scratch budget (bytes) for the small-bucket scatter buffers.
#define LOCAL_BUDGET (8*1024*1024)

// Parallel memset over a T array, statically partitioned across threads.
// NOTE(review): calls omp_get_thread_num()/omp_get_num_threads() and ends
// with a bare barrier, so it appears designed to run inside an existing
// '#pragma omp parallel' region -- confirm at call sites.
// 'val' is applied per byte (memset semantics); callers here pass 0.
template<typename T>
void MemSetOMP(T *arr, int val, size_t size) {
    size_t tid = omp_get_thread_num();
    size_t max_omp_threads = omp_get_num_threads();
    size_t task_num = size;
    // Ceiling-divide so every element is covered; the last chunk is clamped.
    size_t avg = (task_num + max_omp_threads - 1) / max_omp_threads;
    auto it_beg = avg * tid;
    auto it_end = min(avg * (tid + 1), task_num);
    memset(arr + it_beg, val, sizeof(T) * (it_end - it_beg));
#pragma omp barrier
}

// Parallel memcpy from src to dst, statically partitioned across threads.
// Same in-parallel-region assumption as MemSetOMP; regions must not overlap
// (memcpy, not memmove).
template<typename T>
void MemCpyOMP(T *dst, T *src, size_t size) {
    size_t tid = omp_get_thread_num();
    size_t max_omp_threads = omp_get_num_threads();
    size_t task_num = size;
    size_t avg = (task_num + max_omp_threads - 1) / max_omp_threads;
    auto it_beg = avg * tid;
    auto it_end = min(avg * (tid + 1), task_num);
    memcpy(dst + it_beg, src + it_beg, sizeof(T) * (it_end - it_beg));
#pragma omp barrier
}

/*
 * InclusivePrefixSumOMP: General Case Inclusive Prefix Sum
 * histogram: is for cache-aware thread-local histogram purpose
 * output: should be different from the variables captured in function object f
 * size: is the original size for the flagged prefix sum
 * f: requires it as the parameter, f(it) return the histogram value of that it
 *
 * Algorithm: each thread computes a local inclusive scan of its chunk and
 * records its chunk total in a padded histogram slot; a single thread scans
 * the per-thread totals; finally each thread adds its predecessor's total to
 * its chunk.  Barriers separate the three phases.
 */
template<typename H, typename T, typename F>
void InclusivePrefixSumOMP(vector<H> &histogram, T *output, size_t size, F f) {
    int omp_num_threads = omp_get_num_threads();
#pragma omp single
    {
        // One padded slot per thread plus a leading zero slot.
        histogram = vector<H>((omp_num_threads + 1) * CACHE_LINE_ENTRY, 0);
    }
    // NOTE(review): static thread_local -- initialized once per thread on
    // first call; assumes the OpenMP thread id is stable across parallel
    // regions for the process lifetime -- TODO confirm with the runtime.
    static thread_local int tid = omp_get_thread_num();
    // 1st Pass: Histogram.
    auto avg = size / omp_num_threads;
    auto it_beg = avg * tid;
    auto histogram_idx = (tid + 1) * CACHE_LINE_ENTRY;
    histogram[histogram_idx] = 0;
    // The last thread absorbs the remainder of the division.
    auto it_end = tid == omp_num_threads - 1 ? size : avg * (tid + 1);
    size_t prev = 0u;
    for (auto it = it_beg; it < it_end; it++) {
        auto value = f(it);
        histogram[histogram_idx] += value;
        prev += value;
        output[it] = prev;  // local inclusive scan within the chunk
    }
#pragma omp barrier
    // 2nd Pass: single-prefix-sum & Add previous sum.
#pragma omp single
    {
        // Serial scan over the per-thread chunk totals.
        for (auto local_tid = 0; local_tid < omp_num_threads; local_tid++) {
            auto local_histogram_idx = (local_tid + 1) * CACHE_LINE_ENTRY;
            auto prev_histogram_idx = (local_tid) * CACHE_LINE_ENTRY;
            histogram[local_histogram_idx] += histogram[prev_histogram_idx];
        }
    }
    {
        // Shift each chunk by the sum of all preceding chunks.
        auto prev_sum = histogram[tid * CACHE_LINE_ENTRY];
        for (auto it = it_beg; it < it_end; it++) {
            output[it] += prev_sum;
        }
#pragma omp barrier
    }
}

/*
 * FlagPrefixSumOMP: special case of InclusivePrefixSumOMP
 * Treats f(it) as a boolean flag contributing 1 or 0.
 */
template<typename H, typename T, typename F>
void FlagPrefixSumOMP(vector<H> &histogram, T *output, size_t size, F f) {
    InclusivePrefixSumOMP(histogram, output, size, [&f](size_t it) {
        return f(it) ? 1 : 0;
    });
}

/*
 * SelectNotFOMP: selection primitive
 * !f(it) returns selected
 * relative_off[i] ends up holding the count of rejected elements up to i,
 * so i - relative_off[i] is the compacted write position of a kept element.
 */
template<typename H, typename T, typename OFF, typename F>
void SelectNotFOMP(vector<H> &histogram, T *output, T *input, OFF *relative_off, size_t size, F f) {
    FlagPrefixSumOMP(histogram, relative_off, size, f);
#pragma omp for
    for (size_t i = 0u; i < size; i++) {
        if (!(f(i))) {
            auto off = i - relative_off[i];
            output[off] = input[i];
        }
    }
}

// Histogram of bucket ids f(i) into bucket_ptrs, using a per-thread byte
// counter array to batch atomic updates: each uint8_t counter is flushed to
// the shared array with __sync_fetch_and_add once it reaches 0xff, and any
// remainder is flushed after the loop.
template<typename OFF, typename F>
void Histogram(size_t size, OFF *&bucket_ptrs, int32_t num_buckets, F f, Timer *timer = nullptr) {
    // Histogram.
    auto local_buf = (uint8_t *) calloc(num_buckets, sizeof(uint8_t));
#pragma omp for
    for (size_t i = 0u; i < size; i++) {
        auto src = f(i);
        local_buf[src]++;
        if (local_buf[src] == 0xff) {
            // Counter saturated: drain 0xff into the shared histogram.
            __sync_fetch_and_add(&bucket_ptrs[src], 0xff);
            local_buf[src] = 0;
        }
    }
#pragma omp single
    if (timer != nullptr)log_info("[%s]: Local Comp Time: %.9lfs", __FUNCTION__, timer->elapsed());
    // Flush whatever is left in the local counters.
    for (size_t i = 0; i < num_buckets; i++) {
        if (local_buf[i] != 0) {
            __sync_fetch_and_add(&bucket_ptrs[i], local_buf[i]);
        }
    }
#pragma omp barrier
    free(local_buf);
}

// Straightforward variant: one atomic add per element.  Simpler than
// Histogram above but every increment contends on the shared array.
template<typename OFF, typename F>
void HistogramAtomic(size_t size, OFF *&bucket_ptrs, int32_t num_buckets, F f) {
    // Histogram.
#pragma omp for
    for (size_t i = 0u; i < size; i++) {
        auto src = f(i);
        __sync_fetch_and_add(&bucket_ptrs[src], 1);
    }
}

/*
 * Require an output array,
 * f: is the property for the bucket ID, given an index on the input array
 * Inefficient when there are lots of contentions because of atomic operations
 *
 * Pipeline: histogram -> prefix sum of bucket sizes -> atomic scatter.
 * On return bucket_ptrs[b]..bucket_ptrs[b+1] delimits bucket b in output;
 * both bucket_ptrs and cur_write_off are malloc'd here (caller frees).
 */
template<typename H, typename T, typename OFF, typename F>
void BucketSort(vector<H> &histogram, T *&input, T *&output, OFF *&cur_write_off, OFF *&bucket_ptrs,
                size_t size, int32_t num_buckets, F f, Timer *timer = nullptr) {
    // Populate.
#pragma omp single
    {
        bucket_ptrs = (OFF *) malloc(sizeof(OFF) * (num_buckets + 1));
        cur_write_off = (OFF *) malloc(sizeof(OFF) * (num_buckets + 1));
        cur_write_off[0] = 0;
    }
    MemSetOMP(bucket_ptrs, 0, num_buckets + 1);
    Histogram(size, bucket_ptrs, num_buckets, f, timer);
//    HistogramAtomic(size, bucket_ptrs, num_buckets, f);
#pragma omp single
    if (timer != nullptr)log_info("[%s]: Histogram, Time: %.9lfs", __FUNCTION__, timer->elapsed());
    // cur_write_off[1..] = exclusive bucket start offsets.
    InclusivePrefixSumOMP(histogram, cur_write_off + 1, num_buckets, [&bucket_ptrs](uint32_t it) {
        return bucket_ptrs[it];
    });
    // Keep a pristine copy of the starts; cur_write_off is consumed below.
    MemCpyOMP(bucket_ptrs, cur_write_off, num_buckets + 1);
#pragma omp single
    {
        if (timer != nullptr)log_info("[%s]: Before Scatter, Time: %.9lfs", __FUNCTION__, timer->elapsed());
    }
    // Scatter.
#pragma omp for
    for (size_t i = 0u; i < size; i++) {
        auto element = input[i];
        auto bucket_id = f(i);
        // Claim the next free slot of the bucket atomically.
        auto old_offset = __sync_fetch_and_add(&(cur_write_off[bucket_id]), 1);
        output[old_offset] = element;
    }
#pragma omp single
    {
        if (timer != nullptr)log_info("[%s]: Before Sort, Time: %.9lfs", __FUNCTION__, timer->elapsed());
    }
#pragma omp barrier
}

// Variant of BucketSort that buffers writes per bucket in thread-local
// LocalWriteBuffer chunks before touching the shared output.
// (The function body continues beyond this source chunk.)
template<typename H, typename T, typename OFF, typename F>
void BucketSortSmallBuckets(vector<H> &histogram, T *&input, T *&output, OFF *&cur_write_off, OFF *&bucket_ptrs,
                            size_t size, int32_t num_buckets, F f, Timer *timer = nullptr) {
    using BufT= LocalWriteBuffer<T, uint32_t>;
    // At least one cache line of entries per bucket, within LOCAL_BUDGET.
    auto cap = max<int>(CACHE_LINE_ENTRY, LOCAL_BUDGET / num_buckets / sizeof(T));
    auto bucket_write_buffers = (BufT *) malloc(num_buckets * sizeof(BufT));
    auto bucket_buffers = (T *) malloc(cap * num_buckets * sizeof(T));
    // Populate.
#pragma omp single
    {
        int max_omp_threads = omp_get_num_threads();
        log_info("[%s]: Mem Size Buckets: %zu, Bucket#: %d", __FUNCTION__,
                 cap * num_buckets * sizeof(T) * max_omp_threads, num_buckets);
        bucket_ptrs = (uint32_t *) malloc(sizeof(OFF) * (num_buckets + 1));
        cur_write_off = (uint32_t *) malloc(sizeof(OFF) * (num_buckets + 1));
        cur_write_off[0] = 0;
    }
    MemSetOMP(bucket_ptrs, 0, num_buckets + 1);
    Histogram(size, bucket_ptrs, num_buckets, f);
#pragma omp barrier
    InclusivePrefixSumOMP(histogram, cur_write_off + 1, num_buckets, [&bucket_ptrs](uint32_t it) {
        return bucket_ptrs[it];
    });
    MemCpyOMP(bucket_ptrs, cur_write_off, num_buckets + 1);
#pragma omp single
    {
        if (timer != nullptr)log_info("[%s]: Before Scatter, Time: %.9lfs", __FUNCTION__, timer->elapsed());
    }
    for (auto i = 0; i < num_buckets; i++) {
        bucket_write_buffers[i] = BufT(bucket_buffers + cap * i, cap, output, &cur_write_off[i]);
    }
#pragma omp barrier
    // Scatter.
#pragma omp for for (size_t i = 0u; i < size; i++) { auto element = input[i]; auto bucket_id = f(i); bucket_write_buffers[bucket_id].push(element); } for (auto i = 0; i < num_buckets; i++) { bucket_write_buffers[i].submit_if_possible(); } #pragma omp barrier #pragma omp single { if (timer != nullptr)log_info("[%s]: Before Sort, Time: %.9lfs", __FUNCTION__, timer->elapsed()); } free(bucket_buffers); free(bucket_write_buffers); #pragma omp barrier }
shmem_ctx.c
/* * Copyright (c) 2017 Intel Corporation. All rights reserved. * This software is available to you under the BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * This test is derived from an example provided in the OpenSHMEM 1.4 * specification. Additional copyrights may apply. 
 * */

#include <stdio.h>
#include <shmem.h>

/* Symmetric (remotely accessible) globals used by the task loop below. */
long task_cntr = 0; /* Next task counter */
long tasks_done = 0; /* Tasks done by this PE */
long total_done = 0; /* Total tasks done by all PEs */

/*
 * Hybrid OpenSHMEM + OpenMP work-stealing test: every OpenMP thread on every
 * PE claims tasks via atomic fetch-and-increment on each PE's task_cntr,
 * starting locally and then helping the other PEs in round-robin order.
 * Exits 0 iff the global task tally matches ntasks * npes.
 */
int main(void)
{
    int tl, ret;
    long ntasks = 1024; /* Total tasks per PE */

    /* Request full thread support; fail out if the library grants less. */
    ret = shmem_init_thread(SHMEM_THREAD_MULTIPLE, &tl);

    if (tl != SHMEM_THREAD_MULTIPLE || ret != 0) {
        printf("Init failed (requested thread level %d, got %d, ret %d)\n",
               SHMEM_THREAD_MULTIPLE, tl, ret);
        /* If init itself succeeded, the whole job must still be torn down. */
        if (ret == 0) {
            shmem_global_exit(1);
        } else {
            return ret;
        }
    }

    int me = shmem_my_pe();
    int npes = shmem_n_pes();

    /* Per-thread tallies are combined into tasks_done by the reduction. */
#pragma omp parallel reduction (+:tasks_done)
    {
        shmem_ctx_t ctx;
        int task_pe = me, pes_done = 0;
        /* Thread-private 'ret' intentionally shadows the outer one. Each
         * thread gets its own communication context; fall back to the
         * default context if creation fails. */
        int ret = shmem_ctx_create(SHMEM_CTX_PRIVATE, &ctx);

        if (ret != 0) {
            printf("%d: Warning, unable to create context (%d)\n", me, ret);
            ctx = SHMEM_CTX_DEFAULT;
        }

        /* Process tasks on all PEs, starting with the local PE. After
         * all tasks on a PE are completed, help the next PE. */
        while (pes_done < npes) {
            long task = shmem_ctx_long_atomic_fetch_inc(ctx, &task_cntr, task_pe);
            while (task < ntasks) {
                /* Perform task (task_pe, task) */
                tasks_done++;
                task = shmem_ctx_long_atomic_fetch_inc(ctx, &task_cntr, task_pe);
            }
            pes_done++;
            task_pe = (task_pe + 1) % npes;
        }

        if (ctx != SHMEM_CTX_DEFAULT) shmem_ctx_destroy(ctx);
    }

    /* Sum the per-PE tallies; every PE should observe ntasks * npes. */
    shmem_long_sum_reduce(SHMEM_TEAM_WORLD, &total_done, &tasks_done, 1);

    int result = (total_done != ntasks * npes);

    if (me == 0 && result)
        printf("Error: total_done is %ld, expected %ld\n", total_done, ntasks * npes);

    shmem_finalize();
    return result;
}
lis_matrix_bsc.c
/* Copyright (C) 2002-2012 The SSI Project. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the project nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/
/* Block Sparse Column (BSC) storage kernels for LIS matrices: set / malloc /
 * copy / convert / get_diagonal / scaling / normf / split / merge / solve.
 * NOTE(review): this excerpt ends inside lis_matrix_solve_bsc; that function
 * continues beyond the end of this chunk. */

#ifdef HAVE_CONFIG_H
#include "lis_config.h"
#else
#ifdef HAVE_CONFIG_WIN32_H
#include "lis_config_win32.h"
#endif
#endif

#include <stdio.h>
#include <stdlib.h>
#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif
#include <string.h>
#include <stdarg.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef USE_MPI
#include <mpi.h>
#endif
#include "lislib.h"

/************************************************
 * lis_matrix_set
 * lis_matrix_malloc
 * lis_matrix_copy
 * lis_matrix_convert
 * lis_matrix_get_diagonal
 * lis_matrix_scaling
 * lis_matrix_scaling_symm
 * lis_matrix_normf
 * lis_matrix_transpose
 ************************************************/

#undef __FUNC__
#define __FUNC__ "lis_matrix_set_bsc"
/* Attach caller-owned BSC arrays (bptr/bindex/value) to A without copying and
 * fill in the blocking metadata.  No-op if A is already assembled.  Note that
 * A->nc is first derived from gn and then recomputed from n/np below. */
LIS_INT lis_matrix_set_bsc(LIS_INT bnr, LIS_INT bnc, LIS_INT bnnz, LIS_INT *bptr, LIS_INT *bindex, LIS_SCALAR *value, LIS_MATRIX A)
{
	LIS_INT err;

	LIS_DEBUG_FUNC_IN;

#if 0
	err = lis_matrix_check(A,LIS_MATRIX_CHECK_SET);
	if( err ) return err;
#else
	if(lis_matrix_is_assembled(A)) return LIS_SUCCESS;
	else {
		err = lis_matrix_check(A,LIS_MATRIX_CHECK_SET);
		if( err ) return err;
	}
#endif

	A->bptr = bptr; A->bindex = bindex; A->value = value;
	A->is_copy = LIS_FALSE;
	A->status = -LIS_MATRIX_BSC; /* negative status: set but not yet assembled */
	A->is_block = LIS_TRUE;
	A->bnnz = bnnz;
	A->nr = (A->n-1)/bnr+1;
	A->nc = (A->gn-1)/bnc+1;
	if( A->n==A->np ) {
		/* no external (ghost) columns */
		A->nc = 1 + (A->n - 1)/bnc;
		A->pad = (bnc - A->n%bnc)%bnc;
	} else {
		/* local and external column ranges are padded separately */
		A->nc = 2 + (A->n - 1)/bnc + (A->np - A->n - 1)/bnc;
		A->pad = (bnc - A->n%bnc)%bnc + (bnc - (A->np-A->n)%bnc)%bnc;
	}
	A->bnr = bnr; A->bnc = bnc;

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_setDLU_bsc"
/* Attach pre-split BSC storage: strict lower (lbptr/lbindex/lvalue), strict
 * upper (ubptr/ubindex/uvalue) and block diagonal D.  Allocates the L/U core
 * structs; A takes ownership. */
LIS_INT lis_matrix_setDLU_bsc(LIS_INT bnr, LIS_INT bnc, LIS_INT lbnnz, LIS_INT ubnnz, LIS_MATRIX_DIAG D, LIS_INT *lbptr, LIS_INT *lbindex, LIS_SCALAR *lvalue, LIS_INT *ubptr, LIS_INT *ubindex, LIS_SCALAR *uvalue, LIS_MATRIX A)
{
	LIS_INT err;

	LIS_DEBUG_FUNC_IN;

#if 0
	err = lis_matrix_check(A,LIS_MATRIX_CHECK_SET);
	if( err ) return err;
#else
	if(lis_matrix_is_assembled(A)) return LIS_SUCCESS;
	else {
		err = lis_matrix_check(A,LIS_MATRIX_CHECK_SET);
		if( err ) return err;
	}
#endif

	A->L = (LIS_MATRIX_CORE)lis_calloc(sizeof(struct LIS_MATRIX_CORE_STRUCT),"lis_matrix_setDLU_bsc::A->L");
	if( A->L==NULL ) {
		LIS_SETERR_MEM(sizeof(struct LIS_MATRIX_CORE_STRUCT));
		return LIS_OUT_OF_MEMORY;
	}
	A->U = (LIS_MATRIX_CORE)lis_calloc(sizeof(struct LIS_MATRIX_CORE_STRUCT),"lis_matrix_setDLU_bsc::A->U");
	if( A->U==NULL ) {
		LIS_SETERR_MEM(sizeof(struct LIS_MATRIX_CORE_STRUCT));
		lis_matrix_DLU_destroy(A);
		return LIS_OUT_OF_MEMORY;
	}

	A->D = D;
	A->L->bnnz = lbnnz; A->L->bptr = lbptr; A->L->bindex = lbindex; A->L->value = lvalue;
	A->U->bnnz = ubnnz; A->U->bptr = ubptr; A->U->bindex = ubindex; A->U->value = uvalue;
	A->is_copy = LIS_FALSE;
	A->status = -LIS_MATRIX_BSC;
	A->is_splited = LIS_TRUE;
	A->is_block = LIS_TRUE;
	A->nr = (A->n-1)/bnr+1;
	A->nc = (A->gn-1)/bnc+1;
	if( A->n==A->np ) {
		A->nc = 1 + (A->n - 1)/bnc;
		A->pad = (bnc - A->n%bnc)%bnc;
	} else {
		A->nc = 2 + (A->n - 1)/bnc + (A->np - A->n - 1)/bnc;
		A->pad = (bnc - A->n%bnc)%bnc + (bnc - (A->np-A->n)%bnc)%bnc;
	}
	A->bnr = bnr; A->bnc = bnc;

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_malloc_bsc"
/* Allocate the three BSC arrays for a matrix of n columns with bnr x bnc
 * blocks and bnnz blocks; frees everything already obtained on failure.
 * NOTE(review): the bptr failure path returns LIS_FAILS while the other two
 * return LIS_OUT_OF_MEMORY — confirm whether that asymmetry is intended. */
LIS_INT lis_matrix_malloc_bsc(LIS_INT n, LIS_INT bnr, LIS_INT bnc, LIS_INT bnnz, LIS_INT **bptr, LIS_INT **bindex, LIS_SCALAR **value)
{
	LIS_INT nc;

	LIS_DEBUG_FUNC_IN;

	nc = 1 + (n -1)/bnc;
	*bptr = NULL;
	*bindex = NULL;
	*value = NULL;
	*bptr = (LIS_INT *)lis_malloc( (nc+1)*sizeof(LIS_INT),"lis_matrix_malloc_bsc::bptr" );
	if( *bptr==NULL ) {
		LIS_SETERR_MEM((nc+1)*sizeof(LIS_INT));
		lis_free2(3,*bptr,*bindex,*value);
		return LIS_FAILS;
	}
	*bindex = (LIS_INT *)lis_malloc( bnnz*sizeof(LIS_INT),"lis_matrix_malloc_bsc::bindex" );
	if( *bindex==NULL ) {
		LIS_SETERR_MEM(bnnz*sizeof(LIS_INT));
		lis_free2(3,*bptr,*bindex,*value);
		return LIS_OUT_OF_MEMORY;
	}
	*value = (LIS_SCALAR *)lis_malloc( bnnz*bnr*bnc*sizeof(LIS_SCALAR),"lis_matrix_malloc_bsc::value" );
	if( *value==NULL ) {
		LIS_SETERR_MEM(bnnz*bnr*bnc*sizeof(LIS_SCALAR));
		lis_free2(3,*bptr,*bindex,*value);
		return LIS_OUT_OF_MEMORY;
	}

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_elements_copy_bsc"
/* Copy BSC arrays (ptr/index/value) into o_ptr/o_index/o_value.
 * OpenMP-parallel over block columns; bs is the dense block size. */
LIS_INT lis_matrix_elements_copy_bsc(LIS_INT n, LIS_INT bnr, LIS_INT bnc, LIS_INT bnnz, LIS_INT *ptr, LIS_INT *index, LIS_SCALAR *value, LIS_INT *o_ptr, LIS_INT *o_index, LIS_SCALAR *o_value)
{
	LIS_INT i,j,k;
	LIS_INT nc,bs;

	LIS_DEBUG_FUNC_IN;

	nc = 1 + (n - 1)/bnc;
	bs = bnr*bnc;
#ifdef _OPENMP
#pragma omp parallel private(i,j,k)
#endif
	{
#ifdef _OPENMP
#pragma omp for
#endif
		for(i=0;i<nc+1;i++) {
			o_ptr[i] = ptr[i];
		}
#ifdef _OPENMP
#pragma omp for
#endif
		for(i=0;i<nc;i++) {
			for(j=ptr[i];j<ptr[i+1];j++) {
				for(k=0;k<bs;k++) {
					o_value[j*bs+k] = value[j*bs+k];
				}
				o_index[j] = index[j];
			}
		}
	}

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_copy_bsc"
/* Deep-copy Ain into Aout: the split D/L/U parts (when present) and/or the
 * merged storage, then assemble Aout. */
LIS_INT lis_matrix_copy_bsc(LIS_MATRIX Ain, LIS_MATRIX Aout)
{
	LIS_INT err;
	LIS_INT np,bnnz,bnr,bnc;
	LIS_INT lbnnz,ubnnz;
	LIS_INT *bptr,*bindex;
	LIS_INT *lbptr,*lbindex;
	LIS_INT *ubptr,*ubindex;
	LIS_SCALAR *value,*lvalue,*uvalue;
	LIS_MATRIX_DIAG D;

	LIS_DEBUG_FUNC_IN;

	np = Ain->np;
	bnnz = Ain->bnnz;
	bnr = Ain->bnr;
	bnc = Ain->bnc;
	if( Ain->is_splited ) {
		lbnnz = Ain->L->bnnz;
		ubnnz = Ain->U->bnnz;
		lbptr = NULL; lbindex = NULL; lvalue = NULL;
		ubptr = NULL; ubindex = NULL; uvalue = NULL;
		D = NULL;
		err = lis_matrix_malloc_bsc(np,bnr,bnc,lbnnz,&lbptr,&lbindex,&lvalue);
		if( err ) {
			return err;
		}
		err = lis_matrix_malloc_bsc(np,bnr,bnc,ubnnz,&ubptr,&ubindex,&uvalue);
		if( err ) {
			lis_free2(6,ubptr,lbptr,ubindex,lbindex,uvalue,lvalue);
			return err;
		}
		err = lis_matrix_diag_duplicateM(Ain,&D);
		if( err ) {
			lis_free2(6,ubptr,lbptr,ubindex,lbindex,uvalue,lvalue);
			return err;
		}
		lis_matrix_diag_copy(Ain->D,D);
		lis_matrix_elements_copy_bsc(np,bnr,bnc,lbnnz,Ain->L->bptr,Ain->L->bindex,Ain->L->value,lbptr,lbindex,lvalue);
		lis_matrix_elements_copy_bsc(np,bnr,bnc,ubnnz,Ain->U->bptr,Ain->U->bindex,Ain->U->value,ubptr,ubindex,uvalue);
		err = lis_matrix_setDLU_bsc(bnr,bnc,lbnnz,ubnnz,D,lbptr,lbindex,lvalue,ubptr,ubindex,uvalue,Aout);
		if( err ) {
			lis_free2(6,ubptr,lbptr,ubindex,lbindex,uvalue,lvalue);
			return err;
		}
	}
	/* is_save means the merged storage is kept alongside the split one */
	if( !Ain->is_splited || (Ain->is_splited && Ain->is_save) ) {
		bptr = NULL; bindex = NULL; value = NULL;
		err = lis_matrix_malloc_bsc(np,bnr,bnc,bnnz,&bptr,&bindex,&value);
		if( err ) {
			return err;
		}
		lis_matrix_elements_copy_bsc(np,bnr,bnc,bnnz,Ain->bptr,Ain->bindex,Ain->value,bptr,bindex,value);
		err = lis_matrix_set_bsc(bnr,bnc,bnnz,bptr,bindex,value,Aout);
		if( err ) {
			lis_free2(3,bptr,bindex,value);
			return err;
		}
	}
	err = lis_matrix_assemble(Aout);
	if( err ) {
		lis_matrix_storage_destroy(Aout);
		return err;
	}

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_convert_crs2bsc"
/* Convert CCS (compressed column) storage into BSC with Aout->conv_bnr x
 * conv_bnc blocks.  Pass 1 counts distinct block rows per block column via
 * per-thread workspaces iw/iw2; pass 2 zero-fills each new block and scatters
 * the scalar entries.  iw stores kv+1 so that 0 can mean "block unseen".
 * NOTE(review): __FUNC__ says "crs2bsc" while the function converts CCS; the
 * USE_MPI and non-MPI branches of pass 1 are identical; the bindex OOM
 * message reports (nr+1)*sizeof although bnnz*sizeof was requested. */
LIS_INT lis_matrix_convert_ccs2bsc(LIS_MATRIX Ain, LIS_MATRIX Aout)
{
	LIS_INT i,j,k,n,bnr,bnc;
	LIS_INT ii,jj,kk,pad;
	LIS_INT bnnz,bj,nr,nc,jpos,nnz,ij,kv,bi;
	LIS_INT err;
	LIS_INT np,nprocs,my_rank;
	LIS_INT *iw,*iw2;
	LIS_INT *bptr,*bindex;
	LIS_SCALAR *value;

	LIS_DEBUG_FUNC_IN;

	bnr = Aout->conv_bnr;
	bnc = Aout->conv_bnc;
	n = Ain->n;
	np = Ain->np;
	nr = 1 + (n - 1)/bnr;
	pad = (bnc - n%bnc)%bnc;
	if( n==np ) {
		nc = 1 + (n - 1)/bnc;
	} else {
		nc = 2 + (n - 1)/bnc + (np - n - 1)/bnc;
	}

	bptr = NULL; bindex = NULL; value = NULL;
	iw = NULL; iw2 = NULL;

	bptr = (LIS_INT *)lis_malloc( (nc+1)*sizeof(LIS_INT),"lis_matrix_convert_ccs2bsc::bptr" );
	if( bptr==NULL ) {
		LIS_SETERR_MEM((nc+1)*sizeof(LIS_INT));
		lis_free2(5,bptr,bindex,value,iw,iw2);
		return LIS_OUT_OF_MEMORY;
	}
#ifdef _OPENMP
	nprocs = omp_get_max_threads();
#else
	nprocs = 1;
#endif
	/* per-thread scratch: iw = seen flags per block row, iw2 = seen list */
	iw = (LIS_INT *)lis_malloc( nprocs*nr*sizeof(LIS_INT),"lis_matrix_convert_ccs2bsc::iw" );
	iw2 = (LIS_INT *)lis_malloc( nprocs*nr*sizeof(LIS_INT),"lis_matrix_convert_ccs2bsc::iw2" );
#ifdef _OPENMP
#pragma omp parallel private(i,k,ii,j,bj,kk,ij,jj,kv,jpos,my_rank)
#endif
	{
#ifdef _OPENMP
		my_rank = omp_get_thread_num();
#else
		my_rank = 0;
#endif
		memset(&iw[my_rank*nr],0,nr*sizeof(LIS_INT));
#ifdef _OPENMP
#pragma omp for
#endif
		/* Pass 1: count the distinct block rows touched by block column i. */
		for(i=0;i<nc;i++) {
			k = 0;
			kk = bnc*i;
			jj = 0;
#ifdef USE_MPI
			for(ii=0;ii+kk<np&&ii<bnc;ii++) {
				for(j=Ain->ptr[kk+ii];j<Ain->ptr[kk+ii+1];j++) {
					bj = Ain->index[j]/bnr;
					jpos = iw[my_rank*nr + bj];
					if( jpos==0 ) {
						iw[my_rank*nr + bj] = 1;
						iw2[my_rank*nr + jj] = bj;
						jj++;
					}
				}
			}
#else
			for(ii=0;ii+kk<np&&ii<bnc;ii++) {
				for(j=Ain->ptr[kk+ii];j<Ain->ptr[kk+ii+1];j++) {
					bj = Ain->index[j]/bnr;
					jpos = iw[my_rank*nr + bj];
					if( jpos==0 ) {
						iw[my_rank*nr + bj] = 1;
						iw2[my_rank*nr + jj] = bj;
						jj++;
					}
				}
			}
#endif
			/* reset the seen flags for the next block column */
			for(bj=0;bj<jj;bj++) {
				k++;
				ii = iw2[my_rank*nr + bj];
				iw[my_rank*nr + ii]=0;
			}
			bptr[i+1] = k;
		}
	}
	/* prefix-sum the per-column block counts into block pointers */
	bptr[0] = 0;
	for(i=0;i<nc;i++) {
		bptr[i+1] += bptr[i];
	}
	bnnz = bptr[nc];
	nnz = bnnz*bnr*bnc;

	bindex = (LIS_INT *)lis_malloc( bnnz*sizeof(LIS_INT),"lis_matrix_convert_ccs2bsc::bindex" );
	if( bindex==NULL ) {
		LIS_SETERR_MEM((nr+1)*sizeof(LIS_INT));
		lis_free2(5,bptr,bindex,value,iw,iw2);
		return LIS_OUT_OF_MEMORY;
	}
	value = (LIS_SCALAR *)lis_malloc( nnz*sizeof(LIS_SCALAR),"lis_matrix_convert_ccs2bsc::value" );
	if( value==NULL ) {
		LIS_SETERR_MEM(nnz*sizeof(LIS_SCALAR));
		lis_free2(5,bptr,bindex,value,iw,iw2);
		return LIS_OUT_OF_MEMORY;
	}

	/* convert bsc */
#ifdef _OPENMP
#pragma omp parallel private(bi,i,ii,k,j,bj,jpos,kv,kk,ij,jj,my_rank)
#endif
	{
#ifdef _OPENMP
		my_rank = omp_get_thread_num();
#else
		my_rank = 0;
#endif
		memset(&iw[my_rank*nr],0,nr*sizeof(LIS_INT));
#ifdef _OPENMP
#pragma omp for
#endif
		/* Pass 2: allocate blocks in first-touch order and scatter values. */
		for(bi=0;bi<nc;bi++) {
			i = bi*bnc;
			ii = 0;
			kk = bptr[bi];
			while( i+ii<np && ii<=bnc-1 ) {
				for( k=Ain->ptr[i+ii];k<Ain->ptr[i+ii+1];k++) {
					j = Ain->index[k];
					bj = j/bnr;
					j = j%bnr;
					jpos = iw[my_rank*nr + bj];
					if( jpos==0 ) {
						/* first entry of this block: zero-fill, record */
						kv = kk * bnr * bnc;
						iw[my_rank*nr + bj] = kv+1;  /* +1 so 0 = unseen */
						bindex[kk] = bj;
						for(jj=0;jj<bnr*bnc;jj++) value[kv+jj] = 0.0;
						ij = j + ii*bnc;
						value[kv+ij] = Ain->value[k];
						kk = kk+1;
					} else {
						ij = j + ii*bnc;
						value[jpos+ij-1] = Ain->value[k];
					}
				}
				ii = ii+1;
			}
			for(j=bptr[bi];j<bptr[bi+1];j++) {
				iw[my_rank*nr + bindex[j]] = 0;
			}
		}
	}
	lis_free2(2,iw,iw2);
	err = lis_matrix_set_bsc(bnr,bnc,bnnz,bptr,bindex,value,Aout);
	if( err ) {
		lis_free2(3,bptr,bindex,value);
		return err;
	}
	Aout->pad_comm = pad;
	err = lis_matrix_assemble(Aout);
	if( err ) {
		lis_matrix_storage_destroy(Aout);
		return err;
	}
#ifdef USE_MPI
	Aout->commtable->pad = pad;
	MPI_Barrier(Ain->comm);
#endif

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_convert_bsc2crs"
/* Convert BSC back to CRS, dropping explicit zeros: count per-row nonzeros
 * into per-thread slices of iw, prefix-sum into ptr, turn iw into running
 * write cursors, then scatter values/column indices. */
LIS_INT lis_matrix_convert_bsc2crs(LIS_MATRIX Ain, LIS_MATRIX Aout)
{
	LIS_INT i,j,k,l;
	LIS_INT nr,nc,bnr,bnc,bs,bi,bj;
	LIS_INT err;
	LIS_INT n,nnz,is,nprocs,my_rank;
	LIS_INT *iw,*ptr,*index;
	LIS_SCALAR *value;

	LIS_DEBUG_FUNC_IN;

	n = Ain->n;
	nr = Ain->nr;
	nc = Ain->nc;
	bnr = Ain->bnr;
	bnc = Ain->bnc;
	bs = bnr*bnc;
	is = Ain->is;
#ifdef _OPENMP
	nprocs = omp_get_max_threads();
#else
	nprocs = 1;
#endif

	iw = NULL; ptr = NULL; index = NULL; value = NULL;
	iw = (LIS_INT *)lis_malloc( nprocs*n*sizeof(LIS_INT),"lis_matrix_convert_bsc2crs::iw" );
	if( iw==NULL ) {
		LIS_SETERR_MEM(nprocs*n*sizeof(LIS_INT));
		return LIS_OUT_OF_MEMORY;
	}
	ptr = (LIS_INT *)lis_malloc( (n+1)*sizeof(LIS_INT),"lis_matrix_convert_bsc2crs::ptr" );
	if( ptr==NULL ) {
		LIS_SETERR_MEM((n+1)*sizeof(LIS_INT));
		lis_free2(4,ptr,index,value,iw);
		return LIS_OUT_OF_MEMORY;
	}

	/* check nnz */
#ifdef _OPENMP
#pragma omp parallel private(i,j,bi,bj,my_rank)
#endif
	{
#ifdef _OPENMP
		my_rank = omp_get_thread_num();
#else
		my_rank = 0;
#endif
		memset(&iw[my_rank*n],0,n*sizeof(LIS_INT));
#ifdef _OPENMP
#pragma omp for
#endif
		for(bj=0;bj<nc;bj++) {
			for(j=0;j<bnc;j++) {
				for(bi=Ain->bptr[bj];bi<Ain->bptr[bj+1];bi++) {
					for(i=0;i<bnr;i++) {
						if( Ain->value[bi*bs + j*bnr + i] != (LIS_SCALAR)0.0 ) {
							iw[my_rank*n + Ain->bindex[bi]*bnr + i]++;
						}
					}
				}
			}
		}
#ifdef _OPENMP
#pragma omp for
#endif
		/* combine the per-thread row counts */
		for(i=0;i<n;i++) {
			j = 0;
			for(k=0;k<nprocs;k++) {
				j += iw[k*n + i];
			}
			ptr[i+1] = j;
		}
	}
	ptr[0] = 0;
	for(i=0;i<n;i++) {
		ptr[i+1] += ptr[i];
	}
	nnz = ptr[n];

	index = (LIS_INT *)lis_malloc( nnz*sizeof(LIS_INT),"lis_matrix_convert_bsc2crs::index" );
	if( index==NULL ) {
		lis_free2(4,ptr,index,value,iw);
		LIS_SETERR_MEM(nnz*sizeof(LIS_INT));
		return LIS_OUT_OF_MEMORY;
	}
	value = (LIS_SCALAR *)lis_malloc( nnz*sizeof(LIS_SCALAR),"lis_matrix_convert_bsc2crs::value" );
	if( value==NULL ) {
		lis_free2(4,ptr,index,value,iw);
		LIS_SETERR_MEM(nnz*sizeof(LIS_SCALAR));
		return LIS_OUT_OF_MEMORY;
	}

	/* convert crs */
#ifdef _OPENMP
#pragma omp parallel private(i,j,bi,bj,k,l,my_rank)
#endif
	{
#ifdef _OPENMP
		my_rank = omp_get_thread_num();
#else
		my_rank = 0;
#endif
#ifdef _OPENMP
#pragma omp for
#endif
		/* turn per-thread counts into per-thread write cursors per row */
		for(i=0;i<n;i++) {
			k = ptr[i];
			for(j=0;j<nprocs;j++) {
				l = iw[j*n + i];
				iw[j*n + i] = k;
				k = k + l;
			}
		}
#ifdef _OPENMP
#pragma omp for
#endif
		for(bj=0;bj<nc;bj++) {
			for(j=0;j<bnc;j++) {
				if( bj*bnc+j==n ) break;  /* skip padded columns */
				for(bi=Ain->bptr[bj];bi<Ain->bptr[bj+1];bi++) {
					for(i=0;i<bnr;i++) {
						if( Ain->value[bi*bs + j*bnr + i] != (LIS_SCALAR)0.0 ) {
							k = iw[my_rank*n + Ain->bindex[bi]*bnr + i]++;
							value[k] = Ain->value[bi*bs + j*bnr + i];
							index[k] = bj*bnc + j;
						}
					}
				}
			}
		}
	}
	err = lis_matrix_set_crs(nnz,ptr,index,value,Aout);
	if( err ) {
		lis_free2(4,ptr,index,value,iw);
		return err;
	}
	Aout->pad = 0;
	Aout->pad_comm = 0;
	err = lis_matrix_assemble(Aout);
	if( err ) {
		lis_matrix_storage_destroy(Aout);
		return err;
	}
#ifdef USE_MPI
	Aout->commtable->pad = 0;
#endif
	lis_free(iw);

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_get_diagonal_bsc"
/* Extract the scalar diagonal of A into d[].  Split case reads D directly;
 * merged case walks each block row looking for blocks that straddle the
 * diagonal (k/i track position within the bnr x bnc block). */
LIS_INT lis_matrix_get_diagonal_bsc(LIS_MATRIX A, LIS_SCALAR d[])
{
	LIS_INT i,j,k,bi,bj,bjj,nr,nc;
	LIS_INT bnr,bnc,bs;
	LIS_INT n;

	LIS_DEBUG_FUNC_IN;

	n = A->n;
	nr = A->nr;
	nc = A->nc;
	bnr = A->bnr;
	bnc = A->bnc;
	bs = bnr*bnc;
	if( A->is_splited ) {
#ifdef _OPENMP
#pragma omp parallel for private(i,j)
#endif
		for(i=0;i<nr;i++) {
			for(j=0;j<bnr;j++) {
				d[i*bnr+j] = A->D->value[i*bs+j*bnr+j];
			}
		}
	} else {
#ifdef _OPENMP
#pragma omp parallel for private(bi,bj,bjj,i,j,k)
#endif
		for(bi=0;bi<nr;bi++) {
			k = 0;
			i = bi*bnr;
			for(bj=A->bptr[bi];bj<A->bptr[bi+1];bj++) {
				bjj = A->bindex[bj];
				/* does this block's column range contain row index i? */
				if( i>=bjj*bnc && i<(bjj+1)*bnc ) {
					for(j=i%bnc;j<bnc&&k<bnr&&i<n;j++) {
						d[i] = A->value[bj*bs + j*bnr + k];
						i++;
						k++;
					}
				}
				if( k==bnr ) break;
			}
		}
	}

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_scaling_bsc"
/* Row-scale A in place: row r of every block is multiplied by d[r].
 * NOTE(review): the scale factor is indexed by the BLOCK-COLUMN loop index bi
 * (d[bi*bnr+i]) in both branches — consistent with scaling_symm below, but
 * verify row/column orientation against the BSC layout. */
LIS_INT lis_matrix_scaling_bsc(LIS_MATRIX A, LIS_SCALAR d[])
{
	LIS_INT i,j;
	LIS_INT bi,bj,bs;
	LIS_INT nr,nc;
	LIS_INT bnr,bnc;
	LIS_INT n;

	LIS_DEBUG_FUNC_IN;

	n = A->n;
	bnr = A->bnr;
	bnc = A->bnc;
	nr = A->nr;
	nc = A->nc;
	bs = A->bnr*A->bnc;
	if( A->is_splited ) {
#ifdef _OPENMP
#pragma omp parallel for private(bi,bj,i,j)
#endif
		for(bi=0;bi<nr;bi++) {
			for(bj=A->L->bptr[bi];bj<A->L->bptr[bi+1];bj++) {
				for(j=0;j<bnc;j++) {
					for(i=0;i<bnr;i++) {
						A->L->value[bj*bs+j*bnr+i] *= d[bi*bnr+i];
					}
				}
			}
			for(bj=A->U->bptr[bi];bj<A->U->bptr[bi+1];bj++) {
				for(j=0;j<bnc;j++) {
					for(i=0;i<bnr;i++) {
						A->U->value[bj*bs+j*bnr+i] *= d[bi*bnr+i];
					}
				}
			}
			for(j=0;j<bnc;j++) {
				for(i=0;i<bnr;i++) {
					A->D->value[bi*bs+j*bnr+i] *= d[bi*bnr+i];
				}
			}
		}
	} else {
#ifdef _OPENMP
#pragma omp parallel for private(bi,bj,i,j)
#endif
		for(bi=0;bi<nr;bi++) {
			for(bj=A->bptr[bi];bj<A->bptr[bi+1];bj++) {
				for(j=0;j<bnc;j++) {
					for(i=0;i<bnr;i++) {
						A->value[bj*bs+j*bnr+i] *= d[bi*bnr+i];
					}
				}
			}
		}
	}

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_scaling_symm_bsc"
/* Symmetric scaling in place: entry (i,j) is multiplied by d[i]*d[j]
 * (diagonal blocks by d[i]^2). */
LIS_INT lis_matrix_scaling_symm_bsc(LIS_MATRIX A, LIS_SCALAR d[])
{
	LIS_INT i,j;
	LIS_INT bi,bj,bjj,bs;
	LIS_INT nr,nc;
	LIS_INT bnr,bnc;
	LIS_INT n;

	LIS_DEBUG_FUNC_IN;

	n = A->n;
	bnr = A->bnr;
	bnc = A->bnc;
	nr = A->nr;
	nc = A->nc;
	bs = A->bnr*A->bnc;
	if( A->is_splited ) {
#ifdef _OPENMP
#pragma omp parallel for private(bi,bj,i,j)
#endif
		for(bi=0;bi<nr;bi++) {
			for(bj=A->L->bptr[bi];bj<A->L->bptr[bi+1];bj++) {
				bjj = A->L->bindex[bj];
				for(j=0;j<bnc;j++) {
					for(i=0;i<bnr;i++) {
						A->L->value[bj*bs+j*bnr+i] *= d[bi*bnr+i]*d[bjj*bnc+j];
					}
				}
			}
			for(bj=A->U->bptr[bi];bj<A->U->bptr[bi+1];bj++) {
				bjj = A->U->bindex[bj];
				for(j=0;j<bnc;j++) {
					for(i=0;i<bnr;i++) {
						A->U->value[bj*bs+j*bnr+i] *= d[bi*bnr+i]*d[bjj*bnc+j];
					}
				}
			}
			for(j=0;j<bnc;j++) {
				for(i=0;i<bnr;i++) {
					A->D->value[bi*bs+j*bnr+i] *= d[bi*bnr+i]*d[bi*bnr+i];
				}
			}
		}
	} else {
#ifdef _OPENMP
#pragma omp parallel for private(bi,bj,bjj,i,j)
#endif
		for(bi=0;bi<nr;bi++) {
			for(bj=A->bptr[bi];bj<A->bptr[bi+1];bj++) {
				bjj = A->bindex[bj];
				for(j=0;j<bnc;j++) {
					for(i=0;i<bnr;i++) {
						A->value[bj*bs+j*bnr+i] *= d[bi*bnr+i]*d[bjj*bnc+j];
					}
				}
			}
		}
	}

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_normf_bsc"
/* Frobenius norm of A.
 * NOTE(review): the inner loops read value[bj+j] with j in [0,bs) — every
 * other kernel in this file addresses block bj as value[bj*bs+j]; this looks
 * like a missing "*bs" and should be verified (split branch also omits D). */
LIS_INT lis_matrix_normf_bsc(LIS_MATRIX A, LIS_SCALAR *nrm)
{
	LIS_INT j;
	LIS_INT bi,bj,bs;
	LIS_INT nr,nc;
	LIS_INT bnr,bnc;
	LIS_INT n;
	LIS_SCALAR sum;

	LIS_DEBUG_FUNC_IN;

	n = A->n;
	bnr = A->bnr;
	bnc = A->bnc;
	nr = A->nr;
	nc = A->nc;
	bs = bnr*bnc;
	sum = (LIS_SCALAR)0;
	if( A->is_splited ) {
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum) private(bi,bj,j)
#endif
		for(bi=0;bi<nr;bi++) {
			for(bj=A->L->bptr[bi];bj<A->L->bptr[bi+1];bj++) {
				for(j=0;j<bs;j++) {
					sum += A->L->value[bj+j]*A->L->value[bj+j];
				}
			}
			for(bj=A->U->bptr[bi];bj<A->U->bptr[bi+1];bj++) {
				for(j=0;j<bs;j++) {
					sum += A->U->value[bj+j]*A->U->value[bj+j];
				}
			}
		}
	} else {
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum) private(bi,bj,j)
#endif
		for(bi=0;bi<nr;bi++) {
			for(bj=A->bptr[bi];bj<A->bptr[bi+1];bj++) {
				for(j=0;j<bs;j++) {
					sum += A->value[bj+j]*A->value[bj+j];
				}
			}
		}
	}
	*nrm = sqrt(sum);

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_split_bsc"
/* Split the merged BSC storage of A into strict lower L, strict upper U and
 * block diagonal D (square blocks only).  The OpenMP path precomputes
 * per-column L/U block counts in liw/uiw so the fill loop can run in
 * parallel with private cursors kl/ku. */
LIS_INT lis_matrix_split_bsc(LIS_MATRIX A)
{
	LIS_INT i,j,n,np;
	LIS_INT bnr,bnc,nr,nc,bs;
	LIS_INT nnzl,nnzu;
	LIS_INT err;
	LIS_INT *lptr,*lindex,*uptr,*uindex;
	LIS_SCALAR *lvalue,*uvalue;
	LIS_MATRIX_DIAG D;
#ifdef _OPENMP
	LIS_INT kl,ku;
	LIS_INT *liw,*uiw;
#endif

	LIS_DEBUG_FUNC_IN;

	n = A->n;
	np = A->np;
	bnr = A->bnr;
	bnc = A->bnc;
	nr = A->nr;
	nc = A->nc;
	bs = A->bnr*A->bnc;
	nnzl = 0;
	nnzu = 0;
	D = NULL;
	lptr = NULL; lindex = NULL; lvalue = NULL;
	uptr = NULL; uindex = NULL; uvalue = NULL;
	if( bnr!=bnc ) {
		LIS_SETERR_IMP;
		return LIS_ERR_NOT_IMPLEMENTED;
	}
#ifdef _OPENMP
	liw = (LIS_INT *)lis_malloc((nc+1)*sizeof(LIS_INT),"lis_matrix_split_bsc::liw");
	if( liw==NULL ) {
		LIS_SETERR_MEM((nc+1)*sizeof(LIS_INT));
		return LIS_OUT_OF_MEMORY;
	}
	uiw = (LIS_INT *)lis_malloc((nc+1)*sizeof(LIS_INT),"lis_matrix_split_bsc::uiw");
	if( uiw==NULL ) {
		LIS_SETERR_MEM((nc+1)*sizeof(LIS_INT));
		lis_free(liw);
		return LIS_OUT_OF_MEMORY;
	}
#pragma omp parallel for private(i)
	for(i=0;i<nc+1;i++) {
		liw[i] = 0;
		uiw[i] = 0;
	}
#pragma omp parallel for private(i,j)
	for(i=0;i<nc;i++) {
		for(j=A->bptr[i];j<A->bptr[i+1];j++) {
			if( A->bindex[j]<i ) {
				liw[i+1]++;
			} else if( A->bindex[j]>i ) {
				uiw[i+1]++;
			}
		}
	}
	for(i=0;i<nc;i++) {
		liw[i+1] += liw[i];
		uiw[i+1] += uiw[i];
	}
	nnzl = liw[nc];
	nnzu = uiw[nc];
#else
	for(i=0;i<nc;i++) {
		for(j=A->bptr[i];j<A->bptr[i+1];j++) {
			if( A->bindex[j]<i ) {
				nnzl++;
			} else if( A->bindex[j]>i ) {
				nnzu++;
			}
		}
	}
#endif
	err = lis_matrix_LU_create(A);
	if( err ) {
		return err;
	}
	err = lis_matrix_malloc_bsc(np,bnr,bnc,nnzl,&lptr,&lindex,&lvalue);
	if( err ) {
		return err;
	}
	err = lis_matrix_malloc_bsc(np,bnr,bnc,nnzu,&uptr,&uindex,&uvalue);
	if( err ) {
		lis_free2(6,lptr,lindex,lvalue,uptr,uindex,uvalue);
		return err;
	}
	err = lis_matrix_diag_duplicateM(A,&D);
	if( err ) {
		lis_free2(6,lptr,lindex,lvalue,uptr,uindex,uvalue);
		return err;
	}
#ifdef _OPENMP
#pragma omp parallel for private(i)
	for(i=0;i<nc+1;i++) {
		lptr[i] = liw[i];
		uptr[i] = uiw[i];
	}
#pragma omp parallel for private(i,j,kl,ku)
	for(i=0;i<nc;i++) {
		kl = lptr[i];
		ku = uptr[i];
		for(j=A->bptr[i];j<A->bptr[i+1];j++) {
			if( A->bindex[j]<i ) {
				lindex[kl] = A->bindex[j];
				memcpy(&lvalue[bs*kl],&A->value[bs*j],bs*sizeof(LIS_SCALAR));;
				kl++;
			} else if( A->bindex[j]>i ) {
				uindex[ku] = A->bindex[j];
				memcpy(&uvalue[bs*ku],&A->value[bs*j],bs*sizeof(LIS_SCALAR));
				ku++;
			} else {
				memcpy(&D->value[bs*i],&A->value[bs*j],bs*sizeof(LIS_SCALAR));
			}
		}
	}
	lis_free2(2,liw,uiw);
#else
	nnzl = 0;
	nnzu = 0;
	lptr[0] = 0;
	uptr[0] = 0;
	for(i=0;i<nc;i++) {
		for(j=A->bptr[i];j<A->bptr[i+1];j++) {
			if( A->bindex[j]<i ) {
				lindex[nnzl] = A->bindex[j];
				memcpy(&lvalue[bs*nnzl],&A->value[bs*j],bs*sizeof(LIS_SCALAR));;
				nnzl++;
			} else if( A->bindex[j]>i ) {
				uindex[nnzu] = A->bindex[j];
				memcpy(&uvalue[bs*nnzu],&A->value[bs*j],bs*sizeof(LIS_SCALAR));
				nnzu++;
			} else {
				memcpy(&D->value[bs*i],&A->value[bs*j],bs*sizeof(LIS_SCALAR));
			}
		}
		lptr[i+1] = nnzl;
		uptr[i+1] = nnzu;
	}
#endif
	A->L->bnr = bnr; A->L->bnc = bnc; A->L->nr = nr; A->L->nc = nc;
	A->L->bnnz = nnzl; A->L->bptr = lptr; A->L->bindex = lindex; A->L->value = lvalue;
	A->U->bnr = bnr; A->U->bnc = bnc; A->U->nr = nr; A->U->nc = nc;
	A->U->bnnz = nnzu; A->U->bptr = uptr; A->U->bindex = uindex; A->U->value = uvalue;
	A->D = D;
	A->is_splited = LIS_TRUE;

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_merge_bsc"
/* Merge the split L/D/U storage back into a single BSC array set on A,
 * emitting L blocks, then the diagonal block, then U blocks per column
 * (keeps block rows sorted).  bnnz is re-counted while filling. */
LIS_INT lis_matrix_merge_bsc(LIS_MATRIX A)
{
	LIS_INT i,j,n,np,nr,nc;
	LIS_INT bnnz,bnr,bnc,bs;
	LIS_INT err;
	LIS_INT *bptr,*bindex;
	LIS_SCALAR *value;

	LIS_DEBUG_FUNC_IN;

	n = A->n;
	np = A->np;
	nc = A->nc;
	nr = A->nr;
	bnr = A->bnr;
	bnc = A->bnc;
	bs = bnr*bnc;
	bptr = NULL;
	bindex = NULL;
	value = NULL;
	bnnz = A->L->bnnz + A->U->bnnz + nr;  /* +nr: one diagonal block per row */
	err = lis_matrix_malloc_bsc(np,bnr,bnc,bnnz,&bptr,&bindex,&value);
	if( err ) {
		return err;
	}
	bnnz = 0;
	bptr[0] = 0;
	for(i=0;i<nc;i++) {
		for(j=A->L->bptr[i];j<A->L->bptr[i+1];j++) {
			bindex[bnnz] = A->L->bindex[j];
			memcpy(&value[bs*bnnz],&A->L->value[bs*j],bs*sizeof(LIS_SCALAR));;
			bnnz++;
		}
		bindex[bnnz] = i;
		memcpy(&value[bs*bnnz],&A->D->value[bs*i],bs*sizeof(LIS_SCALAR));;
		bnnz++;
		for(j=A->U->bptr[i];j<A->U->bptr[i+1];j++) {
			bindex[bnnz] = A->U->bindex[j];
			memcpy(&value[bs*bnnz],&A->U->value[bs*j],bs*sizeof(LIS_SCALAR));;
			bnnz++;
		}
		bptr[i+1] = bnnz;
	}
	A->bnnz = bnnz;
	A->bptr = bptr;
	A->value = value;
	A->bindex = bindex;

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_solve_bsc"
/* Forward/backward block triangular solve X = (L or U + D)^{-1} B using the
 * precomputed inverse diagonal blocks in A->WD.  Block sizes 1/2/3 are
 * hand-unrolled; the default path uses a bnr-sized scratch vector w.
 * NOTE(review): this function continues beyond the end of this excerpt. */
LIS_INT lis_matrix_solve_bsc(LIS_MATRIX A, LIS_VECTOR B, LIS_VECTOR X, LIS_INT flag)
{
	LIS_INT i,j,k,ii,jj,nr,bnr,bnc,bs;
	LIS_SCALAR t0,t1,t2;
	LIS_SCALAR *b,*x,*w;

	LIS_DEBUG_FUNC_IN;

	nr = A->nr;
	bnr = A->bnr;
	bnc = A->bnc;
	bs = A->bnr*A->bnc;
	b = B->value;
	x = X->value;

	switch(flag) {
	case LIS_MATRIX_LOWER:
		switch(bnr) {
		case 1:
			for(i=0;i<nr;i++) {
				t0 = b[i];
				for(j=A->L->bptr[i];j<A->L->bptr[i+1];j++) {
					jj = A->L->bindex[j];
					t0 -= A->L->value[j] * x[jj];
				}
				x[i] = A->WD->value[i] * t0;
			}
			break;
		case 2:
			for(i=0;i<nr;i++) {
				t0 = b[i*2];
				t1 = b[i*2+1];
				for(j=A->L->bptr[i];j<A->L->bptr[i+1];j++) {
					jj = A->L->bindex[j];
					t0 -= A->L->value[j*4+0] * x[jj*2+0];
					t1 -= A->L->value[j*4+1] * x[jj*2+0];
					t0 -= A->L->value[j*4+2] * x[jj*2+1];
					t1 -= A->L->value[j*4+3] * x[jj*2+1];
				}
				/* 2x2 inverse-diagonal multiply (column-major blocks) */
				x[i*2+0] = A->WD->value[4*i+0] * t0 + A->WD->value[4*i+2] * t1;
				x[i*2+1] = A->WD->value[4*i+1] * t0 + A->WD->value[4*i+3] * t1;
			}
			break;
		case 3:
			for(i=0;i<nr;i++) {
				t0 = b[i*3];
				t1 = b[i*3+1];
				t2 = b[i*3+2];
				for(j=A->L->bptr[i];j<A->L->bptr[i+1];j++) {
					jj = A->L->bindex[j];
					t0 -= A->L->value[j*9+0] * x[jj*3+0];
					t1 -= A->L->value[j*9+1] * x[jj*3+0];
					t2 -= A->L->value[j*9+2] * x[jj*3+0];
					t0 -= A->L->value[j*9+3] * x[jj*3+1];
					t1 -= A->L->value[j*9+4] * x[jj*3+1];
					t2 -= A->L->value[j*9+5] * x[jj*3+1];
					t0 -= A->L->value[j*9+6] * x[jj*3+2];
					t1 -= A->L->value[j*9+7] * x[jj*3+2];
					t2 -= A->L->value[j*9+8] * x[jj*3+2];
				}
				x[i*3+0] = A->WD->value[9*i+0] * t0 + A->WD->value[9*i+3] * t1 + A->WD->value[9*i+6] * t2;
				x[i*3+1] = A->WD->value[9*i+1] * t0 + A->WD->value[9*i+4] * t1 + A->WD->value[9*i+7] * t2;
				x[i*3+2] = A->WD->value[9*i+2] * t0 + A->WD->value[9*i+5] * t1 + A->WD->value[9*i+8] * t2;
			}
			break;
		default:
			/* general block size: accumulate into scratch w, then apply WD */
			w = (LIS_SCALAR *)lis_malloc(bnr*sizeof(LIS_SCALAR),"lis_matrix_solve_bsc::w");
			for(i=0;i<nr;i++) {
				for(j=0;j<bnr;j++) {
					w[j] = b[i*bnr+j];
				}
				for(j=A->L->bptr[i];j<A->L->bptr[i+1];j++) {
					k = A->L->bindex[j] * bnc;
					for(ii=0;ii<bnr;ii++) {
						t0 = w[ii];
						for(jj=0;jj<bnc;jj++) {
							t0 -= A->L->value[j*bs + jj*bnr+ii] * x[k + jj];
						}
						w[ii] = t0;
					}
				}
				for(ii=0;ii<bnr;ii++) {
					t0 = 0.0;
					for(jj=0;jj<bnc;jj++) {
						t0 += A->WD->value[i*bs + jj*bnr+ii] * w[jj];
					}
					x[i*bnr+ii] = t0;
				}
			}
			lis_free(w);
			break;
		}
		break;
	case LIS_MATRIX_UPPER:
		switch(bnr) {
		case 1:
			for(i=nr-1;i>=0;i--) {
				t0 = b[i];
				for(j=A->U->bptr[i];j<A->U->bptr[i+1];j++) {
					jj = A->U->bindex[j];
					t0 -= A->U->value[j] * x[jj];
				}
				x[i] = A->WD->value[i] * t0;
			}
			break;
		case 2:
			for(i=nr-1;i>=0;i--) {
				t0 = b[i*2];
				t1 = b[i*2+1];
				for(j=A->U->bptr[i];j<A->U->bptr[i+1];j++) {
					jj = A->U->bindex[j];
					t0 -= A->U->value[j*4+0] * x[jj*2+0];
					t1 -= A->U->value[j*4+1] * x[jj*2+0];
					t0 -= A->U->value[j*4+2] * x[jj*2+1];
					t1 -= A->U->value[j*4+3] * x[jj*2+1];
				}
				x[i*2+0] = A->WD->value[4*i+0] * t0 + A->WD->value[4*i+2] * t1;
				x[i*2+1] = A->WD->value[4*i+1] * t0 + A->WD->value[4*i+3] * t1;
			}
			break;
		case 3:
			for(i=nr-1;i>=0;i--) {
				t0 = b[i*3];
				t1 = b[i*3+1];
				t2 = b[i*3+2];
				for(j=A->U->bptr[i];j<A->U->bptr[i+1];j++) {
					jj = A->U->bindex[j];
					t0 -= A->U->value[j*9+0] * x[jj*3+0];
					t1 -= A->U->value[j*9+1] * x[jj*3+0];
					t2 -= A->U->value[j*9+2] * x[jj*3+0];
					t0 -= A->U->value[j*9+3] * x[jj*3+1];
					t1 -= A->U->value[j*9+4] * x[jj*3+1];
					t2 -= A->U->value[j*9+5] * x[jj*3+1];
					t0 -= A->U->value[j*9+6] * x[jj*3+2];
					t1 -= A->U->value[j*9+7] * x[jj*3+2];
					t2 -= A->U->value[j*9+8] * x[jj*3+2];
				}
				x[i*3+0] = A->WD->value[9*i+0] * t0 + A->WD->value[9*i+3] * t1 + A->WD->value[9*i+6] * t2;
				x[i*3+1] = A->WD->value[9*i+1] * t0 + A->WD->value[9*i+4] * t1 + A->WD->value[9*i+7] * t2;
				x[i*3+2] = A->WD->value[9*i+2] * t0 + A->WD->value[9*i+5] * t1 + A->WD->value[9*i+8] * t2;
			}
			break;
		default:
			w = (LIS_SCALAR *)lis_malloc(bnr*sizeof(LIS_SCALAR),"lis_matrix_solve_bsc::w");
			for(i=nr-1;i>=0;i--) {
				for(j=0;j<bnr;j++) {
					w[j] = b[i*bnr+j];
				}
				for(j=A->U->bptr[i];j<A->U->bptr[i+1];j++) {
					k = A->U->bindex[j] * bnc;
					for(ii=0;ii<bnr;ii++) {
						t0 = w[ii];
						for(jj=0;jj<bnc;jj++) {
							t0 -= A->U->value[j*bs + jj*bnr+ii] * x[k + jj];
						}
						w[ii] =
t0; } } for(ii=0;ii<bnr;ii++) { t0 = 0.0; for(jj=0;jj<bnc;jj++) { t0 += A->WD->value[i*bs + jj*bnr+ii] * w[jj]; } x[i*bnr+ii] = t0; } } lis_free(w); break; } break; case LIS_MATRIX_SSOR: switch(bnr) { case 1: for(i=0;i<nr;i++) { t0 = b[i]; for(j=A->L->bptr[i];j<A->L->bptr[i+1];j++) { jj = A->L->bindex[j]; t0 -= A->L->value[j] * x[jj]; } x[i] = A->WD->value[i] * t0; } for(i=nr-1;i>=0;i--) { t0 = 0.0; for(j=A->U->bptr[i];j<A->U->bptr[i+1];j++) { jj = A->U->bindex[j]; t0 += A->U->value[j] * x[jj]; } x[i] -= A->WD->value[i] * t0; } break; case 2: for(i=0;i<nr;i++) { t0 = b[i*2]; t1 = b[i*2+1]; for(j=A->L->bptr[i];j<A->L->bptr[i+1];j++) { jj = A->L->bindex[j]; t0 -= A->L->value[j*4+0] * x[jj*2+0]; t1 -= A->L->value[j*4+1] * x[jj*2+0]; t0 -= A->L->value[j*4+2] * x[jj*2+1]; t1 -= A->L->value[j*4+3] * x[jj*2+1]; } x[i*2+0] = A->WD->value[4*i+0] * t0 + A->WD->value[4*i+2] * t1; x[i*2+1] = A->WD->value[4*i+1] * t0 + A->WD->value[4*i+3] * t1; } for(i=nr-1;i>=0;i--) { t0 = 0.0; t1 = 0.0; for(j=A->U->bptr[i];j<A->U->bptr[i+1];j++) { jj = A->U->bindex[j]; t0 += A->U->value[j*4+0] * x[jj*2+0]; t1 += A->U->value[j*4+1] * x[jj*2+0]; t0 += A->U->value[j*4+2] * x[jj*2+1]; t1 += A->U->value[j*4+3] * x[jj*2+1]; } x[i*2+0] -= A->WD->value[4*i+0] * t0 + A->WD->value[4*i+2] * t1; x[i*2+1] -= A->WD->value[4*i+1] * t0 + A->WD->value[4*i+3] * t1; } break; case 3: for(i=0;i<nr;i++) { t0 = b[i*bnr]; t1 = b[i*bnr+1]; t2 = b[i*bnr+2]; for(j=A->L->bptr[i];j<A->L->bptr[i+1];j++) { jj = A->L->bindex[j]; t0 -= A->L->value[j*9+0] * x[jj*3+0]; t1 -= A->L->value[j*9+1] * x[jj*3+0]; t2 -= A->L->value[j*9+2] * x[jj*3+0]; t0 -= A->L->value[j*9+3] * x[jj*3+1]; t1 -= A->L->value[j*9+4] * x[jj*3+1]; t2 -= A->L->value[j*9+5] * x[jj*3+1]; t0 -= A->L->value[j*9+6] * x[jj*3+2]; t1 -= A->L->value[j*9+7] * x[jj*3+2]; t2 -= A->L->value[j*9+8] * x[jj*3+2]; } x[i*bnr+0] = A->WD->value[9*i+0] * t0 + A->WD->value[9*i+3] * t1 + A->WD->value[9*i+6] * t2; x[i*bnr+1] = A->WD->value[9*i+1] * t0 + A->WD->value[9*i+4] * t1 + 
A->WD->value[9*i+7] * t2; x[i*bnr+2] = A->WD->value[9*i+2] * t0 + A->WD->value[9*i+5] * t1 + A->WD->value[9*i+8] * t2; } for(i=nr-1;i>=0;i--) { t0 = 0.0; t1 = 0.0; t2 = 0.0; for(j=A->U->bptr[i];j<A->U->bptr[i+1];j++) { jj = A->U->bindex[j]; t0 += A->U->value[j*9+0] * x[jj*3+0]; t1 += A->U->value[j*9+1] * x[jj*3+0]; t2 += A->U->value[j*9+2] * x[jj*3+0]; t0 += A->U->value[j*9+3] * x[jj*3+1]; t1 += A->U->value[j*9+4] * x[jj*3+1]; t2 += A->U->value[j*9+5] * x[jj*3+1]; t0 += A->U->value[j*9+6] * x[jj*3+2]; t1 += A->U->value[j*9+7] * x[jj*3+2]; t2 += A->U->value[j*9+8] * x[jj*3+2]; } x[i*3+0] -= A->WD->value[9*i+0] * t0 + A->WD->value[9*i+3] * t1 + A->WD->value[9*i+6] * t2; x[i*3+1] -= A->WD->value[9*i+1] * t0 + A->WD->value[9*i+4] * t1 + A->WD->value[9*i+7] * t2; x[i*3+2] -= A->WD->value[9*i+2] * t0 + A->WD->value[9*i+5] * t1 + A->WD->value[9*i+8] * t2; } break; default: w = (LIS_SCALAR *)lis_malloc(bnr*sizeof(LIS_SCALAR),"lis_matrix_solve_bsc::w"); for(i=0;i<nr;i++) { for(j=0;j<bnr;j++) { w[j] = b[i*bnr+j]; } for(j=A->L->bptr[i];j<A->L->bptr[i+1];j++) { k = A->L->bindex[j] * bnc; for(ii=0;ii<bnr;ii++) { t0 = w[ii]; for(jj=0;jj<bnc;jj++) { t0 -= A->L->value[j*bs + jj*bnr+ii] * x[k + jj]; } w[ii] = t0; } } for(ii=0;ii<bnr;ii++) { t0 = 0.0; for(jj=0;jj<bnc;jj++) { t0 += A->WD->value[i*bs + jj*bnr+ii] * w[jj]; } x[i*bnr+ii] = t0; } } for(i=nr-1;i>=0;i--) { for(j=0;j<bnr;j++) { w[j] = 0.0; } for(j=A->U->bptr[i];j<A->U->bptr[i+1];j++) { k = A->U->bindex[j] * bnc; for(ii=0;ii<bnr;ii++) { t0 = w[ii]; for(jj=0;jj<bnc;jj++) { t0 += A->U->value[j*bs + jj*bnr+ii] * x[k + jj]; } w[ii] = t0; } } for(ii=0;ii<bnr;ii++) { t0 = 0.0; for(jj=0;jj<bnc;jj++) { t0 += A->WD->value[i*bs + jj*bnr+ii] * w[jj]; } x[i*bnr+ii] -= t0; } } lis_free(w); break; } break; } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_solvet_bsc" LIS_INT lis_matrix_solvet_bsc(LIS_MATRIX A, LIS_VECTOR B, LIS_VECTOR X, LIS_INT flag) { LIS_INT i,j,k,ii,jj,nr,bnr,bnc,bs; LIS_SCALAR 
t0,t1,t2; LIS_SCALAR *b,*x,*w; LIS_DEBUG_FUNC_IN; nr = A->nr; bnr = A->bnr; bnc = A->bnc; bs = A->bnr*A->bnc; b = B->value; x = X->value;
/* work in place on X: seed it with B, then scatter updates into x (b itself is not read again) */
lis_vector_copy(B,X); switch(flag) {
/* transposed forward solve: apply WD_i^T to x[i], then scatter -U_ij^T * x[i] into the columns it touches */
case LIS_MATRIX_LOWER: switch(bnr) { case 1: for(i=0;i<nr;i++) { x[i] = x[i] * A->WD->value[i]; for(j=A->U->bptr[i];j<A->U->bptr[i+1];j++) { jj = A->U->bindex[j]; x[jj] -= A->U->value[j] * x[i]; } } break; case 2: for(i=0;i<nr;i++) { t0 = A->WD->value[4*i+0] * x[i*2] + A->WD->value[4*i+1] * x[i*2+1]; t1 = A->WD->value[4*i+2] * x[i*2] + A->WD->value[4*i+3] * x[i*2+1]; x[i*2+0] = t0; x[i*2+1] = t1; for(j=A->U->bptr[i];j<A->U->bptr[i+1];j++) { jj = A->U->bindex[j]; x[jj*2+0] -= A->U->value[j*4+0] * t0 + A->U->value[j*4+1] * t1; x[jj*2+1] -= A->U->value[j*4+2] * t0 + A->U->value[j*4+3] * t1; } } break; case 3: for(i=0;i<nr;i++) { t0 = A->WD->value[9*i+0] * x[i*3] + A->WD->value[9*i+1] * x[i*3+1] + A->WD->value[9*i+2] * x[i*3+2]; t1 = A->WD->value[9*i+3] * x[i*3] + A->WD->value[9*i+4] * x[i*3+1] + A->WD->value[9*i+5] * x[i*3+2]; t2 = A->WD->value[9*i+6] * x[i*3] + A->WD->value[9*i+7] * x[i*3+1] + A->WD->value[9*i+8] * x[i*3+2]; x[i*3] = t0; x[i*3+1] = t1; x[i*3+2] = t2; for(j=A->U->bptr[i];j<A->U->bptr[i+1];j++) { jj = A->U->bindex[j]; x[jj*3+0] -= A->U->value[j*9+0] * t0 + A->U->value[j*9+1] * t1 + A->U->value[j*9+2] * t2; x[jj*3+1] -= A->U->value[j*9+3] * t0 + A->U->value[j*9+4] * t1 + A->U->value[j*9+5] * t2; x[jj*3+2] -= A->U->value[j*9+6] * t0 + A->U->value[j*9+7] * t1 + A->U->value[j*9+8] * t2; } } break;
/* generic block size.  NOTE(review): memcpy copies bnr elements out of w (allocated with bnc entries) — appears to assume square blocks (bnr == bnc); confirm */
default: w = (LIS_SCALAR *)lis_malloc(bnc*sizeof(LIS_SCALAR),"lis_matrix_solvet_bsc::w"); for(i=0;i<nr;i++) { for(jj=0;jj<bnc;jj++) { t0 = 0.0; for(ii=0;ii<bnr;ii++) { t0 += A->WD->value[i*bs + jj*bnr+ii] * x[i*bnr + ii]; } w[jj] = t0; } memcpy(&x[i*bnr],w,bnr*sizeof(LIS_SCALAR)); for(j=A->U->bptr[i];j<A->U->bptr[i+1];j++) { k = A->U->bindex[j] * bnc; for(jj=0;jj<bnc;jj++) { t0 = 0.0; for(ii=0;ii<bnr;ii++) { t0 += A->U->value[j*bs + jj*bnr+ii] * w[ii]; } x[k + jj] -= t0; } } } lis_free(w); break; } break;
/* transposed backward solve: reverse order, scattering through A->L */
case LIS_MATRIX_UPPER: switch(bnr) { case 1: for(i=nr-1;i>=0;i--) { x[i] = x[i] * A->WD->value[i]; for(j=A->L->bptr[i];j<A->L->bptr[i+1];j++) { jj = A->L->bindex[j]; x[jj] -= A->L->value[j] * x[i]; } } break; case 2: for(i=nr-1;i>=0;i--) { t0 = A->WD->value[4*i+0] * x[i*2] + A->WD->value[4*i+1] * x[i*2+1]; t1 = A->WD->value[4*i+2] * x[i*2] + A->WD->value[4*i+3] * x[i*2+1]; x[i*2+0] = t0; x[i*2+1] = t1; for(j=A->L->bptr[i];j<A->L->bptr[i+1];j++) { jj = A->L->bindex[j]; x[jj*2+0] -= A->L->value[j*4+0] * t0 + A->L->value[j*4+1] * t1; x[jj*2+1] -= A->L->value[j*4+2] * t0 + A->L->value[j*4+3] * t1; } } break; case 3: for(i=nr-1;i>=0;i--) { t0 = A->WD->value[9*i+0] * x[i*3] + A->WD->value[9*i+1] * x[i*3+1] + A->WD->value[9*i+2] * x[i*3+2]; t1 = A->WD->value[9*i+3] * x[i*3] + A->WD->value[9*i+4] * x[i*3+1] + A->WD->value[9*i+5] * x[i*3+2]; t2 = A->WD->value[9*i+6] * x[i*3] + A->WD->value[9*i+7] * x[i*3+1] + A->WD->value[9*i+8] * x[i*3+2]; x[i*3] = t0; x[i*3+1] = t1; x[i*3+2] = t2; for(j=A->L->bptr[i];j<A->L->bptr[i+1];j++) { jj = A->L->bindex[j]; x[jj*3+0] -= A->L->value[j*9+0] * t0 + A->L->value[j*9+1] * t1 + A->L->value[j*9+2] * t2; x[jj*3+1] -= A->L->value[j*9+3] * t0 + A->L->value[j*9+4] * t1 + A->L->value[j*9+5] * t2; x[jj*3+2] -= A->L->value[j*9+6] * t0 + A->L->value[j*9+7] * t1 + A->L->value[j*9+8] * t2; } } break; default: w = (LIS_SCALAR *)lis_malloc(bnr*sizeof(LIS_SCALAR),"lis_matrix_solvet_bsc::w"); for(i=nr-1;i>=0;i--) { for(jj=0;jj<bnc;jj++) { t0 = 0.0; for(ii=0;ii<bnr;ii++) { t0 += A->WD->value[i*bs + jj*bnr+ii] * x[i*bnr + ii]; } w[jj] = t0; } memcpy(&x[i*bnr],w,bnr*sizeof(LIS_SCALAR)); for(j=A->L->bptr[i];j<A->L->bptr[i+1];j++) { k = A->L->bindex[j] * bnc; for(jj=0;jj<bnc;jj++) { t0 = 0.0; for(ii=0;ii<bnr;ii++) { t0 += A->L->value[j*bs + jj*bnr+ii] * w[ii]; } x[k + jj] -= t0; } } } lis_free(w); break; } break;
/* transposed SSOR: forward sweep scattering through U, then backward sweep scattering through L */
case LIS_MATRIX_SSOR: switch(bnr) { case 1: for(i=0;i<nr;i++) { t0 = x[i] * A->WD->value[i]; for(j=A->U->bptr[i];j<A->U->bptr[i+1];j++) { jj = A->U->bindex[j]; x[jj] -= A->U->value[j] * t0; } } for(i=nr-1;i>=0;i--) { t0 = x[i] * A->WD->value[i]; x[i] = t0; for(j=A->L->bptr[i];j<A->L->bptr[i+1];j++) { jj = A->L->bindex[j]; x[jj] -= A->L->value[j] * t0; } } break; case 2: for(i=0;i<nr;i++) { t0 = A->WD->value[4*i+0] * x[i*2] + A->WD->value[4*i+1] * x[i*2+1]; t1 = A->WD->value[4*i+2] * x[i*2] + A->WD->value[4*i+3] * x[i*2+1]; for(j=A->U->bptr[i];j<A->U->bptr[i+1];j++) { jj = A->U->bindex[j]; x[jj*2+0] -= A->U->value[j*4+0] * t0 + A->U->value[j*4+1] * t1; x[jj*2+1] -= A->U->value[j*4+2] * t0 + A->U->value[j*4+3] * t1; } } for(i=nr-1;i>=0;i--) { t0 = A->WD->value[4*i+0] * x[i*2] + A->WD->value[4*i+1] * x[i*2+1]; t1 = A->WD->value[4*i+2] * x[i*2] + A->WD->value[4*i+3] * x[i*2+1]; x[i*2+0] = t0; x[i*2+1] = t1; for(j=A->L->bptr[i];j<A->L->bptr[i+1];j++) { jj = A->L->bindex[j]; x[jj*2+0] -= A->L->value[j*4+0] * t0 + A->L->value[j*4+1] * t1; x[jj*2+1] -= A->L->value[j*4+2] * t0 + A->L->value[j*4+3] * t1; } } break; case 3: for(i=0;i<nr;i++) { t0 = A->WD->value[9*i+0] * x[i*3] + A->WD->value[9*i+1] * x[i*3+1] + A->WD->value[9*i+2] * x[i*3+2]; t1 = A->WD->value[9*i+3] * x[i*3] + A->WD->value[9*i+4] * x[i*3+1] + A->WD->value[9*i+5] * x[i*3+2]; t2 = A->WD->value[9*i+6] * x[i*3] + A->WD->value[9*i+7] * x[i*3+1] + A->WD->value[9*i+8] * x[i*3+2]; for(j=A->U->bptr[i];j<A->U->bptr[i+1];j++) { jj = A->U->bindex[j]; x[jj*3+0] -= A->U->value[j*9+0] * t0 + A->U->value[j*9+1] * t1 + A->U->value[j*9+2] * t2; x[jj*3+1] -= A->U->value[j*9+3] * t0 + A->U->value[j*9+4] * t1 + A->U->value[j*9+5] * t2; x[jj*3+2] -= A->U->value[j*9+6] * t0 + A->U->value[j*9+7] * t1 + A->U->value[j*9+8] * t2; } } for(i=nr-1;i>=0;i--) { t0 = A->WD->value[9*i+0] * x[i*3] + A->WD->value[9*i+1] * x[i*3+1] + A->WD->value[9*i+2] * x[i*3+2]; t1 = A->WD->value[9*i+3] * x[i*3] + A->WD->value[9*i+4] * x[i*3+1] + A->WD->value[9*i+5] * x[i*3+2]; t2 = A->WD->value[9*i+6] * x[i*3] + A->WD->value[9*i+7] * x[i*3+1] + A->WD->value[9*i+8] * x[i*3+2]; x[i*3] = t0; x[i*3+1] = t1; x[i*3+2] = t2; for(j=A->L->bptr[i];j<A->L->bptr[i+1];j++) { jj = A->L->bindex[j]; x[jj*3+0] -= A->L->value[j*9+0] * t0 + A->L->value[j*9+1] * t1 + A->L->value[j*9+2] * t2; x[jj*3+1] -= A->L->value[j*9+3] * t0 + A->L->value[j*9+4] * t1 + A->L->value[j*9+5] * t2; x[jj*3+2] -= A->L->value[j*9+6] * t0 + A->L->value[j*9+7] * t1 + A->L->value[j*9+8] * t2; } } break; default: w = (LIS_SCALAR *)lis_malloc(bnc*sizeof(LIS_SCALAR),"lis_matrix_solvet_bsc::w"); for(i=0;i<nr;i++) { for(jj=0;jj<bnc;jj++) { t0 = 0.0; for(ii=0;ii<bnr;ii++) { t0 += A->WD->value[i*bs + jj*bnr+ii] * x[i*bnr + ii]; } w[jj] = t0; } for(j=A->U->bptr[i];j<A->U->bptr[i+1];j++) { k = A->U->bindex[j] * bnc; for(jj=0;jj<bnc;jj++) { t0 = 0.0; for(ii=0;ii<bnr;ii++) { t0 += A->U->value[j*bs + jj*bnr+ii] * w[ii]; } x[k + jj] -= t0; } } } for(i=nr-1;i>=0;i--) { for(jj=0;jj<bnc;jj++) { t0 = 0.0; for(ii=0;ii<bnr;ii++) { t0 += A->WD->value[i*bs + jj*bnr+ii] * x[i*bnr + ii]; } w[jj] = t0; } memcpy(&x[i*bnr],w,bnr*sizeof(LIS_SCALAR)); for(j=A->L->bptr[i];j<A->L->bptr[i+1];j++) { k = A->L->bindex[j] * bnc; for(jj=0;jj<bnc;jj++) { t0 = 0.0; for(ii=0;ii<bnr;ii++) { t0 += A->L->value[j*bs + jj*bnr+ii] * w[ii]; } x[k + jj] -= t0; } } } lis_free(w); break; } break; } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; }
/* ===== signalMachine.c ===== */
#include <getopt.h> #include <string.h> #include "signalMachineUtils.h" #include "pairwiseAligner.h" #include "fasta_handler.h" #define ESTIMATE_PARAMS 1 #define ASSIGNMENT_THRESHOLD 0.1 typedef enum { full = 0, variantCaller = 1, assignments = 2, both = 3 } OutputFormat; void usage() { fprintf(stderr, "\n\tsignalMachine - Align ONT ionic current to a reference sequence\n\n"); fprintf(stderr, "--help: Display this super useful message and exit\n"); fprintf(stderr, "--sm3Hdp, -d: Flag, enable HMM-HDP model\n"); fprintf(stderr, "--twoD, -e: Flag, use 2D workflow (enables complement alignment)\n"); fprintf(stderr, "-s: Output format, 0=full, 1=variantCaller, 2=assignments\n"); fprintf(stderr, "-o: Degernate, 0=C/E, 1=C/E/O, 2=A/I, 3=A/C/G/T, 4=J/T, 5=A/F"); fprintf(stderr, "-T: Template HMM model\n"); fprintf(stderr, "-C: Complement HMM model\n"); fprintf(stderr, "-L: Read (output) label\n"); fprintf(stderr, "-q: NanoporeRead (in npRead format)\n"); fprintf(stderr, "-f: Forward reference to align to as a flat file\n"); fprintf(stderr, "-b: Backward reference to align to as a flat file\n"); fprintf(stderr, "-p: Guide alignment file, containing CIGARs in EXONERATE format\n"); fprintf(stderr, "-u: Posteriors (output) file path, place to put the output\n"); fprintf(stderr, "-v: TemplateHDP file\n"); fprintf(stderr, "-w: Complement HDP file\n"); fprintf(stderr, "-t: Template expectations (HMM transitions) output location\n"); fprintf(stderr, "-c: Complement expectations (HMM transitions) output location\n"); fprintf(stderr, "-x: Diagonal expansion, how much to expand the dynamic programming envelope\n"); fprintf(stderr, "-D: Posterior probability threshold, keep aligned pairs with posterior prob >= this\n"); fprintf(stderr, "-m: Constranint trim, how much to trim the guide alignment anchors by\n"); fprintf(stderr, "-g: traceBackDiagonals, how many backward diagonals to calculate during traceback\n"); fprintf(stderr, "-r: boolean option if read is RNA\n\n"); } void 
printPairwiseAlignmentSummary(struct PairwiseAlignment *pA) { st_uglyf("contig 1: %s\n", pA->contig1); st_uglyf("strand 1: %lld\n", pA->strand1); st_uglyf("start 1: %lld\n", pA->start1); st_uglyf("end 1: %lld\n", pA->end1); st_uglyf("contig 2: %s\n", pA->contig2); st_uglyf("strand 2: %lld\n", pA->strand2); st_uglyf("start 2: %lld\n", pA->start2); st_uglyf("end 2: %lld\n", pA->end2); }
/* Map a trimmed reference index back to full reference coordinates; the non-(template,forward) arm mirrors the coordinate. */
static inline int64_t adjustReferenceCoordinate(int64_t x_i, int64_t referenceSeqOffset, int64_t referenceLengthInKmers, int64_t referenceLength, Strand strand, bool forward) { if ((strand == template && forward) || (strand == complement && !forward)) { return x_i + referenceSeqOffset; } else { return referenceLengthInKmers - (x_i + (referenceLength - referenceSeqOffset)); } }
/* Newly allocated reference kmer: a plain copy for (template,forward)/(complement,backward), otherwise the reverse complement.  Caller frees. */
static inline char *makeReferenceKmer(const char *k_i, Strand strand, bool forward) { if ((strand == template && forward) || (strand == complement && !forward)) { return stString_copy(k_i); } else { return stString_reverseComplementString(k_i); } }
/* Newly allocated, NUL-terminated copy of string[start .. start+kmerLength).  Caller frees. */
static inline char *kmerFromString(const char *string, int64_t start, int64_t kmerLength) { char *k_i = st_malloc((kmerLength + 1) * sizeof(char)); for (int64_t i = 0; i < kmerLength; i++) { k_i[i] = *(string + (start + i)); } k_i[kmerLength] = '\0'; return k_i; }
/* Mirror a within-kmer position when the kmer orientation was flipped. */
static inline int64_t adjustQueryPosition(int64_t unadjustedQueryPosition, int64_t kmerLength, Strand strand, bool forward) { if ((strand == template && forward) || (strand == complement && !forward)) { return unadjustedQueryPosition; } else { return (kmerLength - 1) - unadjustedQueryPosition; } }
/* Append one TSV row per aligned (reference kmer, event) pair to posteriorProbsFile — the "full" output format. */
void writePosteriorProbsFull(char *posteriorProbsFile, char *readLabel, StateMachine *sM, NanoporeReadAdjustmentParameters npp, double *events, char *target, bool forward, char *contig, int64_t eventSequenceOffset, int64_t referenceSequenceOffset, stList *alignedPairs, Strand strand, bool rna) {
// label for tsv output
char *strandLabel = strand == template ?
"t" : "c";
// open the file for output
FILE *fH = fopen(posteriorProbsFile, "a");
// get some lengths outside the loop
int64_t refLength = (int64_t )strlen(target); int64_t refLengthInKmers = refLength - sM->kmerLength;
// printf("%" PRIu64 "\n", stList_length(alignedPairs));
for(int64_t i = 0; i < stList_length(alignedPairs); i++) {
// grab the aligned pair
stIntTuple *aPair = stList_get(alignedPairs, i); if (stIntTuple_length(aPair) != 4) { st_errAbort("Aligned pair tuples should have length 4, this one has length %lld\n", stIntTuple_length(aPair)); }
// nucleotide sequence coordinate
int64_t x_i = stIntTuple_get(aPair, 1);
// adjust back to reference coordinates
int64_t x_adj = adjustReferenceCoordinate(x_i, referenceSequenceOffset, refLengthInKmers, refLength, strand, forward);
// event index, adjust to to entire event sequence coordinates (event sequence is trimmed during alignment)
int64_t y = stIntTuple_get(aPair, 2) + eventSequenceOffset;
// posterior probability
double p = ((double)stIntTuple_get(aPair, 0)) / PAIR_ALIGNMENT_PROB_1;
// path (variant-called) kmer
char *pathKmer = (char *)stIntTuple_get(aPair, 3); double eventMean = sequence_getEventMean(events, y); double eventNoise = sequence_getEventNoise(events, y); double eventDuration = sequence_getEventDuration(events, y);
// make the kmer string at the target index,
char *k_i = kmerFromString(target, x_i, sM->kmerLength); int64_t targetKmerIndex = kmer_id(pathKmer, sM->alphabet, sM->alphabetSize, sM->kmerLength);
// get the expected event mean amplitude and noise
double E_mean = sM->EMISSION_MATCH_MATRIX[(targetKmerIndex * MODEL_PARAMS)]; double E_noise = sM->EMISSION_MATCH_MATRIX[(targetKmerIndex * MODEL_PARAMS + 2)]; double scaled_Emean = E_mean * npp.scale + npp.shift; double scaled_Enoise = E_noise * npp.scale_sd; double descaledEventMean = emissions_signal_descaleEventMean_JordanStyle(eventMean, E_mean, npp.scale, npp.shift, npp.var);
// make reference kmer
char *refKmer = makeReferenceKmer(k_i, strand, forward);
/* NOTE(review): when rna is set, this reassignment leaks the string allocated by makeReferenceKmer above */
if (rna){ refKmer = stString_reverseComplementString(refKmer); }
// write to file
fprintf(fH, "%s\t%"PRId64"\t%s\t%s\t%s\t%"PRId64"\t%f\t%f\t%f\t%s\t%f\t%f\t%f\t%f\t%f\t%s\n", contig, x_adj, refKmer, readLabel, strandLabel, y, eventMean, eventNoise, eventDuration, k_i, scaled_Emean, scaled_Enoise, p, descaledEventMean, E_mean, pathKmer);
// cleanup
free(k_i); free(refKmer); } fclose(fH); }
/* Append variant-caller rows: only aligned pairs whose reference kmer contains a degenerate position are reported. */
void writePosteriorProbsVC(char *posteriorProbsFile, char *readLabel, StateMachine *sM, char *target, bool forward, int64_t eventSequenceOffset, int64_t referenceSequenceOffset, stList *alignedPairs, Strand strand, double posteriorScore, bool rna, char *contig) {
// label for tsv output
char *strandLabel = strand == template ? "t" : "c";
/* forward is toggled only while deriving forwardLabel, then toggled back */
if (rna || strand != template){ forward = !forward; } char *forwardLabel = forward ? "forward" : "backward"; if (rna || strand != template){ forward = !forward; }
// open the file for output
FILE *fH = fopen(posteriorProbsFile, "a");
// get some lengths outside the loop
int64_t refLength = (int64_t )strlen(target); int64_t refLengthInKmers = refLength - sM->kmerLength; for(int64_t i = 0; i < stList_length(alignedPairs); i++) {
// grab the aligned pair
stIntTuple *aPair = stList_get(alignedPairs, i); if (stIntTuple_length(aPair) != 4) { st_errAbort("Aligned pair tuples should have length 4, this one has length %lld\n", stIntTuple_length(aPair)); }
// trimmed nucleotide sequence coordinate
int64_t x_i = stIntTuple_get(aPair, 1);
// make the kmer string at the target index,
char *k_i = kmerFromString(target, x_i, sM->kmerLength); char *refKmer = makeReferenceKmer(k_i, strand, forward); stList *queryPositions = path_findDegeneratePositions(refKmer, sM->kmerLength);
// check if this aligned pair reports on a query position
if (stList_length(queryPositions) == 0) { free(k_i); free(refKmer); stList_destruct(queryPositions); continue; }
// adjust back to reference coordinates
int64_t x_adj = adjustReferenceCoordinate(x_i,
referenceSequenceOffset, refLengthInKmers, refLength, strand, forward);
// event index, adjust to to entire event sequence coordinates (event sequence is trimmed during alignment)
int64_t y = stIntTuple_get(aPair, 2) + eventSequenceOffset;
// posterior probability
double p = ((double)stIntTuple_get(aPair, 0)) / PAIR_ALIGNMENT_PROB_1;
// path (variant-called) kmer
char *pathKmer = (char *)stIntTuple_get(aPair, 3);
// get the base that was called in this aligned pair
int64_t nQueryPositions = stList_length(queryPositions); for (int64_t q = 0; q < nQueryPositions; q++) {
// position in the reference kmer eg. AGXGG -> 2
int64_t unadjustedQueryPosition = *(int64_t *)stList_get(queryPositions, q);
// position in the pathKmer
int64_t queryPosition = adjustQueryPosition(unadjustedQueryPosition, sM->kmerLength, strand, forward);
// called base
char base = pathKmer[queryPosition];
// position in the reference we're reporting on
int64_t reportPosition = x_adj + unadjustedQueryPosition; fprintf(fH, "%"PRId64"\t%"PRId64"\t%c\t%f\t%s\t%s\t%s\t%f\t%s\n", y, reportPosition, base, p, strandLabel, forwardLabel, readLabel, posteriorScore, contig); } free(k_i); free(refKmer); stList_destruct(queryPositions); } fclose(fH); }
/* Append kmer-to-event assignment rows (kmer, strand, descaled event mean, posterior) — the "assignments" format. */
void writeAssignments(char *posteriorProbsFile, StateMachine *sM, double *events, int64_t eventSequenceOffset, NanoporeReadAdjustmentParameters npp, stList *alignedPairs, Strand strand) {
// label for tsv output
char *strandLabel = strand == template ?
"t" : "c";
// open the file for output
FILE *fH = fopen(posteriorProbsFile, "a"); for(int64_t i = 0; i < stList_length(alignedPairs); i++) {
// grab the aligned pair
stIntTuple *aPair = stList_get(alignedPairs, i); if (stIntTuple_length(aPair) != 4) { st_errAbort("Aligned pair tuples should have length 4, this one has length %lld\n", stIntTuple_length(aPair)); }
// event index, adjust to to entire event sequence coordinates (event sequence is trimmed during alignment)
int64_t y = stIntTuple_get(aPair, 2) + eventSequenceOffset;
// posterior probability
double p = ((double)stIntTuple_get(aPair, 0)) / PAIR_ALIGNMENT_PROB_1;
// path (variant-called) kmer
char *pathKmer = (char *)stIntTuple_get(aPair, 3);
// get the observed event mean
double eventMean = sequence_getEventMean(events, y);
// get the kmer index
int64_t targetKmerIndex = kmer_id(pathKmer, sM->alphabet, sM->alphabetSize, sM->kmerLength);
// get the expected mean from the model
double E_mean = sM->EMISSION_MATCH_MATRIX[(targetKmerIndex * MODEL_PARAMS)];
// descale the observed mean
double descaledEventMean = emissions_signal_descaleEventMean_JordanStyle(eventMean, E_mean, npp.scale, npp.shift, npp.var); fprintf(fH, "%s\t%s\t%lf\t%lf\n", pathKmer, strandLabel, descaledEventMean, p); } fclose(fH); }
/* Dispatch one strand's aligned pairs to the writer(s) selected by fmt; 'both' writes full rows to posteriorProbsFile and VC rows to posteriorProbsFile2. */
void outputAlignment( OutputFormat fmt, char *posteriorProbsFile, char *readLabel, StateMachine *sM, NanoporeReadAdjustmentParameters npp, double *events, char *target, bool forward, char *contig, int64_t eventSequenceOffset, int64_t referenceSequenceOffset, stList *alignedPairs, double posteriorScore, Strand strand, bool rna, char *posteriorProbsFile2) { switch (fmt) { case full: writePosteriorProbsFull(posteriorProbsFile, readLabel, sM, npp, events, target, forward, contig, eventSequenceOffset, referenceSequenceOffset, alignedPairs, strand, rna); break; case variantCaller: writePosteriorProbsVC(posteriorProbsFile, readLabel, sM, target, forward, eventSequenceOffset, referenceSequenceOffset, alignedPairs, strand,
posteriorScore, rna, contig); break; case assignments: writeAssignments(posteriorProbsFile, sM, events, eventSequenceOffset, npp, alignedPairs, strand); break; case both: writePosteriorProbsFull(posteriorProbsFile, readLabel, sM, npp, events, target, forward, contig, eventSequenceOffset, referenceSequenceOffset, alignedPairs, strand, rna); writePosteriorProbsVC(posteriorProbsFile2, readLabel, sM, target, forward, eventSequenceOffset, referenceSequenceOffset, alignedPairs, strand, posteriorScore, rna, contig); break; default: fprintf(stderr, "signalAlign - No valid output format provided\n"); return; } }
/* Construct a StateMachine from modelFile: threeState uses the descaled 3-state HMM, threeStateHdp attaches the given NanoporeHDP.  Aborts on a bad type or a missing model file. */
StateMachine *buildStateMachine(const char *modelFile, NanoporeReadAdjustmentParameters npp, StateMachineType type, NanoporeHDP *nHdp) { if ((type != threeState) && (type != threeStateHdp)) { st_errAbort("signalAlign - incompatible stateMachine type request"); } if (!stFile_exists(modelFile)) { st_errAbort("signalAlign - ERROR: couldn't find model file here: %s\n", modelFile); } if (type == threeState) { StateMachine *sM = getStateMachine3_descaled(modelFile, npp, !ESTIMATE_PARAMS); return sM; } if (type == threeStateHdp) { StateMachine *sM = getHdpStateMachine(nHdp, modelFile, npp); return sM; } else { st_errAbort("signalAlign - ERROR: buildStateMachine, didn't get correct input\n"); }
/* not reachable: both valid types returned above and st_errAbort aborts */
return 0; }
/* Load HMM transition expectations from hmmFile into sM (threeState/threeStateHdp only). */
inline void loadHmmRoutine(const char *hmmFile, StateMachine *sM, StateMachineType type, Hmm *expectations) { if ((type != threeState) && (type != threeStateHdp)) { st_errAbort("LoadSignalHmm : unupported stateMachineType"); } hmmContinuous_loadSignalHmm(hmmFile, sM, type, expectations); }
/* Thin wrapper around buildStateMachine; the separate .hmm load step is retired because the model file now carries both transitions and the event model. */
StateMachine *buildStateMachineAndLoadHmm(const char *modelFile, NanoporeReadAdjustmentParameters npp, StateMachineType type, NanoporeHDP *nHdp) { StateMachine *sM = buildStateMachine(modelFile, npp, type, nHdp);
// commented out because now the model file has the transitions and the event model, so no longer need to
// load the .hmm into the stateMachine
//if (HmmFile != NULL) {
// loadHmmRoutine(HmmFile, sM, sM->type, hmmExpectations);
//}
return sM; }
/* Refresh an HDP from assignment expectations: load, run Gibbs sampling, finalize the distributions, and serialize to nHdpOutFile. */
void updateHdpFromAssignments(const char *nHdpFile, const char *expectationsFile, const char *nHdpOutFile) { NanoporeHDP *nHdp = deserialize_nhdp(nHdpFile); Hmm *hdpHmm = hdpHmm_loadFromFile(expectationsFile, threeStateHdp, nHdp);
/* NOTE(review): hdpHmm is destructed immediately after loading — presumably hdpHmm_loadFromFile feeds the assignments into nHdp as a side effect; confirm */
hmmContinuous_destruct(hdpHmm, hdpHmm->type); fprintf(stderr, "signalAlign - Running Gibbs on HDP\n"); execute_nhdp_gibbs_sampling(nHdp, 10000, 100000, 100, FALSE); finalize_nhdp_distributions(nHdp); fprintf(stderr, "signalAlign - Serializing HDP to %s\n", nHdpOutFile); serialize_nhdp(nHdp, nHdpOutFile); destroy_nanopore_hdp(nHdp); }
/* Sum of the (scaled) posterior probabilities over all aligned pairs. */
static double totalScore(stList *alignedPairs) { double score = 0.0; for (int64_t i = 0; i < stList_length(alignedPairs); i++) { stIntTuple *aPair = stList_get(alignedPairs, i); score += stIntTuple_get(aPair, 0); } return score; } double scoreByPosteriorProbabilityIgnoringGaps(stList *alignedPairs) { /* * Gives the average posterior match probability per base of the two sequences, ignoring indels.
*/ return 100.0 * totalScore(alignedPairs) / ((double) stList_length(alignedPairs) * PAIR_ALIGNMENT_PROB_1); } stList *performSignalAlignment(StateMachine *sM, Sequence *eventSequence, int64_t *eventMap, int64_t mapOffset, char *target, PairwiseAlignmentParameters *p, stList *unmappedAnchors, DegenerateType degenerate) { if ((sM->type != threeState) && (sM->type != threeStateHdp)) { st_errAbort("signalAlign - You're trying to do the wrong king of alignment"); } int64_t lX = sequence_correctSeqLength(strlen(target), kmer, sM->kmerLength); // remap anchor pairs stList *filteredRemappedAnchors = signalUtils_getRemappedAnchorPairs(unmappedAnchors, eventMap, mapOffset); // make sequences Sequence *sX = sequence_constructReferenceKmerSequence(lX, target, sequence_getKmer, sequence_sliceNucleotideSequence, degenerate, kmer); // do alignment stList *alignedPairs = getAlignedPairsUsingAnchors(sM, sX, eventSequence, filteredRemappedAnchors, p, diagonalCalculationPosteriorMatchProbs, 1, 1); sequence_destruct(sX); return alignedPairs; } Sequence *makeEventSequenceFromPairwiseAlignment(double *events, int64_t queryStart, int64_t queryEnd, int64_t *eventMap) { // find the event mapped to the start and end of the 2D read alignment int64_t startIdx = eventMap[queryStart]; // We end up indexing past length of eventMap if we map to final base int64_t endIdx = eventMap[queryEnd-1]; // move the event pointer to the first event size_t elementSize = sizeof(double); void *elements = (char *)events + ((startIdx * NB_EVENT_PARAMS) * elementSize); // make the eventSequence Sequence *eventS = sequence_constructEventSequence(endIdx - startIdx, elements); return eventS; } void getSignalExpectations(StateMachine *sM, Hmm *hmmExpectations, Sequence *eventSequence, int64_t *eventMap, int64_t mapOffset, char *trainingTarget, PairwiseAlignmentParameters *p, stList *unmappedAnchors, DegenerateType degenerate) { // correct sequence length int64_t lX = sequence_correctSeqLength(strlen(trainingTarget), 
event, sM->kmerLength); // remap the anchors stList *filteredRemappedAnchors = signalUtils_getRemappedAnchorPairs(unmappedAnchors, eventMap, mapOffset); Sequence *target = sequence_constructKmerSequence( lX, trainingTarget, sequence_getKmer, sequence_sliceNucleotideSequence, (degenerate == canonicalVariants ? CANONICAL_NUCLEOTIDES : (degenerate == cytosineMethylation2 ? TWO_CYTOSINES : THREE_CYTOSINES)), (degenerate == canonicalVariants ? NB_CANONICAL_BASES : (degenerate == cytosineMethylation2 ? (NB_CYTOSINE_OPTIONS - 1) : NB_CYTOSINE_OPTIONS)), kmer); getExpectationsUsingAnchors(sM, hmmExpectations, target, eventSequence, filteredRemappedAnchors, p, diagonalCalculation_Expectations, 1, 1); } int main(int argc, char *argv[]) { StateMachineType sMtype = threeState; int64_t j = 0; int64_t diagExpansion = 50; double threshold = 0.01; int64_t constraintTrim = 14; int64_t traceBackDiagonals = 50; int64_t degenerate; int64_t outFmt; bool twoD = FALSE; bool rna = FALSE; char *templateModelFile = NULL; char *complementModelFile = NULL; char *readLabel = NULL; char *npReadFile = NULL; char *exonerateCigarFile= NULL; char *posteriorProbsFile = NULL; char *templateExpectationsFile = NULL; char *complementExpectationsFile = NULL; char *templateHdp = NULL; char *complementHdp = NULL; char *forward_reference_path = NULL; char *backward_reference_path = NULL; char *posteriorProbsFile2 = NULL; const char *sequence_name = NULL; int key; while (1) { static struct option long_options[] = { {"help", no_argument, 0, 'h'}, {"sm3Hdp", no_argument, 0, 'd'}, {"sparse_output", no_argument, 0, 's'}, {"twoD", no_argument, 0, 'e'}, {"rna", no_argument, 0, 'r'}, {"degenerate", required_argument, 0, 'o'}, {"templateModel", required_argument, 0, 'T'}, {"complementModel", required_argument, 0, 'C'}, {"readLabel", required_argument, 0, 'L'}, {"npRead", required_argument, 0, 'q'}, {"exonerate_cigar_file", required_argument, 0, 'p'}, {"posteriors", required_argument, 0, 'u'}, {"templateHdp", 
required_argument, 0, 'v'}, {"complementHdp", required_argument, 0, 'w'}, {"templateExpectations", required_argument, 0, 't'}, {"complementExpectations", required_argument, 0, 'c'}, {"diagonalExpansion", required_argument, 0, 'x'}, {"threshold", required_argument, 0, 'D'}, {"constraintTrim", required_argument, 0, 'm'}, {"forward_reference_path", required_argument, 0, 'f'}, {"backward_reference_path", optional_argument, 0, 'b'}, {"sequence_name", required_argument, 0, 'n'}, {"traceBackDiagonals", optional_argument, 0, 'g'}, {"posteriorProbsFile2", optional_argument, 0, 'i'}, {0, 0, 0, 0} }; int option_index = 0; key = getopt_long(argc, argv, "h:d:e:s:r:o:a:T:C:L:q:f:b:g:i:p:u:v:w:t:c:x:D:m:n:", long_options, &option_index); if (key == -1) { //usage(); break; } switch (key) { case 'h': usage(); return 1; case 's': j = sscanf(optarg, "%" PRIi64 "", &outFmt); assert (j == 1); break; case 'e': twoD = TRUE; break; case 'r': rna = TRUE; break; case 'o': j = sscanf(optarg, "%" PRIi64 "", &degenerate); assert (j == 1); break; case 'd': sMtype = threeStateHdp; break; case 'T': templateModelFile = stString_copy(optarg); break; case 'C': complementModelFile = stString_copy(optarg); break; case 'L': readLabel = stString_copy(optarg); break; case 'q': npReadFile = stString_copy(optarg); break; case 'p': exonerateCigarFile = stString_copy(optarg); break; case 'u': posteriorProbsFile = stString_copy(optarg); break; case 't': templateExpectationsFile = stString_copy(optarg); break; case 'c': complementExpectationsFile = stString_copy(optarg); break; case 'v': templateHdp = stString_copy(optarg); break; case 'w': complementHdp = stString_copy(optarg); break; case 'x': j = sscanf(optarg, "%" PRIi64 "", &diagExpansion); assert (j == 1); assert (diagExpansion >= 0); diagExpansion = (int64_t)diagExpansion; break; case 'D': j = sscanf(optarg, "%lf", &threshold); assert (j == 1); assert (threshold >= 0); break; case 'm': j = sscanf(optarg, "%" PRIi64 "", &constraintTrim); assert (j == 1); 
assert (constraintTrim >= 0); constraintTrim = (int64_t)constraintTrim; break; case 'f': forward_reference_path = stString_copy(optarg); break; case 'b': backward_reference_path = stString_copy(optarg); break; case 'n': sequence_name = stString_copy(optarg); break; case 'g': j = sscanf(optarg, "%" PRIi64 "", &traceBackDiagonals); assert (j == 1); assert (traceBackDiagonals >= 0); traceBackDiagonals = (int64_t)traceBackDiagonals; break; case 'i': posteriorProbsFile2 = stString_copy(optarg); break; default: usage(); return 1; } } (void) j; // silence unused variable warning. // check for models if ((templateModelFile == NULL) || (complementModelFile == NULL && twoD)) { st_errAbort("Missing model files, exiting\n"); return 1; } if ((outFmt == 3) & (posteriorProbsFile2 == NULL)) { st_errAbort("Must pass in posteriorProbsFile2 if using 'both' outFmt\n"); return 1; } if (exonerateCigarFile == NULL) { st_errAbort("[signalMachine]ERROR: Need to provide input guide alignments, exiting\n"); return 1; } // Anchors // // get pairwise alignment from stdin, in exonerate CIGAR format //FILE *fileHandleIn = stdin; if (!stFile_exists(exonerateCigarFile)) { st_errAbort("[signalMachine]ERROR: Didn't find input alignment file, looked %s\n", exonerateCigarFile); } else { st_uglyf("[signalMachine]NOTICE: Using guide alignments from %s\n", exonerateCigarFile); } FILE *fileHandleIn = fopen(exonerateCigarFile, "r"); // parse input CIGAR to get anchors struct PairwiseAlignment *pA; pA = cigarRead(fileHandleIn); fclose(fileHandleIn); // Alignment Parameters // // make the pairwise alignment parameters PairwiseAlignmentParameters *p = pairwiseAlignmentBandingParameters_construct(); p->threshold = threshold; p->constraintDiagonalTrim = constraintTrim; p->diagonalExpansion = diagExpansion; p->traceBackDiagonals = traceBackDiagonals; // HDP routines // // load HDPs NanoporeHDP *nHdpT, *nHdpC; // check if ((templateHdp != NULL) || (complementHdp != NULL)) { if ((templateHdp == NULL) || 
(complementHdp == NULL && twoD)) { st_errAbort("Need to have template and complement HDPs"); } if (sMtype != threeStateHdp) { sMtype = threeStateHdp; fprintf(stderr, "[signalAlign] - Using threeStateHdp stateMachine since you pass in an HDP file\n"); } else { fprintf(stderr, "[signalAlign] - using NanoporeHDPs\n"); } } #pragma omp parallel sections { { nHdpT = (templateHdp == NULL) ? NULL : deserialize_nhdp(templateHdp); } #pragma omp section { nHdpC = (complementHdp == NULL) ? NULL : deserialize_nhdp(complementHdp); } } // Nanopore Read // // load nanopore read NanoporeRead *npRead = nanopore_loadNanoporeReadFromFile(npReadFile); if (rna){ int64_t tmp = pA->start2; pA->start2 = npRead->templateReadLength - pA->end2; pA->end2 = npRead->templateReadLength - tmp; } ReferenceSequence *R; R = fastaHandler_ReferenceSequenceConstructFull(forward_reference_path, backward_reference_path, pA, sequence_name, rna); // constrain the event sequence to the positions given by the guide alignment Sequence *tEventSequence = makeEventSequenceFromPairwiseAlignment(npRead->templateEvents, pA->start2, pA->end2, (twoD ? npRead->templateEventMap : npRead->templateStrandEventMap)); Sequence *cEventSequence; if (twoD) { cEventSequence = makeEventSequenceFromPairwiseAlignment(npRead->complementEvents, pA->start2, pA->end2, npRead->complementEventMap); } else { cEventSequence = NULL; } // the aligned pairs start at (0,0) so we need to correct them based on the guide alignment later. // record the pre-zeroed alignment start and end coordinates here // for the events: int64_t tCoordinateShift = twoD ? npRead->templateEventMap[pA->start2] : npRead->templateStrandEventMap[pA->start2]; int64_t cCoordinateShift = twoD ? npRead->complementEventMap[pA->start2] : 0; // and for the reference: int64_t rCoordinateShift_t = pA->start1; int64_t rCoordinateShift_c = twoD ? 
pA->end1 : 0; bool forward = pA->strand1; // keep track of whether this is a forward mapped read or not stList *anchorPairs = signalUtils_guideAlignmentToRebasedAnchorPairs(pA, p); // pA gets modified here, no turning back if ((templateExpectationsFile != NULL) || (complementExpectationsFile != NULL)) { st_uglyf("Starting expectations routine\n"); // Expectation Routine // StateMachine *sMt = buildStateMachine(templateModelFile, npRead->templateParams, sMtype, nHdpT); // temporary way to 'turn off' estimates if I want to if (ESTIMATE_PARAMS) { //todo remove threshold, not used signalUtils_estimateNanoporeParams(sMt, npRead, &npRead->templateParams, ASSIGNMENT_THRESHOLD, signalUtils_templateOneDAssignmentsFromRead, nanopore_adjustTemplateEventsForDrift); } // make empty HMM to collect expectations Hmm *templateExpectations = hmmContinuous_getExpectationsHmm(sMt, p->threshold, 0.001, 0.001); // get expectations for template fprintf(stderr, "signalAlign - getting expectations for template\n"); getSignalExpectations(sMt, templateExpectations, tEventSequence, (twoD ? 
npRead->templateEventMap : npRead->templateStrandEventMap), pA->start2, R->getTemplateTargetSequence(R), p, anchorPairs, degenerate); if (sMtype == threeStateHdp) { fprintf(stderr, "signalAlign - got %" PRId64 " template HDP assignments\n", hmmContinuous_howManyAssignments(templateExpectations)); } // write to file fprintf(stderr, "signalAlign - writing expectations to file: %s\n", templateExpectationsFile); hmmContinuous_writeToFile(templateExpectationsFile, templateExpectations, sMtype); // get expectations for the complement StateMachine *sMc; Hmm *complementExpectations = NULL; if (twoD) { fprintf(stderr, "signalAlign - getting expectations for complement\n"); sMc = buildStateMachine(complementModelFile, npRead->complementParams, sMtype, nHdpC); if (ESTIMATE_PARAMS) { signalUtils_estimateNanoporeParams(sMc, npRead, &npRead->complementParams, ASSIGNMENT_THRESHOLD, signalUtils_complementOneDAssignmentsFromRead, nanopore_adjustComplementEventsForDrift); } complementExpectations = hmmContinuous_getExpectationsHmm(sMc, p->threshold, 0.001, 0.001); getSignalExpectations(sMc, complementExpectations, cEventSequence, npRead->complementEventMap, pA->start2, R->getComplementTargetSequence(R), p, anchorPairs, degenerate); if (sMtype == threeStateHdp) { fprintf(stderr, "signalAlign - got %"PRId64"complement HDP assignments\n", hmmContinuous_howManyAssignments(complementExpectations)); } // write to file fprintf(stderr, "signalAlign - writing expectations to file: %s\n", complementExpectationsFile); hmmContinuous_writeToFile(complementExpectationsFile, complementExpectations, sMtype); } stateMachine_destruct(sMt); signalUtils_ReferenceSequenceDestruct(R); hmmContinuous_destruct(templateExpectations, sMtype); nanopore_nanoporeReadDestruct(npRead); sequence_destruct(tEventSequence); pairwiseAlignmentBandingParameters_destruct(p); destructPairwiseAlignment(pA); stList_destruct(anchorPairs); if (twoD) { stateMachine_destruct(sMc); sequence_destruct(cEventSequence); 
hmmContinuous_destruct(complementExpectations, sMtype); } fprintf(stderr, "signalAlign - SUCCESS: finished alignment of query %s, exiting\n", readLabel); return 0; } else { // Alignment Procedure // // Template alignment fprintf(stderr, "signalAlign - starting template alignment\n"); // make template stateMachine StateMachine *sMt = buildStateMachine(templateModelFile, npRead->templateParams, sMtype, nHdpT); // re-estimate the nanoporeAdjustment parameters if (ESTIMATE_PARAMS) { signalUtils_estimateNanoporeParams(sMt, npRead, &npRead->templateParams, ASSIGNMENT_THRESHOLD, signalUtils_templateOneDAssignmentsFromRead, nanopore_adjustTemplateEventsForDrift); } if (sMtype == threeStateHdp) { stateMachine3_setModelToHdpExpectedValues(sMt, nHdpT); } stList *templateAlignedPairs = performSignalAlignment(sMt, tEventSequence, (twoD ? npRead->templateEventMap : npRead->templateStrandEventMap), pA->start2, R->getTemplateTargetSequence(R), p, anchorPairs, degenerate); double templatePosteriorScore = scoreByPosteriorProbabilityIgnoringGaps(templateAlignedPairs); // sort stList_sort(templateAlignedPairs, sortByXPlusYCoordinate2); //Ensure the coordinates are increasing // write to file if (posteriorProbsFile != NULL) { outputAlignment(outFmt, posteriorProbsFile, readLabel, sMt, npRead->templateParams, npRead->templateEvents, R->getTemplateTargetSequence(R), forward, pA->contig1, tCoordinateShift, rCoordinateShift_t, templateAlignedPairs, templatePosteriorScore,template, rna, posteriorProbsFile2); } stList *complementAlignedPairs; double complementPosteriorScore = 0.0; StateMachine *sMc; if (twoD) { // Complement alignment fprintf(stderr, "signalAlign - starting complement alignment\n"); sMc = buildStateMachine(complementModelFile, npRead->complementParams, sMtype, nHdpC); if (ESTIMATE_PARAMS) { signalUtils_estimateNanoporeParams(sMc, npRead, &npRead->complementParams, ASSIGNMENT_THRESHOLD, signalUtils_complementOneDAssignmentsFromRead, nanopore_adjustComplementEventsForDrift); } 
if (sMtype == threeStateHdp) { stateMachine3_setModelToHdpExpectedValues(sMc, nHdpC); } complementAlignedPairs = performSignalAlignment(sMc, cEventSequence, npRead->complementEventMap, pA->start2, R->getComplementTargetSequence(R), p, anchorPairs, degenerate); complementPosteriorScore = scoreByPosteriorProbabilityIgnoringGaps(complementAlignedPairs); // sort stList_sort(complementAlignedPairs, sortByXPlusYCoordinate2); //Ensure the coordinates are increasing // write to file if (posteriorProbsFile != NULL) { outputAlignment(outFmt, posteriorProbsFile, readLabel, sMc, npRead->complementParams, npRead->complementEvents, R->getComplementTargetSequence(R), forward, pA->contig1, cCoordinateShift, rCoordinateShift_c, complementAlignedPairs, complementPosteriorScore, complement, rna, posteriorProbsFile2); } } fprintf(stdout, "%s %"PRId64"\t%"PRId64"(%f)\t", readLabel, stList_length(anchorPairs), stList_length(templateAlignedPairs), templatePosteriorScore); if (twoD) { fprintf(stdout, "%"PRId64"(%f)\n", stList_length(complementAlignedPairs), complementPosteriorScore); } else { fprintf(stdout, "\n"); } // final alignment clean up destructPairwiseAlignment(pA); nanopore_nanoporeReadDestruct(npRead); signalUtils_ReferenceSequenceDestruct(R); stateMachine_destruct(sMt); sequence_destruct(tEventSequence); stList_destruct(templateAlignedPairs); if (twoD) { stateMachine_destruct(sMc); sequence_destruct(cEventSequence); stList_destruct(complementAlignedPairs); } fprintf(stderr, "signalAlign - SUCCESS: finished alignment of query %s, exiting\n", readLabel); } return 0; }
fci_contract.c
/* * Full CI */ #include <stdlib.h> #include <stdint.h> #include <string.h> #include <math.h> #include <assert.h> //#include <omp.h> #include "config.h" #include "vhf/fblas.h" #include "np_helper/np_helper.h" #include "fci.h" // for (16e,16o) ~ 11 MB buffer = 120 * 12870 * 8 #define STRB_BLKSIZE 112 /* * CPU timing of single thread can be estimated: * na*nb*nnorb*8(bytes)*5 / (mem_freq*64 (*2 if dual-channel mem)) * + na*nb*nnorb**2 (*2 for spin1, *1 for spin0) * / (CPU_freq (*4 for SSE3 blas, or *6-8 for AVX blas)) * where the 5 times memory accesses are 3 in prog_a_t1, prog0_b_t1, * spread_b_t1 and 2 in spread_a_t1 * * multi threads * na*nb*nnorb*8(bytes)*2 / (mem_freq*64 (*2 if dual-channel mem)) due to single thread * + na*nb*nnorb*8(bytes)*3 / max_mem_bandwidth due to N-thread * + na*nb*nnorb**2 (*2 for spin1, *1 for spin0) * / (CPU_freq (*4 for SSE3 blas, or *6-8 for AVX blas)) / num_threads */ /* *********************************************************** * * Need the permutation symmetry * h2e[i,j,k,l] = h2e[j,i,k,l] = h2e[i,j,l,k] = h2e[j,i,l,k] * *********************************************************** */ /* * optimize for OpenMP, to reduce memory/CPU data transfer * add software prefetch, it's especially important for OpenMP */ /* * For given stra_id, spread alpah-strings (which can propagate to stra_id) * into t1[:nstrb,nnorb] * str1-of-alpha -> create/annihilate -> str0-of-alpha * ci0[:nstra,:nstrb] is contiguous in beta-strings * bcount control the number of beta strings to be calculated. 
 * for spin=0 system, only lower triangle of the intermediate ci vector
 * needs to be calculated
 */
/*
 * Gather contributions of all alpha strings that connect to stra_id.
 * For each link entry (ia, str1, sign) of stra_id, the slice
 * ci0[str1, strb_id:strb_id+bcount] is accumulated (with the permutation
 * sign) into t1[ia, 0:bcount].
 * NOTE(review): EXTRACT_IA/EXTRACT_ADDR/EXTRACT_SIGN are macros from
 * fci.h (not visible here); an entry with sign == 0 is treated as the
 * end of the link table (break, not skip) -- confirm against
 * FCIcompress_link_tril.
 */
void FCIprog_a_t1(double *ci0, double *t1,
                  int bcount, int stra_id, int strb_id,
                  int norb, int nstrb, int nlinka, _LinkTrilT *clink_indexa)
{
        ci0 += strb_id;  /* point at the first beta string of this block */
        int j, k, ia, sign;
        size_t str1;
        const _LinkTrilT *tab = clink_indexa + stra_id * nlinka;
        double *pt1, *pci;

        for (j = 0; j < nlinka; j++) {
                ia   = EXTRACT_IA  (tab[j]);  /* compressed pair index tril(i,a) */
                str1 = EXTRACT_ADDR(tab[j]);  /* address of the linked alpha string */
                sign = EXTRACT_SIGN(tab[j]);  /* +1 / -1; 0 marks end of table */
                pt1 = t1 + ia*bcount;
                pci = ci0 + str1*nstrb;
                if (sign == 0) {
                        break;
                } else if (sign > 0) {
                        for (k = 0; k < bcount; k++) {
                                pt1[k] += pci[k];
                        }
                } else if (sign < 0) {
                        for (k = 0; k < bcount; k++) {
                                pt1[k] -= pci[k];
                        }
                }
        }
}

/*
 * For given stra_id, spread all beta-strings into t1[:nstrb,nnorb]
 * all str0-of-beta -> create/annihilate -> str1-of-beta
 * ci0[:nstra,:nstrb] is contiguous in beta-strings
 * bcount control the number of beta strings to be calculated.
 * for spin=0 system, only lower triangle of the intermediate ci vector
 * needs to be calculated
 */
void FCIprog_b_t1(double *ci0, double *t1,
                  int bcount, int stra_id, int strb_id,
                  int norb, int nstrb, int nlinkb, _LinkTrilT *clink_indexb)
{
        int j, ia, str0, str1, sign;
        const _LinkTrilT *tab = clink_indexb + strb_id * nlinkb;
        /* row of ci0 for this alpha string; size_t cast keeps the offset
         * arithmetic out of int range for large CI vectors */
        double *pci = ci0 + stra_id*(size_t)nstrb;

        for (str0 = 0; str0 < bcount; str0++) {
                for (j = 0; j < nlinkb; j++) {
                        ia   = EXTRACT_IA  (tab[j]);
                        str1 = EXTRACT_ADDR(tab[j]);
                        sign = EXTRACT_SIGN(tab[j]);
                        if (sign == 0) {
                                break;  /* end of this string's link table */
                        } else {
                                t1[ia*bcount+str0] += sign * pci[str1];
                        }
                }
                tab += nlinkb;  /* advance to the next beta string's links */
        }
}

/*
 * spread t1 into ci1
 */
/*
 * Scatter counterpart of FCIprog_a_t1: t1[ia, 0:bcount] is added (with
 * sign) into ci1[str1, strb_id:strb_id+bcount] for every alpha link of
 * stra_id.
 */
void FCIspread_a_t1(double *ci1, double *t1,
                    int bcount, int stra_id, int strb_id,
                    int norb, int nstrb, int nlinka, _LinkTrilT *clink_indexa)
{
        ci1 += strb_id;
        int j, k, ia, sign;
        size_t str1;
        const _LinkTrilT *tab = clink_indexa + stra_id * nlinka;
        double *cp0, *cp1;

        for (j = 0; j < nlinka; j++) {
                ia   = EXTRACT_IA  (tab[j]);
                str1 = EXTRACT_ADDR(tab[j]);
                sign = EXTRACT_SIGN(tab[j]);
                cp0 = t1 + ia*bcount;
                cp1 = ci1 + str1*nstrb;
                if (sign == 0) {
                        break;
                } else if (sign > 0) {
                        for (k = 0; k < bcount; k++) {
                                cp1[k] += cp0[k];
                        }
                } else {
                        for (k = 0; k < bcount; k++) {
                                cp1[k] -= cp0[k];
                        }
                }
        }
}

/*
 * Scatter counterpart of FCIprog_b_t1: for each of the bcount beta
 * strings, every link (ia, str1, sign) adds sign * t1[ia, str0] into
 * ci1[stra_id, str1].
 */
void FCIspread_b_t1(double *ci1, double *t1,
                    int bcount, int stra_id, int strb_id,
                    int norb, int nstrb, int nlinkb, _LinkTrilT *clink_indexb)
{
        int j, ia, str0, str1, sign;
        const _LinkTrilT *tab = clink_indexb + strb_id * nlinkb;
        double *pci = ci1 + stra_id * (size_t)nstrb;

        for (str0 = 0; str0 < bcount; str0++) {
                for (j = 0; j < nlinkb; j++) {
                        ia   = EXTRACT_IA  (tab[j]);
                        str1 = EXTRACT_ADDR(tab[j]);
                        sign = EXTRACT_SIGN(tab[j]);
                        if (sign == 0) {
                                break;
                        } else {
                                pci[str1] += sign * t1[ia*bcount+str0];
                        }
                }
                tab += nlinkb;
        }
}

/*
 * f1e_tril is the 1e hamiltonian for spin alpha
 */
/*
 * ci1[str1,:] += sign * f1e_tril[ia] * ci0[str0,:] for every alpha-string
 * link str0 -> str1.  The raw integer link table is first compressed
 * into _LinkTrilT entries (FCIcompress_link_tril, defined in fci.h).
 * NOTE(review): the malloc result is used unchecked; an allocation
 * failure would crash -- consistent with the rest of this file.
 */
void FCIcontract_a_1e(double *f1e_tril, double *ci0, double *ci1,
                      int norb, int nstra, int nstrb,
                      int nlinka, int nlinkb,
                      int *link_indexa, int *link_indexb)
{
        int j, k, ia, sign;
        size_t str0, str1;
        double *pci0, *pci1;
        double tmp;
        _LinkTrilT *tab;
        _LinkTrilT *clink = malloc(sizeof(_LinkTrilT) * nlinka * nstra);
        FCIcompress_link_tril(clink, link_indexa, nstra, nlinka);

        for (str0 = 0; str0 < nstra; str0++) {
                tab = clink + str0 * nlinka;
                for (j = 0; j < nlinka; j++) {
                        ia   = EXTRACT_IA  (tab[j]);
                        str1 = EXTRACT_ADDR(tab[j]);
                        sign = EXTRACT_SIGN(tab[j]);
                        pci0 = ci0 + str0 * nstrb;
                        pci1 = ci1 + str1 * nstrb;
                        tmp = sign * f1e_tril[ia];
                        /* axpy over the contiguous beta dimension */
                        for (k = 0; k < nstrb; k++) {
                                pci1[k] += tmp * pci0[k];
                        }
                }
        }
        free(clink);
}

/*
 * f1e_tril is the 1e hamiltonian for spin beta
 */
/*
 * Beta analogue of FCIcontract_a_1e: for each CI element ci0[str0,k],
 * every beta link k -> str1 adds sign * f1e_tril[ia] * ci0[str0,k] into
 * ci1[str0,str1].
 */
void FCIcontract_b_1e(double *f1e_tril, double *ci0, double *ci1,
                      int norb, int nstra, int nstrb,
                      int nlinka, int nlinkb,
                      int *link_indexa, int *link_indexb)
{
        int j, k, ia, sign;
        size_t str0, str1;
        double *pci1;
        double tmp;
        _LinkTrilT *tab;
        _LinkTrilT *clink = malloc(sizeof(_LinkTrilT) * nlinkb * nstrb);
        FCIcompress_link_tril(clink, link_indexb, nstrb, nlinkb);

        for (str0 = 0; str0 < nstra; str0++) {
                pci1 = ci1 + str0 * nstrb;
                for (k = 0; k < nstrb; k++) {
                        tab = clink + k * nlinkb;
                        tmp = ci0[str0*nstrb+k];
                        for (j = 0; j < nlinkb; j++) {
                                ia   = EXTRACT_IA  (tab[j]);
                                str1 = EXTRACT_ADDR(tab[j]);
                                sign = EXTRACT_SIGN(tab[j]);
                                pci1[str1] += sign * tmp * f1e_tril[ia];
                        }
                }
        }
        free(clink);
}

/*
 * 1e contraction for a spin-restricted (na == nb) CI vector: zeroes ci1
 * and applies the alpha contraction with identical alpha/beta link
 * tables.  Callers symmetrize (ci1 + ci1.T) afterwards -- see the
 * spin0 convention documented above FCIcontract_2e_spin0.
 */
void FCIcontract_1e_spin0(double *f1e_tril, double *ci0, double *ci1,
                          int norb, int na, int nlink, int *link_index)
{
        memset(ci1, 0, sizeof(double)*na*na);
        FCIcontract_a_1e(f1e_tril, ci0, ci1, norb, na, na, nlink, nlink,
                         link_index, link_index);
}

/*
 * spread t1 into ci1buf
 */
/*
 * Variant of FCIspread_a_t1 whose source rows have stride nrow_t1
 * independent of bcount, so only the first bcount columns of each
 * t1 row are scattered.
 */
static void spread_bufa_t1(double *ci1, double *t1, int nrow_t1,
                           int bcount, int stra_id, int strb_id,
                           int norb, int nstrb, int nlinka, _LinkTrilT *clink_indexa)
{
        int j, k, ia, sign;
        size_t str1;
        const _LinkTrilT *tab = clink_indexa + stra_id * nlinka;
        double *cp0, *cp1;

        for (j = 0; j < nlinka; j++) {
                ia   = EXTRACT_IA  (tab[j]);
                str1 = EXTRACT_ADDR(tab[j]);
                sign = EXTRACT_SIGN(tab[j]);
                cp0 = t1 + ia*nrow_t1;
                cp1 = ci1 + str1*nstrb;
                if (sign == 0) {
                        break;
                } else if (sign > 0) {
                        for (k = 0; k < bcount; k++) {
                                cp1[k] += cp0[k];
                        }
                } else {
                        for (k = 0; k < bcount; k++) {
                                cp1[k] -= cp0[k];
                        }
                }
        }
}

/*
 * bcount_for_spread_a is different for spin1 and spin0
 */
/*
 * Core 2e kernel for one alpha string stra_id and a block of beta
 * strings [strb_id, strb_id+bcount):
 *   1. gather t1[ia, b]  (alpha + beta links of ci0),
 *   2. vt1 = t1 . eri    (single dgemm over the nnorb pair index),
 *   3. scatter vt1 back through the beta links into ci1 and through
 *      the alpha links into the per-thread buffer ci1buf.
 * t1buf must hold at least 2*nnorb*bcount doubles (t1 then vt1).
 */
static void ctr_rhf2e_kern(double *eri, double *ci0, double *ci1,
                           double *ci1buf, double *t1buf,
                           int bcount_for_spread_a, int ncol_ci1buf,
                           int bcount, int stra_id, int strb_id,
                           int norb, int na, int nb, int nlinka, int nlinkb,
                           _LinkTrilT *clink_indexa, _LinkTrilT *clink_indexb)
{
        const char TRANS_N = 'N';
        const double D0 = 0;
        const double D1 = 1;
        const int nnorb = norb * (norb+1)/2;  /* tril-packed pair count */
        double *t1 = t1buf;
        double *vt1 = t1buf + nnorb*bcount;

        memset(t1, 0, sizeof(double)*nnorb*bcount);
        FCIprog_a_t1(ci0, t1, bcount, stra_id, strb_id,
                     norb, nb, nlinka, clink_indexa);
        FCIprog_b_t1(ci0, t1, bcount, stra_id, strb_id,
                     norb, nb, nlinkb, clink_indexb);
        /* vt1(b,kl) = sum_ij t1(b,ij) eri(ij,kl); Fortran column-major,
         * so bcount is the leading dimension */
        dgemm_(&TRANS_N, &TRANS_N, &bcount, &nnorb, &nnorb,
               &D1, t1, &bcount, eri, &nnorb,
               &D0, vt1, &bcount);
        FCIspread_b_t1(ci1, vt1, bcount, stra_id, strb_id,
                       norb, nb, nlinkb, clink_indexb);
        //FCIspread_a_t1(ci1buf, vt1, bcount_for_spread_a, stra_id, 0,
        //               norb, ncol_ci1buf, nlinka, clink_indexa);
        spread_bufa_t1(ci1buf, vt1, bcount, bcount_for_spread_a, stra_id, 0,
                       norb, ncol_ci1buf, nlinka, clink_indexa);
}

/*
 * 2D strided accumulation: out[i, 0:ni] += in[i, 0:ni], out rows stride
 * no, in rows stride ni.
 * NOTE(review): the loop counters are int while count/ni are size_t;
 * fine for the block sizes used here, but would wrap past INT_MAX.
 */
void FCIaxpy2d(double *out, double *in, size_t count, size_t no, size_t ni)
{
        int i, j;
        for (i = 0; i < count; i++) {
                for (j = 0; j < ni; j++) {
                        out[i*no+j] += in[i*ni+j];
                }
        }
}

/*
 * nlink = nocc*nvir, num. all possible strings that a string can link to
 * link_index[str0] == linking map between str0 and other strings
 * link_index[str0][ith-linking-string] ==
 *     [tril(creation_op,annihilation_op),0,linking-string-id,sign]
 * FCIcontract_2e_spin0 only compute half of the contraction, due to the
 * symmetry between alpha and beta spin. The right contracted ci vector
 * is (ci1+ci1.T)
 */
/*
 * Beta strings are processed in STRB_BLKSIZE blocks; each thread
 * accumulates its alpha-scatter into a private ci1buf, the buffers are
 * summed across threads (NPomp_dsum_reduce_inplace), and the master
 * thread adds the total into ci1.
 * NOTE(review): relies on NPomp_dsum_reduce_inplace leaving the full
 * sum in thread 0's buffer and synchronizing before the master add --
 * confirm in np_helper.
 */
void FCIcontract_2e_spin0(double *eri, double *ci0, double *ci1,
                          int norb, int na, int nlink, int *link_index)
{
        _LinkTrilT *clink = malloc(sizeof(_LinkTrilT) * nlink * na);
        FCIcompress_link_tril(clink, link_index, na, nlink);

        memset(ci1, 0, sizeof(double)*na*na);
        double *ci1bufs[MAX_THREADS];
#pragma omp parallel default(none) \
        shared(eri, ci0, ci1, norb, na, nlink, clink, ci1bufs)
{
        int strk, ib;
        size_t blen;
        /* 2*nnorb*STRB_BLKSIZE scratch: norb*(norb+1) == 2*nnorb */
        double *t1buf = malloc(sizeof(double) * STRB_BLKSIZE*norb*(norb+1));
        double *ci1buf = malloc(sizeof(double) * na*STRB_BLKSIZE);
        ci1bufs[omp_get_thread_num()] = ci1buf;
        for (ib = 0; ib < na; ib += STRB_BLKSIZE) {
                blen = MIN(STRB_BLKSIZE, na-ib);
                memset(ci1buf, 0, sizeof(double) * na*blen);
#pragma omp for schedule(static, 112)
/* strk starts from MAX(strk0, ib), because [0:ib,0:ib] have been evaluated */
                for (strk = ib; strk < na; strk++) {
                        ctr_rhf2e_kern(eri, ci0, ci1, ci1buf, t1buf,
                                       MIN(STRB_BLKSIZE, strk-ib), blen,
                                       MIN(STRB_BLKSIZE, strk+1-ib),
                                       strk, ib, norb, na, na, nlink, nlink,
                                       clink, clink);
                }
                NPomp_dsum_reduce_inplace(ci1bufs, blen*na);
#pragma omp master
                FCIaxpy2d(ci1+ib, ci1buf, na, na, blen);
        }
        free(ci1buf);
        free(t1buf);
}
        free(clink);
}

/*
 * General (na != nb) spin-1 variant of the 2e contraction: separate
 * alpha/beta link tables, all alpha strings for every beta block.
 */
void FCIcontract_2e_spin1(double *eri, double *ci0, double *ci1,
                          int norb, int na, int nb,
                          int nlinka, int nlinkb,
                          int *link_indexa, int *link_indexb)
{
        _LinkTrilT *clinka = malloc(sizeof(_LinkTrilT) * nlinka * na);
        _LinkTrilT *clinkb = malloc(sizeof(_LinkTrilT) * nlinkb * nb);
        FCIcompress_link_tril(clinka, link_indexa, na, nlinka);
        FCIcompress_link_tril(clinkb, link_indexb, nb, nlinkb);
        memset(ci1, 0, sizeof(double)*na*nb);
        double *ci1bufs[MAX_THREADS];
#pragma omp parallel default(none) \
        shared(eri, ci0, ci1, norb, na, nb, nlinka, nlinkb, \
               clinka, clinkb, ci1bufs)
{
        int strk, ib;
        size_t blen;
        double *t1buf = malloc(sizeof(double) * STRB_BLKSIZE*norb*(norb+1));
        double *ci1buf = malloc(sizeof(double) * na*STRB_BLKSIZE);
        ci1bufs[omp_get_thread_num()] = ci1buf;
        for (ib = 0; ib < nb; ib += STRB_BLKSIZE) {
                blen = MIN(STRB_BLKSIZE, nb-ib);
                memset(ci1buf, 0, sizeof(double) * na*blen);
#pragma omp for schedule(static)
                for (strk = 0; strk < na; strk++) {
                        ctr_rhf2e_kern(eri, ci0, ci1, ci1buf, t1buf,
                                       blen, blen, blen, strk, ib,
                                       norb, na, nb, nlinka, nlinkb,
                                       clinka, clinkb);
                }
                NPomp_dsum_reduce_inplace(ci1bufs, blen*na);
#pragma omp master
                FCIaxpy2d(ci1+ib, ci1buf, na, nb, blen);
        }
        free(ci1buf);
        free(t1buf);
}
        free(clinka);
        free(clinkb);
}

/*
 * eri_ab is mixed integrals (alpha,alpha|beta,beta), |beta,beta) in small strides
 */
/*
 * UHF 2e kernel: gathers separate alpha (t1a) and beta (t1b)
 * intermediates, then forms the beta-scatter part
 *     vt1 = t1a . eri_ab^T + t1b . eri_bb
 * and the alpha-scatter part
 *     vt1 = t1a . eri_aa   + t1b . eri_ab.
 * t1buf must hold at least 3*nnorb*bcount doubles (t1a, t1b, vt1).
 */
static void ctr_uhf2e_kern(double *eri_aa, double *eri_ab, double *eri_bb,
                           double *ci0, double *ci1,
                           double *ci1buf, double *t1buf,
                           int bcount, int stra_id, int strb_id,
                           int norb, int na, int nb, int nlinka, int nlinkb,
                           _LinkTrilT *clink_indexa, _LinkTrilT *clink_indexb)
{
        const char TRANS_T = 'T';
        const char TRANS_N = 'N';
        const double D0 = 0;
        const double D1 = 1;
        const int nnorb = norb * (norb+1)/2;
        double *t1a = t1buf;
        double *t1b = t1a + nnorb*bcount;
        double *vt1 = t1b + nnorb*bcount;

        memset(t1a, 0, sizeof(double)*nnorb*bcount);
        memset(t1b, 0, sizeof(double)*nnorb*bcount);
        FCIprog_a_t1(ci0, t1a, bcount, stra_id, strb_id,
                     norb, nb, nlinka, clink_indexa);
        FCIprog_b_t1(ci0, t1b, bcount, stra_id, strb_id,
                     norb, nb, nlinkb, clink_indexb);
        /* eri_ab is transposed here because its beta pair index is the
         * fast dimension (see comment above) */
        dgemm_(&TRANS_N, &TRANS_T, &bcount, &nnorb, &nnorb,
               &D1, t1a, &bcount, eri_ab, &nnorb,
               &D0, vt1, &bcount);
        dgemm_(&TRANS_N, &TRANS_N, &bcount, &nnorb, &nnorb,
               &D1, t1b, &bcount, eri_bb, &nnorb,
               &D1, vt1, &bcount);
        FCIspread_b_t1(ci1, vt1, bcount, stra_id, strb_id,
                       norb, nb, nlinkb, clink_indexb);

        dgemm_(&TRANS_N, &TRANS_N, &bcount, &nnorb, &nnorb,
               &D1, t1a, &bcount, eri_aa, &nnorb,
               &D0, vt1, &bcount);
        dgemm_(&TRANS_N, &TRANS_N, &bcount, &nnorb, &nnorb,
               &D1, t1b, &bcount, eri_ab, &nnorb,
               &D1, vt1, &bcount);
        FCIspread_a_t1(ci1buf, vt1, bcount, stra_id, 0,
                       norb, bcount, nlinka, clink_indexa);
}

/*
 * UHF 2e contraction: same blocking/reduction scheme as
 * FCIcontract_2e_spin1, but with three ERI blocks (aa, ab, bb) and a
 * twice-larger t1 scratch (t1a + t1b + vt1).
 */
void FCIcontract_uhf2e(double *eri_aa, double *eri_ab, double *eri_bb,
                       double *ci0, double *ci1,
                       int norb, int na, int nb,
                       int nlinka, int nlinkb,
                       int *link_indexa, int *link_indexb)
{
        _LinkTrilT *clinka = malloc(sizeof(_LinkTrilT) * nlinka * na);
        _LinkTrilT *clinkb = malloc(sizeof(_LinkTrilT) * nlinkb * nb);
        FCIcompress_link_tril(clinka, link_indexa, na, nlinka);
        FCIcompress_link_tril(clinkb, link_indexb, nb, nlinkb);
        memset(ci1, 0, sizeof(double)*na*nb);
        double *ci1bufs[MAX_THREADS];
#pragma omp parallel default(none) \
        shared(eri_aa, eri_ab, eri_bb, ci0, ci1, norb, na, nb, nlinka, nlinkb, \
               clinka, clinkb, ci1bufs)
{
        int strk, ib;
        size_t blen;
        double *t1buf = malloc(sizeof(double) * STRB_BLKSIZE*norb*(norb+1)*2);
        double *ci1buf = malloc(sizeof(double) * na*STRB_BLKSIZE);
        ci1bufs[omp_get_thread_num()] = ci1buf;
        for (ib = 0; ib < nb; ib += STRB_BLKSIZE) {
                blen = MIN(STRB_BLKSIZE, nb-ib);
                memset(ci1buf, 0, sizeof(double) * na*blen);
#pragma omp for schedule(static)
                for (strk = 0; strk < na; strk++) {
                        ctr_uhf2e_kern(eri_aa, eri_ab, eri_bb, ci0, ci1,
                                       ci1buf, t1buf, blen, strk, ib,
                                       norb, na, nb, nlinka, nlinkb,
                                       clinka, clinkb);
                }
                NPomp_dsum_reduce_inplace(ci1bufs, blen*na);
#pragma omp master
                FCIaxpy2d(ci1+ib, ci1buf, na, nb, blen);
        }
        free(t1buf);
        free(ci1buf);
}
        free(clinka);
        free(clinkb);
}
/*************************************************
 * hdiag
 *************************************************/
/*
 * Hamiltonian diagonal for a UHF-type CI space:
 *   hdiag[ia,ib] = sum_j h1[jj] + .5 * sum_jk (J[jk] - K[jk])
 * where j,k run over the orbitals occupied in the alpha string ia and
 * the beta string ib.  occslista/occslistb give the occupied-orbital
 * lists per string; jdiag_*/kdiag_* are the norb x norb Coulomb and
 * exchange diagonals.  The ab cross term has no exchange part and is
 * counted twice (ab and ba), hence the factor 2 before the global .5.
 */
void FCImake_hdiag_uhf(double *hdiag, double *h1e_a, double *h1e_b,
                       double *jdiag_aa, double *jdiag_ab, double *jdiag_bb,
                       double *kdiag_aa, double *kdiag_bb,
                       int norb, int nstra, int nstrb, int nocca, int noccb,
                       int *occslista, int *occslistb)
{
#pragma omp parallel default(none) \
        shared(hdiag, h1e_a, h1e_b, \
               jdiag_aa, jdiag_ab, jdiag_bb, kdiag_aa, kdiag_bb, \
               norb, nstra, nstrb, nocca, noccb, occslista, occslistb)
{
        int j, j0, k0, jk, jk0;
        size_t ia, ib;
        double e1, e2;
        int *paocc, *pbocc;
#pragma omp for schedule(static)
        for (ia = 0; ia < nstra; ia++) {
                paocc = occslista + ia * nocca;
                for (ib = 0; ib < nstrb; ib++) {
                        e1 = 0;
                        e2 = 0;
                        pbocc = occslistb + ib * noccb;
                        for (j0 = 0; j0 < nocca; j0++) {
                                j = paocc[j0];
                                jk0 = j * norb;
                                e1 += h1e_a[j*norb+j];
                                for (k0 = 0; k0 < nocca; k0++) { // (alpha|alpha)
                                        jk = jk0 + paocc[k0];
                                        e2 += jdiag_aa[jk] - kdiag_aa[jk];
                                }
                                for (k0 = 0; k0 < noccb; k0++) { // (alpha|beta)
                                        jk = jk0 + pbocc[k0];
                                        e2 += jdiag_ab[jk] * 2;
                                }
                        }
                        for (j0 = 0; j0 < noccb; j0++) {
                                j = pbocc[j0];
                                jk0 = j * norb;
                                e1 += h1e_b[j*norb+j];
                                for (k0 = 0; k0 < noccb; k0++) { // (beta|beta)
                                        jk = jk0 + pbocc[k0];
                                        e2 += jdiag_bb[jk] - kdiag_bb[jk];
                                }
                        }
                        hdiag[ia*nstrb+ib] = e1 + e2 * .5;
                }
        }
}
}

/*
 * Restricted (RHF) wrapper: identical alpha and beta integrals and
 * occupation lists.
 */
void FCImake_hdiag(double *hdiag, double *h1e, double *jdiag, double *kdiag,
                   int norb, int na, int nocc, int *occslst)
{
        FCImake_hdiag_uhf(hdiag, h1e, h1e, jdiag, jdiag, jdiag, kdiag, kdiag,
                          norb, na, na, nocc, nocc, occslst, occslst);
}

/*
 * Index of the lowest set bit of r.
 * NOTE(review): callers are assumed to pass a nonzero mask; for r == 0
 * the ffsll branch returns -1 while the fallback returns 0.
 */
static int first1(uint64_t r)
{
#ifdef HAVE_FFS
        return ffsll(r) - 1;
#else
        /* binary search for the lowest set bit */
        int n = 0;
        if (r >> (n + 32)) n += 32;
        if (r >> (n + 16)) n += 16;
        if (r >> (n + 8)) n += 8;
        if (r >> (n + 4)) n += 4;
        if (r >> (n + 2)) n += 2;
        if (r >> (n + 1)) n += 1;
        return n;
#endif
}

/*************************************************
 * pspace Hamiltonian, ref CPL, 169, 463
 *************************************************/
/*
 * sub-space Hamiltonian (tril part) of the determinants (stra,strb)
 */
/*
 * Applies the Slater-Condon rules: determinant pair (i,j) is classified
 * by the number of differing spin-orbitals (popcount of the XOR of the
 * occupation bit strings; 2 set bits = single excitation, 4 = double).
 * Only single (2) and double (4 total, in either spin) differences give
 * a nonzero off-diagonal element; only the lower triangle h0[i*np+j]
 * (j < i) is written.  g2e_ab is indexed with |beta,beta) as the fast
 * dimension (see the comment above ctr_uhf2e_kern).
 */
void FCIpspace_h0tril_uhf(double *h0, double *h1e_a, double *h1e_b,
                          double *g2e_aa, double *g2e_ab, double *g2e_bb,
                          uint64_t *stra, uint64_t *strb, int norb, int np)
{
        const int d2 = norb * norb;
        const int d3 = norb * norb * norb;
#pragma omp parallel default(none) \
        shared(h0, h1e_a, h1e_b, g2e_aa, g2e_ab, g2e_bb, \
               stra, strb, norb, np)
{
        int i, j, k, pi, pj, pk, pl;
        int n1da, n1db;
        uint64_t da, db, str1;
        double tmp;
#pragma omp for schedule(dynamic)
        for (i = 0; i < np; i++) {
        for (j = 0; j < i; j++) {
                da = stra[i] ^ stra[j];  /* differing alpha orbitals */
                db = strb[i] ^ strb[j];  /* differing beta orbitals */
                n1da = FCIpopcount_1(da);
                n1db = FCIpopcount_1(db);
                switch (n1da) {
                case 0: switch (n1db) {
                        case 2:  /* single beta excitation pj -> pi */
                        pi = first1(db & strb[i]);
                        pj = first1(db & strb[j]);
                        tmp = h1e_b[pi*norb+pj];
                        for (k = 0; k < norb; k++) {
                                if (stra[i] & (1ULL<<k)) {
                                        tmp += g2e_ab[pi*norb+pj+k*d3+k*d2];
                                }
                                if (strb[i] & (1ULL<<k)) {
                                        tmp += g2e_bb[pi*d3+pj*d2+k*norb+k]
                                             - g2e_bb[pi*d3+k*d2+k*norb+pj];
                                }
                        }
                        if (FCIcre_des_sign(pi, pj, strb[j]) > 0) {
                                h0[i*np+j] = tmp;
                        } else {
                                h0[i*np+j] = -tmp;
                        }
                        break;
                        case 4:  /* double beta excitation */
                        pi = first1(db & strb[i]);
                        pj = first1(db & strb[j]);
                        pk = first1((db & strb[i]) ^ (1ULL<<pi));
                        pl = first1((db & strb[j]) ^ (1ULL<<pj));
                        str1 = strb[j] ^ (1ULL<<pi) ^ (1ULL<<pj);
                        if (FCIcre_des_sign(pi, pj, strb[j])
                           *FCIcre_des_sign(pk, pl, str1) > 0) {
                                h0[i*np+j] = g2e_bb[pi*d3+pj*d2+pk*norb+pl]
                                           - g2e_bb[pi*d3+pl*d2+pk*norb+pj];
                        } else {
                                h0[i*np+j] =-g2e_bb[pi*d3+pj*d2+pk*norb+pl]
                                           + g2e_bb[pi*d3+pl*d2+pk*norb+pj];
                        }
                        } break;
                case 2: switch (n1db) {
                        case 0:  /* single alpha excitation pj -> pi */
                        pi = first1(da & stra[i]);
                        pj = first1(da & stra[j]);
                        tmp = h1e_a[pi*norb+pj];
                        for (k = 0; k < norb; k++) {
                                if (strb[i] & (1ULL<<k)) {
                                        tmp += g2e_ab[pi*d3+pj*d2+k*norb+k];
                                }
                                if (stra[i] & (1ULL<<k)) {
                                        tmp += g2e_aa[pi*d3+pj*d2+k*norb+k]
                                             - g2e_aa[pi*d3+k*d2+k*norb+pj];
                                }
                        }
                        if (FCIcre_des_sign(pi, pj, stra[j]) > 0) {
                                h0[i*np+j] = tmp;
                        } else {
                                h0[i*np+j] = -tmp;
                        }
                        break;
                        case 2:  /* one alpha + one beta excitation */
                        pi = first1(da & stra[i]);
                        pj = first1(da & stra[j]);
                        pk = first1(db & strb[i]);
                        pl = first1(db & strb[j]);
                        if (FCIcre_des_sign(pi, pj, stra[j])
                           *FCIcre_des_sign(pk, pl, strb[j]) > 0) {
                                h0[i*np+j] = g2e_ab[pi*d3+pj*d2+pk*norb+pl];
                        } else {
                                h0[i*np+j] =-g2e_ab[pi*d3+pj*d2+pk*norb+pl];
                        }
                        } break;
                case 4: switch (n1db) {
                        case 0:  /* double alpha excitation */
                        pi = first1(da & stra[i]);
                        pj = first1(da & stra[j]);
                        pk = first1((da & stra[i]) ^ (1ULL<<pi));
                        pl = first1((da & stra[j]) ^ (1ULL<<pj));
                        str1 = stra[j] ^ (1ULL<<pi) ^ (1ULL<<pj);
                        if (FCIcre_des_sign(pi, pj, stra[j])
                           *FCIcre_des_sign(pk, pl, str1) > 0) {
                                h0[i*np+j] = g2e_aa[pi*d3+pj*d2+pk*norb+pl]
                                           - g2e_aa[pi*d3+pl*d2+pk*norb+pj];
                        } else {
                                h0[i*np+j] =-g2e_aa[pi*d3+pj*d2+pk*norb+pl]
                                           + g2e_aa[pi*d3+pl*d2+pk*norb+pj];
                        }
                        } break;
                }
        } }
}
}

/*
 * Restricted wrapper for the pspace Hamiltonian: identical alpha/beta
 * one- and two-electron integrals.
 */
void FCIpspace_h0tril(double *h0, double *h1e, double *g2e,
                      uint64_t *stra, uint64_t *strb, int norb, int np)
{
        FCIpspace_h0tril_uhf(h0, h1e, h1e, g2e, g2e, g2e,
                             stra, strb, norb, np);
}

/***********************************************************************
 *
 * With symmetry
 *
 * Note the ordering in eri and the index in link_index
 * eri is a tril matrix, it should be reordered wrt the irrep of the
 * direct product E_i^j. The 2D array eri(ij,kl) is a diagonal block
 * matrix. Each block is associated with an irrep.
 * link_index[str_id,pair_id,0] which is the index of pair_id, should be
 * reordered wrt the irreps accordingly
 *
 * dimirrep stores the number of occurence for each irrep
 *
 ***********************************************************************/
/*
 * Filter the raw 4-int link table down to the entries whose pair irrep
 * equals eri_irrep, compressing them into _LinkTrilT records.  A
 * sign == 0 record terminates each string's (shortened) table, matching
 * the early-break convention in the t1 gather/scatter routines.
 */
static void pick_link_by_irrep(_LinkTrilT *clink, int *link_index,
                               int nstr, int nlink, int eri_irrep)
{
        int i, j, k;
        for (i = 0; i < nstr; i++) {
                for (k = 0, j = 0; k < nlink; k++) {
                        if (link_index[k*4+1] == eri_irrep) {
                                clink[j].ia = link_index[k*4+0];
                                clink[j].addr = link_index[k*4+2];
                                clink[j].sign = link_index[k*4+3];
                                j++;
                        }
                }
                if (j < nlink) {
                        clink[j].sign = 0;  /* terminator */
                }
                clink += nlink;
                link_index += nlink * 4;
        }
}

/*
 * Symmetry-adapted kernel: like ctr_rhf2e_kern but with only the alpha
 * gather (the beta part is handled through the irrep loop structure in
 * FCIcontract_2e_symm1), nnorb restricted to one irrep block of the
 * ERI, and norb unused (passed as 0).
 */
static void ctr_rhf2esym_kern1(double *eri, double *ci0, double *ci1ab,
                               double *ci1buf, double *t1buf, int ncol_ci1buf,
                               int bcount, int stra_id, int strb_id,
                               int nnorb, int nb_intermediate,
                               int na, int nb, int nlinka, int nlinkb,
                               _LinkTrilT *clink_indexa, _LinkTrilT *clink_indexb)
{
        const char TRANS_N = 'N';
        const double D0 = 0;
        const double D1 = 1;
        double *t1 = t1buf;
        double *vt1 = t1buf + nnorb*bcount;

        memset(t1, 0, sizeof(double)*nnorb*bcount);
        FCIprog_a_t1(ci0, t1, bcount, stra_id, strb_id,
                     0, nb, nlinka, clink_indexa);
        dgemm_(&TRANS_N, &TRANS_N, &bcount, &nnorb, &nnorb,
               &D1, t1, &bcount, eri, &nnorb,
               &D0, vt1, &bcount);
        FCIspread_b_t1(ci1ab, vt1, bcount, stra_id, strb_id,
                       0, nb_intermediate, nlinkb, clink_indexb);
        spread_bufa_t1(ci1buf, vt1, bcount, bcount, stra_id, 0,
                       0, ncol_ci1buf, nlinka, clink_indexa);
}

/*
 * Threaded driver over beta blocks for one (stra_ir, ai_ir) irrep pair;
 * same per-thread buffer + reduction scheme as FCIcontract_2e_spin1.
 */
static void loop_c2e_symm1(double *eri, double *ci0,
                           double *ci1aa, double *ci1ab,
                           int nnorb, int na_intermediate, int nb_intermediate,
                           int na, int nb, int nlinka, int nlinkb,
                           _LinkTrilT *clinka, _LinkTrilT *clinkb)
{
        double *ci1bufs[MAX_THREADS];
#pragma omp parallel default(none) \
        shared(eri, ci0, ci1aa, ci1ab, nnorb, na, nb, nlinka, nlinkb, \
               na_intermediate, nb_intermediate, clinka, clinkb, ci1bufs)
{
        int strk, ib;
        size_t blen;
        double *t1buf = malloc(sizeof(double) * STRB_BLKSIZE*nnorb*2);
        double *ci1buf = malloc(sizeof(double) * na*STRB_BLKSIZE);
        ci1bufs[omp_get_thread_num()] = ci1buf;
        for (ib = 0; ib < nb; ib += STRB_BLKSIZE) {
                blen = MIN(STRB_BLKSIZE, nb-ib);
                memset(ci1buf, 0, sizeof(double) * na*blen);
#pragma omp for schedule(static)
                for (strk = 0; strk < na_intermediate; strk++) {
                        ctr_rhf2esym_kern1(eri, ci0, ci1ab, ci1buf, t1buf,
                                           blen, blen, strk, ib,
                                           nnorb, nb_intermediate, na, nb,
                                           nlinka, nlinkb, clinka, clinkb);
                }
                NPomp_dsum_reduce_inplace(ci1bufs, blen*na);
#pragma omp master
                FCIaxpy2d(ci1aa+ib, ci1buf, na, nb, blen);
        }
        free(ci1buf);
        free(t1buf);
}
}

#define TOTIRREPS 8
/*
 * Symmetry-adapted 2e contraction.  eris/ci0/ci1 are arrays of
 * TOTIRREPS pointers, one irrep block each; nas/nbs give the string
 * counts per irrep.  For each (alpha-string irrep, excitation irrep)
 * pair the intermediate irreps follow from the XOR (direct-product)
 * relations, the link tables are filtered to that excitation irrep, and
 * the block contraction is dispatched to loop_c2e_symm1.  The scratch
 * link tables are sized by the largest irrep (na, nb maxima).
 */
void FCIcontract_2e_symm1(double **eris, double **ci0, double **ci1,
                          int norb, int *nas, int *nbs, int nlinka, int nlinkb,
                          int **linka, int **linkb, int *dimirrep, int wfnsym)
{
        int i;
        int na = 0;
        int nb = 0;
        for (i = 0; i < TOTIRREPS; i++) {
                na = MAX(nas[i], na);
                nb = MAX(nbs[i], nb);
        }
        _LinkTrilT *clinka = malloc(sizeof(_LinkTrilT) * nlinka * na);
        _LinkTrilT *clinkb = malloc(sizeof(_LinkTrilT) * nlinkb * nb);
        int ai_ir, stra_ir, strb_ir, intera_ir, interb_ir, ma, mb;
        for (stra_ir = 0; stra_ir < TOTIRREPS; stra_ir++) {
        for (ai_ir = 0; ai_ir < TOTIRREPS; ai_ir++) {
                strb_ir = wfnsym^stra_ir;  /* beta irrep fixed by wfnsym */
                ma = nas[stra_ir];
                mb = nbs[strb_ir];
                if (ma > 0 && mb > 0 && dimirrep[ai_ir] > 0) {
                        intera_ir = ai_ir^stra_ir;
                        interb_ir = ai_ir^strb_ir;
                        // clinka for inter_ir*ai_ir -> stra_ir
                        pick_link_by_irrep(clinka, linka[intera_ir],
                                           nas[intera_ir], nlinka, ai_ir);
                        // clinka for strb_ir*ai_ir -> inter_ir
                        pick_link_by_irrep(clinkb, linkb[strb_ir],
                                           nbs[strb_ir], nlinkb, ai_ir);
                        loop_c2e_symm1(eris[ai_ir], ci0[stra_ir],
                                       ci1[stra_ir], ci1[intera_ir],
                                       dimirrep[ai_ir], nas[intera_ir],
                                       nbs[interb_ir], ma, mb,
                                       nlinka, nlinkb, clinka, clinkb);
                }
        } }
        free(clinka);
        free(clinkb);
}
test.c
#include <stdio.h> #include <omp.h> #include "../utilities/check.h" #include "../utilities/utilities.h" #define TRIALS (1) #define N (1024*3) #define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;}) #define ZERO(X) ZERO_ARRAY(N, X) int main(void) { check_offloading(); double A[N], B[N], C[N], D[N], E[N]; double S[N]; double p[2]; INIT(); long cpuExec = 0; #pragma omp target map(tofrom: cpuExec) { cpuExec = omp_is_initial_device(); } int max_threads = 224; #undef FOR_CLAUSES #define FOR_CLAUSES #include "defines.h" for (int t = 0; t <= max_threads; t++) { int threads[1]; threads[0] = t; PARALLEL( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i]; } S[0] += tmp; }, VERIFY(0, 1, S[0], SUMS * (N/2*(N+1)))) } // // Test: private clause on omp for. // #undef FOR_CLAUSES #define FOR_CLAUSES private(p,q) #include "defines.h" for (int t = 0; t <= max_threads; t++) { int threads[1]; threads[0] = t; PARALLEL( double p = 2; \ double q = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ p = C[i] + D[i]; \ q = D[i] + E[i]; \ A[i] += p; \ B[i] += q; \ } , { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i]; } S[0] += tmp; }, VERIFY(0, 1, S[0], 6 + SUMS * (N/2*(N+1)))) } // // Test: firstprivate clause on omp for. 
  //
#undef FOR_CLAUSES
#define FOR_CLAUSES firstprivate(p,q)
#include "defines.h"
  for (int t = 0; t <= max_threads; t++) {
    int threads[1]; threads[0] = t;
    PARALLEL(
    double p = -4; \
    double q = 4; \
    S[0] = 0; \
    for (int i = 0; i < N; i++) { \
      A[i] = B[i] = 0; \
    }
    ,
    for (int i = 0; i < N; i++) { \
      A[i] += C[i] + D[i] + p; \
      B[i] += D[i] + E[i] + q; \
      if (i == N-1) { \
        p += 6; \
        q += 9; \
      } \
    }
    ,
    {
      // firstprivate copies are discarded; outer p+q == 0 here
      double tmp = p + q;
      for (int i = 0; i < N; i++) {
        tmp += A[i] + B[i];
      }
      S[0] += tmp;
    },
    VERIFY(0, 1, S[0], SUMS * (N/2*(N+1))))
  }

  //
  // Test: lastprivate clause on omp for.
  //
  double q0[1], q1[1], q2[1], q3[1], q4[1], q5[1], q6[1], q7[1], q8[1], q9[1];
  for (int t = 0; t <= max_threads; t++) {
    int threads[1]; threads[0] = t;
    TEST({
      S[0] = 0;
      for (int i = 0; i < N; i++) {
        A[i] = B[i] = 0;
      }
      _Pragma("omp parallel if(threads[0] > 1) num_threads(threads[0])")
      {
        // one lastprivate variable per supported schedule kind
        _Pragma("omp for lastprivate(q0)")
        for (int i = 0; i < N; i++) {
          q0[0] = C[i] + D[i];
          A[i] += q0[0];
        }
        _Pragma("omp for schedule(auto) lastprivate(q1)")
        for (int i = 0; i < N; i++) {
          q1[0] = D[i] + E[i];
          B[i] += q1[0];
        }
        _Pragma("omp for schedule(dynamic) lastprivate(q2)")
        for (int i = 0; i < N; i++) {
          q2[0] = C[i] + D[i];
          A[i] += q2[0];
        }
        _Pragma("omp for schedule(guided) lastprivate(q3)")
        for (int i = 0; i < N; i++) {
          q3[0] = D[i] + E[i];
          B[i] += q3[0];
        }
        _Pragma("omp for schedule(runtime) lastprivate(q4)")
        for (int i = 0; i < N; i++) {
          q4[0] = C[i] + D[i];
          A[i] += q4[0];
        }
        _Pragma("omp for schedule(static) lastprivate(q5)")
        for (int i = 0; i < N; i++) {
          q5[0] = D[i] + E[i];
          B[i] += q5[0];
        }
        _Pragma("omp for schedule(static,1) lastprivate(q6)")
        for (int i = 0; i < N; i++) {
          q6[0] = C[i] + D[i];
          A[i] += q6[0];
        }
        _Pragma("omp for schedule(static,9) lastprivate(q7)")
        for (int i = 0; i < N; i++) {
          q7[0] = D[i] + E[i];
          B[i] += q7[0];
        }
        _Pragma("omp for schedule(static,13) lastprivate(q8)")
        for (int i = 0; i < N; i++) {
          q8[0] = C[i] + D[i];
          A[i] += q8[0];
        }
        _Pragma("omp for schedule(static,30000) lastprivate(q9)")
        for (int i = 0; i < N; i++) {
          q9[0] = D[i] + E[i];
          B[i] += q9[0];
        }
      }
      // each qX holds the value from the sequentially-last iteration
      double tmp = q0[0] + q1[0] + q2[0] + q3[0] + q4[0] + \
                   q5[0] + q6[0] + q7[0] + q8[0] + q9[0];
      for (int i = 0; i < N; i++) {
        tmp += A[i] + B[i];
      }
      S[0] += tmp;
    }, VERIFY(0, 1, S[0], 5 * (N + (N/2*(N+1))) ));
  }

  //
  // Test: private clause on omp for.
  //
#undef FOR_CLAUSES
#define FOR_CLAUSES private(p)
#include "defines.h"
  for (int t = 0; t <= max_threads; t++) {
    int threads[1]; threads[0] = t;
    PARALLEL(
    p[0] = 2; p[1] = 4; \
    S[0] = 0; \
    for (int i = 0; i < N; i++) { \
      A[i] = B[i] = 0; \
    }
    ,
    for (int i = 0; i < N; i++) { \
      p[0] = C[i] + D[i]; \
      p[1] = D[i] + E[i]; \
      A[i] += p[0]; \
      B[i] += p[1]; \
    }
    ,
    {
      // private array copies are discarded; outer p[0]+p[1] == 6
      double tmp = p[0] + p[1];
      for (int i = 0; i < N; i++) {
        tmp += A[i] + B[i];
      }
      S[0] += tmp;
    },
    VERIFY(0, 1, S[0], 6 + SUMS * (N/2*(N+1))))
  }

  //
  // Test: firstprivate clause on omp for.
  //
#undef FOR_CLAUSES
#define FOR_CLAUSES firstprivate(p)
#include "defines.h"
  for (int t = 0; t <= max_threads; t++) {
    int threads[1]; threads[0] = t;
    PARALLEL(
    p[0] = -4; p[1] = 4; \
    S[0] = 0; \
    for (int i = 0; i < N; i++) { \
      A[i] = B[i] = 0; \
    }
    ,
    for (int i = 0; i < N; i++) { \
      A[i] += C[i] + D[i] + p[0]; \
      B[i] += D[i] + E[i] + p[1]; \
      if (i == N-1) { \
        p[0] += 6; \
        p[1] += 9; \
      } \
    }
    ,
    {
      double tmp = p[0] + p[1];
      for (int i = 0; i < N; i++) {
        tmp += A[i] + B[i];
      }
      S[0] += tmp;
    },
    VERIFY(0, 1, S[0], SUMS * (N/2*(N+1))))
  }

  //
  // Test: collapse clause on omp for.
  //
#undef FOR_CLAUSES
#define FOR_CLAUSES collapse(2)
#include "defines.h"
  for (int t = 0; t <= max_threads; t++) {
    int threads[1]; threads[0] = t;
    PARALLEL(
    S[0] = 0; \
    for (int i = 0; i < N; i++) { \
      A[i] = B[i] = 0; \
    }
    ,
    for (int i = 0; i < 1024; i++) { \
      for (int j = 0; j < 3; j++) { \
        A[i*3+j] += C[i*3+j] + D[i*3+j]; \
        B[i*3+j] += D[i*3+j] + E[i*3+j]; \
      } \
    }
    ,
    {
      double tmp = 0;
      for (int i = 0; i < N; i++) {
        tmp += A[i] + B[i];
      }
      S[0] += tmp;
    },
    VERIFY(0, 1, S[0], SUMS * (N/2*(N+1))))
  }

  //
  // Test: ordered clause on omp for.
// #undef FOR_CLAUSES #define FOR_CLAUSES ordered #include "defines.h" for (int t = 0; t <= max_threads; t += max_threads) { int threads[1]; threads[0] = t; PARALLEL( S[0] = 0; \ , for (int i = 0; i < N; i++) { \ _Pragma("omp ordered") \ S[0] += C[i] + D[i]; \ } , { }, VERIFY(0, 1, S[0], SUMS * (N/2*(N+1)))) } // // Test: nowait clause on omp for. // FIXME: Not sure how to test for correctness. // for (int t = 0; t <= max_threads; t++) { int threads[1]; threads[0] = t; TEST({ S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } _Pragma("omp parallel if(threads[0] > 1) num_threads(threads[0])") { _Pragma("omp for nowait schedule(static,1)") for (int i = 0; i < N; i++) { A[i] = C[i] + D[i]; } _Pragma("omp for nowait schedule(static,1)") for (int i = 0; i < N; i++) { B[i] = A[i] + D[i] + E[i]; } _Pragma("omp barrier") if (omp_get_thread_num() == 0) { double tmp = 0; for (int i = 0; i < N; i++) { tmp += B[i]; } S[0] += tmp; } } }, VERIFY(0, 1, S[0], (N/2*(N+1)) )); } // // Test: Ensure coalesced scheduling on GPU. // if (!cpuExec) { TEST({ S[0] = 0; for (int i = 0; i < 99; i++) { A[i] = 0; } _Pragma("omp parallel num_threads(33)") { _Pragma("omp for") for (int i = 0; i < 99; i++) { A[i] += i - omp_get_thread_num(); } _Pragma("omp for schedule(auto)") for (int i = 0; i < 99; i++) { A[i] += i - omp_get_thread_num(); } _Pragma("omp for schedule(static,1)") for (int i = 0; i < 99; i++) { A[i] += i - omp_get_thread_num(); } } double tmp = 0; for (int i = 0; i < 99; i++) { tmp += A[i]; } S[0] = tmp; }, VERIFY(0, 1, S[0], 3 * (33*33 + 66*33) )); } else { DUMP_SUCCESS(1); } // // Test: Ensure that we have barriers after dynamic, guided, // and ordered schedules, even with a nowait clause since the // NVPTX runtime doesn't currently support concurrent execution // of these constructs. // FIXME: Not sure how to test for correctness at runtime. 
// if (!cpuExec) { TEST({ for (int i = 0; i < N; i++) { A[i] = 0; } _Pragma("omp parallel") { _Pragma("omp for nowait schedule(guided)") for (int i = 0; i < N; i++) { A[i] += C[i] + D[i]; } _Pragma("omp for nowait schedule(dynamic)") for (int i = 0; i < N; i++) { A[i] += D[i] + E[i]; } _Pragma("omp for nowait ordered") for (int i = 0; i < N; i++) { A[i] += C[i] + D[i]; } } }, VERIFY(0, N, A[i], 2*i+2) ); } else { DUMP_SUCCESS(1); } // // Test: Linear clause on target // if (!cpuExec) { int l = 0; ZERO(A); #pragma omp target map(tofrom:A) #pragma omp parallel for linear(l:2) for(int i = 0 ; i < 10 ; i++) A[i] = l; int fail = 0; for(int i = 0 ; i < 10 ; i++) if(A[i] != i*2) { printf("error at %d, val = %lf expected = %d\n", i, A[i], i*2); fail = 1; } if(fail) printf("Error\n"); else printf("Succeeded\n"); } else { DUMP_SUCCESS(1); } return 0; }
dynwave.c
//----------------------------------------------------------------------------- // dynwave.c // // Project: EPA SWMM5 // Version: 5.1 // Date: 03/20/14 (5.1.001) // 03/28/14 (5.1.002) // 09/15/14 (5.1.007) // 03/19/15 (5.1.008) // 08/01/16 (5.1.011) // Author: L. Rossman (EPA) // M. Tryby (EPA) // R. Dickinson (CDM) // // Dynamic wave flow routing functions. // // This module solves the dynamic wave flow routing equations using // Picard Iterations (i.e., a method of successive approximations) // to solve the explicit form of the continuity and momentum equations // for conduits. // // Build 5.1.002: // - Only non-ponded nodal surface area is saved for use in // surcharge algorithm. // // Build 5.1.007: // - Node losses added to node outflow variable instead of treated // as a separate item when computing change in node flow volume. // // Build 5.1.008: // - Module-specific constants moved here from project.c. // - Support added for user-specified minimum variable time step. // - Node crown elevations found here instead of in flowrout.c module. // - OpenMP use to parallelize findLinkFlows() & findNodeDepths(). // - Bug in finding complete list of capacity limited links fixed. // // Build 5.1.011: // - Added test for failed memory allocation. // - Fixed illegal array index bug for Ideal Pumps. // //----------------------------------------------------------------------------- #define _CRT_SECURE_NO_DEPRECATE #include "headers.h" #include <malloc.h> #include <math.h> #include <omp.h> //(5.1.008) //----------------------------------------------------------------------------- // Constants //----------------------------------------------------------------------------- static const double MINTIMESTEP = 0.001; // min. time step (sec) //(5.1.008) static const double OMEGA = 0.5; // under-relaxation parameter // Constants moved here from project.c // //(5.1.008) const double DEFAULT_SURFAREA = 12.566; // Min. nodal surface area (~4 ft diam.) 
const double DEFAULT_HEADTOL = 0.005; // Default head tolerance (ft) const int DEFAULT_MAXTRIALS = 8; // Max. trials per time step //----------------------------------------------------------------------------- // Data Structures //----------------------------------------------------------------------------- typedef struct { char converged; // TRUE if iterations for a node done double newSurfArea; // current surface area (ft2) double oldSurfArea; // previous surface area (ft2) double sumdqdh; // sum of dqdh from adjoining links double dYdT; // change in depth w.r.t. time (ft/sec) } TXnode; //----------------------------------------------------------------------------- // Shared Variables //----------------------------------------------------------------------------- static double VariableStep; // size of variable time step (sec) static TXnode* Xnode; // extended nodal information static double Omega; // actual under-relaxation parameter static int Steps; // number of Picard iterations //----------------------------------------------------------------------------- // Function declarations //----------------------------------------------------------------------------- static void initRoutingStep(void); static void initNodeStates(void); static void findBypassedLinks(); static void findLimitedLinks(); static void findLinkFlows(double dt); static int isTrueConduit(int link); static void findNonConduitFlow(int link, double dt); static void findNonConduitSurfArea(int link); static double getModPumpFlow(int link, double q, double dt); static void updateNodeFlows(int link); static int findNodeDepths(double dt); static void setNodeDepth(int node, double dt); static double getFloodedDepth(int node, int canPond, double dV, double yNew, double yMax, double dt); static double getVariableStep(double maxStep); static double getLinkStep(double tMin, int *minLink); static double getNodeStep(double tMin, int *minNode); 
//============================================================================= //// This function was modified for release 5.1.008. //// //(5.1.008) void dynwave_init() // // Input: none // Output: none // Purpose: initializes dynamic wave routing method. // { int i, j; double z; VariableStep = 0.0; Xnode = (TXnode *) calloc(Nobjects[NODE], sizeof(TXnode)); //// Added to release 5.1.011. //// //(5.1.011) if ( Xnode == NULL ) { report_writeErrorMsg(ERR_MEMORY, " Not enough memory for dynamic wave routing."); return; } ////////////////////////////////////// // --- initialize node surface areas & crown elev. for (i = 0; i < Nobjects[NODE]; i++ ) { Xnode[i].newSurfArea = 0.0; Xnode[i].oldSurfArea = 0.0; Node[i].crownElev = Node[i].invertElev; } // --- update node crown elev. & initialize links for (i = 0; i < Nobjects[LINK]; i++) { j = Link[i].node1; z = Node[j].invertElev + Link[i].offset1 + Link[i].xsect.yFull; Node[j].crownElev = MAX(Node[j].crownElev, z); j = Link[i].node2; z = Node[j].invertElev + Link[i].offset2 + Link[i].xsect.yFull; Node[j].crownElev = MAX(Node[j].crownElev, z); Link[i].flowClass = DRY; Link[i].dqdh = 0.0; } } //============================================================================= void dynwave_close() // // Input: none // Output: none // Purpose: frees memory allocated for dynamic wave routing method. // { FREE(Xnode); } //============================================================================= //// New function added to release 5.1.008. //// //(5.1.008) void dynwave_validate() // // Input: none // Output: none // Purpose: adjusts dynamic wave routing options. 
// { if ( MinRouteStep > RouteStep ) MinRouteStep = RouteStep; if ( MinRouteStep < MINTIMESTEP ) MinRouteStep = MINTIMESTEP; if ( MinSurfArea == 0.0 ) MinSurfArea = DEFAULT_SURFAREA; else MinSurfArea /= UCF(LENGTH) * UCF(LENGTH); if ( HeadTol == 0.0 ) HeadTol = DEFAULT_HEADTOL; else HeadTol /= UCF(LENGTH); if ( MaxTrials == 0 ) MaxTrials = DEFAULT_MAXTRIALS; } //============================================================================= double dynwave_getRoutingStep(double fixedStep) // // Input: fixedStep = user-supplied fixed time step (sec) // Output: returns routing time step (sec) // Purpose: computes variable routing time step if applicable. // { // --- use user-supplied fixed step if variable step option turned off // or if its smaller than the min. allowable variable time step if ( CourantFactor == 0.0 ) return fixedStep; if ( fixedStep < MINTIMESTEP ) return fixedStep; // --- at start of simulation (when current variable step is zero) // use the minimum allowable time step if ( VariableStep == 0.0 ) { VariableStep = MinRouteStep; //(5.1.008) } // --- otherwise compute variable step based on current flow solution else VariableStep = getVariableStep(fixedStep); // --- adjust step to be a multiple of a millisecond VariableStep = floor(1000.0 * VariableStep) / 1000.0; return VariableStep; } //============================================================================= int dynwave_execute(double tStep) // // Input: links = array of topo sorted links indexes // tStep = time step (sec) // Output: returns number of iterations used // Purpose: routes flows through drainage network over current time step. 
//
{
    int converged;

    // --- initialize
    if ( ErrorCode ) return 0;
    Steps = 0;
    converged = FALSE;
    Omega = OMEGA;
    initRoutingStep();

    // --- keep iterating until convergence
    while ( Steps < MaxTrials )
    {
        // --- execute a routing step & check for nodal convergence
        initNodeStates();
        findLinkFlows(tStep);
        converged = findNodeDepths(tStep);
        Steps++;
        if ( Steps > 1 )
        {
            if ( converged ) break;

            // --- check if link calculations can be skipped in next step
            findBypassedLinks();
        }
    }
    if ( !converged ) NonConvergeCount++;

    // --- identify any capacity-limited conduits
    findLimitedLinks();
    return Steps;
}

//=============================================================================

// Resets per-step convergence flags and link surface areas before a
// new Picard iteration sequence begins.
void initRoutingStep()
{
    int i;
    for (i = 0; i < Nobjects[NODE]; i++)
    {
        Xnode[i].converged = FALSE;
        Xnode[i].dYdT = 0.0;
    }
    for (i = 0; i < Nobjects[LINK]; i++)
    {
        Link[i].bypassed = FALSE;
        Link[i].surfArea1 = 0.0;
        Link[i].surfArea2 = 0.0;
    }

    // --- a2 preserves conduit area from solution at last time step
    for ( i = 0; i < Nlinks[CONDUIT]; i++) Conduit[i].a2 = Conduit[i].a1;
}

//=============================================================================

void initNodeStates()
//
//  Input:   none
//  Output:  none
//  Purpose: initializes node's surface area, inflow & outflow
//
{
    int i;

    for (i = 0; i < Nobjects[NODE]; i++)
    {
        // --- initialize nodal surface area
        if ( AllowPonding )
        {
            Xnode[i].newSurfArea = node_getPondedArea(i, Node[i].newDepth);
        }
        else
        {
            Xnode[i].newSurfArea = node_getSurfArea(i, Node[i].newDepth);
        }
        if ( Xnode[i].newSurfArea < MinSurfArea )
        {
            Xnode[i].newSurfArea = MinSurfArea;
        }

////  Following code section modified for release 5.1.007  ////               //(5.1.007)
        // --- initialize nodal inflow & outflow
        Node[i].inflow = 0.0;
        Node[i].outflow = Node[i].losses;
        // lateral flow adds to inflow when positive, outflow when negative
        if ( Node[i].newLatFlow >= 0.0 )
        {
            Node[i].inflow += Node[i].newLatFlow;
        }
        else
        {
            Node[i].outflow -= Node[i].newLatFlow;
        }
        Xnode[i].sumdqdh = 0.0;
    }
}

//=============================================================================

// Marks a link as bypassable when both of its end nodes converged in
// the previous iteration.
void findBypassedLinks()
{
    int i;
    for (i = 0; i < Nobjects[LINK]; i++)
    {
        if ( Xnode[Link[i].node1].converged &&
             Xnode[Link[i].node2].converged )
             Link[i].bypassed = TRUE;
        else Link[i].bypassed = FALSE;
    }
}

//=============================================================================

void findLimitedLinks()
//
//  Input:   none
//  Output:  none
//  Purpose: determines if a conduit link is capacity limited.
//
{
    int    j, n1, n2, k;
    double h1, h2;

    for (j = 0; j < Nobjects[LINK]; j++)
    {
        // ---- check only non-dummy conduit links
        if ( !isTrueConduit(j) ) continue;                                    //(5.1.008)

        // --- check that upstream end is full
        k = Link[j].subIndex;
        Conduit[k].capacityLimited = FALSE;
        if ( Conduit[k].a1 >= Link[j].xsect.aFull )
        {
            // --- check if HGL slope > conduit slope
            n1 = Link[j].node1;
            n2 = Link[j].node2;
            h1 = Node[n1].newDepth + Node[n1].invertElev;
            h2 = Node[n2].newDepth + Node[n2].invertElev;
            if ( (h1 - h2) > fabs(Conduit[k].slope) * Conduit[k].length )
                Conduit[k].capacityLimited = TRUE;
        }
    }
}

//=============================================================================

// Computes new flows for every link: conduits in parallel, then node
// flow updates and non-conduit links serially.
void findLinkFlows(double dt)
{
    int i;

    // --- find new flow in each non-dummy conduit
    //     (the omp for loop variable i is implicitly privatized)
#pragma omp parallel num_threads(NumThreads)                                   //(5.1.008)
{
    #pragma omp for                                                            //(5.1.008)
    for ( i = 0; i < Nobjects[LINK]; i++)
    {
        if ( isTrueConduit(i) && !Link[i].bypassed )
            dwflow_findConduitFlow(i, Steps, Omega, dt);
    }
}

    // --- update inflow/outflows for nodes attached to non-dummy conduits
    for ( i = 0; i < Nobjects[LINK]; i++)
    {
        if ( isTrueConduit(i) ) updateNodeFlows(i);
    }

    // --- find new flows for all dummy conduits, pumps & regulators
    for ( i = 0; i < Nobjects[LINK]; i++)
    {
        if ( !isTrueConduit(i) )
        {
            if ( !Link[i].bypassed ) findNonConduitFlow(i, dt);
            updateNodeFlows(i);
        }
    }
}

//=============================================================================

// TRUE for conduit links with a real (non-dummy) cross section.
int isTrueConduit(int j)
{
    return ( Link[j].type == CONDUIT && Link[j].xsect.type != DUMMY );
}

//=============================================================================
void findNonConduitFlow(int i, double dt)
//
//  Input:   i = link index
//           dt = time step (sec)
//  Output:  none
//  Purpose: finds new flow in a non-conduit-type link
//
{
    double qLast;                      // previous link flow (cfs)
    double qNew;                       // new link flow (cfs)

    // --- get link flow from last iteration
    qLast = Link[i].newFlow;
    Link[i].dqdh = 0.0;

    // --- get new inflow to link from its upstream node
    //     (link_getInflow returns 0 if flap gate closed or pump is offline)
    qNew = link_getInflow(i);
    if ( Link[i].type == PUMP ) qNew = getModPumpFlow(i, qNew, dt);

    // --- find surface area at each end of link
    findNonConduitSurfArea(i);

    // --- apply under-relaxation with flow from previous iteration;
    // --- do not allow flow to change direction without first being 0
    if ( Steps > 0 && Link[i].type != PUMP )
    {
        qNew = (1.0 - Omega) * qLast + Omega * qNew;
        if ( qNew * qLast < 0.0 ) qNew = 0.001 * SGN(qNew);
    }
    Link[i].newFlow = qNew;
}

//=============================================================================

double getModPumpFlow(int i, double q, double dt)
//
//  Input:   i = link index
//           q = pump flow from pump curve (cfs)
//           dt = time step (sec)
//  Output:  returns modified pump flow rate (cfs)
//  Purpose: modifies pump curve pumping rate depending on amount of water
//           available at pump's inlet node.
// { int j = Link[i].node1; // pump's inlet node index int k = Link[i].subIndex; // pump's index double newNetInflow; // inflow - outflow rate (cfs) double netFlowVolume; // inflow - outflow volume (ft3) double y; // node depth (ft) if ( q == 0.0 ) return q; // --- case where inlet node is a storage node: // prevent node volume from going negative if ( Node[j].type == STORAGE ) return node_getMaxOutflow(j, q, dt); // --- case where inlet is a non-storage node switch ( Pump[k].type ) { // --- for Type1 pump, a volume is computed for inlet node, // so make sure it doesn't go negative case TYPE1_PUMP: return node_getMaxOutflow(j, q, dt); // --- for other types of pumps, if pumping rate would make depth // at upstream node negative, then set pumping rate = inflow case TYPE2_PUMP: case TYPE4_PUMP: case TYPE3_PUMP: newNetInflow = Node[j].inflow - Node[j].outflow - q; netFlowVolume = 0.5 * (Node[j].oldNetInflow + newNetInflow ) * dt; y = Node[j].oldDepth + netFlowVolume / Xnode[j].newSurfArea; if ( y <= 0.0 ) return Node[j].inflow; } return q; } //============================================================================= void findNonConduitSurfArea(int i) // // Input: i = link index // Output: none // Purpose: finds the surface area contributed by a non-conduit // link to its upstream and downstream nodes. 
//
{
    if ( Link[i].type == ORIFICE )
    {
        Link[i].surfArea1 = Orifice[Link[i].subIndex].surfArea / 2.;
    }

    // --- no surface area for weirs to maintain SWMM 4 compatibility
/*
    else if ( Link[i].type == WEIR )
    {
        Xlink[i].surfArea1 = Weir[Link[i].subIndex].surfArea / 2.;
    }
*/

    else Link[i].surfArea1 = 0.0;
    Link[i].surfArea2 = Link[i].surfArea1;
    // critical flow at an end, or a storage node, gets no area there
    if ( Link[i].flowClass == UP_CRITICAL ||
        Node[Link[i].node1].type == STORAGE ) Link[i].surfArea1 = 0.0;
    if ( Link[i].flowClass == DN_CRITICAL ||
        Node[Link[i].node2].type == STORAGE ) Link[i].surfArea2 = 0.0;
}

//=============================================================================

void updateNodeFlows(int i)
//
//  Input:   i = link index
//           q = link flow rate (cfs)
//  Output:  none
//  Purpose: updates cumulative inflow & outflow at link's end nodes.
//
{
    int    k;                                                                  //(5.1.011)
    int    barrels = 1;
    int    n1 = Link[i].node1;
    int    n2 = Link[i].node2;
    double q  = Link[i].newFlow;
    double uniformLossRate = 0.0;

    // --- compute any uniform seepage loss from a conduit
    if ( Link[i].type == CONDUIT )
    {
        k = Link[i].subIndex;
        uniformLossRate = Conduit[k].evapLossRate + Conduit[k].seepLossRate;
        barrels = Conduit[k].barrels;
    }

    // --- update total inflow & outflow at upstream/downstream nodes
    //     (sign of q determines which node is the flow source)
    if ( q >= 0.0 )
    {
        Node[n1].outflow += q + uniformLossRate;
        Node[n2].inflow += q;
    }
    else
    {
        Node[n1].inflow -= q;
        Node[n2].outflow -= q - uniformLossRate;
    }

    // --- add surf. area contributions to upstream/downstream nodes
    Xnode[Link[i].node1].newSurfArea += Link[i].surfArea1 * barrels;
    Xnode[Link[i].node2].newSurfArea += Link[i].surfArea2 * barrels;

    // --- update summed value of dqdh at each end node
    Xnode[Link[i].node1].sumdqdh += Link[i].dqdh;
    if ( Link[i].type == PUMP )
    {
        k = Link[i].subIndex;
        // Type4 (ideal) pumps skip the downstream dqdh contribution
        if ( Pump[k].type != TYPE4_PUMP )                                      //(5.1.011)
        {
            Xnode[n2].sumdqdh += Link[i].dqdh;
        }
    }
    else Xnode[n2].sumdqdh += Link[i].dqdh;
}

//=============================================================================

int findNodeDepths(double dt)
{
    int i;
    int converged;                     // convergence flag
    double yOld;                       // previous node depth (ft)

    // --- compute outfall depths based on flow in connecting link
    for ( i = 0; i < Nobjects[LINK]; i++ ) link_setOutfallDepth(i);

    // --- compute new depth for all non-outfall nodes and determine if
    //     depth change from previous iteration is below tolerance
    converged = TRUE;
#pragma omp parallel num_threads(NumThreads)                                   //(5.1.008)
{
    #pragma omp for private(yOld)                                              //(5.1.008)
    for ( i = 0; i < Nobjects[NODE]; i++ )
    {
        if ( Node[i].type == OUTFALL ) continue;
        yOld = Node[i].newDepth;
        setNodeDepth(i, dt);
        Xnode[i].converged = TRUE;
        if ( fabs(yOld - Node[i].newDepth) > HeadTol )
        {
            // NOTE(review): converged may be written FALSE by several
            // threads at once; benign since only FALSE is ever stored
            converged = FALSE;
            Xnode[i].converged = FALSE;
        }
    }
}                                                                              //(5.1.008)
    return converged;
}

//=============================================================================

void setNodeDepth(int i, double dt)
//
//  Input:   i = node index
//           dt = time step (sec)
//  Output:  none
//  Purpose: sets depth at non-outfall node after current time step.
//
{
    int     canPond;                   // TRUE if node can pond overflows
    int     isPonded;                  // TRUE if node is currently ponded
    double  dQ;                        // inflow minus outflow at node (cfs)
    double  dV;                        // change in node volume (ft3)
    double  dy;                        // change in node depth (ft)
    double  yMax;                      // max. depth at node (ft)
    double  yOld;                      // node depth at previous time step (ft)
    double  yLast;                     // previous node depth (ft)
    double  yNew;                      // new node depth (ft)
    double  yCrown;                    // depth to node crown (ft)
    double  surfArea;                  // node surface area (ft2)
    double  denom;                     // denominator term
    double  corr;                      // correction factor
    double  f;                         // relative surcharge depth

    // --- see if node can pond water above it
    canPond = (AllowPonding && Node[i].pondedArea > 0.0);
    isPonded = (canPond && Node[i].newDepth > Node[i].fullDepth);

    // --- initialize values
    yCrown = Node[i].crownElev - Node[i].invertElev;
    yOld = Node[i].oldDepth;
    yLast = Node[i].newDepth;
    Node[i].overflow = 0.0;
    surfArea = Xnode[i].newSurfArea;

    // --- determine average net flow volume into node over the time step
    dQ = Node[i].inflow - Node[i].outflow;
    dV = 0.5 * (Node[i].oldNetInflow + dQ) * dt;

    // --- if node not surcharged, base depth change on surface area
    if ( yLast <= yCrown || Node[i].type == STORAGE || isPonded )
    {
        dy = dV / surfArea;
        yNew = yOld + dy;

        // --- save non-ponded surface area for use in surcharge algorithm  //(5.1.002)
        if ( !isPonded ) Xnode[i].oldSurfArea = surfArea;                     //(5.1.002)

        // --- apply under-relaxation to new depth estimate
        if ( Steps > 0 )
        {
            yNew = (1.0 - Omega) * yLast + Omega * yNew;
        }

        // --- don't allow a ponded node to drop much below full depth
        if ( isPonded && yNew < Node[i].fullDepth )
            yNew = Node[i].fullDepth - FUDGE;
    }

    // --- if node surcharged, base depth change on dqdh
    //     NOTE: depth change is w.r.t depth from previous
    //     iteration; also, do not apply under-relaxation.
    else
    {
        // --- apply correction factor for upstream terminal nodes
        corr = 1.0;
        if ( Node[i].degree < 0 ) corr = 0.6;

        // --- allow surface area from last non-surcharged condition
        //     to influence dqdh if depth close to crown depth
        denom = Xnode[i].sumdqdh;
        if ( yLast < 1.25 * yCrown )
        {
            f = (yLast - yCrown) / yCrown;
            denom += (Xnode[i].oldSurfArea/dt -
                      Xnode[i].sumdqdh) * exp(-15.0 * f);
        }

        // --- compute new estimate of node depth
        if ( denom == 0.0 ) dy = 0.0;
        else dy = corr * dQ / denom;
        yNew = yLast + dy;
        // keep a surcharged node at or above its crown depth
        if ( yNew < yCrown ) yNew = yCrown - FUDGE;

        // --- don't allow a newly ponded node to rise much above full depth
        if ( canPond && yNew > Node[i].fullDepth )
            yNew = Node[i].fullDepth + FUDGE;
    }

    // --- depth cannot be negative
    if ( yNew < 0 ) yNew = 0.0;

    // --- determine max. non-flooded depth
    yMax = Node[i].fullDepth;
    if ( canPond == FALSE ) yMax += Node[i].surDepth;

    // --- find flooded depth & volume
    if ( yNew > yMax )
    {
        yNew = getFloodedDepth(i, canPond, dV, yNew, yMax, dt);
    }
    else Node[i].newVolume = node_getVolume(i, yNew);

    // --- compute change in depth w.r.t. time
    Xnode[i].dYdT = fabs(yNew - yOld) / dt;

    // --- save new depth for node
    Node[i].newDepth = yNew;
}

//=============================================================================

double getFloodedDepth(int i, int canPond, double dV, double yNew,
                       double yMax, double dt)
//
//  Input:   i = node index
//           canPond = TRUE if water can pond over node
//           isPonded = TRUE if water is currently ponded
//           dV = change in volume over time step (ft3)
//           yNew = current depth at node (ft)
//           yMax = max. depth at node before ponding (ft)
//           dt = time step (sec)
//  Output:  returns depth at node when flooded (ft)
//  Purpose: computes depth, volume and overflow for a flooded node.
// { if ( canPond == FALSE ) { Node[i].overflow = dV / dt; Node[i].newVolume = Node[i].fullVolume; yNew = yMax; } else { Node[i].newVolume = MAX((Node[i].oldVolume+dV), Node[i].fullVolume); Node[i].overflow = (Node[i].newVolume - MAX(Node[i].oldVolume, Node[i].fullVolume)) / dt; } if ( Node[i].overflow < FUDGE ) Node[i].overflow = 0.0; return yNew; } //============================================================================= double getVariableStep(double maxStep) // // Input: maxStep = user-supplied max. time step (sec) // Output: returns time step (sec) // Purpose: finds time step that satisfies stability criterion but // is no greater than the user-supplied max. time step. // { int minLink = -1; // index of link w/ min. time step int minNode = -1; // index of node w/ min. time step double tMin; // allowable time step (sec) double tMinLink; // allowable time step for links (sec) double tMinNode; // allowable time step for nodes (sec) // --- find stable time step for links & then nodes tMin = maxStep; tMinLink = getLinkStep(tMin, &minLink); tMinNode = getNodeStep(tMinLink, &minNode); // --- use smaller of the link and node time step tMin = tMinLink; if ( tMinNode < tMin ) { tMin = tMinNode ; minLink = -1; } // --- update count of times the minimum node or link was critical stats_updateCriticalTimeCount(minNode, minLink); // --- don't let time step go below an absolute minimum if ( tMin < MinRouteStep ) tMin = MinRouteStep; //(5.1.008) return tMin; } //============================================================================= double getLinkStep(double tMin, int *minLink) // // Input: tMin = critical time step found so far (sec) // Output: minLink = index of link with critical time step; // returns critical time step (sec) // Purpose: finds critical time step for conduits based on Courant criterion. 
// { int i; // link index int k; // conduit index double q; // conduit flow (cfs) double t; // time step (sec) double tLink = tMin; // critical link time step (sec) // --- examine each conduit link for ( i = 0; i < Nobjects[LINK]; i++ ) { if ( Link[i].type == CONDUIT ) { // --- skip conduits with negligible flow, area or Fr k = Link[i].subIndex; q = fabs(Link[i].newFlow) / Conduit[k].barrels; if ( q <= 0.05 * Link[i].qFull || Conduit[k].a1 <= FUDGE || Link[i].froude <= 0.01 ) continue; // --- compute time step to satisfy Courant condition t = Link[i].newVolume / Conduit[k].barrels / q; t = t * Conduit[k].modLength / link_getLength(i); t = t * Link[i].froude / (1.0 + Link[i].froude) * CourantFactor; // --- update critical link time step if ( t < tLink ) { tLink = t; *minLink = i; } } } return tLink; } //============================================================================= double getNodeStep(double tMin, int *minNode) // // Input: tMin = critical time step found so far (sec) // Output: minNode = index of node with critical time step; // returns critical time step (sec) // Purpose: finds critical time step for nodes based on max. allowable // projected change in depth. // { int i; // node index double maxDepth; // max. depth allowed at node (ft) double dYdT; // change in depth per unit time (ft/sec) double t1; // time needed to reach depth limit (sec) double tNode = tMin; // critical node time step (sec) // --- find smallest time so that estimated change in nodal depth // does not exceed safety factor * maxdepth for ( i = 0; i < Nobjects[NODE]; i++ ) { // --- see if node can be skipped if ( Node[i].type == OUTFALL ) continue; if ( Node[i].newDepth <= FUDGE) continue; if ( Node[i].newDepth + FUDGE >= Node[i].crownElev - Node[i].invertElev ) continue; // --- define max. 
allowable depth change using crown elevation maxDepth = (Node[i].crownElev - Node[i].invertElev) * 0.25; if ( maxDepth < FUDGE ) continue; dYdT = Xnode[i].dYdT; if (dYdT < FUDGE ) continue; // --- compute time to reach max. depth & compare with critical time t1 = maxDepth / dYdT; if ( t1 < tNode ) { tNode = t1; *minNode = i; } } return tNode; }
GB_unaryop__ainv_fp32_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// (Comments below were added for review purposes only; no code was changed.)

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_fp32_uint64
// op(A') function:  GB_tran__ainv_fp32_uint64

// C type:   float
// A type:   uint64_t
// cast:     float cij = (float) aij
// unaryop:  cij = -aij   (additive inverse, applied after the cast)

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = -x ;

// casting
#define GB_CASTING(z, aij) \
    float z = (float) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;  \
    GB_OP (GB_CX (pC), z) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_FP32 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise pass over the A values; parallelized with a static OpenMP
// schedule since every iteration does the same constant amount of work.
GrB_Info GB_unop__ainv_fp32_uint64
(
    float *Cx,       // Cx and Ax may be aliased
    uint64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in GB_unaryop_transpose.c, which is textually included
// here and specialized by the GB_* macros defined above.
GrB_Info GB_tran__ainv_fp32_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
zraster.c
#include "../include/GL/gl.h"
#include "../include/zbuffer.h"
#include "msghandling.h"
#include "zgl.h"

/* Transform the raster-position vertex v by the combined model-projection
 * matrix (clip coordinates) and by the top of the modelview stack (eye
 * coordinates), then compute its clip code.  NOTE: incoming W is assumed
 * to be 1. */
static void gl_vertex_transform_raster(GLVertex* v) {
	GLContext* c = gl_get_context();
	{
		/* no eye coordinates needed, no normal */
		/* NOTE: W = 1 is assumed */
		GLfloat* m = &c->matrix_model_projection.m[0][0];
		v->pc.X = (v->coord.X * m[0] + v->coord.Y * m[1] + v->coord.Z * m[2] + m[3]);
		v->pc.Y = (v->coord.X * m[4] + v->coord.Y * m[5] + v->coord.Z * m[6] + m[7]);
		v->pc.Z = (v->coord.X * m[8] + v->coord.Y * m[9] + v->coord.Z * m[10] + m[11]);
		if (c->matrix_model_projection_no_w_transform) {
			v->pc.W = m[15];
		} else {
			v->pc.W = (v->coord.X * m[12] + v->coord.Y * m[13] + v->coord.Z * m[14] + m[15]);
		}
		m = &c->matrix_stack_ptr[0]->m[0][0];
		v->ec.X = (v->coord.X * m[0] + v->coord.Y * m[1] + v->coord.Z * m[2] + m[3]);
		v->ec.Y = (v->coord.X * m[4] + v->coord.Y * m[5] + v->coord.Z * m[6] + m[7]);
		v->ec.Z = (v->coord.X * m[8] + v->coord.Y * m[9] + v->coord.Z * m[10] + m[11]);
		v->ec.W = (v->coord.X * m[12] + v->coord.Y * m[13] + v->coord.Z * m[14] + m[15]);
	}
	v->clip_code = gl_clipcode(v->pc.X, v->pc.Y, v->pc.Z, v->pc.W);
}

/* Queue an OP_RasterPos command with an explicit homogeneous coordinate. */
void glRasterPos4f(GLfloat x, GLfloat y, GLfloat z, GLfloat w) {
	GLParam p[5];
	p[0].op = OP_RasterPos;
	p[1].f = x;
	p[2].f = y;
	p[3].f = z;
	p[4].f = w;
	gl_add_op(p);
}

/* Execute OP_RasterPos: transform the point, and if it is inside the view
 * volume store its window coordinates as the current raster position;
 * otherwise mark the raster position invalid. */
void glopRasterPos(GLParam* p) {
	GLContext* c = gl_get_context();
	GLVertex v;
	v.coord.X = p[1].f;
	v.coord.Y = p[2].f;
	v.coord.Z = p[3].f;
	v.coord.W = p[4].f;
	gl_vertex_transform_raster(&v);
	if (v.clip_code == 0) {
		{
			/* perspective divide + viewport transform to window coords */
			GLfloat winv = 1.0 / v.pc.W;
			v.zp.x = (GLint)(v.pc.X * winv * c->viewport.scale.X + c->viewport.trans.X);
			v.zp.y = (GLint)(v.pc.Y * winv * c->viewport.scale.Y + c->viewport.trans.Y);
			v.zp.z = (GLint)(v.pc.Z * winv * c->viewport.scale.Z + c->viewport.trans.Z);
		}
		c->rasterpos.v[0] = v.zp.x;
		c->rasterpos.v[1] = v.zp.y;
		c->rastervertex = v;
		/* c->rasterpos.v[2] = v.zp.z;*/
		c->rasterpos_zz = v.zp.z >> ZB_POINT_Z_FRAC_BITS;
		c->rasterposvalid = 1;
	} else
		c->rasterposvalid = 0;
}

/* Convenience wrappers: missing z defaults to 0 and w to 1, per the spec. */
void glRasterPos2f(GLfloat x, GLfloat y) { glRasterPos4f(x, y, 0, 1); }
void glRasterPos3f(GLfloat x, GLfloat y, GLfloat z) { glRasterPos4f(x, y, z, 1); }
void glRasterPos2fv(GLfloat* v) { glRasterPos2f(v[0], v[1]); }
void glRasterPos3fv(GLfloat* v) { glRasterPos3f(v[0], v[1], v[2]); }
void glRasterPos4fv(GLfloat* v) { glRasterPos4f(v[0], v[1], v[2], v[3]); }

/* Validate format/type against the compiled framebuffer depth and queue an
 * OP_DrawPixels command.  Only GL_RGB data matching the native pixel layout
 * is accepted.
 * TODO: Come up with a clever scheme for storing the data to avoid pointer
 * dependency (the command keeps a raw pointer into caller memory). */
void glDrawPixels(GLsizei width, GLsizei height, GLenum format, GLenum type, void* data) {
#if TGL_FEATURE_RENDER_BITS == 32
	if (type != GL_UNSIGNED_INT && type != GL_UNSIGNED_INT_8_8_8_8) {
		tgl_warning("\nERROR: Incorrect type for glDrawPixels. It MUST be GL_UNSIGNED_INT or GL_UNSIGNED_INT_8_8_8_8, A R G B!");
		return;
	}
#elif TGL_FEATURE_RENDER_BITS == 16
	if (type != GL_UNSIGNED_SHORT && type != GL_UNSIGNED_SHORT_5_6_5) {
		tgl_warning("\nERROR: Incorrect type for glDrawPixels. it MUST be GL_UNSIGNED_SHORT or GL_UNSIGNED_SHORT_5_6_5, R5 G6 B5!");
		return;
	}
#else
#error "Bad TGL_FEATURE_RENDER_BITS"
#endif
	if (format != GL_RGB) {
		tgl_warning("\nERROR: Incorrect format for glDrawPixels.");
		return;
	}
	GLParam p[6];
	p[0].op = OP_DrawPixels;
	p[1].i = width;
	p[2].i = height;
	p[3].p = data;
	gl_add_op(p);
}

/* Depth test passes when testing is disabled or z is not behind the pixel. */
#define ZCMP(z, zpix) (!(zbdt) || z >= (zpix))
#define CLIPTEST(_x, _y, _w, _h) ((0 <= _x) && (_w > _x) && (0 <= _y) && (_h > _y))

/* Execute OP_DrawPixels: blit the source rectangle to the framebuffer at the
 * current raster position, honoring pixel zoom, depth test/write and
 * (optionally) blending.  Each source pixel covers a pzoomx-by-pzoomy
 * destination rectangle. */
void glopDrawPixels(GLParam* p) {
	GLContext* c = gl_get_context();
	GLint sy, sx, ty, tx;
	GLint w = p[1].i;
	GLint h = p[2].i;
	V4 rastpos = c->rasterpos;
	ZBuffer* zb = c->zb;
	PIXEL* d = p[3].p;
	PIXEL* pbuf = zb->pbuf;
	GLushort* zbuf = zb->zbuf;
	GLubyte zbdw = zb->depth_write;
	GLubyte zbdt = zb->depth_test;
	GLint tw = zb->xsize;
	GLint th = zb->ysize;
	GLfloat pzoomx = c->pzoomx;
	GLfloat pzoomy = c->pzoomy;
	GLint zz = c->rasterpos_zz;
#if TGL_FEATURE_BLEND_DRAW_PIXELS == 1
	TGL_BLEND_VARS
#endif
#if TGL_FEATURE_BLEND == 1
#if TGL_FEATURE_BLEND_DRAW_PIXELS == 1
	GLuint zbeb = zb->enable_blend;
#endif
#endif
	if (!c->rasterposvalid)
		return;
#if TGL_FEATURE_ALT_RENDERMODES == 1
	if (c->render_mode == GL_SELECT) {
		gl_add_select(zz, zz);
		return;
	} else if (c->render_mode == GL_FEEDBACK) {
		gl_add_feedback(GL_DRAW_PIXEL_TOKEN, &(c->rastervertex), NULL, NULL, 0);
		return;
	}
#endif
#if TGL_FEATURE_MULTITHREADED_DRAWPIXELS == 1
	/* BUGFIX: only the parallelized loop variable (sy) is privatized
	 * automatically by OpenMP; sx, ty and tx were implicitly shared,
	 * causing a data race between threads.  Make them private. */
#pragma omp parallel for private(sx, ty, tx)
	for (sy = 0; sy < h; sy++)
		for (sx = 0; sx < w; sx++) {
			PIXEL col = d[sy * w + sx];
			V4 rastoffset;
			rastoffset.v[0] = rastpos.v[0] + (GLfloat)sx * pzoomx;
			rastoffset.v[1] = rastpos.v[1] - ((GLfloat)(h - sy) * pzoomy);
			rastoffset.v[2] = rastoffset.v[0] + pzoomx;
			rastoffset.v[3] = rastoffset.v[1] - pzoomy;
			for (ty = rastoffset.v[1]; (GLfloat)ty > rastoffset.v[3]; ty--)
				for (tx = rastoffset.v[0]; (GLfloat)tx < rastoffset.v[2]; tx++)
					if (CLIPTEST(tx, ty, tw, th)) {
						GLushort* pz = zbuf + (ty * tw + tx);
						if (ZCMP(zz, *pz)) {
#if TGL_FEATURE_BLEND == 1
#if TGL_FEATURE_BLEND_DRAW_PIXELS == 1
							if (!zbeb)
								pbuf[tx + ty * tw] = col;
							else
								TGL_BLEND_FUNC(col, pbuf[tx + ty * tw])
#else
							pbuf[tx + ty * tw] = col;
#endif
#else
							pbuf[tx + ty * tw] = col;
#endif
							if (zbdw)
								*pz = zz;
						}
					}
		}
#else
	for (sy = 0; sy < h; sy++)
		for (sx = 0; sx < w; sx++) {
			PIXEL col = d[sy * w + sx];
			V4 rastoffset;
			rastoffset.v[0] = rastpos.v[0] + (GLfloat)sx * pzoomx;
			rastoffset.v[1] = rastpos.v[1] - ((GLfloat)(h - sy) * pzoomy);
			rastoffset.v[2] = rastoffset.v[0] + pzoomx;
			rastoffset.v[3] = rastoffset.v[1] - pzoomy;
			for (ty = rastoffset.v[1]; (GLfloat)ty > rastoffset.v[3]; ty--)
				for (tx = rastoffset.v[0]; (GLfloat)tx < rastoffset.v[2]; tx++)
					if (CLIPTEST(tx, ty, tw, th)) {
						GLushort* pz = zbuf + (ty * tw + tx);
						if (ZCMP(zz, *pz)) {
#if TGL_FEATURE_BLEND == 1
#if TGL_FEATURE_BLEND_DRAW_PIXELS == 1
							if (!zbeb)
								pbuf[tx + ty * tw] = col;
							else
								TGL_BLEND_FUNC(col, pbuf[tx + ty * tw])
#else
							pbuf[tx + ty * tw] = col;
#endif
#else
							pbuf[tx + ty * tw] = col;
#endif
							if (zbdw)
								*pz = zz;
						}
					}
		}
#endif
}

/* Queue an OP_PixelZoom command (zoom factors for glDrawPixels). */
void glPixelZoom(GLfloat x, GLfloat y) {
	GLParam p[3];
	p[0].op = OP_PixelZoom;
	p[1].f = x;
	p[2].f = y;
	gl_add_op(p);
}

/* Execute OP_PixelZoom: store the current pixel zoom factors. */
void glopPixelZoom(GLParam* p) {
	GLContext* c = gl_get_context();
	c->pzoomx = p[1].f;
	c->pzoomy = p[2].f;
}
polybench.c
/**
 * polybench.c: This file is part of the PolyBench/C 3.2 test suite.
 *
 *
 * Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
 * Web address: http://polybench.sourceforge.net
 */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <assert.h>
#include <time.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sched.h>
#include <math.h>

/* By default, collect PAPI counters on thread 0. */
#ifndef POLYBENCH_THREAD_MONITOR
# define POLYBENCH_THREAD_MONITOR 0
#endif

/* Total LLC cache size. By default 32+MB.. */
#ifndef POLYBENCH_CACHE_SIZE_KB
# define POLYBENCH_CACHE_SIZE_KB 32770
#endif

int polybench_papi_counters_threadid = POLYBENCH_THREAD_MONITOR;
double polybench_program_total_flops = 0;

#ifdef POLYBENCH_PAPI
# include <papi.h>
# define POLYBENCH_MAX_NB_PAPI_COUNTERS 96
char* _polybench_papi_eventlist[] = {
#include "papi_counters.list"
  NULL
};
int polybench_papi_eventset;
int polybench_papi_eventlist[POLYBENCH_MAX_NB_PAPI_COUNTERS];
long_long polybench_papi_values[POLYBENCH_MAX_NB_PAPI_COUNTERS];
#endif

/* Timer code (gettimeofday). */
double polybench_t_start, polybench_t_end;
/* Timer code (RDTSC). */
unsigned long long int polybench_c_start, polybench_c_end;

/* Wall-clock time in seconds, or 0 when timing is compiled out. */
static double rtclock()
{
#ifdef POLYBENCH_TIME
  struct timeval Tp;
  int stat;
  stat = gettimeofday (&Tp, NULL);
  if (stat != 0)
    printf ("Error return from gettimeofday: %d", stat);
  return (Tp.tv_sec + Tp.tv_usec * 1.0e-6);
#else
  return 0;
#endif
}

#ifdef POLYBENCH_CYCLE_ACCURATE_TIMER
/* Raw x86 cycle counter (RDTSC); not serialized, use for coarse counts. */
static unsigned long long int rdtsc()
{
  unsigned long long int ret = 0;
  unsigned int cycles_lo;
  unsigned int cycles_hi;
  __asm__ volatile ("RDTSC" : "=a" (cycles_lo), "=d" (cycles_hi));
  ret = (unsigned long long int)cycles_hi << 32 | cycles_lo;
  return ret;
}
#endif

/* Touch a buffer larger than the LLC so cached benchmark data is evicted
   before timing starts. */
void polybench_flush_cache()
{
  int cs = POLYBENCH_CACHE_SIZE_KB * 1024 / sizeof(double);
  double* flush = (double*) calloc (cs, sizeof(double));
  int i;
  double tmp = 0.0;
  /* BUGFIX: tmp was updated concurrently without a reduction clause,
     a data race under OpenMP. */
#ifdef _OPENMP
#pragma omp parallel for reduction(+:tmp)
#endif
  for (i = 0; i < cs; i++)
    tmp += flush[i];
  /* flush[] is zero-initialized, so the sum must stay (near) zero; the
     assert keeps the compiler from optimizing the reads away. */
  assert (tmp <= 10.0);
  free (flush);
}

#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
void polybench_linux_fifo_scheduler()
{
  /* Use FIFO scheduler to limit OS interference. Program must be run
     as root, and this works only for Linux kernels. */
  struct sched_param schedParam;
  schedParam.sched_priority = sched_get_priority_max (SCHED_FIFO);
  sched_setscheduler (0, SCHED_FIFO, &schedParam);
}

void polybench_linux_standard_scheduler()
{
  /* Restore to standard scheduler policy. */
  struct sched_param schedParam;
  schedParam.sched_priority = sched_get_priority_max (SCHED_OTHER);
  sched_setscheduler (0, SCHED_OTHER, &schedParam);
}
#endif

#ifdef POLYBENCH_PAPI
/* Report a failed PAPI call and abort the run. */
static void test_fail(char *file, int line, char *call, int retval)
{
  char buf[128];
  memset(buf, '\0', sizeof(buf));
  if (retval != 0)
    fprintf (stdout,"%-40s FAILED\nLine # %d\n", file, line);
  else
    {
      fprintf (stdout,"%-40s SKIPPED\n", file);
      fprintf (stdout,"Line # %d\n", line);
    }
  if (retval == PAPI_ESYS)
    {
      sprintf (buf, "System error in %s", call);
      perror (buf);
    }
  else if (retval > 0)
    fprintf (stdout,"Error: %s\n", call);
  else if (retval == 0)
    fprintf (stdout,"Error: %s\n", call);
  else
    {
      char errstring[PAPI_MAX_STR_LEN];
      PAPI_perror (retval, errstring, PAPI_MAX_STR_LEN);
      fprintf (stdout,"Error in %s: %s\n", call, errstring);
    }
  fprintf (stdout,"\n");
  if (PAPI_is_initialized ())
    PAPI_shutdown ();
  exit (1);
}

/* Initialize the PAPI library and translate the configured event names
   into event codes; runs only on the monitored thread under OpenMP. */
void polybench_papi_init()
{
# ifdef _OPENMP
#pragma omp parallel
  {
#pragma omp master
    {
      if (omp_get_max_threads () < polybench_papi_counters_threadid)
	polybench_papi_counters_threadid = omp_get_max_threads () - 1;
    }
#pragma omp barrier
    if (omp_get_thread_num () == polybench_papi_counters_threadid)
      {
# endif
	int retval;
	polybench_papi_eventset = PAPI_NULL;
	if ((retval = PAPI_library_init (PAPI_VER_CURRENT)) != PAPI_VER_CURRENT)
	  test_fail (__FILE__, __LINE__, "PAPI_library_init", retval);
	if ((retval = PAPI_create_eventset (&polybench_papi_eventset))
	    != PAPI_OK)
	  test_fail (__FILE__, __LINE__, "PAPI_create_eventset", retval);
	int k;
	for (k = 0; _polybench_papi_eventlist[k]; ++k)
	  {
	    if ((retval =
		 PAPI_event_name_to_code (_polybench_papi_eventlist[k],
					  &(polybench_papi_eventlist[k])))
		!= PAPI_OK)
	      test_fail (__FILE__, __LINE__, "PAPI_event_name_to_code", retval);
	  }
	polybench_papi_eventlist[k] = 0;
# ifdef _OPENMP
      }
  }
#pragma omp barrier
# endif
}

/* Destroy the PAPI event set and shut the library down. */
void polybench_papi_close()
{
# ifdef _OPENMP
#pragma omp parallel
  {
    if (omp_get_thread_num () == polybench_papi_counters_threadid)
      {
# endif
	int retval;
	if ((retval = PAPI_destroy_eventset (&polybench_papi_eventset))
	    != PAPI_OK)
	  test_fail (__FILE__, __LINE__, "PAPI_destroy_eventset", retval);
	if (PAPI_is_initialized ())
	  PAPI_shutdown ();
# ifdef _OPENMP
      }
  }
#pragma omp barrier
# endif
}

/* Arm a single PAPI counter (event index evid) and start counting. */
int polybench_papi_start_counter(int evid)
{
# ifndef POLYBENCH_NO_FLUSH_CACHE
  polybench_flush_cache();
# endif
# ifdef _OPENMP
# pragma omp parallel
  {
    if (omp_get_thread_num () == polybench_papi_counters_threadid)
      {
# endif
	int retval = 1;
	char descr[PAPI_MAX_STR_LEN];
	PAPI_event_info_t evinfo;
	PAPI_event_code_to_name (polybench_papi_eventlist[evid], descr);
	if (PAPI_add_event (polybench_papi_eventset,
			    polybench_papi_eventlist[evid]) != PAPI_OK)
	  test_fail (__FILE__, __LINE__, "PAPI_add_event", 1);
	if (PAPI_get_event_info (polybench_papi_eventlist[evid], &evinfo)
	    != PAPI_OK)
	  test_fail (__FILE__, __LINE__, "PAPI_get_event_info", retval);
	if ((retval = PAPI_start (polybench_papi_eventset)) != PAPI_OK)
	  test_fail (__FILE__, __LINE__, "PAPI_start", retval);
# ifdef _OPENMP
      }
  }
#pragma omp barrier
# endif
  return 0;
}

/* Stop the active counter, record its value and remove the event. */
void polybench_papi_stop_counter(int evid)
{
# ifdef _OPENMP
# pragma omp parallel
  {
    if (omp_get_thread_num () == polybench_papi_counters_threadid)
      {
# endif
	int retval;
	long_long values[1];
	values[0] = 0;
	if ((retval = PAPI_read (polybench_papi_eventset, &values[0]))
	    != PAPI_OK)
	  test_fail (__FILE__, __LINE__, "PAPI_read", retval);
	if ((retval = PAPI_stop (polybench_papi_eventset, NULL)) != PAPI_OK)
	  test_fail (__FILE__, __LINE__, "PAPI_stop", retval);
	polybench_papi_values[evid] = values[0];
	if ((retval = PAPI_remove_event (polybench_papi_eventset,
					 polybench_papi_eventlist[evid]))
	    != PAPI_OK)
	  test_fail (__FILE__, __LINE__, "PAPI_remove_event", retval);
# ifdef _OPENMP
      }
  }
#pragma omp barrier
# endif
}

/* Print the recorded counter values (verbose mode adds event names). */
void polybench_papi_print()
{
  int verbose = 0;
# ifdef _OPENMP
# pragma omp parallel
  {
    if (omp_get_thread_num() == polybench_papi_counters_threadid)
      {
#ifdef POLYBENCH_PAPI_VERBOSE
	verbose = 1;
#endif
	if (verbose)
	  printf ("On thread %d:\n", polybench_papi_counters_threadid);
#endif
	int evid;
	for (evid = 0; polybench_papi_eventlist[evid] != 0; ++evid)
	  {
	    if (verbose)
	      printf ("%s=", _polybench_papi_eventlist[evid]);
	    printf ("%llu ", polybench_papi_values[evid]);
	    if (verbose)
	      printf ("\n");
	  }
	printf ("\n");
# ifdef _OPENMP
      }
  }
#pragma omp barrier
# endif
}
#endif
/* ! POLYBENCH_PAPI */

/* Flush caches (and optionally switch to FIFO scheduling) right before
   the timed kernel. */
void polybench_prepare_instruments()
{
#ifndef POLYBENCH_NO_FLUSH_CACHE
  polybench_flush_cache ();
#endif
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
  polybench_linux_fifo_scheduler ();
#endif
}

/* Record the start timestamp (seconds or cycles). */
void polybench_timer_start()
{
  polybench_prepare_instruments ();
#ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
  polybench_t_start = rtclock ();
#else
  polybench_c_start = rdtsc ();
#endif
}

/* Record the stop timestamp and restore the scheduler if changed. */
void polybench_timer_stop()
{
#ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
  polybench_t_end = rtclock ();
#else
  polybench_c_end = rdtsc ();
#endif
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
  polybench_linux_standard_scheduler ();
#endif
}

/* Print the elapsed time (or GFLOP/s when POLYBENCH_GFLOPS is set),
   prefixed by the given label. */
void polybench_timer_print(char *text)
{
#ifdef POLYBENCH_GFLOPS
  /* BUGFIX: this branch referenced the undeclared identifier
     __polybench_program_total_flops and was missing the comma after
     'text', so it did not compile. */
  if (polybench_program_total_flops == 0)
    {
      printf ("[PolyBench][WARNING] Program flops not defined, use polybench_set_program_flops(value)\n");
      printf ("%0.6lf\n", polybench_t_end - polybench_t_start);
    }
  else
    printf ("%s: %0.2lf\n", text,
	    (polybench_program_total_flops /
	     (double)(polybench_t_end - polybench_t_start)) / 1000000000);
#else
# ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
  printf ("%s: %0.6f\n", text, polybench_t_end - polybench_t_start);
# else
  /* BUGFIX: "%Ld" is not a standard conversion for unsigned long long. */
  printf ("%s: %llu\n", text,
	  (unsigned long long)(polybench_c_end - polybench_c_start));
# endif
#endif
}

/* 32-byte aligned allocation; aborts the program on failure. */
static void *
xmalloc (size_t num)
{
  void* new = NULL;
  int ret = posix_memalign (&new, 32, num);
  if (! new || ret)
    {
      fprintf (stderr, "[PolyBench] posix_memalign: cannot allocate memory");
      exit (1);
    }
  return new;
}

/* Allocate an aligned data block of n elements of elt_size bytes each.
   The caller owns the returned memory (release with free). */
void* polybench_alloc_data(unsigned long long int n, int elt_size)
{
  /* Detect multiplication overflow before computing the byte count
     (resolves the long-standing FIXME). */
  if (elt_size <= 0 || n > SIZE_MAX / (size_t) elt_size)
    {
      fprintf (stderr, "[PolyBench] allocation size overflow");
      exit (1);
    }
  size_t val = (size_t) n * (size_t) elt_size;
  void* ret = xmalloc (val);
  return ret;
}
eigrp_fmt_plug.c
/*
 * Cracker for EIGRP (Cisco's proprietary routing protocol) MD5 + HMAC-SHA-256 authentication.
 * http://tools.ietf.org/html/draft-savage-eigrp-00
 *
 * This is dedicated to Darya. You inspire me.
 *
 * This software is Copyright (c) 2014, Dhiru Kholia <dhiru [at] openwall.com>,
 * and it is hereby released to the general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_eigrp;
#elif FMT_REGISTERS_H
john_register_one(&fmt_eigrp);
#else

#include <string.h>

#ifdef _OPENMP
#include <omp.h>
// OMP_SCALE on Intel core i7
// 2048 - 12030k/11596k
// 4096 - 12575k/13114k
// 8192 - 13316k/13921k
// 16k  - 13547k/14458k
// 32k  - 16106k/14700k
// 64k  - 16106k/14700k
// 64k  - 16674k/14674k
// 128k - 17795k/14663k  --test=0 has a tiny delay, but not bad.
#ifdef __MIC__
#ifndef OMP_SCALE
#define OMP_SCALE 8192
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 131072
#endif
#endif
#endif

#include "arch.h"
#include "md5.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"
#include "escrypt/sha256.h"

#define FORMAT_LABEL            "eigrp"
#define FORMAT_NAME             "EIGRP MD5 / HMAC-SHA-256 authentication"
#define FORMAT_TAG              "$eigrp$"
#define TAG_LENGTH              (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME          "MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        0
#define PLAINTEXT_LENGTH        81 // IOU accepts larger strings but doesn't use them fully, passwords are zero padded to a minimum length of 16 (for MD5 hashes only)!
#define BINARY_SIZE             16 // MD5 hash or first 16 bytes of HMAC-SHA-256
#define BINARY_ALIGN            sizeof(uint32_t)
#define SALT_SIZE               sizeof(struct custom_salt)
#define SALT_ALIGN              sizeof(int)
#define MAX_SALT_SIZE           1024
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1
#define HEXCHARS                "0123456789abcdef"

// Self-test / benchmark vectors: "$eigrp$" algo "$" salt "$" have-extra
// "$" extra-salt (or junk) [ "$1$" ip ] "$" binary-hash, plus plaintext.
static struct fmt_tests tests[] = {
	{"$eigrp$2$020500000000000000000000000000000000002a000200280002001000000001000000000000000000000000$0$x$1a42aaf8ebe2f766100ea1fa05a5fa55", "password12345"},
	{"$eigrp$2$020500000000000000000000000000000000002a000200280002001000000001000000000000000000000000$0$x$f29e7d44351d37e6fc71e2aacca63d28", "1234567812345"},
	{"$eigrp$2$020500000000000000000000000000000000002a000200280002001000000001000000000000000000000000$1$0001000c010001000000000f000400080500030000f5000c0000000400$560c87396267310978883da92c0cff90", "password12345"},
	{"$eigrp$2$020500000000000000000000000000000000002a000200280002001000000001000000000000000000000000$0$x$61f237e29d28538a372f01121f2cd12f", "123456789012345678901234567890"},
	{"$eigrp$2$0205000000000000000000000000000000000001000200280002001000000001000000000000000000000000$0$x$212acb1cb76b31a810a9752c5cf6f554", "ninja"}, // this one is for @digininja :-)
	{"$eigrp$3$020500000000000000000000000000000000000a00020038000300200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000c010001000000000f000400080f00020000f5000a000000020000$0$x$1$10.0.0.2$cff66484cea20c6f58f175f8c004fc6d73be72090e53429c2616309aca38d5f3", "password12345"}, // HMAC-SHA-256 hash
	{NULL}
};

// Per-candidate key buffers and the computed hash output.
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *saved_len;
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];

// Parsed ciphertext: packet salt, optional extra salt, optional peer IP
// (HMAC-SHA-256 only), and a pre-hashed MD5 context of the salt (algo 2).
static struct custom_salt {
	int length;
	int algo_type;
	int have_extra_salt;
	int extra_salt_length;
	unsigned char salt[MAX_SALT_SIZE];
	char ip[45 + 1];
	int ip_length;
	MD5_CTX prep_salt;
	unsigned char extra_salt[MAX_SALT_SIZE];
} *cur_salt;

// Allocate key/hash buffers, scaled up for OpenMP throughput.
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();

	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_key));
	saved_len = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_len));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*crypt_out));
}

static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_len);
	MEM_FREE(saved_key);
}

// Validate a candidate ciphertext string field by field; returns 1 if it
// matches the expected "$eigrp$..." layout, 0 otherwise.
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *ptrkeep;
	int res;

	if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		return 0;

	ptrkeep = strdup(ciphertext);
	p = &ptrkeep[TAG_LENGTH];

	if ((p = strtokm(p, "$")) == NULL)
		goto err;
	if (!isdec(p))
		goto err;
	res = atoi(p);
	if (res != 2 && res != 3) // MD5 hashes + HMAC-SHA256 hashes
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) // salt
		goto err;
	if (strlen(p) > MAX_SALT_SIZE*2)
		goto err;
	if (!ishexlc(p))
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL)
		goto err;
	if (!isdec(p))
		goto err;
	res = atoi(p);
	if (res > 1)
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) // salt2 (or a junk field)
		goto err;
	if (res == 1) {
		// we only care about extra salt IF that number was a 1
		if (strlen(p) > MAX_SALT_SIZE*2)
			goto err;
		if (!ishexlc(p))
			goto err;
	}
	if ((p = strtokm(NULL, "$")) == NULL) // binary hash (or IP)
		goto err;
	if (!strcmp(p, "1")) {
		// this was an IP
		if ((p = strtokm(NULL, "$")) == NULL) // IP
			goto err;
		// not doing too much IP validation.  Length will have to do.
		// 5 char ip 'could' be 127.1  I know of no short IP. 1.1.1.1 is longer.
		if (strlen(p) < 5 || strlen(p) > sizeof(cur_salt->ip))
			goto err;
		if ((p = strtokm(NULL, "$")) == NULL) // ok, now p is binary.
			goto err;
	}
	res = strlen(p);
	if (res != BINARY_SIZE * 2 && res != 32 * 2)
		goto err;
	if (!ishexlc(p))
		goto err;
	MEM_FREE(ptrkeep);
	return 1;
err:
	MEM_FREE(ptrkeep);
	return 0;
}

// Decode the ciphertext fields into a custom_salt.  For MD5 (algo 2) the
// salt is pre-hashed once here instead of per candidate in crypt_all().
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	int i, len;
	char *p, *q;

	memset(&cs, 0, SALT_SIZE);
	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		ciphertext += TAG_LENGTH;
	p = ciphertext;
	cs.algo_type = atoi(p);
	p = p + 2; // skip the algo digit and the '$'; salt starts here
	q = strchr(p, '$');
	len = (q - p) / 2;
	cs.length = len;
	for (i = 0; i < len; i++)
		cs.salt[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) |
			atoi16[ARCH_INDEX(p[2 * i + 1])];
	q = q + 1;
	cs.have_extra_salt = atoi(q);
	if (cs.have_extra_salt == 1) {
		p = q + 2;
		q = strchr(p, '$');
		cs.extra_salt_length = (q - p) / 2;
		for (i = 0; i < cs.extra_salt_length; i++)
			cs.extra_salt[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) |
				atoi16[ARCH_INDEX(p[2 * i + 1])];
	} else {
		/* skip over extra_salt */
		p = q + 2;
		q = strchr(p, '$');
	}
	/* dirty hack for HMAC-SHA-256 support */
	if (*q == '$' && *(q+1) == '1' && *(q+2) == '$') {
		/* IP destination field */
		p = q + 3;
		q = strchr(p, '$');
		cs.ip_length = q - p;
		// NOTE(review): cs.ip may be unterminated when exactly 46
		// chars long; it is only ever consumed via ip_length
		// (memcpy in crypt_all), never as a C string.
		strncpy(cs.ip, p, cs.ip_length);
	}

	/* Better do this once than 10 million times per second */
	if (cs.algo_type == 2) {
		MD5_Init(&cs.prep_salt);
		MD5_Update(&cs.prep_salt, cs.salt, cs.length);
	}
	return &cs;
}

// Decode the trailing hex hash into BINARY_SIZE raw bytes (for SHA-256
// hashes only the first 16 bytes are compared).
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	p = strrchr(ciphertext, '$') + 1;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) |
			atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}

	return out;
}

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

// Passwords shorter than 16 are zero padded for the MD5 variant.
static unsigned char zeropad[16] = {0};

// Hash every queued candidate against the current salt.  Without OpenMP,
// MAX_KEYS_PER_CRYPT is 1 and the block runs once with index == 0.
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		MD5_CTX ctx;

		if (cur_salt->algo_type == 2) {
			// MD5( salt . password [. zeropad] [. extra_salt] )
			memcpy(&ctx, &cur_salt->prep_salt, sizeof(MD5_CTX));
			MD5_Update(&ctx, saved_key[index], saved_len[index]);
			if (saved_len[index] < 16) {
				MD5_Update(&ctx, zeropad, 16 - saved_len[index]);
			}
			// do we have extra_salt?
			if (cur_salt->have_extra_salt) {
				MD5_Update(&ctx, cur_salt->extra_salt,
						cur_salt->extra_salt_length);
			}
			MD5_Final((unsigned char*)crypt_out[index], &ctx);
		} else {
			// HMAC-SHA-256 over the packet salt, keyed by
			// '\n' + password + destination IP
			HMAC_SHA256_CTX hctx[1];
			unsigned char output[32];
			unsigned char buffer[1 + PLAINTEXT_LENGTH + 45 + 1] = { 0 };
			// HMAC key ==> '\n' + password + IP address
			buffer[0] = '\n'; // WTF?
			memcpy(buffer + 1, saved_key[index], saved_len[index]);
			memcpy(buffer + 1 + saved_len[index], cur_salt->ip,
					cur_salt->ip_length);
			HMAC__SHA256_Init(hctx, buffer,
					1 + saved_len[index] + cur_salt->ip_length);
			HMAC__SHA256_Update(hctx, cur_salt->salt, cur_salt->length);
			HMAC__SHA256_Final(output, hctx);
			memcpy((unsigned char*)crypt_out[index], output, BINARY_SIZE);
		}
	}
	return count;
}

// Quick scan: does any computed hash share its first 32 bits with binary?
static int cmp_all(void *binary, int count)
{
	int index = 0;
#ifdef _OPENMP
	for (; index < count; index++)
#endif
		if (((uint32_t*)binary)[0] == crypt_out[index][0])
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

// Full plaintext verification is not needed; cmp_one compares the whole
// stored binary already.
static int cmp_exact(char *source, int index)
{
	return 1;
}

static void eigrp_set_key(char *key, int index)
{
	saved_len[index] = strnzcpyn(saved_key[index], key, PLAINTEXT_LENGTH + 1);
}

static char *get_key(int index)
{
	return saved_key[index];
}

// Tunable-cost hook: reports the algorithm id (2 = MD5, 3 = HMAC-SHA-256).
static unsigned int get_cost(void *salt)
{
	return (unsigned int)((struct custom_salt*)salt)->algo_type;
}

struct fmt_main fmt_eigrp = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
		{
			"algorithm [2:MD5 3:HMAC-SHA-256]",
		},
		{ FORMAT_TAG },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{
			get_cost,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		eigrp_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif
omp_init_lock.c
// RUN: %libomp-compile-and-run // REQUIRES: dummy #include "omp_testsuite.h" #include <stdio.h> // This should be slightly less than KMP_I_LOCK_CHUNK, which is 1024 #define LOCKS_PER_ITER 1000 #define ITERATIONS (REPETITIONS + 1) // This tests concurrently using locks on one thread while initializing new // ones on another thread. This exercises the global lock pool. int test_omp_init_lock() { int i; omp_lock_t lcks[ITERATIONS * LOCKS_PER_ITER]; #pragma omp parallel for schedule(static) num_threads(NUM_TASKS) for (i = 0; i < ITERATIONS; i++) { int j; omp_lock_t *my_lcks = &lcks[i * LOCKS_PER_ITER]; for (j = 0; j < LOCKS_PER_ITER; j++) { omp_init_lock(&my_lcks[j]); } for (j = 0; j < LOCKS_PER_ITER * 100; j++) { omp_set_lock(&my_lcks[j % LOCKS_PER_ITER]); omp_unset_lock(&my_lcks[j % LOCKS_PER_ITER]); } } // Wait until all repetitions are done. The test is exercising growth of // the global lock pool, which does not shrink when no locks are allocated. { int j; for (j = 0; j < ITERATIONS * LOCKS_PER_ITER; j++) { omp_destroy_lock(&lcks[j]); } } return 0; } int main() { // No use repeating this test, since it's exercising a private global pool // which is not reset between test iterations. return test_omp_init_lock(); }
main.c
// 2D lid-driven Cavity flow with explicit, central-difference projection method
// See Ferziger, Computational Methods for Fluid Dynamics, section 7.3.2
// Travis Burrows
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>

// Simulation Parameters
#define N        25      // Number of points to discretize equations in X and Y (including BCs)
#define Re       100.0   // Reynolds number
#define SAVETXT  1       // Controls whether to save a text file output
#define dt       0.04    // dt
#define THRESH   1E-8    // Defines convergence of dp/dt
#define DEBUG    1       // Prints extra information
#define OMEGA    1.95    // SOR Relaxation factor
#define PARALLEL 1       // toggles parallel spatial loops

// Global Constants
#define H        1.0     // length of side of square domain
#define MAXITER  1E6     // Maximum iterations

// Macros
#define LU(i,j,Ni) (((Ni) * (j)) + (i))   // Look-up function, 2d, Ni x Nj
#define P2(x)      ((x) * (x))            // x^2

// Function Prototypes
double* malloc_vectord (int n1);
void    free_vectord   (double *a);
void    zeros          (double *array, int n);
void    linspace       (double *array, double start, double stop, int num);
void    copy           (double *source, double *destination, int n);
void    datwrite       (char filename[], char name1[], double *x, char name2[], double *y,
                        char name3[], double *value3, char name4[], double *value4,
                        char name5[], double *value5);
void    printVector    (char name[], double *vector, int n);
void    printMatrix    (char name[], double *vector, int n1, int n2);
void    enforceBCs     (double *u, double *v, double *p);
double  diff           (double *array1, double *array2, int n);
double  mean           (double *array, int n);
void    datwrite1      (char filename[], char name1[], double *x, char name2[], double *y,
                        char name3[], double *value3, int nx, int ny);

int main(void)
{
    // Staggered-grid layout:
    //   U has gridpoints on side boundaries, and sandwiching top / bottom boundaries (N-1 x N)
    //   V has gridpoints on top / bottom boundaries, and sandwiching side boundaries (N x N-1)
    //   P has gridpoints sandwiching all boundaries (N x N)
    // All grids have the same dx, and are offset from each other by dx/2.
    // U grid is vertical midpoints of P, V grid is horizontal midpoints of P.

    // Allocate memory (Up/Vp/Pp hold the previous time step; Pp2 the previous SOR sweep)
    double *U   = malloc_vectord(N*(N-1));
    double *V   = malloc_vectord(N*(N-1));
    double *P   = malloc_vectord(N*N);
    double *Up  = malloc_vectord(N*(N-1));
    double *Vp  = malloc_vectord(N*(N-1));
    double *Pp  = malloc_vectord(N*N);
    double *Pp2 = malloc_vectord(N*N);
    double *xP  = malloc_vectord(N*N);
    double *yP  = malloc_vectord(N*N);
    double *Hx  = malloc_vectord(N*(N-1));   // explicit RHS (advection + diffusion) on u grid
    double *Hy  = malloc_vectord(N*(N-1));   // explicit RHS (advection + diffusion) on v grid

    // Face/neighbor scratch values used inside the stencil loops
    double Ue,Uw,Un,Us,Vn,Vs,UP,UE,UW,US,UN,Vne,Vnw,Vse,Vsw,VP,VE,VW,VN,VS,Une,Unw,Use,Usw,Ve,Vw;
    double du,dv,dp,start,stop;

    // Initialize variables
    double dx = H / (N-2);   // interior spacing; two ghost layers included in N
    double dy = dx;
    linspace(xP, -dx/2.0, H+dx/2.0, N);   // cell-center coordinates (ghosts at -dx/2, H+dx/2)
    copy(xP,yP,N);                        // square domain: y coordinates equal x coordinates

    char filename[80];
    sprintf(filename,"Solution_n=%dRe=%.0f.txt",N,Re);

    if (DEBUG == 1){
        printf("dx = %.3f\n", dx);
        printVector("x_P", xP, N);
    }

    // Initial guess: fluid at rest, zero pressure
    zeros(U,N*(N-1));
    zeros(V,N*(N-1));
    zeros(Hx,N*(N-1));
    zeros(Hy,N*(N-1));
    zeros(P,N*N);
    enforceBCs(U,V,P);

    // Set number of parallel threads
    int maxthreads = 1;
    if (PARALLEL==1)
        maxthreads = omp_get_max_threads();
    printf("Threads used:\t%d\n", maxthreads);
    omp_set_num_threads(maxthreads);

    // Main time-marching iteration
    for (int k = 0; k < MAXITER; k++){
        start = omp_get_wtime();
        copy(U,Up,N*(N-1));
        copy(V,Vp,N*(N-1));
        copy(P,Pp,N*N);

        // Find Hx (on u grid): central-difference diffusion minus advection flux divergence
        #pragma omp parallel for private(Ue,Uw,Un,Us,Vn,Vs,UP,UE,UW,US,UN,Vne,Vnw,Vse,Vsw) collapse(2)
        for (int i = 1; i < N-2; i++){
            for (int j = 1; j < N-1; j++){
                UP = Up[LU(i,j,N-1)];
                UN = Up[LU(i,j+1,N-1)];
                US = Up[LU(i,j-1,N-1)];
                UE = Up[LU(i+1,j,N-1)];
                UW = Up[LU(i-1,j,N-1)];
                Ue = 0.5 * (UE + UP);
                Uw = 0.5 * (UW + UP);
                Un = 0.5 * (UN + UP);
                Us = 0.5 * (US + UP);
                // v interpolated from the four surrounding v-grid points
                Vne = Vp[LU(i+1,j,N)];
                Vnw = Vp[LU(i,j,N)];
                Vse = Vp[LU(i+1,j-1,N)];
                Vsw = Vp[LU(i,j-1,N)];
                Vn = 0.5 * (Vne + Vnw);
                Vs = 0.5 * (Vse + Vsw);
                Hx[LU(i,j,N-1)] = (1.0/Re) * ((UE + UW - 2*UP)/P2(dx) + (UN + US - 2*UP)/P2(dy))
                                - ((P2(Ue) - P2(Uw))/dx + (Un*Vn - Us*Vs)/dy);
            }
        }

        // Find Hy (on v grid)
        #pragma omp parallel for private(VP,VE,VW,VN,VS,Une,Unw,Use,Usw,Ve,Vw,Vn,Vs,Ue,Uw) collapse(2)
        for (int i = 1; i < N-1; i++){
            for (int j = 1; j < N-2; j++){
                VP = Vp[LU(i,j,N)];
                VN = Vp[LU(i,j+1,N)];
                VS = Vp[LU(i,j-1,N)];
                VE = Vp[LU(i+1,j,N)];
                VW = Vp[LU(i-1,j,N)];
                Ve = 0.5 * (VE + VP);
                Vw = 0.5 * (VW + VP);
                Vn = 0.5 * (VN + VP);
                Vs = 0.5 * (VS + VP);
                // u interpolated from the four surrounding u-grid points
                Une = Up[LU(i,j+1,N-1)];
                Unw = Up[LU(i-1,j+1,N-1)];
                Use = Up[LU(i,j,N-1)];
                Usw = Up[LU(i-1,j,N-1)];
                Ue = 0.5 * (Une + Use);
                Uw = 0.5 * (Unw + Usw);
                Hy[LU(i,j,N)] = (1.0/Re) * ((VE + VW - 2*VP)/P2(dx) + (VN + VS - 2*VP)/P2(dy))
                              - ((P2(Vn) - P2(Vs))/dy + (Ue*Ve - Uw*Vw)/dx);
            }
        }

        // Solve the pressure Poisson equation with SOR.
        // NOTE(review): the relaxation reads neighbor values of P that other
        // threads may be updating concurrently (chaotic/asynchronous SOR);
        // Pp2 holds the previous sweep for the relaxation term — confirm this
        // is the intended scheme.
        double PE,PW,PN,PS,Hxe,Hxw,Hyn,Hys;
        for (int kk = 0; kk < MAXITER; kk++) {
            copy(P,Pp2,N*N);
            #pragma omp parallel for private(PE,PW,PN,PS,Hxe,Hxw,Hyn,Hys) collapse(2)
            for (int i = 1; i < N - 1; i++) {
                for (int j = 1; j < N - 1; j++) {
                    PE  = P[LU(i+1,j,N)];
                    PW  = P[LU(i-1,j,N)];
                    PN  = P[LU(i,j+1,N)];
                    PS  = P[LU(i,j-1,N)];
                    Hxe = Hx[LU(i,j,N-1)];
                    Hxw = Hx[LU(i-1,j,N-1)];
                    Hyn = Hy[LU(i,j,N)];
                    Hys = Hy[LU(i,j-1,N)];
                    P[LU(i,j,N)] = (1.0 - OMEGA) * Pp2[LU(i,j,N)]
                                 + OMEGA * (-1.0/(2.0*(P2(dx) + P2(dy))))
                                 * (dx * P2(dy) * (Hxe - Hxw) + dy * P2(dx) * (Hyn - Hys)
                                    - P2(dy) * (PE + PW) - P2(dx) * (PN + PS));
                }
            }
            enforceBCs(U,V,P);
            dp = diff(P,Pp2,N*N);
            if (dp < 1E-10)   // inner (pressure) tolerance, tighter than outer THRESH
                break;
        }

        // Rezero pressure mean (pressure is only defined up to a constant)
        double pmean = mean(P,N*N);
        for (int i = 1; i < N - 1; i++) {
            for (int j = 1; j < N - 1; j++) {
                P[LU(i,j,N)] -= pmean;
            }
        }

        // Calculate u: explicit update with pressure-gradient correction
        double Pe,Pw;
        for (int i = 1; i < N - 2; i++) {
            for (int j = 1; j < N - 1; j++) {
                Pe = P[LU(i+1,j,N)];
                Pw = P[LU(i,j,N)];
                U[LU(i,j,N-1)] = Up[LU(i,j,N-1)] + dt * (Hx[LU(i,j,N-1)] - (Pe - Pw) / dx);
            }
        }

        // Calculate v
        double Pn,Ps;
        for (int i = 1; i < N - 1; i++){
            for (int j = 1; j < N - 2; j++) {
                Pn = P[LU(i,j+1,N)];
                Ps = P[LU(i,j,N)];
                V[LU(i,j,N)] = Vp[LU(i,j,N)] + dt * (Hy[LU(i,j,N)] - (Pn - Ps) / dy);
            }
        }

        // Enforce BCs
        enforceBCs(U,V,P);

        // Determine convergence (rate of change of each field)
        du = diff(U,Up,N*(N-1)) /dt;
        dv = diff(V,Vp,N*(N-1)) /dt;
        dp = diff(P,Pp,N*N)     /dt;

        int printint=50;
        if (DEBUG)
            printint = 1;
        if (k % printint == 0){
            printf("\nIteration %d:\n", k);
            printf("du:\t%.3e\n",du);
            printf("dv:\t%.3e\n",dv);
            printf("dp:\t%.3e\n",dp);
        }
        stop = omp_get_wtime();
        printf("Time:\t%.3e s\n", stop-start);

        // Test if converged
        if (dp < THRESH){
            break;
        }
    }

    // Export non-interpolated grids: xU holds u-grid coordinates (midpoints of xP)
    double *xU = malloc_vectord(N-1);
    for (int i = 0; i < N-1; i++){
        xU[i] = 0.5*(xP[i] + xP[i+1]);
    }

    // Export tecplot files
    if (SAVETXT==1){
        sprintf(filename,"ugrid_n=%dRe=%.0f.dat",N,Re);
        datwrite1(filename, "x", xU, "y", yP, "u",U,N-1,N);
        sprintf(filename,"vgrid_n=%dRe=%.0f.dat",N,Re);
        datwrite1(filename, "x", xP, "y", xU, "v",V,N,N-1);
    }

    // Free memory
    free_vectord(U);
    free_vectord(V);
    free_vectord(P);
    free_vectord(Up);
    free_vectord(Vp);
    free_vectord(Pp);
    free_vectord(Pp2);
    free_vectord(xP);
    free_vectord(yP);
    free_vectord(Hx);
    free_vectord(Hy);
    free_vectord(xU);

    // Return
    return 0;
}

// Copies a vector: destination[i] = source[i] for i in [0, n)
void copy(double *source, double *destination, int n){
    for (int i = 0; i < n; i++){
        destination[i] = source[i];
    }
}

// Returns num evenly spaced numbers over [start, stop], endpoints included
void linspace(double *array, double start, double stop, int num){
    for (int i = 0; i < num; i++){
        array[i] = start + ((double) i) * (stop - start) / (double) (num-1);
    }
}

// Allocates memory for a 1D array of n1 doubles.
// Returns NULL on invalid input or allocation failure (caller should check).
double *malloc_vectord(int n1)
{
    if (n1 <= 0) {   // Checks for invalid inputs
        printf("Invalid input into malloc_vectord\n");
        // BUG FIX: previously fell off the end of a non-void function here
        // (undefined behavior); return NULL explicitly.
        return NULL;
    }
    double *mat = malloc(n1 * sizeof(double));
    if (mat == NULL)
        printf("Error allocating memory!");
    return mat;
}

// Frees memory for a 1D double array (warns on NULL input)
void free_vectord(double *a)
{
    if (a == NULL)
        printf("Error: Null input in free_vectord");
    free((void *)a);
}

// Assigns zeros to a vector
void zeros(double *array, int n){
    for (int i = 0; i < n; i++){
        array[i] = 0.0;
    }
}

// Writes a Tecplot POINT-format file with three interior-field variables
// (value3/4/5 are N x N arrays; ghost cells are skipped).
void datwrite(char filename[], char name1[], double *x, char name2[], double *y,
              char name3[], double *value3, char name4[], double *value4,
              char name5[], double *value5){
    FILE *f = fopen(filename,"w");
    if (f == NULL) {
        printf("Error opening file!\n");
        exit(1);
    }
    fprintf(f, "TITLE=\"%s\" VARIABLES=\"%s\", \"%s\", \"%s\", \"%s\", \"%s\" ZONE T=\"%s\" I=%d J=%d F=POINT\n",
            filename, name1, name2, name3, name4, name5, filename,N-2,N-2);
    for (int i = 1; i < N - 1; i++) {
        for (int j = 1; j < N - 1; j++) {
            fprintf(f, "%.10e, %.10e, %.10e, %.10e, %.10e\n",
                    x[i], y[j], value3[LU(i,j, N)],value4[LU(i,j, N)],value5[LU(i,j, N)]);
        }
    }
    fclose(f);
}

// Writes a Tecplot POINT-format file with one variable on an nx x ny grid
// (ghost cells are skipped).
void datwrite1(char filename[], char name1[], double *x, char name2[], double *y,
               char name3[], double *value3, int nx, int ny){
    FILE *f = fopen(filename,"w");
    if (f == NULL) {
        printf("Error opening file!\n");
        exit(1);
    }
    fprintf(f, "TITLE=\"%s\" VARIABLES=\"%s\", \"%s\", \"%s\" ZONE T=\"%s\" I=%d J=%d F=POINT\n",
            filename, name1, name2, name3, filename,nx-2,ny-2);
    for (int j = 1; j < ny-1; j++) {
        for (int i = 1; i < nx-1; i++) {
            fprintf(f, "%.10e, %.10e, %.10e,\n", x[i], y[j], value3[LU(i,j, nx)]);
        }
    }
    fclose(f);
}

// Prints a vector to stdout
void printVector(char name[], double *vector, int n){
    printf("%s:\t", name);
    for (int i = 0; i < n; i++){
        printf("%.3f ", vector[i]);
    }
    printf("\n");
}

// Prints an n1 x n2 matrix to stdout (row-major over j)
void printMatrix(char name[], double *vector, int n1, int n2){
    printf("%s:\n", name);
    for (int j = 0; j < n2; j++){
        for (int i = 0; i < n1; i++) {
            printf("%.4f\t", vector[LU(i,j,n1)]);
        }
        printf("\n");
    }
    printf("\n");
}

// Enforces lid-driven-cavity boundary conditions:
//   no-slip walls on left/right/bottom (ghost values mirrored with a sign flip),
//   moving lid (u = 1) on top, zero-normal-gradient pressure on all walls.
void enforceBCs(double *u, double *v, double *p){
    // U has gridpoints on side boundaries, and sandwiching top / bottom boundaries (N-1 x N)
    // V has gridpoints on top / bottom boundaries, and sandwiching side boundaries (N x N-1)
    // P has gridpoints sandwiching all boundaries
    for (int i = 0; i < N; i++) {
        // Bottom Surface
        v[LU(i,0,N)] = 0.0;
        p[LU(i,0,N)] = p[LU(i,1,N)];
        if (i < N-1)
            u[LU(i,0,N-1)] = -u[LU(i,1,N-1)];
        // Top Surface (lid: ghost value makes the wall-average u equal 1)
        v[LU(i,N-2,N)] = 0.0;
        p[LU(i,N-1,N)] = p[LU(i,N-2,N)];
        if (i < N-1)
            u[LU(i,N-1,N-1)] = 2.0 - u[LU(i,N-2,N-1)];
        // Left Surface
        u[LU(0,i,N-1)] = 0.0;
        p[LU(0,i,N)] = p[LU(1,i,N)];
        if (i < N-1)
            v[LU(0,i,N)] = -v[LU(1,i,N)];
        // Right Surface
        u[LU(N-2,i,N-1)] = 0.0;
        p[LU(N-1,i,N)] = p[LU(N-2,i,N)];
        if (i < N-1)
            v[LU(N-1,i,N)] = -v[LU(N-2,i,N)];
    }
}

// Returns the RMS (L2/sqrt(n)) norm of the difference of two arrays
double diff(double *array1, double *array2, int n){
    double difference = 0;
    for (int i = 0; i < n; i++){
        difference += P2(array1[i] - array2[i]);
    }
    return sqrt(difference/(double) n);
}

// Returns the mean of an array
double mean(double *array, int n)
{
    double average = 0;
    for (int i = 0; i < n; i++) {
        average += array[i] / n;
    }
    return average;
}
zboxloop.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

#include <stdlib.h>
#include <stdio.h>
#include <math.h>

#include "_hypre_utilities.h"
#include "HYPRE_struct_ls.h"
#include "HYPRE_krylov.h"
#include "_hypre_struct_mv.h"

/*--------------------------------------------------------------------------
 * Test driver to time new boxloops and compare to the old ones.
 *
 * Runs each of BoxLoop0..BoxLoop4 'reps' times over an (nx+2)x(ny+2)x(nz+2)
 * box, first with the old hypre_BoxLoop* macros, then with the new
 * zypre_BoxLoop* macros, and prints both sets of timings.
 *--------------------------------------------------------------------------*/

#define DEVICE_VAR

hypre_int
main( hypre_int argc,
      char *argv[] )
{
   HYPRE_Int         arg_index;
   HYPRE_Int         print_usage;
   HYPRE_Int         nx, ny, nz;
   HYPRE_Int         P, Q, R;
   HYPRE_Int         time_index;
   HYPRE_Int         num_procs, myid;
   HYPRE_Int         dim;
   HYPRE_Int         rep, reps, fail, sum;
   HYPRE_Int         size;
   hypre_Box        *x1_data_box, *x2_data_box, *x3_data_box, *x4_data_box;
   /* NOTE(review): xi2, xi3, xi4 are used below without a visible declaration;
    * they appear to be declared by the BoxLoop macros themselves (the explicit
    * declaration was commented out) -- confirm against the hypre version in use. */
   //HYPRE_Int         xi1, xi2, xi3, xi4;
   HYPRE_Int         xi1;
   HYPRE_Real       *xp1, *xp2, *xp3, *xp4;
   hypre_Index       loop_size, start, unit_stride, index;

   /*-----------------------------------------------------------
    * Initialize some stuff
    *-----------------------------------------------------------*/

   /* Initialize MPI */
   hypre_MPI_Init(&argc, &argv);
   hypre_MPI_Comm_size(hypre_MPI_COMM_WORLD, &num_procs );
   hypre_MPI_Comm_rank(hypre_MPI_COMM_WORLD, &myid );

   /*-----------------------------------------------------------
    * Set defaults
    *-----------------------------------------------------------*/

   dim = 3;

   nx = 10;
   ny = 10;
   nz = 10;

   P  = num_procs;
   Q  = 1;
   R  = 1;

   /*-----------------------------------------------------------
    * Parse command line
    *-----------------------------------------------------------*/

   print_usage = 0;
   arg_index = 1;
   while (arg_index < argc)
   {
      if ( strcmp(argv[arg_index], "-n") == 0 )
      {
         arg_index++;
         nx = atoi(argv[arg_index++]);
         ny = atoi(argv[arg_index++]);
         nz = atoi(argv[arg_index++]);
      }
      else if ( strcmp(argv[arg_index], "-P") == 0 )
      {
         arg_index++;
         P  = atoi(argv[arg_index++]);
         Q  = atoi(argv[arg_index++]);
         R  = atoi(argv[arg_index++]);
      }
      else if ( strcmp(argv[arg_index], "-d") == 0 )
      {
         arg_index++;
         dim = atoi(argv[arg_index++]);
      }
      else if ( strcmp(argv[arg_index], "-help") == 0 )
      {
         print_usage = 1;
         break;
      }
      else
      {
         arg_index++;
      }
   }

   /*-----------------------------------------------------------
    * Print usage info
    *-----------------------------------------------------------*/

   if ( (print_usage) && (myid == 0) )
   {
      hypre_printf("\n");
      hypre_printf("Usage: %s [<options>]\n", argv[0]);
      hypre_printf("\n");
      hypre_printf("  -n <nx> <ny> <nz>   : problem size per block\n");
      hypre_printf("  -P <Px> <Py> <Pz>   : processor topology\n");
      hypre_printf("  -d <dim>            : problem dimension (2 or 3)\n");
      hypre_printf("\n");
   }

   if ( print_usage )
   {
      exit(1);
   }

   /*-----------------------------------------------------------
    * Check a few things
    *-----------------------------------------------------------*/

   if ((P*Q*R) > num_procs)
   {
      if (myid == 0)
      {
         hypre_printf("Error: PxQxR is more than the number of processors\n");
      }
      exit(1);
   }
   else if ((P*Q*R) < num_procs)
   {
      if (myid == 0)
      {
         hypre_printf("Warning: PxQxR is less than the number of processors\n");
      }
   }

   /*-----------------------------------------------------------
    * Initialize some stuff
    *-----------------------------------------------------------*/

   /* Loop over the nx x ny x nz interior of a box with one ghost layer on
    * each side (box index space [0, nx+1] x [0, ny+1] x [0, nz+1]). */
   hypre_SetIndex3(start, 1, 1, 1);
   hypre_SetIndex3(loop_size, nx, ny, nz);
   hypre_SetIndex3(unit_stride, 1, 1, 1);

   x1_data_box = hypre_BoxCreate(dim);
   x2_data_box = hypre_BoxCreate(dim);
   x3_data_box = hypre_BoxCreate(dim);
   x4_data_box = hypre_BoxCreate(dim);
   hypre_SetIndex3(hypre_BoxIMin(x1_data_box), 0, 0, 0);
   hypre_SetIndex3(hypre_BoxIMax(x1_data_box), nx+1, ny+1, nz+1);
   hypre_CopyBox(x1_data_box, x2_data_box);
   hypre_CopyBox(x1_data_box, x3_data_box);
   hypre_CopyBox(x1_data_box, x4_data_box);

   size = (nx+2)*(ny+2)*(nz+2);
   xp1 = hypre_CTAlloc(HYPRE_Real, size, HYPRE_MEMORY_HOST);
   xp2 = hypre_CTAlloc(HYPRE_Real, size, HYPRE_MEMORY_HOST);
   xp3 = hypre_CTAlloc(HYPRE_Real, size, HYPRE_MEMORY_HOST);
   xp4 = hypre_CTAlloc(HYPRE_Real, size, HYPRE_MEMORY_HOST);

   /* Heuristic: scale the repetition count so total work (~reps * volume)
    * stays roughly constant across problem sizes. */
   reps = 1000000000/(nx*ny*nz+1000);

   /*-----------------------------------------------------------
    * Print driver parameters
    *-----------------------------------------------------------*/

   if (myid == 0)
   {
      hypre_printf("Running with these driver parameters:\n");
      hypre_printf("  (nx, ny, nz)    = (%d, %d, %d)\n", nx, ny, nz);
      hypre_printf("  (Px, Py, Pz)    = (%d, %d, %d)\n", P,  Q,  R);
      hypre_printf("  dim             = %d\n", dim);
      hypre_printf("  reps            = %d\n", reps);
   }

   /*-----------------------------------------------------------
    * Check new boxloops
    *-----------------------------------------------------------*/

   /* xp1 is already initialized to 0 */
   zypre_BoxLoop1Begin(dim, loop_size,
                       x1_data_box, start, unit_stride, xi1);
   zypre_BoxLoop1For(xi1)
   {
      xp1[xi1] ++;
   }
   zypre_BoxLoop1End(xi1);

   /* Use old boxloop to check that values are set to 1 */
   fail = 0;
   sum = 0;
   hypre_SerialBoxLoop1Begin(3, loop_size,
                             x1_data_box, start, unit_stride, xi1);
   {
      sum += xp1[xi1];
      if (xp1[xi1] != 1)
      {
         hypre_BoxLoopGetIndex(index);
         hypre_printf("*(%d,%d,%d) = %d\n",
                      index[0], index[1], index[2], (HYPRE_Int) xp1[xi1]);
         fail = 1;
      }
   }
   hypre_SerialBoxLoop1End(xi1);

   if (sum != (nx*ny*nz))
   {
      hypre_printf("*sum = %d\n", sum);
      fail = 1;
   }

   if (fail)
   {
      exit(1);
   }

   /*-----------------------------------------------------------
    * Synchronize so that timings make sense
    *-----------------------------------------------------------*/

   hypre_MPI_Barrier(hypre_MPI_COMM_WORLD);

   /*-----------------------------------------------------------
    * Time old boxloops
    *-----------------------------------------------------------*/

   /* Time BoxLoop0 */
   time_index = hypre_InitializeTiming("BoxLoop0");
   hypre_BeginTiming(time_index);
   for (rep = 0; rep < reps; rep++)
   {
      xi1 = 0;
      hypre_BoxLoop0Begin(3, loop_size);
      {
         xp1[xi1] += xp1[xi1];
         //xi1++;
      }
      hypre_BoxLoop0End();
   }
   hypre_EndTiming(time_index);

   /* Time BoxLoop1 */
   time_index = hypre_InitializeTiming("BoxLoop1");
   hypre_BeginTiming(time_index);
   for (rep = 0; rep < reps; rep++)
   {
      hypre_BoxLoop1Begin(3, loop_size,
                          x1_data_box, start, unit_stride, xi1);
      {
         xp1[xi1] += xp1[xi1];
      }
      hypre_BoxLoop1End(xi1);
   }
   hypre_EndTiming(time_index);

   /* Time BoxLoop2 */
   time_index = hypre_InitializeTiming("BoxLoop2");
   hypre_BeginTiming(time_index);
   for (rep = 0; rep < reps; rep++)
   {
      hypre_BoxLoop2Begin(3, loop_size,
                          x1_data_box, start, unit_stride, xi1,
                          x2_data_box, start, unit_stride, xi2);
      {
         xp1[xi1] += xp1[xi1] + xp2[xi2];
      }
      hypre_BoxLoop2End(xi1, xi2);
   }
   hypre_EndTiming(time_index);

   /* Time BoxLoop3 */
   time_index = hypre_InitializeTiming("BoxLoop3");
   hypre_BeginTiming(time_index);
   for (rep = 0; rep < reps; rep++)
   {
      hypre_BoxLoop3Begin(3, loop_size,
                          x1_data_box, start, unit_stride, xi1,
                          x2_data_box, start, unit_stride, xi2,
                          x3_data_box, start, unit_stride, xi3);
      {
         xp1[xi1] += xp1[xi1] + xp2[xi2] + xp3[xi3];
      }
      hypre_BoxLoop3End(xi1, xi2, xi3);
   }
   hypre_EndTiming(time_index);

   /* Time BoxLoop4 */
   time_index = hypre_InitializeTiming("BoxLoop4");
   hypre_BeginTiming(time_index);
   for (rep = 0; rep < reps; rep++)
   {
      hypre_BoxLoop4Begin(3, loop_size,
                          x1_data_box, start, unit_stride, xi1,
                          x2_data_box, start, unit_stride, xi2,
                          x3_data_box, start, unit_stride, xi3,
                          x4_data_box, start, unit_stride, xi4);
      {
         xp1[xi1] += xp1[xi1] + xp2[xi2] + xp3[xi3] + xp4[xi4];
      }
      hypre_BoxLoop4End(xi1, xi2, xi3, xi4);
   }
   hypre_EndTiming(time_index);

   hypre_PrintTiming("Old BoxLoop times", hypre_MPI_COMM_WORLD);
   hypre_FinalizeTiming(time_index);
   hypre_ClearTiming();

   /*-----------------------------------------------------------
    * Time new boxloops
    *-----------------------------------------------------------*/

   /* Time BoxLoop0 */
   time_index = hypre_InitializeTiming("BoxLoop0");
   hypre_BeginTiming(time_index);
   for (rep = 0; rep < reps; rep++)
   {
      xi1 = 0;
      zypre_BoxLoop0Begin(dim, loop_size);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ZYPRE_BOX_PRIVATE) firstprivate(xi1) HYPRE_SMP_SCHEDULE
#endif
      zypre_BoxLoop0For()
      {
         xp1[xi1] += xp1[xi1];
         xi1++;
      }
      zypre_BoxLoop0End();
   }
   hypre_EndTiming(time_index);

   /* Time BoxLoop1 */
   time_index = hypre_InitializeTiming("BoxLoop1");
   hypre_BeginTiming(time_index);
   for (rep = 0; rep < reps; rep++)
   {
      zypre_BoxLoop1Begin(dim, loop_size,
                          x1_data_box, start, unit_stride, xi1);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ZYPRE_BOX_PRIVATE) HYPRE_SMP_SCHEDULE
#endif
      zypre_BoxLoop1For(xi1)
      {
         xp1[xi1] += xp1[xi1];
      }
      zypre_BoxLoop1End(xi1);
   }
   hypre_EndTiming(time_index);

   /* Time BoxLoop2 */
   time_index = hypre_InitializeTiming("BoxLoop2");
   hypre_BeginTiming(time_index);
   for (rep = 0; rep < reps; rep++)
   {
      zypre_BoxLoop2Begin(dim, loop_size,
                          x1_data_box, start, unit_stride, xi1,
                          x2_data_box, start, unit_stride, xi2);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ZYPRE_BOX_PRIVATE) HYPRE_SMP_SCHEDULE
#endif
      zypre_BoxLoop2For(xi1, xi2)
      {
         xp1[xi1] += xp1[xi1] + xp2[xi2];
      }
      zypre_BoxLoop2End(xi1, xi2);
   }
   hypre_EndTiming(time_index);

   /* Time BoxLoop3 */
   time_index = hypre_InitializeTiming("BoxLoop3");
   hypre_BeginTiming(time_index);
   for (rep = 0; rep < reps; rep++)
   {
      zypre_BoxLoop3Begin(dim, loop_size,
                          x1_data_box, start, unit_stride, xi1,
                          x2_data_box, start, unit_stride, xi2,
                          x3_data_box, start, unit_stride, xi3);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ZYPRE_BOX_PRIVATE) HYPRE_SMP_SCHEDULE
#endif
      zypre_BoxLoop3For(xi1, xi2, xi3)
      {
         xp1[xi1] += xp1[xi1] + xp2[xi2] + xp3[xi3];
      }
      zypre_BoxLoop3End(xi1, xi2, xi3);
   }
   hypre_EndTiming(time_index);

   /* Time BoxLoop4 */
   time_index = hypre_InitializeTiming("BoxLoop4");
   hypre_BeginTiming(time_index);
   for (rep = 0; rep < reps; rep++)
   {
      zypre_BoxLoop4Begin(dim, loop_size,
                          x1_data_box, start, unit_stride, xi1,
                          x2_data_box, start, unit_stride, xi2,
                          x3_data_box, start, unit_stride, xi3,
                          x4_data_box, start, unit_stride, xi4);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ZYPRE_BOX_PRIVATE) HYPRE_SMP_SCHEDULE
#endif
      zypre_BoxLoop4For(xi1, xi2, xi3, xi4)
      {
         xp1[xi1] += xp1[xi1] + xp2[xi2] + xp3[xi3] + xp4[xi4];
      }
      zypre_BoxLoop4End(xi1, xi2, xi3, xi4);
   }
   hypre_EndTiming(time_index);

   hypre_PrintTiming("New BoxLoop times", hypre_MPI_COMM_WORLD);
   hypre_FinalizeTiming(time_index);
   hypre_ClearTiming();

   /*-----------------------------------------------------------
    * Finalize things
    *-----------------------------------------------------------*/

   hypre_BoxDestroy(x1_data_box);
   hypre_BoxDestroy(x2_data_box);
   hypre_BoxDestroy(x3_data_box);
   hypre_BoxDestroy(x4_data_box);

   hypre_TFree(xp1, HYPRE_MEMORY_HOST);
   hypre_TFree(xp2, HYPRE_MEMORY_HOST);
   hypre_TFree(xp3, HYPRE_MEMORY_HOST);
   hypre_TFree(xp4, HYPRE_MEMORY_HOST);

   /* Finalize MPI */
   hypre_MPI_Finalize();

   return (0);
}
GB_unop__log1p_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__log1p_fc64_fc64)
// op(A') function:  GB (_unop_tran__log1p_fc64_fc64)

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = GB_clog1p (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_clog1p (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GxB_FC64_t aij = Ax [pA] ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ;            \
    Cx [pC] = GB_clog1p (z) ;       \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LOG1P || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__log1p_fc64_fc64)
(
    GxB_FC64_t *Cx,         // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: apply the operator to every entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_clog1p (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_clog1p (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__log1p_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template uses the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
bli_axpyv_bgq_int.c
/*

   BLIS
   An object-based framework for developing high-performance BLAS-like
   libraries.

   Copyright (C) 2014, The University of Texas at Austin

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:
    - Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    - Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
    - Neither the name of The University of Texas at Austin nor the names
      of its contributors may be used to endorse or promote products
      derived from this software without specific prior written permission.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

*/

#include "blis.h"

// Double-precision AXPY (y := y + alpha*x) kernel for Blue Gene/Q, using
// QPX vector intrinsics (4 doubles per vector).  Falls back to the
// reference kernel when strides are non-unit or buffers are not 32-byte
// aligned, since the QPX loads/stores below require alignment.
void bli_daxpyv_bgq_int
     (
       conj_t           conjx,
       dim_t            n,
       double* restrict alpha,
       double* restrict x, inc_t incx,
       double* restrict y, inc_t incy,
       cntx_t* restrict cntx
     )
{
	if ( bli_zero_dim1( n ) ) return;

	// If there is anything that would interfere with our use of aligned
	// vector loads/stores, call the reference implementation.
	bool_t use_ref = FALSE;
	if ( incx != 1 || incy != 1 ||
	     bli_is_unaligned_to( ( siz_t )x, 32 ) ||
	     bli_is_unaligned_to( ( siz_t )y, 32 ) )
	{
		use_ref = TRUE;
	}

	// Call the reference implementation if needed.
	if ( use_ref == TRUE )
	{
		BLIS_DAXPYV_KERNEL_REF( conjx, n, alpha, x, incx, y, incy, cntx );
		return;
	}

	dim_t n_run  = n / 4;   // number of full 4-wide vector iterations
	dim_t n_left = n % 4;   // scalar remainder elements

	// Splat alpha into all four vector lanes.
	vector4double alphav = vec_lds( 0 * sizeof(double), (double*)alpha );

	// BUG FIX: xv, yv, zv were previously declared outside this loop, which
	// makes them shared among OpenMP threads and creates a data race.
	// Declaring them inside the loop body gives each thread private copies.
	#pragma omp parallel for
	for ( dim_t i = 0; i < n_run; i++ )
	{
		vector4double xv = vec_lda( 0 * sizeof(double), &x[i*4] );
		vector4double yv = vec_lda( 0 * sizeof(double), &y[i*4] );
		vector4double zv = vec_madd( alphav, xv, yv );   // z = alpha*x + y
		vec_sta( zv, 0 * sizeof(double), &y[i*4] );
	}

	// Scalar cleanup loop for the trailing n % 4 elements.
	for ( dim_t i = 0; i < n_left; i++ )
	{
		y[4*n_run + i] += *alpha * x[4*n_run + i];
	}
}
path-mpi.c
#include <getopt.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <unistd.h> #include <mpi.h> #include "mt19937p.h" #ifdef _OPENMP #include <omp.h> #else #include <sys/time.h> #endif //ldoc on /** * # The basic recurrence * * At the heart of the method is the following basic recurrence. * If $l_{ij}^s$ represents the length of the shortest path from * $i$ to $j$ that can be attained in at most $2^s$ steps, then * $$ * l_{ij}^{s+1} = \min_k \{ l_{ik}^s + l_{kj}^s \}. * $$ * That is, the shortest path of at most $2^{s+1}$ hops that connects * $i$ to $j$ consists of two segments of length at most $2^s$, one * from $i$ to $k$ and one from $k$ to $j$. Compare this with the * following formula to compute the entries of the square of a * matrix $A$: * $$ * a_{ij}^2 = \sum_k a_{ik} a_{kj}. * $$ * These two formulas are identical, save for the niggling detail that * the latter has addition and multiplication where the former has min * and addition. But the basic pattern is the same, and all the * tricks we learned when discussing matrix multiplication apply -- or * at least, they apply in principle. I'm actually going to be lazy * in the implementation of `square`, which computes one step of * this basic recurrence. I'm not trying to do any clever blocking. * You may choose to be more clever in your assignment, but it is not * required. * * The return value for `square` is true if `l` and `lnew` are * identical, and false otherwise. 
*/ int square(int n, // Number of nodes int* restrict l, // Partial distance at step s int* restrict lnew, // Partial distance at step s+1 int rank, int* offsets) { int done = 1; //#pragma omp parallel for shared(l, lnew) reduction(&& : done) for (int j = offsets[rank]; j < offsets[rank+1]; ++j) { for (int i = 0; i < n; ++i) { int lij = l[j*n+i]; for (int k = 0; k < n; ++k) { int lik = l[k*n+i]; int lkj = l[j*n+k]; if (lik + lkj < lij) { lij = lik+lkj; done = 0; } } lnew[(j-offsets[rank])*n+i] = lij; } } return done; } int* alloc_partition(int n, int size) { int np = size; int* offsets = (int*) malloc((np+1) * sizeof(int)); for (int i = 0; i <= np; ++i) { long r = i*n; offsets[i] = (int) (r/np); } return offsets; } /** * * The value $l_{ij}^0$ is almost the same as the $(i,j)$ entry of * the adjacency matrix, except for one thing: by convention, the * $(i,j)$ entry of the adjacency matrix is zero when there is no * edge between $i$ and $j$; but in this case, we want $l_{ij}^0$ * to be "infinite". It turns out that it is adequate to make * $l_{ij}^0$ longer than the longest possible shortest path; if * edges are unweighted, $n+1$ is a fine proxy for "infinite." * The functions `infinitize` and `deinfinitize` convert back * and forth between the zero-for-no-edge and $n+1$-for-no-edge * conventions. */ static inline void infinitize(int n, int* l) { for (int i = 0; i < n*n; ++i) if (l[i] == 0) l[i] = n+1; } static inline void deinfinitize(int n, int* l) { for (int i = 0; i < n*n; ++i) if (l[i] == n+1) l[i] = 0; } /** * * Of course, any loop-free path in a graph with $n$ nodes can * at most pass through every node in the graph. Therefore, * once $2^s \geq n$, the quantity $l_{ij}^s$ is actually * the length of the shortest path of any number of hops. This means * we can compute the shortest path lengths for all pairs of nodes * in the graph by $\lceil \lg n \rceil$ repeated squaring operations. 
* * The `shortest_path` routine attempts to save a little bit of work * by only repeatedly squaring until two successive matrices are the * same (as indicated by the return value of the `square` routine). */ void shortest_paths(int n, int* restrict l, int rank, int size) { // Generate l_{ij}^0 from adjacency matrix representation if (rank == 0) { infinitize(n, l); for (int i = 0; i < n*n; i += n+1) l[i] = 0; } MPI_Bcast(l, n * n, MPI_INT, 0, MPI_COMM_WORLD); int* offsets = alloc_partition(n, size); int* restrict lnew = (int*) calloc((offsets[rank+1]-offsets[rank])*n, sizeof(int)); //memcpy(lnew, l, n*n * sizeof(int)); int global_done = 1, local_done; // Repeated squaring until nothing changes do { local_done = square(n, l, lnew, rank, offsets); MPI_Allgather(lnew, (offsets[rank+1] - offsets[rank])*n, MPI_INT, l, (offsets[rank+1] - offsets[rank])*n, MPI_INT, MPI_COMM_WORLD); MPI_Allreduce(&local_done, &global_done, 1, MPI_INT, MPI_LAND, MPI_COMM_WORLD); } while (!global_done); /* for (int i = 0; i <= size; i++) { */ /* printf("%d ", offsets[i]); */ /* } */ /* printf("\n"); */ /* printf("Hello from %d / %d n: %d \n", rank, size, n); */ /* for (int i = 0; i < n; i++) { */ /* for (int j = 0; j < n; j++) { */ /* printf("%d ", l[j*n+i]); */ /* } */ /* printf("\n"); */ /* } */ free(lnew); free(offsets); if (rank == 0) { deinfinitize(n, l); } } /** * # The random graph model * * Of course, we need to run the shortest path algorithm on something! * For the sake of keeping things interesting, let's use a simple random graph * model to generate the input data. The $G(n,p)$ model simply includes each * possible edge with probability $p$, drops it otherwise -- doesn't get much * simpler than that. We use a thread-safe version of the Mersenne twister * random number generator in lieu of coin flips. 
*/ int* gen_graph(int n, double p) { int* l = calloc(n*n, sizeof(int)); struct mt19937p state; sgenrand(10302011UL, &state); for (int j = 0; j < n; ++j) { for (int i = 0; i < n; ++i) l[j*n+i] = (genrand(&state) < p); l[j*n+j] = 0; } return l; } /** * # Result checks * * Simple tests are always useful when tuning code, so I have included * two of them. Since this computation doesn't involve floating point * arithmetic, we should get bitwise identical results from run to * run, even if we do optimizations that change the associativity of * our computations. The function `fletcher16` computes a simple * [simple checksum][wiki-fletcher] over the output of the * `shortest_paths` routine, which we can then use to quickly tell * whether something has gone wrong. The `write_matrix` routine * actually writes out a text representation of the matrix, in case we * want to load it into MATLAB to compare results. * * [wiki-fletcher]: http://en.wikipedia.org/wiki/Fletcher's_checksum */ int fletcher16(int* data, int count) { int sum1 = 0; int sum2 = 0; for(int index = 0; index < count; ++index) { sum1 = (sum1 + data[index]) % 255; sum2 = (sum2 + sum1) % 255; } return (sum2 << 8) | sum1; } void write_matrix(const char* fname, int n, int* a) { FILE* fp = fopen(fname, "w+"); if (fp == NULL) { fprintf(stderr, "Could not open output file: %s\n", fname); exit(-1); } for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) fprintf(fp, "%d ", a[j*n+i]); fprintf(fp, "\n"); } fclose(fp); } /** * # The `main` event */ const char* usage = "path.x -- Parallel all-pairs shortest path on a random graph\n" "Flags:\n" " - n -- number of nodes (200)\n" " - p -- probability of including edges (0.05)\n" " - i -- file name where adjacency matrix should be stored (none)\n" " - o -- file name where output matrix should be stored (none)\n"; int main(int argc, char** argv) { int size; int rank; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &size); MPI_Comm_rank(MPI_COMM_WORLD, &rank); int n = 
200; // Number of nodes double p = 0.05; // Edge probability const char* ifname = NULL; // Adjacency matrix file name const char* ofname = NULL; // Distance matrix file name if (rank == 0) { // Option processing extern char* optarg; const char* optstring = "hn:d:p:o:i:"; int c; while ((c = getopt(argc, argv, optstring)) != -1) { switch (c) { case 'h': fprintf(stderr, "%s", usage); return -1; case 'n': n = atoi(optarg); break; case 'p': p = atof(optarg); break; case 'o': ofname = optarg; break; case 'i': ifname = optarg; break; } } } MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD); int* l = gen_graph(n, p); if (rank == 0) { if (ifname) write_matrix(ifname, n, l); } // Graph generation + output // Time the shortest paths code #ifdef _OPENMP double t0 = omp_get_wtime(); shortest_paths(n, l, rank, size); double t1 = omp_get_wtime(); double elapsed = t1 - t0; #else struct timeval t0, t1; gettimeofday(&t0, NULL); shortest_paths(n, l, rank, size); gettimeofday(&t1, NULL); double elapsed = (t1.tv_sec-t0.tv_sec) + (t1.tv_usec-t0.tv_usec)*1e-6; #endif if (rank == 0) { printf("== MPI with %d ranks\n", size); printf("n: %d\n", n); printf("p: %g\n", p); printf("Time: %g\n", elapsed); printf("Check: %X\n", fletcher16(l, n*n)); // Generate output file if (ofname) write_matrix(ofname, n, l); } // Clean up free(l); MPI_Finalize(); return 0; }
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 24; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,4);t1++) { lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8)); ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-5,6)),ceild(8*t2-Nz-20,24));t3<=min(min(min(floord(Nt+Ny-4,24),floord(4*t1+Ny+5,24)),floord(8*t2+Ny+4,24)),floord(8*t1-8*t2+Nz+Ny+3,24));t3++) { for (t4=max(max(max(0,ceild(t1-127,128)),ceild(8*t2-Nz-508,512)),ceild(24*t3-Ny-508,512));t4<=min(min(min(min(floord(Nt+Nx-4,512),floord(4*t1+Nx+5,512)),floord(8*t2+Nx+4,512)),floord(24*t3+Nx+20,512)),floord(8*t1-8*t2+Nz+Nx+3,512));t4++) { for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),24*t3-Ny+2),512*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),24*t3+22),512*t4+510),8*t1-8*t2+Nz+5);t5++) { for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) { lbv=max(512*t4,t5+1); ubv=min(512*t4+511,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));; } } } } } } } } } /* 
End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
GB_binop__bor_int32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): every function body below is a project template #include;
// this specialization only fixes the operator (bitwise OR) and type (int32_t).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bor_int32)
// A.*B function (eWiseMult):       GB (_AemultB_08__bor_int32)
// A.*B function (eWiseMult):       GB (_AemultB_02__bor_int32)
// A.*B function (eWiseMult):       GB (_AemultB_04__bor_int32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bor_int32)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__bor_int32)
// C+=b function (dense accum):     GB (_Cdense_accumb__bor_int32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bor_int32)
// C=scalar+B                       GB (_bind1st__bor_int32)
// C=scalar+B'                      GB (_bind1st_tran__bor_int32)
// C=A+scalar                       GB (_bind2nd__bor_int32)
// C=A'+scalar                      GB (_bind2nd_tran__bor_int32)

// C type:   int32_t
// A type:   int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij) | (bij)

#define GB_ATYPE \
    int32_t

#define GB_BTYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int32_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int32_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x) | (y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BOR || GxB_NO_INT32 || GxB_NO_BOR_INT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (BOR is none of these, so no dense-accum ewise3 kernel is generated.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__bor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bor_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicate return, generator artifact; kept as-is.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bor_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bor_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bor_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bor_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t   x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        int32_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x) | (bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bor_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t   y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        int32_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij) | (y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x) | (aij) ;                     \
}

GrB_Info GB (_bind1st_tran__bor_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any later use
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij) | (y) ;                     \
}

GrB_Info GB (_bind2nd_tran__bor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
oyranos_alpha.c
/** @file oyranos_alpha.c
 *
 *  Oyranos is an open source Color Management System
 *
 *  @par Copyright:
 *            2004-2012 (C) Kai-Uwe Behrmann
 *
 *  @brief    object APIs
 *  @author   Kai-Uwe Behrmann <ku.b@gmx.de>
 *  @par License:
 *            new BSD <http://www.opensource.org/licenses/BSD-3-Clause>
 *  @since    2004/11/25
 */

#include "oyranos_config_internal.h"

#include "oyCMMapis_s.h"
#include "oyCMMapi6_s.h"
#include "oyCMMapi8_s.h"
#include "oyCMMapi9_s.h"
#include "oyCMMui_s.h"
#include "oyConfig_s.h"
#include "oyConfigs_s.h"
#include "oyConfig_s_.h"
#include "oyConversion_s.h"
#include "oyObserver_s.h"
#include "oyOption_s.h"
#include "oyOptions_s.h"
#include "oyPointer_s.h"
#include "oyProfile_s_.h"
#include "oyStructList_s_.h"

#include "oyranos_devices.h"
#include "oyranos_types.h"
#include "oyranos_alpha.h"
#include "oyranos_color.h"
#include "oyranos_alpha_internal.h"
#include "oyranos_object_internal.h"
#include "oyranos_generic_internal.h"
#include "oyranos_cmm.h"
#include "oyranos_db.h"
#include "oyranos_helper.h"
#include "oyranos_i18n.h"
#include "oyranos_icc.h"
#include "oyranos_io.h"
#include "oyranos_sentinel.h"
#include "oyranos_string.h"
#include "oyranos_texts.h"

#include <math.h>
#include <locale.h>   /* LC_NUMERIC */
#include <limits.h>

#define OY_ERR if(l_error != 0) error = l_error;

#ifdef DEBUG_
#define DEBUG_OBJECT 1
#endif

#ifndef NO_OPT
#define OY_USE_OBJECT_POOL_ 0
#endif

/* internal declarations */

/** \addtogroup alpha Alpha API's
 *  @{ *//* alpha */

/* littleCMS pixel-type codes (mirrors lcms' own PT_* values) */
#define PT_ANY   0    /* Don't check colorspace */
                      /* 1 & 2 are reserved */
#define PT_GRAY  3
#define PT_RGB   4
#define PT_CMY   5
#define PT_CMYK  6
#define PT_YCbCr 7
#define PT_YUV   8    /* Lu'v' */
#define PT_XYZ   9
#define PT_Lab   10
#define PT_YUVK  11   /* Lu'v'K */
#define PT_HSV   12
#define PT_HLS   13
#define PT_Yxy   14
#define PT_HiFi  15

/**
 *  @internal
 *  Function lcmsColorSpace
 *  @brief map an ICC icColorSpaceSignature to the lcms PT_* notation
 *
 *  @return  the PT_* code, or 0 (PT_ANY) for unknown signatures
 *
 *  @since Oyranos: version 0.1.8
 *  @date  2008/01/21 (API 0.1.8)
 */
int lcmsColorSpace(icColorSpaceSignature ProfileSpace)
{
  switch ((unsigned int)ProfileSpace)
  {
    case icSigGrayData: return PT_GRAY;
    case icSigRgbData:  return PT_RGB;
    case icSigCmyData:  return PT_CMY;
    case icSigCmykData: return PT_CMYK;
    case icSigYCbCrData:return PT_YCbCr;
    case icSigLuvData:  return PT_YUV;
    case icSigXYZData:  return PT_XYZ;
    case icSigLabData:  return PT_Lab;
    case icSigLuvKData: return PT_YUVK;
    case icSigHsvData:  return PT_HSV;
    case icSigHlsData:  return PT_HLS;
    case icSigYxyData:  return PT_Yxy;

    case icSig6colorData:
    case icSigMCH6Data: return PT_HiFi;

    default:            return 0;
  }
}

/** Function oyDumpColorToCGATS
 *  @brief   create CGATS text from double value array and a profile
 *
 *  The funtion operates on raw color values. Alpha is not supported.
 *  The intention is to have a small debugging only tool. The values are
 *  required to come prescaled (IE RGB 0...255, XYZ 0...1, CMYK 0...100 ...)
 *
 *  @param channels      the raw values, n samples of channels_n doubles
 *  @param n             number of samples (is multiplied by the channel
 *                       count internally)
 *  @param prof          profile deciding the CGATS DATA_FORMAT fields
 *  @param allocateFunc  allocator for the returned text
 *  @param DESCRIPTOR    CGATS DESCRIPTOR field content
 *  @return              the allocated CGATS text, or 0 on allocation failure
 *
 *  @todo should be merged to some structure like oyImage_s.
 *
 *  @since Oyranos: version 0.1.8
 *  @date  2008/01/21 (API 0.1.8)
 */
oyChar *     oyDumpColorToCGATS     ( const double      * channels,
                                       size_t              n,
                                       oyProfile_s       * prof,
                                       oyAlloc_f           allocateFunc,
                                       const oyChar      * DESCRIPTOR )
{
  int channels_n = oyProfile_GetChannelsCount( prof );
  int i;
  /* heuristic upper bound: 16 chars per value plus a fixed header budget */
  size_t len = n * channels_n * 16 + 2024;
  oyChar * text = allocateFunc( len );
  const oyChar * prof_name = prof?((oyProfile_s_*)prof)->file_name_:"";
  const char * format = "";
  char * daten = text;
  /*double value = 0;*/

  const char *fomate[17] = {
    "SAMPLE_ID UNKNOWN 0", /* PT_ANY */
    "SAMPLE_ID UNKNOWN 1", /* 1 */
    "SAMPLE_ID UNKNOWN 2", /* 2 */
    "SAMPLE_ID GRAY", /* 3 PT_GRAY */
    "SAMPLE_ID RGB_R RGB_G RGB_B", /* 4 PT_RGB */
    "SAMPLE_ID CMY_C CMY_M CMY_Y", /* 5 PT_CMY */
    "SAMPLE_ID CMYK_C CMYK_M CMYK_Y CMYK_K", /* 6 PT_CMYK */
    "SAMPLE_ID ", /* 7 PT_YCbCr */
    "SAMPLE_ID ", /* 8 PT_YUV */
    "SAMPLE_ID XYZ_X XYZ_Y XYZ_Z", /* 9 PT_XYZ */
    "SAMPLE_ID LAB_L LAB_A LAB_B", /* 10 PT_Lab */
    "SAMPLE_ID ", /* 11 PT_YUVK */
    "SAMPLE_ID ", /* 12 PT_HSV */
    "SAMPLE_ID ", /* 13 PT_HLS */
    "SAMPLE_ID ", /* 14 PT_Yxy */
    "SAMPLE_ID ", /* 15 PT_HiFi */
    0
  };

  icColorSpaceSignature cspace = oyProfile_GetSignature( prof, oySIGNATURE_COLOR_SPACE );

  int lcms_space = lcmsColorSpace( cspace );
  char * save_locale = oyStringCopy_( setlocale(LC_NUMERIC, 0 ),
                                      oyAllocateFunc_);
  format = fomate[lcms_space];

  /* BUG FIX: the allocator result was used unchecked */
  if(!text)
  {
    oyFree_m_( save_locale );
    return 0;
  }

  n *= channels_n;

  oySprintf_( daten ,
              "OYCGATS\n"
              "\n"
              "ORIGINATOR \"%s\"\n"
              "DESCRIPTOR \"%s\"\n"
              "CREATED \"%s\"\n"
              "\n"
              "NUMBER_OF_FIELDS %d\n"
              "BEGIN_DATA_FORMAT\n"
              "%s\n"
              "END_DATA_FORMAT\n"
              "\n"
              "NUMBER_OF_SETS %d\n"
              "BEGIN_DATA\n",
              prof_name?prof_name:"--", DESCRIPTOR?DESCRIPTOR:"--",
              "--", channels_n + 1, format,
              (int)n/(channels_n?channels_n:-1) );

  /* CGATS uses '.' as decimal separator regardless of the user locale */
  setlocale(LC_NUMERIC, "C");
  for(i = 0; i < (int)n; ++i)
  {
    int modulo_k = i%(channels_n);
    if(modulo_k)
      oySprintf_( &daten[oyStrlen_(daten)], "       ");
    else
    {
      if(i)
        oySprintf_( &daten[oyStrlen_(daten)], "\n");
      oySprintf_( &daten[oyStrlen_(daten)], "%d     ", (i/channels_n) + 1);
    }
    oySprintf_( &daten[oyStrlen_(daten)], "%.03f", channels[i] );
  }
  oySprintf_( &daten[oyStrlen_(daten)], "\nEND_DATA\n");
  setlocale(LC_NUMERIC, save_locale);
  oyFree_m_( save_locale );

  return text;
}

/** @} *//* alpha */

/** Function oyByteSwap
 *  @brief   swap the byte order of an array of 2/4/8 byte samples in place
 *
 *  @param         data                the sample array
 *  @param         Bps                 Bytes per sample; can be 2, 4 or 8
 *  @param         count               number of samples
 *  @return                            0 on success, 1 for unsupported Bps,
 *                                     -1 for missing data
 */
int      oyByteSwap                  ( void              * data,
                                       int                 Bps,
                                       unsigned int        count )
{
  unsigned int i;
  int error = 0;
  unsigned char * c, ct;
  uint16_t * u16 = (uint16_t*) data;
  uint32_t * u32 = (uint32_t*) data;
  uint64_t * u64 = (uint64_t*) data;

  if(!data)
    return -1;

  switch(Bps)
  {
    case 2:
#pragma omp parallel for private(ct,c,i)
      for(i = 0; i < count; ++i)
      {
        c = (unsigned char*) &u16[i];
        /* endianess is wonderful stuff */
        ct = c[0]; c[0] = c[1]; c[1] = ct;
      }
      break;
    case 4:
#pragma omp parallel for private(ct,c,i)
      for(i = 0; i < count; ++i)
      {
        c = (unsigned char*) &u32[i];
        ct = c[0]; c[0] = c[3]; c[3] = ct;
        ct = c[2]; c[2] = c[1]; c[1] = ct;
      }
      break;
    case 8:
#pragma omp parallel for private(c,i)
      for(i = 0; i < count; ++i)
      {
        unsigned char uint64_c[8];
        /* BUG FIX: `big` started at 8, which read c[8] -- one byte past
         * the sample (out-of-bounds) -- and never copied c[0].  A full
         * 8-byte reversal copies bytes 7..0. */
        int little = 0, big = 7;
        c = (unsigned char*) &u64[i];
        for (; little < 8 ; little++ )
          uint64_c[little] = c[big--];
        u64[i] = *((uint64_t*) &uint64_c[0]);
      }
      break;
    default:
      /* should not happen */
      error = 1;
      break;
  }

  return error;
}

/* --- Object handling API's start ------------------------------------ */

/** \addtogroup objects_generic Generic Objects
 *  @{ *//* objects_generic */

/** Function oyCallback_New
 *  @memberof oyCallback_s
 *  @brief   allocate a new Callback object
 *
 *  @version Oyranos: 0.1.10
 *  @since   2009/11/18 (Oyranos: 0.1.10)
 *  @date    2009/11/18
 */
OYAPI oyCallback_s * OYEXPORT
           oyCallback_New            ( oyObject_s          object )
{
  /* ---- start of common object constructor ----- */
  oyOBJECT_e type = oyOBJECT_CALLBACK_S;
# define STRUCT_TYPE oyCallback_s
  int error = 0;
  oyObject_s    s_obj = oyObject_NewFrom( object );
  STRUCT_TYPE * s = 0;

  if(s_obj)
    s = (STRUCT_TYPE*)s_obj->allocateFunc_(sizeof(STRUCT_TYPE));

  if(!s || !s_obj)
  {
    WARNc_S(_("MEM Error."));
    oyObject_Release( &s_obj );
    return NULL;
  }

  error = !memset( s, 0, sizeof(STRUCT_TYPE) );
  if(error)
    WARNc_S( "memset failed" );

  s->type_ = type;
  s->copy = (oyStruct_Copy_f) oyCallback_Copy;
  s->release = (oyStruct_Release_f) oyCallback_Release;

  s->oy_ = s_obj;

  error = !oyObject_SetParent( s_obj, type, (oyPointer)s );
  if(error)
    WARNc_S( "oyObject_SetParent failed" );
# undef STRUCT_TYPE
  /* ---- end of common object constructor ------- */

  return s;
}

/** @internal
 *  Function oyCallback_Copy_
 *  @memberof oyCallback_s
 *  @brief   real copy a Callback object
 *
 *  @param[in]     obj                 struct object
 *  @param         object              the optional object
 *
 *  @version Oyranos: 0.1.10
 *  @since   2009/11/18 (Oyranos: 0.1.10)
 *  @date    2009/11/18
 */
oyCallback_s * oyCallback_Copy_      ( oyCallback_s      * obj,
                                       oyObject_s          object )
{
  oyCallback_s * s = 0;
  int error = 0;

  if(!obj || !object)
    return s;

  s = oyCallback_New( object );
  error = !s;

  if(!error)
  {
    s->callback = obj->callback;
    s->data = obj->data;
  }

  if(error)
    oyCallback_Release( &s );

  return s;
}

/** Function oyCallback_Copy
 *  @memberof oyCallback_s
 *  @brief   copy or reference a Callback object
 *
 *  @param[in]     obj                 struct object
 *  @param         object              the optional object
 *
 *  @version Oyranos: 0.1.10
 *  @since   2009/11/18 (Oyranos: 0.1.10)
 *  @date    2009/11/18
 */
OYAPI oyCallback_s * OYEXPORT
           oyCallback_Copy           ( oyCallback_s      * obj,
                                       oyObject_s          object )
{
  oyCallback_s * s = obj;

  if(!obj)
    return 0;

  oyCheckType__m( oyOBJECT_CALLBACK_S, return 0 )

  if(obj && !object)
  {
    /* without a target object only add a reference */
    s = obj;
    oyObject_Copy( s->oy_ );
    return s;
  }

  s = oyCallback_Copy_( obj, object );

  return s;
}

/** Function oyCallback_Release
 *  @memberof oyCallback_s
 *  @brief   release and possibly deallocate a Callback object
 *
 *  @param[in,out] obj                 struct object
 *
 *  @version Oyranos: 0.1.10
 *  @since   2009/11/18 (Oyranos: 0.1.10)
 *  @date    2009/11/18
 */
OYAPI int  OYEXPORT
           oyCallback_Release        ( oyCallback_s     ** obj )
{
  /* ---- start of common object destructor ----- */
  oyCallback_s * s = 0;

  if(!obj || !*obj)
    return 0;

  s = *obj;

  oyCheckType__m( oyOBJECT_CALLBACK_S, return 1 )

  *obj = 0;

  if(oyObject_UnRef(s->oy_))
    return 0;
  /* ---- end of common object destructor ------- */

  if(s->oy_->deallocateFunc_)
  {
    oyDeAlloc_f deallocateFunc = s->oy_->deallocateFunc_;

    oyObject_Release( &s->oy_ );

    deallocateFunc( s );
  }

  return 0;
}

/** @} *//* objects_generic */

/** \addtogroup objects_conversion Conversion API's
 *  @{ *//* objects_conversion */

/** @internal
 *  @brief oyCMMProgress_f
 *
 *  @since Oyranos: version 0.1.8
 *  @date  4 december 2007 (API 0.1.8)
 */
void             oyCMMProgress_      ( int                 ID,
                                       double              progress )
{
  printf("%s:%d id: %d %0.02f \n",__FILE__,__LINE__, ID, progress);
}

/** @} *//* objects_conversion */

/** \addtogroup monitor_api Monitor API
 *  @brief Hardware detection and profile selection for monitors

 *  The notation of screens for the X Window system is equal for traditional
 *  and Xinerama screens. \n
 *  The X11 libraries will be loaded at runtime.
 *
 *  The functions can be studied by looking at the oyranos-monitor utiliy.
 *  The code for that tool is in oyranos_gamma.c .
 *
 *  @{ *//* monitor_api */

/** Function: oyGetMonitorInfo
 *  @brief   brief pick up monitor information with Xlib
 *
 *  @deprecated because the functionality shall be integrated into a general framwork
 *
 *  @param      display_name  the display string
 *  @param[out] manufacturer  the manufacturer of the monitor device
 *  @param[out] model         the model of the monitor device
 *  @param[out] serial        the serial number of the monitor device
 *  @param[out] system_port   the system port name the monitor device is connectted to
 *  @param[out] display_geometry the display geometry description of the monitor
 *  @param[out] edid          the edid data blob of the monitor device
 *  @param      allocate_func the allocator for the above strings
 *  @return     error
 *
 *  @version Oyranos: 0.1.8
 *  @since   2005/00/00 (Oyranos: 0.1.x)
 *  @date    2008/10/24
 */
OY_DEPRECATED int      oyGetMonitorInfo ( const char       * display_name OY_UNUSED,
                                       char             ** manufacturer OY_UNUSED,
                                       char             ** model OY_UNUSED,
                                       char             ** serial OY_UNUSED,
                                       char             ** system_port OY_UNUSED,
                                       char             ** display_geometry OY_UNUSED,
                                       oyBlob_s         ** edid OY_UNUSED,
                                       oyAlloc_f           allocate_func OY_UNUSED )
{
  /* stub: the implementation moved into the device framework */
  int error = 0;
  error = 1;
  return error;
}

/** Function: oyGetScreenFromPosition
 *  @brief   screen number from position
 *
 *  @deprecated because the functionality shall be integrated into a general framwork
 *
 *  This function will hit exact results only with Xinerama.
\n * a platform specific function * * @param display_name the display string * @param x x position on screen * @param y y position on screen * @return screen * * @version Oyranos: 0.1.8 * @since 2005/00/00 (Oyranos: 0.1.x) * @date 2008/10/24 */ OY_DEPRECATED int oyGetScreenFromPosition ( const char * display_name OY_UNUSED, int x OY_UNUSED, int y OY_UNUSED ) { int screen = 0; return screen; } /** Function: oyGetDisplayNameFromPosition2 * @brief display name from position * * This function will hit exact results only with Xinerama. * * @param[in] device_type the device type, e.g. OY_TYPE_STD, * defaults to OY_TYPE_STD (optional) * @param[in] device_class registration ::oyFILTER_REG_APPLICATION * part, e.g. "monitor", mandatory * @param display_name raw display string * @param x x position on screen * @param y y position on screen * @param allocateFunc function used to allocate memory for the string * @return display name * * @version Oyranos: 0.1.13 * @since 2005/00/00 (Oyranos: 0.1.x) * @date 2010/11/12 */ OY_DEPRECATED char * oyGetDisplayNameFromPosition2(const char * device_type, const char * device_class, const char * display_name, int x, int y, oyAlloc_f allocateFunc ) { int error = 0; char * text = 0; oyConfig_s * device = 0; oyConfigs_s * devices = 0; oyOptions_s * options = 0; oyOption_s * o = 0; oyRectangle_s * r = 0; int n, i; const char * device_name = 0; error = oyOptions_SetFromString( &options, "//" OY_TYPE_STD "/config/command", "list", OY_CREATE_NEW ); error = oyOptions_SetFromString( &options, "//" OY_TYPE_STD "/config/device_rectangle", "true", OY_CREATE_NEW ); /** we want a fuzzy look at our display, not the narrow "device_name" */ error = oyOptions_SetFromString( &options, "//" OY_TYPE_STD "/config/display_name", display_name, OY_CREATE_NEW ); error = oyConfigs_FromDeviceClass ( device_type, device_class, options, &devices, 0 ); if(error>0) WARNc1_S("oyConfigs_FromDeviceClass() returned: %d", error); oyOptions_Release( &options ); 
if(!allocateFunc) allocateFunc = oyAllocateFunc_; n = oyConfigs_Count( devices ); for( i = 0; i < n; ++i ) { device = oyConfigs_Get( devices, i ); o = oyConfig_Find( device, "device_rectangle" ); r = (oyRectangle_s*) oyOption_GetStruct( o, oyOBJECT_RECTANGLE_S ); if(!device_name && r && oyRectangle_PointIsInside( r, x,y )) { device_name = oyConfig_FindString( device, "device_name", 0 ); text = oyStringCopy_( device_name, allocateFunc ); } oyConfig_Release( &device ); } oyConfigs_Release( &devices ); return text; } /** Function: oyGetDisplayNameFromPosition * @brief display name from position * * This function will hit exact results only with Xinerama. * * @param display_name raw display string * @param x x position on screen * @param y y position on screen * @param allocateFunc function used to allocate memory for the string * @return display name * * @version Oyranos: 0.1.8 * @since 2005/00/00 (Oyranos: 0.1.x) * @date 2008/10/24 */ OY_DEPRECATED char * oyGetDisplayNameFromPosition( const char * display_name, int x, int y, oyAlloc_f allocateFunc ) { int error = 0; char * text = 0; oyConfig_s * device = 0; oyConfigs_s * devices = 0; oyOptions_s * options = 0; oyOption_s * o = 0; oyRectangle_s * r = 0; int n, i; const char * device_name = 0; error = oyOptions_SetFromString( &options, "//" OY_TYPE_STD "/config/command", "list", OY_CREATE_NEW ); error = oyOptions_SetFromString( &options, "//" OY_TYPE_STD "/config/device_rectangle", "true", OY_CREATE_NEW ); /** we want a fuzzy look at our display, not the narrow "device_name" */ error = oyOptions_SetFromString( &options, "//" OY_TYPE_STD "/config/display_name", display_name, OY_CREATE_NEW ); error = oyConfigs_FromDeviceClass ( 0, "monitor", options, &devices, 0 ); if(error>0) WARNc1_S("oyConfigs_FromDeviceClass() returned: %d", error); oyOptions_Release( &options ); if(!allocateFunc) allocateFunc = oyAllocateFunc_; n = oyConfigs_Count( devices ); for( i = 0; i < n; ++i ) { device = oyConfigs_Get( devices, i ); o = 
oyConfig_Find( device, "device_rectangle" ); r = (oyRectangle_s*) oyOption_GetStruct( o, oyOBJECT_RECTANGLE_S ); if(!device_name && r && oyRectangle_PointIsInside( r, x,y )) { device_name = oyConfig_FindString( device, "device_name", 0 ); text = oyStringCopy_( device_name, allocateFunc ); } oyConfig_Release( &device ); } oyConfigs_Release( &devices ); return text; } /** Function: oyGetMonitorProfile * @brief get the monitor profile from the server * * @deprecated because the functionality shall be integrated into a general framwork * * @param device_name the display string * @param[out] size the size of profile * @param allocate_func function used to allocate memory for the profile * @return the memory block containing the profile * * @version Oyranos: 0.1.8 * @since 2005/00/00 (Oyranos: 0.1.x) * @date 2009/02/10 */ OY_DEPRECATED char * oyGetMonitorProfile ( const char * device_name, size_t * size, oyAlloc_f allocate_func ) { int error = 0; oyConfig_s * device = 0; oyOptions_s * options = 0; oyProfile_s * p = 0; char * block = 0; const char * device_type = OY_TYPE_STD, * device_class = "monitor"; if(error <= 0) { error = oyDeviceGet( device_type, device_class, device_name, options, &device ); error = oyDeviceGetProfile( device, 0, &p ); oyConfig_Release( &device ); } if(error <= 0) { block = oyProfile_GetMem( p, size, 0, allocate_func ); error = !block || !size; } return block; } /** Function: oyMonitorProfileNameFromDB * @brief get the monitor profile filename from the device * database * * @deprecated because the functionality shall be integrated into a general framwork * * @param device the device * @param allocateFunc user function used to allocate memory * @return the profile filename * * @version Oyranos: 0.1.10 * @since 2009/02/08 (Oyranos: 0.1.10) * @date 2009/02/08 */ OY_DEPRECATED char * oyMonitorProfileNameFromDB ( oyConfig_s * device, oyAlloc_f allocateFunc ) { int error = !device; oyProfile_s * p = 0; char * profile_name = 0; char * text = 0; 
if(!allocateFunc) allocateFunc = oyAllocateFunc_; if(error <= 0) { error = oyDeviceProfileFromDB( device, &profile_name, 0 ); } if(error <= 0) { if(profile_name && oyStrrchr_( profile_name, OY_SLASH_C ) != 0) profile_name = oyStrrchr_( profile_name, OY_SLASH_C ) + 1; text = oyStringCopy_( profile_name, allocateFunc ); } if(profile_name) oyFree_m_( profile_name ); oyProfile_Release( &p ); return text; } /** Function: oyGetMonitorProfileNameFromDB * @brief get the monitor profile filename from the device profile * database * * @deprecated because the functionality shall be integrated into a general framwork * * @param display_name the display string * @param allocateFunc function used to allocate memory for the string * @return the profiles filename (if localy available) * * @version Oyranos: 0.1.8 * @since 2005/00/00 (Oyranos: 0.1.x) * @date 2008/10/24 */ OY_DEPRECATED char * oyGetMonitorProfileNameFromDB(const char * display_name, oyAlloc_f allocateFunc ) { int error = !display_name || !display_name[0]; oyOptions_s * options = 0; oyConfig_s * device = 0; oyProfile_s * p = 0; char * profile_name = 0; char * text = 0; if(!allocateFunc) allocateFunc = oyAllocateFunc_; if(error > 0) { display_name = getenv( "DISPLAY" ); error = !display_name; } if(error > 0) { WARNc_S( "No display_name argument provided. Give up." ); return 0; } /** 1. get monitor device */ if(error <= 0) oyDeviceGet( OY_TYPE_STD, "monitor", display_name, options, &device ); oyOptions_Release( &options ); /** 2. 
check for success of device detection */ error = !device; if(error) { WARNc2_S( "%s: \"%s\"", _("Could not open device"), display_name ); return 0; } if(error <= 0) error = oyDeviceProfileFromDB( device, &profile_name, 0 ); if(error <= 0) { if(profile_name && oyStrrchr_( profile_name, OY_SLASH_C ) != 0) text = oyStringCopy_( oyStrrchr_( profile_name, OY_SLASH_C ) + 1, allocateFunc ); else text = oyStringCopy_( profile_name, allocateFunc ); } if(profile_name) oyFree_m_( profile_name ); oyProfile_Release( &p ); oyConfig_Release( &device ); return text; } /** Function: oySetMonitorProfile * @brief set the monitor profile by filename * * @deprecated because the functionality shall be integrated into a general framwork * * The profile_name argument does two things. Set to zero the function solely * unsets the graphic card luts and the server stored profile. So pretty all * server side information should go away. \n * With a profile name given the function will lookup the monitor in the * Oyranos device database and stores the given profile there. \n * To sum up, to set a new profile please call the following sequence: * @code // store new settings in the Oyranos data base oySetMonitorProfile( display_name, profile_name ); // remove the server entries oySetMonitorProfile( display_name, 0 ); // update the window server from the newly Oyranos data base settings oyActivateMonitorProfiles( display_name ); @endcode * * @param display_name the display string * @param profile_name the file to use as monitor profile or 0 to unset * @return error * * @version Oyranos: 0.1.8 * @since 2005/00/00 (Oyranos: 0.1.x) * @date 2009/01/28 */ OY_DEPRECATED int oySetMonitorProfile ( const char * display_name, const char * profile_name ) { int error = !display_name || !display_name[0]; oyOptions_s * options = 0; oyConfig_s * device = 0; if(error > 0) { WARNc1_S( "No display_name argument provided. Give up. %s", oyNoEmptyString_m_(profile_name) ); return error; } /** 1. 
obtain detailed and expensive device informations */ /** 1.1 add "properties" call to module arguments */ error = oyOptions_SetFromString( &options, "//" OY_TYPE_STD "/config/command", "properties", OY_CREATE_NEW ); /** 1.2 get monitor device */ if(error <= 0) error = oyDeviceGet( OY_TYPE_STD, "monitor", display_name, options, &device ); oyOptions_Release( &options ); /** 2. check for success of device detection */ error = !device; if(error) { WARNc2_S( "%s: \"%s\"", _("Could not open device"), display_name ); return error; } if(profile_name) error = oyDeviceSetProfile( device, oySCOPE_USER, profile_name ); else error = oyDeviceUnset( device ); oyConfig_Release( &device ); return error; } /** Function: oyActivateMonitorProfiles * @brief activate the monitor using the stored configuration * * @deprecated because the functionality shall be integrated into a general framwork * * Activate in case the appropriate profile is not yet setup in the server. \n * To deactivate a profile in the server call * oySetMonitorProfile( device_name, 0 ). * * @see oySetMonitorProfile for permanently configuring a monitor * * @param display_name the device string * @return error * * @version Oyranos: 0.1.10 * @since 2005/00/00 (Oyranos: 0.1.x) * @date 2009/01/28 */ OY_DEPRECATED int oyActivateMonitorProfiles ( const char * display_name ) { int error = !display_name || !display_name[0]; oyOptions_s * options = 0; oyConfig_s * device = 0; oyConfigs_s * devices = 0; const char * device_class = "monitor"; int i, n; if(error > 0) { WARNc_S( "No device_name argument provided. Give up." ); return error; } { /* 1. 
set a general request */ error = oyOptions_SetFromString( &options, "//" OY_TYPE_STD "/config/command", "list", OY_CREATE_NEW ); /* we want a fuzzy look at our display, not as narrow as "device_name"*/ error = oyOptions_SetFromString( &options, "//" OY_TYPE_STD "/config/display_name", display_name, OY_CREATE_NEW ); error = oyConfigs_FromDeviceClass ( 0, device_class, options, &devices, 0 ); n = oyConfigs_Count( devices ); for(i = 0; i < n; ++i) { device = oyConfigs_Get( devices, i ); oyDeviceSetup( device, options ); oyConfig_Release( &device ); } oyConfigs_Release( &devices ); oyOptions_Release( &options ); } return error; } /** @} *//* monitor_api */
IteratorTestHelper.h
/** * @file IteratorTestHelper.h * @author F. Gratl * @date 05.03.21 */ #pragma once #include <gtest/gtest.h> #include <array> #include <vector> #include "autopas/options/IteratorBehavior.h" #include "autopas/utils/ArrayMath.h" #include "autopas/utils/WrapOpenMP.h" #include "autopas/utils/inBox.h" #include "testingHelpers/commonTypedefs.h" namespace IteratorTestHelper { /** * Inserts particles around all corners of the given AutoPas object at critical distances. * @tparam AutoPasT * @param autoPas * @param boxOfInterestMin * @param boxOfInterestMax * @return Tuple of four vectors containing IDs of added particles. * 1. All owned particles * 2. All halo particles. * 3. All owned particles in the box of interest. * 3. All halo particles in the box of interest. */ template <class AutoPasT> auto fillContainerAroundBoundary(AutoPasT &autoPas, std::array<double, 3> boxOfInterestMin, std::array<double, 3> boxOfInterestMax) { constexpr size_t numParticles1dTotal = 10; auto cutoff = autoPas.getCutoff(); auto skin = autoPas.getVerletSkin(); // generator function for critical coordinates (along one dimension) auto generateInteresting1DPositions = [&](double min, double max) -> auto { // ensure that all particles are at most skin away from halo! 
// interesting cases are: // - outside of the halo by skin // - edge of halo // - in the halo // - edge of actual domain // - just inside the domain return std::array<double, numParticles1dTotal>{min - cutoff - skin + 1e-10, min - cutoff, min - skin / 4, min, min + skin / 4, max - skin / 4, max, max + skin / 4, max + cutoff, max + cutoff + skin - 1e-10}; }; // fill container size_t id = 0; auto boxMin = autoPas.getBoxMin(); auto boxMax = autoPas.getBoxMax(); std::vector<size_t> particleIDsInterestHalo; std::vector<size_t> particleIDsInterestOwned; std::vector<size_t> particleIDsHalo; std::vector<size_t> particleIDsOwned; for (auto x : generateInteresting1DPositions(boxMin[0], boxMax[0])) { for (auto y : generateInteresting1DPositions(boxMin[1], boxMax[1])) { for (auto z : generateInteresting1DPositions(boxMin[2], boxMax[2])) { std::array<double, 3> pos{x, y, z}; Molecule p(pos, {0., 0., 0.}, id++, 0); // add the particle as actual or halo particle if (autopas::utils::inBox(pos, boxMin, boxMax)) { autoPas.addParticle(p); particleIDsOwned.push_back(p.getID()); if (autopas::utils::inBox(pos, boxOfInterestMin, boxOfInterestMax)) { particleIDsInterestOwned.push_back(p.getID()); } } else { // AutoPas should set the ownership state of this particle to halo autoPas.addOrUpdateHaloParticle(p); particleIDsHalo.push_back(p.getID()); if (autopas::utils::inBox(pos, boxOfInterestMin, boxOfInterestMax)) { particleIDsInterestHalo.push_back(p.getID()); } } } } } // sanity check. Can not use ASSERT_EQ because this introduces a different return. 
EXPECT_EQ(particleIDsOwned.size() + particleIDsHalo.size(), numParticles1dTotal * numParticles1dTotal * numParticles1dTotal); // getNumberOfParticles works via counters in the logic handler EXPECT_EQ(autoPas.getNumberOfParticles(autopas::IteratorBehavior::owned), particleIDsOwned.size()); EXPECT_EQ(autoPas.getNumberOfParticles(autopas::IteratorBehavior::halo), particleIDsHalo.size()); return std::make_tuple(particleIDsOwned, particleIDsHalo, particleIDsInterestOwned, particleIDsInterestHalo); } /** * Inserts particles around all corners of the given AutoPas object at critical distances. * @tparam AutoPasT * @param autoPas * @return Tuple of two vectors containing IDs of added particles. First for owned, second for halo particles. */ template <class AutoPasT> auto fillContainerAroundBoundary(AutoPasT &autoPas) { std::array<double, 3> numericLimitMin{std::numeric_limits<double>::min(), std::numeric_limits<double>::min(), std::numeric_limits<double>::min()}; std::array<double, 3> numericLimitMax{std::numeric_limits<double>::max(), std::numeric_limits<double>::max(), std::numeric_limits<double>::max()}; return fillContainerAroundBoundary(autoPas, numericLimitMin, numericLimitMax); } /** * Creats a grid of particles in the given AutoPas object. * Grid width is `sparsity * ( boxLength / ((cutoff + skin) * cellSizeFactor) )`. * E.g., for a sparsity of 1, 1 particle is inserted for every cell. For a sparsity of .5, 8 particles are inserted. * The lower corner of the grid is offset from boxMin by half the grid width in every dimension. * This way there should be one particle in every third Linked Cells cell. * @tparam AutoPasT * @param autoPas * @param sparsity * @return Vector of all particle IDs added. 
*/ template <class AutoPasT> auto fillContainerWithGrid(AutoPasT &autoPas, double sparsity) { auto cutoff = autoPas.getCutoff(); auto skin = autoPas.getVerletSkin(); auto cellSizeFactor = *(autoPas.getAllowedCellSizeFactors().getAll().begin()); auto boxLength = autopas::utils::ArrayMath::sub(autoPas.getBoxMax(), autoPas.getBoxMin()); auto gridWidth1D = (cutoff + skin) * cellSizeFactor; auto gridEdgesPerDim = autopas::utils::ArrayMath::mulScalar(boxLength, 1 / gridWidth1D); auto gridWidth3D = autopas::utils::ArrayMath::div(boxLength, gridEdgesPerDim); size_t id = 0; std::vector<size_t> particleIDs; for (double x = gridWidth3D[0] / 2; x < boxLength[0]; x += sparsity * gridWidth3D[0]) { for (double y = gridWidth3D[1] / 2; y < boxLength[1]; y += sparsity * gridWidth3D[1]) { for (double z = gridWidth3D[2] / 2; z < boxLength[2]; z += sparsity * gridWidth3D[2]) { std::array<double, 3> pos{x, y, z}; Molecule p(pos, {0., 0., 0.}, id++, 0); autoPas.addParticle(p); particleIDs.push_back(p.getID()); } } } return particleIDs; } template <class AutoPasT> auto getHaloBoxMinMax(AutoPasT &autoPas) { const auto interactionLength = autoPas.getCutoff() + autoPas.getVerletSkin(); // halo has width of interactionLength const auto haloBoxMin = autopas::utils::ArrayMath::subScalar(autoPas.getBoxMin(), interactionLength); const auto haloBoxMax = autopas::utils::ArrayMath::addScalar(autoPas.getBoxMax(), interactionLength); return std::make_tuple(haloBoxMin, haloBoxMax); } /** * Creates a function to instantiate an iterator with the given properties and passes this function on to fun. * The iterator always covers the whole domain and, if necessary the halo. * This is necessary so that fun can decide for itself if it wants iterators to be created in an OpenMP region or not. * @tparam AutoPasT * @tparam F f(AutoPas, Iterator) * @param useRegionIterator * @param useConstIterator * @param behavior * @param autoPas * @param fun Function taking the AutoPas object and the generated iterator. 
*/ template <bool useConstIterator, class AutoPasT, class F> void provideIterator(AutoPasT &autoPas, autopas::IteratorBehavior behavior, bool useRegionIterator, F fun) { if (useRegionIterator) { std::array<double, 3> haloBoxMin, haloBoxMax; std::tie(haloBoxMin, haloBoxMax) = getHaloBoxMinMax(autoPas); if constexpr (useConstIterator) { const auto &autoPasRef = autoPas; auto getIter = [&]() -> typename AutoPasT::const_iterator_t { return autoPasRef.getRegionIterator(haloBoxMin, haloBoxMax, behavior); }; fun(autoPasRef, getIter); } else { auto getIter = [&]() -> typename AutoPasT::iterator_t { return autoPas.getRegionIterator(haloBoxMin, haloBoxMax, behavior); }; fun(autoPas, getIter); } } else { if constexpr (useConstIterator) { auto getIter = [&]() -> typename AutoPasT::const_iterator_t { return autoPas.cbegin(behavior); }; fun(autoPas, getIter); } else { auto getIter = [&]() -> typename AutoPasT::iterator_t { return autoPas.begin(behavior); }; fun(autoPas, getIter); } } } /** * Same as provideIterator() but `useConstIterator` is a run-time variable. * @tparam useConstIterator * @tparam AutoPasT * @tparam F f(AutoPas, Iterator) * @param useRegionIterator * @param behavior * @param autoPas * @param fun Function taking the AutoPas object and the generated iterator. */ template <class AutoPasT, class F> void provideIterator(bool useConstIterator, AutoPasT &autoPas, autopas::IteratorBehavior behavior, bool useRegionIterator, F fun) { if (useConstIterator) { provideIterator<true>(autoPas, behavior, useRegionIterator, fun); } else { provideIterator<false>(autoPas, behavior, useRegionIterator, fun); } } /** * Creates a function to instantiate a region-iterator with the given properties and passes this function on to fun. * This is necessary so that fun can decide for itself if it wants iterators to be created in an OpenMP region or not. 
* @tparam useConstIterator * @tparam AutoPasT * @tparam F f(AutoPas, Iterator) * @param autoPas * @param behavior * @param boxMin * @param boxMax * @param fun Function taking the AutoPas object and the generated iterator. */ template <bool useConstIterator, class AutoPasT, class F> void provideRegionIterator(AutoPasT &autoPas, autopas::IteratorBehavior behavior, const std::array<double, 3> &boxMin, const std::array<double, 3> &boxMax, F fun) { if constexpr (useConstIterator) { const auto &autoPasRef = autoPas; auto getIter = [&]() -> typename AutoPasT::const_iterator_t { return autoPasRef.getRegionIterator(boxMin, boxMax, behavior); }; fun(autoPasRef, getIter); } else { auto getIter = [&]() -> typename AutoPasT::iterator_t { return autoPas.getRegionIterator(boxMin, boxMax, behavior); }; fun(autoPas, getIter); } } /** * Same as provideRegionIterator() but `useConstIterator` is a run-time variable. * @tparam AutoPasT * @tparam F f(AutoPas, Iterator) * @param useConstIterator * @param autoPas * @param behavior * @param boxMin * @param boxMax * @param fun Function taking the AutoPas object and the generated iterator. 
*/ template <class AutoPasT, class F> void provideRegionIterator(bool useConstIterator, AutoPasT &autoPas, autopas::IteratorBehavior behavior, const std::array<double, 3> &boxMin, const std::array<double, 3> &boxMax, F fun) { if (useConstIterator) { provideRegionIterator<true>(autoPas, behavior, boxMin, boxMax, fun); } else { provideRegionIterator<false>(autoPas, behavior, boxMin, boxMax, fun); } } /** * Apply an iterator, track what particle IDs are found and compare this to a vector of expected IDs * @tparam AutoPasT * @tparam IteratorT * @param autopas * @param iterator * @param particleIDsExpected */ template <class AutoPasT, class FgetIter> void findParticles(AutoPasT &autopas, FgetIter getIter, const std::vector<size_t> &particleIDsExpected) { std::vector<size_t> particleIDsFound; #ifdef AUTOPAS_OPENMP // aparently the version from WrapOpenMP.h can not be found #pragma omp declare reduction(vecMergeWorkaround : std::vector<size_t> : omp_out.insert(omp_out.end(), omp_in.begin(), omp_in.end())) #pragma omp parallel reduction(vecMergeWorkaround : particleIDsFound) #endif { for (auto iterator = getIter(); iterator.isValid(); ++iterator) { auto id = iterator->getID(); particleIDsFound.push_back(id); } } // check that everything was found EXPECT_THAT(particleIDsFound, ::testing::UnorderedElementsAreArray(particleIDsExpected)); } /** * Generates a given amount of cells where only indicated cells contain a given amount of particles. * Cells can be considered to be on the main diagonal through 3D space. So the xyz coordinates of each cell's lower * corner are {cellID, cellID, cellID}. The particles are also placed along this line within their cells. Within each * cell the particles are placed equidistant around the center. * @param numCells * @param cellsToFill * @param particlesPerCell * @return Vector of generated and filled cells. 
*/ static std::vector<FMCell> generateCellsWithPattern(const size_t numCells, const std::vector<size_t> &cellsToFill, const size_t particlesPerCell) { constexpr double cellDiagonal = 1.; // distance between particles within one cell const double distBetweenParticles = cellDiagonal / (particlesPerCell + 1.); std::vector<FMCell> cells(numCells); size_t numParticlesAdded = 0; for (auto cellId : cellsToFill) { for (size_t i = 0; i < particlesPerCell; ++i) { auto position = cellId + distBetweenParticles * (i + 1.); Molecule m({position, position, position}, {0, 0, 0}, numParticlesAdded++, 0); cells[cellId].addParticle(m); } } return cells; } } // namespace IteratorTestHelper
DRB020-privatemissing-var-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* tmp should be put as private to avoid race condition Data race pair: tmp@65 vs. tmp@66 */ #include <stdlib.h> #include <stdio.h> int main(int argc, char* argv[]) { int i; int tmp; int len=100; if (argc>1) len = atoi(argv[1]); int a[len]; #pragma omp parallel for simd for (i=0;i<len;i++) a[i]=i; #pragma omp parallel for simd private(tmp) for (i=0;i<len;i++) { tmp =a[i]+i; a[i] = tmp; } #pragma omp parallel for simd ordered for (i=0;i<len;i++) #pragma omp ordered simd printf("%d\n",a[i]); return 0; }
GraphReconstructor.h
//
// Copyright (C) 2015 Yahoo Japan Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#pragma once

#include <unordered_map>
#include <unordered_set>
#include <list>

#ifdef _OPENMP
#include <omp.h>
#else
#warning "*** OMP is *NOT* available! ***"
#endif

namespace NGT {

// Static utility class that post-processes an NGT graph index: extracting
// its adjacency lists, pruning path-redundant edges, adding reverse edges,
// and refining/rebuilding ANNG-style graphs. All members are static; the
// class is never instantiated. Most methods have a shared-memory-allocator
// variant selected with NGT_SHARED_MEMORY_ALLOCATOR.
class GraphReconstructor {
 public:

  // Copies every node's adjacency list out of graphIndex into graph
  // (graph[id-1] corresponds to repository id). Nodes that cannot be
  // fetched become empty entries so indices stay aligned.
  static void extractGraph(std::vector<NGT::ObjectDistances> &graph, NGT::GraphIndex &graphIndex) {
    graph.reserve(graphIndex.repository.size());
    for (size_t id = 1; id < graphIndex.repository.size(); id++) {
      if (id % 1000000 == 0) {
        std::cerr << "GraphReconstructor::extractGraph: Processed " << id << " objects." << std::endl;
      }
      try {
        NGT::GraphNode &node = *graphIndex.getNode(id);
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
        NGT::ObjectDistances nd;
        nd.reserve(node.size());
        for (auto n = node.begin(graphIndex.repository.allocator); n != node.end(graphIndex.repository.allocator); ++n) {
          nd.push_back(ObjectDistance((*n).id, (*n).distance));
        }
        graph.push_back(nd);
#else
        graph.push_back(node);
#endif
        // size == capacity is expected here; a mismatch is only warned about.
        if (graph.back().size() != graph.back().capacity()) {
          std::cerr << "GraphReconstructor::extractGraph: Warning! The graph size must be the same as the capacity. " << id << std::endl;
        }
      } catch(NGT::Exception &err) {
        // Keep positional alignment by inserting an empty adjacency list.
        graph.push_back(NGT::ObjectDistances());
        continue;
      }
    }
  }

  // Re-inserts edges rank by rank, skipping an edge (id -> node[rank]) when
  // some existing neighbor of id already reaches node[rank].id by a shorter
  // hop (i.e. the edge is redundant via a 2-hop path).
  static void adjustPaths(NGT::Index &outIndex) {
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
    std::cerr << "construct index is not implemented." << std::endl;
    exit(1);
#else
    NGT::GraphIndex &outGraph = dynamic_cast<NGT::GraphIndex&>(outIndex.getIndex());
    size_t rStartRank = 0;  // edges below this rank are kept unconditionally (0 = none kept up front)
    std::list<std::pair<size_t, NGT::GraphNode> > tmpGraph;
    // Snapshot every node's full edge list, then truncate the live node to rStartRank.
    for (size_t id = 1; id < outGraph.repository.size(); id++) {
      NGT::GraphNode &node = *outGraph.getNode(id);
      tmpGraph.push_back(std::pair<size_t, NGT::GraphNode>(id, node));
      if (node.size() > rStartRank) {
        node.resize(rStartRank);
      }
    }
    size_t removeCount = 0;
    // Process rank rStartRank, rStartRank+1, ... until no node has an edge left at that rank.
    for (size_t rank = rStartRank; ; rank++) {
      bool edge = false;
      Timer timer;
      for (auto it = tmpGraph.begin(); it != tmpGraph.end();) {
        size_t id = (*it).first;
        try {
          NGT::GraphNode &node = (*it).second;
          if (rank >= node.size()) {
            it = tmpGraph.erase(it);
            continue;
          }
          edge = true;
          // Sanity check: snapshot edges are expected in ascending distance order.
          if (rank >= 1 && node[rank - 1].distance > node[rank].distance) {
            std::cerr << "distance order is wrong!" << std::endl;
            std::cerr << id << ":" << rank << ":" << node[rank - 1].id << ":" << node[rank].id << std::endl;
          }
          NGT::GraphNode &tn = *outGraph.getNode(id);
          volatile bool found = false;
          if (rank < 1000) {
            // Serial scan: does any already-kept neighbor reach node[rank].id cheaper?
            for (size_t tni = 0; tni < tn.size() && !found; tni++) {
              if (tn[tni].id == node[rank].id) {
                continue;
              }
              NGT::GraphNode &dstNode = *outGraph.getNode(tn[tni].id);
              for (size_t dni = 0; dni < dstNode.size(); dni++) {
                if ((dstNode[dni].id == node[rank].id) && (dstNode[dni].distance < node[rank].distance)) {
                  found = true;
                  break;
                }
              }
            }
          } else {
            // Parallel variant for very long adjacency lists.
            // NOTE(review): `found` is written by multiple threads with no atomic or
            // reduction (volatile is not a synchronization primitive); benign-looking
            // but formally a data race — confirm intent.
#ifdef _OPENMP
#pragma omp parallel for num_threads(10)
#endif
            for (size_t tni = 0; tni < tn.size(); tni++) {
              if (found) {
                continue;
              }
              if (tn[tni].id == node[rank].id) {
                continue;
              }
              NGT::GraphNode &dstNode = *outGraph.getNode(tn[tni].id);
              for (size_t dni = 0; dni < dstNode.size(); dni++) {
                if ((dstNode[dni].id == node[rank].id) && (dstNode[dni].distance < node[rank].distance)) {
                  found = true;
                }
              }
            }
          }
          if (!found) {
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
            // NOTE(review): `i` is not declared in this scope (loop variable is `rank`);
            // this branch cannot compile when NGT_SHARED_MEMORY_ALLOCATOR is defined —
            // likely should be `rank`. Confirm against upstream.
            outGraph.addEdge(id, node.at(i, outGraph.repository.allocator).id, node.at(i, outGraph.repository.allocator).distance, true);
#else
            tn.push_back(NGT::ObjectDistance(node[rank].id, node[rank].distance));
#endif
          } else {
            removeCount++;
          }
        } catch(NGT::Exception &err) {
          std::cerr << "GraphReconstructor: Warning. Cannot get the node. ID=" << id << ":" << err.what() << std::endl;
          it++;
          continue;
        }
        it++;
      }
      if (edge == false) {
        break;
      }
    }
#endif // NGT_SHARED_MEMORY_ALLOCATOR
  }

  // Convenience wrapper: unwraps the GraphIndex from the Index and delegates.
  static void adjustPathsEffectively(NGT::Index &outIndex, size_t minNoOfEdges = 0) {
    NGT::GraphIndex &outGraph = dynamic_cast<NGT::GraphIndex&>(outIndex.getIndex());
    adjustPathsEffectively(outGraph, minNoOfEdges);
  }

  // Orders edges by destination id; used with lower_bound to keep adjacency
  // lists sorted by id for binary-search membership tests.
  static bool edgeComp(NGT::ObjectDistance a, NGT::ObjectDistance b) {
    return a.id < b.id;
  }

#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
  // Inserts an edge keeping the node sorted by destination id (shared-memory variant).
  static void insert(NGT::GraphNode &node, size_t edgeID, NGT::Distance edgeDistance, NGT::GraphIndex &graph) {
    NGT::ObjectDistance edge(edgeID, edgeDistance);
    GraphNode::iterator ni = std::lower_bound(node.begin(graph.repository.allocator), node.end(graph.repository.allocator), edge, edgeComp);
    node.insert(ni, edge, graph.repository.allocator);
  }

  // Binary-searches srcNodeID's (id-sorted) adjacency list for dstNodeID.
  static bool hasEdge(NGT::GraphIndex &graph, size_t srcNodeID, size_t dstNodeID)
  {
    NGT::GraphNode &srcNode = *graph.getNode(srcNodeID);
    GraphNode::iterator ni = std::lower_bound(srcNode.begin(graph.repository.allocator), srcNode.end(graph.repository.allocator), ObjectDistance(dstNodeID, 0.0), edgeComp);
    return (ni != srcNode.end(graph.repository.allocator)) && ((*ni).id == dstNodeID);
  }
#else
  // Inserts an edge keeping the node sorted by destination id.
  static void insert(NGT::GraphNode &node, size_t edgeID, NGT::Distance edgeDistance) {
    NGT::ObjectDistance edge(edgeID, edgeDistance);
    GraphNode::iterator ni = std::lower_bound(node.begin(), node.end(), edge, edgeComp);
    node.insert(ni, edge);
  }

  // Binary-searches srcNodeID's (id-sorted) adjacency list for dstNodeID.
  static bool hasEdge(NGT::GraphIndex &graph, size_t srcNodeID, size_t dstNodeID)
  {
    NGT::GraphNode &srcNode = *graph.getNode(srcNodeID);
    GraphNode::iterator ni = std::lower_bound(srcNode.begin(), srcNode.end(), ObjectDistance(dstNodeID, 0.0), edgeComp);
    return (ni != srcNode.end()) && ((*ni).id == dstNodeID);
  }
#endif

  // Faster path adjustment: first collects, per node and in parallel, the
  // edges that are provably redundant via a 2-hop detour (both hops shorter
  // than the direct edge), then re-inserts edges rank by rank, dropping a
  // redundant edge only while the node keeps at least minNoOfEdges edges.
  static void adjustPathsEffectively(NGT::GraphIndex &outGraph, size_t minNoOfEdges) {
    Timer timer;
    timer.start();
    std::vector<NGT::GraphNode> tmpGraph;
    // Snapshot all adjacency lists and clear the live graph.
    for (size_t id = 1; id < outGraph.repository.size(); id++) {
      try {
        NGT::GraphNode &node = *outGraph.getNode(id);
        tmpGraph.push_back(node);
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
        node.clear(outGraph.repository.allocator);
#else
        node.clear();
#endif
      } catch(NGT::Exception &err) {
        std::cerr << "GraphReconstructor: Warning. Cannot get the node. ID=" << id << ":" << err.what() << std::endl;
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
        tmpGraph.push_back(NGT::GraphNode(outGraph.repository.allocator));
#else
        tmpGraph.push_back(NGT::GraphNode());
#endif
      }
    }
    if (outGraph.repository.size() != tmpGraph.size() + 1) {
      std::stringstream msg;
      msg << "GraphReconstructor: Fatal inner error. " << outGraph.repository.size() << ":" << tmpGraph.size();
      NGTThrowException(msg);
    }
    timer.stop();
    std::cerr << "GraphReconstructor::adjustPaths: graph preparing time=" << timer << std::endl;
    timer.reset();
    timer.start();

    // removeCandidates[id-1]: (pathNodeID, dstNodeID) pairs, sorted so the
    // highest source-edge rank comes last (consumed with pop_back below).
    std::vector<std::vector<std::pair<uint32_t, uint32_t> > > removeCandidates(tmpGraph.size());
    int removeCandidateCount = 0;  // NOTE(review): incremented inside the omp loop without atomic; diagnostic only
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (size_t idx = 0; idx < tmpGraph.size(); ++idx) {
      auto it = tmpGraph.begin() + idx;
      size_t id = idx + 1;
      try {
        NGT::GraphNode &srcNode = *it;
        // destination id -> (rank within srcNode, direct distance)
        std::unordered_map<uint32_t, std::pair<size_t, double> > neighbors;
        for (size_t sni = 0; sni < srcNode.size(); ++sni) {
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
          neighbors[srcNode.at(sni, outGraph.repository.allocator).id] = std::pair<size_t, double>(sni, srcNode.at(sni, outGraph.repository.allocator).distance);
#else
          neighbors[srcNode[sni].id] = std::pair<size_t, double>(sni, srcNode[sni].distance);
#endif
        }
        // (rank of redundant edge, (intermediate node id, destination id))
        std::vector<std::pair<int, std::pair<uint32_t, uint32_t> > > candidates;
        for (size_t sni = 0; sni < srcNode.size(); sni++) {
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
          NGT::GraphNode &pathNode = tmpGraph[srcNode.at(sni, outGraph.repository.allocator).id - 1];
#else
          NGT::GraphNode &pathNode = tmpGraph[srcNode[sni].id - 1];
#endif
          for (size_t pni = 0; pni < pathNode.size(); pni++) {
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
            auto dstNodeID = pathNode.at(pni, outGraph.repository.allocator).id;
#else
            auto dstNodeID = pathNode[pni].id;
#endif
            auto dstNode = neighbors.find(dstNodeID);
            // Redundant if both detour legs are shorter than the direct edge.
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
            if (dstNode != neighbors.end() && srcNode.at(sni, outGraph.repository.allocator).distance < (*dstNode).second.second && pathNode.at(pni, outGraph.repository.allocator).distance < (*dstNode).second.second) {
#else
            if (dstNode != neighbors.end() && srcNode[sni].distance < (*dstNode).second.second && pathNode[pni].distance < (*dstNode).second.second) {
#endif
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
              candidates.push_back(std::pair<int, std::pair<uint32_t, uint32_t> >((*dstNode).second.first, std::pair<uint32_t, uint32_t>(srcNode.at(sni, outGraph.repository.allocator).id, dstNodeID)));
#else
              candidates.push_back(std::pair<int, std::pair<uint32_t, uint32_t> >((*dstNode).second.first, std::pair<uint32_t, uint32_t>(srcNode[sni].id, dstNodeID)));
#endif
              removeCandidateCount++;
            }
          }
        }
        // Descending rank order so pop_back() yields candidates for the lowest rank first.
        sort(candidates.begin(), candidates.end(), std::greater<std::pair<int, std::pair<uint32_t, uint32_t>>>());
        removeCandidates[id - 1].reserve(candidates.size());
        for (size_t i = 0; i < candidates.size(); i++) {
          removeCandidates[id - 1].push_back(candidates[i].second);
        }
      } catch(NGT::Exception &err) {
        std::cerr << "GraphReconstructor: Warning. Cannot get the node. ID=" << id << ":" << err.what() << std::endl;
        continue;
      }
    }
    timer.stop();
    std::cerr << "GraphReconstructor::adjustPaths: extracting removed edge candidates time=" << timer << std::endl;
    timer.reset();
    timer.start();

    std::list<size_t> ids;  // nodes that still have un-processed ranks
    for (size_t idx = 0; idx < tmpGraph.size(); ++idx) {
      ids.push_back(idx + 1);
    }

    int removeCount = 0;
    removeCandidateCount = 0;
    // Round-robin over ranks: at each rank, either drop the edge (redundant
    // path confirmed to exist in the rebuilt graph) or insert it.
    for (size_t rank = 0; ids.size() != 0; rank++) {
      for (auto it = ids.begin(); it != ids.end();) {
        size_t id = *it;
        size_t idx = id - 1;
        try {
          NGT::GraphNode &srcNode = tmpGraph[idx];
          if (rank >= srcNode.size()) {
            if (!removeCandidates[idx].empty() && minNoOfEdges == 0) {
              std::cerr << "Something wrong! ID=" << id << " # of remaining candidates=" << removeCandidates[idx].size() << std::endl;
              abort();
            }
#if !defined(NGT_SHARED_MEMORY_ALLOCATOR)
            // Release the snapshot's memory for this finished node.
            NGT::GraphNode empty;
            tmpGraph[idx] = empty;
#endif
            it = ids.erase(it);
            continue;
          }
          // Only consider dropping if the node would keep more than minNoOfEdges edges.
          if (removeCandidates[idx].size() > 0 && ((*outGraph.getNode(id)).size() + srcNode.size() - rank) > minNoOfEdges) {
            removeCandidateCount++;
            bool pathExist = false;
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
            while (!removeCandidates[idx].empty() && (removeCandidates[idx].back().second == srcNode.at(rank, outGraph.repository.allocator).id)) {
#else
            while (!removeCandidates[idx].empty() && (removeCandidates[idx].back().second == srcNode[rank].id)) {
#endif
              size_t path = removeCandidates[idx].back().first;
              size_t dst = removeCandidates[idx].back().second;
              removeCandidates[idx].pop_back();
              if (removeCandidates[idx].empty()) {
                // Swap-with-empty to actually free the vector's capacity.
                std::vector<std::pair<uint32_t, uint32_t>> empty;
                removeCandidates[idx] = empty;
              }
              // The detour counts only if both hops survive in the rebuilt graph.
              if ((hasEdge(outGraph, id, path)) && (hasEdge(outGraph, path, dst))) {
                pathExist = true;
                // Drain the remaining candidates for this same destination.
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
                while (!removeCandidates[idx].empty() && (removeCandidates[idx].back().second == srcNode.at(rank, outGraph.repository.allocator).id)) {
#else
                while (!removeCandidates[idx].empty() && (removeCandidates[idx].back().second == srcNode[rank].id)) {
#endif
                  removeCandidates[idx].pop_back();
                  if (removeCandidates[idx].empty()) {
                    std::vector<std::pair<uint32_t, uint32_t>> empty;
                    removeCandidates[idx] = empty;
                  }
                }
                break;
              }
            }
            if (pathExist) {
              removeCount++;
              it++;
              continue;
            }
          }
          NGT::GraphNode &outSrcNode = *outGraph.getNode(id);
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
          insert(outSrcNode, srcNode.at(rank, outGraph.repository.allocator).id, srcNode.at(rank, outGraph.repository.allocator).distance, outGraph);
#else
          insert(outSrcNode, srcNode[rank].id, srcNode[rank].distance);
#endif
        } catch(NGT::Exception &err) {
          std::cerr << "GraphReconstructor: Warning. Cannot get the node. ID=" << id << ":" << err.what() << std::endl;
          it++;
          continue;
        }
        it++;
      }
    }
    // Restore the default distance ordering that insert() (id order) broke.
    for (size_t id = 1; id < outGraph.repository.size(); id++) {
      try {
        NGT::GraphNode &node = *outGraph.getNode(id);
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
        std::sort(node.begin(outGraph.repository.allocator), node.end(outGraph.repository.allocator));
#else
        std::sort(node.begin(), node.end());
#endif
      } catch(...) {}
    }
  }

  // Symmetrizes the graph in place: adds every edge's reverse edge, then
  // sorts and deduplicates each adjacency list.
  static void convertToANNG(std::vector<NGT::ObjectDistances> &graph)
  {
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
    std::cerr << "convertToANNG is not implemented for shared memory." << std::endl;
    return;
#else
    std::cerr << "convertToANNG begin" << std::endl;
    for (size_t idx = 0; idx < graph.size(); idx++) {
      NGT::GraphNode &node = graph[idx];
      // NOTE(review): pushing into graph[(*ni).id - 1] while iterating `node`
      // invalidates `ni` if an edge ever points at its own node (id == idx+1);
      // presumably self-edges never occur here — confirm that invariant.
      for (auto ni = node.begin(); ni != node.end(); ++ni) {
        graph[(*ni).id - 1].push_back(NGT::ObjectDistance(idx + 1, (*ni).distance));
      }
    }
    for (size_t idx = 0; idx < graph.size(); idx++) {
      NGT::GraphNode &node = graph[idx];
      if (node.size() == 0) {
        continue;
      }
      std::sort(node.begin(), node.end());
      NGT::ObjectID prev = 0;
      for (auto it = node.begin(); it != node.end();) {
        if (prev == (*it).id) {
          it = node.erase(it);
          continue;
        }
        prev = (*it).id;
        it++;
      }
      // Copy-and-swap to shrink capacity down to the deduplicated size.
      NGT::GraphNode tmp = node;
      node.swap(tmp);
    }
    std::cerr << "convertToANNG end" << std::endl;
#endif
  }

  // Builds an ONNG-style graph in outGraph: keeps the first originalEdgeSize
  // outgoing edges per node from `graph`, adds up to reverseEdgeSize reverse
  // edges, then sorts/deduplicates and marks the index as GraphTypeONNG.
  static void reconstructGraph(std::vector<NGT::ObjectDistances> &graph, NGT::GraphIndex &outGraph, size_t originalEdgeSize, size_t reverseEdgeSize)
  {
    if (reverseEdgeSize > 10000) {
      std::cerr << "something wrong. Edge size=" << reverseEdgeSize << std::endl;
      exit(1);
    }

    NGT::Timer originalEdgeTimer, reverseEdgeTimer, normalizeEdgeTimer;
    originalEdgeTimer.start();

    size_t warningCount = 0;
    const size_t warningLimit = 10;  // silence repetitive warnings after this many
    for (size_t id = 1; id < outGraph.repository.size(); id++) {
      try {
        NGT::GraphNode &node = *outGraph.getNode(id);
        if (originalEdgeSize == 0) {
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
          node.clear(outGraph.repository.allocator);
#else
          NGT::GraphNode empty;
          node.swap(empty);
#endif
        } else {
          NGT::ObjectDistances n = graph[id - 1];
          if (n.size() < originalEdgeSize) {
            warningCount++;
            if (warningCount <= warningLimit) {
              std::cerr << "GraphReconstructor: Warning. The edges are too few. " << n.size() << ":" << originalEdgeSize << " for " << id << std::endl;
            }
            if (warningCount == warningLimit) {
              std::cerr << "GraphReconstructor: Info. Too many warnings. Warning is disabled." << std::endl;
            }
            continue;
          }
          n.resize(originalEdgeSize);
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
          node.copy(n, outGraph.repository.allocator);
#else
          node.swap(n);
#endif
        }
      } catch(NGT::Exception &err) {
        warningCount++;
        if (warningCount <= warningLimit) {
          std::cerr << "GraphReconstructor: Warning. Cannot get the node. ID=" << id << ":" << err.what() << std::endl;
        }
        if (warningCount == warningLimit) {
          std::cerr << "GraphReconstructor: Info. Too many warnings. Warning is disabled." << std::endl;
        }
        continue;
      }
    }
    if (warningCount > warningLimit) {
      std::cerr << "GraphReconstructor: The total " << warningCount << " Warnings." << std::endl;
    }
    originalEdgeTimer.stop();

    // Add up to reverseEdgeSize reverse edges per source node.
    reverseEdgeTimer.start();
    int insufficientNodeCount = 0;
    for (size_t id = 1; id <= graph.size(); ++id) {
      try {
        NGT::ObjectDistances &node = graph[id - 1];
        size_t rsize = reverseEdgeSize;
        if (rsize > node.size()) {
          insufficientNodeCount++;
          rsize = node.size();
        }
        for (size_t i = 0; i < rsize; ++i) {
          NGT::Distance distance = node[i].distance;
          size_t nodeID = node[i].id;
          try {
            NGT::GraphNode &n = *outGraph.getNode(nodeID);
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
            n.push_back(NGT::ObjectDistance(id, distance), outGraph.repository.allocator);
#else
            n.push_back(NGT::ObjectDistance(id, distance));
#endif
          } catch(...) {}
        }
      } catch(NGT::Exception &err) {
        std::cerr << "GraphReconstructor: Warning. Cannot get the node. ID=" << id << ":" << err.what() << std::endl;
        continue;
      }
    }
    reverseEdgeTimer.stop();
    if (insufficientNodeCount != 0) {
      std::cerr << "# of the nodes edges of which are in short = " << insufficientNodeCount << std::endl;
    }

    // Sort each adjacency list and remove duplicate destinations.
    normalizeEdgeTimer.start();
    for (size_t id = 1; id < outGraph.repository.size(); id++) {
      try {
        NGT::GraphNode &n = *outGraph.getNode(id);
        if (id % 100000 == 0) {
          std::cerr << "Processed " << id << " nodes" << std::endl;
        }
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
        std::sort(n.begin(outGraph.repository.allocator), n.end(outGraph.repository.allocator));
#else
        std::sort(n.begin(), n.end());
#endif
        NGT::ObjectID prev = 0;
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
        for (auto it = n.begin(outGraph.repository.allocator); it != n.end(outGraph.repository.allocator);) {
#else
        for (auto it = n.begin(); it != n.end();) {
#endif
          if (prev == (*it).id) {
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
            it = n.erase(it, outGraph.repository.allocator);
#else
            it = n.erase(it);
#endif
            continue;
          }
          prev = (*it).id;
          it++;
        }
#if !defined(NGT_SHARED_MEMORY_ALLOCATOR)
        // Shrink capacity after deduplication.
        NGT::GraphNode tmp = n;
        n.swap(tmp);
#endif
      } catch(NGT::Exception &err) {
        std::cerr << "GraphReconstructor: Warning. Cannot get the node. ID=" << id << ":" << err.what() << std::endl;
        continue;
      }
    }
    normalizeEdgeTimer.stop();
    std::cerr << "Reconstruction time=" << originalEdgeTimer.time << ":" << reverseEdgeTimer.time << ":" << normalizeEdgeTimer.time << std::endl;

    NGT::Property prop;
    outGraph.getProperty().get(prop);
    prop.graphType = NGT::NeighborhoodGraph::GraphTypeONNG;
    outGraph.getProperty().set(prop);
  }

  // Variant of reconstructGraph that first distributes reverse edges starting
  // from low-indegree nodes (capping each destination's indegree at
  // reverseEdgeSize) and then tops up with the original outgoing edges.
  // mode 'a': stop adding once a node reaches originalEdgeSize; 'c': always add.
  static void reconstructGraphWithConstraint(std::vector<NGT::ObjectDistances> &graph, NGT::GraphIndex &outGraph, size_t originalEdgeSize, size_t reverseEdgeSize, char mode = 'a')
  {
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
    std::cerr << "reconstructGraphWithConstraint is not implemented." << std::endl;
    abort();
#else
    NGT::Timer originalEdgeTimer, reverseEdgeTimer, normalizeEdgeTimer;

    if (reverseEdgeSize > 10000) {
      std::cerr << "something wrong. Edge size=" << reverseEdgeSize << std::endl;
      exit(1);
    }

    // Clear every node in the output graph.
    for (size_t id = 1; id < outGraph.repository.size(); id++) {
      if (id % 1000000 == 0) {
        std::cerr << "Processed " << id << std::endl;
      }
      try {
        NGT::GraphNode &node = *outGraph.getNode(id);
        if (node.size() == 0) {
          continue;
        }
        node.clear();
        NGT::GraphNode empty;
        node.swap(empty);
      } catch(NGT::Exception &err) {
        std::cerr << "GraphReconstructor: Warning. Cannot get the node. ID=" << id << ":" << err.what() << std::endl;
        continue;
      }
    }
    NGT::GraphIndex::showStatisticsOfGraph(outGraph);

    // reverse[dst] = incoming edges of dst (index 0 unused; ids are 1-based).
    std::vector<ObjectDistances> reverse(graph.size() + 1);
    for (size_t id = 1; id <= graph.size(); ++id) {
      try {
        NGT::GraphNode &node = graph[id - 1];
        if (id % 100000 == 0) {
          std::cerr << "Processed (summing up) " << id << std::endl;
        }
        for (size_t rank = 0; rank < node.size(); rank++) {
          reverse[node[rank].id].push_back(ObjectDistance(id, node[rank].distance));
        }
      } catch(NGT::Exception &err) {
        std::cerr << "GraphReconstructor: Warning. Cannot get the node. ID=" << id << ":" << err.what() << std::endl;
        continue;
      }
    }

    // Sort nodes by reverse-edge count so sparse nodes are served first.
    std::vector<std::pair<size_t, size_t> > reverseSize(graph.size() + 1);
    reverseSize[0] = std::pair<size_t, size_t>(0, 0);
    for (size_t rid = 1; rid <= graph.size(); ++rid) {
      reverseSize[rid] = std::pair<size_t, size_t>(reverse[rid].size(), rid);
    }
    std::sort(reverseSize.begin(), reverseSize.end());

    std::vector<uint32_t> indegreeCount(graph.size(), 0);
    size_t zeroCount = 0;
    // NOTE(review): `sizerank <= reverseSize.size()` reads one past the end of
    // reverseSize (size() is a valid count, not a valid index) — looks like an
    // off-by-one; `<` is presumably intended. Also, reverseEdgeTimer.stop() is
    // called below without a matching start(). Confirm against upstream.
    for (size_t sizerank = 0; sizerank <= reverseSize.size(); sizerank++) {
      if (reverseSize[sizerank].first == 0) {
        zeroCount++;
        continue;
      }
      size_t rid = reverseSize[sizerank].second;
      ObjectDistances &rnode = reverse[rid];
      for (auto rni = rnode.begin(); rni != rnode.end(); ++rni) {
        if (indegreeCount[(*rni).id] >= reverseEdgeSize) {
          continue;
        }
        NGT::GraphNode &node = *outGraph.getNode(rid);
        if (indegreeCount[(*rni).id] > 0 && node.size() >= originalEdgeSize) {
          continue;
        }
        node.push_back(NGT::ObjectDistance((*rni).id, (*rni).distance));
        indegreeCount[(*rni).id]++;
      }
    }
    reverseEdgeTimer.stop();
    std::cerr << "The number of nodes with zero outdegree by reverse edges=" << zeroCount << std::endl;
    NGT::GraphIndex::showStatisticsOfGraph(outGraph);

    // Sort and deduplicate every adjacency list.
    normalizeEdgeTimer.start();
    for (size_t id = 1; id < outGraph.repository.size(); id++) {
      try {
        NGT::GraphNode &n = *outGraph.getNode(id);
        if (id % 100000 == 0) {
          std::cerr << "Processed " << id << std::endl;
        }
        std::sort(n.begin(), n.end());
        NGT::ObjectID prev = 0;
        for (auto it = n.begin(); it != n.end();) {
          if (prev == (*it).id) {
            it = n.erase(it);
            continue;
          }
          prev = (*it).id;
          it++;
        }
        NGT::GraphNode tmp = n;
        n.swap(tmp);
      } catch(NGT::Exception &err) {
        std::cerr << "GraphReconstructor: Warning. Cannot get the node. ID=" << id << ":" << err.what() << std::endl;
        continue;
      }
    }
    normalizeEdgeTimer.stop();
    NGT::GraphIndex::showStatisticsOfGraph(outGraph);

    // Top up each node with its original outgoing edges, subject to `mode`.
    originalEdgeTimer.start();
    for (size_t id = 1; id < outGraph.repository.size(); id++) {
      if (id % 1000000 == 0) {
        std::cerr << "Processed " << id << std::endl;
      }
      NGT::GraphNode &node = graph[id - 1];
      try {
        NGT::GraphNode &onode = *outGraph.getNode(id);
        bool stop = false;
        for (size_t rank = 0; (rank < node.size() && rank < originalEdgeSize) && stop == false; rank++) {
          switch (mode) {
          case 'a':
            if (onode.size() >= originalEdgeSize) {
              stop = true;
              continue;
            }
            break;
          case 'c':
            break;
          }
          NGT::Distance distance = node[rank].distance;
          size_t nodeID = node[rank].id;
          outGraph.addEdge(id, nodeID, distance, false);
        }
      } catch(NGT::Exception &err) {
        std::cerr << "GraphReconstructor: Warning. Cannot get the node. ID=" << id << ":" << err.what() << std::endl;
        continue;
      }
    }
    originalEdgeTimer.stop();
    NGT::GraphIndex::showStatisticsOfGraph(outGraph);
    std::cerr << "Reconstruction time=" << originalEdgeTimer.time << ":" << reverseEdgeTimer.time << ":" << normalizeEdgeTimer.time << std::endl;
#endif
  }

  // reconstruct a pseudo ANNG with a fewer edges from an actual ANNG with more edges.
  // graph is a source ANNG
  // index is an index with a reconstructed ANNG
  // Keeps up to edgeSize edges per node, inserting each kept edge in both
  // directions, then sorts/deduplicates the result.
  static void reconstructANNGFromANNG(std::vector<NGT::ObjectDistances> &graph, NGT::Index &index, size_t edgeSize)
  {
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
    std::cerr << "reconstructANNGFromANNG is not implemented." << std::endl;
    abort();
#else
    NGT::GraphIndex &outGraph = dynamic_cast<NGT::GraphIndex&>(index.getIndex());
    // remove all edges in the index.
    for (size_t id = 1; id < outGraph.repository.size(); id++) {
      if (id % 1000000 == 0) {
        std::cerr << "Processed " << id << " nodes." << std::endl;
      }
      try {
        NGT::GraphNode &node = *outGraph.getNode(id);
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
        node.clear(outGraph.repository.allocator);
#else
        NGT::GraphNode empty;
        node.swap(empty);
#endif
      } catch(NGT::Exception &err) {
      }
    }

    for (size_t id = 1; id <= graph.size(); ++id) {
      size_t edgeCount = 0;
      try {
        NGT::ObjectDistances &node = graph[id - 1];
        NGT::GraphNode &n = *outGraph.getNode(id);
        NGT::Distance prevDistance = 0.0;
        assert(n.size() == 0);
        for (size_t i = 0; i < node.size(); ++i) {
          NGT::Distance distance = node[i].distance;
          // Source edges must be in ascending distance order.
          if (prevDistance > distance) {
            NGTThrowException("Edge distance order is invalid");
          }
          prevDistance = distance;
          size_t nodeID = node[i].id;
          // Only insert edges toward lower ids; the reverse direction is
          // added symmetrically, so each undirected edge is handled once.
          if (node[i].id < id) {
            try {
              NGT::GraphNode &dn = *outGraph.getNode(nodeID);
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
              n.push_back(NGT::ObjectDistance(nodeID, distance), outGraph.repository.allocator);
              dn.push_back(NGT::ObjectDistance(id, distance), outGraph.repository.allocator);
#else
              n.push_back(NGT::ObjectDistance(nodeID, distance));
              dn.push_back(NGT::ObjectDistance(id, distance));
#endif
            } catch(...) {}
            edgeCount++;
          }
          if (edgeCount >= edgeSize) {
            break;
          }
        }
      } catch(NGT::Exception &err) {
      }
    }

    // Sort and deduplicate the merged adjacency lists.
    for (size_t id = 1; id < outGraph.repository.size(); id++) {
      try {
        NGT::GraphNode &n = *outGraph.getNode(id);
        std::sort(n.begin(), n.end());
        NGT::ObjectID prev = 0;
        for (auto it = n.begin(); it != n.end();) {
          if (prev == (*it).id) {
            it = n.erase(it);
            continue;
          }
          prev = (*it).id;
          it++;
        }
        NGT::GraphNode tmp = n;
        n.swap(tmp);
      } catch (...)
      {
      }
    }
#endif
  }

  // Wrapper that optionally suppresses log output while refining.
  // NOTE(review): redirector.end() is only called on the exception path;
  // on success the redirection is never ended here — confirm whether the
  // destructor restores the streams.
  static void refineANNG(NGT::Index &index, bool unlog, float epsilon = 0.1, float accuracy = 0.0, int noOfEdges = 0, int exploreEdgeSize = INT_MIN, size_t batchSize = 10000) {
    NGT::StdOstreamRedirector redirector(unlog);
    redirector.begin();
    try {
      refineANNG(index, epsilon, accuracy, noOfEdges, exploreEdgeSize, batchSize);
    } catch (NGT::Exception &err) {
      redirector.end();
      throw(err);
    }
  }

  // Re-searches every stored object in batches and merges the search results
  // into the graph (outgoing, and incoming when noOfEdges == 0); when
  // noOfEdges > 0 the node degree is finally pruned to build a kNNG.
  static void refineANNG(NGT::Index &index, float epsilon = 0.1, float accuracy = 0.0, int noOfEdges = 0, int exploreEdgeSize = INT_MIN, size_t batchSize = 10000) {
#if defined(NGT_SHARED_MEMORY_ALLOCATOR)
    NGTThrowException("GraphReconstructor::refineANNG: Not implemented for the shared memory option.");
#else
    auto prop = static_cast<GraphIndex&>(index.getIndex()).getGraphProperty();
    NGT::ObjectRepository &objectRepository = index.getObjectSpace().getRepository();
    NGT::GraphIndex &graphIndex = static_cast<GraphIndex&>(index.getIndex());
    size_t nOfObjects = objectRepository.size();
    bool error = false;
    std::string errorMessage;
    // Search for one more than the requested/creation edge count (self is dropped later).
    size_t noOfSearchedEdges = noOfEdges < 0 ? -noOfEdges : (noOfEdges > prop.edgeSizeForCreation ? noOfEdges : prop.edgeSizeForCreation);
    noOfSearchedEdges++;
    for (size_t bid = 1; bid < nOfObjects; bid += batchSize) {
      // NOTE(review): runtime-sized array (VLA) is a compiler extension in C++;
      // also the last batch iterates idx up to batchSize even when bid+idx
      // exceeds nOfObjects — presumably isEmpty(id) covers that; confirm.
      NGT::ObjectDistances results[batchSize];
      // search
#pragma omp parallel for
      for (size_t idx = 0; idx < batchSize; idx++) {
        size_t id = bid + idx;
        if (id % 100000 == 0) {
          std::cerr << "# of processed objects=" << id << std::endl;
        }
        if (objectRepository.isEmpty(id)) {
          continue;
        }
        NGT::SearchContainer searchContainer(*objectRepository.get(id));
        searchContainer.setResults(&results[idx]);
        assert(prop.edgeSizeForCreation > 0);
        searchContainer.setSize(noOfSearchedEdges);
        if (accuracy > 0.0) {
          searchContainer.setExpectedAccuracy(accuracy);
        } else {
          searchContainer.setEpsilon(epsilon);
        }
        if (exploreEdgeSize != INT_MIN) {
          searchContainer.setEdgeSize(exploreEdgeSize);
        }
        if (!error) {
          try {
            index.search(searchContainer);
          } catch (NGT::Exception &err) {
            // Record the first failure; remaining iterations become no-ops.
#pragma omp critical
            {
              error = true;
              errorMessage = err.what();
            }
          }
        }
      }
      if (error) {
        std::stringstream msg;
        msg << "GraphReconstructor::refineANNG: " << errorMessage;
        NGTThrowException(msg);
      }
      // outgoing edges
#pragma omp parallel for
      for (size_t idx = 0; idx < batchSize; idx++) {
        size_t id = bid + idx;
        if (objectRepository.isEmpty(id)) {
          continue;
        }
        NGT::GraphNode &node = *graphIndex.getNode(id);
        for (auto i = results[idx].begin(); i != results[idx].end(); ++i) {
          if ((*i).id != id) {  // skip self-match
            node.push_back(*i);
          }
        }
        std::sort(node.begin(), node.end());
        // dedupe
        ObjectID prev = 0;
        for (GraphNode::iterator ni = node.begin(); ni != node.end();) {
          if (prev == (*ni).id) {
            ni = node.erase(ni);
            continue;
          }
          prev = (*ni).id;
          ni++;
        }
      }
      // incomming edges
      if (noOfEdges != 0) {
        continue;
      }
      // Serial: writes into arbitrary destination nodes, so no omp here.
      for (size_t idx = 0; idx < batchSize; idx++) {
        size_t id = bid + idx;
        if (id % 10000 == 0) {
          std::cerr << "# of processed objects=" << id << std::endl;
        }
        for (auto i = results[idx].begin(); i != results[idx].end(); ++i) {
          if ((*i).id != id) {
            NGT::GraphNode &node = *graphIndex.getNode((*i).id);
            graphIndex.addEdge(node, id, (*i).distance, false);
          }
        }
      }
    }
    if (noOfEdges > 0) {
      // prune to build knng
      size_t nedges = noOfEdges < 0 ? -noOfEdges : noOfEdges;
#pragma omp parallel for
      for (ObjectID id = 1; id < nOfObjects; ++id) {
        if (objectRepository.isEmpty(id)) {
          continue;
        }
        NGT::GraphNode &node = *graphIndex.getNode(id);
        if (node.size() > nedges) {
          node.resize(nedges);
        }
      }
    }
#endif // defined(NGT_SHARED_MEMORY_ALLOCATOR)
  }

};

}; // NGT
opencl_sxc_fmt_plug.c
/* * Modified by Dhiru Kholia <dhiru at openwall.com> for Keychain format. * * This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net> * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #ifdef HAVE_OPENCL #if FMT_EXTERNS_H extern struct fmt_main fmt_opencl_sxc; #elif FMT_REGISTERS_H john_register_one(&fmt_opencl_sxc); #else #include <string.h> #include <stdint.h> #include <openssl/blowfish.h> #ifdef _OPENMP #include <omp.h> #endif #include "arch.h" #include "sha.h" #include "aes.h" #include "formats.h" #include "common.h" #include "misc.h" #include "options.h" #include "common.h" #include "formats.h" #include "common-opencl.h" #define FORMAT_LABEL "sxc-opencl" #define FORMAT_NAME "StarOffice .sxc" #define FORMAT_TAG "$sxc$*" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #define ALGORITHM_NAME "PBKDF2-SHA1 OpenCL Blowfish" #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define BINARY_SIZE 20 #define PLAINTEXT_LENGTH 64 #define SALT_SIZE sizeof(sxc_cpu_salt) #define BINARY_ALIGN MEM_ALIGN_WORD #define SALT_ALIGN 4 typedef struct { uint32_t length; uint8_t v[20]; // hash of password } sxc_password; typedef struct { uint32_t v[16/4]; } sxc_hash; typedef struct { uint32_t iterations; uint32_t outlen; uint32_t skip_bytes; uint8_t length; uint8_t salt[64]; } sxc_salt; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_out)[32 / sizeof(uint32_t)]; typedef struct { int cipher_type; int checksum_type; int iterations; int key_size; int iv_length; int salt_length; int original_length; int length; unsigned char iv[16]; unsigned char salt[32]; unsigned char content[1024]; } sxc_cpu_salt; static sxc_cpu_salt *cur_salt; static struct fmt_tests sxc_tests[] = { 
{"$sxc$*0*0*1024*16*4448359828281a1e6842c31453473abfeae584fb*8*dc0248bea0c7508c*16*1d53770002fe9d8016064e5ef9423174*860*864*f00399ab17b9899cd517758ecf918d4da78099ccd3557aef5e22e137fd5b81f732fc7c167c4de0cf263b4f82b50e3d6abc65da613a36b0025d89e1a09adeb4106da28040d1019bb4b36630fc8bc94fe5b515504bf8a92ea630bb95ace074868e7c10743ec970c89895f44b975a30b6ca032354f3e73ec86b2cc7a4f7a185884026d971b37b1e0e650376a2552e27ba955c700f8903a82a6df11f6cc2ecf63290f02ffdd278f890d1db75f9e8bd0f437c4ec613d3c6dcb421bbd1067be633593ba9bd58f77ef08e0cca64c732f892567d20de8d4c444fa9c1c1adc5e4657ef9740cb69ce55c8f9e6b1cfed0739ef002f1e1c1e54a5df50a759d92354f78eb90a9d9378f36df7d1edd8002ea0d637604fcd2408494c2d42b1771e2a2a20b55044836f76db4ed71e8a53f55a04f9437946603e7246c2d2d70caf6be0de82e8977fab4de84ca3783baedac041195d8b51166b502ff80c17db78f63d3632df1d5ef5b14d8d5553fc40b072030f9e3374c93e929a490c6cfb170f04433fc46f43b9c7d27f3f8c4ed759d4a20c2e53a0701b7c3d9201390a9b5597ce8ba35bd765b662e2242b9821bbb63b6be502d2150fff37e4b7f2a6b592fd0e319a7349df320e7fe7da600a2a05628dc00e04d480c085417f676bd0518bc39d9a9be34fc0cb192d5fa5e0c657cdf7c1ad265a2e81b90ac8b28d326f98b8f33c123df83edc964d2c17a904d0df8bd9ecbf629929d6e48cadc97f49a8941ada3d219e8c0f04f37cecc9a50cc5307fd2a488c34829b05cd1615ae0d1ef0ce450529aa755f9ae38332187ffe4144990de3265afaacb9f0f0fb9c67f6210369f7a0cc5bb346412db08e0f4732f91aa8d4b32fe6eece4fba118f118f6df2fb6c53fa9bc164c9ab7a9d414d33281eb0c3cd02abe0a4dd1c170e41c1c960a8f12a48a7b5e1f748c08e1b150a4e389c110ea3368bc6c6ef2bee98dc92c6825cbf6aee20e690e116c0e6cf48d49b38035f6a9b0cd6053b9f5b9f8360024c9c608cbba3fe5e7966b656fa08dec3e3ce3178a0c0007b7d177c7c44e6a68f4c7325cb98264b1e0f391c75a6a8fd3691581fb68ef459458830f2138d0fd743631efd92b742dfeb62c5ea8502515eb65af414bf805992f9272a7b1b745970fd54e128751f8f6c0a4d5bc7872bc09c04037e1e91dc7192d68f780cdb0f7ef6b282ea883be462ffeffb7b396e30303030", "openwall"}, 
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*259cafe530bd09f8*16*8f53ea878d0795cfe05dcc65fb272c20*1024*1024*ffb0f736b69d8433f958e8f475f609948ad7c9dd052f2b92c14cb1b395ffcac043a3def76d58442e131701b3b53d56ea570633bb20c83068542420160f5db3cee5eece05b67b54d0d4cdf3fbfd928d94852924e391aa3f70cad598b48b946399b0cd1e9e7e7d081a888933f8a1973e83166799396e8290771463c623620b51fb5310b9b0e0de3e5597b66a091301ada3ba6a5d7515d1fcf0eff65e543b50f8fe2222619600054eaf69c7aa680c96bc403f115cab32d6d8e8bc0db3904a71ce3eb1283ca73fd6f75845d9b7d0275e4727a0f56bfbf962a9079f51849de2c9dee7f1dadbbae944f442169281004773309b0f8d68f2cf83076fd8b19afbccf5ab7dc58fb9554fee82e2c491d6434a4cef6f3209775343c840bc4bdfab6705000e67412ac74d00f5b6aba1fd21bca5213234a5a1100a9d93daa141a4936723ea77d008a35c9078167a3529706432b36b9ec012c060d093535c85ca6feb75165d620d7d663c3e76b9bf3af614556ed8560b446a8a73649cb935383a30b4fd8fd75522203e4575cf4bc2b7f01a9294310fe021c71acbf68f6f1e95f48c30c14151c51d4fb878a16272ee73753bc750cbd48007c842412ca1dcb6214215b082c00d619a5318e2ebe9149410f501170093784afc2bd71dd9f5a87b349b96661747b1627e8cba8a5c98559fb146fa7e30db4c6f648ce3c2209f84551a7a1cd46d9172ae1354b6d093f89f6f5f58d29c1d7af8830df62e67753caa8166322caa0f8adf4b61d2013d35baa7c002e1d4c83b1cba8aaa57cf4946627fa63ba7a6a5a5c803e8d5a4794845ab670ef950b918a360cd9f12e8f3424ecab1f505cb494ad35f28d12ff183471d0f47bd67e6abd3b8c8e206d11149474a19b5c13d165d8f6dc39cf579fe1000295328aeeb82e0ae8020d2f61e4c3d6e68c25a655ab72aad5e9e74af4cf27c74158fdb1a29a3d76cd658976fa0a30743247408df00a23b593f68861348a6c46af05d21a4b81fedbf5715462ec8ffc5f001a85c43058ac1fab488236588ef0bf08dd8dd7c7fce630a0a996395b503647d9a2f0dd63dd2f939eca8e1849ee4ed41a6d5672d947177e8f890692de879a20dd9e366ec494d270faf0d24fc076172a25998aac218586404687e7c77b55e77e0eff9b1c65c3f8da99deaa86411ab6aca2531d84b364349591bc73e7504163afd23c5208e321883ee611ea7e4e5885086e4fa7196e16b948cb54808b64b94106c74900e3190fd5f6068b490fd0c9c64481771527a0e2d00899fd5b7a9e7f508cc6770018fadf09d965d7a12ad3624d2161d9546d4a793
7b5f961d7f7c4714786380c147e1ec6b0583503bd5a139b892831d1ea925993bb86f12e75d9010ceba230a1c286fa3d1d654a1672313cbf0763c05c622cee452f76957c42ba0e853ecda163d15e8600a702ccdc9e8f88a", "Ghe+t0Blaster"}, {"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*9bb755c8a4fe8c34*16*112b9d41098c8677615755361da473a6*1024*1024*b95f0f2e0e1c7b4ee61168b646804d4b70b615f3c978cec65c9a7ab515417c79625d104373fd5012c3da6b356f8408a3a75edcc8b2aad0aa38bb33edd8933bdadbffde35a350ade73ccb9df29c2996082f5e94e324496835f8dfebe15ca38950e0f435d711ef964aa09915d58287967b5e321ca195a7f90253157afe82329da9a496c97292419b9a94cdb92f919e6d54700466aff61c200c5a355905b5a37c12d77b0e4ffd23f0204cfa664f4c0545f233db8d35af5fe337b459135da398fd23101becb194db305496474ba4179a7355285a9ec935044e1831f290f5f87ed3e00925e7fb4fc6bc38d9f0cfe9abf72560400490d2fd398d2d49516b618f99168602f323dd1786bcca394830341dfbeb377f9b7ef161dc1470f5e92b6152fa7a4f428e8ae40100791491a9e1c9385298522320488f00535866ac6e08354a75b8b2fd293066da7eb6b4ad7f3e13c8dc98cd815b2393f147fdac6279f76fdac9abd0a94131fa84fe4e99634a362a56d60ce588f6e0b66d6f8b6d411511272ffe32181d20e7d2c3d4b680764607afb2c29dcb94a845b920e96f6c27575534f8b7f9ddd93bdcef0d717d0a899fa937e7d2eeeb6d5b0338757f6e69dac72524d4b6f74edce1f937008eb3653bcc31a88712af940cf47ec3f3efd83e4da89d1a6cb7da6cf8d7d41430bc81a4b5d7bb46cad687f2f505e3379143ae274eed6201c3b17c1e05e516a14cbf2351ccf9fdd46e1309afb170bd01eb8f6a1d8e12441525199455fb550e3fc689b1801332b2d985e336b158f846fcbca18fbe6ea21438cf1fb5fdbce8d6350e65d6468342880845675ec721af2fb9df917a3968b4a1a477fc4c74ee38a71a230d77c2a7cf66ae6b83804488cbd25213ebc470cd845a2691b16161a640ebb385aa2381dc91f692f6c4ca2709b5a7e94dfb4548000a29b56f1da08701945d6209fabbd1621b28849fc27810775f1a0e0204d3ae9040a8cfb1386499a39d87149cfc1579de7d059662ad25a67abd42b30bb3608f09142ca030351c3a1e921e4c7bbc11aab846ef42eb5d1418c15ada77539aca096e0678439cd1b60950d2aa0cc4d2004b1ac48dc6a454c5a8e9ea7e910047c7c83895fd614fd9dfd961631eb23757646143c2aeb03c1a6476e78fc4ccf0f02cc1f88ec1b0080a170ac6871dc183939f7
a4376965b0dfa7922012582eec4846ee621edc5547a2b9c4893e7f67f76541a4bd4a91827a57b3db5cdea29a2a3cc20238d89c8145c14b037360ad27f54f87317ef70472d6b1fd9f1168bcf8aba6071257b3adebab8d4e115188ed4af3fc3574fdccb4bc7eeb00a6a442f1b96a989b735f5e6059ec72c1677b77f437dcb93066f8591a11071799c3a0ec3b48f6160976aff1928c375358837e1ef02e20397b2e9d8d9c4bff23172c9b4c0b941cb1b49b5bc070f72a14cd384", "M1racl33"}, {"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*ceb1edb1e3cb72fd*16*f7104c9b2789540f5fd4beef009c0139*1024*1024*709130b940a9663d0a5687133c6f78535d05a72936faed8c2f3c1b4e29423baaabcee4f0d7d57e3ad8d8c090486f974c4d0ce4be5b29ef8e1b02c01b4af1959ed0b277146a45aec35a48997b584b82697803193644eefd88a7eefcae8819839e13702f887278a597dd954babd82bf71bf8ca8559af0e5537be0264e358d36b4f5067960edf608de731e04d117de953386aadee71849edbc494fac3e6b14567a9e9c545a06d402acd3158441829f25478ed0f9086dabd2d3913b123b43c27176f8f08f30312d84e82d47654097a2bce95554357db3ce3d45a7441472067f55e4ea6244a3dedc23db4bea8f549109ffac382cf5b652c5b1ee431bcab1051567c263a9d668c5d6a15a6f8da754914746c1d3c7eb6347bdd8d6a3ac82e4c742fcf8721913c111dfd5398f2698db00f7220d2a3562e02f7f7a6505af3ba1ee10b46f2ab5b5d2f52d288fd12814c6edbcb8d50b6e8716fba0d5962747b971689fe75e94fa36ec39598ea30e15ab2b9c9f22ca04b890a13b18fb3c7a962050426bb2da08c8b993608b9c1ffd0a21e0c74e993242ead8eb30f86d7d2dcdbd4774d85c2e06adbe4b40050ff0ac1a8afe8fbc2175ec4da4676a691b1fce38421175734c20f07a604fea5287e1c33b420aa9db4de9bd97382c161b4ec0818add675e52ebf036aad779f24b824be4b2b013c470ff66cbf44f5800e128a3b328e80a5fd6295b9b3a94e915f9add6710cb9444432751a7a31c3a3422f48a5eabc26d9a52571b8447bdd0a5977ff7153d95337cef7ff2ec29774332fbeed6ee5eed5e12288cc13e14ba9d5ff3dd052e28ba96715f5b95d7ea214ebcd9e60b26308eb11370b824b5cff2644dd2117985b3c25ba8076d4025cf3a3a62da62d5e11d44422a142048e8cd00c7de6a0a55fd5dc09a3ed01dfe35b88268f351b6ff289fee8e52ac29fe32d9990e0d6d87f39727b6a762bac9d509c6ea235fc8bedc3bec2143eae9fd2cb831b798ef8261d72785002638b940947de0aad64f791f9a27e5b091e55adf4aee0649f6785bdd37e02
48fedd1759d771aeacacb3ff6e7cf2d045f791428ab61710b54e869213393caf1b6bc99066678351deafc290cecc1f6b40b5532adbbab9a70408c61a437d4483b6a75cb61a55b20881efc0d849e0f60c1887f0fa091672179a145c4ab1b6487a0e939e0123d5aaffa3aec66ab593f9c25d27f22f4a73a999a4ab45e8bc7d71a85e2d40afadad1a1dc0b8389f96f91614293fa205583ef1c3440e3df50e8aa5f1a13e5929b72cd003461ff03d44d8c84bdada176b24459021d398b2b91b61a9c0b553a8714c703d32452c691a33f1581e98c2439514ca3e7deeef90850f8d6d89bf1d3a5762a56ef769ea588f5c1705bfb7b944cfbbb0632718ee3722f4e1929b35706d6413a315a11bc16349af109a7e675df2ab1eebe93", "excel123"}, {NULL} }; static cl_int cl_error; static sxc_password *inbuffer; static sxc_hash *outbuffer; static sxc_salt currentsalt; static cl_mem mem_in, mem_out, mem_setting; static struct fmt_main *self; static size_t insize, outsize, settingsize; #define STEP 0 #define SEED 256 // This file contains auto-tuning routine(s). Has to be included after formats definitions. #include "opencl-autotune.h" #include "memdbg.h" static const char * warn[] = { "xfer: ", ", crypt: ", ", xfer: " }; /* ------- Helper functions ------- */ static size_t get_task_max_work_group_size() { return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel); } static void create_clobj(size_t gws, struct fmt_main *self) { insize = sizeof(sxc_password) * gws; outsize = sizeof(sxc_hash) * gws; settingsize = sizeof(sxc_salt); inbuffer = mem_calloc(1, insize); outbuffer = mem_alloc(outsize); saved_key = mem_calloc(gws, sizeof(*saved_key)); crypt_out = mem_calloc(gws, sizeof(*crypt_out)); /// Allocate memory mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem in"); mem_setting = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem setting"); mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem out"); 
	/* Bind the three kernel arguments once; the buffers themselves are
	 * refilled per crypt_all() / set_salt() call. */
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting), &mem_setting), "Error while setting mem_salt kernel argument");
}

/* Release the device buffers and host-side arrays created by create_clobj().
 * crypt_out doubles as the "already allocated" flag, so this is safe to call
 * when nothing has been allocated yet. */
static void release_clobj(void)
{
	if (crypt_out) {
		HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
		HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
		HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");

		MEM_FREE(inbuffer);
		MEM_FREE(outbuffer);
		MEM_FREE(saved_key);
		MEM_FREE(crypt_out);
	}
}

/* Tear down kernel/program state; only if reset() actually auto-tuned. */
static void done(void)
{
	if (autotuned) {
		release_clobj();
		HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
		HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
		autotuned--;
	}
}

/* Format init: remember self and prepare the selected OpenCL device.
 * Buffer allocation is deferred to reset()/create_clobj(). */
static void init(struct fmt_main *_self)
{
	self = _self;
	opencl_prepare_dev(gpu_id);
}

/* Build the PBKDF2-HMAC-SHA1 kernel (with buffer sizes baked in via -D
 * defines) and run the shared auto-tuning machinery on first use. */
static void reset(struct db_main *db)
{
	if (!autotuned) {
		char build_opts[64];

		snprintf(build_opts, sizeof(build_opts),
		         "-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d",
		         (int)sizeof(inbuffer->v),
		         (int)sizeof(currentsalt.salt),
		         (int)sizeof(outbuffer->v));
		opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl",
		            gpu_id, build_opts);

		crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error);
		HANDLE_CLERROR(cl_error, "Error creating kernel");

		// Initialize openCL tuning (library) for this format.
		opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self,
		                       create_clobj, release_clobj,
		                       sizeof(sxc_password), 0, db);

		// Auto tune execution from shared/included code.
		autotune_run(self, 1, 0, 1000);
	}
}

/* Validate one "$sxc$*..." ciphertext: walks every '*'-separated field and
 * range-checks it; returns 1 on a well-formed hash, 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy;
	char *keeptr;
	char *p;
	int res, extra;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		return 0;
	/* strtokm() mutates its input, so work on a copy. */
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LEN;
	if ((p = strtokm(ctcopy, "*")) == NULL)	/* cipher type */
		goto err;
	res = atoi(p);
	if (res != 0 && res != 1)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* checksum type */
		goto err;
	res = atoi(p);
	if (res != 0 && res != 1)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* iterations */
		goto err;
	res = atoi(p);
	if (res <= 0)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* key size */
		goto err;
	res = atoi(p);
	if (res != 16 && res != 32)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* checksum field (skipped) */
		goto err;
	if (hexlenl(p, &extra) != BINARY_SIZE * 2 || extra)
		goto err;
	if (!ishex(p))
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* iv length */
		goto err;
	res = atoi(p);
	if (res <= 0 || res > 16)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* iv */
		goto err;
	if (hexlenl(p, &extra) != res * 2 || extra)
		goto err;
	if (!ishex(p))
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* salt length */
		goto err;
	res = atoi(p);
	if (res <= 0 || res > 32)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* salt */
		goto err;
	if (hexlenl(p, &extra) != res * 2 || extra)
		goto err;
	if (!ishex(p))
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* original length */
		goto err;
	res = atoi(p);
	if (res <= 0 || res > 1024)	/* 1024 because of "unsigned char output[1024];" in crypt_all */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* length */
		goto err;
	res = atoi(p);
	if (res <= 0 || res > 1024)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* content */
		goto err;
	if (hexlenl(p, &extra) != res * 2 || extra)
		goto err;
	if (strtokm(NULL, "*") != NULL)	/* the end */
		goto err;

	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}

/* Parse a (previously valid()-checked) ciphertext into the CPU-side salt
 * struct. Returns a pointer to static storage, as JtR's salt API expects;
 * each call overwrites the previous result. */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;
	static sxc_cpu_salt cs;

	memset(&cs, 0, sizeof(cs));
	ctcopy += 6;	/* skip over "$sxc$*" */
	p = strtokm(ctcopy, "*");
	cs.cipher_type = atoi(p);
	p = strtokm(NULL, "*");
	cs.checksum_type = atoi(p);
	p = strtokm(NULL, "*");
	cs.iterations = atoi(p);
	p = strtokm(NULL, "*");
	cs.key_size = atoi(p);
	strtokm(NULL, "*");
	/* skip checksum field */
	p = strtokm(NULL, "*");
	cs.iv_length = atoi(p);
	p = strtokm(NULL, "*");
	/* hex-decode the IV, salt and encrypted content */
	for (i = 0; i < cs.iv_length; i++)
		cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*");
	cs.salt_length = atoi(p);
	p = strtokm(NULL, "*");
	for (i = 0; i < cs.salt_length; i++)
		cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*");
	cs.original_length = atoi(p);
	p = strtokm(NULL, "*");
	cs.length = atoi(p);
	p = strtokm(NULL, "*");
	for (i = 0; i < cs.length; i++)
		cs.content[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)&cs;
}

/* Hex-decode the checksum field (field 5) into a static, ARCH_WORD-aligned
 * buffer; this is the "binary" the cracked results are compared against. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE+1];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;

	ctcopy += FORMAT_TAG_LEN;	/* skip over "$sxc$*" */
	/* skip the first four fields to reach the checksum */
	strtokm(ctcopy, "*");
	strtokm(NULL, "*");
	strtokm(NULL, "*");
	strtokm(NULL, "*");
	p = strtokm(NULL, "*");
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	MEM_FREE(keeptr);
	return out;
}

/* Install the current salt: keep the CPU-side copy for the post-processing
 * (Blowfish) step and push the GPU-side PBKDF2 parameters to the device. */
static void set_salt(void *salt)
{
	cur_salt = (sxc_cpu_salt*)salt;
	memcpy((char*)currentsalt.salt, cur_salt->salt, cur_salt->salt_length);
	currentsalt.length = cur_salt->salt_length;
	currentsalt.iterations = cur_salt->iterations;
	currentsalt.outlen = cur_salt->key_size;
	currentsalt.skip_bytes = 0;

	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
	                                    CL_FALSE, 0, settingsize,
	                                    &currentsalt, 0, NULL, NULL),
	               "Copy salt to gpu");
}

#undef set_key
/* Store (a truncated copy of) the candidate password for this index. */
static void set_key(char *key, int index)
{
	int saved_len = strlen(key);

	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Main cracking loop for a batch of `count` candidates:
 *  1. (CPU, OpenMP) SHA1 each password — that digest is the PBKDF2 "password";
 *  2. (GPU) derive the Blowfish key via the pbkdf2_hmac_sha1 kernel;
 *  3. (CPU, OpenMP) BF-CFB64-decrypt the stored content and SHA1 it into
 *     crypt_out[] for comparison against the stored checksum. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;
	size_t *lws = local_work_size ? &local_work_size : NULL;

	global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		unsigned char hash[20];
		SHA_CTX ctx;

		SHA1_Init(&ctx);
		SHA1_Update(&ctx, (unsigned char *)saved_key[index],
		            strlen(saved_key[index]));
		SHA1_Final((unsigned char *)hash, &ctx);
		memcpy(inbuffer[index].v, hash, 20);
		inbuffer[index].length = 20;
	}

	/// Copy data to gpu
	BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
	                                   insize, inbuffer, 0, NULL,
	                                   multi_profilingEvent[0]),
	              "Copy data to gpu");

	/// Run kernel
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
	                                     NULL, &global_work_size, lws, 0,
	                                     NULL, multi_profilingEvent[1]),
	              "Run kernel");

	/// Read the result back (blocking read doubles as a sync point)
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
	                                  outsize, outbuffer, 0, NULL,
	                                  multi_profilingEvent[2]),
	              "Copy result back");

	if (ocl_autotune_running)
		return count;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		BF_KEY bf_key;
		SHA_CTX ctx;
		int bf_ivec_pos;
		unsigned char ivec[8];
		unsigned char output[1024];

		bf_ivec_pos = 0;
		memcpy(ivec, cur_salt->iv, 8);
		BF_set_key(&bf_key, cur_salt->key_size,
		           (const unsigned char*)outbuffer[index].v);
		BF_cfb64_encrypt(cur_salt->content, output, cur_salt->length,
		                 &bf_key, ivec, &bf_ivec_pos, 0);
		SHA1_Init(&ctx);
		SHA1_Update(&ctx, output, cur_salt->original_length);
		SHA1_Final((unsigned char*)crypt_out[index], &ctx);
	}

	return count;
}

/* Fast partial match: compares only the first ARCH_SIZE bytes; cmp_one()
 * below confirms with a full BINARY_SIZE comparison. */
static int cmp_all(void *binary, int count)
{
	int index = 0;

	for (; index < count; index++)
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

/* Nothing further to check beyond the full binary comparison. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Report the tunable-cost "iteration count" for this salt.
 * NOTE(review): get_salt() returns an sxc_cpu_salt, but this casts the
 * pointer to sxc_salt (the GPU salt layout). This is only correct if the
 * 'iterations' member sits at the same offset in both structs — verify
 * against the struct definitions earlier in this file. */
static unsigned int iteration_count(void *salt)
{
	sxc_salt *my_salt;

	my_salt = salt;
	return (unsigned int) my_salt->iterations;
}

/* JtR format descriptor wiring the callbacks above into the framework. */
struct fmt_main fmt_opencl_sxc = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
		{
			"iteration count",
		},
		{ FORMAT_TAG },
		sxc_tests
	}, {
		init,
		done,
		reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
GeneralMatrixMatrix.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H

namespace Eigen {

namespace internal {

template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;

/* Specialization for a row-major destination matrix => simple transposition of the product.
 * Uses the identity (A*B)^T = B^T * A^T so all real work happens in the
 * col-major specialization below. */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
{
  typedef gebp_traits<RhsScalar,LhsScalar> Traits;

  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
  static EIGEN_STRONG_INLINE void run(
    Index rows, Index cols, Index depth,
    const LhsScalar* lhs, Index lhsStride,
    const RhsScalar* rhs, Index rhsStride,
    ResScalar* res, Index resStride,
    ResScalar alpha,
    level3_blocking<RhsScalar,LhsScalar>& blocking,
    GemmParallelInfo<Index>* info = 0)
  {
    // transpose the product such that the result is column major
    general_matrix_matrix_product<Index,
      RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
      LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
      ColMajor>
    ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
  }
};

/* Specialization for a col-major destination matrix
 *    => Blocking algorithm following Goto's paper */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
{

typedef gebp_traits<LhsScalar,RhsScalar> Traits;

typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
// Computes res += alpha * lhs * rhs using cache blocking; when `info` is
// non-null this is the per-thread body of an OpenMP-parallel GEMM.
static void run(Index rows, Index cols, Index depth,
  const LhsScalar* _lhs, Index lhsStride,
  const RhsScalar* _rhs, Index rhsStride,
  ResScalar* _res, Index resStride,
  ResScalar alpha,
  level3_blocking<LhsScalar,RhsScalar>& blocking,
  GemmParallelInfo<Index>* info = 0)
{
  typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper;
  typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper;
  typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;
  LhsMapper lhs(_lhs,lhsStride);
  RhsMapper rhs(_rhs,rhsStride);
  ResMapper res(_res, resStride);

  Index kc = blocking.kc();                   // cache block size along the K direction
  Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction
  Index nc = (std::min)(cols,blocking.nc());  // cache block size along the N direction

  gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing, LhsStorageOrder> pack_lhs;
  gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
  gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;

#ifdef EIGEN_HAS_OPENMP
  if(info)
  {
    // this is the parallel version!
    int tid = omp_get_thread_num();
    int threads = omp_get_num_threads();

    // A' is shared between threads (allocated by the blocking object);
    // B' is a per-thread stack/heap scratch buffer.
    LhsScalar* blockA = blocking.blockA();
    eigen_internal_assert(blockA!=0);

    std::size_t sizeB = kc*nc;
    ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0);

    // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
    for(Index k=0; k<depth; k+=kc)
    {
      const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'

      // In order to reduce the chance that a thread has to wait for the other,
      // let's start by packing B'.
      pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc);

      // Pack A_k to A' in a parallel fashion:
      // each thread packs the sub block A_k,i to A'_i where i is the thread id.

      // However, before copying to A'_i, we have to make sure that no other thread is still using it,
      // i.e., we test that info[tid].users equals 0.
      // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
      // NOTE(review): busy-wait spin; relies on the users counter being
      // updated with proper atomicity/visibility (see the decrement below).
      while(info[tid].users!=0) {}
      info[tid].users = threads;

      pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length);

      // Notify the other threads that the part A'_i is ready to go.
      info[tid].sync = k;

      // Computes C_i += A' * B' per A'_i
      // Each thread starts at its own panel (shift==0) so the common case
      // needs no waiting at all.
      for(int shift=0; shift<threads; ++shift)
      {
        int i = (tid+shift)%threads;

        // At this point we have to make sure that A'_i has been updated by the thread i,
        // we use testAndSetOrdered to mimic a volatile access.
        // However, no need to wait for the B' part which has been updated by the current thread!
        if (shift>0) {
          while(info[i].sync!=k) {
          }
        }

        gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha);
      }

      // Then keep going as usual with the remaining B'
      for(Index j=nc; j<cols; j+=nc)
      {
        const Index actual_nc = (std::min)(j+nc,cols)-j;

        // pack B_k,j to B'
        pack_rhs(blockB, rhs.getSubMapper(k,j), actual_kc, actual_nc);

        // C_j += A' * B'
        gebp(res.getSubMapper(0, j), blockA, blockB, rows, actual_kc, actual_nc, alpha);
      }

      // Release all the sub blocks A'_i of A' for the current thread,
      // i.e., we simply decrement the number of users by 1
      for(Index i=0; i<threads; ++i)
#if !EIGEN_HAS_CXX11_ATOMIC
        #pragma omp atomic
#endif
        info[i].users -= 1;
    }
  }
  else
#endif // EIGEN_HAS_OPENMP
  {
    EIGEN_UNUSED_VARIABLE(info);

    // this is the sequential version!
    std::size_t sizeA = kc*mc;
    std::size_t sizeB = kc*nc;

    // Use the blocking object's buffers when preallocated, otherwise get
    // aligned stack/heap scratch space.
    ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
    ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());

    const bool pack_rhs_once = mc!=rows && kc==depth && nc==cols;

    // For each horizontal panel of the rhs, and corresponding panel of the lhs...
    for(Index i2=0; i2<rows; i2+=mc)
    {
      const Index actual_mc = (std::min)(i2+mc,rows)-i2;

      for(Index k2=0; k2<depth; k2+=kc)
      {
        const Index actual_kc = (std::min)(k2+kc,depth)-k2;

        // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
        // => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching)
        // Note that this panel will be read as many times as the number of blocks in the rhs's
        // horizontal panel which is, in practice, a very low number.
        pack_lhs(blockA, lhs.getSubMapper(i2,k2), actual_kc, actual_mc);

        // For each kc x nc block of the rhs's horizontal panel...
        for(Index j2=0; j2<cols; j2+=nc)
        {
          const Index actual_nc = (std::min)(j2+nc,cols)-j2;

          // We pack the rhs's block into a sequential chunk of memory (L2 caching)
          // Note that this block will be read a very high number of times, which is equal to the number of
          // micro horizontal panel of the large rhs's panel (e.g., rows/12 times).
          if((!pack_rhs_once) || i2==0)
            pack_rhs(blockB, rhs.getSubMapper(k2,j2), actual_kc, actual_nc);

          // Everything is packed, we can now call the panel * block kernel:
          gebp(res.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha);
        }
      }
    }
  }
}

};

/*********************************************************************************
*  Specialization of generic_product_impl for "large" GEMM, i.e.,
*  implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/

// Callable object binding operands, destination, alpha and blocking together;
// invoked (possibly per row-slice, with a GemmParallelInfo) by parallelize_gemm.
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
  gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking)
    : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
  {}

  void initParallelSession(Index num_threads) const
  {
    m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads);
    m_blocking.allocateA();
  }

  void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
  {
    if(cols==-1)
      cols = m_rhs.cols();

    Gemm::run(rows, cols, m_lhs.cols(),
              &m_lhs.coeffRef(row,0), m_lhs.outerStride(),
              &m_rhs.coeffRef(0,col), m_rhs.outerStride(),
              (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
              m_actualAlpha, m_blocking, info);
  }

  typedef typename Gemm::Traits Traits;

  protected:
    const Lhs& m_lhs;
    const Rhs& m_rhs;
    Dest& m_dest;
    Scalar m_actualAlpha;
    BlockingType& m_blocking;
};

template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;

// Common base: holds the cache blocking sizes (mc, nc, kc) and the packed
// A'/B' buffer pointers shared by the blocking-space specializations.
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
    typedef _LhsScalar LhsScalar;
    typedef _RhsScalar RhsScalar;

  protected:
    LhsScalar* m_blockA;
    RhsScalar* m_blockB;

    Index m_mc;
    Index m_nc;
    Index m_kc;

  public:

    level3_blocking()
      : m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0)
    {}

    inline Index mc() const { return m_mc; }
    inline Index nc() const { return m_nc; }
    inline Index kc() const { return m_kc; }

    inline LhsScalar* blockA() { return m_blockA; }
    inline RhsScalar* blockB() { return m_blockB; }
};

// Fully fixed-size case: buffers live inside the object (no heap allocation),
// aligned either natively or by manual pointer rounding.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true /* == FiniteAtCompileTime */>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor,
      ActualRows = Transpose ? MaxCols : MaxRows,
      ActualCols = Transpose ? MaxRows : MaxCols
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;
    enum {
      SizeA = ActualRows * MaxDepth,
      SizeB = ActualCols * MaxDepth
    };

#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
    EIGEN_ALIGN_MAX LhsScalar m_staticA[SizeA];
    EIGEN_ALIGN_MAX RhsScalar m_staticB[SizeB];
#else
    // Over-allocate raw bytes and align the pointer by hand in the ctor.
    EIGEN_ALIGN_MAX char m_staticA[SizeA * sizeof(LhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
    EIGEN_ALIGN_MAX char m_staticB[SizeB * sizeof(RhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
#endif

  public:

    gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/)
    {
      this->m_mc = ActualRows;
      this->m_nc = ActualCols;
      this->m_kc = MaxDepth;
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
      this->m_blockA = m_staticA;
      this->m_blockB = m_staticB;
#else
      this->m_blockA = reinterpret_cast<LhsScalar*>((internal::UIntPtr(m_staticA) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
      this->m_blockB = reinterpret_cast<RhsScalar*>((internal::UIntPtr(m_staticB) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
#endif
    }

    void initParallel(Index, Index, Index, Index)
    {}

    inline void allocateA() {}
    inline void allocateB() {}
    inline void allocateAll() {}
};

// Dynamic-size case: blocking sizes are computed at run time and the packed
// buffers are lazily heap-allocated (and freed by the destructor).
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;

    Index m_sizeA;
    Index m_sizeB;

  public:

    gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      if(l3_blocking)
      {
        computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc, num_threads);
      }
      else  // no l3 blocking
      {
        // pass a copy so m_nc itself is left untouched
        Index n = this->m_nc;
        computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, n, num_threads);
      }

      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
    }

    void initParallel(Index rows, Index cols, Index depth, Index num_threads)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      eigen_internal_assert(this->m_blockA==0 && this->m_blockB==0);
      Index m = this->m_mc;
      computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, this->m_nc, num_threads);
      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
    }

    void allocateA()
    {
      if(this->m_blockA==0)
        this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
    }

    void allocateB()
    {
      if(this->m_blockB==0)
        this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
    }

    void allocateAll()
    {
      allocateA();
      allocateB();
    }

    ~gemm_blocking_space()
    {
      aligned_delete(this->m_blockA, m_sizeA);
      aligned_delete(this->m_blockB, m_sizeB);
    }
};

} // end namespace internal

namespace internal {

// High-level product evaluator for "large" dense*dense products: dispatches
// small products to the lazy coefficient-based path, otherwise sets up the
// blocking object and (possibly parallel) blocked GEMM.
template<typename Lhs, typename Rhs>
struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
  : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> >
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;
  typedef typename Lhs::Scalar LhsScalar;
  typedef typename Rhs::Scalar RhsScalar;

  typedef internal::blas_traits<Lhs> LhsBlasTraits;
  typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
  typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;

  typedef internal::blas_traits<Rhs> RhsBlasTraits;
  typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
  typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;

  enum {
    MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
  };

  typedef generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> lazyproduct;

  template<typename Dst>
  static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    // See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=404 for a discussion and helper program
    // to determine the following heuristic.
    // EIGEN_GEMM_TO_COEFFBASED_THRESHOLD is typically defined to 20 in GeneralProduct.h,
    // unless it has been specialized by the user or for a given architecture.
    // Note that the condition rhs.rows()>0 was required because lazy product is (was?) not happy with empty inputs.
    // I'm not sure it is still required.
    if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
      lazyproduct::eval_dynamic(dst, lhs, rhs, internal::assign_op<typename Dst::Scalar,Scalar>());
    else
    {
      dst.setZero();
      scaleAndAddTo(dst, lhs, rhs, Scalar(1));
    }
  }

  template<typename Dst>
  static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
      lazyproduct::eval_dynamic(dst, lhs, rhs, internal::add_assign_op<typename Dst::Scalar,Scalar>());
    else
      scaleAndAddTo(dst,lhs, rhs, Scalar(1));
  }

  template<typename Dst>
  static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
      lazyproduct::eval_dynamic(dst, lhs, rhs, internal::sub_assign_op<typename Dst::Scalar,Scalar>());
    else
      scaleAndAddTo(dst, lhs, rhs, Scalar(-1));
  }

  // dst += alpha * lhs * rhs, extracting any nested scalar factors and
  // conjugation flags from the operand expressions first.
  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const Lhs& a_lhs, const Rhs& a_rhs, const Scalar& alpha)
  {
    eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols());
    if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0)
      return;

    typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
    typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);

    Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
                               * RhsBlasTraits::extractScalarFactor(a_rhs);

    typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
            Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;

    typedef internal::gemm_functor<
      Scalar, Index,
      internal::general_matrix_matrix_product<
        Index,
        LhsScalar, (ActualLhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
        RhsScalar, (ActualRhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
        (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
      ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor;

    BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true);
    internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>
        (GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), a_lhs.cols(), Dest::Flags&RowMajorBit);
  }
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
mufunc_object.c
/* * Python Universal Functions Object -- Math for all types, plus fast * arrays math * * Full description * * This supports mathematical (and Boolean) functions on arrays and other python * objects. Math on large arrays of basic C types is rather efficient. * * Travis E. Oliphant 2005, 2006 oliphant@ee.byu.edu (oliphant.travis@ieee.org) * Brigham Young University * * based on the * * Original Implementation: * Copyright (c) 1995, 1996, 1997 Jim Hugunin, hugunin@mit.edu * * with inspiration and code from * Numarray * Space Science Telescope Institute * J. Todd Miller * Perry Greenfield * Rick White * */ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #include "Python.h" #include "npy_config.h" #define PY_ARRAY_UNIQUE_SYMBOL _mpy_umathmodule_ARRAY_API #define NO_IMPORT_ARRAY #define PY_UFUNC_UNIQUE_SYMBOL _mpy_umathmodule_UFUNC_API #define NO_IMPORT_UFUNC #include <numpy/npy_3kcompat.h> #include <numpy/arrayobject.h> #include <numpy/ufuncobject.h> #include <numpy/arrayscalars.h> #include <lowlevel_strided_loops.h> #define PyMicArray_API_UNIQUE_NAME _mpy_umathmodule_MICARRAY_API #define PyMicArray_NO_IMPORT #include <multiarray/arrayobject.h> #include <multiarray/multiarray_api.h> #include <multiarray/mpy_common.h> #include <multiarray/common.h> #define _MICARRAY_UMATHMODULE #include "mufunc_object.h" #include "output_creators.h" #include "reduction.h" /* Some useful macros */ #define CPU_DEVICE (omp_get_initial_device()) #define PyMicArray_TRIVIAL_PAIR_ITERATION_STRIDE(size, arr) ( \ size == 1 ? 0 : ((PyMicArray_NDIM(arr) == 1) ? \ PyMicArray_STRIDE(arr, 0) : \ PyMicArray_ITEMSIZE(arr))) #define PyMicArray_TRIVIALLY_ITERABLE(arr) \ PyArray_TRIVIALLY_ITERABLE((PyArrayObject *)arr) #define PyMicArray_PREPARE_TRIVIAL_ITERATION(arr, count, data, stride) \ count = PyMicArray_SIZE(arr); \ data = (npy_intp) PyMicArray_BYTES(arr); \ stride = ((PyMicArray_NDIM(arr) == 0) ? 0 : \ ((PyMicArray_NDIM(arr) == 1) ? 
\ PyMicArray_STRIDE(arr, 0) : \ PyMicArray_ITEMSIZE(arr))); #define PyMicArray_TRIVIALLY_ITERABLE_PAIR(arr1, arr2, arr1_read, arr2_read) \ PyArray_TRIVIALLY_ITERABLE_PAIR(\ (PyArrayObject *)arr1,(PyArrayObject *)arr2, arr1_read, arr2_read) #define PyMicArray_PREPARE_TRIVIAL_PAIR_ITERATION(arr1, arr2, \ count, \ data1, data2, \ stride1, stride2) { \ npy_intp size1 = PyMicArray_SIZE(arr1); \ npy_intp size2 = PyMicArray_SIZE(arr2); \ count = ((size1 > size2) || size1 == 0) ? size1 : size2; \ data1 = PyMicArray_BYTES(arr1); \ data2 = PyMicArray_BYTES(arr2); \ stride1 = PyMicArray_TRIVIAL_PAIR_ITERATION_STRIDE(size1, arr1); \ stride2 = PyMicArray_TRIVIAL_PAIR_ITERATION_STRIDE(size2, arr2); \ } #define PyMicArray_TRIVIALLY_ITERABLE_TRIPLE(arr1, arr2, arr3, arr1_read, arr2_read, arr3_read) \ PyArray_TRIVIALLY_ITERABLE_TRIPLE(\ (PyArrayObject *)arr1,\ (PyArrayObject *)arr2,\ (PyArrayObject *)arr3,\ arr1_read, arr2_read, arr3_read) #define PyMicArray_PREPARE_TRIVIAL_TRIPLE_ITERATION(arr1, arr2, arr3, \ count, \ data1, data2, data3, \ stride1, stride2, stride3) { \ npy_intp size1 = PyMicArray_SIZE(arr1); \ npy_intp size2 = PyMicArray_SIZE(arr2); \ npy_intp size3 = PyMicArray_SIZE(arr3); \ count = ((size1 > size2) || size1 == 0) ? size1 : size2; \ count = ((size3 > count) || size3 == 0) ? 
size3 : count; \ data1 = PyMicArray_BYTES(arr1); \ data2 = PyMicArray_BYTES(arr2); \ data3 = PyMicArray_BYTES(arr3); \ stride1 = PyMicArray_TRIVIAL_PAIR_ITERATION_STRIDE(size1, arr1); \ stride2 = PyMicArray_TRIVIAL_PAIR_ITERATION_STRIDE(size2, arr2); \ stride3 = PyMicArray_TRIVIAL_PAIR_ITERATION_STRIDE(size3, arr3); \ } /********** PRINTF DEBUG TRACING **************/ #define NPY_UF_DBG_TRACING 0 #if NPY_UF_DBG_TRACING #define NPY_UF_DBG_PRINT(s) {printf("%s", s);fflush(stdout);} #define NPY_UF_DBG_PRINT1(s, p1) {printf((s), (p1));fflush(stdout);} #define NPY_UF_DBG_PRINT2(s, p1, p2) {printf(s, p1, p2);fflush(stdout);} #define NPY_UF_DBG_PRINT3(s, p1, p2, p3) {printf(s, p1, p2, p3);fflush(stdout);} #else #define NPY_UF_DBG_PRINT(s) #define NPY_UF_DBG_PRINT1(s, p1) #define NPY_UF_DBG_PRINT2(s, p1, p2) #define NPY_UF_DBG_PRINT3(s, p1, p2, p3) #endif /**********************************************/ /********************/ #define USE_USE_DEFAULTS 1 /********************/ /* ---------------------------------------------------------------- */ static int _does_loop_use_arrays(void *data); static int _extract_pyvals(PyObject *ref, const char *name, int *bufsize, int *errmask, PyObject **errobj); static int assign_reduce_identity_zero(PyMicArrayObject *result, void *data); static int assign_reduce_identity_minusone(PyMicArrayObject *result, void *data); static int assign_reduce_identity_one(PyMicArrayObject *result, void *data); /* * Determine whether all array is on the same device * Return 0 on success and -1 when fail */ static int _on_same_device(PyUFuncObject *ufunc, PyMicArrayObject **op) { int nop = ufunc->nin + ufunc->nout; if (nop <= 0) { return -1; } int i; int device = PyMicArray_DEVICE(op[0]); for (i = 1; i < nop; ++i) { if (op[i] != NULL && PyMicArray_DEVICE(op[i]) != device) { return -1; } } return 0; } /* * fpstatus is the ufunc_formatted hardware status * errmask is the handling mask specified by the user. 
* errobj is a Python object with (string, callable object or None) * or NULL */ /* * 2. for each of the flags * determine whether to ignore, warn, raise error, or call Python function. * If ignore, do nothing * If warn, print a warning and continue * If raise return an error * If call, call a user-defined function with string */ #if USE_USE_DEFAULTS==1 static int PyUFunc_NUM_NODEFAULTS = 0; #endif static PyObject * get_global_ext_obj(void) { PyObject *thedict; PyObject *ref = NULL; #if USE_USE_DEFAULTS==1 if (PyUFunc_NUM_NODEFAULTS != 0) { #endif thedict = PyThreadState_GetDict(); if (thedict == NULL) { thedict = PyEval_GetBuiltins(); } ref = PyDict_GetItem(thedict, mpy_um_str_pyvals_name); #if USE_USE_DEFAULTS==1 } #endif return ref; } static int _get_bufsize_errmask(PyObject * extobj, const char *ufunc_name, int *buffersize, int *errormask) { /* Get the buffersize and errormask */ if (extobj == NULL) { extobj = get_global_ext_obj(); } if (_extract_pyvals(extobj, ufunc_name, buffersize, errormask, NULL) < 0) { return -1; } return 0; } static int _extract_pyvals(PyObject *ref, const char *name, int *bufsize, int *errmask, PyObject **errobj) { PyObject *retval; /* default errobj case, skips dictionary lookup */ if (ref == NULL) { if (errmask) { *errmask = UFUNC_ERR_DEFAULT; } if (errobj) { *errobj = Py_BuildValue("NO", PyBytes_FromString(name), Py_None); } if (bufsize) { *bufsize = NPY_BUFSIZE; } return 0; } if (!PyList_Check(ref) || (PyList_GET_SIZE(ref)!=3)) { PyErr_Format(PyExc_TypeError, "%s must be a length 3 list.", MUFUNC_PYVALS_NAME); return -1; } if (bufsize != NULL) { *bufsize = PyInt_AsLong(PyList_GET_ITEM(ref, 0)); if ((*bufsize == -1) && PyErr_Occurred()) { return -1; } if ((*bufsize < NPY_MIN_BUFSIZE) || (*bufsize > NPY_MAX_BUFSIZE) || (*bufsize % 16 != 0)) { PyErr_Format(PyExc_ValueError, "buffer size (%d) is not in range " "(%"NPY_INTP_FMT" - %"NPY_INTP_FMT") or not a multiple of 16", *bufsize, (npy_intp) NPY_MIN_BUFSIZE, (npy_intp) NPY_MAX_BUFSIZE); 
return -1; } } if (errmask != NULL) { *errmask = PyInt_AsLong(PyList_GET_ITEM(ref, 1)); if (*errmask < 0) { if (PyErr_Occurred()) { return -1; } PyErr_Format(PyExc_ValueError, "invalid error mask (%d)", *errmask); return -1; } } if (errobj != NULL) { *errobj = NULL; retval = PyList_GET_ITEM(ref, 2); if (retval != Py_None && !PyCallable_Check(retval)) { PyObject *temp; temp = PyObject_GetAttrString(retval, "write"); if (temp == NULL || !PyCallable_Check(temp)) { PyErr_SetString(PyExc_TypeError, "python object must be callable or have " \ "a callable write method"); Py_XDECREF(temp); return -1; } Py_DECREF(temp); } *errobj = Py_BuildValue("NO", PyBytes_FromString(name), retval); if (*errobj == NULL) { return -1; } } return 0; } /* Return the position of next non-white-space char in the string */ static int _next_non_white_space(const char* str, int offset) { int ret = offset; while (str[ret] == ' ' || str[ret] == '\t') { ret++; } return ret; } static int _is_alpha_underscore(char ch) { return (ch >= 'A' && ch <= 'Z') || (ch >= 'a' && ch <= 'z') || ch == '_'; } static int _is_alnum_underscore(char ch) { return _is_alpha_underscore(ch) || (ch >= '0' && ch <= '9'); } /* * Return the ending position of a variable name */ static int _get_end_of_name(const char* str, int offset) { int ret = offset; while (_is_alnum_underscore(str[ret])) { ret++; } return ret; } /* * Returns 1 if the dimension names pointed by s1 and s2 are the same, * otherwise returns 0. */ static int _is_same_name(const char* s1, const char* s2) { while (_is_alnum_underscore(*s1) && _is_alnum_underscore(*s2)) { if (*s1 != *s2) { return 0; } s1++; s2++; } return !_is_alnum_underscore(*s1) && !_is_alnum_underscore(*s2); } /* * Checks if 'obj' is a valid output array for a ufunc, i.e. it is * either None or a writeable array, increments its reference count * and stores a pointer to it in 'store'. Returns 0 on success, sets * an exception and returns -1 on failure. 
*/ static int _set_out_array(PyObject *obj, PyMicArrayObject **store) { if (obj == Py_None) { /* Translate None to NULL */ return 0; } if (PyMicArray_Check(obj)) { /* If it's an array, store it */ if (PyMicArray_FailUnlessWriteable((PyMicArrayObject *)obj, "output array") < 0) { return -1; } Py_INCREF(obj); *store = (PyMicArrayObject *)obj; return 0; } PyErr_SetString(PyExc_TypeError, "return arrays must be of ArrayType"); return -1; } static void ufunc_pre_typeresolver(PyUFuncObject *ufunc, PyMicArrayObject **op, void **ptrs, npy_longlong *buf, int bufsize) { int i; for (i = 0; i < ufunc->nin; ++i) { if (PyMicArray_NDIM(op[i]) == 0) { void *ptr = buf + (i * bufsize); ptrs[i] = PyMicArray_DATA(op[i]); target_memcpy(ptr, PyMicArray_DATA(op[i]), PyMicArray_ITEMSIZE(op[i]), CPU_DEVICE, PyMicArray_DEVICE(op[i])); } } /* Change array data to buffer address */ for (i = 0; i < ufunc->nin; ++i) { if (PyMicArray_NDIM(op[i]) == 0) { PyMicArray_DATA(op[i]) = &(buf[i*bufsize]); } } } static void ufunc_post_typeresolver(PyUFuncObject *ufunc, PyMicArrayObject **op, void **ptrs) { int i; for (i = 0; i < ufunc->nin; ++i) { if (PyMicArray_NDIM(op[i]) == 0) { PyMicArray_DATA(op[i]) = ptrs[i]; } } } /********* GENERIC UFUNC USING ITERATOR *********/ /* * Produce a name for the ufunc, if one is not already set * This is used in the PyUFunc_handlefperr machinery, and in error messages */ static const char* _get_ufunc_name(PyUFuncObject *ufunc) { return ufunc->name ? ufunc->name : "<unnamed ufunc>"; } /* * Parses the positional and keyword arguments for a generic ufunc call. * * Note that if an error is returned, the caller must free the * non-zero references in out_op. This * function does not do its own clean-up. 
*/ static int get_ufunc_arguments(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds, PyMicArrayObject **out_op, NPY_ORDER *out_order, NPY_CASTING *out_casting, PyObject **out_extobj, PyObject **out_typetup, int *out_subok, PyMicArrayObject **out_wheremask) { int i, nargs; int nin = ufunc->nin; int nout = ufunc->nout; PyObject *obj, *context; PyObject *str_key_obj = NULL; const char *ufunc_name = _get_ufunc_name(ufunc); int type_num, device; int any_flexible = 0, any_object = 0, any_flexible_userloops = 0; int has_sig = 0; ufunc_name = _get_ufunc_name(ufunc); *out_extobj = NULL; *out_typetup = NULL; if (out_wheremask != NULL) { *out_wheremask = NULL; } /* Check number of arguments */ nargs = PyTuple_Size(args); if ((nargs < nin) || (nargs > ufunc->nargs)) { PyErr_SetString(PyExc_ValueError, "invalid number of arguments"); return -1; } device = PyMicArray_GetCurrentDevice(); /* Get input arguments */ for (i = 0; i < nin; ++i) { obj = PyTuple_GET_ITEM(args, i); if (PyMicArray_Check(obj)) { PyMicArrayObject *obj_a = (PyMicArrayObject *)obj; device = PyMicArray_DEVICE(obj_a); // use for next op Py_INCREF(obj_a); out_op[i] = obj_a; } else if (PyArray_Check(obj)) { out_op[i] = (PyMicArrayObject *)PyMicArray_FromArray( (PyArrayObject *)obj, NULL, device, 0); } else { out_op[i] = (PyMicArrayObject *)PyMicArray_FromAny(device, obj, NULL, 0, 0, 0, NULL); } if (out_op[i] == NULL) { return -1; } type_num = PyMicArray_DESCR(out_op[i])->type_num; if (!any_flexible && PyTypeNum_ISFLEXIBLE(type_num)) { any_flexible = 1; } if (!any_object && PyTypeNum_ISOBJECT(type_num)) { any_object = 1; } /* * If any operand is a flexible dtype, check to see if any * struct dtype ufuncs are registered. A ufunc has been registered * for a struct dtype if ufunc's arg_dtypes array is not NULL. 
*/ if (PyTypeNum_ISFLEXIBLE(type_num) && !any_flexible_userloops && ufunc->userloops != NULL) { PyUFunc_Loop1d *funcdata; PyObject *key, *obj; key = PyInt_FromLong(type_num); if (key == NULL) { continue; } obj = PyDict_GetItem(ufunc->userloops, key); Py_DECREF(key); if (obj == NULL) { continue; } funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj); while (funcdata != NULL) { if (funcdata->arg_dtypes != NULL) { any_flexible_userloops = 1; break; } funcdata = funcdata->next; } } } if (any_flexible && !any_flexible_userloops && !any_object) { /* Traditionally, we return -2 here (meaning "NotImplemented") anytime * we hit the above condition. * * This condition basically means "we are doomed", b/c the "flexible" * dtypes -- strings and void -- cannot have their own ufunc loops * registered (except via the special "flexible userloops" mechanism), * and they can't be cast to anything except object (and we only cast * to object if any_object is true). So really we should do nothing * here and continue and let the proper error be raised. But, we can't * quite yet, b/c of backcompat. * * Most of the time, this NotImplemented either got returned directly * to the user (who can't do anything useful with it), or got passed * back out of a special function like __mul__. And fortunately, for * almost all special functions, the end result of this was a * TypeError. Which is also what we get if we just continue without * this special case, so this special case is unnecessary. * * The only thing that actually depended on the NotImplemented is * array_richcompare, which did two things with it. First, it needed * to see this NotImplemented in order to implement the special-case * comparisons for * * string < <= == != >= > string * void == != void * * Now it checks for those cases first, before trying to call the * ufunc, so that's no problem. 
What it doesn't handle, though, is * cases like * * float < string * * or * * float == void * * For those, it just let the NotImplemented bubble out, and accepted * Python's default handling. And unfortunately, for comparisons, * Python's default is *not* to raise an error. Instead, it returns * something that depends on the operator: * * == return False * != return True * < <= >= > Python 2: use "fallback" (= weird and broken) ordering * Python 3: raise TypeError (hallelujah) * * In most cases this is straightforwardly broken, because comparison * of two arrays should always return an array, and here we end up * returning a scalar. However, there is an exception: if we are * comparing two scalars for equality, then it actually is correct to * return a scalar bool instead of raising an error. If we just * removed this special check entirely, then "np.float64(1) == 'foo'" * would raise an error instead of returning False, which is genuinely * wrong. * * The proper end goal here is: * 1) == and != should be implemented in a proper vectorized way for * all types. The short-term hack for this is just to add a * special case to PyUFunc_DefaultLegacyInnerLoopSelector where * if it can't find a comparison loop for the given types, and * the ufunc is np.equal or np.not_equal, then it returns a loop * that just fills the output array with False (resp. True). Then * array_richcompare could trust that whenever its special cases * don't apply, simply calling the ufunc will do the right thing, * even without this special check. * 2) < <= >= > should raise an error if no comparison function can * be found. array_richcompare already handles all string <> * string cases, and void dtypes don't have ordering, so again * this would mean that array_richcompare could simply call the * ufunc and it would do the right thing (i.e., raise an error), * again without needing this special check. 
* * So this means that for the transition period, our goal is: * == and != on scalars should simply return NotImplemented like * they always did, since everything ends up working out correctly * in this case only * == and != on arrays should issue a FutureWarning and then return * NotImplemented * < <= >= > on all flexible dtypes on py2 should raise a * DeprecationWarning, and then return NotImplemented. On py3 we * skip the warning, though, b/c it would just be immediately be * followed by an exception anyway. * * And for all other operations, we let things continue as normal. */ /* strcmp() is a hack but I think we can get away with it for this * temporary measure. */ if (!strcmp(ufunc_name, "equal") || !strcmp(ufunc_name, "not_equal")) { /* Warn on non-scalar, return NotImplemented regardless */ assert(nin == 2); if (PyMicArray_NDIM(out_op[0]) != 0 || PyMicArray_NDIM(out_op[1]) != 0) { if (DEPRECATE_FUTUREWARNING( "elementwise comparison failed; returning scalar " "instead, but in the future will perform elementwise " "comparison") < 0) { return -1; } } return -2; } else if (!strcmp(ufunc_name, "less") || !strcmp(ufunc_name, "less_equal") || !strcmp(ufunc_name, "greater") || !strcmp(ufunc_name, "greater_equal")) { #if !defined(NPY_PY3K) if (DEPRECATE("unorderable dtypes; returning scalar but in " "the future this will be an error") < 0) { return -1; } #endif return -2; } } /* Get positional output arguments */ for (i = nin; i < nargs; ++i) { obj = PyTuple_GET_ITEM(args, i); if (_set_out_array(obj, out_op + i) < 0) { return -1; } } /* * Get keyword output and other arguments. * Raise an error if anything else is present in the * keyword dictionary. 
*/ if (kwds != NULL) { PyObject *key, *value; Py_ssize_t pos = 0; while (PyDict_Next(kwds, &pos, &key, &value)) { Py_ssize_t length = 0; char *str = NULL; int bad_arg = 1; #if defined(NPY_PY3K) Py_XDECREF(str_key_obj); str_key_obj = PyUnicode_AsASCIIString(key); if (str_key_obj != NULL) { key = str_key_obj; } #endif if (PyBytes_AsStringAndSize(key, &str, &length) < 0) { PyErr_Clear(); PyErr_SetString(PyExc_TypeError, "invalid keyword argument"); goto fail; } switch (str[0]) { case 'c': /* Provides a policy for allowed casting */ if (strcmp(str, "casting") == 0) { if (!PyArray_CastingConverter(value, out_casting)) { goto fail; } bad_arg = 0; } break; case 'd': /* Another way to specify 'sig' */ if (strcmp(str, "dtype") == 0) { /* Allow this parameter to be None */ PyArray_Descr *dtype; if (!PyArray_DescrConverter2(value, &dtype)) { goto fail; } if (dtype != NULL) { if (*out_typetup != NULL) { PyErr_SetString(PyExc_RuntimeError, "cannot specify both 'sig' and 'dtype'"); goto fail; } *out_typetup = Py_BuildValue("(N)", dtype); } bad_arg = 0; } break; case 'e': /* * Overrides the global parameters buffer size, * error mask, and error object */ if (strcmp(str, "extobj") == 0) { *out_extobj = value; bad_arg = 0; } break; case 'o': /* * Output arrays may be specified as a keyword argument, * either as a single array or None for single output * ufuncs, or as a tuple of arrays and Nones. 
*/ if (strcmp(str, "out") == 0) { if (nargs > nin) { PyErr_SetString(PyExc_ValueError, "cannot specify 'out' as both a " "positional and keyword argument"); goto fail; } if (PyTuple_Check(value)) { if (PyTuple_GET_SIZE(value) != nout) { PyErr_SetString(PyExc_ValueError, "The 'out' tuple must have exactly " "one entry per ufunc output"); goto fail; } /* 'out' must be a tuple of arrays and Nones */ for(i = 0; i < nout; ++i) { PyObject *val = PyTuple_GET_ITEM(value, i); if (_set_out_array(val, out_op+nin+i) < 0) { goto fail; } } } else if (nout == 1) { /* Can be an array if it only has one output */ if (_set_out_array(value, out_op + nin) < 0) { goto fail; } } else { PyErr_SetString(PyExc_TypeError, nout > 1 ? "'out' must be a tuple " "of arrays" : "'out' must be an array or a " "tuple of a single array"); goto fail; } bad_arg = 0; } /* Allows the default output layout to be overridden */ else if (strcmp(str, "order") == 0) { if (!PyArray_OrderConverter(value, out_order)) { goto fail; } bad_arg = 0; } break; case 's': /* TODO: remove??? */ /* Allows a specific function inner loop to be selected */ if (strcmp(str, "sig") == 0 || strcmp(str, "signature") == 0) { if (has_sig == 1) { PyErr_SetString(PyExc_ValueError, "cannot specify both 'sig' and 'signature'"); goto fail; } if (*out_typetup != NULL) { PyErr_SetString(PyExc_RuntimeError, "cannot specify both 'sig' and 'dtype'"); goto fail; } *out_typetup = value; Py_INCREF(value); bad_arg = 0; has_sig = 1; } else if (strcmp(str, "subok") == 0) { if (!PyBool_Check(value)) { PyErr_SetString(PyExc_TypeError, "'subok' must be a boolean"); goto fail; } *out_subok = (value == Py_True); bad_arg = 0; } break; case 'w': /* * Provides a boolean array 'where=' mask if * out_wheremask is supplied. 
*/ if (out_wheremask != NULL && strcmp(str, "where") == 0) { if (PyMicArray_Check(value) && PyMicArray_ISBOOL(value)) { *out_wheremask = (PyMicArrayObject *)value; } else { /* TODO: convert to mic array of bool */ PyArray_Descr *dtype; dtype = PyArray_DescrFromType(NPY_BOOL); if (dtype == NULL) { goto fail; } //*out_wheremask = (PyMicArrayObject *)PyMicArray_FromAny( // value, dtype, // 0, 0, 0, NULL); } if (*out_wheremask == NULL) { goto fail; } bad_arg = 0; } break; } if (bad_arg) { char *format = "'%s' is an invalid keyword to ufunc '%s'"; PyErr_Format(PyExc_TypeError, format, str, ufunc_name); goto fail; } } } Py_XDECREF(str_key_obj); return 0; fail: Py_XDECREF(str_key_obj); Py_XDECREF(*out_extobj); *out_extobj = NULL; Py_XDECREF(*out_typetup); *out_typetup = NULL; if (out_wheremask != NULL) { Py_XDECREF(*out_wheremask); *out_wheremask = NULL; } return -1; } /* * This checks whether a trivial loop is ok, * making copies of scalar and one dimensional operands if that will * help. * * Returns 1 if a trivial loop is ok, 0 if it is not, and * -1 if there is an error. */ static int check_for_trivial_loop(PyUFuncObject *ufunc, PyMicArrayObject **op, PyArray_Descr **dtype, npy_intp buffersize) { npy_intp i, nin = ufunc->nin, nop = nin + ufunc->nout; for (i = 0; i < nop; ++i) { /* * If the dtype doesn't match, or the array isn't aligned, * indicate that the trivial loop can't be done. */ if (op[i] != NULL && (!PyMicArray_ISALIGNED(op[i]) || !PyArray_EquivTypes(dtype[i], PyMicArray_DESCR(op[i])) )) { /* * If op[j] is a scalar or small one dimensional * array input, make a copy to keep the opportunity * for a trivial loop. 
*/ if (i < nin && (PyMicArray_NDIM(op[i]) == 0 || (PyMicArray_NDIM(op[i]) == 1 && PyMicArray_DIM(op[i],0) <= buffersize))) { PyMicArrayObject *tmp; Py_INCREF(dtype[i]); tmp = (PyMicArrayObject *) PyMicArray_FromArray((PyArrayObject *)op[i], dtype[i], PyMicArray_DEVICE(op[i]), 0); if (tmp == NULL) { Py_DECREF(dtype[i]); return -1; } Py_DECREF(op[i]); op[i] = tmp; } else { return 0; } } } return 1; } static void trivial_two_operand_loop(PyMicArrayObject **op, PyUFuncGenericFunction innerloop, void *innerloopdata) { void *data0, *data1; npy_intp stride0, stride1; npy_intp count; int needs_api, device; MPY_TARGET_MIC PyUFuncGenericFunction offloop = innerloop; MPY_TARGET_MIC void (*offdata)(void) = innerloopdata; NPY_BEGIN_THREADS_DEF; needs_api = PyDataType_REFCHK(PyMicArray_DESCR(op[0])) || PyDataType_REFCHK(PyMicArray_DESCR(op[1])); device = PyMicArray_DEVICE(op[0]); PyMicArray_PREPARE_TRIVIAL_PAIR_ITERATION(op[0], op[1], count, data0, data1, stride0, stride1); NPY_UF_DBG_PRINT1("two operand loop count %d\n", (int)count); if (!needs_api) { NPY_BEGIN_THREADS_THRESHOLDED(count); } #pragma offload target(mic:device) in(offloop, offdata, count,\ data0, data1,\ stride0, stride1) { char *data[] = {data0, data1}; npy_intp stride[] = {stride0, stride1}; offloop(data, &count, stride, offdata); } NPY_END_THREADS; } static void trivial_three_operand_loop(PyMicArrayObject **op, PyUFuncGenericFunction innerloop, void *innerloopdata) { void *data0, *data1, *data2; npy_intp stride0, stride1, stride2; npy_intp count; int needs_api, device; MPY_TARGET_MIC PyUFuncGenericFunction offloop = innerloop; MPY_TARGET_MIC void (*offdata)(void) = innerloopdata; NPY_BEGIN_THREADS_DEF; needs_api = PyDataType_REFCHK(PyMicArray_DESCR(op[0])) || PyDataType_REFCHK(PyMicArray_DESCR(op[1])) || PyDataType_REFCHK(PyMicArray_DESCR(op[2])); device = PyMicArray_DEVICE(op[0]); PyMicArray_PREPARE_TRIVIAL_TRIPLE_ITERATION(op[0], op[1], op[2], count, data0, data1, data2, stride0, stride1, stride2); 
NPY_UF_DBG_PRINT1("three operand loop count %d\n", (int)count); if (!needs_api) { NPY_BEGIN_THREADS_THRESHOLDED(count); } #pragma offload target(mic:device) in(offloop, offdata, count,\ data0, data1, data2,\ stride0, stride1, stride2) { char *data[] = {data0, data1, data2}; npy_intp stride[] = {stride0, stride1, stride2}; offloop(data, &count, stride, offdata); } NPY_END_THREADS; } static int iterator_loop(PyUFuncObject *ufunc, PyMicArrayObject **op, PyArray_Descr **dtype, NPY_ORDER order, npy_intp buffersize, PyObject **arr_prep, PyObject *arr_prep_args, PyUFuncGenericFunction innerloop, void *innerloopdata) { npy_intp i, nin = ufunc->nin, nout = ufunc->nout; npy_intp nop = nin + nout; npy_uint32 op_flags[NPY_MAXARGS]; MpyIter *iter; char *baseptrs[NPY_MAXARGS]; MPY_TARGET_MIC MpyIter_IterNextFunc *iternext; MPY_TARGET_MIC PyUFuncGenericFunction offloop = innerloop; MPY_TARGET_MIC void (*offdata)(void) = innerloopdata; npy_intp *dataptr; npy_intp *stride; npy_intp *count_ptr; int device; PyMicArrayObject **op_it; int new_count = 0; npy_uint32 iter_flags; NPY_BEGIN_THREADS_DEF; /* Set up the flags */ for (i = 0; i < nin; ++i) { op_flags[i] = NPY_ITER_READONLY | NPY_ITER_ALIGNED | NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE; /* * If READWRITE flag has been set for this operand, * then clear default READONLY flag */ op_flags[i] |= ufunc->op_flags[i]; if (op_flags[i] & (NPY_ITER_READWRITE | NPY_ITER_WRITEONLY)) { op_flags[i] &= ~NPY_ITER_READONLY; } } for (i = nin; i < nop; ++i) { op_flags[i] = NPY_ITER_WRITEONLY | NPY_ITER_ALIGNED | NPY_ITER_ALLOCATE | NPY_ITER_NO_BROADCAST | NPY_ITER_NO_SUBTYPE | NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE; } iter_flags = ufunc->iter_flags | NPY_ITER_EXTERNAL_LOOP | NPY_ITER_REFS_OK | NPY_ITER_ZEROSIZE_OK | NPY_ITER_BUFFERED | NPY_ITER_GROWINNER | NPY_ITER_DELAY_BUFALLOC | NPY_ITER_COPY_IF_OVERLAP; /* * Allocate the iterator. Because the types of the inputs * were already checked, we use the casting rule 'unsafe' which * is faster to calculate. 
*/ iter = MpyIter_AdvancedNew(nop, op, iter_flags, order, NPY_UNSAFE_CASTING, op_flags, dtype, -1, NULL, NULL, buffersize); if (iter == NULL) { return -1; } /* Copy any allocated outputs */ op_it = MpyIter_GetOperandArray(iter); for (i = 0; i < nout; ++i) { if (op[nin+i] == NULL) { op[nin+i] = op_it[nin+i]; Py_INCREF(op[nin+i]); /* Call the __array_prepare__ functions for the new array */ /*if (prepare_ufunc_output(ufunc, &op[nin+i], arr_prep[i], arr_prep_args, i) < 0) { NpyIter_Deallocate(iter); return -1; }*/ /* * In case __array_prepare__ returned a different array, put the * results directly there, ignoring the array allocated by the * iterator. * * Here, we assume the user-provided __array_prepare__ behaves * sensibly and doesn't return an array overlapping in memory * with other operands --- the op[nin+i] array passed to it is newly * allocated and doesn't have any overlap. */ baseptrs[nin+i] = PyMicArray_BYTES(op[nin+i]); } else { baseptrs[nin+i] = PyMicArray_BYTES(op_it[nin+i]); } } /* Only do the loop if the iteration size is non-zero */ if (MpyIter_GetIterSize(iter) != 0) { /* Reset the iterator with the base pointers from possible __array_prepare__ */ for (i = 0; i < nin; ++i) { baseptrs[i] = PyMicArray_BYTES(op_it[i]); } if (MpyIter_ResetBasePointers(iter, baseptrs, NULL) != NPY_SUCCEED) { MpyIter_Deallocate(iter); return -1; } /* Get the variables needed for the loop */ iternext = MpyIter_GetIterNext(iter, NULL); if (iternext == NULL) { MpyIter_Deallocate(iter); return -1; } dataptr = (npy_intp *) MpyIter_GetDataPtrArray(iter); stride = MpyIter_GetInnerStrideArray(iter); count_ptr = MpyIter_GetInnerLoopSizePtr(iter); device = MpyIter_GetDevice(iter); MPY_BEGIN_THREADS_NDITER(iter); /* Execute the loop */ do { //NPY_UF_DBG_PRINT1("iterator loop count %d\n", (int)*count_ptr); #pragma omp target device(device) map(to: offloop, offdata, count_ptr[0:1],\ dataptr[0:nop], stride[0:nop]) offloop((char **)dataptr, count_ptr, stride, offdata); } while 
(iternext(iter)); NPY_END_THREADS; } MpyIter_Deallocate(iter); return 0; } /* * trivial_loop_ok - 1 if no alignment, data conversion, etc required * nin - number of inputs * nout - number of outputs * op - the operands (nin + nout of them) * order - the loop execution order/output memory order * buffersize - how big of a buffer to use * arr_prep - the __array_prepare__ functions for the outputs * innerloop - the inner loop function * innerloopdata - data to pass to the inner loop */ static int execute_legacy_ufunc_loop(PyUFuncObject *ufunc, int trivial_loop_ok, PyMicArrayObject **op, PyArray_Descr **dtypes, NPY_ORDER order, npy_intp buffersize, PyObject **arr_prep, PyObject *arr_prep_args) { npy_intp nin = ufunc->nin, nout = ufunc->nout; PyUFuncGenericFunction innerloop; void *innerloopdata; int needs_api = 0; if (ufunc->legacy_inner_loop_selector(ufunc, dtypes, &innerloop, &innerloopdata, &needs_api) < 0) { return -1; } /* If the loop wants the arrays, provide them. */ if (_does_loop_use_arrays(innerloopdata)) { innerloopdata = (void*)op; } /* First check for the trivial cases that don't need an iterator */ if (trivial_loop_ok) { if (nin == 1 && nout == 1) { if (op[1] == NULL && (order == NPY_ANYORDER || order == NPY_KEEPORDER) && PyMicArray_TRIVIALLY_ITERABLE(op[0])) { Py_INCREF(dtypes[1]); op[1] = (PyMicArrayObject *)PyMicArray_NewFromDescr( PyMicArray_DEVICE(op[0]), &PyMicArray_Type, dtypes[1], PyMicArray_NDIM(op[0]), PyMicArray_DIMS(op[0]), NULL, NULL, PyMicArray_ISFORTRAN(op[0]) ? 
NPY_ARRAY_F_CONTIGUOUS : 0, NULL); if (op[1] == NULL) { return -1; } NPY_UF_DBG_PRINT("trivial 1 input with allocated output\n"); trivial_two_operand_loop(op, innerloop, innerloopdata); return 0; } else if (op[1] != NULL && PyMicArray_NDIM(op[1]) >= PyMicArray_NDIM(op[0]) && PyMicArray_TRIVIALLY_ITERABLE_PAIR(op[0], op[1], PyArray_TRIVIALLY_ITERABLE_OP_READ, PyArray_TRIVIALLY_ITERABLE_OP_NOREAD)) { NPY_UF_DBG_PRINT("trivial 1 input\n"); trivial_two_operand_loop(op, innerloop, innerloopdata); return 0; } } else if (nin == 2 && nout == 1) { if (op[2] == NULL && (order == NPY_ANYORDER || order == NPY_KEEPORDER) && PyMicArray_TRIVIALLY_ITERABLE_PAIR(op[0], op[1], PyArray_TRIVIALLY_ITERABLE_OP_READ, PyArray_TRIVIALLY_ITERABLE_OP_READ)) { PyMicArrayObject *tmp; /* * Have to choose the input with more dimensions to clone, as * one of them could be a scalar. */ if (PyMicArray_NDIM(op[0]) >= PyMicArray_NDIM(op[1])) { tmp = op[0]; } else { tmp = op[1]; } Py_INCREF(dtypes[2]); op[2] = (PyMicArrayObject *)PyMicArray_NewFromDescr( PyMicArray_DEVICE(tmp), &PyMicArray_Type, dtypes[2], PyMicArray_NDIM(tmp), PyMicArray_DIMS(tmp), NULL, NULL, PyMicArray_ISFORTRAN(tmp) ? 
NPY_ARRAY_F_CONTIGUOUS : 0, NULL); if (op[2] == NULL) { return -1; } NPY_UF_DBG_PRINT("trivial 2 input with allocated output\n"); trivial_three_operand_loop(op, innerloop, innerloopdata); return 0; } else if (op[2] != NULL && PyMicArray_NDIM(op[2]) >= PyMicArray_NDIM(op[0]) && PyMicArray_NDIM(op[2]) >= PyMicArray_NDIM(op[1]) && PyMicArray_TRIVIALLY_ITERABLE_TRIPLE(op[0], op[1], op[2], PyArray_TRIVIALLY_ITERABLE_OP_READ, PyArray_TRIVIALLY_ITERABLE_OP_READ, PyArray_TRIVIALLY_ITERABLE_OP_NOREAD)) { NPY_UF_DBG_PRINT("trivial 2 input\n"); trivial_three_operand_loop(op, innerloop, innerloopdata); return 0; } } } /* * If no trivial loop matched, an iterator is required to * resolve broadcasting, etc */ NPY_UF_DBG_PRINT("iterator loop\n"); if (iterator_loop(ufunc, op, dtypes, order, buffersize, arr_prep, arr_prep_args, innerloop, innerloopdata) < 0) { return -1; } return 0; } /* * nin - number of inputs * nout - number of outputs * wheremask - if not NULL, the 'where=' parameter to the ufunc. * op - the operands (nin + nout of them) * order - the loop execution order/output memory order * buffersize - how big of a buffer to use * arr_prep - the __array_prepare__ functions for the outputs * innerloop - the inner loop function * innerloopdata - data to pass to the inner loop */ static int execute_fancy_ufunc_loop(PyUFuncObject *ufunc, PyMicArrayObject *wheremask, PyMicArrayObject **op, PyArray_Descr **dtypes, NPY_ORDER order, npy_intp buffersize, PyObject **arr_prep, PyObject *arr_prep_args) { int i, nin = ufunc->nin, nout = ufunc->nout; int nop = nin + nout; int nop_real; npy_uint32 op_flags[NPY_MAXARGS]; NpyIter *iter; int device; npy_intp default_op_in_flags = 0, default_op_out_flags = 0; NpyIter_IterNextFunc *iternext; char **dataptr; npy_intp *strides; npy_intp *countptr; PyArrayObject *op_npy[NPY_MAXARGS]; PyMicArrayObject *op_new[nop]; int count_new = 0; npy_uint32 iter_flags; device = PyMUFunc_GetCommonDevice(nin, op); if (wheremask != NULL) { if (nop + 1 > 
NPY_MAXARGS) { PyErr_SetString(PyExc_ValueError, "Too many operands when including where= parameter"); return -1; } op[nop] = wheremask; dtypes[nop] = NULL; default_op_out_flags |= NPY_ITER_WRITEMASKED; } /* Set up the flags */ for (i = 0; i < nin; ++i) { op_flags[i] = default_op_in_flags | NPY_ITER_READONLY | NPY_ITER_ALIGNED; /* * If READWRITE flag has been set for this operand, * then clear default READONLY flag */ op_flags[i] |= ufunc->op_flags[i]; if (op_flags[i] & (NPY_ITER_READWRITE | NPY_ITER_WRITEONLY)) { op_flags[i] &= ~NPY_ITER_READONLY; } } for (i = nin; i < nop; ++i) { op_flags[i] = default_op_out_flags | NPY_ITER_WRITEONLY | NPY_ITER_ALIGNED | NPY_ITER_NO_BROADCAST | NPY_ITER_NO_SUBTYPE; } if (wheremask != NULL) { op_flags[nop] = NPY_ITER_READONLY | NPY_ITER_ARRAYMASK; } NPY_UF_DBG_PRINT("Making iterator\n"); iter_flags = ufunc->iter_flags | NPY_ITER_EXTERNAL_LOOP | NPY_ITER_REFS_OK | NPY_ITER_ZEROSIZE_OK; /* Allocate output array */ for (i = nin; i < nop; ++i) { PyMicArrayObject *tmp; if (op[i] == NULL) { tmp = PyMUFunc_CreateArrayBroadcast(nin, op, dtypes[i]); if (tmp == NULL) { goto fail; } op[i] = tmp; op_new[count_new++] = tmp; } } /* Copy two array of PyArrayObject * */ for (i = 0; i < nop; ++i) { op_npy[i] = (PyArrayObject *) op[i]; } nop_real = nop + ((wheremask != NULL) ? 1 : 0); /* * Allocate the iterator. Because the types of the inputs * were already checked, we use the casting rule 'unsafe' which * is faster to calculate. 
*/ iter = NpyIter_MultiNew(nop_real, op_npy, iter_flags, order, NPY_UNSAFE_CASTING, op_flags, dtypes); if (iter == NULL) { goto fail; } NPY_UF_DBG_PRINT("Made iterator\n"); /* Call the __array_prepare__ functions where necessary */ /* for (i = 0; i < nout; ++i) { if (prepare_ufunc_output(ufunc, &op[nin+i], arr_prep[i], arr_prep_args, i) < 0) { NpyIter_Deallocate(iter); return -1; } } */ /* Only do the loop if the iteration size is non-zero */ if (NpyIter_GetIterSize(iter) != 0) { PyUFunc_MaskedStridedInnerLoopFunc *innerloop; NpyAuxData *innerloopdata; npy_intp fixed_strides[2*NPY_MAXARGS]; PyArray_Descr **iter_dtypes; NPY_BEGIN_THREADS_DEF; /* Validate that the prepare_ufunc_output didn't mess with pointers */ /* for (i = nin; i < nop; ++i) { if (PyArray_BYTES(op[i]) != PyArray_BYTES(op_it[i])) { PyErr_SetString(PyExc_ValueError, "The __array_prepare__ functions modified the data " "pointer addresses in an invalid fashion"); NpyIter_Deallocate(iter); return -1; } } */ /* * Get the inner loop, with the possibility of specialization * based on the fixed strides. */ NpyIter_GetInnerFixedStrideArray(iter, fixed_strides); iter_dtypes = NpyIter_GetDescrArray(iter); if (ufunc->masked_inner_loop_selector(ufunc, dtypes, wheremask != NULL ? iter_dtypes[nop] : iter_dtypes[nop + nin], fixed_strides, wheremask != NULL ? 
fixed_strides[nop] : fixed_strides[nop + nin], &innerloop, &innerloopdata, 0) < 0) { NpyIter_Deallocate(iter); return -1; } /* Get the variables needed for the loop */ iternext = NpyIter_GetIterNext(iter, NULL); if (iternext == NULL) { NpyIter_Deallocate(iter); return -1; } dataptr = NpyIter_GetDataPtrArray(iter); strides = NpyIter_GetInnerStrideArray(iter); countptr = NpyIter_GetInnerLoopSizePtr(iter); NPY_BEGIN_THREADS_NDITER(iter); NPY_UF_DBG_PRINT("Actual inner loop:\n"); /* Execute the loop */ do { NPY_UF_DBG_PRINT1("iterator loop count %d\n", (int)*countptr); npy_intp count = *countptr; #pragma omp target device(device) \ map(to: innerloop, count, innerloopdata,\ strides[0:nop_real]) innerloop(NULL, strides, NULL, strides[nop], count, innerloopdata); } while (iternext(iter)); NPY_END_THREADS; NPY_AUXDATA_FREE(innerloopdata); } NpyIter_Deallocate(iter); return 0; fail: for (i = 0; i < count_new; ++i) { Py_DECREF(op_new[i]); } return -1; } static PyObject * make_arr_prep_args(npy_intp nin, PyObject *args, PyObject *kwds) { PyObject *out = kwds ? 
PyDict_GetItem(kwds, mpy_um_str_out) : NULL; PyObject *arr_prep_args; if (out == NULL) { Py_INCREF(args); return args; } else { npy_intp i, nargs = PyTuple_GET_SIZE(args), n; n = nargs; if (n < nin + 1) { n = nin + 1; } arr_prep_args = PyTuple_New(n); if (arr_prep_args == NULL) { return NULL; } /* Copy the tuple, but set the nin-th item to the keyword arg */ for (i = 0; i < nin; ++i) { PyObject *item = PyTuple_GET_ITEM(args, i); Py_INCREF(item); PyTuple_SET_ITEM(arr_prep_args, i, item); } Py_INCREF(out); PyTuple_SET_ITEM(arr_prep_args, nin, out); for (i = nin+1; i < n; ++i) { PyObject *item = PyTuple_GET_ITEM(args, i); Py_INCREF(item); PyTuple_SET_ITEM(arr_prep_args, i, item); } return arr_prep_args; } } /* * check the floating point status * - errmask: mask of status to check * - extobj: ufunc pyvals object * may be null, in which case the thread global one is fetched * - ufunc_name: name of ufunc */ static int _check_ufunc_fperr(int errmask, PyObject *extobj, const char *ufunc_name) { int fperr; PyObject *errobj = NULL; int ret; int first = 1; if (!errmask) { return 0; } fperr = PyUFunc_getfperr(); if (!fperr) { return 0; } /* Get error object globals */ if (extobj == NULL) { extobj = get_global_ext_obj(); } if (_extract_pyvals(extobj, ufunc_name, NULL, NULL, &errobj) < 0) { Py_XDECREF(errobj); return -1; } ret = PyUFunc_handlefperr(errmask, errobj, fperr, &first); Py_XDECREF(errobj); return ret; } static int PyMUFunc_GeneralizedFunction(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds, PyMicArrayObject **op) { int nin, nout; int i, j, idim, nop; const char *ufunc_name; int retval = -1, subok = 1; int needs_api = 0; PyArray_Descr *dtypes[NPY_MAXARGS]; /* Use remapped axes for generalized ufunc */ int broadcast_ndim, iter_ndim; int op_axes_arrays[NPY_MAXARGS][NPY_MAXDIMS]; int *op_axes[NPY_MAXARGS]; npy_uint32 op_flags[NPY_MAXARGS]; npy_intp iter_shape[NPY_MAXARGS]; NpyIter *iter = NULL; npy_uint32 iter_flags; npy_intp total_problem_size; PyArrayObject 
*op_npy[NPY_MAXARGS]; int device; /* These parameters come from extobj= or from a TLS global */ int buffersize = 0, errormask = 0; /* The selected inner loop */ PyUFuncGenericFunction innerloop = NULL; void *innerloopdata = NULL; /* The dimensions which get passed to the inner loop */ npy_intp inner_dimensions[NPY_MAXDIMS+1]; /* The strides which get passed to the inner loop */ npy_intp *inner_strides = NULL; /* The sizes of the core dimensions (# entries is ufunc->core_num_dim_ix) */ npy_intp *core_dim_sizes = inner_dimensions + 1; int core_dim_ixs_size; /* The __array_prepare__ function to call for each output */ PyObject *arr_prep[NPY_MAXARGS]; /* * This is either args, or args with the out= parameter from * kwds added appropriately. */ PyObject *arr_prep_args = NULL; NPY_ORDER order = NPY_KEEPORDER; /* Use the default assignment casting rule */ NPY_CASTING casting = NPY_DEFAULT_ASSIGN_CASTING; /* When provided, extobj and typetup contain borrowed references */ PyObject *extobj = NULL, *type_tup = NULL; /* backup data to make PyMicArray work with PyArray type resolver */ npy_longlong scal_buffer[4*ufunc->nin]; void *scal_ptrs[ufunc->nin]; if (ufunc == NULL) { PyErr_SetString(PyExc_ValueError, "function not supported"); return -1; } nin = ufunc->nin; nout = ufunc->nout; nop = nin + nout; ufunc_name = _get_ufunc_name(ufunc); NPY_UF_DBG_PRINT1("\nEvaluating ufunc %s\n", ufunc_name); /* Initialize all the operands and dtypes to NULL */ for (i = 0; i < nop; ++i) { op[i] = NULL; dtypes[i] = NULL; arr_prep[i] = NULL; } NPY_UF_DBG_PRINT("Getting arguments\n"); /* Get all the arguments */ retval = get_ufunc_arguments(ufunc, args, kwds, op, &order, &casting, &extobj, &type_tup, &subok, NULL); if (retval < 0) { goto fail; } /* * Figure out the number of iteration dimensions, which * is the broadcast result of all the input non-core * dimensions. 
*/ broadcast_ndim = 0; for (i = 0; i < nin; ++i) { int n = PyMicArray_NDIM(op[i]) - ufunc->core_num_dims[i]; if (n > broadcast_ndim) { broadcast_ndim = n; } } /* * Figure out the number of iterator creation dimensions, * which is the broadcast dimensions + all the core dimensions of * the outputs, so that the iterator can allocate those output * dimensions following the rules of order='F', for example. */ iter_ndim = broadcast_ndim; for (i = nin; i < nop; ++i) { iter_ndim += ufunc->core_num_dims[i]; } if (iter_ndim > NPY_MAXDIMS) { PyErr_Format(PyExc_ValueError, "too many dimensions for generalized ufunc %s", ufunc_name); retval = -1; goto fail; } /* * Validate the core dimensions of all the operands, and collect all of * the labelled core dimensions into 'core_dim_sizes'. * * The behavior has been changed in NumPy 1.10.0, and the following * requirements must be fulfilled or an error will be raised: * * Arguments, both input and output, must have at least as many * dimensions as the corresponding number of core dimensions. In * previous versions, 1's were prepended to the shape as needed. * * Core dimensions with same labels must have exactly matching sizes. * In previous versions, core dimensions of size 1 would broadcast * against other core dimensions with the same label. * * All core dimensions must have their size specified by a passed in * input or output argument. In previous versions, core dimensions in * an output argument that were not specified in an input argument, * and whose size could not be inferred from a passed in output * argument, would have their size set to 1. 
*/ for (i = 0; i < ufunc->core_num_dim_ix; ++i) { core_dim_sizes[i] = -1; } for (i = 0; i < nop; ++i) { if (op[i] != NULL) { int dim_offset = ufunc->core_offsets[i]; int num_dims = ufunc->core_num_dims[i]; int core_start_dim = PyMicArray_NDIM(op[i]) - num_dims; /* Check if operands have enough dimensions */ if (core_start_dim < 0) { PyErr_Format(PyExc_ValueError, "%s: %s operand %d does not have enough " "dimensions (has %d, gufunc core with " "signature %s requires %d)", ufunc_name, i < nin ? "Input" : "Output", i < nin ? i : i - nin, PyMicArray_NDIM(op[i]), ufunc->core_signature, num_dims); retval = -1; goto fail; } /* * Make sure every core dimension exactly matches all other core * dimensions with the same label. */ for (idim = 0; idim < num_dims; ++idim) { int core_dim_index = ufunc->core_dim_ixs[dim_offset+idim]; npy_intp op_dim_size = PyMicArray_DIM(op[i], core_start_dim+idim); if (core_dim_sizes[core_dim_index] == -1) { core_dim_sizes[core_dim_index] = op_dim_size; } else if (op_dim_size != core_dim_sizes[core_dim_index]) { PyErr_Format(PyExc_ValueError, "%s: %s operand %d has a mismatch in its " "core dimension %d, with gufunc " "signature %s (size %zd is different " "from %zd)", ufunc_name, i < nin ? "Input" : "Output", i < nin ? i : i - nin, idim, ufunc->core_signature, op_dim_size, core_dim_sizes[core_dim_index]); retval = -1; goto fail; } } } } /* * Make sure no core dimension is unspecified. */ for (i = 0; i < ufunc->core_num_dim_ix; ++i) { if (core_dim_sizes[i] == -1) { break; } } if (i != ufunc->core_num_dim_ix) { /* * There is at least one core dimension missing, find in which * operand it comes up first (it has to be an output operand). 
*/ const int missing_core_dim = i; int out_op; for (out_op = nin; out_op < nop; ++out_op) { int first_idx = ufunc->core_offsets[out_op]; int last_idx = first_idx + ufunc->core_num_dims[out_op]; for (i = first_idx; i < last_idx; ++i) { if (ufunc->core_dim_ixs[i] == missing_core_dim) { break; } } if (i < last_idx) { /* Change index offsets for error message */ out_op -= nin; i -= first_idx; break; } } PyErr_Format(PyExc_ValueError, "%s: Output operand %d has core dimension %d " "unspecified, with gufunc signature %s", ufunc_name, out_op, i, ufunc->core_signature); retval = -1; goto fail; } /* Fill in the initial part of 'iter_shape' */ for (idim = 0; idim < broadcast_ndim; ++idim) { iter_shape[idim] = -1; } /* Fill in op_axes for all the operands */ j = broadcast_ndim; core_dim_ixs_size = 0; for (i = 0; i < nop; ++i) { int n; if (op[i]) { /* * Note that n may be negative if broadcasting * extends into the core dimensions. */ n = PyMicArray_NDIM(op[i]) - ufunc->core_num_dims[i]; } else { n = broadcast_ndim; } /* Broadcast all the unspecified dimensions normally */ for (idim = 0; idim < broadcast_ndim; ++idim) { if (idim >= broadcast_ndim - n) { op_axes_arrays[i][idim] = idim - (broadcast_ndim - n); } else { op_axes_arrays[i][idim] = -1; } } /* Any output core dimensions shape should be ignored */ for (idim = broadcast_ndim; idim < iter_ndim; ++idim) { op_axes_arrays[i][idim] = -1; } /* Except for when it belongs to this output */ if (i >= nin) { int dim_offset = ufunc->core_offsets[i]; int num_dims = ufunc->core_num_dims[i]; /* Fill in 'iter_shape' and 'op_axes' for this output */ for (idim = 0; idim < num_dims; ++idim) { iter_shape[j] = core_dim_sizes[ ufunc->core_dim_ixs[dim_offset + idim]]; op_axes_arrays[i][j] = n + idim; ++j; } } op_axes[i] = op_axes_arrays[i]; core_dim_ixs_size += ufunc->core_num_dims[i]; } /* Get the buffersize and errormask */ if (_get_bufsize_errmask(extobj, ufunc_name, &buffersize, &errormask) < 0) { retval = -1; goto fail; } 
NPY_UF_DBG_PRINT("Finding inner loop\n"); /* Work around to live with numpy type_resolver */ ufunc_pre_typeresolver(ufunc, op, scal_ptrs, scal_buffer, 4); retval = ufunc->type_resolver(ufunc, casting, (PyArrayObject **)op, type_tup, dtypes); ufunc_post_typeresolver(ufunc, op, scal_ptrs); if (retval < 0) { goto fail; } /* For the generalized ufunc, we get the loop right away too */ retval = ufunc->legacy_inner_loop_selector(ufunc, dtypes, &innerloop, &innerloopdata, &needs_api); if (retval < 0) { goto fail; } #if NPY_UF_DBG_TRACING printf("input types:\n"); for (i = 0; i < nin; ++i) { PyObject_Print((PyObject *)dtypes[i], stdout, 0); printf(" "); } printf("\noutput types:\n"); for (i = nin; i < nop; ++i) { PyObject_Print((PyObject *)dtypes[i], stdout, 0); printf(" "); } printf("\n"); #endif if (subok) { /* TODO: Do we really need subok? */ PyErr_SetString(PyExc_ValueError, "Do not support subok"); goto fail; /* * Get the appropriate __array_prepare__ function to call * for each output */ //_find_array_prepare(args, kwds, arr_prep, nin, nout, 0); /* Set up arr_prep_args if a prep function was needed */ /* for (i = 0; i < nout; ++i) { if (arr_prep[i] != NULL && arr_prep[i] != Py_None) { arr_prep_args = make_arr_prep_args(nin, args, kwds); break; } } */ } /* If the loop wants the arrays, provide them */ if (_does_loop_use_arrays(innerloopdata)) { innerloopdata = (void*)op; } /* * Set up the iterator per-op flags. For generalized ufuncs, we * can't do buffering, so must COPY or UPDATEIFCOPY. 
*/ for (i = 0; i < nin; ++i) { op_flags[i] = NPY_ITER_READONLY | NPY_ITER_ALIGNED; /* * If READWRITE flag has been set for this operand, * then clear default READONLY flag */ op_flags[i] |= ufunc->op_flags[i]; if (op_flags[i] & (NPY_ITER_READWRITE | NPY_ITER_WRITEONLY)) { op_flags[i] &= ~NPY_ITER_READONLY; } } for (i = nin; i < nop; ++i) { op_flags[i] = NPY_ITER_READWRITE| //NPY_ITER_UPDATEIFCOPY| NPY_ITER_ALIGNED| //NPY_ITER_ALLOCATE| NPY_ITER_NO_BROADCAST; } iter_flags = ufunc->iter_flags | NPY_ITER_MULTI_INDEX | NPY_ITER_REFS_OK | NPY_ITER_REDUCE_OK | NPY_ITER_ZEROSIZE_OK; /* Find destination device */ device = PyMUFunc_GetCommonDevice(nin, op); /* Allocate output array */ for (i = nin; i < nop; ++i) { PyMicArrayObject *tmp; if (op[i] == NULL) { tmp = PyMUFunc_CreateArrayBroadcast(nin, op, dtypes[i]); if (tmp == NULL) { goto fail; } op[i] = tmp; } } /* Create temporary PyMicArrayObject * array */ /* TODO cleanup this step */ /* for (i = 0; i < nop; ++i) { op_npy[i] = (PyArrayObject *) op[i]; } */ /* Create the iterator */ iter = NpyIter_AdvancedNew(nop, (PyArrayObject **) op, iter_flags, order, NPY_UNSAFE_CASTING, op_flags, dtypes, iter_ndim, op_axes, iter_shape, 0); if (iter == NULL) { retval = -1; goto fail; } /* Fill in any allocated outputs */ /*for (i = nin; i < nop; ++i) { if (op[i] == NULL) { op[i] = NpyIter_GetOperandArray(iter)[i]; Py_INCREF(op[i]); } } */ /* * Set up the inner strides array. Because we're not doing * buffering, the strides are fixed throughout the looping. */ inner_strides = (npy_intp *)PyArray_malloc( NPY_SIZEOF_INTP * (nop+core_dim_ixs_size)); if (inner_strides == NULL) { PyErr_NoMemory(); retval = -1; goto fail; } /* Copy the strides after the first nop */ idim = nop; for (i = 0; i < nop; ++i) { int num_dims = ufunc->core_num_dims[i]; int core_start_dim = PyMicArray_NDIM(op[i]) - num_dims; /* * Need to use the arrays in the iterator, not op, because * a copy with a different-sized type may have been made. 
*/ //PyArrayObject *arr = NpyIter_GetOperandArray(iter)[i]; PyMicArrayObject *arr = op[i]; npy_intp *shape = PyMicArray_SHAPE(arr); npy_intp *strides = PyMicArray_STRIDES(arr); for (j = 0; j < num_dims; ++j) { if (core_start_dim + j >= 0) { /* * Force the stride to zero when the shape is 1, sot * that the broadcasting works right. */ if (shape[core_start_dim + j] != 1) { inner_strides[idim++] = strides[core_start_dim + j]; } else { inner_strides[idim++] = 0; } } else { inner_strides[idim++] = 0; } } } total_problem_size = NpyIter_GetIterSize(iter); if (total_problem_size < 0) { /* * Only used for threading, if negative (this means that it is * larger then ssize_t before axes removal) assume that the actual * problem is large enough to be threaded usefully. */ total_problem_size = 1000; } /* Remove all the core output dimensions from the iterator */ for (i = broadcast_ndim; i < iter_ndim; ++i) { if (NpyIter_RemoveAxis(iter, broadcast_ndim) != NPY_SUCCEED) { retval = -1; goto fail; } } if (NpyIter_RemoveMultiIndex(iter) != NPY_SUCCEED) { retval = -1; goto fail; } if (NpyIter_EnableExternalLoop(iter) != NPY_SUCCEED) { retval = -1; goto fail; } /* * The first nop strides are for the inner loop (but only can * copy them after removing the core axes */ memcpy(inner_strides, NpyIter_GetInnerStrideArray(iter), NPY_SIZEOF_INTP * nop); #if 0 printf("strides: "); for (i = 0; i < nop+core_dim_ixs_size; ++i) { printf("%d ", (int)inner_strides[i]); } printf("\n"); #endif /* Start with the floating-point exception flags cleared */ PyUFunc_clearfperr(); NPY_UF_DBG_PRINT("Executing inner loop\n"); if (NpyIter_GetIterSize(iter) != 0) { /* Do the ufunc loop */ NpyIter_IterNextFunc *iternext; char **dataptr; npy_intp *count_ptr; NPY_BEGIN_THREADS_DEF; /* Get the variables needed for the loop */ iternext = NpyIter_GetIterNext(iter, NULL); if (iternext == NULL) { retval = -1; goto fail; } dataptr = NpyIter_GetDataPtrArray(iter); count_ptr = NpyIter_GetInnerLoopSizePtr(iter); if 
(!needs_api && !NpyIter_IterationNeedsAPI(iter)) { NPY_BEGIN_THREADS_THRESHOLDED(total_problem_size); } do { inner_dimensions[0] = *count_ptr; #pragma omp target device(device) \ map(to: innerloop, innerloopdata,\ inner_dimensions[0:NPY_MAXDIMS+1],\ inner_strides[0:nop+core_dim_ixs_size]) innerloop(NULL, inner_dimensions, inner_strides, innerloopdata); } while (iternext(iter)); if (!needs_api && !NpyIter_IterationNeedsAPI(iter)) { NPY_END_THREADS; } } else { /** * For each output operand, check if it has non-zero size, * and assign the identity if it does. For example, a dot * product of two zero-length arrays will be a scalar, * which has size one. */ for (i = nin; i < nop; ++i) { if (PyMicArray_SIZE(op[i]) != 0) { switch (ufunc->identity) { case PyUFunc_Zero: assign_reduce_identity_zero(op[i], NULL); break; case PyUFunc_One: assign_reduce_identity_one(op[i], NULL); break; case PyUFunc_MinusOne: assign_reduce_identity_minusone(op[i], NULL); break; case PyUFunc_None: case PyUFunc_ReorderableNone: PyErr_Format(PyExc_ValueError, "ufunc %s ", ufunc_name); retval = -1; goto fail; default: PyErr_Format(PyExc_ValueError, "ufunc %s has an invalid identity for reduction", ufunc_name); retval = -1; goto fail; } } } } /* Check whether any errors occurred during the loop */ if (PyErr_Occurred() || _check_ufunc_fperr(errormask, extobj, ufunc_name) < 0) { retval = -1; goto fail; } PyArray_free(inner_strides); NpyIter_Deallocate(iter); /* The caller takes ownership of all the references in op */ for (i = 0; i < nop; ++i) { Py_XDECREF(dtypes[i]); Py_XDECREF(arr_prep[i]); } Py_XDECREF(type_tup); Py_XDECREF(arr_prep_args); NPY_UF_DBG_PRINT("Returning Success\n"); return 0; fail: NPY_UF_DBG_PRINT1("Returning failure code %d\n", retval); PyArray_free(inner_strides); NpyIter_Deallocate(iter); for (i = 0; i < nop; ++i) { Py_XDECREF(op[i]); op[i] = NULL; Py_XDECREF(dtypes[i]); Py_XDECREF(arr_prep[i]); } Py_XDECREF(type_tup); Py_XDECREF(arr_prep_args); return retval; } /*UFUNC_API * * 
This generic function is called with the ufunc object, the arguments to it, * and an array of (pointers to) PyMicArrayObjects which are NULL. * * 'op' is an array of at least NPY_MAXARGS PyMicArrayObject *. */ NPY_NO_EXPORT int PyMUFunc_GenericFunction(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds, PyMicArrayObject **op) { int nin, nout; int i, nop; const char *ufunc_name; int retval = -1, subok = 0; int need_fancy = 0; PyArray_Descr *dtypes[NPY_MAXARGS]; /* These parameters come from extobj= or from a TLS global */ int buffersize = 0, errormask = 0; /* The mask provided in the 'where=' parameter */ PyMicArrayObject *wheremask = NULL; /* The __array_prepare__ function to call for each output */ PyObject *arr_prep[NPY_MAXARGS]; /* * This is either args, or args with the out= parameter from * kwds added appropriately. */ PyObject *arr_prep_args = NULL; /* backup data to make PyMicArray work with PyArray type resolver */ npy_longlong scal_buffer[4*ufunc->nin]; void *scal_ptrs[ufunc->nin]; int trivial_loop_ok = 0; NPY_ORDER order = NPY_KEEPORDER; /* Use the default assignment casting rule */ NPY_CASTING casting = NPY_DEFAULT_ASSIGN_CASTING; /* When provided, extobj and typetup contain borrowed references */ PyObject *extobj = NULL, *type_tup = NULL; if (ufunc == NULL) { PyErr_SetString(PyExc_ValueError, "function not supported"); return -1; } if (ufunc->core_enabled) { return PyMUFunc_GeneralizedFunction(ufunc, args, kwds, op); } nin = ufunc->nin; nout = ufunc->nout; nop = nin + nout; ufunc_name = _get_ufunc_name(ufunc); NPY_UF_DBG_PRINT1("\nEvaluating ufunc %s\n", ufunc_name); /* Initialize all the operands and dtypes to NULL */ for (i = 0; i < nop; ++i) { op[i] = NULL; dtypes[i] = NULL; arr_prep[i] = NULL; } NPY_UF_DBG_PRINT("Getting arguments\n"); /* Get all the arguments */ retval = get_ufunc_arguments(ufunc, args, kwds, op, &order, &casting, &extobj, &type_tup, &subok, &wheremask); if (retval < 0) { goto fail; } /* All array have to be in the same device */ 
retval = _on_same_device(ufunc, op); if (retval < 0) { PyErr_SetString(PyExc_ValueError, "All array have to be on the same device"); goto fail; } /* * Use the masked loop if a wheremask was specified. */ if (wheremask != NULL) { need_fancy = 1; } /* Get the buffersize and errormask */ if (_get_bufsize_errmask(extobj, ufunc_name, &buffersize, &errormask) < 0) { retval = -1; goto fail; } NPY_UF_DBG_PRINT("Finding inner loop\n"); /* Work around to live with numpy type_resolver */ ufunc_pre_typeresolver(ufunc, op, scal_ptrs, scal_buffer, 4); retval = ufunc->type_resolver(ufunc, casting, (PyArrayObject **)op, type_tup, dtypes); ufunc_post_typeresolver(ufunc, op, scal_ptrs); if (retval < 0) { goto fail; } /* Only do the trivial loop check for the unmasked version. */ if (!need_fancy) { /* * This checks whether a trivial loop is ok, making copies of * scalar and one dimensional operands if that will help. */ trivial_loop_ok = check_for_trivial_loop(ufunc, op, dtypes, buffersize); if (trivial_loop_ok < 0) { goto fail; } } #if NPY_UF_DBG_TRACING printf("input types:\n"); for (i = 0; i < nin; ++i) { PyObject_Print((PyObject *)dtypes[i], stdout, 0); printf(" "); } printf("\noutput types:\n"); for (i = nin; i < nop; ++i) { PyObject_Print((PyObject *)dtypes[i], stdout, 0); printf(" "); } printf("\n"); #endif if (subok) { PyErr_SetString(PyExc_ValueError, "does not support subok right now"); goto fail; } /* Start with the floating-point exception flags cleared */ PyUFunc_clearfperr(); /* Do the ufunc loop */ if (need_fancy) { NPY_UF_DBG_PRINT("Executing fancy inner loop\n"); retval = execute_fancy_ufunc_loop(ufunc, wheremask, op, dtypes, order, buffersize, arr_prep, arr_prep_args); } else { NPY_UF_DBG_PRINT("Executing legacy inner loop\n"); retval = execute_legacy_ufunc_loop(ufunc, trivial_loop_ok, op, dtypes, order, buffersize, arr_prep, arr_prep_args); } if (retval < 0) { goto fail; } /* Check whether any errors occurred during the loop */ if (PyErr_Occurred() || 
_check_ufunc_fperr(errormask, extobj, ufunc_name) < 0) { retval = -1; goto fail; } /* The caller takes ownership of all the references in op */ for (i = 0; i < nop; ++i) { Py_XDECREF(dtypes[i]); Py_XDECREF(arr_prep[i]); } Py_XDECREF(type_tup); Py_XDECREF(arr_prep_args); Py_XDECREF(wheremask); NPY_UF_DBG_PRINT("Returning Success\n"); return 0; fail: NPY_UF_DBG_PRINT1("Returning failure code %d\n", retval); for (i = 0; i < nop; ++i) { Py_XDECREF(op[i]); op[i] = NULL; Py_XDECREF(dtypes[i]); Py_XDECREF(arr_prep[i]); } Py_XDECREF(type_tup); Py_XDECREF(arr_prep_args); Py_XDECREF(wheremask); return retval; } /* * Given the output type, finds the specified binary op. The * ufunc must have nin==2 and nout==1. The function may modify * otype if the given type isn't found. * * Returns 0 on success, -1 on failure. */ static int get_binary_op_function(PyUFuncObject *ufunc, int *otype, PyUFuncGenericFunction *out_innerloop, void **out_innerloopdata) { int i; PyUFunc_Loop1d *funcdata; NPY_UF_DBG_PRINT1("Getting binary op function for type number %d\n", *otype); /* If the type is custom and there are userloops, search for it here */ if (ufunc->userloops != NULL && PyTypeNum_ISUSERDEF(*otype)) { PyObject *key, *obj; key = PyInt_FromLong(*otype); if (key == NULL) { return -1; } obj = PyDict_GetItem(ufunc->userloops, key); Py_DECREF(key); if (obj != NULL) { funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj); while (funcdata != NULL) { int *types = funcdata->arg_types; if (types[0] == *otype && types[1] == *otype && types[2] == *otype) { *out_innerloop = funcdata->func; *out_innerloopdata = funcdata->data; return 0; } funcdata = funcdata->next; } } } /* Search for a function with compatible inputs */ for (i = 0; i < ufunc->ntypes; ++i) { char *types = ufunc->types + i*ufunc->nargs; NPY_UF_DBG_PRINT3("Trying loop with signature %d %d -> %d\n", types[0], types[1], types[2]); if (PyArray_CanCastSafely(*otype, types[0]) && types[0] == types[1] && (*otype == NPY_OBJECT || types[0] != 
NPY_OBJECT)) { /* If the signature is "xx->x", we found the loop */ if (types[2] == types[0]) { *out_innerloop = ufunc->functions[i]; *out_innerloopdata = ufunc->data[i]; *otype = types[0]; return 0; } /* * Otherwise, we found the natural type of the reduction, * replace otype and search again */ else { *otype = types[2]; break; } } } /* Search for the exact function */ for (i = 0; i < ufunc->ntypes; ++i) { char *types = ufunc->types + i*ufunc->nargs; if (PyArray_CanCastSafely(*otype, types[0]) && types[0] == types[1] && types[1] == types[2] && (*otype == NPY_OBJECT || types[0] != NPY_OBJECT)) { /* Since the signature is "xx->x", we found the loop */ *out_innerloop = ufunc->functions[i]; *out_innerloopdata = ufunc->data[i]; *otype = types[0]; return 0; } } return -1; } static int reduce_type_resolver(PyUFuncObject *ufunc, PyMicArrayObject *arr, PyArray_Descr *odtype, PyArray_Descr **out_dtype) { int i, retcode; PyMicArrayObject *op[3] = {arr, arr, NULL}; PyArray_Descr *dtypes[3] = {NULL, NULL, NULL}; const char *ufunc_name = _get_ufunc_name(ufunc); PyObject *type_tup = NULL; void *ptrs[3]; npy_longlong buf[3*4]; *out_dtype = NULL; /* * If odtype is specified, make a type tuple for the type * resolution. */ if (odtype != NULL) { type_tup = PyTuple_Pack(3, odtype, odtype, Py_None); if (type_tup == NULL) { return -1; } } ufunc_pre_typeresolver(ufunc, op, ptrs, buf, 4); /* Use the type resolution function to find our loop */ retcode = ufunc->type_resolver( ufunc, NPY_UNSAFE_CASTING, (PyArrayObject **)op, type_tup, dtypes); ufunc_post_typeresolver(ufunc, op, ptrs); Py_DECREF(type_tup); if (retcode == -1) { return -1; } else if (retcode == -2) { PyErr_Format(PyExc_RuntimeError, "type resolution returned NotImplemented to " "reduce ufunc %s", ufunc_name); return -1; } /* * The first two type should be equivalent. Because of how * reduce has historically behaved in NumPy, the return type * could be different, and it is the return type on which the * reduction occurs. 
 */
    if (!PyArray_EquivTypes(dtypes[0], dtypes[1])) {
        for (i = 0; i < 3; ++i) {
            Py_DECREF(dtypes[i]);
        }
        PyErr_Format(PyExc_RuntimeError,
                "could not find a type resolution appropriate for "
                "reduce ufunc %s", ufunc_name);
        return -1;
    }

    /* Only the return dtype is handed back; drop the two input dtypes. */
    Py_DECREF(dtypes[0]);
    Py_DECREF(dtypes[1]);
    *out_dtype = dtypes[2];

    return 0;
}

/* Fill 'result' with the reduction identity 0 (the False bool scalar). */
static int
assign_reduce_identity_zero(PyMicArrayObject *result, void *NPY_UNUSED(data))
{
    return PyMicArray_FillWithScalar(result, PyArrayScalar_False);
}

/* Fill 'result' with the reduction identity 1 (the True bool scalar). */
static int
assign_reduce_identity_one(PyMicArrayObject *result, void *NPY_UNUSED(data))
{
    return PyMicArray_FillWithScalar(result, PyArrayScalar_True);
}

/*
 * Fill 'result' with the reduction identity -1.  The -1 scalar is built
 * lazily and cached for the lifetime of the process (never released).
 */
static int
assign_reduce_identity_minusone(PyMicArrayObject *result, void *NPY_UNUSED(data))
{
    static PyObject *MinusOne = NULL;

    if (MinusOne == NULL) {
        if ((MinusOne = PyInt_FromLong(-1)) == NULL) {
            return -1;
        }
    }
    return PyMicArray_FillWithScalar(result, MinusOne);
}

/*
 * Inner loop driver for a reduction over 'iter': repeatedly invokes the
 * ufunc's selected inner loop, offloading each call to the iterator's
 * device via an OpenMP target region.  (Definition continues past this
 * chunk; see below for the skip-first / main loops.)
 */
static int
reduce_loop(MpyIter *iter, npy_intp skip_first_count, PyUFuncObject *ufunc)
{
    PyArray_Descr *dtypes[3], **iter_dtypes;
    npy_intp *dataptrs, *strides, *countptr;
    /* Two iterator operands are expanded to the three the loop expects. */
    npy_intp dataptrs_copy[3];
    npy_intp strides_copy[3];
    int needs_api, device;
    /* The normal selected inner loop */
    void *loopdata;
    MPY_TARGET_MIC PyUFuncGenericFunction innerloop = NULL;
    MPY_TARGET_MIC void (*innerloopdata)(void) = NULL;
    MPY_TARGET_MIC MpyIter_IterNextFunc *iternext = NULL;
    MPY_TARGET_MIC MpyIter_IsFirstVisitFunc *isfirstvisit;
    NPY_BEGIN_THREADS_DEF;

    /* Get the inner loop */
    /* dtypes[2] mirrors dtypes[0]: the reduce loop is "xx->x" shaped. */
    iter_dtypes = MpyIter_GetDescrArray(iter);
    dtypes[0] = iter_dtypes[0];
    dtypes[1] = iter_dtypes[1];
    dtypes[2] = iter_dtypes[0];
    if (ufunc->legacy_inner_loop_selector(ufunc, dtypes,
                            &innerloop, &loopdata, &needs_api) < 0) {
        return -1;
    }

    iternext = MpyIter_GetIterNext(iter, NULL);
    if (iternext == NULL) {
        return -1;
    }

    innerloopdata = loopdata;
    isfirstvisit = &MpyIter_IsFirstVisit;
    dataptrs = (npy_intp *) MpyIter_GetDataPtrArray(iter);
    strides = MpyIter_GetInnerStrideArray(iter);
    countptr = MpyIter_GetInnerLoopSizePtr(iter);
    device =
MpyIter_GetDevice(iter); MPY_BEGIN_THREADS_NDITER(iter); if (skip_first_count > 0) { do { npy_intp count = *countptr; /* Skip any first-visit elements */ if (MpyIter_IsFirstVisit(iter, 0)) { if (strides[0] == 0) { --count; --skip_first_count; dataptrs[1] += strides[1]; } else { skip_first_count -= count; count = 0; } } /* Turn the two items into three for the inner loop */ dataptrs_copy[0] = dataptrs[0]; dataptrs_copy[1] = dataptrs[1]; dataptrs_copy[2] = dataptrs[0]; strides_copy[0] = strides[0]; strides_copy[1] = strides[1]; strides_copy[2] = strides[0]; #pragma omp target device(device) map(to: dataptrs_copy, count,\ strides_copy,\ innerloop, innerloopdata) innerloop((char **)dataptrs_copy, &count, strides_copy, innerloopdata); /* Jump to the faster loop when skipping is done */ if (skip_first_count == 0) { if (iternext(iter)) { break; } else { goto finish_loop; } } } while (iternext(iter)); } do { /* Turn the two items into three for the inner loop */ dataptrs_copy[0] = dataptrs[0]; dataptrs_copy[1] = dataptrs[1]; dataptrs_copy[2] = dataptrs[0]; strides_copy[0] = strides[0]; strides_copy[1] = strides[1]; strides_copy[2] = strides[0]; #pragma omp target device(device) map(to: dataptrs_copy, strides_copy,\ innerloop, innerloopdata,\ countptr[0:1]) innerloop((char **) dataptrs_copy, countptr, strides_copy, innerloopdata); } while (iternext(iter)); finish_loop: NPY_END_THREADS; return (needs_api && PyErr_Occurred()) ? -1 : 0; } /* * The implementation of the reduction operators with the new iterator * turned into a bit of a long function here, but I think the design * of this part needs to be changed to be more like einsum, so it may * not be worth refactoring it too much. Consider this timing: * * >>> a = arange(10000) * * >>> timeit sum(a) * 10000 loops, best of 3: 17 us per loop * * >>> timeit einsum("i->",a) * 100000 loops, best of 3: 13.5 us per loop * * The axes must already be bounds-checked by the calling function, * this function does not validate them. 
*/ static PyMicArrayObject * PyMUFunc_Reduce(PyUFuncObject *ufunc, PyMicArrayObject *arr, PyMicArrayObject *out, int naxes, int *axes, PyArray_Descr *odtype, int keepdims) { int iaxes, reorderable, ndim; npy_bool axis_flags[NPY_MAXDIMS]; PyArray_Descr *dtype; PyMicArrayObject *result; PyMicArray_AssignReduceIdentityFunc *assign_identity = NULL; const char *ufunc_name = _get_ufunc_name(ufunc); /* These parameters come from a TLS global */ int buffersize = 0, errormask = 0; NPY_UF_DBG_PRINT1("\nEvaluating ufunc %s.reduce\n", ufunc_name); ndim = PyMicArray_NDIM(arr); /* Create an array of flags for reduction */ memset(axis_flags, 0, ndim); for (iaxes = 0; iaxes < naxes; ++iaxes) { int axis = axes[iaxes]; if (axis_flags[axis]) { PyErr_SetString(PyExc_ValueError, "duplicate value in 'axis'"); return NULL; } axis_flags[axis] = 1; } switch (ufunc->identity) { case PyUFunc_Zero: assign_identity = &assign_reduce_identity_zero; reorderable = 1; /* * The identity for a dynamic dtype like * object arrays can't be used in general */ if (PyMicArray_ISOBJECT(arr) && PyMicArray_SIZE(arr) != 0) { assign_identity = NULL; } break; case PyUFunc_One: assign_identity = &assign_reduce_identity_one; reorderable = 1; /* * The identity for a dynamic dtype like * object arrays can't be used in general */ if (PyMicArray_ISOBJECT(arr) && PyMicArray_SIZE(arr) != 0) { assign_identity = NULL; } break; case PyUFunc_MinusOne: assign_identity = &assign_reduce_identity_minusone; reorderable = 1; /* * The identity for a dynamic dtype like * object arrays can't be used in general */ if (PyMicArray_ISOBJECT(arr) && PyMicArray_SIZE(arr) != 0) { assign_identity = NULL; } break; case PyUFunc_None: reorderable = 0; break; case PyUFunc_ReorderableNone: reorderable = 1; break; default: PyErr_Format(PyExc_ValueError, "ufunc %s has an invalid identity for reduction", ufunc_name); return NULL; } if (_get_bufsize_errmask(NULL, "reduce", &buffersize, &errormask) < 0) { return NULL; } /* Get the reduction dtype */ 
if (reduce_type_resolver(ufunc, arr, odtype, &dtype) < 0) { return NULL; } result = PyMUFunc_ReduceWrapper(arr, out, NULL, dtype, dtype, NPY_UNSAFE_CASTING, axis_flags, reorderable, keepdims, 0, assign_identity, reduce_loop, ufunc, buffersize, ufunc_name); Py_DECREF(dtype); return result; } static PyMicArrayObject * PyMUFunc_Accumulate(PyUFuncObject *ufunc, PyMicArrayObject *arr, PyMicArrayObject *out, int axis, int otype) { /* TODO:implement this */ return NULL; } /* * Reduceat performs a reduce over an axis using the indices as a guide * * op.reduceat(array,indices) computes * op.reduce(array[indices[i]:indices[i+1]] * for i=0..end with an implicit indices[i+1]=len(array) * assumed when i=end-1 * * if indices[i+1] <= indices[i]+1 * then the result is array[indices[i]] for that value * * op.accumulate(array) is the same as * op.reduceat(array,indices)[::2] * where indices is range(len(array)-1) with a zero placed in every other sample * indices = zeros(len(array)*2-1) * indices[1::2] = range(1,len(array)) * * output shape is based on the size of indices */ static PyMicArrayObject * PyMUFunc_Reduceat(PyUFuncObject *ufunc, PyMicArrayObject *arr, PyArrayObject *ind, PyMicArrayObject *out, int axis, int otype) { //TODO: Implement this return NULL; } /* * This code handles reduce, reduceat, and accumulate * (accumulate and reduce are special cases of the more general reduceat * but they are handled separately for speed) */ static PyObject * PyMUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds, int operation) { int i, naxes=0, ndim; int axes[NPY_MAXDIMS]; PyObject *axes_in = NULL; PyMicArrayObject *mp, *ret = NULL; PyObject *op, *res = NULL; PyObject *obj_ind, *context; PyArrayObject *indices = NULL; PyArray_Descr *otype = NULL; PyObject *out_obj = NULL; PyMicArrayObject *out = NULL; int keepdims = 0; static char *reduce_kwlist[] = { "array", "axis", "dtype", "out", "keepdims", NULL}; static char *accumulate_kwlist[] = { "array", "axis", 
"dtype", "out", "keepdims", NULL}; static char *reduceat_kwlist[] = { "array", "indices", "axis", "dtype", "out", NULL}; static char *_reduce_type[] = {"reduce", "accumulate", "reduceat", NULL}; if (ufunc == NULL) { PyErr_SetString(PyExc_ValueError, "function not supported"); return NULL; } if (ufunc->core_enabled) { PyErr_Format(PyExc_RuntimeError, "Reduction not defined on ufunc with signature"); return NULL; } if (ufunc->nin != 2) { PyErr_Format(PyExc_ValueError, "%s only supported for binary functions", _reduce_type[operation]); return NULL; } if (ufunc->nout != 1) { PyErr_Format(PyExc_ValueError, "%s only supported for functions " "returning a single value", _reduce_type[operation]); return NULL; } /* if there is a tuple of 1 for `out` in kwds, unpack it */ if (kwds != NULL) { PyObject *out_obj = PyDict_GetItem(kwds, mpy_um_str_out); if (out_obj != NULL && PyTuple_CheckExact(out_obj)) { if (PyTuple_GET_SIZE(out_obj) != 1) { PyErr_SetString(PyExc_ValueError, "The 'out' tuple must have exactly one entry"); return NULL; } out_obj = PyTuple_GET_ITEM(out_obj, 0); PyDict_SetItem(kwds, mpy_um_str_out, out_obj); } } if (operation == UFUNC_REDUCEAT) { PyArray_Descr *indtype; indtype = PyArray_DescrFromType(NPY_INTP); if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|OO&O&:reduceat", reduceat_kwlist, &op, &obj_ind, &axes_in, PyArray_DescrConverter2, &otype, PyMicArray_OutputConverter, &out)) { Py_XDECREF(otype); return NULL; } indices = (PyArrayObject *)PyArray_FromAny(obj_ind, indtype, 1, 1, NPY_ARRAY_CARRAY, NULL); if (indices == NULL) { Py_XDECREF(otype); return NULL; } } else if (operation == UFUNC_ACCUMULATE) { PyObject *bad_keepdimarg = NULL; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OO&O&O:accumulate", accumulate_kwlist, &op, &axes_in, PyArray_DescrConverter2, &otype, PyArray_OutputConverter, &out, &bad_keepdimarg)) { Py_XDECREF(otype); return NULL; } /* Until removed outright by https://github.com/numpy/numpy/pull/8187 */ if (bad_keepdimarg != NULL) { if 
(DEPRECATE_FUTUREWARNING( "keepdims argument has no effect on accumulate, and will be " "removed in future") < 0) { Py_XDECREF(otype); return NULL; } } } else { if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OO&O&i:reduce", reduce_kwlist, &op, &axes_in, PyArray_DescrConverter2, &otype, PyMicArray_OutputConverter, &out, &keepdims)) { Py_XDECREF(otype); return NULL; } } /* Ensure input is an array */ if (!PyMicArray_Check(op)) { PyErr_SetString(PyExc_TypeError, "array must be an MicArray"); Py_XDECREF(otype); return NULL; } Py_INCREF(op); mp = (PyMicArrayObject *)op; ndim = PyMicArray_NDIM(mp); /* Check to see that type (and otype) is not FLEXIBLE */ if (PyMicArray_ISFLEXIBLE(mp) || (otype && PyTypeNum_ISFLEXIBLE(otype->type_num))) { PyErr_Format(PyExc_TypeError, "cannot perform %s with flexible type", _reduce_type[operation]); Py_XDECREF(otype); Py_DECREF(mp); return NULL; } /* Convert the 'axis' parameter into a list of axes */ if (axes_in == NULL) { naxes = 1; axes[0] = 0; } /* Convert 'None' into all the axes */ else if (axes_in == Py_None) { naxes = ndim; for (i = 0; i < naxes; ++i) { axes[i] = i; } } else if (PyTuple_Check(axes_in)) { naxes = PyTuple_Size(axes_in); if (naxes < 0 || naxes > NPY_MAXDIMS) { PyErr_SetString(PyExc_ValueError, "too many values for 'axis'"); Py_XDECREF(otype); Py_DECREF(mp); return NULL; } for (i = 0; i < naxes; ++i) { PyObject *tmp = PyTuple_GET_ITEM(axes_in, i); int axis = PyArray_PyIntAsInt(tmp); if (axis == -1 && PyErr_Occurred()) { Py_XDECREF(otype); Py_DECREF(mp); return NULL; } if (check_and_adjust_axis(&axis, ndim) < 0) { Py_XDECREF(otype); Py_DECREF(mp); return NULL; } axes[i] = (int)axis; } } /* Try to interpret axis as an integer */ else { int axis = PyArray_PyIntAsInt(axes_in); /* TODO: PyNumber_Index would be good to use here */ if (axis == -1 && PyErr_Occurred()) { Py_XDECREF(otype); Py_DECREF(mp); return NULL; } /* Special case letting axis={0 or -1} slip through for scalars */ if (ndim == 0 && (axis == 0 || axis == 
-1)) { axis = 0; } else if (check_and_adjust_axis(&axis, ndim) < 0) { return NULL; } axes[0] = (int)axis; naxes = 1; } /* Check to see if input is zero-dimensional. */ if (ndim == 0) { /* * A reduction with no axes is still valid but trivial. * As a special case for backwards compatibility in 'sum', * 'prod', et al, also allow a reduction where axis=0, even * though this is technically incorrect. */ naxes = 0; if (!(operation == UFUNC_REDUCE && (naxes == 0 || (naxes == 1 && axes[0] == 0)))) { PyErr_Format(PyExc_TypeError, "cannot %s on a scalar", _reduce_type[operation]); Py_XDECREF(otype); Py_DECREF(mp); return NULL; } } /* * If out is specified it determines otype * unless otype already specified. */ if (otype == NULL && out != NULL) { otype = PyMicArray_DESCR(out); Py_INCREF(otype); } if (otype == NULL) { /* * For integer types --- make sure at least a long * is used for add and multiply reduction to avoid overflow */ int typenum = PyMicArray_TYPE(mp); if ((PyTypeNum_ISBOOL(typenum) || PyTypeNum_ISINTEGER(typenum)) && ((strcmp(ufunc->name,"add") == 0) || (strcmp(ufunc->name,"multiply") == 0))) { if (PyTypeNum_ISBOOL(typenum)) { typenum = NPY_LONG; } else if ((size_t)PyMicArray_DESCR(mp)->elsize < sizeof(long)) { if (PyTypeNum_ISUNSIGNED(typenum)) { typenum = NPY_ULONG; } else { typenum = NPY_LONG; } } } otype = PyArray_DescrFromType(typenum); } switch(operation) { case UFUNC_REDUCE: ret = PyMUFunc_Reduce(ufunc, mp, out, naxes, axes, otype, keepdims); break; case UFUNC_ACCUMULATE: if (naxes != 1) { PyErr_SetString(PyExc_ValueError, "accumulate does not allow multiple axes"); Py_XDECREF(otype); Py_DECREF(mp); return NULL; } ret = PyMUFunc_Accumulate(ufunc, mp, out, axes[0], otype->type_num); break; case UFUNC_REDUCEAT: if (naxes != 1) { PyErr_SetString(PyExc_ValueError, "reduceat does not allow multiple axes"); Py_XDECREF(otype); Py_DECREF(mp); return NULL; } ret = PyMUFunc_Reduceat(ufunc, mp, indices, out, axes[0], otype->type_num); Py_DECREF(indices); break; } 
Py_DECREF(mp); Py_DECREF(otype); if (ret == NULL) { return NULL; } /* If an output parameter was provided, don't wrap it */ if (out != NULL) { return (PyObject *)ret; } if (Py_TYPE(op) != Py_TYPE(ret)) { res = PyObject_CallMethod(op, "__array_wrap__", "O", ret); if (res == NULL) { PyErr_Clear(); } else if (res == Py_None) { Py_DECREF(res); } else { Py_DECREF(ret); return res; } } return PyMicArray_Return(ret); } /* * Returns an incref'ed pointer to the proper wrapping object for a * ufunc output argument, given the output argument 'out', and the * input's wrapping function, 'wrap'. */ static PyObject* _get_out_wrap(PyObject *out, PyObject *wrap) { PyObject *owrap; if (out == Py_None) { /* Iterator allocated outputs get the input's wrapping */ Py_XINCREF(wrap); return wrap; } if (PyMicArray_CheckExact(out) || PyArray_CheckExact(out)) { /* None signals to not call any wrapping */ Py_RETURN_NONE; } /* * For array subclasses use their __array_wrap__ method, or the * input's wrapping if not available */ owrap = PyObject_GetAttr(out, mpy_um_str_array_wrap); if (owrap == NULL || !PyCallable_Check(owrap)) { Py_XDECREF(owrap); owrap = wrap; Py_XINCREF(wrap); PyErr_Clear(); } return owrap; } /* * This function analyzes the input arguments * and determines an appropriate __array_wrap__ function to call * for the outputs. * * If an output argument is provided, then it is wrapped * with its own __array_wrap__ not with the one determined by * the input arguments. * * if the provided output argument is already an array, * the wrapping function is None (which means no wrapping will * be done --- not even PyArray_Return). * * A NULL is placed in output_wrap for outputs that * should just have PyArray_Return called. 
*/
/*
 * Determine the __array_wrap__ callable to use for each ufunc output.
 * On return, output_wrap[i] is either NULL (call PyMicArray_Return),
 * Py_None (return the out= argument unwrapped), or a new reference to a
 * callable wrap method.  See the comment block above for the contract.
 */
static void
_find_array_wrap(PyObject *args, PyObject *kwds,
                 PyObject **output_wrap, int nin, int nout)
{
    Py_ssize_t nargs;
    int i, idx_offset, start_idx;
    int np = 0;
    PyObject *with_wrap[NPY_MAXARGS], *wraps[NPY_MAXARGS];
    PyObject *obj, *wrap = NULL;

    /*
     * If a 'subok' parameter is passed and isn't True, don't wrap but put None
     * into slots with out arguments which means return the out argument
     */
    if (kwds != NULL && (obj = PyDict_GetItem(kwds,
                                              mpy_um_str_subok)) != NULL) {
        if (obj != Py_True) {
            /* skip search for wrap members */
            goto handle_out;
        }
    }

    /* Collect the callable __array_wrap__ of every non-exact input. */
    for (i = 0; i < nin; i++) {
        obj = PyTuple_GET_ITEM(args, i);
        if (PyMicArray_CheckExact(obj) || PyArray_CheckExact(obj)
                || PyArray_IsAnyScalar(obj)) {
            continue;
        }
        wrap = PyObject_GetAttr(obj, mpy_um_str_array_wrap);
        if (wrap) {
            if (PyCallable_Check(wrap)) {
                with_wrap[np] = obj;
                wraps[np] = wrap;
                ++np;
            }
            else {
                Py_DECREF(wrap);
                wrap = NULL;
            }
        }
        else {
            PyErr_Clear();
        }
    }
    if (np > 0) {
        /* If we have some wraps defined, find the one of highest priority */
        wrap = wraps[0];
        if (np > 1) {
            double maxpriority = PyArray_GetPriority(with_wrap[0],
                                                     NPY_PRIORITY);
            for (i = 1; i < np; ++i) {
                double priority = PyArray_GetPriority(with_wrap[i],
                                                      NPY_PRIORITY);
                if (priority > maxpriority) {
                    maxpriority = priority;
                    Py_DECREF(wrap);
                    wrap = wraps[i];
                }
                else {
                    Py_DECREF(wraps[i]);
                }
            }
        }
    }

    /*
     * Here wrap is the wrapping function determined from the
     * input arrays (could be NULL).
     *
     * For all the output arrays decide what to do.
     *
     * 1) Use the wrap function determined from the input arrays
     * This is the default if the output array is not
     * passed in.
     *
     * 2) Use the __array_wrap__ method of the output object
     * passed in. -- this is special cased for
     * exact ndarray so that no PyArray_Return is
     * done in that case.
     */
handle_out:
    nargs = PyTuple_GET_SIZE(args);
    /* Default is using positional arguments */
    obj = args;
    idx_offset = nin;
    start_idx = 0;
    if (nin == nargs && kwds != NULL) {
        /* There may be a keyword argument we can use instead */
        obj = PyDict_GetItem(kwds, mpy_um_str_out);
        if (obj == NULL) {
            /* No, go back to positional (even though there aren't any) */
            obj = args;
        }
        else {
            idx_offset = 0;
            if (PyTuple_Check(obj)) {
                /* If a tuple, must have all nout items */
                nargs = nout;
            }
            else {
                /* If the kwarg is not a tuple then it is an array (or None) */
                output_wrap[0] = _get_out_wrap(obj, wrap);
                start_idx = 1;
                nargs = 1;
            }
        }
    }

    for (i = start_idx; i < nout; ++i) {
        int j = idx_offset + i;

        if (j < nargs) {
            output_wrap[i] = _get_out_wrap(PyTuple_GET_ITEM(obj, j),
                                           wrap);
        }
        else {
            output_wrap[i] = wrap;
            Py_XINCREF(wrap);
        }
    }

    Py_XDECREF(wrap);
    return;
}

/*
 * tp_call slot of PyMUFunc_Type: run the ufunc via
 * PyMUFunc_GenericFunction, then wrap each output with the
 * __array_wrap__ chosen by _find_array_wrap.  Returns a single object
 * for nout == 1, otherwise a tuple of outputs.
 */
static PyObject *
mufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds)
{
    int i;
    PyTupleObject *ret;
    PyMicArrayObject *mps[NPY_MAXARGS];
    PyObject *retobj[NPY_MAXARGS];
    PyObject *wraparr[NPY_MAXARGS];
    PyObject *res;
    /* NOTE(review): 'override' is never used in this function -- likely a
     * leftover from numpy's __array_ufunc__ override support. */
    PyObject *override = NULL;
    int errval;

    /*
     * Initialize all array objects to NULL to make cleanup easier
     * if something goes wrong.
     */
    for (i = 0; i < ufunc->nargs; i++) {
        mps[i] = NULL;
    }

    errval = PyMUFunc_GenericFunction(ufunc, args, kwds, mps);
    if (errval < 0) {
        for (i = 0; i < ufunc->nargs; i++) {
            PyArray_XDECREF_ERR((PyArrayObject *)mps[i]);
        }
        if (errval == -1) {
            return NULL;
        }
        else if (ufunc->nin == 2 && ufunc->nout == 1) {
            /*
             * For array_richcompare's benefit -- see the long comment in
             * get_ufunc_arguments.
             */
            Py_INCREF(Py_NotImplemented);
            return Py_NotImplemented;
        }
        else {
            PyErr_SetString(PyExc_TypeError,
                            "XX can't happen, please report a bug XX");
            return NULL;
        }
    }

    /* Free the input references */
    for (i = 0; i < ufunc->nin; i++) {
        Py_XDECREF(mps[i]);
    }

    /*
     * Use __array_wrap__ on all outputs
     * if present on one of the input arguments.
     * If present for multiple inputs:
     * use __array_wrap__ of input object with largest
     * __array_priority__ (default = 0.0)
     *
     * Exception: we should not wrap outputs for items already
     * passed in as output-arguments. These items should either
     * be left unwrapped or wrapped by calling their own __array_wrap__
     * routine.
     *
     * For each output argument, wrap will be either
     * NULL --- call PyArray_Return() -- default if no output arguments given
     * None --- array-object passed in don't call PyArray_Return
     * method --- the __array_wrap__ method to call.
     */
    _find_array_wrap(args, kwds, wraparr, ufunc->nin, ufunc->nout);

    /* wrap outputs */
    for (i = 0; i < ufunc->nout; i++) {
        int j = ufunc->nin+i;
        PyObject *wrap = wraparr[i];

        if (wrap != NULL) {
            if (wrap == Py_None) {
                Py_DECREF(wrap);
                retobj[i] = (PyObject *)mps[j];
                continue;
            }
            res = PyObject_CallFunction(wrap, "O(OOi)", mps[j], ufunc, args, i);
            /* Handle __array_wrap__ that does not accept a context argument */
            if (res == NULL && PyErr_ExceptionMatches(PyExc_TypeError)) {
                PyErr_Clear();
                res = PyObject_CallFunctionObjArgs(wrap, mps[j], NULL);
            }
            Py_DECREF(wrap);
            if (res == NULL) {
                goto fail;
            }
            else {
                Py_DECREF(mps[j]);
                retobj[i] = res;
                continue;
            }
        }
        else {
            /* default behavior */
            retobj[i] = PyMicArray_Return(mps[j]);
        }
    }

    if (ufunc->nout == 1) {
        return retobj[0];
    }
    else {
        ret = (PyTupleObject *)PyTuple_New(ufunc->nout);
        for (i = 0; i < ufunc->nout; i++) {
            PyTuple_SET_ITEM(ret, i, retobj[i]);
        }
        return (PyObject *)ret;
    }

fail:
    for (i = ufunc->nin; i < ufunc->nargs; i++) {
        Py_XDECREF(mps[i]);
    }
    return NULL;
}

/*
 * micpy.geterr(): return the thread-local [bufsize, errmask, errobj]
 * list, or a freshly built list of defaults when none was set.
 */
NPY_NO_EXPORT PyObject *
ufunc_geterr(PyObject *NPY_UNUSED(dummy), PyObject *args)
{
    PyObject *thedict;
    PyObject *res;

    if (!PyArg_ParseTuple(args, "")) {
        return NULL;
    }
    thedict = PyThreadState_GetDict();
    if (thedict == NULL) {
        thedict = PyEval_GetBuiltins();
    }
    res = PyDict_GetItem(thedict, mpy_um_str_pyvals_name);
    if (res != NULL) {
        Py_INCREF(res);
        return res;
    }
    /* Construct list of defaults */
    res = PyList_New(3);
    if (res == NULL) {
        return NULL;
    }
    PyList_SET_ITEM(res, 0, PyInt_FromLong(NPY_BUFSIZE));
    PyList_SET_ITEM(res, 1, PyInt_FromLong(UFUNC_ERR_DEFAULT));
    PyList_SET_ITEM(res, 2, Py_None); Py_INCREF(Py_None);
    return res;
}

#if USE_USE_DEFAULTS==1
/*
 * This is a strategy to buy a little speed up and avoid the dictionary
 * look-up in the default case.  It should work in the presence of
 * threads.  If it is deemed too complicated or it doesn't actually work
 * it could be taken out.
 */
static int
ufunc_update_use_defaults(void)
{
    PyObject *errobj = NULL;
    int errmask, bufsize;
    int res;

    /* Temporarily disable the fast path so GetPyValues reads the dict. */
    PyUFunc_NUM_NODEFAULTS += 1;
    res = PyUFunc_GetPyValues("test", &bufsize, &errmask, &errobj);
    PyUFunc_NUM_NODEFAULTS -= 1;
    if (res < 0) {
        Py_XDECREF(errobj);
        return -1;
    }
    if ((errmask != UFUNC_ERR_DEFAULT) || (bufsize != NPY_BUFSIZE)
            || (PyTuple_GET_ITEM(errobj, 1) != Py_None)) {
        PyUFunc_NUM_NODEFAULTS += 1;
    }
    else if (PyUFunc_NUM_NODEFAULTS > 0) {
        PyUFunc_NUM_NODEFAULTS -= 1;
    }
    Py_XDECREF(errobj);
    return 0;
}
#endif

/*
 * micpy.seterr(val): store a [bufsize, errmask, errobj] list in the
 * thread-state dict (or builtins as a fallback).
 */
NPY_NO_EXPORT PyObject *
ufunc_seterr(PyObject *NPY_UNUSED(dummy), PyObject *args)
{
    PyObject *thedict;
    int res;
    PyObject *val;
    static char *msg = "Error object must be a list of length 3";

    if (!PyArg_ParseTuple(args, "O", &val)) {
        return NULL;
    }
    if (!PyList_CheckExact(val) || PyList_GET_SIZE(val) != 3) {
        PyErr_SetString(PyExc_ValueError, msg);
        return NULL;
    }
    thedict = PyThreadState_GetDict();
    if (thedict == NULL) {
        thedict = PyEval_GetBuiltins();
    }
    res = PyDict_SetItem(thedict, mpy_um_str_pyvals_name, val);
    if (res < 0) {
        return NULL;
    }
#if USE_USE_DEFAULTS==1
    if (ufunc_update_use_defaults() < 0) {
        return NULL;
    }
#endif
    Py_RETURN_NONE;
}

/*UFUNC_API*/
NPY_NO_EXPORT PyObject *
PyMUFunc_FromFuncAndData(PyUFuncGenericFunction *func, void **data,
                         char *types, int ntypes,
                         int nin, int nout, int identity,
                         const char *name, const char *doc, int unused)
{
    /* Convenience wrapper: no generalized-ufunc signature. */
    return PyMUFunc_FromFuncAndDataAndSignature(func, data, types, ntypes,
        nin, nout, identity, name, doc, 0, NULL);
}

/*UFUNC_API*/
NPY_NO_EXPORT
PyObject * PyMUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction *func, void **data, char *types, int ntypes, int nin, int nout, int identity, const char *name, const char *doc, int unused, const char *signature) { PyUFuncObject *ufunc; ufunc = (PyUFuncObject *) PyUFunc_FromFuncAndDataAndSignature(func, data, types, ntypes, nin, nout, identity, name, doc, unused, signature); if (ufunc != NULL) { /* Modify objec_type to PyMUFunc_Type */ ufunc->ob_type = &PyMUFunc_Type; } return (PyObject *)ufunc; } static int _does_loop_use_arrays(void *data) { return (data == PyUFunc_SetUsesArraysAsData); } /* * This is the first-part of the CObject structure. * * I don't think this will change, but if it should, then * this needs to be fixed. The exposed C-API was insufficient * because I needed to replace the pointer and it wouldn't * let me with a destructor set (even though it works fine * with the destructor). */ typedef struct { PyObject_HEAD void *c_obj; } _simple_cobj; #define _SETCPTR(cobj, val) ((_simple_cobj *)(cobj))->c_obj = (val) /* return 1 if arg1 > arg2, 0 if arg1 == arg2, and -1 if arg1 < arg2 */ static int cmp_arg_types(int *arg1, int *arg2, int n) { for (; n > 0; n--, arg1++, arg2++) { if (PyArray_EquivTypenums(*arg1, *arg2)) { continue; } if (PyArray_CanCastSafely(*arg1, *arg2)) { return -1; } return 1; } return 0; } /* * This frees the linked-list structure when the CObject * is destroyed (removed from the internal dictionary) */ static NPY_INLINE void _free_loop1d_list(PyUFunc_Loop1d *data) { int i; while (data != NULL) { PyUFunc_Loop1d *next = data->next; PyArray_free(data->arg_types); if (data->arg_dtypes != NULL) { for (i = 0; i < data->nargs; i++) { Py_DECREF(data->arg_dtypes[i]); } PyArray_free(data->arg_dtypes); } PyArray_free(data); data = next; } } #if PY_VERSION_HEX >= 0x03000000 static void _loop1d_list_free(PyObject *ptr) { PyUFunc_Loop1d *data = (PyUFunc_Loop1d *)PyCapsule_GetPointer(ptr, NULL); _free_loop1d_list(data); } #else static void 
_loop1d_list_free(void *ptr) { PyUFunc_Loop1d *data = (PyUFunc_Loop1d *)ptr; _free_loop1d_list(data); } #endif #undef _SETCPTR static void mufunc_dealloc(PyUFuncObject *ufunc) { PyArray_free(ufunc->core_num_dims); PyArray_free(ufunc->core_dim_ixs); PyArray_free(ufunc->core_offsets); PyArray_free(ufunc->core_signature); PyArray_free(ufunc->ptr); PyArray_free(ufunc->op_flags); Py_XDECREF(ufunc->userloops); Py_XDECREF(ufunc->obj); PyArray_free(ufunc); } static PyObject * mufunc_repr(PyUFuncObject *ufunc) { return PyUString_FromFormat("<mufunc '%s'>", ufunc->name); } /****************************************************************************** *** UFUNC METHODS *** *****************************************************************************/ /* * op.outer(a,b) is equivalent to op(a[:,NewAxis,NewAxis,etc.],b) * where a has b.ndim NewAxis terms appended. * * The result has dimensions a.ndim + b.ndim */ static PyObject * mufunc_outer(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) { //TODO int i; int errval; PyObject *override = NULL; PyObject *ret; PyArrayObject *ap1 = NULL, *ap2 = NULL, *ap_new = NULL; PyObject *new_args, *tmp; PyObject *shape1, *shape2, *newshape; if (ufunc->core_enabled) { PyErr_Format(PyExc_TypeError, "method outer is not allowed in ufunc with non-trivial"\ " signature"); return NULL; } if (ufunc->nin != 2) { PyErr_SetString(PyExc_ValueError, "outer product only supported "\ "for binary functions"); return NULL; } if (PySequence_Length(args) != 2) { PyErr_SetString(PyExc_TypeError, "exactly two arguments expected"); return NULL; } tmp = PySequence_GetItem(args, 0); if (tmp == NULL) { return NULL; } ap1 = (PyArrayObject *) PyArray_FromObject(tmp, NPY_NOTYPE, 0, 0); Py_DECREF(tmp); if (ap1 == NULL) { return NULL; } tmp = PySequence_GetItem(args, 1); if (tmp == NULL) { return NULL; } ap2 = (PyArrayObject *)PyArray_FromObject(tmp, NPY_NOTYPE, 0, 0); Py_DECREF(tmp); if (ap2 == NULL) { Py_DECREF(ap1); return NULL; } /* Construct new shape tuple */ 
shape1 = PyTuple_New(PyArray_NDIM(ap1)); if (shape1 == NULL) { goto fail; } for (i = 0; i < PyArray_NDIM(ap1); i++) { PyTuple_SET_ITEM(shape1, i, PyLong_FromLongLong((npy_longlong)PyArray_DIMS(ap1)[i])); } shape2 = PyTuple_New(PyArray_NDIM(ap2)); for (i = 0; i < PyArray_NDIM(ap2); i++) { PyTuple_SET_ITEM(shape2, i, PyInt_FromLong((long) 1)); } if (shape2 == NULL) { Py_DECREF(shape1); goto fail; } newshape = PyNumber_Add(shape1, shape2); Py_DECREF(shape1); Py_DECREF(shape2); if (newshape == NULL) { goto fail; } ap_new = (PyArrayObject *)PyArray_Reshape(ap1, newshape); Py_DECREF(newshape); if (ap_new == NULL) { goto fail; } new_args = Py_BuildValue("(OO)", ap_new, ap2); Py_DECREF(ap1); Py_DECREF(ap2); Py_DECREF(ap_new); ret = mufunc_generic_call(ufunc, new_args, kwds); Py_DECREF(new_args); return ret; fail: Py_XDECREF(ap1); Py_XDECREF(ap2); Py_XDECREF(ap_new); return NULL; } static PyObject * mufunc_reduce(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) { int errval; PyObject *override = NULL; /* `nin`, the last arg, is unused. So we put 0. */ return PyMUFunc_GenericReduction(ufunc, args, kwds, UFUNC_REDUCE); } static PyObject * mufunc_accumulate(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) { int errval; PyObject *override = NULL; return PyMUFunc_GenericReduction(ufunc, args, kwds, UFUNC_ACCUMULATE); } static PyObject * mufunc_reduceat(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds) { int errval; PyObject *override = NULL; return PyMUFunc_GenericReduction(ufunc, args, kwds, UFUNC_REDUCEAT); } /* Helper for ufunc_at, below */ static NPY_INLINE PyMicArrayObject * new_array_op(PyMicArrayObject *op_array, char *data) { npy_intp dims[1] = {1}; /* TODO: get default device */ PyObject *r = PyMicArray_NewFromDescr(-1, &PyMicArray_Type, PyMicArray_DESCR(op_array), 1, dims, NULL, data, NPY_ARRAY_WRITEABLE, NULL); return (PyMicArrayObject *)r; } /* * Call ufunc only on selected array items and store result in first operand. 
* For add ufunc, method call is equivalent to op1[idx] += op2 with no * buffering of the first operand. * Arguments: * op1 - First operand to ufunc * idx - Indices that are applied to first operand. Equivalent to op1[idx]. * op2 - Second operand to ufunc (if needed). Must be able to broadcast * over first operand. */ static PyObject * mufunc_at(PyUFuncObject *ufunc, PyObject *args) { //TODO return NULL; } //static: mic undefined symbol error if we set static here NPY_NO_EXPORT struct PyMethodDef mufunc_methods[] = { {"reduce", (PyCFunction)mufunc_reduce, METH_VARARGS | METH_KEYWORDS, NULL }, /*{"accumulate", (PyCFunction)mufunc_accumulate, METH_VARARGS | METH_KEYWORDS, NULL }, {"reduceat", (PyCFunction)mufunc_reduceat, METH_VARARGS | METH_KEYWORDS, NULL }, {"outer", (PyCFunction)mufunc_outer, METH_VARARGS | METH_KEYWORDS, NULL}, {"at", (PyCFunction)mufunc_at, METH_VARARGS, NULL},*/ {NULL, NULL, 0, NULL} /* sentinel */ }; /****************************************************************************** *** UFUNC GETSET *** *****************************************************************************/ /* construct the string y1,y2,...,yn */ static PyObject * _makeargs(int num, char *ltr, int null_if_none) { PyObject *str; int i; switch (num) { case 0: if (null_if_none) { return NULL; } return PyString_FromString(""); case 1: return PyString_FromString(ltr); } str = PyString_FromFormat("%s1, %s2", ltr, ltr); for (i = 3; i <= num; ++i) { PyString_ConcatAndDel(&str, PyString_FromFormat(", %s%d", ltr, i)); } return str; } static char _typecharfromnum(int num) { PyArray_Descr *descr; char ret; descr = PyArray_DescrFromType(num); ret = descr->type; Py_DECREF(descr); return ret; } static PyObject * ufunc_get_doc(PyUFuncObject *ufunc) { /* * Put docstring first or FindMethod finds it... 
could so some * introspection on name and nin + nout to automate the first part * of it the doc string shouldn't need the calling convention * construct name(x1, x2, ...,[ out1, out2, ...]) __doc__ */ PyObject *outargs, *inargs, *doc; outargs = _makeargs(ufunc->nout, "out", 1); inargs = _makeargs(ufunc->nin, "x", 0); if (ufunc->doc == NULL) { if (outargs == NULL) { doc = PyUString_FromFormat("%s(%s)\n\n", ufunc->name, PyString_AS_STRING(inargs)); } else { doc = PyUString_FromFormat("%s(%s[, %s])\n\n", ufunc->name, PyString_AS_STRING(inargs), PyString_AS_STRING(outargs)); Py_DECREF(outargs); } } else { if (outargs == NULL) { doc = PyUString_FromFormat("%s(%s)\n\n%s", ufunc->name, PyString_AS_STRING(inargs), ufunc->doc); } else { doc = PyUString_FromFormat("%s(%s[, %s])\n\n%s", ufunc->name, PyString_AS_STRING(inargs), PyString_AS_STRING(outargs), ufunc->doc); Py_DECREF(outargs); } } Py_DECREF(inargs); return doc; } static PyObject * ufunc_get_nin(PyUFuncObject *ufunc) { return PyInt_FromLong(ufunc->nin); } static PyObject * ufunc_get_nout(PyUFuncObject *ufunc) { return PyInt_FromLong(ufunc->nout); } static PyObject * ufunc_get_nargs(PyUFuncObject *ufunc) { return PyInt_FromLong(ufunc->nin + ufunc->nout); } static PyObject * ufunc_get_ntypes(PyUFuncObject *ufunc) { return PyInt_FromLong(ufunc->ntypes); } static PyObject * ufunc_get_types(PyUFuncObject *ufunc) { /* return a list with types grouped input->output */ PyObject *list; PyObject *str; int k, j, n, nt = ufunc->ntypes; int ni = ufunc->nin; int no = ufunc->nout; char *t; list = PyList_New(nt); if (list == NULL) { return NULL; } t = PyArray_malloc(no+ni+2); n = 0; for (k = 0; k < nt; k++) { for (j = 0; j<ni; j++) { t[j] = _typecharfromnum(ufunc->types[n]); n++; } t[ni] = '-'; t[ni+1] = '>'; for (j = 0; j < no; j++) { t[ni + 2 + j] = _typecharfromnum(ufunc->types[n]); n++; } str = PyUString_FromStringAndSize(t, no + ni + 2); PyList_SET_ITEM(list, k, str); } PyArray_free(t); return list; } static PyObject * 
ufunc_get_name(PyUFuncObject *ufunc) { return PyUString_FromString(ufunc->name); } static PyObject * ufunc_get_identity(PyUFuncObject *ufunc) { switch(ufunc->identity) { case PyUFunc_One: return PyInt_FromLong(1); case PyUFunc_Zero: return PyInt_FromLong(0); case PyUFunc_MinusOne: return PyInt_FromLong(-1); } Py_RETURN_NONE; } static PyObject * ufunc_get_signature(PyUFuncObject *ufunc) { if (!ufunc->core_enabled) { Py_RETURN_NONE; } return PyUString_FromString(ufunc->core_signature); } #undef _typecharfromnum /* * Docstring is now set from python * static char *Ufunctype__doc__ = NULL; */ //static: mic undefined symbol if we set static here NPY_NO_EXPORT PyGetSetDef mufunc_getset[] = { {"__doc__", (getter)ufunc_get_doc, NULL, NULL, NULL}, {"nin", (getter)ufunc_get_nin, NULL, NULL, NULL}, {"nout", (getter)ufunc_get_nout, NULL, NULL, NULL}, {"nargs", (getter)ufunc_get_nargs, NULL, NULL, NULL}, {"ntypes", (getter)ufunc_get_ntypes, NULL, NULL, NULL}, {"types", (getter)ufunc_get_types, NULL, NULL, NULL}, {"__name__", (getter)ufunc_get_name, NULL, NULL, NULL}, {"identity", (getter)ufunc_get_identity, NULL, NULL, NULL}, {"signature", (getter)ufunc_get_signature, NULL, NULL, NULL}, {NULL, NULL, NULL, NULL, NULL}, /* Sentinel */ }; /****************************************************************************** *** UFUNC TYPE OBJECT *** *****************************************************************************/ NPY_NO_EXPORT PyTypeObject PyMUFunc_Type = { #if defined(NPY_PY3K) PyVarObject_HEAD_INIT(NULL, 0) #else PyObject_HEAD_INIT(NULL) 0, /* ob_size */ #endif "micpy.mufunc", /* tp_name */ sizeof(PyUFuncObject), /* tp_basicsize */ 0, /* tp_itemsize */ /* methods */ (destructor)mufunc_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ #if defined(NPY_PY3K) 0, /* tp_reserved */ #else 0, /* tp_compare */ #endif (reprfunc)mufunc_repr, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 
(ternaryfunc)mufunc_generic_call, /* tp_call */ (reprfunc)mufunc_repr, /* tp_str */ 0, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT, /* tp_flags */ 0, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ mufunc_methods, /* tp_methods */ 0, /* tp_members */ mufunc_getset, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ 0, /* tp_init */ 0, /* tp_alloc */ 0, /* tp_new */ 0, /* tp_free */ 0, /* tp_is_gc */ 0, /* tp_bases */ 0, /* tp_mro */ 0, /* tp_cache */ 0, /* tp_subclasses */ 0, /* tp_weaklist */ 0, /* tp_del */ 0, /* tp_version_tag */ }; /* End of code for ufunc objects */
GB_binop__remainder_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__remainder_fp32) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__remainder_fp32) // A.*B function (eWiseMult): GB (_AemultB_03__remainder_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__remainder_fp32) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((node)) // C+=B function (dense accum): GB (_Cdense_accumB__remainder_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__remainder_fp32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__remainder_fp32) // C=scalar+B GB (_bind1st__remainder_fp32) // C=scalar+B' GB (_bind1st_tran__remainder_fp32) // C=A+scalar GB (_bind2nd__remainder_fp32) // C=A'+scalar GB (_bind2nd_tran__remainder_fp32) // C type: float // A type: float // B,b type: float // BinaryOp: cij = remainderf (aij, bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define 
GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ float bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = remainderf (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_REMAINDER || GxB_NO_FP32 || GxB_NO_REMAINDER_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__remainder_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__remainder_fp32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__remainder_fp32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((node)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__remainder_fp32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__remainder_fp32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict 
C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__remainder_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__remainder_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__remainder_fp32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__remainder_fp32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; float bij = Bx [p] ; Cx [p] = remainderf (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__remainder_fp32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = Ax [p] ; Cx [p] = remainderf (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = remainderf (x, aij) ; \ } GrB_Info GB (_bind1st_tran__remainder_fp32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = remainderf (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__remainder_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
solver.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <malloc.h>

#include "litiv/3rdparty/ofdis/fdf/image.h"
#include "litiv/3rdparty/ofdis/fdf/solver.h"
#include "litiv/utils/defines.hpp" // only used here for compiler flags

#if defined(_MSC_VER)
#include <intrin.h>
#else //(!defined(_MSC_VER))
#include <x86intrin.h>
#endif //(!defined(_MSC_VER))

// 4-lane single-precision SSE vector type (GCC/Clang builtin).
typedef __v4sf v4sf;

//THIS IS A SLOW VERSION BUT READABLE
//Perform n iterations of the sor_coupled algorithm
//du and dv are used as initial guesses
//The system form is the same as in opticalflow.c
//
// Solves the coupled 2x2-block linear system arising from the variational
// optical-flow energy, by SOR relaxation with factor `omega`.  The
// off-diagonal smoothness couplings (dpsis_horiz/dpsis_vert) link each pixel
// to its 4-neighborhood.  NOTE: updates are Gauss-Seidel in place, so the
// dv update below reads the du value just written for the same pixel.
void sor_coupled_slow_but_readable(image_t *du, image_t *dv, image_t *a11, image_t *a12, image_t *a22, const image_t *b1, const image_t *b2, const image_t *dpsis_horiz, const image_t *dpsis_vert, const int iterations, const float omega)
{
    int i,j,iter;
    for(iter = 0 ; iter<iterations ; iter++)
    {
#if USE_OPENMP
#pragma omp parallel for
#endif //USE_OPENMP
        // NOTE(review): rows are relaxed in parallel here, so neighbor rows may
        // hold values from either the previous or current sweep ("chaotic" SOR);
        // presumably acceptable for this solver — confirm against upstream.
        for(j=0 ; j<du->height ; j++)
        {
            float sigma_u,sigma_v,sum_dpsis,A11,A22,A12,B1,B2;//,det;
            for(i=0 ; i<du->width ; i++)
            {
                sigma_u = 0.0f;
                sigma_v = 0.0f;
                sum_dpsis = 0.0f;
                // accumulate smoothness contributions from the 4 neighbors
                // (skipping those outside the image)
                if(j>0)
                {
                    sigma_u -= dpsis_vert->c1[(j-1)*du->stride+i]*du->c1[(j-1)*du->stride+i];
                    sigma_v -= dpsis_vert->c1[(j-1)*du->stride+i]*dv->c1[(j-1)*du->stride+i];
                    sum_dpsis += dpsis_vert->c1[(j-1)*du->stride+i];
                }
                if(i>0)
                {
                    sigma_u -= dpsis_horiz->c1[j*du->stride+i-1]*du->c1[j*du->stride+i-1];
                    sigma_v -= dpsis_horiz->c1[j*du->stride+i-1]*dv->c1[j*du->stride+i-1];
                    sum_dpsis += dpsis_horiz->c1[j*du->stride+i-1];
                }
                if(j<du->height-1)
                {
                    sigma_u -= dpsis_vert->c1[j*du->stride+i]*du->c1[(j+1)*du->stride+i];
                    sigma_v -= dpsis_vert->c1[j*du->stride+i]*dv->c1[(j+1)*du->stride+i];
                    sum_dpsis += dpsis_vert->c1[j*du->stride+i];
                }
                if(i<du->width-1)
                {
                    sigma_u -= dpsis_horiz->c1[j*du->stride+i]*du->c1[j*du->stride+i+1];
                    sigma_v -= dpsis_horiz->c1[j*du->stride+i]*dv->c1[j*du->stride+i+1];
                    sum_dpsis += dpsis_horiz->c1[j*du->stride+i];
                }
                A11 = a11->c1[j*du->stride+i]+sum_dpsis;
                A12 = a12->c1[j*du->stride+i];
                A22 = a22->c1[j*du->stride+i]+sum_dpsis;
                //det = A11*A22-A12*A12;
                B1 = b1->c1[j*du->stride+i]-sigma_u;
                B2 = b2->c1[j*du->stride+i]-sigma_v;
                // Exact 2x2 solve (disabled) — the active code relaxes u and v
                // one at a time instead of inverting the 2x2 block:
                // du->c1[j*du->stride+i] = (1.0f-omega)*du->c1[j*du->stride+i] +omega*( A22*B1-A12*B2)/det;
                // dv->c1[j*du->stride+i] = (1.0f-omega)*dv->c1[j*du->stride+i] +omega*(-A12*B1+A11*B2)/det;
                du->c1[j*du->stride+i] = (1.0f-omega)*du->c1[j*du->stride+i] + omega/A11 *(B1 - A12* dv->c1[j*du->stride+i] );
                dv->c1[j*du->stride+i] = (1.0f-omega)*dv->c1[j*du->stride+i] + omega/A22 *(B2 - A12* du->c1[j*du->stride+i] );
            }
        }
    }
}

// THIS IS A FASTER VERSION BUT UNREADABLE, ONLY OPTICAL FLOW WITHOUT OPENMP PARALLELIZATION
// the first iteration is separated from the other to compute the inverse of the 2x2 block diagonal
// each iteration is split in two first line / middle lines / last line, and the left block is computed separately on each line
//
// SSE version: processes 4 pixels at a time.  During the first iteration the
// a11/a12/a22 images are overwritten IN PLACE with the elements of the
// inverted 2x2 diagonal blocks; subsequent iterations reuse those inverses.
// f1/f2/f3 are 16-byte-aligned scratch rows holding the left-shifted
// horizontal weights and right-neighbor du/dv values.
void sor_coupled(image_t *du, image_t *dv, image_t *a11, image_t *a12, image_t *a22, const image_t *b1, const image_t *b2, const image_t *dpsis_horiz, const image_t *dpsis_vert, const int iterations, const float omega){
    // NOTE(review): the line below is reconstructed as a single commented-out
    // debug leftover (original line breaks were lost); if the trailing
    // "return;" were live code this function would be a no-op — confirm
    // against the upstream ofdis/DeepFlow solver.c.
    //sor_coupled_slow(du,dv,a11,a12,a22,b1,b2,dpsis_horiz,dpsis_vert,iterations,omega); return; printf("test\n");
    if(du->width<2 || du->height<2 || iterations < 1){
        // image too small for the vectorized edge handling: fall back
        sor_coupled_slow_but_readable(du,dv,a11,a12,a22,b1,b2,dpsis_horiz,dpsis_vert,iterations,omega);
        return;
    }

    const int stride = du->stride, width = du->width;
    const int iterheight = du->height-1, iterline = (stride)/4, width_minus_1_sizeoffloat = sizeof(float)*(width-1);
    int j,iter,i,k;
    // three aligned scratch rows (freed at the end of the function)
    float *floatarray = (float*) memalign(16, stride*sizeof(float)*3);
    if(floatarray==NULL){
        fprintf(stderr, "error in sor_coupled(): not enough memory\n");
        exit(1);
    }
    float *f1 = floatarray;      // horizontal weights shifted right by one
    float *f2 = f1+stride;       // du of right neighbors
    float *f3 = f2+stride;       // dv of right neighbors
    f1[0] = 0.0f;
    // zero the padding past the image width so vector lanes read zeros
    memset(&f1[width], 0, sizeof(float)*(stride-width));
    memset(&f2[width-1], 0, sizeof(float)*(stride-width+1));
    memset(&f3[width-1], 0, sizeof(float)*(stride-width+1));

    { // first iteration: also inverts the 2x2 diagonal blocks in place
        v4sf *a11p = (v4sf*) a11->c1, *a12p = (v4sf*) a12->c1, *a22p = (v4sf*) a22->c1, *b1p = (v4sf*) b1->c1, *b2p = (v4sf*) b2->c1, *hp = (v4sf*) dpsis_horiz->c1, *vp = (v4sf*) dpsis_vert->c1;
        float *du_ptr = du->c1, *dv_ptr = dv->c1;
        v4sf *dub = (v4sf*) (du_ptr+stride), *dvb = (v4sf*) (dv_ptr+stride);   // row below

        { // first iteration - first line (no row above: no vpt/dut/dvt terms)
            memcpy(f1+1, ((float*) hp), width_minus_1_sizeoffloat);
            memcpy(f2, du_ptr+1, width_minus_1_sizeoffloat);
            memcpy(f3, dv_ptr+1, width_minus_1_sizeoffloat);
            v4sf* hpl = (v4sf*) f1, *dur = (v4sf*) f2, *dvr = (v4sf*) f3;
            { // left block (lane 0 has no left neighbor)
                // reverse 2x2 diagonal block
                const v4sf dpsis = (*hpl) + (*hp) + (*vp);
                const v4sf A11 = (*a22p)+dpsis, A22 = (*a11p)+dpsis;
                const v4sf det = A11*A22 - (*a12p)*(*a12p);
                *a11p = A11/det;
                *a22p = A22/det;
                *a12p /= -det;
                // do one iteration
                const v4sf s1 = (*hp)*(*dur) + (*vp)*(*dub) + (*b1p);
                const v4sf s2 = (*hp)*(*dvr) + (*vp)*(*dvb) + (*b2p);
                du_ptr[0] += omega*( a11p[0][0]*s1[0] + a12p[0][0]*s2[0] - du_ptr[0] );
                dv_ptr[0] += omega*( a12p[0][0]*s1[0] + a22p[0][0]*s2[0] - dv_ptr[0] );
                for(k=1;k<4;k++){
                    const float B1 = hpl[0][k]*du_ptr[k-1] + s1[k];
                    const float B2 = hpl[0][k]*dv_ptr[k-1] + s2[k];
                    du_ptr[k] += omega*( a11p[0][k]*B1 + a12p[0][k]*B2 - du_ptr[k] );
                    dv_ptr[k] += omega*( a12p[0][k]*B1 + a22p[0][k]*B2 - dv_ptr[k] );
                }
                // increment pointer
                hpl+=1; hp+=1; vp+=1; a11p+=1; a12p+=1; a22p+=1; dur+=1; dvr+=1; dub+=1; dvb +=1; b1p+=1; b2p+=1;
                du_ptr += 4; dv_ptr += 4;
            }
            for(i=iterline;--i;){   // remaining vector blocks of the line
                // reverse 2x2 diagonal block
                const v4sf dpsis = (*hpl) + (*hp) + (*vp);
                const v4sf A11 = (*a22p)+dpsis, A22 = (*a11p)+dpsis;
                const v4sf det = A11*A22 - (*a12p)*(*a12p);
                *a11p = A11/det;
                *a22p = A22/det;
                *a12p /= -det;
                // do one iteration
                const v4sf s1 = (*hp)*(*dur) + (*vp)*(*dub) + (*b1p);
                const v4sf s2 = (*hp)*(*dvr) + (*vp)*(*dvb) + (*b2p);
                for(k=0;k<4;k++){
                    // du_ptr[k-1] reaches into the previous block: sequential update
                    const float B1 = hpl[0][k]*du_ptr[k-1] + s1[k];
                    const float B2 = hpl[0][k]*dv_ptr[k-1] + s2[k];
                    du_ptr[k] += omega*( a11p[0][k]*B1 + a12p[0][k]*B2 - du_ptr[k] );
                    dv_ptr[k] += omega*( a12p[0][k]*B1 + a22p[0][k]*B2 - dv_ptr[k] );
                }
                // increment pointer
                hpl+=1; hp+=1; vp+=1; a11p+=1; a12p+=1; a22p+=1; dur+=1; dvr+=1; dub+=1; dvb +=1; b1p+=1; b2p+=1;
                du_ptr += 4; dv_ptr += 4;
            }
        }

        v4sf *vpt = (v4sf*) dpsis_vert->c1;                      // weights to the row above
        v4sf *dut = (v4sf*) du->c1, *dvt = (v4sf*) dv->c1;       // row above
        for(j=iterheight;--j;){ // first iteration - middle lines
            memcpy(f1+1, ((float*) hp), width_minus_1_sizeoffloat);
            memcpy(f2, du_ptr+1, width_minus_1_sizeoffloat);
            memcpy(f3, dv_ptr+1, width_minus_1_sizeoffloat);
            v4sf* hpl = (v4sf*) f1, *dur = (v4sf*) f2, *dvr = (v4sf*) f3;
            { // left block
                // reverse 2x2 diagonal block
                const v4sf dpsis = (*hpl) + (*hp) + (*vpt) + (*vp);
                const v4sf A11 = (*a22p)+dpsis, A22 = (*a11p)+dpsis;
                const v4sf det = A11*A22 - (*a12p)*(*a12p);
                *a11p = A11/det;
                *a22p = A22/det;
                *a12p /= -det;
                // do one iteration
                const v4sf s1 = (*hp)*(*dur) + (*vpt)*(*dut) + (*vp)*(*dub) + (*b1p);
                const v4sf s2 = (*hp)*(*dvr) + (*vpt)*(*dvt) + (*vp)*(*dvb) + (*b2p);
                du_ptr[0] += omega*( a11p[0][0]*s1[0] + a12p[0][0]*s2[0] - du_ptr[0] );
                dv_ptr[0] += omega*( a12p[0][0]*s1[0] + a22p[0][0]*s2[0] - dv_ptr[0] );
                for(k=1;k<4;k++){
                    const float B1 = hpl[0][k]*du_ptr[k-1] + s1[k];
                    const float B2 = hpl[0][k]*dv_ptr[k-1] + s2[k];
                    du_ptr[k] += omega*( a11p[0][k]*B1 + a12p[0][k]*B2 - du_ptr[k] );
                    dv_ptr[k] += omega*( a12p[0][k]*B1 + a22p[0][k]*B2 - dv_ptr[k] );
                }
                // increment pointer
                hpl+=1; hp+=1; vpt+=1; vp+=1; a11p+=1; a12p+=1; a22p+=1; dur+=1; dvr+=1; dut+=1; dvt+=1; dub+=1; dvb +=1; b1p+=1; b2p+=1;
                du_ptr += 4; dv_ptr += 4;
            }
            for(i=iterline;--i;){
                // reverse 2x2 diagonal block
                const v4sf dpsis = (*hpl) + (*hp) + (*vpt) + (*vp);
                const v4sf A11 = (*a22p)+dpsis, A22 = (*a11p)+dpsis;
                const v4sf det = A11*A22 - (*a12p)*(*a12p);
                *a11p = A11/det;
                *a22p = A22/det;
                *a12p /= -det;
                // do one iteration
                const v4sf s1 = (*hp)*(*dur) + (*vpt)*(*dut) + (*vp)*(*dub) + (*b1p);
                const v4sf s2 = (*hp)*(*dvr) + (*vpt)*(*dvt) + (*vp)*(*dvb) + (*b2p);
                for(k=0;k<4;k++){
                    const float B1 = hpl[0][k]*du_ptr[k-1] + s1[k];
                    const float B2 = hpl[0][k]*dv_ptr[k-1] + s2[k];
                    du_ptr[k] += omega*( a11p[0][k]*B1 + a12p[0][k]*B2 - du_ptr[k] );
                    dv_ptr[k] += omega*( a12p[0][k]*B1 + a22p[0][k]*B2 - dv_ptr[k] );
                }
                // increment pointer
                hpl+=1; hp+=1; vpt+=1; vp+=1; a11p+=1; a12p+=1; a22p+=1; dur+=1; dvr+=1; dut+=1; dvt+=1; dub+=1; dvb +=1; b1p+=1; b2p+=1;
                du_ptr += 4; dv_ptr += 4;
            }
        }

        { // first iteration - last line (no row below: no vp/dub/dvb terms)
            memcpy(f1+1, ((float*) hp), width_minus_1_sizeoffloat);
            memcpy(f2, du_ptr+1, width_minus_1_sizeoffloat);
            memcpy(f3, dv_ptr+1, width_minus_1_sizeoffloat);
            v4sf* hpl = (v4sf*) f1, *dur = (v4sf*) f2, *dvr = (v4sf*) f3;
            { // left block
                // reverse 2x2 diagonal block
                const v4sf dpsis = (*hpl) + (*hp) + (*vpt);
                const v4sf A11 = (*a22p)+dpsis, A22 = (*a11p)+dpsis;
                const v4sf det = A11*A22 - (*a12p)*(*a12p);
                *a11p = A11/det;
                *a22p = A22/det;
                *a12p /= -det;
                // do one iteration
                const v4sf s1 = (*hp)*(*dur) + (*vpt)*(*dut) + (*b1p);
                const v4sf s2 = (*hp)*(*dvr) + (*vpt)*(*dvt) + (*b2p);
                du_ptr[0] += omega*( a11p[0][0]*s1[0] + a12p[0][0]*s2[0] - du_ptr[0] );
                dv_ptr[0] += omega*( a12p[0][0]*s1[0] + a22p[0][0]*s2[0] - dv_ptr[0] );
                for(k=1;k<4;k++){
                    const float B1 = hpl[0][k]*du_ptr[k-1] + s1[k];
                    const float B2 = hpl[0][k]*dv_ptr[k-1] + s2[k];
                    du_ptr[k] += omega*( a11p[0][k]*B1 + a12p[0][k]*B2 - du_ptr[k] );
                    dv_ptr[k] += omega*( a12p[0][k]*B1 + a22p[0][k]*B2 - dv_ptr[k] );
                }
                // increment pointer
                hpl+=1; hp+=1; vpt+=1; a11p+=1; a12p+=1; a22p+=1; dur+=1; dvr+=1; dut+=1; dvt+=1; b1p+=1; b2p+=1;
                du_ptr += 4; dv_ptr += 4;
            }
            for(i=iterline;--i;){
                // reverse 2x2 diagonal block
                const v4sf dpsis = (*hpl) + (*hp) + (*vpt);
                const v4sf A11 = (*a22p)+dpsis, A22 = (*a11p)+dpsis;
                const v4sf det = A11*A22 - (*a12p)*(*a12p);
                *a11p = A11/det;
                *a22p = A22/det;
                *a12p /= -det;
                // do one iteration
                const v4sf s1 = (*hp)*(*dur) + (*vpt)*(*dut) + (*b1p);
                const v4sf s2 = (*hp)*(*dvr) + (*vpt)*(*dvt) + (*b2p);
                for(k=0;k<4;k++){
                    const float B1 = hpl[0][k]*du_ptr[k-1] + s1[k];
                    const float B2 = hpl[0][k]*dv_ptr[k-1] + s2[k];
                    du_ptr[k] += omega*( a11p[0][k]*B1 + a12p[0][k]*B2 - du_ptr[k] );
                    dv_ptr[k] += omega*( a12p[0][k]*B1 + a22p[0][k]*B2 - dv_ptr[k] );
                }
                // increment pointer
                hpl+=1; hp+=1; vpt+=1; a11p+=1; a12p+=1; a22p+=1; dur+=1; dvr+=1; dut+=1; dvt+=1; b1p+=1; b2p+=1;
                du_ptr += 4; dv_ptr += 4;
            }
        }
    }

    for(iter=iterations;--iter;) // other iterations (blocks already inverted)
    {
        v4sf *a11p = (v4sf*) a11->c1, *a12p = (v4sf*) a12->c1, *a22p = (v4sf*) a22->c1, *b1p = (v4sf*) b1->c1, *b2p = (v4sf*) b2->c1, *hp = (v4sf*) dpsis_horiz->c1, *vp = (v4sf*) dpsis_vert->c1;
        float *du_ptr = du->c1, *dv_ptr = dv->c1;
        v4sf *dub = (v4sf*) (du_ptr+stride), *dvb = (v4sf*) (dv_ptr+stride);

        { // other iteration - first line
            memcpy(f1+1, ((float*) hp), width_minus_1_sizeoffloat);
            memcpy(f2, du_ptr+1, width_minus_1_sizeoffloat);
            memcpy(f3, dv_ptr+1, width_minus_1_sizeoffloat);
            v4sf* hpl = (v4sf*) f1, *dur = (v4sf*) f2, *dvr = (v4sf*) f3;
            { // left block
                // do one iteration
                const v4sf s1 = (*hp)*(*dur) + (*vp)*(*dub) + (*b1p);
                const v4sf s2 = (*hp)*(*dvr) + (*vp)*(*dvb) + (*b2p);
                du_ptr[0] += omega*( a11p[0][0]*s1[0] + a12p[0][0]*s2[0] - du_ptr[0] );
                dv_ptr[0] += omega*( a12p[0][0]*s1[0] + a22p[0][0]*s2[0] - dv_ptr[0] );
                for(k=1;k<4;k++){
                    const float B1 = hpl[0][k]*du_ptr[k-1] + s1[k];
                    const float B2 = hpl[0][k]*dv_ptr[k-1] + s2[k];
                    du_ptr[k] += omega*( a11p[0][k]*B1 + a12p[0][k]*B2 - du_ptr[k] );
                    dv_ptr[k] += omega*( a12p[0][k]*B1 + a22p[0][k]*B2 - dv_ptr[k] );
                }
                // increment pointer
                hpl+=1; hp+=1; vp+=1; a11p+=1; a12p+=1; a22p+=1; dur+=1; dvr+=1; dub+=1; dvb +=1; b1p+=1; b2p+=1;
                du_ptr += 4; dv_ptr += 4;
            }
            for(i=iterline;--i;){
                // do one iteration
                const v4sf s1 = (*hp)*(*dur) + (*vp)*(*dub) + (*b1p);
                const v4sf s2 = (*hp)*(*dvr) + (*vp)*(*dvb) + (*b2p);
                for(k=0;k<4;k++){
                    const float B1 = hpl[0][k]*du_ptr[k-1] + s1[k];
                    const float B2 = hpl[0][k]*dv_ptr[k-1] + s2[k];
                    du_ptr[k] += omega*( a11p[0][k]*B1 + a12p[0][k]*B2 - du_ptr[k] );
                    dv_ptr[k] += omega*( a12p[0][k]*B1 + a22p[0][k]*B2 - dv_ptr[k] );
                }
                // increment pointer
                hpl+=1; hp+=1; vp+=1; a11p+=1; a12p+=1; a22p+=1; dur+=1; dvr+=1; dub+=1; dvb +=1; b1p+=1; b2p+=1;
                du_ptr += 4; dv_ptr += 4;
            }
        }

        v4sf *vpt = (v4sf*) dpsis_vert->c1;
        v4sf *dut = (v4sf*) du->c1, *dvt = (v4sf*) dv->c1;
        for(j=iterheight;--j;)
        { // other iteration - middle lines
            memcpy(f1+1, ((float*) hp), width_minus_1_sizeoffloat);
            memcpy(f2, du_ptr+1, width_minus_1_sizeoffloat);
            memcpy(f3, dv_ptr+1, width_minus_1_sizeoffloat);
            v4sf* hpl = (v4sf*) f1, *dur = (v4sf*) f2, *dvr = (v4sf*) f3;
            { // left block
                // do one iteration
                const v4sf s1 = (*hp)*(*dur) + (*vpt)*(*dut) + (*vp)*(*dub) + (*b1p);
                const v4sf s2 = (*hp)*(*dvr) + (*vpt)*(*dvt) + (*vp)*(*dvb) + (*b2p);
                du_ptr[0] += omega*( a11p[0][0]*s1[0] + a12p[0][0]*s2[0] - du_ptr[0] );
                dv_ptr[0] += omega*( a12p[0][0]*s1[0] + a22p[0][0]*s2[0] - dv_ptr[0] );
                for(k=1;k<4;k++)
                {
                    const float B1 = hpl[0][k]*du_ptr[k-1] + s1[k];
                    const float B2 = hpl[0][k]*dv_ptr[k-1] + s2[k];
                    du_ptr[k] += omega*( a11p[0][k]*B1 + a12p[0][k]*B2 - du_ptr[k] );
                    dv_ptr[k] += omega*( a12p[0][k]*B1 + a22p[0][k]*B2 - dv_ptr[k] );
                }
                // increment pointer
                hpl+=1; hp+=1; vpt+=1; vp+=1; a11p+=1; a12p+=1; a22p+=1; dur+=1; dvr+=1; dut+=1; dvt+=1; dub+=1; dvb +=1; b1p+=1; b2p+=1;
                du_ptr += 4; dv_ptr += 4;
            }
            for(i=iterline; --i;)
            {
                // do one iteration
                const v4sf s1 = (*hp)*(*dur) + (*vpt)*(*dut) + (*vp)*(*dub) + (*b1p);
                const v4sf s2 = (*hp)*(*dvr) + (*vpt)*(*dvt) + (*vp)*(*dvb) + (*b2p);
                for(k=0;k<4;k++)
                {
                    const float B1 = hpl[0][k]*du_ptr[k-1] + s1[k];
                    const float B2 = hpl[0][k]*dv_ptr[k-1] + s2[k];
                    du_ptr[k] += omega*( a11p[0][k]*B1 + a12p[0][k]*B2 - du_ptr[k] );
                    dv_ptr[k] += omega*( a12p[0][k]*B1 + a22p[0][k]*B2 - dv_ptr[k] );
                }
                // increment pointer
                hpl+=1; hp+=1; vpt+=1; vp+=1; a11p+=1; a12p+=1; a22p+=1; dur+=1; dvr+=1; dut+=1; dvt+=1; dub+=1; dvb +=1; b1p+=1; b2p+=1;
                du_ptr += 4; dv_ptr += 4;
            }
        }

        { // other iteration - last line
            memcpy(f1+1, ((float*) hp), width_minus_1_sizeoffloat);
            memcpy(f2, du_ptr+1, width_minus_1_sizeoffloat);
            memcpy(f3, dv_ptr+1, width_minus_1_sizeoffloat);
            v4sf* hpl = (v4sf*) f1, *dur = (v4sf*) f2, *dvr = (v4sf*) f3;
            { // left block
                // do one iteration
                const v4sf s1 = (*hp)*(*dur) + (*vpt)*(*dut) + (*b1p);
                const v4sf s2 = (*hp)*(*dvr) + (*vpt)*(*dvt) + (*b2p);
                du_ptr[0] += omega*( a11p[0][0]*s1[0] + a12p[0][0]*s2[0] - du_ptr[0] );
                dv_ptr[0] += omega*( a12p[0][0]*s1[0] + a22p[0][0]*s2[0] - dv_ptr[0] );
                for(k=1;k<4;k++){
                    const float B1 = hpl[0][k]*du_ptr[k-1] + s1[k];
                    const float B2 = hpl[0][k]*dv_ptr[k-1] + s2[k];
                    du_ptr[k] += omega*( a11p[0][k]*B1 + a12p[0][k]*B2 - du_ptr[k] );
                    dv_ptr[k] += omega*( a12p[0][k]*B1 + a22p[0][k]*B2 - dv_ptr[k] );
                }
                // increment pointer
                hpl+=1; hp+=1; vpt+=1; a11p+=1; a12p+=1; a22p+=1; dur+=1; dvr+=1; dut+=1; dvt+=1; b1p+=1; b2p+=1;
                du_ptr += 4; dv_ptr += 4;
            }
            for(i=iterline;--i;){
                // do one iteration
                const v4sf s1 = (*hp)*(*dur) + (*vpt)*(*dut) + (*b1p);
                const v4sf s2 = (*hp)*(*dvr) + (*vpt)*(*dvt) + (*b2p);
                for(k=0;k<4;k++){
                    const float B1 = hpl[0][k]*du_ptr[k-1] + s1[k];
                    const float B2 = hpl[0][k]*dv_ptr[k-1] + s2[k];
                    du_ptr[k] += omega*( a11p[0][k]*B1 + a12p[0][k]*B2 - du_ptr[k] );
                    dv_ptr[k] += omega*( a12p[0][k]*B1 + a22p[0][k]*B2 - dv_ptr[k] );
                }
                // increment pointer
                hpl+=1; hp+=1; vpt+=1; a11p+=1; a12p+=1; a22p+=1; dur+=1; dvr+=1; dut+=1; dvt+=1; b1p+=1; b2p+=1;
                du_ptr += 4; dv_ptr += 4;
            }
        }
    }

    free(floatarray);
}

//THIS IS A SLOW VERSION BUT READABLE
//Perform n iterations of the sor_coupled algorithm
//du is used as initial guesses
//The system form is the same as in opticalflow.c
//
// Single-channel variant (depth/disparity estimation): same SOR relaxation
// as sor_coupled_slow_but_readable but with only one unknown per pixel.
void sor_coupled_slow_but_readable_DE(image_t *du, const image_t *a11, const image_t *b1, const image_t *dpsis_horiz, const image_t *dpsis_vert, const int iterations, const float omega)
{
    int i,j,iter;
    for(iter = 0 ; iter<iterations ; iter++)
    {
#if USE_OPENMP
#pragma omp parallel for
#endif //USE_OPENMP
        for(j=0 ; j<du->height ; j++)
        {
            float sigma_u,sum_dpsis,A11,B1;
            for(i=0 ; i<du->width ; i++){
                sigma_u = 0.0f;
                sum_dpsis = 0.0f;
                // accumulate 4-neighborhood smoothness terms
                if(j>0)
                {
                    sigma_u -= dpsis_vert->c1[(j-1)*du->stride+i]*du->c1[(j-1)*du->stride+i];
                    sum_dpsis += dpsis_vert->c1[(j-1)*du->stride+i];
                }
                if(i>0)
                {
                    sigma_u -= dpsis_horiz->c1[j*du->stride+i-1]*du->c1[j*du->stride+i-1];
                    sum_dpsis += dpsis_horiz->c1[j*du->stride+i-1];
                }
                if(j<du->height-1)
                {
                    sigma_u -= dpsis_vert->c1[j*du->stride+i]*du->c1[(j+1)*du->stride+i];
                    sum_dpsis += dpsis_vert->c1[j*du->stride+i];
                }
                if(i<du->width-1)
                {
                    sigma_u -= dpsis_horiz->c1[j*du->stride+i]*du->c1[j*du->stride+i+1];
                    sum_dpsis += dpsis_horiz->c1[j*du->stride+i];
                }
                A11 = a11->c1[j*du->stride+i]+sum_dpsis;
                B1 = b1->c1[j*du->stride+i]-sigma_u;
                du->c1[j*du->stride+i] = (1.0f-omega)*du->c1[j*du->stride+i] +omega*( B1/A11 );
            }
        }
    }
}
rnn_impl.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
/*!
 * Copyright (c) 2015 by Contributors
 * \file rnn_impl.h
 * \brief CPU reference kernels for LSTM and GRU recurrent layers.
 * \author Shu Zhang
 */
#ifndef MXNET_OPERATOR_RNN_IMPL_H_
#define MXNET_OPERATOR_RNN_IMPL_H_

#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <algorithm>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include "./math.h"
#include "./math_functions-inl.h"
#include "./operator_common.h"
#include "./mshadow_op.h"
#include "./linalg.h"

namespace mxnet {
namespace op {

// Logistic sigmoid: 1 / (1 + e^-x).
template<typename DType>
inline DType sigmoid(DType x) {
  return 1.0f / (1.0f + exp(-x));
}

// Forward pass of one LSTM direction, training mode: in addition to the
// layer output y, the per-step cell states `c` and gate activations `ifgo`
// are written into the reserved space `rs` for use by the backward pass.
//   ws:  workspace holding yx_flat [T*N, 4H], yh_flat [N, 4H], h [N, H]
//   rs:  reserved space; the reverse direction (bid) uses the second half
//   T/N/I/H: sequence length / batch / input width / hidden width
//   y:   output [T, N, D*H]; reverse direction writes at column offset H
//   hy_ptr/cy_ptr: final hidden/cell state, written only if state_outputs
template<typename DType>
void LstmForwardTrainingSingleLayer(DType* ws,
                                    DType* rs,
                                    bool state_outputs,
                                    bool bid,
                                    const int T,
                                    const int N,
                                    const int I,
                                    const int H,
                                    const Tensor<cpu, 2, DType> &x,
                                    const Tensor<cpu, 2, DType> &hx,
                                    const Tensor<cpu, 2, DType> &cx,
                                    const Tensor<cpu, 3, DType> &y,
                                    DType* w_ptr,
                                    DType* b_ptr,
                                    DType* hy_ptr,
                                    DType* cy_ptr) {
  using namespace mshadow;
  // Weight/bias views over the packed parameter blob (gate order: i, f, g, o).
  const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
  const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H));
  const Tensor<cpu, 2, DType> bx(b_ptr, Shape2(4, H));
  const Tensor<cpu, 2, DType> bh(b_ptr + H * 4, Shape2(4, H));
  const Tensor<cpu, 2, DType> yx_flat(ws, Shape2(T * N, 4 * H));
  const Tensor<cpu, 2, DType> yh_flat(ws + T * N * H * 4, Shape2(N, 4 * H));
  const Tensor<cpu, 4, DType> yx(yx_flat.dptr_, Shape4(T, N, 4, H));
  const Tensor<cpu, 3, DType> yh(yh_flat.dptr_, Shape3(N, 4, H));
  Tensor<cpu, 2, DType> h(yh_flat.dptr_ + N * H * 4, Shape2(N, H));
  // Reverse direction saves its activations into the second half of rs.
  DType *c_ptr = bid ? rs + T * N * H * 7 : rs;
  Tensor<cpu, 3, DType> c(c_ptr, Shape3(T, N, H));
  Tensor<cpu, 4, DType> ifgo(c_ptr + T * N * H, Shape4(T, N, H, 4));
  const int offset = bid ? H : 0;
  const DType alpha = 1.0;
  const DType beta = 0.0;
  const int cell_size = N * H;
  // Input projection for all time steps at once: yx_flat = x * wx^T.
  linalg_gemm(x, wx, yx_flat, alpha, beta, false, true);
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (int i = 0; i < T; ++i) {
    int t = bid ? T - 1 - i : i;  // reverse direction walks time backwards
    // Recurrent projection for this step: yh_flat = h_{t-1} * wh^T.
    linalg_gemm(i ? h : hx, wh, yh_flat, alpha, beta, false, true);
    #pragma omp parallel for num_threads(omp_threads)
    for (int jk = 0; jk < cell_size; ++jk) {
      int j = jk / H;
      int k = jk % H;
      DType it = sigmoid<DType>(yx[t][j][0][k] + yh[j][0][k] + bx[0][k] + bh[0][k]);
      DType ft = sigmoid<DType>(yx[t][j][1][k] + yh[j][1][k] + bx[1][k] + bh[1][k]);
      DType gt = tanh(yx[t][j][2][k] + yh[j][2][k] + bx[2][k] + bh[2][k]);
      DType ot = sigmoid<DType>(yx[t][j][3][k] + yh[j][3][k] + bx[3][k] + bh[3][k]);
      DType ct = (i ? c[i-1][j][k] : cx[j][k]) * ft + it * gt;
      DType ht = ot * tanh(ct);
      h[j][k] = ht;
      // reserve activations needed by the backward pass
      y[t][j][k + offset] = ht;
      c[i][j][k] = ct;
      ifgo[i][j][k][0] = it;
      ifgo[i][j][k][1] = ft;
      ifgo[i][j][k][2] = gt;
      ifgo[i][j][k][3] = ot;
      if (i == T - 1 && state_outputs) {
        hy_ptr[jk] = ht;
        cy_ptr[jk] = ct;
      }
    }
  }
}

// Multi-layer (optionally bidirectional) LSTM forward, training mode.
// Each layer's output y lives inside the reserved space and feeds the next
// layer; the last layer's output is copied to y_ptr at the end.
template <typename DType>
void LstmForwardTraining(DType* ws,
                         DType* rs,
                         bool state_outputs,
                         const int L,
                         const int D,
                         const int T,
                         const int N,
                         const int I,
                         const int H,
                         DType* x_ptr,
                         DType* hx_ptr,
                         DType* cx_ptr,
                         DType* w_ptr,
                         DType* b_ptr,
                         DType* y_ptr,
                         DType* hy_ptr,
                         DType* cy_ptr) {
  const int total_layers = D * L;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
  const int b_size = 2 * H * 4;
  const int r_size = D * T * N * H * 6;   // reserved space per layer
  const int y_offset = T * N * H * 5;     // offset of y inside a layer's rs slab
  const int cell_size = N * H;
  int idx = 0;  // state & cell state's idx;
  for (int i = 0; i < L; ++i) {
    const int input_size = i ? H * D : I;
    const int w_size = (input_size + H) * H * 4;
    Tensor<cpu, 2, DType> x(x_ptr, Shape2(T * N, input_size));
    Tensor<cpu, 3, DType> y(rs + y_offset, Shape3(T, N, H * D));
    LstmForwardTrainingSingleLayer<DType>(ws, rs, state_outputs, false, T, N, input_size, H,
                                          x, hx[idx], cx[idx], y,
                                          w_ptr, b_ptr, hy_ptr, cy_ptr);
    if (D == 2) {
      // Reverse direction of the same layer.
      w_ptr += w_size;
      b_ptr += b_size;
      ++idx;
      if (state_outputs) {
        hy_ptr += cell_size;
        cy_ptr += cell_size;
      }
      LstmForwardTrainingSingleLayer<DType>(ws, rs, state_outputs, true, T, N, input_size, H,
                                            x, hx[idx], cx[idx], y,
                                            w_ptr, b_ptr, hy_ptr, cy_ptr);
    }
    if (i != L - 1) {
      // Advance all cursors to the next layer; this layer's y is next input.
      w_ptr += w_size;
      b_ptr += b_size;
      x_ptr = y.dptr_;
      rs += r_size;
      ++idx;
      if (state_outputs) {
        hy_ptr += cell_size;
        cy_ptr += cell_size;
      }
    }
  }
  memcpy(y_ptr, rs + y_offset, T * N * H * D * sizeof(DType));
}

// Forward pass of one LSTM direction, inference mode: no reserved space is
// written; only the output y (and optionally the final states) is produced.
template<typename DType>
void LstmForwardInferenceSingleLayer(DType* ws,
                                     bool state_outputs,
                                     bool bid,
                                     const int T,
                                     const int N,
                                     const int I,
                                     const int H,
                                     const Tensor<cpu, 2, DType> &x,
                                     const Tensor<cpu, 2,
DType> &hx,
                                     const Tensor<cpu, 2, DType> &cx,
                                     const Tensor<cpu, 3, DType> &y,
                                     DType* w_ptr,
                                     DType* b_ptr,
                                     DType* hy_ptr,
                                     DType* cy_ptr) {
  using namespace mshadow;
  // Weight/bias views over the packed parameter blob (gate order: i, f, g, o).
  const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
  const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H));
  const Tensor<cpu, 2, DType> bx(b_ptr, Shape2(4, H));
  const Tensor<cpu, 2, DType> bh(b_ptr + H * 4, Shape2(4, H));
  // Workspace layout: yx_flat [T*N, 4H] | yh_flat [N, 4H] | h [N, H] | c [N, H].
  Tensor<cpu, 2, DType> yx_flat(ws, Shape2(T * N, H * 4));
  Tensor<cpu, 2, DType> yh_flat(ws + T * N * H * 4, Shape2(N, H * 4));
  const Tensor<cpu, 4, DType> yx(yx_flat.dptr_, Shape4(T, N, 4, H));
  const Tensor<cpu, 3, DType> yh(yh_flat.dptr_, Shape3(N, 4, H));
  Tensor<cpu, 2, DType> h(yh_flat.dptr_ + N * H * 4, Shape2(N, H));
  Tensor<cpu, 2, DType> c(h.dptr_ + N * H, Shape2(N, H));
  const int offset = bid ? H : 0;
  const DType alpha = 1.0;
  const DType beta = 0.0;
  const int cell_size = N * H;
  // Input projection for all time steps at once: yx_flat = x * wx^T.
  linalg_gemm(x, wx, yx_flat, alpha, beta, false, true);
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (int i = 0; i < T; ++i) {
    int t = bid ? T - 1 - i : i;  // reverse direction walks time backwards
    linalg_gemm(i ? h : hx, wh, yh_flat, alpha, beta, false, true);
    #pragma omp parallel for num_threads(omp_threads)
    for (int jk = 0; jk < cell_size; ++jk) {
      int j = jk / H;
      int k = jk % H;
      DType it = sigmoid<DType>(yx[t][j][0][k] + yh[j][0][k] + bx[0][k] + bh[0][k]);
      DType ft = sigmoid<DType>(yx[t][j][1][k] + yh[j][1][k] + bx[1][k] + bh[1][k]);
      DType gt = tanh(yx[t][j][2][k] + yh[j][2][k] + bx[2][k] + bh[2][k]);
      DType ot = sigmoid<DType>(yx[t][j][3][k] + yh[j][3][k] + bx[3][k] + bh[3][k]);
      DType ct = (i ? c[j][k] : cx[j][k]) * ft + it * gt;
      DType ht = ot * tanh(ct);
      y[t][j][k + offset] = ht;
      if (i == T - 1 && state_outputs) {
        hy_ptr[jk] = ht;
        cy_ptr[jk] = ct;
      } else {
        // Carry h/c forward for the next step (single-step buffers only).
        h[j][k] = ht;
        c[j][k] = ct;
      }
    }
  }
}

// Multi-layer (optionally bidirectional) LSTM forward, inference mode.
// Ping-pongs bidirectional layer outputs between y_ptr and a scratch buffer
// so that the final layer always lands in y_ptr.
template <typename DType>
void LstmForwardInference(DType* ws,
                          bool state_outputs,
                          const int L,
                          const int D,
                          const int T,
                          const int N,
                          const int I,
                          const int H,
                          DType* x_ptr,
                          DType* hx_ptr,
                          DType* cx_ptr,
                          DType* w_ptr,
                          DType* b_ptr,
                          DType* y_ptr,
                          DType* hy_ptr,
                          DType* cy_ptr) {
  const int total_layers = D * L;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
  const int b_size = 2 * H * 4;
  const int cell_size = N * H;
  DType* y_tmp_ptr = ws + (T + 1) * cell_size * 4 + cell_size * 2;
  DType* y_cur_ptr = y_ptr;
  int idx = 0;  // state & cell state's idx;
  // Parity trick: chosen so the L-th (last) layer writes into y_ptr.
  bool flag = L % 2 ? false : true;
  for (int i = 0; i < L; ++i) {
    const int input_size = i ? H * D : I;
    const int w_size = (input_size + H) * H * 4;
    // If bidirectional, need space to save current layer output y.
    if (D == 2) {
      y_cur_ptr = flag ? y_tmp_ptr : y_ptr;
      flag = !flag;
    }
    Tensor<cpu, 2, DType> x(x_ptr, Shape2(T * N, input_size));
    Tensor<cpu, 3, DType> y(y_cur_ptr, Shape3(T, N, H * D));
    LstmForwardInferenceSingleLayer<DType>(ws, state_outputs, false, T, N, input_size, H,
                                           x, hx[idx], cx[idx], y,
                                           w_ptr, b_ptr, hy_ptr, cy_ptr);
    // If bidirectional, then calculate the reverse direction's forward result.
    if (D == 2) {
      w_ptr += w_size;
      b_ptr += b_size;
      ++idx;
      if (state_outputs) {
        hy_ptr += cell_size;
        cy_ptr += cell_size;
      }
      LstmForwardInferenceSingleLayer<DType>(ws, state_outputs, true, T, N, input_size, H,
                                             x, hx[idx], cx[idx], y,
                                             w_ptr, b_ptr, hy_ptr, cy_ptr);
    }
    // Don't need to move pointer in the last layer.
    if (i != L - 1) {
      w_ptr += w_size;
      b_ptr += b_size;
      x_ptr = y_cur_ptr;
      ++idx;
      if (state_outputs) {
        hy_ptr += cell_size;
        cy_ptr += cell_size;
      }
    }
  }
}

// Backward pass of one LSTM direction. Consumes the activations saved by
// LstmForwardTrainingSingleLayer (cell states `c` and gates `ifgo` in rs)
// and produces dx, dhx, dcx and the weight/bias gradients, honoring the
// req_* OpReqType flags (kNullOp skips, kAddTo accumulates).
template <typename DType>
void LstmBackwardSingleLayer(DType* ws,
                             DType* rs,
                             DType* tmp_buf,
                             bool bid,
                             const int T,
                             const int N,
                             const int I,
                             const int H,
                             const Tensor<cpu, 2, DType> &x,
                             const Tensor<cpu, 2, DType> &hx,
                             const Tensor<cpu, 2, DType> &cx,
                             const Tensor<cpu, 3, DType> &y,
                             const Tensor<cpu, 3, DType> &dy,
                             const Tensor<cpu, 2, DType> &dx,
                             const Tensor<cpu, 2, DType> &dhx,
                             const Tensor<cpu, 2, DType> &dcx,
                             DType* dhy_ptr,
                             DType* dcy_ptr,
                             DType* w_ptr,
                             DType* dw_ptr,
                             DType* db_ptr,
                             int req_data,
                             int req_params,
                             int req_state,
                             int req_statecell) {
  using namespace mshadow;
  const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
  const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H));
  Tensor<cpu, 2, DType> dwx(dw_ptr, Shape2(H * 4, I));
  Tensor<cpu, 2, DType> dwh(dw_ptr + I * H * 4, Shape2(H * 4, H));
  Tensor<cpu, 1, DType> dbx(db_ptr, Shape1(H * 4));
  Tensor<cpu, 1, DType> dbh(dbx.dptr_ + H * 4, Shape1(H * 4));
  // Saved forward activations (second half of rs for the reverse direction).
  DType *c_ptr = bid ? rs + T * N * H * 7 : rs;
  const Tensor<cpu, 3, DType> c(c_ptr, Shape3(T, N, H));
  const Tensor<cpu, 4, DType> ifgo(c_ptr + T * N * H, Shape4(T, N, H, 4));
  memset(dwh.dptr_, 0, H * H * 4 * sizeof(DType));
  memset(dbx.dptr_, 0, H * 4 * sizeof(DType));
  memset(dbh.dptr_, 0, H * 4 * sizeof(DType));
  // Workspace: difgo [T, N, 4, H] | dh [N, H] | dc [N, H] | htmp [N, H].
  Tensor<cpu, 4, DType> difgo(ws, Shape4(T, N, 4, H));
  Tensor<cpu, 2, DType> dh(ws + T * N * H * 4, Shape2(N, H));
  Tensor<cpu, 2, DType> dc(dh.dptr_ + N * H, Shape2(N, H));
  Tensor<cpu, 2, DType> htmp(dc.dptr_ + N * H, Shape2(N, H));
  const int offset = bid ? H : 0;
  const DType alpha = 1.0;
  const DType beta0 = 0.0;
  const DType beta1 = 1.0;
  // NOTE(review): beta2 == 2.0 doubles the existing accumulator in the
  // kAddTo GEMMs below — presumably paired with the AddTo bias path;
  // confirm the intended accumulation semantics.
  const DType beta2 = 2.0;
  const int cell_size = N * H;
  if (dhy_ptr != NULL) {
    memcpy(dh.dptr_, dhy_ptr, cell_size * sizeof(DType));
  }
  if (dcy_ptr != NULL) {
    memcpy(dc.dptr_, dcy_ptr, cell_size * sizeof(DType));
  }
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (int i = T - 1; i >= 0; --i) {
    int t = bid ? T - 1 - i : i;
    int tnext = bid ? t + 1 : t - 1;
    // At i == 0 the "previous" states are the layer inputs hx/cx.
    const Tensor<cpu, 2, DType>& dhnext = i ? dh : dhx;
    const Tensor<cpu, 2, DType>& dcnext = i ? dc : dcx;
    const Tensor<cpu, 2, DType>& hnext = i ? htmp : hx;
    const Tensor<cpu, 2, DType>& cnext = i ? c[i - 1] : cx;
    #pragma omp parallel for num_threads(omp_threads)
    for (int jk = 0; jk < cell_size; ++jk) {
      int j = jk / H;
      int k = jk % H;
      DType tc = tanh(c[i][j][k]);
      DType it = ifgo[i][j][k][0];
      DType ft = ifgo[i][j][k][1];
      DType gt = ifgo[i][j][k][2];
      DType ot = ifgo[i][j][k][3];
      dh[j][k] += dy[t][j][k + offset];
      dc[j][k] += dh[j][k] * ot * (1 - tc * tc);
      // Gate gradients (pre-activation), stored for the batched GEMMs below.
      difgo[t][j][0][k] = dc[j][k] * gt * it * (1 - it);
      difgo[t][j][1][k] = dc[j][k] * cnext[j][k] * ft * (1 - ft);
      difgo[t][j][2][k] = dc[j][k] * it * (1 - gt * gt);
      difgo[t][j][3][k] = dh[j][k] * tc * ot * (1 - ot);
      if (req_statecell != kNullOp || i > 0) {
        dcnext[j][k] = dc[j][k] * ft;
      }
      if (i) {
        htmp[j][k] = y[tnext][j][k + offset];
      }
    }
    Tensor<cpu, 2, DType> dyh(difgo[t].dptr_, Shape2(N, H * 4));
    if (req_state != kNullOp || i > 0) {
      // dh_{t-1} = dyh * wh.
      linalg_gemm(dyh, wh, dhnext, alpha, beta0, false, false);
    }
    if (req_params != kNullOp) {
      if (req_params != kAddTo) {
        linalg_gemm(dyh, hnext, dwh, alpha, beta1, true, false);
      } else {
        linalg_gemm(dyh, hnext, dwh, alpha, beta2, true, false);
        // generate dwx every time step for AddTo
        Tensor<cpu, 2, DType> x_t(x.dptr_ + i * N * I, Shape2(N, I));
        Tensor<cpu, 2, DType> dyx_t(difgo.dptr_ + i * N * H * 4, Shape2(N, H * 4));
        linalg_gemm(dyx_t, x_t, dwx, alpha, beta2, true, false);
      }
    }
  }
  Tensor<cpu, 2, DType> dyx(difgo.dptr_, Shape2(T
* N, H * 4));
  if (req_data != kNullOp) {
    // dx = dyx * wx; when bid, accumulate onto the forward direction's dx.
    linalg_gemm(dyx, wx, dx, alpha, bid ? beta1 : beta0, false, false);
  }
  if (req_params != kNullOp && req_params != kAddTo) {
    linalg_gemm(dyx, x, dwx, alpha, beta0, true, false);
  }
  const int row = T * N;
  const int col = H * 4;
  if (req_params != kNullOp) {
    if (req_params != kAddTo) {
      // Bias gradients: column sums of dyx; dbh mirrors dbx.
      for (int i = 0; i < row; ++i) {
        #pragma omp parallel for num_threads(omp_threads)
        for (int j = 0; j < col; ++j) {
          dbx[j] += dyx[i][j];
          dbh[j] = dbx[j];
        }
      }
    } else {
      const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf, Shape2(col, T));
      const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + col * T, Shape2(col, T));
      memset(tmp_dbx.dptr_, 0, col * T * sizeof(DType));
      memset(tmp_dbh.dptr_, 0, col * T * sizeof(DType));
      for (int t = T - 1; t >= 0; --t) {
        #pragma omp parallel for num_threads(omp_threads)
        for (int j = 0; j < col; ++j) {
          for (int i = 0; i < N; ++i) {
            tmp_dbx[j][t] += dyx[t * N + i][j];
            tmp_dbh[j][t] = tmp_dbx[j][t];
          }
        }
        // NOTE(review): "dbx[j] += tmp_dbx[j][t] + dbx[j]" doubles the
        // running accumulator at every step (dbx = 2*dbx + tmp); presumably
        // this compensates the beta == 2.0 GEMMs in the AddTo path — confirm.
        #pragma omp parallel for num_threads(omp_threads)
        for (int j = 0; j < col; ++j) {
          dbx[j] += tmp_dbx[j][t] + dbx[j];
          dbh[j] += tmp_dbh[j][t] + dbh[j];
        }
      }
    }
  }
}

// Multi-layer (optionally bidirectional) LSTM backward. Walks the layers
// from last to first, reusing the reserved space written by the training
// forward pass; each layer's dx becomes the next (lower) layer's dy.
template <typename DType>
void LstmBackward(DType* ws,
                  DType* rs,
                  const int L,
                  const int D,
                  const int T,
                  const int N,
                  const int I,
                  const int H,
                  DType* x_ptr,
                  DType* hx_ptr,
                  DType* cx_ptr,
                  DType* w_ptr,
                  DType* y_ptr,
                  DType* dy_ptr,
                  DType* dhy_ptr,
                  DType* dcy_ptr,
                  DType* dx_ptr,
                  DType* dhx_ptr,
                  DType* dcx_ptr,
                  DType* dw_ptr,
                  DType* db_ptr,
                  int req_data,
                  int req_params,
                  int req_state,
                  int req_statecell) {
  DType* tmp_buf = ws;
  DType* ws2 = tmp_buf + 8 * T * H;
  const int total_layers = D * L;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> dhx(dhx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> dcx(dcx_ptr, Shape3(total_layers, N, H));
  const int b_size = 2 * H * 4;
  const int r_size = D * T * N * H * 6;
  const int y_offset = T * N * H * 5;
  const int w_size1 = (I + H) * H * 4;      // first layer
  const int w_size2 = (D * H + H) * H * 4;  // other layers
  const int cell_size = N * H;
  DType* dy_tmp_ptr = ws2 + T * cell_size * 4 + cell_size * 3;
  for (int i = L - 1; i >= 0; --i) {
    const int input_size = i ? H * D : I;
    const int w_size = i ? w_size2 : w_size1;
    int idx = i * D;
    // Cursors into the packed weight/grad/bias blobs for layer i.
    DType* w_cur_ptr = i ? w_ptr + (w_size1 + (i - 1) * w_size2) * D : w_ptr;
    DType* dw_cur_ptr = i ? dw_ptr + (w_size1 + (i - 1) * w_size2) * D : dw_ptr;
    DType* db_cur_ptr = db_ptr + i * b_size * D;
    DType* rs_cur_ptr = rs + i * r_size;
    DType* dhy_cur_ptr = dhy_ptr ? dhy_ptr + i * cell_size * D : NULL;
    DType* dcy_cur_ptr = dcy_ptr ? dcy_ptr + i * cell_size * D : NULL;
    Tensor<cpu, 3, DType> y(rs_cur_ptr + y_offset, Shape3(T, N, H * D));
    Tensor<cpu, 3, DType> dy(dy_ptr, Shape3(T, N, H * D));
    // Layer input: the previous layer's saved output (or x for layer 0).
    Tensor<cpu, 2, DType> x(i ? y.dptr_ - r_size : x_ptr, Shape2(T * N, input_size));
    Tensor<cpu, 2, DType> dx(i ? dy_tmp_ptr : dx_ptr, Shape2(T * N, input_size));
    LstmBackwardSingleLayer<DType>(ws2, rs_cur_ptr, tmp_buf, false, T, N, input_size, H,
                                   x, hx[idx], cx[idx], y, dy, dx, dhx[idx], dcx[idx],
                                   dhy_cur_ptr, dcy_cur_ptr,
                                   w_cur_ptr, dw_cur_ptr, db_cur_ptr,
                                   req_data, req_params, req_state, req_statecell);
    if (D == 2) {
      w_cur_ptr += w_size;
      dw_cur_ptr += w_size;
      db_cur_ptr += b_size;
      ++idx;
      dhy_cur_ptr = dhy_ptr ? dhy_cur_ptr + cell_size : NULL;
      dcy_cur_ptr = dcy_ptr ?
dcy_cur_ptr + cell_size : NULL; LstmBackwardSingleLayer<DType>(ws2, rs_cur_ptr, tmp_buf, true, T, N, input_size, H, x, hx[idx], cx[idx], y, dy, dx, dhx[idx], dcx[idx], dhy_cur_ptr, dcy_cur_ptr, w_cur_ptr, dw_cur_ptr, db_cur_ptr, req_data, req_params, req_state, req_statecell); } dy_ptr = dx.dptr_; } } template<typename DType> void GruForwardInferenceSingleLayer(DType* ws, DType* tmp_buf, bool state_outputs, const int D, const int T, const int N, const int I, const int H, const Tensor<cpu, 2, DType> &x, const Tensor<cpu, 2, DType> &hx, DType* wx_ptr, DType* wh_ptr, DType* bx_ptr, DType* bh_ptr, DType* y_ptr, DType* hy_ptr) { DType* ht = y_ptr; DType* ht_1 = y_ptr; DType* back_ht_1 = y_ptr + (T-1) * N * H * D + H; DType* back_ht = back_ht_1; DType* gemmC1 = ws; // [D, T, N, 3 * H] DType* gemmC2 = gemmC1 + D * T * N * 3 * H; // N * 3 * H DType* rt = gemmC2 + N * 3 * H; DType* zt = rt + N * H; DType* nt = zt + N * H; DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H; DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H; DType* back_bx_ptr = (bx_ptr != NULL)? bx_ptr + 3 * H * 2 : NULL; DType* back_bh_ptr = (bh_ptr != NULL)? 
bh_ptr + 3 * H * 2: NULL;
  DType* back_gemmC1 = gemmC1 + T * N * 3 * H;
  DType* gemmC1_t = gemmC1;
  const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I));
  const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H));
  const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I));
  const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H));
  const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(3, H));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // Seed step 0 of y with the initial hidden state(s).
  if (D == 1) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * H + j] = hx[i][j];
      }
  } else {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * D * H + j] = hx[i][j];
        back_ht_1[i * D * H + j] = hx[N + i][j];
      }
  }
  Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, 3 * H));
  Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, 3 * H));
  Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, 3 * H));
  // x * wx.T : [T * N, I] * [I, 3 * H]
  DType alpha = 1.0;
  DType beta = 0.0;
  linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
  if (D == 2) {
    linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
  }
  for (int t = 0; t < T; t++) {
    // perform the first direction, X * wx and H * wh for each step
    // ht-1 * wh, ht-1:[N, H] wh:[3 * H, H]
    Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
    if (D == 1) {
      linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
    } else {
      // Strip the interleaved reverse columns by transposing into tmp_buf.
      Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf),
                                                              Shape3(D, H, N));
      dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
      linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
    }
    gemmC1_t = gemmC1 + t * N * 3 * H;
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        int rtb = i * 3 * H;          // reset-gate row base
        int ztb = i * 3 * H + H;      // update-gate row base
        int ntb = i * 3 * H + 2 * H;  // new-gate row base
        rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] + gemmC2[rtb + j]
            + bx[0][j] + bh[0][j]);
        zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] + gemmC2[ztb + j]
            + bx[1][j] + bh[1][j]);
        nt[i * H + j] = tanh(gemmC1_t[ntb + j] + bx[2][j]
            + rt[i * H + j] * (gemmC2[ntb + j] + bh[2][j]));
        ht[i * D * H + j] = (1-zt[i * H + j]) * nt[i * H + j]
            + zt[i * H + j] * ht_1[i * D * H + j];
      }
    }
    ht_1 = ht;
    ht = ht + D * H * N;
    // perform the second direction
    if (D == 2) {
      gemmC1_t = back_gemmC1 + (T - 1 - t) * N * 3 * H;
      Tensor<cpu, 2, DType> dback_ht_1(back_ht_1, Shape2(N, D * H));
      Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>
          (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
      linalg_gemm(dback_ht_1_tmp[0], back_wh, dgemmC2, alpha, beta, true, true);
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          int rtb = i * 3 * H;
          int ztb = i * 3 * H + H;
          int ntb = i * 3 * H + 2 * H;
          rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] + gemmC2[rtb + j]
              + back_bx[0][j] + back_bh[0][j]);
          zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] + gemmC2[ztb + j]
              + back_bx[1][j]+ back_bh[1][j]);
          nt[i * H + j] = tanh(gemmC1_t[ntb + j] + back_bx[2][j]
              + rt[i * H + j] * (gemmC2[ntb + j] + back_bh[2][j]));
          back_ht[i * D * H + j] = (1 - zt[i * H + j]) * nt[i * H + j]
              + zt[i * H + j] * back_ht_1[i * D * H + j];
        }
      }
      back_ht_1 = back_ht;
      back_ht = back_ht - D * H * N;
    }
  }
  // copy last state to hy, from(N, H * D) to (D, N, H)
  if (state_outputs) {
    if (D == 1) {
      DType* y_start = y_ptr + (T - 1) * N * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * H + j];
        }
    } else {
      DType* y_start = y_ptr + (T - 1) * N * H * D;
      DType* y_back_start = y_ptr + H;
      #pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++) for (int j = 0; j < H; j++) { hy_ptr[i * H + j] = y_start[i * D * H + j]; hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j]; } } } } template <typename DType> void GruForwardInference(DType* ws, bool state_outputs, const int L, const int D, const int T, const int N, int I, const int H, DType* x_ptr, DType* hx_ptr, DType* w_ptr, DType* y_ptr, DType* hy_ptr) { DType* wx = w_ptr; DType* wh = wx + I * H * 3; DType* bx = wh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3) + (L - 1) * ((D + 1) * H) * H * 3 * D; DType* bh = bx + H * 3; DType* y_tmp = ws; DType* y_l = x_ptr; DType* tmp_buf = y_tmp + D * T * N * H; DType* ws2 = y_tmp + D * T * N * H + D * H * N; DType* wx_l = wx; DType* wh_l = wh; DType* bx_l = bx; DType* bh_l = bh; Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H)); DType* hy_l = hy_ptr; for (int l = 0; l < L; l++) { Tensor<cpu, 2, DType> x_l(y_l, Shape2(T * N, I)); if ((L + l) % 2) { y_l = y_ptr; } else { y_l = y_tmp; } Tensor<cpu, 2, DType> hx_l = hx[D * l]; GruForwardInferenceSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H, x_l, hx_l, wx_l, wh_l, bx_l, bh_l, y_l, hy_l); hy_l = hy_l + D * N * H; bx_l = bx_l + 3 * H * D * 2; bh_l = bh_l + 3 * H * D * 2; wx_l = wx_l + I * H * 3 * D + H * H * 3 * D; if (l == 0) { I = D * H; } wh_l = wx_l + I * 3 * H; } } template<typename DType> void GruForwardTrainingSingleLayer(DType* ws, DType* tmp_buf, bool state_outputs, const int D, const int T, const int N, const int I, const int H, const Tensor<cpu, 2, DType> &x, const Tensor<cpu, 2, DType> &hx, DType* wx_ptr, DType* wh_ptr, DType* bx_ptr, DType* bh_ptr, DType* gateR, DType* gateZ, DType* gateN, DType* Mnh, DType* y_ptr, DType* hy_ptr) { DType* ht = y_ptr; DType* ht_1 = y_ptr; DType* back_ht_1 = y_ptr + (T - 1)* N * H * D + H; DType* back_ht = back_ht_1; DType* gemmC1 = ws; // [D, T, N, 3 * H] DType* gemmC2 = gemmC1 + D * T * N * 3 * H; // N * 3 * H DType* rt = gateR; DType* zt = gateZ; DType* nt = gateN; DType* 
back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H;
  DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H;
  DType* back_bx_ptr = (bx_ptr != NULL)? bx_ptr + 3 * H * 2 : NULL;
  DType* back_bh_ptr = (bh_ptr != NULL)? bh_ptr + 3 * H * 2 : NULL;
  // Reverse direction's activation buffers follow the forward direction's.
  DType* back_gateR = gateR + T * N * H;
  DType* back_gateZ = gateZ + T * N * H;
  DType* back_gateN = gateN + T * N * H;
  DType* back_Mnh = Mnh + T * N * H;
  DType* back_gemmC1 = gemmC1 + T * N * 3 * H;
  DType* gemmC1_t = gemmC1;
  const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I));
  const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H));
  const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I));
  const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H));
  const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(3, H));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // Seed step 0 of y with the initial hidden state(s).
  if (D == 1) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * H + j] = hx[i][j];
      }
  } else {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * D * H + j] = hx[i][j];
        back_ht_1[i * D * H + j] = hx[N + i][j];
      }
  }
  Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, 3 * H));
  Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, 3 * H));
  Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, 3 * H));
  // x * wx.T : [T * N, I] * [I, 3 * H]
  DType alpha = 1.0;
  DType beta = 0.0;
  linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
  if (D == 2) {
    linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
  }
  for (int t = 0; t < T; t++) {
    // perform the first direction, X * wx and H * wh for each step
    // ht-1 * wh, ht-1:[N, H] wh:[3 * H, H]
    Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
    if (D == 1) {
      linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
    } else {
      Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf),
                                                              Shape3(D, H, N));
      dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
      linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
    }
    rt = gateR + t * N * H;
    zt = gateZ + t * N * H;
    nt = gateN + t * N * H;
    gemmC1_t = gemmC1 + t * N * 3 * H;
    DType* Mnht = Mnh + t * N * H;
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        int rtb = i * 3 * H;          // reset-gate row base
        int ztb = i * 3 * H + H;      // update-gate row base
        int ntb = i * 3 * H + 2 * H;  // new-gate row base
        // Save the candidate's recurrent pre-activation for backward.
        Mnht[i * H + j] = gemmC2[ntb + j] + bh[2][j];
        rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] + gemmC2[rtb + j]
            + bx[0][j] + bh[0][j]);
        zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] + gemmC2[ztb + j]
            + bx[1][j] + bh[1][j]);
        nt[i * H + j] = tanh(gemmC1_t[ntb + j] + bx[2][j]
            + rt[i * H + j] * (gemmC2[ntb + j] + bh[2][j]));
        ht[i * D * H + j] = (1-zt[i * H + j]) * nt[i * H + j]
            + zt[i * H + j] * ht_1[i * D * H + j];
      }
    }
    ht_1 = ht;
    ht = ht + D * H * N;
    // perform the second direction
    if (D == 2) {
      rt = back_gateR + (T - 1 - t) * N * H;
      zt = back_gateZ + (T - 1 - t) * N * H;
      nt = back_gateN + (T - 1 - t) * N * H;
      gemmC1_t = back_gemmC1 + (T - 1 - t) * N * 3 * H;
      Tensor<cpu, 2, DType> dback_ht_1(back_ht_1, Shape2(N, D * H));
      Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>
          (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
      linalg_gemm(dback_ht_1_tmp[0], back_wh, dgemmC2, alpha, beta, true, true);
      DType* back_Mnht = back_Mnh + (T - 1 - t) * N * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          int rtb = i * 3 * H;
          int ztb = i * 3 * H + H;
          int ntb = i * 3 * H + 2 * H;
          back_Mnht[i * H + j] = gemmC2[ntb + j] + back_bh[2][j];
          rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] + gemmC2[rtb + j]
              + back_bx[0][j] + back_bh[0][j]);
          zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] + gemmC2[ztb + j]
              + back_bx[1][j] + back_bh[1][j]);
          nt[i * H + j] = tanh(gemmC1_t[ntb + j] + back_bx[2][j]
              + rt[i * H + j] * (gemmC2[ntb + j] + back_bh[2][j]));
          back_ht[i * D * H + j] = (1 - zt[i * H + j]) * nt[i * H + j]
              + zt[i * H + j] * back_ht_1[i * D * H + j];
        }
      }
      back_ht_1 = back_ht;
      back_ht = back_ht - D * H * N;
    }
  }
  // copy last state to hy, from(N, H * D) to (D, N, H)
  if (state_outputs) {
    if (D == 1) {
      DType* y_start = y_ptr + (T - 1) * N * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * H + j];
        }
    } else {
      DType* y_start = y_ptr + (T - 1) * N * H * D;
      DType* y_back_start = y_ptr + H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * D * H + j];
          hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
        }
    }
  }
}

// Multi-layer (optionally bidirectional) GRU forward, training mode.
// Per-layer gate activations live in the reserved space rs; the last
// layer's output is copied into y_ptr at the end.
template <typename DType>
void GruForwardTraining(DType* ws,
                        DType* rs,
                        bool state_outputs,
                        const int L,
                        const int D,
                        const int T,
                        const int N,
                        int I,
                        const int H,
                        DType* x_ptr,
                        DType* hx_ptr,
                        DType* w_ptr,
                        DType* y_ptr,
                        DType* hy_ptr) {
  DType* wx = w_ptr;
  DType* wh = wx + I * H * 3;
  DType* bx = wh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3)
      + (L - 1) * ((D + 1) * H) * H * 3 * D;
  DType* bh = bx + H * 3;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
  DType* hy_l = hy_ptr;
  // Reserved-space layout: gateR | gateZ | gateN | y | Mnh, each per layer.
  DType* gateR_l = rs;
  DType* gateZ_l = gateR_l + L * T * D * N * H;
  DType* gateN_l = gateZ_l + L * T * D * N * H;
  DType* y_l = gateN_l + L * T * D * N * H;
  DType* Mnh_l = y_l + L * T * N * H * D;
  DType* tmp_buf = Mnh_l + L * D * T * N * H;
  DType* ws2 = Mnh_l + L * D * T * N * H + D * H * N;
  DType* wx_l = wx;
  DType* wh_l = wh;
  DType* bx_l = bx;
  DType* bh_l = bh;
  DType* y_tmp = x_ptr;
  for (int l = 0; l < L; l++) {
    if (l != 0) {
      y_tmp = y_l;           // previous layer's output feeds this layer
      y_l = y_l + T * N * H * D;
    }
    Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
    Tensor<cpu, 2, DType> hx_l = hx[D * l];
GruForwardTrainingSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H, x_l, hx_l, wx_l, wh_l, bx_l, bh_l, gateR_l, gateZ_l, gateN_l, Mnh_l, y_l, hy_l); gateR_l = gateR_l + T * D * N * H; gateZ_l = gateZ_l + T * D * N * H; gateN_l = gateN_l + T * D * N * H; Mnh_l = Mnh_l + T * D * N * H; hy_l = hy_l + D * N * H; bx_l = bx_l + 3 * H * D * 2; bh_l = bh_l + 3 * H * D * 2; wx_l = wx_l + I * H * 3 * D + H * H * 3 * D; if (l == 0) { I = D * H; } wh_l = wx_l + I * 3 * H; } memcpy(y_ptr, y_l, T * N * H * D * sizeof(DType)); } template <typename DType> void GruBackwardSingleLayer(DType* ws, DType* tmp_buf, const int D, const int T, const int N, const int I, const int H, const Tensor<cpu, 2, DType> &x, const Tensor<cpu, 2, DType> &hx, DType* wx_ptr, DType* wh_ptr, DType* y_ptr, DType* dy_ptr, DType* dhy_ptr, DType* gateR, DType* gateZ, DType* gateN, DType* Mnh, DType* dx, DType* dhx, DType* dwx, DType* dwh, DType* dbx, DType* dbh, int req_data, int req_params, int req_state) { DType* dyt; DType* ht1; // [N, D, H] DType* rt; DType* zt; DType* nt; DType* dat; DType* dart; DType* dar = ws; // [T, N, 3 * H] DType* da = dar + T * N * 3 * H; // [T, N, 3 * H] DType* dht1 = da + T * N * 3 * H; // [D, N, H] DType* hx_ = dht1 + D * N * H; // [N, D, H] DType* Mnht = Mnh; DType* back_ht1; DType* back_dht1 = dht1 + N * H; // [N, H] DType* back_Mnht = Mnh + T * N * H; DType* back_gateR = gateR + T * N * H; DType* back_gateZ = gateZ + T * N * H; DType* back_gateN = gateN + T * N * H; DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H; DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H; DType* back_dwx = dwx + I * 3 * H + H * 3 * H; DType* back_dwh = dwh + I * 3 * H + H * 3 * H; DType* back_dbx = dbx + 3 * H * 2; DType* back_dbh = dbh + 3 * H * 2; DType alpha = 1.0; DType beta = 0.0; const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I)); const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H)); const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I)); const 
Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // Initialize dht1 from dhy (or zero when no gradient flows in from hy).
  #pragma omp parallel for num_threads(omp_threads)
  for (int i = 0; i < N * H; ++i) {
    if (dhy_ptr) {
      dht1[i] = dhy_ptr[i];
    } else {
      dht1[i] = 0;
    }
  }
  // Repack hx into [N, D, H] so it can stand in for y at t == -1.
  #pragma omp parallel for num_threads(omp_threads)
  for (int i = 0; i < N; ++i) {
    for (int j = 0; j < H; ++j) {
      hx_[i * D * H + j] = hx[i][j];
    }
  }
  if (D == 2) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N * H; ++i) {
      if (dhy_ptr) {
        back_dht1[i] = dhy_ptr[N * H + i];
      } else {
        back_dht1[i] = 0;
      }
    }
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        hx_[i * D * H + H + j] = hx[N + i][j];
      }
    }
  }
  for (int t = T - 1; t >= 0; --t) {
    if (t) {
      ht1 = y_ptr + (t - 1) * N * D * H;
    } else {
      ht1 = hx_;
    }
    // add dy[T, N, D, H] to dhy[D, N, H]
    dyt = dy_ptr + t * N * D * H;
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        dht1[i * H + j] += dyt[i * D * H + j];
      }
    }
    rt = gateR + t * N * H;
    zt = gateZ + t * N * H;
    nt = gateN + t * N * H;
    Mnht = Mnh + t * N * H;
    dat = da + t * N * 3 * H;
    dart = dar + t * N * 3 * H;
    // Gate gradients: da is w.r.t. the input-side pre-activations, dar
    // w.r.t. the recurrent-side ones (they differ only in the n gate).
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        int nid = i * 3 * H + 2 * H + j;
        int zid = i * 3 * H + H + j;
        int rid = i * 3 * H + j;
        int id = i * H + j;
        dat[nid] = dht1[id] * (1 - zt[id]) * (1 - nt[id] * nt[id]);
        dart[zid] = dat[zid] = dht1[id] * (ht1[i * D * H + j] - nt[id])
            * zt[id] * (1 - zt[id]);
        dart[rid] = dat[rid] = dat[nid] * Mnht[id] * rt[id] * (1 - rt[id]);
        dart[nid] = dat[nid] * rt[id];
        dht1[id] = dht1[id] * zt[id];
      }
    }
    if (req_params != kNullOp) {
      alpha = 1.0;
      beta = 1.0;
      // dht1 = dart * wh    [N, H] = [N, 3 * H] * [3 * H, H]
      Tensor<cpu, 2, DType> d_dht1(dht1, Shape2(N, H));
      Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, 3 * H));
      linalg_gemm(d_dart, wh, d_dht1, alpha, beta, false, false);
      if (req_params == kAddTo) {
        // NOTE(review): beta == 2.0 doubles the existing accumulator in the
        // AddTo GEMMs — confirm the intended accumulation semantics.
        beta = 2.0;
        // dwx = da.T * x    [3 * H, I] = [3 * H, N] * [N, I] for AddTo
        Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
        Tensor<cpu, 2, DType> d_dat(dat, Shape2(N, 3 * H));
        Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(3 * H, I));
        linalg_gemm(d_dat, d_xt, d_dwx, alpha, beta, true, false);
      }
      // dwh = dart.T * ht1    [3 * H, H] = [3 * H, N] * [N, H]
      Tensor<cpu, 2, DType> d_ht1(ht1, Shape2(N, D * H));
      Tensor<cpu, 2, DType> d_dwh(dwh, Shape2(3 * H, H));
      Tensor<cpu, 3, DType> d_ht1_tmp = Tensor<cpu, 3, DType>
          (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      d_ht1_tmp = reshape(d_ht1.T(), Shape3(D, H, N));
      linalg_gemm(d_dart, d_ht1_tmp[0], d_dwh, alpha, beta, true, true);
    }
  }
  if (req_params != kNullOp) {
    // dbx = e * da       [1, 3 * H] = [1, N] * [N, 3 * H]
    if (req_params != kAddTo) {
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < 3 * H; ++i) {
        for (int j = 0; j < N * T; ++j) {
          dbx[i] += da[j * 3 * H + i];
          dbh[i] += dar[j * 3 * H + i];
        }
      }
    } else {
      const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H * 3, T));
      const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + 3 * H * T,
                                          Shape2(H * 3, T));
      memset(tmp_dbx.dptr_, 0, H * T * 3 * sizeof(DType));
      memset(tmp_dbh.dptr_, 0, H * T * 3 * sizeof(DType));
      for (int t = T - 1; t >= 0; --t) {
        #pragma omp parallel for num_threads(omp_threads)
        for (int i = 0; i < 3 * H; ++i) {
          for (int j = 0; j < N; ++j) {
            tmp_dbx[i][t] += da[t * N * 3 * H + j * 3 * H + i];
            tmp_dbh[i][t] += dar[t * N * 3 * H + j * 3 * H + i];
          }
        }
        // NOTE(review): "dbx[i] += tmp_dbx[i][t] + dbx[i]" doubles the
        // running accumulator each step; presumably paired with the
        // beta == 2.0 GEMMs above — confirm.
        #pragma omp parallel for num_threads(omp_threads)
        for (int i = 0; i < 3 * H; ++i) {
          dbx[i] += tmp_dbx[i][t] + dbx[i];
          dbh[i] += tmp_dbh[i][t] + dbh[i];
        }
      }
    }
  }
  alpha = 1.0;
  beta = 0.0;
  // dx = da * wx    [T * N, I] = [T * N, 3 * H] * [3 * H, I]
  Tensor<cpu, 2, DType> d_da(da, Shape2(T * N, 3 * H));
  if (req_data != kNullOp) {
    Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
    linalg_gemm(d_da, wx, d_dx, alpha, beta, false, false);
  }
  // dwx = da.T * x    [3 * H, I] = [3 * H, T * N] * [T * N, I]
  if (req_params != kNullOp && req_params != kAddTo) {
    Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(3 * H, I));
    linalg_gemm(d_da, x, d_dwx, alpha, beta, true, false);
  }
  if (D == 2) {
    // Reverse direction: same recurrence, walking time forwards because the
    // reverse pass's "next" step is t + 1.
    for (int t = 0; t < T; ++t) {
      if (t == T-1) {
        back_ht1 = hx_;
      } else {
        back_ht1 = y_ptr + (t + 1) * N * D * H;
      }
      // add dy[T, N, D, H] to dhy[D, N, H]
      dyt = dy_ptr + t * N * D * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          back_dht1[i * H + j] += dyt[i * D * H + H + j];
        }
      }
      rt = back_gateR + t * N * H;
      zt = back_gateZ + t * N * H;
      nt = back_gateN + t * N * H;
      back_Mnht = Mnh + (T + t) * N * H;
      dat = da + t * N * 3 * H;
      dart = dar + t * N * 3 * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          int nid = i * 3 * H + 2 * H + j;
          int zid = i * 3 * H + H + j;
          int rid = i * 3 * H + j;
          int id = i * H + j;
          dat[nid] = back_dht1[id] * (1 - zt[id]) * (1 - nt[id] * nt[id]);
          dart[zid] = dat[zid] = back_dht1[id] * (back_ht1[i * D * H + H + j] - nt[id])
              * zt[id] * (1 - zt[id]);
          dart[rid] = dat[rid] = dat[nid] * back_Mnht[id] * rt[id] * (1 - rt[id]);
          dart[nid] = dat[nid] * rt[id];
          back_dht1[id] = back_dht1[id] * zt[id];
        }
      }
      if (req_params != kNullOp) {
        alpha = 1.0;
        beta = 1.0;
        // dht1 = da * wh    [N, H] = [N, 3 * H] * [3 * H, H]
        Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, 3 * H));
        Tensor<cpu, 2, DType> d_back_dht1(back_dht1, Shape2(N, H));
        linalg_gemm(d_dart, back_wh, d_back_dht1, alpha, beta, false, false);
        // dwh = da.T * ht1    [3 * H, H] = [3 * H, N] * [N, H]
        Tensor<cpu, 2, DType> d_back_dwh(back_dwh, Shape2(3 * H, H));
        Tensor<cpu, 2, DType> d_back_ht1(back_ht1 + H, Shape2(N, D * H));
        Tensor<cpu, 3, DType> d_back_ht1_tmp = Tensor<cpu, 3, DType>
            (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
        d_back_ht1_tmp = reshape(d_back_ht1.T(), Shape3(D, H, N));
        if (req_params == kAddTo) {
          beta = 2.0;
          // dwx = da.T * x    [3 * H,
I] = [3 * H, N] * [N, I] for AddTo Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I)); Tensor<cpu, 2, DType> d_dat(dat, Shape2(N, 3 * H)); Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(3 * H, I)); linalg_gemm(d_dat, d_xt, d_back_dwx, alpha, beta, true, false); } linalg_gemm(d_dart, d_back_ht1_tmp[0], d_back_dwh, alpha, beta, true, true); } } if (req_params != kNullOp) { // dbx = e * da [1, 3 * H] = [1, N] * [N, 3 * H] if (req_params != kAddTo) { #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < 3 * H; ++i) { for (int j = 0; j < N * T; ++j) { back_dbx[i] += da[j * 3 * H + i]; back_dbh[i] += dar[j * 3 * H + i]; } } } else { const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H * 3, T)); const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + 3 * H * T, Shape2(H * 3, T)); memset(tmp_dbx.dptr_, 0, H * T * 3 * sizeof(DType)); memset(tmp_dbh.dptr_, 0, H * T * 3 * sizeof(DType)); for (int t = T - 1; t >= 0; --t) { #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < 3 * H; ++i) { for (int j = 0; j < N; ++j) { tmp_dbx[i][t] += da[t * N * 3 * H + j * 3 * H + i]; tmp_dbh[i][t] += dar[t * N * 3 * H + j * 3 * H + i]; } } #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < 3 * H; ++i) { back_dbx[i] += tmp_dbx[i][t] + back_dbx[i]; back_dbh[i] += tmp_dbh[i][t] + back_dbh[i]; } } } } alpha = 1.0; beta = 1.0; // dxt = da * wx [T * N, I] = [T * N, 3 * H] * [3 * H, I] Tensor<cpu, 2, DType> d_da2(da, Shape2(T * N, 3 * H)); if (req_data != kNullOp) { Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I)); linalg_gemm(d_da2, back_wx, d_dx, alpha, beta, false, false); } alpha = 1.0; beta = 0.0; // dwx = da.T * x [3 * H, I] = [3 * H, T * N] * [T * N, I] if (req_params != kNullOp && req_params != kAddTo) { Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(3 * H, I)); linalg_gemm(d_da2, x, d_back_dwx, alpha, beta, true, false); } } if (req_state != kNullOp) { memcpy(dhx, dht1, N * H * D * sizeof(DType)); } 
}

/*
 * Backward pass of a multi-layer (optionally bidirectional) GRU on CPU.
 *
 * Walks the layers from top (l = L-1) down to bottom (l = 0), calling
 * GruBackwardSingleLayer once per layer and rewinding the per-layer pointers
 * into the fused weight / gradient / reserved-space buffers between calls.
 *
 * Parameters (as used by the visible code):
 *   ws, rs        - workspace and reserved space; rs holds the cached forward
 *                   activations (gate R/Z/N, outputs y, Mnh) laid out per layer
 *   L, D, T, N, H - layer count, directions (1 or 2), sequence length,
 *                   batch size, hidden size
 *   I             - input feature size of the bottom layer; reassigned inside
 *                   the loop (D*H for upper layers), hence not const
 *   x_ptr..dw_ptr - inputs, initial hidden state, weights, and the incoming /
 *                   outgoing gradient buffers
 *   req_data, req_params, req_state - per-output write requests, forwarded
 *                   unchanged to the single-layer kernel
 */
template <typename DType>
void GruBackward(DType* ws, DType* rs, const int L, const int D, const int T,
                 const int N, int I, const int H, DType* x_ptr, DType* hx_ptr,
                 DType* w_ptr, DType* dy_ptr, DType* dhy_ptr, DType* dx_ptr,
                 DType* dhx_ptr, DType* dw_ptr, int req_data, int req_params,
                 int req_state) {
  DType* wx = w_ptr;
  DType* dwx = dw_ptr;
  // Weight gradients are packed as [wx | wh | ... | biases]; dbx is the start
  // of the bias-gradient region after all layers' wx/wh gradients.
  DType* dwh = dwx + I * H * 3;
  DType* dbx = dwh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3)
      + (L - 1) * ((D + 1) * H) * H * 3 * D;
  // Cached forward activations for the TOP layer; rewound layer by layer below.
  DType* gateR_l = rs + (L - 1) * T * D * N * H;
  DType* gateZ_l = gateR_l + L * T * D * N * H;
  DType* gateN_l = gateZ_l + L * T * D * N * H;
  DType* y_l = gateN_l + L * T * D * N * H;
  DType* Mnh_l = y_l + L * T * N * H * D;
  DType* tmp_buf = Mnh_l + L * D * T * N * H;
  // Scratch dx for upper layers (layer 0 writes straight into dx_ptr).
  DType* dx_l = tmp_buf + T * N * D * H + 3 * H * T * 2;
  DType* ws2 = dx_l + T * N * D * H;
  // Per-layer weight pointers, positioned at the top layer. NOTE(review):
  // layout presumably matches the packing used by the forward pass - the
  // offsets below mirror the dwx/dwh offsets and are assumed consistent.
  DType* wx_l = (L == 1)? wx : wx + (L - 2) * D * (D + 1) * H * 3 * H
      + D * I * 3 * H + D * H * 3 * H;
  DType* wh_l = wx_l;
  if (L == 1) {
    wh_l = wh_l + I * H * 3;      // bottom layer: wh follows I*3H input weights
  } else {
    wh_l = wh_l + (D * H) * H * 3;  // upper layers consume D*H inputs
  }
  DType* dhy_l = NULL;
  if (dhy_ptr)
    dhy_l = dhy_ptr + (L - 1) * D * N * H;  // dhy is optional
  DType* dwx_l = (L == 1)? dwx : dwx + (L - 2) * D * (D + 1) * H * 3 * H
      + D * I * 3 * H + D * H * 3 * H;
  DType* dwh_l = NULL;
  if (L == 1) {
    dwh_l = dwx_l + I * H * 3;
  } else {
    dwh_l = dwx_l + (D * H) * H * 3;
  }
  DType* dbx_l = dbx + (L - 1) * D * 3 * H * 2;
  DType* dbh_l = dbx_l + 3 * H;
  DType* dhx_l = dhx_ptr + (L - 1) * D * N * H;
  DType* dy_l = dy_ptr;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(L, D * N, H));
  int inputsize = I;  // remember the bottom-layer input size; I is reused
  // Input to the top layer is the (cached) output of the layer below it.
  DType* y_tmp = y_l - T * N * H * D;
  for (int l = L - 1; l >= 0; --l) {
    if (l == 0) {
      // Bottom layer: real input x and the caller's dx buffer.
      I = inputsize;
      y_tmp = x_ptr;
      dx_l = dx_ptr;
    } else {
      I = D * H;  // upper layers take the previous layer's (bi)directional output
    }
    Tensor<cpu, 2, DType> hx_l = hx[l];
    Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
    GruBackwardSingleLayer<DType>(ws2, tmp_buf, D, T, N, I, H, x_l, hx_l,
                                  wx_l, wh_l, y_l, dy_l, dhy_l, gateR_l,
                                  gateZ_l, gateN_l, Mnh_l, dx_l, dhx_l,
                                  dwx_l, dwh_l, dbx_l, dbh_l,
                                  req_data, req_params, req_state);
    if (l > 0) {
      // This layer's input gradient becomes the next (lower) layer's dy.
      memcpy(dy_l, dx_l, T * N * H * D * sizeof(DType));
      // Rewind all per-layer pointers by one layer's worth of storage.
      gateR_l = gateR_l - T * D * N * H;
      gateZ_l = gateZ_l - T * D * N * H;
      gateN_l = gateN_l - T * D * N * H;
      Mnh_l = Mnh_l - T * D * N * H;
      dhx_l = dhx_l - D * N * H;
      if (dhy_l)
        dhy_l = dhy_l - D * N * H;
      y_l = y_l - T * N * H * D;
      y_tmp = y_l;
      if (l == 1) {
        // About to process the bottom layer: its weight block is sized by the
        // original input width, not D*H.
        wx_l = wx_l - (inputsize + H) * H * 3 * D;
        wh_l = wx_l + inputsize * 3 * H;
        dwx_l = dwx_l - (inputsize + H) * H * 3 * D;
        dwh_l = dwx_l + inputsize * 3 * H;
      } else {
        wx_l = wx_l - (I + H) * H * 3 * D;
        wh_l = wx_l + I * 3 * H;
        dwx_l = dwx_l - (I + H) * H * 3 * D;
        dwh_l = dwx_l + I * 3 * H;
      }
      dbx_l = dbx_l - D * 3 * H * 2;
      dbh_l = dbx_l + 3 * H;
    }
  }
}
}  // namespace op
}  // namespace mxnet
#endif  // MXNET_OPERATOR_RNN_IMPL_H_
kmeanCPM_paralelo.c
#include <stdlib.h>
#include <stdio.h>

#define N 600000  /* number of samples */
#define G 200     /* number of centroids */

long V[N];  /* samples */
long R[G];  /* centroids */
int A[G];   /* number of samples assigned to each centroid */

/*
 * 1-D k-means over fV[0..fN-1] with fK centroids fR[], iterating until no
 * centroid moves. On return fR[] holds the final centroids and fA[] the
 * cluster populations. Prints the iteration count.
 *
 * Fixes vs. the original:
 *  - the assignment loop's per-sample state (best index, best distance, j)
 *    was shared between OpenMP threads - a data race that also clobbered
 *    the convergence accumulator `dif`; it is now thread-local,
 *  - labs() instead of abs(): the operands are long, and abs() would
 *    truncate them to int on LP64 platforms,
 *  - fD[] (600000 ints, ~2.4 MB) is static instead of automatic to avoid
 *    overflowing the stack (note: this makes kmean non-reentrant, which it
 *    effectively already was given the global buffers).
 */
void kmean(int fN, int fK, long fV[], long fR[], int fA[]) {
  int i, iter = 0;
  long dif, t;
  long fS[G];        /* per-cluster sums */
  static int fD[N];  /* nearest-centroid index for each sample */
  do {
    /* Assignment step: everything written per iteration is declared inside
     * the loop body, so each OpenMP thread gets its own copy. */
#pragma omp parallel for schedule(dynamic, 50000)
    for (i = 0; i < fN; i++) {
      int best = 0, j;
      long d = labs(fV[i] - fR[0]);
      for (j = 1; j < fK; j++) {
        long dj = labs(fV[i] - fR[j]);
        if (dj < d) {
          best = j;
          d = dj;
        }
      }
      fD[i] = best;
    }
    /* Update step (serial): accumulate sums/counts, then move centroids. */
    for (i = 0; i < fK; i++)
      fS[i] = fA[i] = 0;
    for (i = 0; i < fN; i++) {
      fS[fD[i]] += fV[i];
      fA[fD[i]]++;
    }
    dif = 0;  /* total centroid movement; 0 => converged */
    for (i = 0; i < fK; i++) {
      t = fR[i];
      if (fA[i])
        fR[i] = fS[i] / fA[i];  /* empty clusters keep their old centroid */
      dif += labs(t - fR[i]);
    }
    iter++;
  } while (dif);
  printf("iter %d\n", iter);
}

/*
 * In-place quicksort of fV[ii..fi], permuting fA[] in step so each count
 * stays paired with its centroid. fV[ii] is the pivot; the scan keeps the
 * current candidate in (vtmp, vta) and either shifts it left of the pivot
 * slot or swaps it to the shrinking tail.
 */
void qs(int ii, int fi, long fV[], int fA[]) {
  int i, f;
  long pi, pa, vtmp, vta, vfi, vfa;
  pi = fV[ii];
  pa = fA[ii];
  i = ii + 1;
  f = fi;
  vtmp = fV[i];
  vta = fA[i];
  while (i <= f) {
    if (vtmp < pi) {
      fV[i - 1] = vtmp;
      fA[i - 1] = (int)vta;
      i++;
      if (i <= f) {  /* guard: the original unconditionally read fV[i] here,
                      * which is fV[f + 1] - out of bounds when f == fi - on
                      * the last iteration; the value was discarded anyway */
        vtmp = fV[i];
        vta = fA[i];
      }
    } else {
      vfi = fV[f];
      vfa = fA[f];
      fV[f] = vtmp;
      fA[f] = (int)vta;
      f--;
      vtmp = vfi;
      vta = vfa;
    }
  }
  fV[i - 1] = pi;          /* pivot lands in its final position */
  fA[i - 1] = (int)pa;
  if (ii < f) qs(ii, f, fV, fA);
  if (i < fi) qs(i, fi, fV, fA);
}

int main() {
  int i;
  /* Initialisation with pseudo-random samples. The two rand() calls are
   * sequenced explicitly - the original `rand() % rand()` evaluated them in
   * unspecified order and invoked undefined behaviour ("% 0") whenever the
   * divisor call returned 0. */
  for (i = 0; i < N; i++) {
    long r = rand();
    long m = rand();
    while (m == 0)
      m = rand();
    V[i] = (r % m) / N;
  }
  /* first candidates */
  for (i = 0; i < G; i++)
    R[i] = V[i];
  /* compute the G most representative values */
  kmean(N, G, V, R, A);
  qs(0, G - 1, R, A);
  for (i = 0; i < G; i++)
    printf("R[%d] : %ld te %d agrupats\n", i, R[i], A[i]);
  return (0);
}
primitives_inl.h
/* * nvbio * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma once namespace nvbio { // return true if any item in the range [0,n) evaluates to true // template <typename PredicateIterator> bool any( const host_tag tag, const uint32 n, const PredicateIterator pred) { return thrust::reduce( pred, pred + n, false, thrust::logical_or<bool>() ); } // return true if all items in the range [0,n) evaluate to true // template <typename PredicateIterator> bool all( const host_tag tag, const uint32 n, const PredicateIterator pred) { return thrust::reduce( pred, pred + n, true, thrust::logical_and<bool>() ); } #if defined(__CUDACC__) // return true if any item in the range [0,n) evaluates to true // template <typename PredicateIterator> bool any( const device_tag tag, const uint32 n, const PredicateIterator pred) { return cuda::any( n, pred ); } // return true if any item in the range [0,n) evaluates to true // template <typename PredicateIterator> bool all( const device_tag tag, const uint32 n, const PredicateIterator pred) { return cuda::all( n, pred ); } #endif // return true if any item in the range [0,n) evaluates to true // template <typename system_tag, typename PredicateIterator> bool any( const uint32 n, const PredicateIterator pred) { return any( system_tag(), n, pred ); } // return true if all items in the range [0,n) evaluate to true // template <typename system_tag, typename PredicateIterator> bool all( const uint32 n, const PredicateIterator pred) { return all( system_tag(), n, pred ); } // a pseudo-iterator to evaluate the predicate (it1[i] <= it2[i]) for arbitrary iterator pairs // template <typename Iterator1, typename Iterator2> struct is_sorted_iterator { typedef bool value_type; typedef value_type& reference; typedef value_type const_reference; typedef value_type* pointer; typedef typename std::iterator_traits<Iterator1>::difference_type difference_type; typedef typename std::iterator_traits<Iterator1>::iterator_category iterator_category; // constructor NVBIO_FORCEINLINE NVBIO_HOST_DEVICE 
is_sorted_iterator(const Iterator1 _it1, const Iterator2 _it2) : it1( _it1 ), it2( _it2 ) {} // dereference operator NVBIO_FORCEINLINE NVBIO_HOST_DEVICE bool operator[] (const uint64 i) const { return it1[i] <= it2[i]; } // dereference operator NVBIO_FORCEINLINE NVBIO_HOST_DEVICE bool operator* () const { return it1[0] <= it2[0]; } // dereference operator NVBIO_FORCEINLINE NVBIO_HOST_DEVICE is_sorted_iterator& operator++ () { ++it1; ++it2; return *this; } Iterator1 it1; Iterator2 it2; }; // operator+ template <typename T1, typename T2> NVBIO_FORCEINLINE NVBIO_HOST_DEVICE is_sorted_iterator<T1,T2> operator+ (const is_sorted_iterator<T1,T2> it, const int64 i) { return is_sorted_iterator<T1,T2>( it.it1 + i, it.it2 + i ); } // operator- template <typename T1, typename T2> NVBIO_FORCEINLINE NVBIO_HOST_DEVICE int64 operator- (const is_sorted_iterator<T1,T2> it1, const is_sorted_iterator<T1,T2> it2) { return it1.it1 - it2.it1; } // operator!= template <typename T1, typename T2> NVBIO_FORCEINLINE NVBIO_HOST_DEVICE bool operator!= (const is_sorted_iterator<T1,T2> it1, const is_sorted_iterator<T1,T2> it2) { return it1.it1 != it2.it1; } // operator== template <typename T1, typename T2> NVBIO_FORCEINLINE NVBIO_HOST_DEVICE bool operator== (const is_sorted_iterator<T1,T2> it1, const is_sorted_iterator<T1,T2> it2) { return it1.it1 == it2.it1; } // a pseudo-iterator to evaluate the predicate (hd[i] || (it1[i] <= it2[i])) for arbitrary iterator pairs // template <typename Iterator1, typename Iterator2, typename Headflags> struct is_segment_sorted_iterator { typedef bool value_type; typedef value_type& reference; typedef value_type const_reference; typedef value_type* pointer; typedef typename std::iterator_traits<Iterator1>::difference_type difference_type; typedef typename std::iterator_traits<Iterator1>::iterator_category iterator_category; // constructor NVBIO_FORCEINLINE NVBIO_HOST_DEVICE is_segment_sorted_iterator(const Iterator1 _it1, const Iterator2 _it2, const Headflags 
_hd) : it1( _it1 ), it2( _it2 ), hd(_hd) {} // dereference operator NVBIO_FORCEINLINE NVBIO_HOST_DEVICE bool operator[] (const uint64 i) const { return hd[i] || (it1[i] <= it2[i]); } // dereference operator NVBIO_FORCEINLINE NVBIO_HOST_DEVICE bool operator* () const { return hd[0] || (it1[0] <= it2[0]); } // dereference operator NVBIO_FORCEINLINE NVBIO_HOST_DEVICE is_segment_sorted_iterator& operator++ () { ++it1; ++it2; ++hd; return *this; } Iterator1 it1; Iterator2 it2; Headflags hd; }; // operator+ template <typename T1, typename T2, typename H> NVBIO_FORCEINLINE NVBIO_HOST_DEVICE is_segment_sorted_iterator<T1,T2,H> operator+ (const is_segment_sorted_iterator<T1,T2,H> it, const int64 i) { return is_segment_sorted_iterator<T1,T2,H>( it.it1 + i, it.it2 + i, it.hd + i ); } // operator- template <typename T1, typename T2, typename H> NVBIO_FORCEINLINE NVBIO_HOST_DEVICE int64 operator- (const is_segment_sorted_iterator<T1,T2,H> it1, const is_segment_sorted_iterator<T1,T2,H> it2) { return it1.it1 - it2.it1; } // operator!= template <typename T1, typename T2, typename H> NVBIO_FORCEINLINE NVBIO_HOST_DEVICE bool operator!= (const is_segment_sorted_iterator<T1,T2,H> it1, const is_segment_sorted_iterator<T1,T2,H> it2) { return it1.it1 != it2.it1; } // operator== template <typename T1, typename T2, typename H> NVBIO_FORCEINLINE NVBIO_HOST_DEVICE bool operator== (const is_segment_sorted_iterator<T1,T2,H> it1, const is_segment_sorted_iterator<T1,T2,H> it2) { return it1.it1 == it2.it1; } // return true if the items in the range [0,n) are sorted // template <typename system_tag, typename Iterator> bool is_sorted( const uint32 n, const Iterator values) { return all<system_tag>( n-1, is_sorted_iterator<Iterator,Iterator>( values, values+1 ) ); } // return true if the items in the range [0,n) are sorted by segment, where // the beginning of each segment is identified by a set head flag // template <typename system_tag, typename Iterator, typename Headflags> bool 
is_segment_sorted( const uint32 n, const Iterator values, const Headflags flags) { return all<system_tag>( n-1, is_segment_sorted_iterator<Iterator,Iterator,Headflags>( values, values+1, flags+1 ) ); } // invoke a functor for each element of the given sequence // template <typename Iterator, typename Functor> void for_each( const host_tag tag, const uint64 n, const Iterator in, Functor functor) { #if defined(_OPENMP) #pragma omp parallel for if (n >= 256) #endif for (int64 i = 0; i < int64(n); ++i) functor( in[i] ); } // invoke a functor for each element of the given sequence // template <typename Iterator, typename Functor> void for_each( const device_tag tag, const uint64 n, const Iterator in, Functor functor) { thrust::for_each( in, in + n, functor ); } // invoke a functor for each element of the given sequence // template <typename system_tag, typename Iterator, typename Functor> void for_each( const uint64 n, const Iterator in, Functor functor) { return for_each( system_tag(), n, in, functor ); } // apply a functor to each element of the given sequence // template <typename Iterator, typename Output, typename Functor> void transform( const device_tag tag, const uint64 n, const Iterator in, const Output out, const Functor functor) { thrust::transform( in, in + n, out, functor ); } // apply a functor to each element of the given sequence // template <typename Iterator, typename Output, typename Functor> void transform( const host_tag tag, const uint32 n, const Iterator in, const Output out, const Functor functor) { #if defined(_OPENMP) #pragma omp parallel for if (n >= 256) #endif for (int64 i = 0; i < int64(n); ++i) out[i] = functor( in[i] ); } // apply a binary functor to each pair of elements of the given sequences // template <typename Iterator1, typename Iterator2, typename Output, typename Functor> void transform( const device_tag tag, const uint32 n, const Iterator1 in1, const Iterator2 in2, const Output out, const Functor functor) { thrust::transform( 
in1, in1 + n, in2, out, functor ); } // apply a binary functor to each pair of elements of the given sequences // template <typename Iterator1, typename Iterator2, typename Output, typename Functor> void transform( const host_tag tag, const uint32 n, const Iterator1 in1, const Iterator2 in2, const Output out, const Functor functor) { #if defined(_OPENMP) #pragma omp parallel for if (n >= 256) #endif for (int64 i = 0; i < int64(n); ++i) out[i] = functor( in1[i], in2[i] ); } // apply a functor to each element of the given sequence // template <typename system_tag, typename Iterator, typename Output, typename Functor> void transform( const uint32 n, const Iterator in, const Output out, const Functor functor) { transform( system_tag(), n, in, out, functor ); } // apply a binary functor to each pair of elements of the given sequences // template <typename system_tag, typename Iterator1, typename Iterator2, typename Output, typename Functor> void transform( const uint32 n, const Iterator1 in1, const Iterator2 in2, const Output out, const Functor functor) { transform( system_tag(), n, in1, in2, out, functor ); } // host-wide reduce // // \param n number of items to reduce // \param in a system iterator // \param op the binary reduction operator // \param temp_storage some temporary storage // template <typename InputIterator, typename BinaryOp> typename std::iterator_traits<InputIterator>::value_type reduce( host_tag tag, const uint32 n, InputIterator in, BinaryOp op, nvbio::vector<host_tag,uint8>& temp_storage) { return thrust::reduce( in, in + n, 0u, op ); } // host-wide inclusive scan // // \param n number of items to reduce // \param in a device input iterator // \param out a device output iterator // \param op the binary reduction operator // \param temp_storage some temporary storage // template <typename InputIterator, typename OutputIterator, typename BinaryOp> void inclusive_scan( host_tag tag, const uint32 n, InputIterator in, OutputIterator out, BinaryOp op, 
nvbio::vector<host_tag,uint8>& temp_storage) { thrust::inclusive_scan( in, in + n, out, op ); } // host-wide exclusive scan // // \param n number of items to reduce // \param in a device input iterator // \param out a device output iterator // \param op the binary reduction operator // \param identity the identity element // \param temp_storage some temporary storage // template <typename InputIterator, typename OutputIterator, typename BinaryOp, typename Identity> void exclusive_scan( host_tag tag, const uint32 n, InputIterator in, OutputIterator out, BinaryOp op, Identity identity, nvbio::vector<host_tag,uint8>& temp_storage) { thrust::exclusive_scan( in, in + n, out, identity, op ); } #if defined(__CUDACC__) // system-wide reduce // // \param n number of items to reduce // \param in a system iterator // \param op the binary reduction operator // \param temp_storage some temporary storage // template <typename InputIterator, typename BinaryOp> typename std::iterator_traits<InputIterator>::value_type reduce( device_tag tag, const uint32 n, InputIterator in, BinaryOp op, nvbio::vector<device_tag,uint8>& temp_storage) { return cuda::reduce( n, in, op, temp_storage ); } // device-wide inclusive scan // // \param n number of items to reduce // \param in a device input iterator // \param out a device output iterator // \param op the binary reduction operator // \param temp_storage some temporary storage // template <typename InputIterator, typename OutputIterator, typename BinaryOp> void inclusive_scan( device_tag tag, const uint32 n, InputIterator in, OutputIterator out, BinaryOp op, nvbio::vector<device_tag,uint8>& temp_storage) { cuda::inclusive_scan( n, in, out, op, temp_storage ); } // device-wide exclusive scan // // \param n number of items to reduce // \param in a device input iterator // \param out a device output iterator // \param op the binary reduction operator // \param identity the identity element // \param temp_storage some temporary storage // 
template <typename InputIterator, typename OutputIterator, typename BinaryOp, typename Identity> void exclusive_scan( device_tag tag, const uint32 n, InputIterator in, OutputIterator out, BinaryOp op, Identity identity, nvbio::vector<device_tag,uint8>& temp_storage) { cuda::exclusive_scan( n, in, out, op, identity, temp_storage ); } #endif // system-wide reduce // // \param n number of items to reduce // \param in a system iterator // \param op the binary reduction operator // \param temp_storage some temporary storage // template <typename system_tag, typename InputIterator, typename BinaryOp> typename std::iterator_traits<InputIterator>::value_type reduce( const uint32 n, InputIterator in, BinaryOp op, nvbio::vector<system_tag,uint8>& temp_storage) { return reduce( system_tag(), n, in, op, temp_storage ); } // device-wide inclusive scan // // \param n number of items to reduce // \param in a device input iterator // \param out a device output iterator // \param op the binary reduction operator // \param temp_storage some temporary storage // template <typename system_tag, typename InputIterator, typename OutputIterator, typename BinaryOp> void inclusive_scan( const uint32 n, InputIterator in, OutputIterator out, BinaryOp op, nvbio::vector<system_tag,uint8>& temp_storage) { inclusive_scan( system_tag(), n, in, out, op, temp_storage ); } // device-wide exclusive scan // // \param n number of items to reduce // \param in a device input iterator // \param out a device output iterator // \param op the binary reduction operator // \param identity the identity element // \param temp_storage some temporary storage // template <typename system_tag, typename InputIterator, typename OutputIterator, typename BinaryOp, typename Identity> void exclusive_scan( const uint32 n, InputIterator in, OutputIterator out, BinaryOp op, Identity identity, nvbio::vector<system_tag,uint8>& temp_storage) { exclusive_scan( system_tag(), n, in, out, op, identity, temp_storage ); } // host-wide 
copy of flagged items // // \param n number of input items // \param in a input iterator // \param flags a flags iterator // \param out a output iterator // \param temp_storage some temporary storage // // \return the number of copied items // template <typename InputIterator, typename FlagsIterator, typename OutputIterator> uint32 copy_flagged( const host_tag tag, const uint32 n, InputIterator in, FlagsIterator flags, OutputIterator out, nvbio::vector<host_tag,uint8>& temp_storage) { return uint32( thrust::copy_if( in, in + n, flags, out, nvbio::is_true_functor<bool>() ) - out ); } // host-wide copy of predicated items // // \param n number of input items // \param in a input iterator // \param flags a flags iterator // \param out a output iterator // \param temp_storage some temporary storage // // \return the number of copied items // template <typename InputIterator, typename OutputIterator, typename Predicate> uint32 copy_if( const host_tag tag, const uint32 n, InputIterator in, OutputIterator out, const Predicate pred, nvbio::vector<host_tag,uint8>& temp_storage) { return uint32( thrust::copy_if( in, in + n, out, pred ) - out ); } // system-wide run-length encode // // \param n number of input items // \param in a system input iterator // \param out a system output iterator // \param counts a system output count iterator // \param temp_storage some temporary storage // // \return the number of copied items // template <typename InputIterator, typename OutputIterator, typename CountIterator> uint32 runlength_encode( const host_tag tag, const uint32 n, InputIterator in, OutputIterator out, CountIterator counts, nvbio::vector<host_tag,uint8>& temp_storage) { return uint32( thrust::reduce_by_key( in, in + n, thrust::make_constant_iterator<uint32>( 1u ), out, counts ).first - out ); }; // system-wide run-length encode // // \param n number of input items // \param keys_in a system input iterator // \param values_in a system input iterator // \param keys_out a 
system output iterator // \param values_out a system output iterator // \param reduction_op a reduction operator // \param temp_storage some temporary storage // // \return the number of copied items // template <typename KeyIterator, typename ValueIterator, typename OutputKeyIterator, typename OutputValueIterator, typename ReductionOp> uint32 reduce_by_key( const host_tag tag, const uint32 n, KeyIterator keys_in, ValueIterator values_in, OutputKeyIterator keys_out, OutputValueIterator values_out, ReductionOp reduction_op, nvbio::vector<host_tag,uint8>& temp_storage) { typedef typename std::iterator_traits<KeyIterator>::value_type key_type; return uint32( thrust::reduce_by_key( keys_in, keys_in + n, values_in, keys_out, values_out, nvbio::equal_functor<key_type>(), reduction_op ).first - keys_out ); } #if defined(__CUDACC__) // device-wide copy of flagged items // // \param n number of input items // \param in a input iterator // \param flags a flags iterator // \param out a output iterator // \param temp_storage some temporary storage // // \return the number of copied items // template <typename InputIterator, typename FlagsIterator, typename OutputIterator> uint32 copy_flagged( const device_tag tag, const uint32 n, InputIterator in, FlagsIterator flags, OutputIterator out, nvbio::vector<device_tag,uint8>& temp_storage) { return cuda::copy_flagged( n, in, flags, out, temp_storage ); } // device-wide copy of predicated items // // \param n number of input items // \param in a input iterator // \param flags a flags iterator // \param out a output iterator // \param temp_storage some temporary storage // // \return the number of copied items // template <typename InputIterator, typename OutputIterator, typename Predicate> uint32 copy_if( const device_tag tag, const uint32 n, InputIterator in, OutputIterator out, const Predicate pred, nvbio::vector<device_tag,uint8>& temp_storage) { return cuda::copy_if( n, in, out, pred, temp_storage ); } // system-wide run-length 
encode // // \param n number of input items // \param in a device input iterator // \param out a device output iterator // \param counts a device output count iterator // \param temp_storage some temporary storage // // \return the number of copied items // template <typename InputIterator, typename OutputIterator, typename CountIterator> uint32 runlength_encode( const device_tag tag, const uint32 n, InputIterator in, OutputIterator out, CountIterator counts, nvbio::vector<device_tag,uint8>& temp_storage) { return cuda::runlength_encode( n, in, out, counts, temp_storage ); }; // device-wide run-length encode // // \param n number of input items // \param keys_in a device input iterator // \param values_in a device input iterator // \param keys_out a device output iterator // \param values_out a device output iterator // \param reduction_op a reduction operator // \param temp_storage some temporary storage // // \return the number of copied items // template <typename KeyIterator, typename ValueIterator, typename OutputKeyIterator, typename OutputValueIterator, typename ReductionOp> uint32 reduce_by_key( const device_tag tag, const uint32 n, KeyIterator keys_in, ValueIterator values_in, OutputKeyIterator keys_out, OutputValueIterator values_out, ReductionOp reduction_op, nvbio::vector<device_tag,uint8>& temp_storage) { return cuda::reduce_by_key( n, keys_in, values_in, keys_out, values_out, reduction_op, temp_storage ); } #endif // system-wide copy of flagged items // // \param n number of input items // \param in a device input iterator // \param flags a device flags iterator // \param out a device output iterator // \param temp_storage some temporary storage // // \return the number of copied items // template <typename system_tag, typename InputIterator, typename FlagsIterator, typename OutputIterator> uint32 copy_flagged( const uint32 n, InputIterator in, FlagsIterator flags, OutputIterator out, nvbio::vector<system_tag,uint8>& temp_storage) { return 
copy_flagged( system_tag(), n, in, flags, out, temp_storage ); }; // system-wide copy of predicated items // // \param n number of input items // \param in a device input iterator // \param out a device output iterator // \param pred a unary predicate functor // \param temp_storage some temporary storage // // \return the number of copied items // template <typename system_tag, typename InputIterator, typename OutputIterator, typename Predicate> uint32 copy_if( const uint32 n, InputIterator in, OutputIterator out, const Predicate pred, nvbio::vector<system_tag,uint8>& temp_storage) { return copy_if( system_tag(), n, in, out, pred, temp_storage ); }; // system-wide run-length encode // // \param n number of input items // \param in a system input iterator // \param out a system output iterator // \param counts a system output count iterator // \param temp_storage some temporary storage // // \return the number of copied items // template <typename system_tag, typename InputIterator, typename OutputIterator, typename CountIterator> uint32 runlength_encode( const uint32 n, InputIterator in, OutputIterator out, CountIterator counts, nvbio::vector<system_tag,uint8>& temp_storage) { return runlength_encode( system_tag(), n, in, out, counts, temp_storage ); }; // system-wide run-length encode // // \param n number of input items // \param keys_in a system input iterator // \param values_in a system input iterator // \param keys_out a system output iterator // \param values_out a system output iterator // \param reduction_op a reduction operator // \param temp_storage some temporary storage // // \return the number of copied items // template <typename system_tag, typename KeyIterator, typename ValueIterator, typename OutputKeyIterator, typename OutputValueIterator, typename ReductionOp> uint32 reduce_by_key( const uint32 n, KeyIterator keys_in, ValueIterator values_in, OutputKeyIterator keys_out, OutputValueIterator values_out, ReductionOp reduction_op, 
nvbio::vector<system_tag,uint8>& temp_storage) { return reduce_by_key( system_tag(), n, keys_in, values_in, keys_out, values_out, reduction_op, temp_storage ); } // device-wide lower_bound // // \param n number of input items // \param values a system input iterator of values to be searched // \param keys a system input iterator of sorted keys // \param indices a system output iterator // template <typename KeyIterator, typename ValueIterator, typename OutputIterator> void lower_bound( const device_tag tag, const uint32 n, ValueIterator values, const uint32 n_keys, KeyIterator keys, OutputIterator indices) { thrust::lower_bound( keys, keys + n_keys, values, values + n, indices ); } // host-wide lower_bound // // \param n number of input items // \param values a system input iterator of values to be searched // \param keys a system input iterator of sorted keys // \param indices a system output iterator // template <typename KeyIterator, typename ValueIterator, typename OutputIterator> void lower_bound( const host_tag tag, const uint32 n, ValueIterator values, const uint32 n_keys, KeyIterator keys, OutputIterator indices) { #pragma omp parallel for for (long i = 0; i < long(n); ++i) indices[i] = uint32( lower_bound( values[i], keys, n_keys ) - keys ); } // system-wide lower_bound // // \param n number of input items // \param values a system input iterator of values to be searched // \param keys a system input iterator of sorted keys // \param indices a system output iterator // template <typename system_tag, typename KeyIterator, typename ValueIterator, typename OutputIterator> void lower_bound( const uint32 n, ValueIterator values, const uint32 n_keys, KeyIterator keys, OutputIterator indices) { lower_bound( system_tag(), n, values, n_keys, keys, indices ); } // device-wide upper_bound // // \param n number of input items // \param values a system input iterator of values to be searched // \param keys a system input iterator of sorted keys // \param indices a 
system output iterator // template <typename KeyIterator, typename ValueIterator, typename OutputIterator> void upper_bound( const device_tag tag, const uint32 n, ValueIterator values, const uint32 n_keys, KeyIterator keys, OutputIterator indices) { thrust::upper_bound( keys, keys + n_keys, values, values + n, indices ); } // host-wide upper_bound // // \param n number of input items // \param values a system input iterator of values to be searched // \param keys a system input iterator of sorted keys // \param indices a system output iterator // template <typename KeyIterator, typename ValueIterator, typename OutputIterator> void upper_bound( const host_tag tag, const uint32 n, ValueIterator values, const uint32 n_keys, KeyIterator keys, OutputIterator indices) { #pragma omp parallel for for (long i = 0; i < long(n); ++i) indices[i] = uint32( upper_bound( values[i], keys, n_keys ) - keys ); } // system-wide upper_bound // // \param n number of input items // \param values a system input iterator of values to be searched // \param keys a system input iterator of sorted keys // \param indices a system output iterator // template <typename system_tag, typename KeyIterator, typename ValueIterator, typename OutputIterator> void upper_bound( const uint32 n, ValueIterator values, const uint32 n_keys, KeyIterator keys, OutputIterator indices) { upper_bound( system_tag(), n, values, n_keys, keys, indices ); } #if defined(__CUDACC__) // device-wide sort // // \param n number of input items // \param keys a system input iterator of keys to be sorted // template <typename KeyIterator> void radix_sort( const device_tag tag, const uint32 n, KeyIterator keys, nvbio::vector<device_tag,uint8>& temp_storage) { typedef typename std::iterator_traits<KeyIterator>::value_type key_type; cuda::alloc_temp_storage( temp_storage, 2 * n * sizeof(key_type) ); key_type* keys_ptr = reinterpret_cast<key_type*>( raw_pointer( temp_storage ) ); thrust::device_ptr<key_type> keys_buf( keys_ptr ); 
thrust::copy( keys, keys + n, keys_buf ); cuda::SortBuffers<key_type*> sort_buffers; sort_buffers.keys[0] = keys_ptr; sort_buffers.keys[1] = keys_ptr + n; cuda::SortEnactor sort_enactor; sort_enactor.sort( n, sort_buffers ); thrust::copy( keys_buf + sort_buffers.selector * n, keys_buf + sort_buffers.selector * n + n, keys ); } // device-wide sort by key // // \param n number of input items // \param keys a system input iterator of keys to be sorted // \param values a system input iterator of values to be sorted // template <typename KeyIterator, typename ValueIterator> void radix_sort( const device_tag tag, const uint32 n, KeyIterator keys, ValueIterator values, nvbio::vector<device_tag,uint8>& temp_storage) { typedef typename std::iterator_traits<KeyIterator>::value_type key_type; typedef typename std::iterator_traits<ValueIterator>::value_type value_type; const uint32 aligned_key_bytes = align<16>( 2 * n * sizeof(key_type) ); const uint32 aligned_val_bytes = 2 * n * sizeof(value_type); cuda::alloc_temp_storage( temp_storage, aligned_key_bytes + aligned_val_bytes ); key_type* keys_ptr = reinterpret_cast<key_type*>( raw_pointer( temp_storage ) ); value_type* values_ptr = reinterpret_cast<value_type*>( raw_pointer( temp_storage ) + aligned_key_bytes ); thrust::device_ptr<key_type> keys_buf( keys_ptr ); thrust::device_ptr<key_type> values_buf( values_ptr ); thrust::copy( keys, keys + n, keys_buf ); thrust::copy( values, values + n, values_buf ); cuda::SortBuffers<key_type*, value_type*> sort_buffers; sort_buffers.keys[0] = keys_ptr; sort_buffers.keys[1] = keys_ptr + n; sort_buffers.values[0] = values_ptr; sort_buffers.values[1] = values_ptr + n; cuda::SortEnactor sort_enactor; sort_enactor.sort( n, sort_buffers ); thrust::copy( keys_buf + sort_buffers.selector * n, keys_buf + sort_buffers.selector * n + n, keys ); thrust::copy( values_buf + sort_buffers.selector * n, values_buf + sort_buffers.selector * n + n, values ); } #endif // host-wide sort // // \param n 
number of input items // \param keys a system input iterator of keys to be sorted // template <typename KeyIterator> void radix_sort( const host_tag tag, const uint32 n, KeyIterator keys, nvbio::vector<host_tag,uint8>& temp_storage) { thrust::sort( keys, keys + n ); } // system-wide sort // // \param n number of input items // \param keys a system input iterator of keys to be sorted // template <typename system_tag, typename KeyIterator> void radix_sort( const uint32 n, KeyIterator keys, nvbio::vector<system_tag,uint8>& temp_storage) { radix_sort( system_tag(), n, keys, temp_storage ); } // host-wide sort by key // // \param n number of input items // \param keys a system input iterator of keys to be sorted // \param values a system input iterator of values to be sorted // template <typename KeyIterator, typename ValueIterator> void radix_sort( const host_tag tag, const uint32 n, KeyIterator keys, ValueIterator values, nvbio::vector<host_tag,uint8>& temp_storage) { thrust::sort_by_key( keys, keys + n, values, temp_storage ); } // system-wide sort by key // // \param n number of input items // \param keys a system input iterator of keys to be sorted // \param values a system input iterator of values to be sorted // template <typename system_tag, typename KeyIterator, typename ValueIterator> void radix_sort( const uint32 n, KeyIterator keys, ValueIterator values, nvbio::vector<system_tag,uint8>& temp_storage) { radix_sort( system_tag(), n, keys, values, temp_storage ); } template < typename key_iterator1, typename key_iterator2> uint2 corank( const int32 i, const key_iterator1 A, const int32 m, const key_iterator2 B, const int32 n) { int32 j = min( i, m ); int32 k = i - j; int32 j_lo = i >= n ? 
i - n : 0; int32 k_lo = 0; while (1) { if ((j > 0 || k < n) && A[j-1] > B[k]) { // decrease j const int32 delta = util::divide_ri( j - j_lo, 2 ); k_lo = k; j -= delta; k += delta; assert( j + k == i ); } else if ((k > 0 || j < m) && B[k-1] >= A[j]) { // decrease k const int32 delta = util::divide_ri( k - k_lo, 2 ); j_lo = j; j += delta; k -= delta; assert( j + k == i ); } else break; } return make_uint2( uint32(j), uint32(k) ); } template < typename key_iterator1, typename key_iterator2, typename value_iterator1, typename value_iterator2, typename key_output, typename value_output> void merge_by_key( const host_tag tag, const uint32 A_len, const uint32 B_len, const key_iterator1 A_keys, const key_iterator2 B_keys, const value_iterator1 A_values, const value_iterator2 B_values, key_output C_keys, value_output C_values) { if (A_len == 0) { #pragma omp parallel for for (int32 i = 0; i < int32( B_len ); ++i) { C_keys[i] = A_keys[i]; C_values[i] = A_values[i]; } } else if (B_len == 0) { #pragma omp parallel for for (int32 i = 0; i < int32( A_len ); ++i) { C_keys[i] = A_keys[i]; C_values[i] = A_values[i]; } } const uint32 n_threads = (uint32)omp_get_num_procs(); nvbio::vector<host_tag,uint32> A_diag( n_threads+1 ); nvbio::vector<host_tag,uint32> B_diag( n_threads+1 ); const uint32 C_len = A_len + B_len; A_diag[ n_threads ] = 0; B_diag[ n_threads ] = 0; A_diag[ n_threads ] = A_len; B_diag[ n_threads ] = B_len; const uint32 n_partition = util::divide_ri( C_len, n_threads ); #pragma omp parallel for num_threads(n_threads) for (int32 i = 1; i < int32( n_threads ); ++i) { const int32 index = i * n_partition; const uint2 jk = corank( index, A_keys, A_len, B_keys, B_len ); A_diag[i] = jk.x; B_diag[i] = jk.y; } #pragma omp parallel for num_threads(n_threads) for (int32 i = 0; i < int32( n_threads ); ++i) { nvbio::merge_by_key( A_keys + A_diag[i], A_keys + A_diag[i+1], B_keys + B_diag[i], B_keys + B_diag[i+1], A_values + A_diag[i], B_values + B_diag[i], C_keys + i * n_partition, 
C_values + i * n_partition ); } /* for (uint32 i = 1; i < C_len; ++i) { if (C_keys[i-1] > C_keys[i]) { fprintf(stderr, "merging error at %u: %llu, %llu\n", i, C_keys[i-1], C_keys[i] ); exit(1); } }*/ } template < typename key_iterator1, typename key_iterator2, typename value_iterator1, typename value_iterator2, typename key_output, typename value_output> void merge_by_key( const device_tag tag, const uint32 A_len, const uint32 B_len, const key_iterator1 A_keys, const key_iterator2 B_keys, const value_iterator1 A_values, const value_iterator2 B_values, key_output C_keys, value_output C_values) { thrust::merge_by_key( A_keys, A_keys + A_len, B_keys, B_keys + A_len, A_values, B_values, C_keys, C_values ); } template < typename system_tag, typename key_iterator1, typename key_iterator2, typename value_iterator1, typename value_iterator2, typename key_output, typename value_output> void merge_by_key( const uint32 A_len, const uint32 B_len, const key_iterator1 A_keys, const key_iterator2 B_keys, const value_iterator1 A_values, const value_iterator2 B_values, key_output C_keys, value_output C_values, nvbio::vector<system_tag,uint8>& temp_storage) { merge_by_key( system_tag(), A_len, B_len, A_keys, B_keys, A_values, B_values, C_keys, C_values ); } #if defined(__CUDACC__) /// A very simple for_each CUDA kernel /// template <typename iterator_type, typename functor_type> __global__ void for_each_kernel(const uint64 n, const iterator_type in, const functor_type f) { const uint32 grid_size = blockDim.x * gridDim.x; for (uint64 i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += grid_size) f( in[i] ); }; #endif // ask the optimizer how many blocks we should try using next // template <typename KernelFunction> uint32 for_each_enactor<device_tag>::suggested_blocks(KernelFunction kernel, const uint32 cta_size) const { #if defined(__CUDACC__) if (m_blocks_hi == 0) return cuda::multiprocessor_count() * cuda::max_active_blocks_per_multiprocessor( kernel, cta_size, 0u ); else if 
(m_blocks_lo == 0) return cuda::multiprocessor_count(); else return cuda::multiprocessor_count() * (m_blocks_lo + m_blocks_hi) / 2; #else return 0u; #endif } // update the optimizer's internal state with the latest speed data-point // inline void for_each_enactor<device_tag>::update(const uint32 n_blocks, const float speed) { #if defined(__CUDACC__) // carry out a little binary search over the best number of blocks/SM if (m_blocks_hi == 0) { m_blocks_hi = n_blocks / cuda::multiprocessor_count(); m_speed_hi = speed; } else if (m_blocks_lo == 0) { m_blocks_lo = n_blocks / cuda::multiprocessor_count(); m_speed_lo = speed; } else if (m_speed_lo > m_speed_hi) { m_blocks_hi = n_blocks / cuda::multiprocessor_count(); m_speed_hi = speed; } else { m_blocks_lo = n_blocks / cuda::multiprocessor_count(); m_speed_lo = speed; } // TODO: once the optimizer settles to a given value, it will never change: // we should explore using occasional "mutations" to adapt to possibly // changing conditions... #endif } // enact the for_each // template <typename Iterator, typename Functor> void for_each_enactor<device_tag>::operator () ( const uint64 n, const Iterator in, Functor functor) { #if defined(__CUDACC__) const uint32 blockdim = 128; const uint32 n_blocks = suggested_blocks( for_each_kernel<Iterator,Functor>, blockdim ); cuda::Timer timer; timer.start(); for_each_kernel<<<n_blocks,blockdim>>>( n, in, functor ); timer.stop(); update( n_blocks, float(n) / timer.seconds() ); #endif } } // namespace nvbio
ep.c
/*-------------------------------------------------------------------- NAS Parallel Benchmarks 3.0 structured OpenMP C versions - EP This benchmark is an OpenMP C version of the NPB EP code. The OpenMP C 2.3 versions are derived by RWCP from the serial Fortran versions in "NPB 2.3-serial" developed by NAS. 3.0 translation is performed by the UVSQ. Permission to use, copy, distribute and modify this software for any purpose with or without fee is hereby granted. This software is provided "as is" without express or implied warranty. Information on OpenMP activities at RWCP is available at: http://pdplab.trc.rwcp.or.jp/pdperf/Omni/ Information on NAS Parallel Benchmarks 2.3 is available at: http://www.nas.nasa.gov/NAS/NPB/ --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- Author: P. O. Frederickson D. H. Bailey A. C. Woo OpenMP C version: S. Satoh 3.0 structure translation: M. Popov --------------------------------------------------------------------*/ #include "npb-C.h" #include "npbparams.h" /* parameters */ #define MK 16 #define MM (M - MK) #define NN (1 << MM) #define NK (1 << MK) #define NQ 10 #define EPSILON 1.0e-8 #define A 1220703125.0 #define S 271828183.0 #define TIMERS_ENABLED FALSE /* global variables */ /* common /storage/ */ static double x[2*NK]; #pragma omp threadprivate(x) static double q[NQ]; /*-------------------------------------------------------------------- program EMBAR c-------------------------------------------------------------------*/ /* c This is the serial version of the APP Benchmark 1, c the "embarassingly parallel" benchmark. c c M is the Log_2 of the number of complex pairs of uniform (0, 1) random c numbers. MK is the Log_2 of the size of each batch of uniform random c numbers. MK can be set for convenience on a given system, since it does c not affect the results. 
*/

int main(int argc, char **argv)
{
    double Mops, t1, t2, t3, t4, x1, x2, sx, sy, tm, an, tt, gc;
    double dum[3] = { 1.0, 1.0, 1.0 };
    int np, ierr, node, no_nodes, i, ik, kk, l, k, nit, ierrcode,
	no_large_nodes, np_add, k_offset, j;
    int nthreads = 1;
    boolean verified;
    char size[13+1];	/* character*13 */

/*  Because the size of the problem is too large to store in a 32-bit
    integer for some classes, we put it into a string (for printing).
    Have to strip off the decimal point put in there by the floating
    point print statement (internal file) */

    printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version"
	   " - EP Benchmark\n");
    sprintf(size, "%12.0f", pow(2.0, M+1));
    /* fix: sprintf writes at most 12 characters plus the NUL at size[12];
       the original loop started at j = 13 and read the never-written
       (uninitialized) byte size[13] */
    for (j = 12; j >= 1; j--) {
	if (size[j] == '.') size[j] = ' ';
    }
    printf(" Number of random numbers generated: %13s\n", size);

    verified = FALSE;

/*  Compute the number of "batches" of random number pairs generated
    per processor. Adjust if the number of processors does not evenly
    divide the total number */

    np = NN;

/*  Call the random number generator functions and initialize
    the x-array to reduce the effects of paging on the timings.
    Also, call all mathematical functions that are used. Make
    sure these initializations cannot be eliminated as dead code. */

    vranlc(0, &(dum[0]), dum[1], &(dum[2]));
    dum[0] = randlc(&(dum[1]), dum[2]);

#pragma omp parallel for default(shared) private(i)
    for (i = 0; i < 2*NK; i++) x[i] = -1.0e99;

    Mops = log(sqrt(fabs(max(1.0, 1.0))));

    timer_clear(1);
    timer_clear(2);
    timer_clear(3);
    timer_start(1);

    vranlc(0, &t1, A, x);

/*  Compute AN = A ^ (2 * NK) (mod 2^46). */

    t1 = A;

    for ( i = 1; i <= MK+1; i++) {
	t2 = randlc(&t1, t1);
    }

    an = t1;
    tt = S;
    gc = 0.0;
    sx = 0.0;
    sy = 0.0;

    for ( i = 0; i <= NQ - 1; i++) {
	q[i] = 0.0;
    }

/*  Each instance of this loop may be performed independently. We compute
    the k offsets separately to take into account the fact that some nodes
    have more numbers to generate than others */

    k_offset = -1;

#pragma omp parallel copyin(x)
{
    /* per-thread working variables shadow the outer declarations */
    double t1, t2, t3, t4, x1, x2;
    int kk, i, ik, l;
    double qq[NQ];		/* private copy of q[0:NQ-1] */

    for (i = 0; i < NQ; i++) qq[i] = 0.0;

#pragma omp for reduction(+:sx,sy) schedule(static)
    for (k = 1; k <= np; k++) {
	kk = k_offset + k;
	t1 = S;
	t2 = an;

/*      Find starting seed t1 for this kk. */

	for (i = 1; i <= 100; i++) {
	    ik = kk / 2;
	    if (2 * ik != kk) t3 = randlc(&t1, t2);
	    if (ik == 0) break;
	    t3 = randlc(&t2, t2);
	    kk = ik;
	}

/*      Compute uniform pseudorandom numbers. */

	if (TIMERS_ENABLED == TRUE) timer_start(3);
	/* NOTE(review): x-1 intentionally makes vranlc's 1-based indexing
	   line up with the 0-based x[]; standard NPB idiom */
	vranlc(2*NK, &t1, A, x-1);
	if (TIMERS_ENABLED == TRUE) timer_stop(3);

/*      Compute Gaussian deviates by acceptance-rejection method and
	tally counts in concentric square annuli. This loop is not
	vectorizable. */

	if (TIMERS_ENABLED == TRUE) timer_start(2);

	for ( i = 0; i < NK; i++) {
	    x1 = 2.0 * x[2*i] - 1.0;
	    x2 = 2.0 * x[2*i+1] - 1.0;
	    t1 = pow2(x1) + pow2(x2);
	    if (t1 <= 1.0) {
		t2 = sqrt(-2.0 * log(t1) / t1);
		t3 = (x1 * t2);				/* Xi */
		t4 = (x2 * t2);				/* Yi */
		l = max(fabs(t3), fabs(t4));		/* annulus index (truncated) */
		qq[l] += 1.0;				/* counts */
		sx = sx + t3;				/* sum of Xi */
		sy = sy + t4;				/* sum of Yi */
	    }
	}

	if (TIMERS_ENABLED == TRUE) timer_stop(2);
    }

    /* fold the private tallies into the shared histogram */
#pragma omp critical
    {
	for (i = 0; i <= NQ - 1; i++) q[i] += qq[i];
    }

#if defined(_OPENMP)
#pragma omp master
    nthreads = omp_get_num_threads();
#endif /* _OPENMP */
} /* end of parallel region */

    for (i = 0; i <= NQ-1; i++) {
	gc = gc + q[i];
    }

    timer_stop(1);
    tm = timer_read(1);

    nit = 0;
    if (M == 24) {
	if((fabs((sx- (-3.247834652034740e3))/sx) <= EPSILON) &&
	   (fabs((sy- (-6.958407078382297e3))/sy) <= EPSILON)) {
	    verified = TRUE;
	}
    } else if (M == 25) {
	if ((fabs((sx- (-2.863319731645753e3))/sx) <= EPSILON) &&
	    (fabs((sy- (-6.320053679109499e3))/sy) <= EPSILON)) {
	    verified = TRUE;
	}
    } else if (M == 28) {
	if ((fabs((sx- (-4.295875165629892e3))/sx) <= EPSILON) &&
	    (fabs((sy- (-1.580732573678431e4))/sy) <= EPSILON)) {
	    verified = TRUE;
	}
    } else if (M == 30) {
	if ((fabs((sx- (4.033815542441498e4))/sx) <= EPSILON) &&
	    (fabs((sy- (-2.660669192809235e4))/sy) <= EPSILON)) {
	    verified = TRUE;
	}
    } else if (M == 32) {
	if ((fabs((sx- (4.764367927995374e4))/sx) <= EPSILON) &&
	    (fabs((sy- (-8.084072988043731e4))/sy) <= EPSILON)) {
	    verified = TRUE;
	}
    }

    Mops = pow(2.0, M+1)/tm/1000000.0;

    printf("EP Benchmark Results: \n"
	   "CPU Time = %10.4f\n"
	   "N = 2^%5d\n"
	   "No. Gaussian Pairs = %15.0f\n"
	   "Sums = %25.15e %25.15e\n"
	   "Counts:\n",
	   tm, M, gc, sx, sy);
    for (i = 0; i  <= NQ-1; i++) {
	printf("%3d %15.0f\n", i, q[i]);
    }

    c_print_results("EP", CLASS, M+1, 0, 0, nit, nthreads,
		    tm, Mops,
		    "Random numbers generated",
		    verified, NPBVERSION, COMPILETIME,
		    CS1, CS2, CS3, CS4, CS5, CS6, CS7);

    if (TIMERS_ENABLED == TRUE) {
	printf("Total time:     %f", timer_read(1));
	printf("Gaussian pairs: %f", timer_read(2));
	printf("Random numbers: %f", timer_read(3));
    }

    /* fix: main previously fell off the end, returning an
       indeterminate exit status under C89 */
    return 0;
}
pr35244.c
/* PR c++/35244 */
/* { dg-do compile } */
/* { dg-require-effective-target tls } */
/* { dg-options "-fopenmp" } */

/* Regression test: `omp threadprivate` must reject typedef names (only
   variables are valid arguments) and must accept block-scope statics. */

int v1;
typedef struct A A;
typedef int i;
/* typedef names are not variables: both of these must be diagnosed */
#pragma omp threadprivate (i)		/* { dg-error "expected identifier before" } */
#pragma omp threadprivate (A)		/* { dg-error "expected identifier before" } */
/* a file-scope variable is fine */
#pragma omp threadprivate (v1)

void foo ()
{
  static int v4;
  {
    static int v5;
    /* static locals may be made threadprivate, even from a nested block */
#pragma omp threadprivate (v4, v5)
  }
}
bfs_dijkstra.c
#include <stdio.h> #include <stdlib.h> #include <omp.h> #include "ompdist/utils.h" #include "ompdist/vector.h" #include "ompdist/graph.h" #include "ompdist/graph_gen.h" #include "ompdist/msr.h" #include "config.h" #define START 1 #define JOIN 2 int ROOT; typedef struct { int parent_label; int phase_discovered; } payload; /** * initialize_graph - Initializes the graph with basic data * * @g: a pointer to the graph object */ void initialize_graph(graph* g) { // allocate the data field for each node #pragma omp parallel for schedule(SCHEDULING_METHOD) for (int i = 0; i < g->N; i++) { node* cur = elem_at(&g->vertices, i); payload* data = malloc(sizeof(payload)); data->parent_label = -1; data->phase_discovered = -1; cur->data = data; } // set the root node to be a part of T_1 at the beginning node* root = elem_at(&g->vertices, ROOT); payload* data = root->data; data->phase_discovered = 0; } /** * broadcast_start - Broadcasts the start message to elements of T_p * * @g: a pointer to the graph object * @p: current phase * * Returns 1 if no new vertices were discovered, and 0 if new vertices were * discovered. 
*/ int broadcast_start(graph* g, int p) { int nobody_was_discovered = 1; #pragma omp parallel for schedule(SCHEDULING_METHOD) for (int i = 0; i < g->N; i++) { node* cur = elem_at(&g->vertices, i); payload* data = cur->data; // this node was just discovered in phase `p` if (data->phase_discovered == p) { // we send a "join p+1" message to all quiet neighbors for (int j = 0; j < cur->degree; j++) { node* neighbor = *((node**) elem_at(&cur->neighbors, j)); payload* neighbor_data = neighbor->data; if (neighbor_data->phase_discovered < 0) { neighbor_data->phase_discovered = p+1; neighbor_data->parent_label = cur->label; nobody_was_discovered = 0; } } } } return nobody_was_discovered; } /** * print_solution - prints a BFS Dijkstra solution * * @g: a pointer to the graph object */ void print_solution(graph* g) { int max_distance = 0; /** * Note: there's no `#pragma omp parallel` required here - this is not a * part of the solution computation. */ for (int i = 0; i < g->N; i++) { node* cur = elem_at(&g->vertices, i); payload* data = cur->data; if (data->phase_discovered > max_distance) max_distance = data->phase_discovered; INFO("node_%d: parent = %d, dist = %d\n", cur->label, data->parent_label, data->phase_discovered); } INFO("max_distance = %d\n", max_distance); } /** * Based on Roger Wattenhofer's Principles of Distributed Computing's * Algorithm 2.11 Dijkstra BFS (synchronous version), where nodes are added * in phases. 
*/ int main(int argc, char* argv[]) { int N; int M; graph* g; int iterate; int iterations = 1; if ((iterate = input_through_argv(argc, argv))) { FILE* in = fopen(argv[2], "r"); fscanf(in, "%d\n", &N); g = new_graph(N, 0); fscanf(in, "%d\n", &ROOT); g->M = M = read_graph(g, in); fclose(in); sscanf(argv[3], "%d", &iterations); } else { N = 16; M = 64; if (argc > 1) { sscanf(argv[1], "%d", &N); sscanf(argv[2], "%d", &M); } g = generate_new_connected_graph(N, M); ROOT = 0; } long long duration = 0; double total_energy = 0; for (int i = 0; i < iterations; i++) { begin_timer(); init_energy_measure(); int p = 0; int nobody_was_discovered = 0; initialize_graph(g); while (!nobody_was_discovered) { nobody_was_discovered = broadcast_start(g, p); p++; } total_energy += total_energy_used(); duration += time_elapsed(); // print_solution(g); } if (iterate) printf("%.2lf %.2lf\n", ((double) duration) / iterations, total_energy / iterations); return 0; }
q_rhashmap_mk_tid.c
/* * Copyright (c) 2019 Ramesh Subramonian <subramonian@gmail.com> * All rights reserved. * * Use is subject to license terms, as specified in the LICENSE file. */ //------------------------------------------------------ #include "q_rhashmap_common.h" #include "q_rhashmap_mk_tid.h" /* Ideally, we want to distribute the work to the threads so that * 1) they never update the same cell * 2) they (ideally) have large contiguous regions which they own i.e., * only they write in that region Dividing based on hashes gives us 1) Dividing based on locs gives us 2) However, since 1) is more important than 2), we went with 1) Note that locs doesn't give you the location of a key. It only gives you a starting point for the hunt for the location of a key */ int q_rhashmap_mk_tid( uint32_t *hashes, // input [nkeys] uint32_t nkeys, // input uint32_t nT, // input , number of threads uint8_t *tids // output [nkeys] ) { int status = 0; int chunk_size = 1024; uint64_t divinfo = fast_div32_init(nT); #pragma omp parallel for schedule(static, chunk_size) for ( uint32_t i = 0; i < nkeys; i++ ) { tids[i] = fast_rem32(hashes[i], nT, divinfo); } return status; }
parallel-1.c
// { dg-do compile } void foo() { int i; #pragma omp parallel { #pragma omp parallel { #pragma omp parallel { i++; } } } }
IJMatrix_parcsr.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * IJMatrix_ParCSR interface
 *
 *****************************************************************************/

#include "_hypre_IJ_mv.h"
#include "_hypre_parcsr_mv.h"
#include "../HYPRE.h"

/******************************************************************************
 *
 * hypre_IJMatrixCreateParCSR
 *
 * Creates the underlying ParCSR matrix object from the IJ matrix's row/col
 * partitionings, shifting the partitionings so they are relative to the
 * global first row/column, and stores it as the IJ matrix's object.
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixCreateParCSR(hypre_IJMatrix *matrix)
{
   MPI_Comm             comm = hypre_IJMatrixComm(matrix);
   HYPRE_BigInt        *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt        *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   hypre_ParCSRMatrix  *par_matrix;
   HYPRE_BigInt         row_starts[2];
   HYPRE_BigInt         col_starts[2];
   HYPRE_Int            i;

   /* make row starts relative to the global first row
      (the else branch is the no-op case: global first row == 0) */
   if (hypre_IJMatrixGlobalFirstRow(matrix))
   {
      for (i = 0; i < 2; i++)
      {
         row_starts[i] = row_partitioning[i] - hypre_IJMatrixGlobalFirstRow(matrix);
      }
   }
   else
   {
      for (i = 0; i < 2; i++)
      {
         row_starts[i] = row_partitioning[i];
      }
   }

   /* same shift for the column starts */
   if (hypre_IJMatrixGlobalFirstCol(matrix))
   {
      for (i = 0; i < 2; i++)
      {
         col_starts[i] = col_partitioning[i] - hypre_IJMatrixGlobalFirstCol(matrix);
      }
   }
   else
   {
      for (i = 0; i < 2; i++)
      {
         col_starts[i] = col_partitioning[i];
      }
   }

   par_matrix = hypre_ParCSRMatrixCreate(comm,
                                         hypre_IJMatrixGlobalNumRows(matrix),
                                         hypre_IJMatrixGlobalNumCols(matrix),
                                         row_starts, col_starts, 0, 0, 0);

   hypre_IJMatrixObject(matrix) = par_matrix;

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixSetRowSizesParCSR
 *
 *****************************************************************************/

/* Records the expected number of nonzeros per local row in the auxiliary
   matrix, creating the auxiliary matrix if it does not exist yet. */
HYPRE_Int
hypre_IJMatrixSetRowSizesParCSR(hypre_IJMatrix *matrix,
                                const HYPRE_Int *sizes)
{
   HYPRE_Int local_num_rows, local_num_cols, i, *row_space = NULL;
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);

   local_num_rows = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
   local_num_cols = (HYPRE_Int)(col_partitioning[1] - col_partitioning[0]);

   hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);

   /* reuse an existing row-space array if the aux matrix already has one */
   if (aux_matrix)
   {
      row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
   }
   if (!row_space)
   {
      row_space = hypre_CTAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
   }
   for (i = 0; i < local_num_rows; i++)
   {
      row_space[i] = sizes[i];
   }
   if (!aux_matrix)
   {
      hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, local_num_cols, row_space);
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }
   hypre_AuxParCSRMatrixRowSpace(aux_matrix) = row_space;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   /* on GPU builds, also accumulate the total number of user-declared
      on-proc elements */
   hypre_AuxParCSRMatrixUsrOnProcElmts(aux_matrix) = 0;
   for (i = 0; i < local_num_rows; i++)
   {
      hypre_AuxParCSRMatrixUsrOnProcElmts(aux_matrix) += sizes[i];
   }
#endif

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixSetDiagOffdSizesParCSR
 * sets diag_i inside the diag part of the ParCSRMatrix
 * and offd_i inside the offd part,
 * requires exact row sizes for diag and offd
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixSetDiagOffdSizesParCSR(hypre_IJMatrix *matrix,
                                     const HYPRE_Int *diag_sizes,
                                     const HYPRE_Int *offd_sizes)
{
   HYPRE_Int local_num_rows, local_num_cols;
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);

   local_num_rows = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
   local_num_cols = (HYPRE_Int)(col_partitioning[1] - col_partitioning[0]);

   hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix *)hypre_IJMatrixTranslator(matrix);

   if (!aux_matrix)
   {
      hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, local_num_cols, NULL);
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }

   /* allocate size arrays lazily, then copy in the exact per-row sizes */
   if ( hypre_AuxParCSRMatrixDiagSizes(aux_matrix) == NULL)
   {
      hypre_AuxParCSRMatrixDiagSizes(aux_matrix) = hypre_TAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
   }

   if ( hypre_AuxParCSRMatrixOffdSizes(aux_matrix) == NULL)
   {
      hypre_AuxParCSRMatrixOffdSizes(aux_matrix) = hypre_TAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
   }

   hypre_TMemcpy(hypre_AuxParCSRMatrixDiagSizes(aux_matrix), diag_sizes, HYPRE_Int, local_num_rows,
                 HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);

   hypre_TMemcpy(hypre_AuxParCSRMatrixOffdSizes(aux_matrix), offd_sizes, HYPRE_Int, local_num_rows,
                 HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);

   /* exact sizes are known, so no auxiliary (expandable) storage is needed */
   hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixSetMaxOnProcElmtsParCSR
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixSetMaxOnProcElmtsParCSR(hypre_IJMatrix *matrix,
                                      HYPRE_Int max_on_proc_elmts)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_Int local_num_rows, local_num_cols, my_id;
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   MPI_Comm comm = hypre_IJMatrixComm(matrix);

   hypre_MPI_Comm_rank(comm, &my_id);
   aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
   if (!aux_matrix)
   {
      local_num_rows = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
      local_num_cols = (HYPRE_Int)(col_partitioning[1] - col_partitioning[0]);
      hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, local_num_cols, NULL);
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }
   hypre_AuxParCSRMatrixUsrOnProcElmts(aux_matrix) = max_on_proc_elmts;
#endif
   /* host-only builds: nothing to record */
   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixSetMaxOffProcElmtsParCSR
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixSetMaxOffProcElmtsParCSR(hypre_IJMatrix *matrix,
                                       HYPRE_Int max_off_proc_elmts)
{
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_Int local_num_rows, local_num_cols, my_id;
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   MPI_Comm comm = hypre_IJMatrixComm(matrix);

   hypre_MPI_Comm_rank(comm, &my_id);
   aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
   if (!aux_matrix)
   {
      local_num_rows = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
      local_num_cols = (HYPRE_Int)(col_partitioning[1] - col_partitioning[0]);
      hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, local_num_cols, NULL);
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }
   hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_AuxParCSRMatrixUsrOffProcElmts(aux_matrix) = max_off_proc_elmts;
#endif
   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixInitializeParCSR
 *
 * initializes AuxParCSRMatrix and ParCSRMatrix as necessary
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixInitializeParCSR(hypre_IJMatrix *matrix)
{
   /* delegate with the handle's current default memory location */
   return hypre_IJMatrixInitializeParCSR_v2(matrix, hypre_HandleMemoryLocation(hypre_handle()));
}

HYPRE_Int
hypre_IJMatrixInitializeParCSR_v2(hypre_IJMatrix *matrix, HYPRE_MemoryLocation memory_location)
{
   hypre_ParCSRMatrix    *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
   hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);

   /* the aux matrix lives on host memory iff execution resolves to host */
   HYPRE_MemoryLocation memory_location_aux =
      hypre_GetExecPolicy1(memory_location) == HYPRE_EXEC_HOST ? HYPRE_MEMORY_HOST : HYPRE_MEMORY_DEVICE;

   if (hypre_IJMatrixAssembleFlag(matrix) == 0)
   {
      if (!par_matrix)
      {
         hypre_IJMatrixCreateParCSR(matrix);
         par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
      }

      HYPRE_Int local_num_rows = hypre_ParCSRMatrixNumRows(par_matrix);
      HYPRE_Int i;

      hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
      hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);

      if (!aux_matrix)
      {
         hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows,
                                     hypre_ParCSRMatrixNumCols(par_matrix), NULL);
         hypre_IJMatrixTranslator(matrix) = aux_matrix;
      }

      hypre_ParCSRMatrixInitialize_v2(par_matrix, memory_location);
      hypre_AuxParCSRMatrixInitialize_v2(aux_matrix, memory_location_aux);

      if (memory_location_aux == HYPRE_MEMORY_HOST)
      {
         /* exact sizes were supplied: build the CSR row pointers by prefix
            sum and size the data arrays accordingly */
         if (hypre_AuxParCSRMatrixDiagSizes(aux_matrix))
         {
            for (i = 0; i < local_num_rows; i++)
            {
               hypre_CSRMatrixI(diag)[i + 1] = hypre_CSRMatrixI(diag)[i] +
                                               hypre_AuxParCSRMatrixDiagSizes(aux_matrix)[i];
            }
            hypre_CSRMatrixNumNonzeros(diag) = hypre_CSRMatrixI(diag)[local_num_rows];
            hypre_CSRMatrixInitialize(diag);
         }

         if (hypre_AuxParCSRMatrixOffdSizes(aux_matrix))
         {
            for (i = 0; i < local_num_rows; i++)
            {
               hypre_CSRMatrixI(offd)[i + 1] = hypre_CSRMatrixI(offd)[i] +
                                               hypre_AuxParCSRMatrixOffdSizes(aux_matrix)[i];
            }
            hypre_CSRMatrixNumNonzeros(offd) = hypre_CSRMatrixI(offd)[local_num_rows];
            hypre_CSRMatrixInitialize(offd);
         }
      }

      if (!hypre_AuxParCSRMatrixNeedAux(aux_matrix))
      {
         /* insertion cursors start at each row's beginning */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < local_num_rows; i++)
         {
            hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[i] = hypre_CSRMatrixI(diag)[i];
            hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[i] = hypre_CSRMatrixI(offd)[i];
         }
      }
   }
   else if ( memory_location_aux == HYPRE_MEMORY_HOST )
   {
      /* AB
4/06 - the assemble routine destroys the aux matrix - so we need to recreate if initialize is called again */ if (!aux_matrix) { hypre_AuxParCSRMatrixCreate(&aux_matrix, hypre_ParCSRMatrixNumRows(par_matrix), hypre_ParCSRMatrixNumCols(par_matrix), NULL); hypre_AuxParCSRMatrixMemoryLocation(aux_matrix) = HYPRE_MEMORY_HOST; hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0; hypre_IJMatrixTranslator(matrix) = aux_matrix; } } return hypre_error_flag; } /****************************************************************************** * * hypre_IJMatrixGetRowCountsParCSR * * gets the number of columns for rows specified by the user * *****************************************************************************/ HYPRE_Int hypre_IJMatrixGetRowCountsParCSR( hypre_IJMatrix *matrix, HYPRE_Int nrows, HYPRE_BigInt *rows, HYPRE_Int *ncols) { HYPRE_BigInt row_index; MPI_Comm comm = hypre_IJMatrixComm(matrix); hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix); HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix); HYPRE_Int *diag_i = hypre_CSRMatrixI(diag); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix); HYPRE_Int *offd_i = hypre_CSRMatrixI(offd); HYPRE_Int i, my_id, index; HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix); hypre_MPI_Comm_rank(comm, &my_id); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i, row_index) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < nrows; i++) { row_index = rows[i]; if (row_index >= row_partitioning[0] && row_index < row_partitioning[1]) { /* compute local row number */ index = (HYPRE_Int)(row_index - row_partitioning[0]); ncols[i] = diag_i[index + 1] - diag_i[index] + offd_i[index + 1] - offd_i[index]; } else { ncols[i] = 0; if (print_level) { hypre_printf ("Warning! Row %b is not on Proc. 
%d!\n", row_index, my_id); } } } return hypre_error_flag; } /****************************************************************************** * * hypre_IJMatrixGetValuesParCSR * * gets values of an IJMatrix * *****************************************************************************/ HYPRE_Int hypre_IJMatrixGetValuesParCSR( hypre_IJMatrix *matrix, HYPRE_Int nrows, HYPRE_Int *ncols, HYPRE_BigInt *rows, HYPRE_BigInt *cols, HYPRE_Complex *values) { MPI_Comm comm = hypre_IJMatrixComm(matrix); hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix); HYPRE_Int assemble_flag = hypre_IJMatrixAssembleFlag(matrix); hypre_CSRMatrix *diag; HYPRE_Int *diag_i; HYPRE_Int *diag_j; HYPRE_Complex *diag_data; hypre_CSRMatrix *offd; HYPRE_Int *offd_i; HYPRE_Int *offd_j; HYPRE_Complex *offd_data; HYPRE_BigInt *col_map_offd; HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(par_matrix); HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix); HYPRE_Int i, j, n, ii, indx; HYPRE_Int num_procs, my_id; HYPRE_BigInt col_0, col_n, row, col_indx, first; HYPRE_Int row_local, row_size; HYPRE_Int warning = 0; HYPRE_Int *counter; HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix); hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); if (assemble_flag == 0) { hypre_error_in_arg(1); if (print_level) { hypre_printf("Error! Matrix not assembled yet! 
HYPRE_IJMatrixGetValues\n"); } } col_0 = col_starts[0]; col_n = col_starts[1] - 1; first = hypre_IJMatrixGlobalFirstCol(matrix); diag = hypre_ParCSRMatrixDiag(par_matrix); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); diag_data = hypre_CSRMatrixData(diag); offd = hypre_ParCSRMatrixOffd(par_matrix); offd_i = hypre_CSRMatrixI(offd); if (num_procs > 1) { offd_j = hypre_CSRMatrixJ(offd); offd_data = hypre_CSRMatrixData(offd); col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix); } if (nrows < 0) { nrows = -nrows; counter = hypre_CTAlloc(HYPRE_Int, nrows + 1, HYPRE_MEMORY_HOST); counter[0] = 0; for (i = 0; i < nrows; i++) { counter[i + 1] = counter[i] + ncols[i]; } indx = 0; for (i = 0; i < nrows; i++) { row = rows[i]; if (row >= row_partitioning[0] && row < row_partitioning[1]) { row_local = (HYPRE_Int)(row - row_partitioning[0]); row_size = diag_i[row_local + 1] - diag_i[row_local] + offd_i[row_local + 1] - offd_i[row_local]; if (counter[i] + row_size > counter[nrows]) { hypre_error_in_arg(1); if (print_level) { hypre_printf ("Error! Not enough memory! HYPRE_IJMatrixGetValues\n"); } } if (ncols[i] < row_size) { warning = 1; } for (j = diag_i[row_local]; j < diag_i[row_local + 1]; j++) { cols[indx] = (HYPRE_BigInt)diag_j[j] + col_0; values[indx++] = diag_data[j]; } for (j = offd_i[row_local]; j < offd_i[row_local + 1]; j++) { cols[indx] = col_map_offd[offd_j[j]]; values[indx++] = offd_data[j]; } counter[i + 1] = indx; } else { if (print_level) { hypre_printf ("Warning! Row %b is not on Proc. %d!\n", row, my_id); } } } if (warning) { for (i = 0; i < nrows; i++) { ncols[i] = counter[i + 1] - counter[i]; } if (print_level) { hypre_printf ("Warning! 
ncols has been changed!\n"); } } hypre_TFree(counter, HYPRE_MEMORY_HOST); } else { indx = 0; for (ii = 0; ii < nrows; ii++) { row = rows[ii]; n = ncols[ii]; if (n == 0) /* empty row */ { continue; } if (row >= row_partitioning[0] && row < row_partitioning[1]) { row_local = (HYPRE_Int)(row - row_partitioning[0]); /* compute local row number */ for (i = 0; i < n; i++) { col_indx = cols[indx] - first; values[indx] = 0.0; if (col_indx < col_0 || col_indx > col_n) /* search in offd */ { for (j = offd_i[row_local]; j < offd_i[row_local + 1]; j++) { if (col_map_offd[offd_j[j]] == col_indx) { values[indx] = offd_data[j]; break; } } } else /* search in diag */ { col_indx = col_indx - col_0; for (j = diag_i[row_local]; j < diag_i[row_local + 1]; j++) { if (diag_j[j] == (HYPRE_Int)col_indx) { values[indx] = diag_data[j]; break; } } } indx++; } } else { if (print_level) { hypre_printf ("Warning! Row %b is not on Proc. %d!\n", row, my_id); } } } } return hypre_error_flag; } /****************************************************************************** * * hypre_IJMatrixSetValuesParCSR * * sets values in an IJMatrix before assembly, * *****************************************************************************/ HYPRE_Int hypre_IJMatrixSetValuesParCSR( hypre_IJMatrix *matrix, HYPRE_Int nrows, HYPRE_Int *ncols, const HYPRE_BigInt *rows, const HYPRE_Int *row_indexes, const HYPRE_BigInt *cols, const HYPRE_Complex *values ) { hypre_ParCSRMatrix *par_matrix; hypre_CSRMatrix *diag, *offd; hypre_AuxParCSRMatrix *aux_matrix; HYPRE_BigInt *row_partitioning; HYPRE_BigInt *col_partitioning; MPI_Comm comm = hypre_IJMatrixComm(matrix); HYPRE_Int num_procs, my_id; HYPRE_Int row_local; //HYPRE_Int row_len; HYPRE_BigInt col_0, col_n, row; HYPRE_Int i, ii, j, n, not_found; //HYPRE_Int col_indx, cnt1; HYPRE_BigInt **aux_j; HYPRE_BigInt *local_j; HYPRE_BigInt *tmp_j; HYPRE_Complex **aux_data; HYPRE_Complex *local_data; HYPRE_Complex *tmp_data; HYPRE_Int diag_space, offd_space; HYPRE_Int 
*row_length, *row_space; HYPRE_Int need_aux; HYPRE_Int tmp_indx, indx; HYPRE_Int space, size, old_size; HYPRE_Int cnt, cnt_diag, cnt_offd; HYPRE_Int pos_diag, pos_offd; HYPRE_Int len_diag, len_offd; HYPRE_Int offd_indx, diag_indx; HYPRE_Int *diag_i; HYPRE_Int *diag_j; HYPRE_Complex *diag_data; HYPRE_Int *offd_i; HYPRE_Int *offd_j; HYPRE_Complex *offd_data; HYPRE_BigInt first; /*HYPRE_Int current_num_elmts;*/ /*HYPRE_Int max_off_proc_elmts;*/ //HYPRE_Int off_proc_i_indx; //HYPRE_BigInt *off_proc_i; //HYPRE_BigInt *off_proc_j; HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix); /*HYPRE_Complex *off_proc_data;*/ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix ); row_partitioning = hypre_IJMatrixRowPartitioning(matrix); col_partitioning = hypre_IJMatrixColPartitioning(matrix); col_0 = col_partitioning[0]; col_n = col_partitioning[1] - 1; first = hypre_IJMatrixGlobalFirstCol(matrix); if (nrows < 0) { hypre_error_in_arg(2); if (print_level) { hypre_printf("Error! nrows negative! HYPRE_IJMatrixSetValues\n"); } } if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled*/ { HYPRE_BigInt *col_map_offd; HYPRE_Int num_cols_offd; HYPRE_Int j_offd; for (ii = 0; ii < nrows; ii++) { row = rows[ii]; n = ncols ? 
ncols[ii] : 1; if (n == 0) /* empty row */ { continue; } indx = row_indexes[ii]; /* processor owns the row */ if (row >= row_partitioning[0] && row < row_partitioning[1]) { row_local = (HYPRE_Int)(row - row_partitioning[0]); /* compute local row number */ diag = hypre_ParCSRMatrixDiag(par_matrix); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); diag_data = hypre_CSRMatrixData(diag); offd = hypre_ParCSRMatrixOffd(par_matrix); offd_i = hypre_CSRMatrixI(offd); num_cols_offd = hypre_CSRMatrixNumCols(offd); if (num_cols_offd) { col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix); offd_j = hypre_CSRMatrixJ(offd); offd_data = hypre_CSRMatrixData(offd); } size = diag_i[row_local + 1] - diag_i[row_local] + offd_i[row_local + 1] - offd_i[row_local]; if (n > size) /* Should we change this and allow this? This could be same column index, i.e. only last value is set, previous ones overwritten. */ { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf (" row %b too long! 
\n", row); } return hypre_error_flag; } pos_diag = diag_i[row_local]; pos_offd = offd_i[row_local]; len_diag = diag_i[row_local + 1]; len_offd = offd_i[row_local + 1]; not_found = 1; for (i = 0; i < n; i++) { if (cols[indx] < col_0 || cols[indx] > col_n) /* insert into offd */ { j_offd = hypre_BigBinarySearch(col_map_offd, cols[indx] - first, num_cols_offd); if (j_offd == -1) { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } return hypre_error_flag; } for (j = pos_offd; j < len_offd; j++) { if (offd_j[j] == j_offd) { offd_data[j] = values[indx]; not_found = 0; break; } } if (not_found) { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } return hypre_error_flag; } not_found = 1; } /* diagonal element */ else if (cols[indx] == row) { if (diag_j[pos_diag] != row_local) { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } /* return -1;*/ return hypre_error_flag; } diag_data[pos_diag] = values[indx]; } else /* insert into diag */ { for (j = pos_diag; j < len_diag; j++) { if (diag_j[j] == (HYPRE_Int)(cols[indx] - col_0)) { diag_data[j] = values[indx]; not_found = 0; break; } } if (not_found) { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } /* return -1; */ return hypre_error_flag; } } indx++; } } } } else { aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix); row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix); row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix); need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix); for (ii = 0; ii < nrows; ii++) { row = rows[ii]; n = ncols ? 
ncols[ii] : 1; if (n == 0) /* empty row */ { continue; } indx = row_indexes[ii]; /* processor owns the row */ if (row >= row_partitioning[0] && row < row_partitioning[1]) { row_local = (HYPRE_Int)(row - row_partitioning[0]); /* compute local row number */ if (need_aux) { aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix); aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix); local_j = aux_j[row_local]; local_data = aux_data[row_local]; space = row_space[row_local]; old_size = row_length[row_local]; size = space - old_size; if (size < n) { size = n - size; tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST); tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST); } else { tmp_j = NULL; } tmp_indx = 0; not_found = 1; size = old_size; for (i = 0; i < n; i++) { for (j = 0; j < old_size; j++) { if (local_j[j] == cols[indx]) { local_data[j] = values[indx]; not_found = 0; break; } } if (not_found) { if (size < space) { local_j[size] = cols[indx]; local_data[size++] = values[indx]; } else { tmp_j[tmp_indx] = cols[indx]; tmp_data[tmp_indx++] = values[indx]; } } not_found = 1; indx++; } row_length[row_local] = size + tmp_indx; if (tmp_indx) { aux_j[row_local] = hypre_TReAlloc(aux_j[row_local], HYPRE_BigInt, size + tmp_indx, HYPRE_MEMORY_HOST); aux_data[row_local] = hypre_TReAlloc(aux_data[row_local], HYPRE_Complex, size + tmp_indx, HYPRE_MEMORY_HOST); row_space[row_local] = size + tmp_indx; local_j = aux_j[row_local]; local_data = aux_data[row_local]; } cnt = size; for (i = 0; i < tmp_indx; i++) { local_j[cnt] = tmp_j[i]; local_data[cnt++] = tmp_data[i]; } if (tmp_j) { hypre_TFree(tmp_j, HYPRE_MEMORY_HOST); hypre_TFree(tmp_data, HYPRE_MEMORY_HOST); } } else /* insert immediately into data in ParCSRMatrix structure */ { HYPRE_BigInt *big_offd_j; HYPRE_Int col_j; offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local]; diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local]; diag = hypre_ParCSRMatrixDiag(par_matrix); diag_i = 
hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); diag_data = hypre_CSRMatrixData(diag); offd = hypre_ParCSRMatrixOffd(par_matrix); offd_i = hypre_CSRMatrixI(offd); if (num_procs > 1) { big_offd_j = hypre_CSRMatrixBigJ(offd); offd_data = hypre_CSRMatrixData(offd); if (!big_offd_j) { big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)], hypre_CSRMatrixMemoryLocation(offd)); hypre_CSRMatrixBigJ(offd) = big_offd_j; } } cnt_diag = diag_indx; cnt_offd = offd_indx; diag_space = diag_i[row_local + 1]; offd_space = offd_i[row_local + 1]; not_found = 1; for (i = 0; i < n; i++) { if (cols[indx] < col_0 || cols[indx] > col_n) /* insert into offd */ { for (j = offd_i[row_local]; j < offd_indx; j++) { if (big_offd_j[j] == cols[indx]) { offd_data[j] = values[indx]; not_found = 0; break; } } if (not_found) { if (cnt_offd < offd_space) { big_offd_j[cnt_offd] = cols[indx]; offd_data[cnt_offd++] = values[indx]; } else { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf("Error in row %b ! Too many elements!\n", row); } /* return 1; */ return hypre_error_flag; } } not_found = 1; } else /* insert into diag */ { col_j = (HYPRE_Int)(cols[indx] - col_0); for (j = diag_i[row_local]; j < diag_indx; j++) { if (diag_j[j] == col_j) { diag_data[j] = values[indx]; not_found = 0; break; } } if (not_found) { if (cnt_diag < diag_space) { diag_j[cnt_diag] = col_j; diag_data[cnt_diag++] = values[indx]; } else { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf("Error in row %b ! Too many elements !\n", row); } /* return 1; */ return hypre_error_flag; } } not_found = 1; } indx++; } hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag; hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd; } } } } return hypre_error_flag; } /****************************************************************************** * * hypre_IJMatrixSetConstantValuesParCSR * * sets all values in an already assembled IJMatrix to a constant value. 
* *****************************************************************************/

/* Host-side kernel of SetConstantValues: overwrites every stored nonzero
 * of the assembled ParCSR matrix (both its diag and offd CSR parts) with
 * "value".  The sparsity pattern is left untouched; only the data arrays
 * are written. */
void
hypre_IJMatrixSetConstantValuesParCSRHost( hypre_IJMatrix *matrix,
                                           HYPRE_Complex   value )
{
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
   hypre_CSRMatrix    *diag       = hypre_ParCSRMatrixDiag(par_matrix);
   hypre_CSRMatrix    *offd       = hypre_ParCSRMatrixOffd(par_matrix);
   HYPRE_Complex      *diag_data  = hypre_CSRMatrixData(diag);
   HYPRE_Complex      *offd_data  = hypre_CSRMatrixData(offd);
   HYPRE_Int           nnz_diag   = hypre_CSRMatrixNumNonzeros(diag);
   HYPRE_Int           nnz_offd   = hypre_CSRMatrixNumNonzeros(offd);
   HYPRE_Int           ii;

   /* every entry is written independently, so both loops parallelize
    * trivially with no shared state beyond the (read-only) value */
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(ii) HYPRE_SMP_SCHEDULE
#endif
   for (ii = 0; ii < nnz_diag; ii++)
   {
      diag_data[ii] = value;
   }

#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(ii) HYPRE_SMP_SCHEDULE
#endif
   for (ii = 0; ii < nnz_offd; ii++)
   {
      offd_data[ii] = value;
   }
}

/* Dispatcher: routes to the device implementation when the matrix' memory
 * location resolves to device execution, otherwise to the host kernel
 * above.  Errors out if the matrix has not been assembled, since the CSR
 * data arrays are only valid after assembly. */
HYPRE_Int
hypre_IJMatrixSetConstantValuesParCSR( hypre_IJMatrix *matrix,
                                       HYPRE_Complex   value )
{
   if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled*/
   {
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
      if (hypre_GetExecPolicy1(hypre_IJMatrixMemoryLocation(matrix)) == HYPRE_EXEC_DEVICE)
      {
         hypre_IJMatrixSetConstantValuesParCSRDevice(matrix, value);
      }
      else
#endif
      {
         hypre_IJMatrixSetConstantValuesParCSRHost(matrix, value);
      }
   }
   else
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,
                        "Matrix not assembled! Required to set constant values!");
   }

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixAddToValuesParCSR
 *
 * adds row values to an IJMatrix
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixAddToValuesParCSR( hypre_IJMatrix       *matrix,
                                 HYPRE_Int             nrows,
                                 HYPRE_Int            *ncols,
                                 const HYPRE_BigInt   *rows,
                                 const HYPRE_Int      *row_indexes,
                                 const HYPRE_BigInt   *cols,
                                 const HYPRE_Complex  *values )
{
   hypre_ParCSRMatrix *par_matrix;
   hypre_CSRMatrix *diag, *offd;
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_BigInt *row_partitioning;
   HYPRE_BigInt *col_partitioning;
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int num_procs, my_id;
   HYPRE_Int row_local;
   HYPRE_BigInt row;
   HYPRE_BigInt col_0, col_n;
   HYPRE_Int i, ii, j, n, not_found;
   /* auxiliary (pre-assembly) per-row storage */
   HYPRE_BigInt **aux_j;
   HYPRE_BigInt *local_j;
   HYPRE_BigInt *tmp_j;
   HYPRE_Complex **aux_data;
   HYPRE_Complex *local_data;
   HYPRE_Complex *tmp_data;
   HYPRE_Int diag_space, offd_space;
   HYPRE_Int *row_length, *row_space;
   HYPRE_Int need_aux;
   HYPRE_Int tmp_indx, indx;
   HYPRE_Int space, size, old_size;
   HYPRE_Int cnt, cnt_diag, cnt_offd;
   HYPRE_Int pos_diag, pos_offd;
   HYPRE_Int len_diag, len_offd;
   HYPRE_Int offd_indx, diag_indx;
   HYPRE_BigInt first;
   /* direct views into the assembled CSR parts */
   HYPRE_Int *diag_i;
   HYPRE_Int *diag_j;
   HYPRE_Complex *diag_data;
   HYPRE_Int *offd_i;
   HYPRE_Int *offd_j;
   HYPRE_Complex *offd_data;
   /* staging for contributions to rows owned by other processes */
   HYPRE_Int current_num_elmts;
   HYPRE_Int max_off_proc_elmts;
   HYPRE_Int off_proc_i_indx;
   HYPRE_BigInt *off_proc_i;
   HYPRE_BigInt *off_proc_j;
   HYPRE_Complex *off_proc_data;
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
   row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   col_partitioning = hypre_IJMatrixColPartitioning(matrix);

   /* [col_0, col_n] is the inclusive global column range of the local
    * diag block; anything outside it belongs to offd */
   col_0 = col_partitioning[0];
   col_n = col_partitioning[1] - 1;
   first = 
hypre_IJMatrixGlobalFirstCol(matrix); if (hypre_IJMatrixAssembleFlag(matrix)) { HYPRE_Int num_cols_offd; HYPRE_BigInt *col_map_offd; HYPRE_Int j_offd; /* AB - 4/06 - need to get this object*/ aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix); for (ii = 0; ii < nrows; ii++) { row = rows[ii]; n = ncols ? ncols[ii] : 1; if (n == 0) /* empty row */ { continue; } indx = row_indexes[ii]; if (row >= row_partitioning[0] && row < row_partitioning[1]) { row_local = (HYPRE_Int)(row - row_partitioning[0]); /* compute local row number */ diag = hypre_ParCSRMatrixDiag(par_matrix); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); diag_data = hypre_CSRMatrixData(diag); offd = hypre_ParCSRMatrixOffd(par_matrix); offd_i = hypre_CSRMatrixI(offd); num_cols_offd = hypre_CSRMatrixNumCols(offd); if (num_cols_offd) { col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix); offd_j = hypre_CSRMatrixJ(offd); offd_data = hypre_CSRMatrixData(offd); } size = diag_i[row_local + 1] - diag_i[row_local] + offd_i[row_local + 1] - offd_i[row_local]; if (n > size) /* Should we change this and allow this? This could be same column index, i.e. only last value is set, previous ones overwritten. */ { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf (" row %b too long! 
\n", row); } return hypre_error_flag; } pos_diag = diag_i[row_local]; pos_offd = offd_i[row_local]; len_diag = diag_i[row_local + 1]; len_offd = offd_i[row_local + 1]; not_found = 1; for (i = 0; i < n; i++) { if (cols[indx] < col_0 || cols[indx] > col_n) /* insert into offd */ { j_offd = hypre_BigBinarySearch(col_map_offd, cols[indx] - first, num_cols_offd); if (j_offd == -1) { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } return hypre_error_flag; /* return -1; */ } for (j = pos_offd; j < len_offd; j++) { if (offd_j[j] == j_offd) { offd_data[j] += values[indx]; not_found = 0; break; } } if (not_found) { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } return hypre_error_flag; } not_found = 1; } /* diagonal element */ else if (cols[indx] == row) { if (diag_j[pos_diag] != row_local) { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } return hypre_error_flag; } diag_data[pos_diag] += values[indx]; } else /* insert into diag */ { for (j = pos_diag; j < len_diag; j++) { if (diag_j[j] == (HYPRE_Int)(cols[indx] - col_0)) { diag_data[j] += values[indx]; not_found = 0; break; } } if (not_found) { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } return hypre_error_flag; } } indx++; } } /* not my row */ else { if (!aux_matrix) { size = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]); hypre_AuxParCSRMatrixCreate(&aux_matrix, size, size, NULL); hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0; hypre_IJMatrixTranslator(matrix) = aux_matrix; } current_num_elmts = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix); max_off_proc_elmts = hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix); off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix); off_proc_i 
= hypre_AuxParCSRMatrixOffProcI(aux_matrix); off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix); off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix); if (!max_off_proc_elmts) { max_off_proc_elmts = hypre_max(n, 1000); hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts; hypre_AuxParCSRMatrixOffProcI(aux_matrix) = hypre_CTAlloc(HYPRE_BigInt, 2 * max_off_proc_elmts, HYPRE_MEMORY_HOST); hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST); hypre_AuxParCSRMatrixOffProcData(aux_matrix) = hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST); off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix); off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix); off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix); } else if (current_num_elmts + n > max_off_proc_elmts) { max_off_proc_elmts += 3 * n; off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt, 2 * max_off_proc_elmts, HYPRE_MEMORY_HOST); off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST); off_proc_data = hypre_TReAlloc(off_proc_data, HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST); hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts; hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i; hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j; hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data; } /* AB - 4/6 - the row should be negative to indicate an add */ /* UMY - 12/28/09 - now positive since we eliminated the feature of setting on other processors */ /* off_proc_i[off_proc_i_indx++] = row; */ off_proc_i[off_proc_i_indx++] = row; off_proc_i[off_proc_i_indx++] = n; for (i = 0; i < n; i++) { off_proc_j[current_num_elmts] = cols[indx]; off_proc_data[current_num_elmts++] = values[indx++]; } hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx; hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix) = current_num_elmts; } } } /* not 
assembled */ else { aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix); row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix); row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix); need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix); for (ii = 0; ii < nrows; ii++) { row = rows[ii]; n = ncols ? ncols[ii] : 1; if (n == 0) /* empty row */ { continue; } indx = row_indexes[ii]; if (row >= row_partitioning[0] && row < row_partitioning[1]) { row_local = (HYPRE_Int)(row - row_partitioning[0]); /* compute local row number */ if (need_aux) { aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix); aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix); local_j = aux_j[row_local]; local_data = aux_data[row_local]; space = row_space[row_local]; old_size = row_length[row_local]; size = space - old_size; if (size < n) { size = n - size; tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST); tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST); } else { tmp_j = NULL; } tmp_indx = 0; not_found = 1; size = old_size; for (i = 0; i < n; i++) { for (j = 0; j < old_size; j++) { if (local_j[j] == cols[indx]) { local_data[j] += values[indx]; not_found = 0; break; } } if (not_found) { if (size < space) { local_j[size] = cols[indx]; local_data[size++] = values[indx]; } else { tmp_j[tmp_indx] = cols[indx]; tmp_data[tmp_indx++] = values[indx]; } } not_found = 1; indx++; } row_length[row_local] = size + tmp_indx; if (tmp_indx) { aux_j[row_local] = hypre_TReAlloc(aux_j[row_local], HYPRE_BigInt, size + tmp_indx, HYPRE_MEMORY_HOST); aux_data[row_local] = hypre_TReAlloc(aux_data[row_local], HYPRE_Complex, size + tmp_indx, HYPRE_MEMORY_HOST); row_space[row_local] = size + tmp_indx; local_j = aux_j[row_local]; local_data = aux_data[row_local]; } cnt = size; for (i = 0; i < tmp_indx; i++) { local_j[cnt] = tmp_j[i]; local_data[cnt++] = tmp_data[i]; } if (tmp_j) { hypre_TFree(tmp_j, HYPRE_MEMORY_HOST); hypre_TFree(tmp_data, HYPRE_MEMORY_HOST); } } else /* insert 
immediately into data in ParCSRMatrix structure */ { HYPRE_BigInt *big_offd_j; offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local]; diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local]; diag = hypre_ParCSRMatrixDiag(par_matrix); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); diag_data = hypre_CSRMatrixData(diag); offd = hypre_ParCSRMatrixOffd(par_matrix); offd_i = hypre_CSRMatrixI(offd); if (num_procs > 1) { big_offd_j = hypre_CSRMatrixBigJ(offd); offd_data = hypre_CSRMatrixData(offd); if (!big_offd_j) { big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)], hypre_CSRMatrixMemoryLocation(offd)); hypre_CSRMatrixBigJ(offd) = big_offd_j; } } cnt_diag = diag_indx; cnt_offd = offd_indx; diag_space = diag_i[row_local + 1]; offd_space = offd_i[row_local + 1]; not_found = 1; for (i = 0; i < n; i++) { if (cols[indx] < col_0 || cols[indx] > col_n) /* insert into offd */ { for (j = offd_i[row_local]; j < offd_indx; j++) { if (big_offd_j[j] == cols[indx]) { offd_data[j] += values[indx]; not_found = 0; break; } } if (not_found) { if (cnt_offd < offd_space) { big_offd_j[cnt_offd] = cols[indx]; offd_data[cnt_offd++] = values[indx]; } else { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf("Error in row %b ! Too many elements!\n", row); } /* return 1;*/ return hypre_error_flag; } } not_found = 1; } else /* insert into diag */ { HYPRE_Int col_j = (HYPRE_Int)( cols[indx] - col_0); for (j = diag_i[row_local]; j < diag_indx; j++) { if (diag_j[j] == col_j) { diag_data[j] += values[indx]; not_found = 0; break; } } if (not_found) { if (cnt_diag < diag_space) { diag_j[cnt_diag] = col_j; diag_data[cnt_diag++] = values[indx]; } else { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf("Error in row %b ! 
Too many elements !\n", row); } /* return 1; */ return hypre_error_flag; } } not_found = 1; } indx++; } hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag; hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd; } } /* not my row */ else { current_num_elmts = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix); max_off_proc_elmts = hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix); off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix); off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix); off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix); off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix); if (!max_off_proc_elmts) { max_off_proc_elmts = hypre_max(n, 1000); hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts; hypre_AuxParCSRMatrixOffProcI(aux_matrix) = hypre_CTAlloc(HYPRE_BigInt, 2 * max_off_proc_elmts, HYPRE_MEMORY_HOST); hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST); hypre_AuxParCSRMatrixOffProcData(aux_matrix) = hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST); off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix); off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix); off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix); } else if (current_num_elmts + n > max_off_proc_elmts) { max_off_proc_elmts += 3 * n; off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt, 2 * max_off_proc_elmts, HYPRE_MEMORY_HOST); off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST); off_proc_data = hypre_TReAlloc(off_proc_data, HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST); hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts; hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i; hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j; hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data; } off_proc_i[off_proc_i_indx++] = row; off_proc_i[off_proc_i_indx++] = 
n;
            /* append the staged off-processor entries after the (row, n)
             * header written just above */
            for (i = 0; i < n; i++)
            {
               off_proc_j[current_num_elmts] = cols[indx];
               off_proc_data[current_num_elmts++] = values[indx++];
            }
            hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
            hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix) = current_num_elmts;
         }
      }
   }

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixDestroyParCSR
 *
 * frees an IJMatrix
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixDestroyParCSR(hypre_IJMatrix *matrix)
{
   /* destroys both the ParCSR object and the auxiliary (pre-assembly)
    * translator; NOTE(review): the object/translator pointers in "matrix"
    * are not reset here — presumably the caller discards the IJMatrix
    * afterwards; verify against callers before reuse */
   hypre_ParCSRMatrixDestroy((hypre_ParCSRMatrix *)hypre_IJMatrixObject(matrix));
   hypre_AuxParCSRMatrixDestroy((hypre_AuxParCSRMatrix*)hypre_IJMatrixTranslator(matrix));

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixTransposeParCSR
 *
 * Transposes an IJMatrix of type ParCSRMatrix
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixTransposeParCSR( hypre_IJMatrix  *matrix_A,
                               hypre_IJMatrix  *matrix_AT )
{
   hypre_ParCSRMatrix *par_A = (hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix_A);
   hypre_ParCSRMatrix *par_AT;

   /* Free old object if existent */
   if (hypre_IJMatrixObject(matrix_AT))
   {
      par_AT = (hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix_AT);
      hypre_ParCSRMatrixDestroy(par_AT);
      hypre_IJMatrixObject(matrix_AT) = NULL;
   }

   /* build A^T, refresh its nonzero counts, and set up its communication
    * package before handing it to matrix_AT */
   hypre_ParCSRMatrixTranspose(par_A, &par_AT, 1);
   hypre_ParCSRMatrixSetNumNonzeros(par_AT);
   hypre_ParCSRMatrixSetDNumNonzeros(par_AT);
   hypre_MatvecCommPkgCreate(par_AT);
   hypre_IJMatrixObject(matrix_AT) = (void *) par_AT;

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixNormParCSR
 *
 * Computes the Infinity norm of an IJMatrix of type ParCSRMatrix
 *
 * TODO: Add other norms
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixNormParCSR( hypre_IJMatrix *matrix,
                          HYPRE_Real     *norm )
{
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix);

   hypre_ParCSRMatrixInfNorm(par_matrix, norm);

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixAddParCSR
 *
 * Performs C = alpha*A + beta*B, where A, B and C are IJMatrices of
 * type ParCSRMatrix.
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixAddParCSR( HYPRE_Complex    alpha,
                         hypre_IJMatrix  *matrix_A,
                         HYPRE_Complex    beta,
                         hypre_IJMatrix  *matrix_B,
                         hypre_IJMatrix  *matrix_C )
{
   hypre_ParCSRMatrix *par_A = (hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix_A);
   hypre_ParCSRMatrix *par_B = (hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix_B);
   hypre_ParCSRMatrix *par_C;

   /* Free old object if existent */
   if (hypre_IJMatrixObject(matrix_C))
   {
      par_C = (hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix_C);
      hypre_ParCSRMatrixDestroy(par_C);
      hypre_IJMatrixObject(matrix_C) = NULL;
   }

   hypre_ParCSRMatrixAdd(alpha, par_A, beta, par_B, &par_C);
   hypre_ParCSRMatrixSetNumNonzeros(par_C);
   hypre_ParCSRMatrixSetDNumNonzeros(par_C);

   /* unlike Transpose above, Add may already have attached a comm pkg;
    * only create one when missing */
   if (!hypre_ParCSRMatrixCommPkg(par_C))
   {
      hypre_MatvecCommPkgCreate(par_C);
   }
   hypre_IJMatrixObject(matrix_C) = (void *) par_C;

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixAssembleOffProcValsParCSR
 *
 * This is for handling set and get values calls to off-proc. entries -
 * it is called from matrix assemble. There is an alternate version for
 * when the assumed partition is being used. 
 *
 *****************************************************************************/

/* Ships locally-cached off-processor (row, col, value) triples to their
 * owning ranks via two hypre_DataExchangeList rounds:
 *   1) contact the *assumed* owners with row ranges to discover the real
 *      owners, then
 *   2) send the packed row/column/value payload to the real owners, which
 *      receive it through hypre_FillResponseIJOffProcVals.
 * Received entries are added to the local matrix (host path) or staged in
 * host buffers and pushed to the device add path.
 */
HYPRE_Int
hypre_IJMatrixAssembleOffProcValsParCSR( hypre_IJMatrix       *matrix,
                                         HYPRE_Int             off_proc_i_indx,
                                         HYPRE_Int             max_off_proc_elmts,
                                         HYPRE_Int             current_num_elmts,
                                         HYPRE_MemoryLocation  memory_location,
                                         HYPRE_BigInt         *off_proc_i,
                                         HYPRE_BigInt         *off_proc_j,
                                         HYPRE_Complex        *off_proc_data )
{
   MPI_Comm comm = hypre_IJMatrixComm(matrix);

   HYPRE_Int i, j, k, in_i;
   HYPRE_Int myid;

   HYPRE_Int proc_id, last_proc, prev_id, tmp_id;
   HYPRE_Int max_response_size;
   HYPRE_BigInt global_num_cols;
   HYPRE_BigInt global_first_col;
   HYPRE_BigInt global_first_row;
   HYPRE_Int ex_num_contacts = 0, num_rows = 0;
   HYPRE_BigInt range_start, range_end;
   HYPRE_Int num_elements;
   HYPRE_Int storage;
   HYPRE_Int indx;
   HYPRE_BigInt row;
   HYPRE_Int num_ranges, row_index = 0;
   HYPRE_Int num_recvs;
   HYPRE_BigInt upper_bound;
   HYPRE_Int counter;
   HYPRE_Int num_real_procs;
   HYPRE_Int /*current_proc,*/ original_proc_indx;

   HYPRE_BigInt *row_list = NULL;
   HYPRE_Int *row_list_num_elements = NULL;
   HYPRE_Int *a_proc_id = NULL, *orig_order = NULL;
   HYPRE_Int *real_proc_id = NULL, *us_real_proc_id = NULL;
   HYPRE_Int *ex_contact_procs = NULL, *ex_contact_vec_starts = NULL;
   HYPRE_BigInt *ex_contact_buf = NULL;
   HYPRE_Int *recv_starts = NULL;
   HYPRE_BigInt *response_buf = NULL;
   HYPRE_Int *response_buf_starts = NULL;
   HYPRE_Int *num_rows_per_proc = NULL, *num_elements_total = NULL;
   HYPRE_Int *argsort_contact_procs = NULL;

   HYPRE_Int obj_size_bytes, complex_size;
   HYPRE_BigInt big_int_size;
   HYPRE_Int tmp_int;
   HYPRE_BigInt tmp_big_int;
   HYPRE_BigInt *col_ptr;
   HYPRE_BigInt *big_int_data = NULL;
   HYPRE_Int big_int_data_size = 0, complex_data_size = 0;

   void *void_contact_buf = NULL;
   void *index_ptr;
   void *recv_data_ptr;

   HYPRE_Complex tmp_complex;
   HYPRE_Complex *col_data_ptr;
   HYPRE_Complex *complex_data = NULL;

   hypre_DataExchangeResponse response_obj1, response_obj2;
   hypre_ProcListElements send_proc_obj;

   hypre_IJAssumedPart *apart;

   hypre_MPI_Comm_rank(comm, &myid);
   global_num_cols = hypre_IJMatrixGlobalNumCols(matrix);
   global_first_col = hypre_IJMatrixGlobalFirstCol(matrix);
   global_first_row = hypre_IJMatrixGlobalFirstRow(matrix);

   /* Device input: stage the triples into host buffers and rewrite
    * off_proc_i into the (row, count) pair format used below, with one
    * element per row entry. */
   if (memory_location == HYPRE_MEMORY_DEVICE)
   {
      HYPRE_BigInt  *tmp             = hypre_TAlloc(HYPRE_BigInt,  current_num_elmts,     HYPRE_MEMORY_HOST);
      HYPRE_BigInt  *off_proc_i_h    = hypre_TAlloc(HYPRE_BigInt,  2 * current_num_elmts, HYPRE_MEMORY_HOST);
      HYPRE_BigInt  *off_proc_j_h    = hypre_TAlloc(HYPRE_BigInt,  current_num_elmts,     HYPRE_MEMORY_HOST);
      HYPRE_Complex *off_proc_data_h = hypre_TAlloc(HYPRE_Complex, current_num_elmts,     HYPRE_MEMORY_HOST);

      hypre_TMemcpy(tmp, off_proc_i, HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
      hypre_TMemcpy(off_proc_j_h, off_proc_j, HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
      hypre_TMemcpy(off_proc_data_h, off_proc_data, HYPRE_Complex, current_num_elmts, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);

      for (i = 0; i < current_num_elmts; i++)
      {
#if defined(HYPRE_DEBUG)
         /* entries handled here must be off-processor but inside the
          * global row/column ranges */
         hypre_assert(tmp[i] < hypre_IJMatrixRowPartitioning(matrix)[0] ||
                      tmp[i] >= hypre_IJMatrixRowPartitioning(matrix)[1]);
         hypre_assert(tmp[i] >= global_first_row &&
                      tmp[i] < global_first_row + hypre_IJMatrixGlobalNumRows(matrix));
         hypre_assert(off_proc_j_h[i] >= global_first_col &&
                      off_proc_j_h[i] < global_first_col + global_num_cols);
#endif
         off_proc_i_h[2 * i] = tmp[i];
         off_proc_i_h[2 * i + 1] = 1;
      }

      off_proc_i_indx = current_num_elmts * 2;
      off_proc_i = off_proc_i_h;
      off_proc_j = off_proc_j_h;
      off_proc_data = off_proc_data_h;

      hypre_TFree(tmp, HYPRE_MEMORY_HOST);
   }

   /* call hypre_IJMatrixAddToValuesParCSR directly inside this function
    * with one chunk of data */
   HYPRE_Int       off_proc_nelm_recv_cur = 0;
   HYPRE_Int       off_proc_nelm_recv_max = 0;
   HYPRE_BigInt   *off_proc_i_recv = NULL;
   HYPRE_BigInt   *off_proc_j_recv = NULL;
   HYPRE_Complex  *off_proc_data_recv = NULL;
   HYPRE_BigInt   *off_proc_i_recv_d = NULL;
   HYPRE_BigInt   *off_proc_j_recv_d = NULL;
   HYPRE_Complex  *off_proc_data_recv_d = NULL;

   /* off_proc_i stores (row, nelems) pairs, hence /2 */
   num_rows = off_proc_i_indx / 2;

   /* verify that we have created the assumed partition */
   if (hypre_IJMatrixAssumedPart(matrix) == NULL)
   {
      hypre_IJMatrixCreateAssumedPartition(matrix);
   }
   apart = (hypre_IJAssumedPart*) hypre_IJMatrixAssumedPart(matrix);

   /*if (hypre_ParCSRMatrixAssumedPartition(par_matrix) == NULL)
   {
      hypre_ParCSRMatrixCreateAssumedPartition(par_matrix);
   }
   apart = hypre_ParCSRMatrixAssumedPartition(par_matrix);*/

   row_list = hypre_CTAlloc(HYPRE_BigInt, num_rows, HYPRE_MEMORY_HOST);
   row_list_num_elements = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
   a_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
   orig_order = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
   real_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);

   /* get the assumed processor id for each row */
   if (num_rows > 0 )
   {
      for (i = 0; i < num_rows; i++)
      {
         row = off_proc_i[i * 2];
         //if (row < 0) row = -row - 1;
         row_list[i] = row;
         row_list_num_elements[i] = off_proc_i[i * 2 + 1];

         hypre_GetAssumedPartitionProcFromRow(comm, row, global_first_row,
                                              global_num_cols, &proc_id);
         a_proc_id[i] = proc_id;
         orig_order[i] = i;
      }

      /* now we need to find the actual order of each row - sort on row -
         this will result in proc ids sorted also...*/
      hypre_BigQsortb2i(row_list, a_proc_id, orig_order, 0, num_rows - 1);

      /* calculate the number of contacts */
      ex_num_contacts = 1;
      last_proc = a_proc_id[0];
      for (i = 1; i < num_rows; i++)
      {
         if (a_proc_id[i] > last_proc)
         {
            ex_num_contacts++;
            last_proc = a_proc_id[i];
         }
      }
   }

   /* now we will go through a create a contact list - need to contact
      assumed processors and find out who the actual row owner is - we
      will contact with a range (2 numbers) */
   ex_contact_procs = hypre_CTAlloc(HYPRE_Int, ex_num_contacts, HYPRE_MEMORY_HOST);
   ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, ex_num_contacts + 1, HYPRE_MEMORY_HOST);
   ex_contact_buf = hypre_CTAlloc(HYPRE_BigInt, ex_num_contacts * 2, HYPRE_MEMORY_HOST);

   counter = 0;
   range_end = -1;
   for (i = 0; i < num_rows; i++)
   {
      if (row_list[i] > range_end)
      {
         /* assumed proc */
         proc_id = a_proc_id[i];

         /* end of prev. range */
         if (counter > 0)
         {
            ex_contact_buf[counter * 2 - 1] = row_list[i - 1];
         }

         /*start new range*/
         ex_contact_procs[counter] = proc_id;
         ex_contact_vec_starts[counter] = counter * 2;
         ex_contact_buf[counter * 2] = row_list[i];
         counter++;

         hypre_GetAssumedPartitionRowRange(comm, proc_id, global_first_col,
                                           global_num_cols, &range_start, &range_end);
      }
   }

   /* finish the starts */
   ex_contact_vec_starts[counter] = counter * 2;
   /* finish the last range */
   if (counter > 0)
   {
      ex_contact_buf[counter * 2 - 1] = row_list[num_rows - 1];
   }

   /* don't allocate space for responses */

   /* create response object - can use same fill response as used in the
      commpkg routine */
   response_obj1.fill_response = hypre_RangeFillResponseIJDetermineRecvProcs;
   response_obj1.data1 = apart; /* this is necessary so we can fill responses*/
   response_obj1.data2 = NULL;

   max_response_size = 6; /* 6 means we can fit 3 ranges*/

   /* exchange 1: ask assumed owners which real procs own our row ranges */
   hypre_DataExchangeList(ex_num_contacts, ex_contact_procs,
                          ex_contact_buf, ex_contact_vec_starts, sizeof(HYPRE_BigInt),
                          sizeof(HYPRE_BigInt), &response_obj1, max_response_size, 1,
                          comm, (void**) &response_buf, &response_buf_starts);

   /* now response_buf contains a proc_id followed by a range upper bound */
   hypre_TFree(ex_contact_procs, HYPRE_MEMORY_HOST);
   hypre_TFree(ex_contact_buf, HYPRE_MEMORY_HOST);
   hypre_TFree(ex_contact_vec_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(a_proc_id, HYPRE_MEMORY_HOST);

   /*how many ranges were returned?*/
   num_ranges = response_buf_starts[ex_num_contacts];
   num_ranges = num_ranges / 2;

   prev_id = -1;
   j = 0;
   counter = 0;
   num_real_procs = 0;

   /* loop through ranges - create a list of actual processor ids*/
   for (i = 0; i < num_ranges; i++)
   {
      upper_bound = response_buf[i * 2 + 1];
      counter = 0;
      tmp_id = response_buf[i * 2];

      /* loop through row_list entries - counting how many are in the range */
      while (j < num_rows && row_list[j] <= upper_bound)
      {
         real_proc_id[j] = tmp_id;
         j++;
         counter++;
      }
      if (counter > 0 && tmp_id != prev_id)
      {
         num_real_procs++;
      }
      prev_id = tmp_id;
   }

   /* now we have the list of real processor ids (real_proc_id) - and the
      number of distinct ones - so now we can set up data to be sent - we
      have HYPRE_Int data and HYPRE_Complex data.  that we will need to pack
      together */

   /* first find out how many rows and elements we need to send per proc -
      so we can do storage */
   ex_contact_procs = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST);
   num_rows_per_proc = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST);
   num_elements_total = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST);

   counter = 0;

   if (num_real_procs > 0 )
   {
      ex_contact_procs[0] = real_proc_id[0];
      num_rows_per_proc[0] = 1;
      num_elements_total[0] = row_list_num_elements[orig_order[0]];

      /* loop through real procs - these are sorted (row_list is sorted also)*/
      for (i = 1; i < num_rows; i++)
      {
         if (real_proc_id[i] == ex_contact_procs[counter]) /* same processor */
         {
            num_rows_per_proc[counter] += 1; /*another row */
            num_elements_total[counter] += row_list_num_elements[orig_order[i]];
         }
         else /* new processor */
         {
            counter++;
            ex_contact_procs[counter] = real_proc_id[i];
            num_rows_per_proc[counter] = 1;
            num_elements_total[counter] = row_list_num_elements[orig_order[i]];
         }
      }
   }

   /* to pack together, we need to use the largest obj. size of
      (HYPRE_Int) and (HYPRE_Complex) - if these are much different, then we
      are wasting some storage, but I do not think that it will be a large
      amount since this function should not be used on really large amounts
      of data anyway*/
   big_int_size = sizeof(HYPRE_BigInt);
   complex_size = sizeof(HYPRE_Complex);

   obj_size_bytes = hypre_max(big_int_size, complex_size);

   /* set up data to be sent to send procs */
   /* for each proc, ex_contact_buf contains #rows, row #, no. elements,
      col indicies, col data, row #, no. elements, col indicies, col data,
      etc. */

   /* first calculate total storage and make vec_starts arrays */
   storage = 0;
   ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, num_real_procs + 1, HYPRE_MEMORY_HOST);
   ex_contact_vec_starts[0] = -1;

   for (i = 0; i < num_real_procs; i++)
   {
      storage += 1 + 2 * num_rows_per_proc[i] + 2 * num_elements_total[i];
      ex_contact_vec_starts[i + 1] = -storage - 1; /* need negative for next loop */
   }

   hypre_TFree(num_elements_total, HYPRE_MEMORY_HOST);

   /*void_contact_buf = hypre_MAlloc(storage*obj_size_bytes);*/
   void_contact_buf = hypre_CTAlloc(char, storage * obj_size_bytes, HYPRE_MEMORY_HOST);
   index_ptr = void_contact_buf; /* step through with this index */

   /* for each proc: #rows, row #, no. elements,
      col indicies, col data, row #, no. elements, col indicies, col data,
      etc. */

   /* un-sort real_proc_id - we want to access data arrays in order, so
      cheaper to do this*/
   us_real_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
   for (i = 0; i < num_rows; i++)
   {
      us_real_proc_id[orig_order[i]] = real_proc_id[i];
   }
   hypre_TFree(real_proc_id, HYPRE_MEMORY_HOST);

   counter = 0; /* index into data arrays */
   prev_id = -1;
   for (i = 0; i < num_rows; i++)
   {
      proc_id = us_real_proc_id[i];
      /* can't use row list[i] - you loose the negative signs that
         differentiate add/set values */
      row = off_proc_i[i * 2];
      num_elements = row_list_num_elements[i];
      /* find position of this processor */
      indx = hypre_BinarySearch(ex_contact_procs, proc_id, num_real_procs);
      in_i = ex_contact_vec_starts[indx];

      index_ptr = (void *) ((char *) void_contact_buf + in_i * obj_size_bytes);

      /* first time for this processor - add the number of rows to the
         buffer (negative start marks "not yet visited") */
      if (in_i < 0)
      {
         in_i = -in_i - 1;
         /* re-calc. index_ptr since in_i was negative */
         index_ptr = (void *) ((char *) void_contact_buf + in_i * obj_size_bytes);

         tmp_int = num_rows_per_proc[indx];
         hypre_TMemcpy( index_ptr, &tmp_int, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
         index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
         in_i++;
      }
      /* add row # */
      hypre_TMemcpy( index_ptr, &row, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
      index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
      in_i++;

      /* add number of elements */
      hypre_TMemcpy( index_ptr, &num_elements, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
      index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
      in_i++;

      /* now add col indices */
      for (j = 0; j < num_elements; j++)
      {
         tmp_big_int = off_proc_j[counter + j]; /* col number */
         hypre_TMemcpy( index_ptr, &tmp_big_int, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
         index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
         in_i ++;
      }

      /* now add data */
      for (j = 0; j < num_elements; j++)
      {
         tmp_complex = off_proc_data[counter++]; /* value */
         hypre_TMemcpy( index_ptr, &tmp_complex, HYPRE_Complex, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
         index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
         in_i++;
      }

      /* increment the indexes to keep track of where we are - we
       * adjust below to be actual starts*/
      ex_contact_vec_starts[indx] = in_i;
   }

   /* some clean up */
   hypre_TFree(response_buf, HYPRE_MEMORY_HOST);
   hypre_TFree(response_buf_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(us_real_proc_id, HYPRE_MEMORY_HOST);
   hypre_TFree(orig_order, HYPRE_MEMORY_HOST);
   hypre_TFree(row_list, HYPRE_MEMORY_HOST);
   hypre_TFree(row_list_num_elements, HYPRE_MEMORY_HOST);
   hypre_TFree(num_rows_per_proc, HYPRE_MEMORY_HOST);

   /* shift the running end-offsets down by one slot to turn them into
      actual starts */
   for (i = num_real_procs; i > 0; i--)
   {
      ex_contact_vec_starts[i] = ex_contact_vec_starts[i - 1];
   }
   ex_contact_vec_starts[0] = 0;

   /* now send the data */
   /***********************************/

   /* first get the integer info in send_proc_obj */

   /* the response we expect is just a confirmation*/
   response_buf = NULL;
   response_buf_starts = NULL;

   /*build the response object*/

   /* use the send_proc_obj for the info kept from contacts */
   /*estimate inital storage allocation */
   send_proc_obj.length = 0;
   send_proc_obj.storage_length = num_real_procs + 5;
   send_proc_obj.id = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST);
   send_proc_obj.vec_starts = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST);
   send_proc_obj.vec_starts[0] = 0;
   send_proc_obj.element_storage_length = storage + 20;
   send_proc_obj.v_elements = hypre_TAlloc(char, obj_size_bytes * send_proc_obj.element_storage_length, HYPRE_MEMORY_HOST);

   response_obj2.fill_response = hypre_FillResponseIJOffProcVals;
   response_obj2.data1 = NULL;
   response_obj2.data2 = &send_proc_obj;

   max_response_size = 0;

   /* exchange 2: ship the packed payload to the real owners; received
      messages accumulate in send_proc_obj via the fill-response callback */
   hypre_DataExchangeList(num_real_procs, ex_contact_procs,
                          void_contact_buf, ex_contact_vec_starts, obj_size_bytes,
                          0, &response_obj2, max_response_size, 2,
                          comm, (void **) &response_buf, &response_buf_starts);

   hypre_TFree(response_buf, HYPRE_MEMORY_HOST);
   hypre_TFree(response_buf_starts, HYPRE_MEMORY_HOST);

   hypre_TFree(ex_contact_procs, HYPRE_MEMORY_HOST);
   hypre_TFree(void_contact_buf, HYPRE_MEMORY_HOST);
   hypre_TFree(ex_contact_vec_starts, HYPRE_MEMORY_HOST);

   /* Now we can unpack the send_proc_objects and call set and add to
      values functions.  We unpack messages in a deterministic order,
      using processor rank */
   num_recvs = send_proc_obj.length;
   argsort_contact_procs = hypre_CTAlloc(HYPRE_Int, num_recvs, HYPRE_MEMORY_HOST);
   for (i = 0; i < num_recvs; i++)
   {
      argsort_contact_procs[i] = i;
   }
   /* This sort's the id array, but the original indices are stored in
    * argsort_contact_procs */
   hypre_qsort2i( send_proc_obj.id, argsort_contact_procs, 0, num_recvs - 1 );

   /* alias */
   recv_data_ptr = send_proc_obj.v_elements;
   recv_starts = send_proc_obj.vec_starts;

   for (i = 0; i < num_recvs; i++)
   {
      /* Find the current processor in order, and reset recv_data_ptr to
         that processor's message */
      original_proc_indx = argsort_contact_procs[i];
      /*current_proc = send_proc_obj.id[i];*/
      indx = recv_starts[original_proc_indx];
      recv_data_ptr = (void *) ((char *) send_proc_obj.v_elements + indx * obj_size_bytes);

      /* get the number of rows for this recv */
      hypre_TMemcpy( &num_rows, recv_data_ptr, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
      recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
      indx++;

      for (j = 0; j < num_rows; j++) /* for each row: unpack info */
      {
         /* row # */
         hypre_TMemcpy( &row, recv_data_ptr, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
         recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
         indx++;

         /* num elements for this row */
         hypre_TMemcpy( &num_elements, recv_data_ptr, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
         recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
         indx++;

         /* col indices */ /* Need to check this again !!!! */
         if (big_int_size == obj_size_bytes)
         {
            /* slot size matches: read the indices in place, no copy */
            col_ptr = (HYPRE_BigInt *) recv_data_ptr;
            recv_data_ptr = (void *) ((char *)recv_data_ptr + num_elements * obj_size_bytes);
         }
         else /* copy data */
         {
            if (big_int_data_size < num_elements)
            {
               big_int_data = hypre_TReAlloc(big_int_data, HYPRE_BigInt, num_elements + 10, HYPRE_MEMORY_HOST);
            }
            for (k = 0; k < num_elements; k++)
            {
               hypre_TMemcpy( &big_int_data[k], recv_data_ptr, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
               recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
            }
            col_ptr = big_int_data;
         }

         /* col data */
         if (complex_size == obj_size_bytes)
         {
            col_data_ptr = (HYPRE_Complex *) recv_data_ptr;
            recv_data_ptr = (void *) ((char *)recv_data_ptr + num_elements * obj_size_bytes);
         }
         else /* copy data */
         {
            if (complex_data_size < num_elements)
            {
               complex_data = hypre_TReAlloc(complex_data, HYPRE_Complex, num_elements + 10, HYPRE_MEMORY_HOST);
            }
            for (k = 0; k < num_elements; k++)
            {
               hypre_TMemcpy( &complex_data[k], recv_data_ptr, HYPRE_Complex, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
               recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
            }
            col_data_ptr = complex_data;
         }

         if (memory_location == HYPRE_MEMORY_HOST)
         {
            /* host path: add this row's entries directly */
            hypre_IJMatrixAddToValuesParCSR(matrix, 1, &num_elements, &row,
                                            &row_index, col_ptr, col_data_ptr);
         }
         else
         {
            /* device path: accumulate into host staging buffers (grown
               geometrically), pushed to the device in one shot below */
            HYPRE_Int nelm_new = off_proc_nelm_recv_cur + num_elements;
            if (nelm_new > off_proc_nelm_recv_max)
            {
               off_proc_nelm_recv_max = nelm_new * 2;
               off_proc_i_recv = hypre_TReAlloc(off_proc_i_recv, HYPRE_BigInt, off_proc_nelm_recv_max, HYPRE_MEMORY_HOST);
               off_proc_j_recv = hypre_TReAlloc(off_proc_j_recv, HYPRE_BigInt, off_proc_nelm_recv_max, HYPRE_MEMORY_HOST);
               off_proc_data_recv = hypre_TReAlloc(off_proc_data_recv, HYPRE_Complex, off_proc_nelm_recv_max, HYPRE_MEMORY_HOST);
            }
            HYPRE_Int i; /* shadows the outer i on purpose in the original */
            for (i = 0; i < num_elements; i++)
            {
               off_proc_i_recv[off_proc_nelm_recv_cur + i] = row;
            }
            hypre_TMemcpy(off_proc_j_recv + off_proc_nelm_recv_cur, col_ptr,
                          HYPRE_BigInt, num_elements, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
            hypre_TMemcpy(off_proc_data_recv + off_proc_nelm_recv_cur, col_data_ptr,
                          HYPRE_Complex, num_elements, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
            off_proc_nelm_recv_cur = nelm_new;
         }

         indx += (num_elements * 2);
      }
   }

   if (memory_location == HYPRE_MEMORY_DEVICE)
   {
      off_proc_i_recv_d = hypre_TAlloc(HYPRE_BigInt, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE);
      off_proc_j_recv_d = hypre_TAlloc(HYPRE_BigInt, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE);
      off_proc_data_recv_d = hypre_TAlloc(HYPRE_Complex, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE);

      hypre_TMemcpy(off_proc_i_recv_d, off_proc_i_recv, HYPRE_BigInt, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
      hypre_TMemcpy(off_proc_j_recv_d, off_proc_j_recv, HYPRE_BigInt, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
      hypre_TMemcpy(off_proc_data_recv_d, off_proc_data_recv, HYPRE_Complex, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
      hypre_IJMatrixSetAddValuesParCSRDevice(matrix, off_proc_nelm_recv_cur, NULL, off_proc_i_recv_d, NULL, off_proc_j_recv_d, off_proc_data_recv_d, "add");
#endif
   }

   hypre_TFree(send_proc_obj.v_elements, HYPRE_MEMORY_HOST);
   hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST);
   hypre_TFree(argsort_contact_procs, HYPRE_MEMORY_HOST);

   if (big_int_data)
   {
      hypre_TFree(big_int_data, HYPRE_MEMORY_HOST);
   }

   if (complex_data)
   {
      hypre_TFree(complex_data, HYPRE_MEMORY_HOST);
   }

   /* the device branch replaced the caller's pointers with host copies
      that we own, so free them here */
   if (memory_location == HYPRE_MEMORY_DEVICE)
   {
      hypre_TFree(off_proc_i, HYPRE_MEMORY_HOST);
      hypre_TFree(off_proc_j, HYPRE_MEMORY_HOST);
      hypre_TFree(off_proc_data, HYPRE_MEMORY_HOST);
   }

   hypre_TFree(off_proc_i_recv, HYPRE_MEMORY_HOST);
   hypre_TFree(off_proc_j_recv, HYPRE_MEMORY_HOST);
   hypre_TFree(off_proc_data_recv, HYPRE_MEMORY_HOST);

   hypre_TFree(off_proc_i_recv_d, HYPRE_MEMORY_DEVICE);
   hypre_TFree(off_proc_j_recv_d, HYPRE_MEMORY_DEVICE);
   hypre_TFree(off_proc_data_recv_d, HYPRE_MEMORY_DEVICE);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------
 * hypre_FillResponseIJOffProcVals
 * Fill response function for the previous function (2nd data exchange)
 *
 * Appends one incoming contact message (contact_size slots of
 * object_size bytes each) to the hypre_ProcListElements accumulator
 * passed through response_obj->data2, growing its id/vec_starts and
 * element storage as needed.  Sends back an empty confirmation.
 *--------------------------------------------------------------------*/

HYPRE_Int
hypre_FillResponseIJOffProcVals(void      *p_recv_contact_buf,
                                HYPRE_Int  contact_size,
                                HYPRE_Int  contact_proc,
                                void      *ro,
                                MPI_Comm   comm,
                                void     **p_send_response_buf,
                                HYPRE_Int *response_message_size )
{
   HYPRE_Int myid;
   HYPRE_Int index, count, elength;
   HYPRE_Int object_size;
   void *index_ptr;

   hypre_DataExchangeResponse *response_obj = (hypre_DataExchangeResponse*) ro;
   hypre_ProcListElements *send_proc_obj = (hypre_ProcListElements*) response_obj->data2;

   /* slot size must match the packing in the sender (max of the two) */
   object_size = hypre_max(sizeof(HYPRE_BigInt), sizeof(HYPRE_Complex));

   hypre_MPI_Comm_rank(comm, &myid );

   /*check to see if we need to allocate more space in send_proc_obj for
    * vec starts and id */
   if (send_proc_obj->length == send_proc_obj->storage_length)
   {
      send_proc_obj->storage_length += 20; /*add space for 20 more contact*/
      send_proc_obj->vec_starts = hypre_TReAlloc(send_proc_obj->vec_starts, HYPRE_Int, send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST);
      if ( send_proc_obj->id != NULL)
      {
         send_proc_obj->id = hypre_TReAlloc(send_proc_obj->id, HYPRE_Int, send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST);
      }
   }

   /*initialize*/
   count = send_proc_obj->length;
   index = send_proc_obj->vec_starts[count]; /* current number of elements */
   if ( send_proc_obj->id != NULL)
   {
      send_proc_obj->id[count] = contact_proc;
   }

   /*do we need more storage for the elements?*/
   if (send_proc_obj->element_storage_length < index + contact_size)
   {
      elength = hypre_max(contact_size, 100);
      elength += index;
      send_proc_obj->v_elements = hypre_TReAlloc((char*)send_proc_obj->v_elements, char, elength * object_size, HYPRE_MEMORY_HOST);
      send_proc_obj->element_storage_length = elength;
   }

   /*populate send_proc_obj*/
   index_ptr
= (void *) ((char *) send_proc_obj->v_elements + index * object_size); /* completes the assignment begun just above */
   hypre_TMemcpy(index_ptr, p_recv_contact_buf, char, object_size * contact_size, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);

   send_proc_obj->vec_starts[count + 1] = index + contact_size;
   send_proc_obj->length++;

   /* output - no message to return (confirmation) */
   *response_message_size = 0;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------
 * hypre_FindProc
 *
 * Binary search: returns the index p such that
 * list[p] <= value < list[p+1], or -1 if value is outside
 * [list[0], list[list_length]).
 *
 * NOTE(review): this reads list[list_length], so callers must pass a
 * partition-style array with list_length + 1 entries — confirm at the
 * call sites.
 *--------------------------------------------------------------------*/

HYPRE_Int
hypre_FindProc(HYPRE_BigInt *list, HYPRE_BigInt value, HYPRE_Int list_length)
{
   HYPRE_Int low, high, m;

   low = 0;
   high = list_length;
   if (value >= list[high] || value < list[low])
   {
      return -1;
   }
   else
   {
      while (low + 1 < high)
      {
         m = (low + high) / 2;
         if (value < list[m])
         {
            high = m;
         }
         else if (value >= list[m])
         {
            low = m;
         }
      }
      return low;
   }
}

/******************************************************************************
 *
 * hypre_IJMatrixAssembleParCSR
 *
 * assembles IJMatrix from AuxParCSRMatrix auxiliary structure
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixAssembleParCSR(hypre_IJMatrix *matrix)
{
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix);
   hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
   HYPRE_Int *diag_i = hypre_CSRMatrixI(diag);
   HYPRE_Int *offd_i = hypre_CSRMatrixI(offd);
   HYPRE_Int *diag_j;
   HYPRE_Int *offd_j = NULL;
   HYPRE_Complex *diag_data;
   HYPRE_Complex *offd_data = NULL;
   HYPRE_Int i, j, j0;
   HYPRE_Int num_cols_offd;
   HYPRE_Int *diag_pos;
   HYPRE_BigInt *col_map_offd;
   HYPRE_Int *rownnz;
   HYPRE_Int *row_length;
   HYPRE_BigInt **aux_j;
   HYPRE_Complex **aux_data;
   HYPRE_Int
my_id, num_procs; /* completes the HYPRE_Int declaration opened at the end of the previous chunk */
   HYPRE_Int num_rows;
   HYPRE_Int num_rownnz;
   HYPRE_Int i_diag, i_offd;
   HYPRE_BigInt col_0, col_n;
   HYPRE_Int nnz_offd;
   HYPRE_BigInt *big_offd_j;
   HYPRE_BigInt *tmp_j;
   HYPRE_Complex temp;
   HYPRE_BigInt base = hypre_IJMatrixGlobalFirstCol(matrix);
   HYPRE_Int off_proc_i_indx;
   HYPRE_Int max_off_proc_elmts;
   HYPRE_Int current_num_elmts;
   HYPRE_BigInt *off_proc_i;
   HYPRE_BigInt *off_proc_j;
   HYPRE_Complex *off_proc_data;
   HYPRE_Int offd_proc_elmts;
   //HYPRE_Int new_off_proc_i_indx;
   //HYPRE_Int cancel_indx;
   //HYPRE_Int col_indx;
   //HYPRE_Int current_indx;
   //HYPRE_Int current_i;
   //HYPRE_Int row_len;
   HYPRE_Int max_num_threads;
   HYPRE_Int aux_flag, aux_flag_global;

   HYPRE_ANNOTATE_FUNC_BEGIN;

   max_num_threads = hypre_NumThreads();

   /* first find out if anyone has an aux_matrix, and create one if you
    * don't have one, but other procs do */
   aux_flag = 0;
   aux_flag_global = 0;
   if (aux_matrix)
   {
      aux_flag = 1;
   }
   hypre_MPI_Allreduce(&aux_flag, &aux_flag_global, 1, HYPRE_MPI_INT, hypre_MPI_SUM, comm);
   if (aux_flag_global && (!aux_flag))
   {
      hypre_MPI_Comm_rank(comm, &my_id);
      num_rows = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
      hypre_AuxParCSRMatrixCreate(&aux_matrix, num_rows, num_rows, NULL);
      hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }

   if (aux_matrix)
   {
      /* first delete all cancelled elements */
      /*cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix);
      if (cancel_indx)
      {
         current_num_elmts=hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
         off_proc_i=hypre_AuxParCSRMatrixOffProcI(aux_matrix);
         off_proc_j=hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
         off_proc_data=hypre_AuxParCSRMatrixOffProcData(aux_matrix);
         off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
         col_indx = 0;
         current_i = 0;
         current_indx = 0;
         new_off_proc_i_indx = off_proc_i_indx;
         for (i=0; i < off_proc_i_indx; i= i+2)
         {
            row_len = off_proc_i[i+1];
            for (j=0; j < off_proc_i[i+1]; j++)
            {
               if (off_proc_j[col_indx] == -1)
               {
                  col_indx++;
                  row_len--;
                  current_num_elmts--;
               }
               else
               {
                  off_proc_j[current_indx] = off_proc_j[col_indx];
                  off_proc_data[current_indx++] = off_proc_data[col_indx++];
               }
            }
            if (row_len)
            {
               off_proc_i[current_i] = off_proc_i[i];
               off_proc_i[current_i+1] = row_len;
               current_i += 2;
            }
            else
            {
               new_off_proc_i_indx -= 2;
            }
         }
         hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = new_off_proc_i_indx;
         hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix) = current_num_elmts;
      }*/
      off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);

      /* collective: does any rank hold off-processor entries? */
      hypre_MPI_Allreduce(&off_proc_i_indx, &offd_proc_elmts, 1, HYPRE_MPI_INT,
                          hypre_MPI_SUM, comm);
      if (offd_proc_elmts)
      {
         max_off_proc_elmts = hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
         current_num_elmts = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
         off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
         off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
         off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
         hypre_IJMatrixAssembleOffProcValsParCSR( matrix, off_proc_i_indx,
                                                  max_off_proc_elmts, current_num_elmts,
                                                  HYPRE_MEMORY_HOST,
                                                  off_proc_i, off_proc_j, off_proc_data);
      }
   }

   if (hypre_IJMatrixAssembleFlag(matrix) == 0)
   {
      hypre_MPI_Comm_size(comm, &num_procs);
      hypre_MPI_Comm_rank(comm, &my_id);
      num_rows = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
      /* local column ownership range: [col_0, col_n] inclusive */
      col_0 = col_partitioning[0];
      col_n = col_partitioning[1] - 1;

      /* move data into ParCSRMatrix if not there already */
      if (hypre_AuxParCSRMatrixNeedAux(aux_matrix))
      {
         HYPRE_Int *diag_array;
         HYPRE_Int *offd_array;

         /* Update nonzero rows of aux_matrix */
         hypre_AuxParCSRMatrixSetRownnz(aux_matrix);

         aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
         aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
         row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
         num_rownnz = hypre_AuxParCSRMatrixLocalNumRownnz(aux_matrix);
         rownnz = hypre_AuxParCSRMatrixRownnz(aux_matrix);

         /* per-thread diag/offd counts, later turned into prefix sums */
         diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
         offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
         diag_pos = hypre_TAlloc(HYPRE_Int, num_rownnz, HYPRE_MEMORY_HOST);

         i_diag = i_offd = 0;
#ifdef HYPRE_USING_OPENMP
         #pragma omp parallel private(i, j, i_diag, i_offd)
#endif
         {
            HYPRE_BigInt *local_j;
            HYPRE_Complex *local_data;
            HYPRE_Int ii, rest, size, ns, ne;
            HYPRE_Int num_threads, my_thread_num;

            num_threads = hypre_NumActiveThreads();
            my_thread_num = hypre_GetThreadNum();

            /* split [0, num_rownnz) into contiguous [ns, ne) per thread,
               distributing the remainder over the first threads */
            size = num_rownnz / num_threads;
            rest = num_rownnz - size * num_threads;
            if (my_thread_num < rest)
            {
               ns = my_thread_num * (size + 1);
               ne = (my_thread_num + 1) * (size + 1);
            }
            else
            {
               ns = my_thread_num * size + rest;
               ne = (my_thread_num + 1) * size + rest;
            }

            /* pass 1: count diag/offd entries per thread; remember the
               position of the diagonal entry of each row in diag_pos */
            i_diag = i_offd = 0;
            for (i = ns; i < ne; i++)
            {
               ii = rownnz ? rownnz[i] : i;
               local_j = aux_j[ii];
               local_data = aux_data[ii];
               diag_pos[i] = -1;
               for (j = 0; j < row_length[ii]; j++)
               {
                  if (local_j[j] < col_0 || local_j[j] > col_n)
                  {
                     i_offd++;
                  }
                  else
                  {
                     i_diag++;
                     if ((HYPRE_Int)(local_j[j] - col_0) == i)
                     {
                        diag_pos[i] = j;
                     }
                  }
               }
            }
            diag_array[my_thread_num] = i_diag;
            offd_array[my_thread_num] = i_offd;
#ifdef HYPRE_USING_OPENMP
            #pragma omp barrier
#endif
            /* thread 0: prefix-sum the counts and (re)allocate the CSR
               arrays at the final sizes */
            if (my_thread_num == 0)
            {
               i_diag = 0;
               i_offd = 0;
               for (i = 0; i < num_threads; i++)
               {
                  i_diag += diag_array[i];
                  i_offd += offd_array[i];
                  diag_array[i] = i_diag;
                  offd_array[i] = i_offd;
               }
               diag_i[num_rows] = i_diag;
               offd_i[num_rows] = i_offd;

               hypre_TFree(hypre_CSRMatrixJ(diag), hypre_CSRMatrixMemoryLocation(diag));
               hypre_TFree(hypre_CSRMatrixData(diag), hypre_CSRMatrixMemoryLocation(diag));
               hypre_TFree(hypre_CSRMatrixJ(offd), hypre_CSRMatrixMemoryLocation(offd));
               hypre_TFree(hypre_CSRMatrixData(offd), hypre_CSRMatrixMemoryLocation(offd));
               hypre_TFree(hypre_CSRMatrixBigJ(offd), hypre_CSRMatrixMemoryLocation(offd));

               diag_j = hypre_CTAlloc(HYPRE_Int, i_diag, hypre_CSRMatrixMemoryLocation(diag));
               diag_data = hypre_CTAlloc(HYPRE_Complex, i_diag, hypre_CSRMatrixMemoryLocation(diag));
               offd_j = hypre_CTAlloc(HYPRE_Int, i_offd, hypre_CSRMatrixMemoryLocation(offd));
               offd_data = hypre_CTAlloc(HYPRE_Complex, i_offd, hypre_CSRMatrixMemoryLocation(offd));
               big_offd_j = hypre_CTAlloc(HYPRE_BigInt, i_offd, hypre_CSRMatrixMemoryLocation(offd));
            }
#ifdef HYPRE_USING_OPENMP
            #pragma omp barrier
#endif
            /* each thread starts writing at its exclusive prefix offset */
            if (my_thread_num)
            {
               i_diag = diag_array[my_thread_num - 1];
               i_offd = offd_array[my_thread_num - 1];
            }
            else
            {
               i_diag = 0;
               i_offd = 0;
            }

            /* pass 2: fill diag/offd, placing the diagonal entry first
               in its row */
            for (i = ns; i < ne; i++)
            {
               ii = rownnz ? rownnz[i] : i;
               diag_i[ii] = i_diag;
               offd_i[ii] = i_offd;
               local_j = aux_j[ii];
               local_data = aux_data[ii];
               if (diag_pos[i] > -1)
               {
                  diag_j[i_diag] = (HYPRE_Int)(local_j[diag_pos[i]] - col_0);
                  diag_data[i_diag++] = local_data[diag_pos[i]];
               }
               for (j = 0; j < row_length[ii]; j++)
               {
                  if (local_j[j] < col_0 || local_j[j] > col_n)
                  {
                     big_offd_j[i_offd] = local_j[j];
                     offd_data[i_offd++] = local_data[j];
                  }
                  else if (j != diag_pos[i])
                  {
                     diag_j[i_diag] = (HYPRE_Int)(local_j[j] - col_0);
                     diag_data[i_diag++] = local_data[j];
                  }
               }
            }

            /* Correct diag_i and offd_i */
            /* (fill in row pointers for the all-zero rows skipped above) */
            if (rownnz != NULL)
            {
#ifdef HYPRE_USING_OPENMP
               #pragma omp barrier
#endif
               for (i = ns; i < (ne - 1); i++)
               {
                  for (ii = rownnz[i] + 1; ii < rownnz[i + 1]; ii++)
                  {
                     diag_i[ii] = diag_i[rownnz[i + 1]];
                     offd_i[ii] = offd_i[rownnz[i + 1]];
                  }
               }

               if (my_thread_num < (num_threads - 1))
               {
                  for (ii = rownnz[ne - 1] + 1; ii < rownnz[ne]; ii++)
                  {
                     diag_i[ii] = diag_i[rownnz[ne]];
                     offd_i[ii] = offd_i[rownnz[ne]];
                  }
               }
               else
               {
                  for (ii = rownnz[ne - 1] + 1; ii < num_rows; ii++)
                  {
                     diag_i[ii] = diag_i[num_rows];
                     offd_i[ii] = offd_i[num_rows];
                  }
               }
            }
         } /* end parallel region */

         hypre_TFree(diag_array, HYPRE_MEMORY_HOST);
         hypre_TFree(offd_array, HYPRE_MEMORY_HOST);

         hypre_CSRMatrixJ(diag) = diag_j;
         hypre_CSRMatrixData(diag) = diag_data;
         hypre_CSRMatrixNumNonzeros(diag) = diag_i[num_rows];
         if (offd_i[num_rows] > 0)
         {
            hypre_CSRMatrixJ(offd) = offd_j;
            hypre_CSRMatrixBigJ(offd) = big_offd_j;
            hypre_CSRMatrixData(offd) = offd_data;
         }
         hypre_CSRMatrixNumNonzeros(offd) = offd_i[num_rows];
         hypre_TFree(diag_pos, HYPRE_MEMORY_HOST);
      }
      else
      {
         /* move diagonal element into first space */
         big_offd_j = hypre_CSRMatrixBigJ(offd);
         diag_j = hypre_CSRMatrixJ(diag);
         diag_data = hypre_CSRMatrixData(diag);
#ifdef HYPRE_USING_OPENMP
         #pragma omp parallel for private (i,j,j0,temp)
#endif
         for (i = 0; i < num_rows; i++)
         {
            j0 = diag_i[i];
            for (j = j0; j < diag_i[i + 1]; j++)
            {
               if (diag_j[j] == i)
               {
                  /* swap the diagonal entry to the front of the row */
                  temp = diag_data[j0];
                  diag_data[j0] = diag_data[j];
                  diag_data[j] = temp;
                  diag_j[j] = diag_j[j0];
                  diag_j[j0] = i;
                  break;
               }
            }
         }

         offd_j = hypre_CSRMatrixJ(offd);
         if (!offd_j && offd_i[num_rows])
         {
            offd_j = hypre_CTAlloc(HYPRE_Int, offd_i[num_rows], hypre_CSRMatrixMemoryLocation(offd));
            hypre_CSRMatrixJ(offd) = offd_j;
         }
      }

      /* generate the nonzero rows inside offd and diag by calling */
      hypre_CSRMatrixSetRownnz(diag);
      hypre_CSRMatrixSetRownnz(offd);

      /* generate col_map_offd */
      nnz_offd = offd_i[num_rows];
      if (nnz_offd)
      {
         /* sort + dedupe the global offd column indices */
         tmp_j = hypre_CTAlloc(HYPRE_BigInt, nnz_offd, HYPRE_MEMORY_HOST);
         for (i = 0; i < nnz_offd; i++)
         {
            tmp_j[i] = big_offd_j[i];
         }
         hypre_BigQsort0(tmp_j, 0, nnz_offd - 1);
         num_cols_offd = 1;
         for (i = 0; i < nnz_offd - 1; i++)
         {
            if (tmp_j[i + 1] > tmp_j[i])
            {
               tmp_j[num_cols_offd++] = tmp_j[i + 1];
            }
         }

         col_map_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
         for (i = 0; i < num_cols_offd; i++)
         {
            col_map_offd[i] = tmp_j[i];
         }
#ifdef HYPRE_USING_OPENMP
         #pragma omp parallel for private(i)
#endif
         /* compress global big_offd_j indices to local offd_j indices */
         for (i = 0; i < nnz_offd; i++)
         {
            offd_j[i] = hypre_BigBinarySearch(col_map_offd, big_offd_j[i], num_cols_offd);
         }

         if (base)
         {
            for (i = 0; i < num_cols_offd; i++)
            {
               col_map_offd[i] -= base;
            }
         }
         hypre_ParCSRMatrixColMapOffd(par_matrix) = col_map_offd;
         hypre_CSRMatrixNumCols(offd) = num_cols_offd;
         hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
         hypre_TFree(big_offd_j, hypre_CSRMatrixMemoryLocation(offd));
         hypre_CSRMatrixBigJ(offd) = NULL;
      }

      hypre_IJMatrixAssembleFlag(matrix) = 1;
   }

   /* the auxiliary structure is no longer needed once assembled */
   hypre_AuxParCSRMatrixDestroy(aux_matrix);
   hypre_IJMatrixTranslator(matrix) = NULL;

   HYPRE_ANNOTATE_FUNC_END;

   return hypre_error_flag;
}
/******************************************************************************
 *
 * IJMatrix_ParCSR interface
 *
 *****************************************************************************/

#include "_hypre_IJ_mv.h"
#include "../HYPRE.h"

/******************************************************************************
 *
 * hypre_IJMatrixSetValuesOMPParCSR
 *
 * sets values in an IJMatrix before assembly,
 * use of this routine requires that the values in rows are different from each
 * other, i.e rows[i] != rows[j] for i != j
 * to ensure accurate threading
 *
 * matrix      : IJ matrix whose underlying ParCSR object is modified in place
 * nrows       : number of rows described by the remaining arrays
 * ncols       : entries per row (NULL is treated as one entry per row)
 * rows        : global row indices (mutually distinct, see note above)
 * row_indexes : offset of each row's first entry within cols/values
 * cols        : global column indices
 * values      : coefficients; existing entries are overwritten (not summed)
 *
 * Returns hypre_error_flag.  Rows not owned by this process are skipped by
 * this routine (off-process handling is not supported here).
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixSetValuesOMPParCSR( hypre_IJMatrix       *matrix,
                                  HYPRE_Int             nrows,
                                  HYPRE_Int            *ncols,
                                  const HYPRE_BigInt   *rows,
                                  const HYPRE_Int      *row_indexes,
                                  const HYPRE_BigInt   *cols,
                                  const HYPRE_Complex  *values )
{
   hypre_ParCSRMatrix    *par_matrix;
   hypre_CSRMatrix       *diag, *offd;
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_BigInt          *row_partitioning;
   HYPRE_BigInt          *col_partitioning;
   MPI_Comm               comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int              num_procs, my_id;
   HYPRE_BigInt           col_0, col_n, first;
   HYPRE_BigInt         **aux_j;
   HYPRE_Complex        **aux_data;
   HYPRE_Int             *row_length, *row_space;
   HYPRE_Int              need_aux;
   HYPRE_Int             *diag_i;
   HYPRE_Int             *diag_j;
   HYPRE_Complex         *diag_data;
   HYPRE_Int             *offd_i;
   HYPRE_Int             *offd_j;
   HYPRE_BigInt          *big_offd_j;
   HYPRE_Complex         *offd_data;
   HYPRE_Int              print_level = hypre_IJMatrixPrintLevel(matrix);
   HYPRE_Int              error_flag = 0;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
   row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   col_partitioning = hypre_IJMatrixColPartitioning(matrix);

   /* local column ownership range [col_0, col_n] and global first column */
   col_0 = col_partitioning[0];
   col_n = col_partitioning[1] - 1;
   first = hypre_IJMatrixGlobalFirstCol(matrix);

   if (nrows < 0)
   {
      hypre_error_in_arg(2);
      if (print_level)
      {
         hypre_printf("Error! nrows negative! HYPRE_IJMatrixSetValues\n");
      }
      return hypre_error_flag;
   }

   if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled */
   {
      /* The sparsity pattern is fixed: only values of already existing
         entries may be overwritten; a column that is not present in the
         pattern is an error. */
      HYPRE_BigInt *col_map_offd;
      HYPRE_Int     num_cols_offd;

      diag = hypre_ParCSRMatrixDiag(par_matrix);
      diag_i = hypre_CSRMatrixI(diag);
      diag_j = hypre_CSRMatrixJ(diag);
      diag_data = hypre_CSRMatrixData(diag);
      offd = hypre_ParCSRMatrixOffd(par_matrix);
      offd_i = hypre_CSRMatrixI(offd);
      num_cols_offd = hypre_CSRMatrixNumCols(offd);
      if (num_cols_offd)
      {
         col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
         offd_j = hypre_CSRMatrixJ(offd);
         offd_data = hypre_CSRMatrixData(offd);
      }
      aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);

#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel
#endif
      {
         HYPRE_Int j_offd;
         HYPRE_Int num_threads, my_thread_num;
         HYPRE_Int len, rest, ns, ne;
         HYPRE_Int pos_diag, pos_offd;
         HYPRE_Int len_diag, len_offd;
         HYPRE_Int row_local;
         HYPRE_Int i, j, ii, n;
         HYPRE_BigInt row;
         HYPRE_Int not_found, size, indx;

         /* static partition of the nrows input rows over the threads;
            the first 'rest' threads get one extra row */
         num_threads = hypre_NumActiveThreads();
         my_thread_num = hypre_GetThreadNum();
         len = nrows / num_threads;
         rest = nrows - len * num_threads;
         if (my_thread_num < rest)
         {
            ns = my_thread_num * (len + 1);
            ne = (my_thread_num + 1) * (len + 1);
         }
         else
         {
            ns = my_thread_num * len + rest;
            ne = (my_thread_num + 1) * len + rest;
         }

         for (ii = ns; ii < ne; ii++)
         {
            row = rows[ii];
            n = ncols ? ncols[ii] : 1;
            if (n == 0) /* empty row */
            {
               continue;
            }
            indx = row_indexes[ii];

            /* processor owns the row */
            if (row >= row_partitioning[0] && row < row_partitioning[1])
            {
               row_local = (HYPRE_Int)(row - row_partitioning[0]);
               /* compute local row number */
               size = diag_i[row_local + 1] - diag_i[row_local]
                      + offd_i[row_local + 1] - offd_i[row_local];
               if (n > size)
               {
                  hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                  #pragma omp atomic
#endif
                  error_flag++;
                  if (print_level)
                  {
                     hypre_printf (" row %b too long! \n", row);
                  }
                  break;
               }

               pos_diag = diag_i[row_local];
               pos_offd = offd_i[row_local];
               len_diag = diag_i[row_local + 1];
               len_offd = offd_i[row_local + 1];
               not_found = 1;

               for (i = 0; i < n; i++)
               {
                  if (cols[indx] < col_0 || cols[indx] > col_n)
                  /* insert into offd */
                  {
                     j_offd = hypre_BigBinarySearch(col_map_offd, cols[indx] - first,
                                                    num_cols_offd);
                     if (j_offd == -1)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                        #pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n",
                                         row, cols[indx]);
                        }
                        break;
                     }
                     for (j = pos_offd; j < len_offd; j++)
                     {
                        if (offd_j[j] == j_offd)
                        {
                           offd_data[j] = values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                        #pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n",
                                         row, cols[indx]);
                        }
                        break;
                     }
                     not_found = 1;
                  }
                  /* diagonal element */
                  else if (cols[indx] == row)
                  {
                     /* the diagonal entry is stored first in each diag row */
                     if (diag_j[pos_diag] != row_local)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                        #pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n",
                                         row, cols[indx]);
                        }
                        break;
                     }
                     diag_data[pos_diag] = values[indx];
                  }
                  else /* insert into diag */
                  {
                     for (j = pos_diag; j < len_diag; j++)
                     {
                        if (diag_j[j] == (HYPRE_Int)(cols[indx] - col_0))
                        {
                           diag_data[j] = values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                        #pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n",
                                         row, cols[indx]);
                        }
                        break;
                     }
                  }
                  indx++;
               }
            }
            /* processor does not own the row: skipped (no off-process
               handling in this routine) */
         }
      } /* end parallel region */
   }
   else /* matrix not assembled */
   {
      aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
      row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
      row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
      need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
      if (need_aux)
      {
         /* values are accumulated in per-row auxiliary arrays */
         aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
         aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
      }
      else
      {
         /* values go straight into the preallocated CSR structure */
         diag = hypre_ParCSRMatrixDiag(par_matrix);
         diag_i = hypre_CSRMatrixI(diag);
         diag_j = hypre_CSRMatrixJ(diag);
         diag_data = hypre_CSRMatrixData(diag);
         offd = hypre_ParCSRMatrixOffd(par_matrix);
         offd_i = hypre_CSRMatrixI(offd);
         if (num_procs > 1)
         {
            offd_data = hypre_CSRMatrixData(offd);
            big_offd_j = hypre_CSRMatrixBigJ(offd);
            if (!big_offd_j)
            {
               big_offd_j = hypre_CTAlloc(HYPRE_BigInt,
                                          offd_i[hypre_CSRMatrixNumRows(offd)],
                                          hypre_CSRMatrixMemoryLocation(offd));
               hypre_CSRMatrixBigJ(offd) = big_offd_j;
            }
         }
      }

#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel
#endif
      {
         HYPRE_Int num_threads, my_thread_num;
         HYPRE_Int len, rest, ns, ne;
         HYPRE_BigInt *tmp_j = NULL;
         HYPRE_BigInt *local_j = NULL;
         HYPRE_Complex *tmp_data = NULL;
         HYPRE_Complex *local_data = NULL;
         HYPRE_Int tmp_indx;
         HYPRE_Int row_local;
         HYPRE_Int i, j, ii, n;
         HYPRE_BigInt row;
         HYPRE_Int not_found, size, indx;
         HYPRE_Int old_size, space, cnt;

         /* static partition of the nrows input rows over the threads */
         num_threads = hypre_NumActiveThreads();
         my_thread_num = hypre_GetThreadNum();
         len = nrows / num_threads;
         rest = nrows - len * num_threads;
         if (my_thread_num < rest)
         {
            ns = my_thread_num * (len + 1);
            ne = (my_thread_num + 1) * (len + 1);
         }
         else
         {
            ns = my_thread_num * len + rest;
            ne = (my_thread_num + 1) * len + rest;
         }

         for (ii = ns; ii < ne; ii++)
         {
            row = rows[ii];
            n = ncols ? ncols[ii] : 1;
            if (n == 0) /* empty row */
            {
               continue;
            }
            indx = row_indexes[ii];

            /* processor owns the row */
            if (row >= row_partitioning[0] && row < row_partitioning[1])
            {
               row_local = (HYPRE_Int)(row - row_partitioning[0]);
               /* compute local row number */
               if (need_aux)
               {
                  local_j = aux_j[row_local];
                  local_data = aux_data[row_local];
                  space = row_space[row_local];
                  old_size = row_length[row_local];
                  size = space - old_size;
                  if (size < n)
                  {
                     /* overflow area for entries that do not fit in the
                        currently allocated row space */
                     size = n - size;
                     tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
                     tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
                  }
                  tmp_indx = 0;
                  not_found = 1;
                  size = old_size;
                  for (i = 0; i < n; i++)
                  {
                     /* overwrite an existing entry if present */
                     for (j = 0; j < old_size; j++)
                     {
                        if (local_j[j] == cols[indx])
                        {
                           local_data[j] = values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (size < space)
                        {
                           local_j[size] = cols[indx];
                           local_data[size++] = values[indx];
                        }
                        else
                        {
                           tmp_j[tmp_indx] = cols[indx];
                           tmp_data[tmp_indx++] = values[indx];
                        }
                     }
                     not_found = 1;
                     indx++;
                  }

                  row_length[row_local] = size + tmp_indx;

                  if (tmp_indx)
                  {
                     /* grow the row and append the overflow entries */
                     aux_j[row_local] = hypre_TReAlloc(aux_j[row_local], HYPRE_BigInt,
                                                       size + tmp_indx, HYPRE_MEMORY_HOST);
                     aux_data[row_local] = hypre_TReAlloc(aux_data[row_local], HYPRE_Complex,
                                                          size + tmp_indx, HYPRE_MEMORY_HOST);
                     row_space[row_local] = size + tmp_indx;
                     local_j = aux_j[row_local];
                     local_data = aux_data[row_local];
                  }

                  cnt = size;
                  for (i = 0; i < tmp_indx; i++)
                  {
                     local_j[cnt] = tmp_j[i];
                     local_data[cnt++] = tmp_data[i];
                  }

                  if (tmp_j)
                  {
                     hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
                     hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
                  }
               }
               else /* insert immediately into data in ParCSRMatrix structure */
               {
                  HYPRE_Int offd_indx, diag_indx;
                  HYPRE_Int offd_space, diag_space;
                  HYPRE_Int cnt_diag, cnt_offd;

                  offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
                  diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
                  cnt_diag = diag_indx;
                  cnt_offd = offd_indx;
                  diag_space = diag_i[row_local + 1];
                  offd_space = offd_i[row_local + 1];
                  not_found = 1;

                  for (i = 0; i < n; i++)
                  {
                     if (cols[indx] < col_0 || cols[indx] > col_n)
                     /* insert into offd */
                     {
                        for (j = offd_i[row_local]; j < offd_indx; j++)
                        {
                           if (big_offd_j[j] == cols[indx])
                           {
                              offd_data[j] = values[indx];
                              not_found = 0;
                              break;
                           }
                        }
                        if (not_found)
                        {
                           if (cnt_offd < offd_space)
                           {
                              big_offd_j[cnt_offd] = cols[indx];
                              offd_data[cnt_offd++] = values[indx];
                           }
                           else
                           {
                              hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                              #pragma omp atomic
#endif
                              error_flag++;
                              if (print_level)
                              {
                                 hypre_printf("Error in row %b ! Too many elements!\n",
                                              row);
                              }
                              break;
                           }
                        }
                        not_found = 1;
                     }
                     else /* insert into diag */
                     {
                        for (j = diag_i[row_local]; j < diag_indx; j++)
                        {
                           if (diag_j[j] == (HYPRE_Int)(cols[indx] - col_0))
                           {
                              diag_data[j] = values[indx];
                              not_found = 0;
                              break;
                           }
                        }
                        if (not_found)
                        {
                           if (cnt_diag < diag_space)
                           {
                              diag_j[cnt_diag] = (HYPRE_Int)(cols[indx] - col_0);
                              diag_data[cnt_diag++] = values[indx];
                           }
                           else
                           {
                              hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                              #pragma omp atomic
#endif
                              error_flag++;
                              if (print_level)
                              {
                                 hypre_printf("Error in row %b ! Too many elements !\n",
                                              row);
                              }
                              break;
                           }
                        }
                        not_found = 1;
                     }
                     indx++;
                  }

                  hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
                  hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
               }
            }
            /* processor does not own the row: skipped (no off-process
               handling in this routine) */
         }
      } /* end parallel region */
   }

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixAddToValuesOMPParCSR
 *
 * adds row values to an IJMatrix
 *
 * Same calling convention as hypre_IJMatrixSetValuesOMPParCSR, but values
 * are summed into existing entries instead of overwriting them.  Entries
 * for rows owned by other processes are recorded per thread during the
 * parallel phase and appended to the auxiliary matrix's off-process queue
 * in a serial post-processing pass, to be communicated at assembly.
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixAddToValuesOMPParCSR( hypre_IJMatrix       *matrix,
                                    HYPRE_Int             nrows,
                                    HYPRE_Int            *ncols,
                                    const HYPRE_BigInt   *rows,
                                    const HYPRE_Int      *row_indexes,
                                    const HYPRE_BigInt   *cols,
                                    const HYPRE_Complex  *values )
{
   hypre_ParCSRMatrix    *par_matrix;
   hypre_CSRMatrix       *diag, *offd;
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_BigInt          *row_partitioning;
   HYPRE_BigInt          *col_partitioning;
   MPI_Comm               comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int              num_procs, my_id;
   HYPRE_BigInt           col_0, col_n, first;
   HYPRE_BigInt         **aux_j;
   HYPRE_Complex        **aux_data;
   HYPRE_Int             *row_length, *row_space;
   HYPRE_Int              need_aux;
   HYPRE_Int             *diag_i;
   HYPRE_Int             *diag_j;
   HYPRE_Complex         *diag_data;
   HYPRE_Int             *offd_i;
   HYPRE_Int             *offd_j;
   HYPRE_BigInt          *big_offd_j;
   HYPRE_Complex         *offd_data;
   HYPRE_Int              current_num_elmts;
   HYPRE_Int              max_off_proc_elmts;
   HYPRE_Int              off_proc_i_indx;
   HYPRE_BigInt          *off_proc_i;
   HYPRE_BigInt          *off_proc_j;
   HYPRE_Complex         *off_proc_data;
   HYPRE_Int            **offproc_cnt; /* per-thread queues of (row, indx) pairs */
   HYPRE_Int              print_level = hypre_IJMatrixPrintLevel(matrix);
   HYPRE_Int              max_num_threads;
   HYPRE_Int              error_flag = 0;
   HYPRE_Int              i1;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   max_num_threads = hypre_NumThreads();
   par_matrix = (hypre_ParCSRMatrix*) hypre_IJMatrixObject( matrix );
   row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   col_partitioning = hypre_IJMatrixColPartitioning(matrix);

   /* one queue slot per thread; queue layout is
      [0] = capacity, [1] = next free index, then (ii, indx) pairs */
   offproc_cnt = hypre_CTAlloc(HYPRE_Int *, max_num_threads, HYPRE_MEMORY_HOST);
   for (i1 = 0; i1 < max_num_threads; i1++)
   {
      offproc_cnt[i1] = NULL;
   }

   col_0 = col_partitioning[0];
   col_n = col_partitioning[1] - 1;
   first = hypre_IJMatrixGlobalFirstCol(matrix);

   if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled */
   {
      HYPRE_Int     num_cols_offd;
      HYPRE_BigInt *col_map_offd;

      diag = hypre_ParCSRMatrixDiag(par_matrix);
      diag_i = hypre_CSRMatrixI(diag);
      diag_j = hypre_CSRMatrixJ(diag);
      diag_data = hypre_CSRMatrixData(diag);
      offd = hypre_ParCSRMatrixOffd(par_matrix);
      offd_i = hypre_CSRMatrixI(offd);
      num_cols_offd = hypre_CSRMatrixNumCols(offd);
      if (num_cols_offd)
      {
         col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
         offd_j = hypre_CSRMatrixJ(offd);
         offd_data = hypre_CSRMatrixData(offd);
      }
      aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
      if (aux_matrix)
      {
         current_num_elmts = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
         off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
         off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
         off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
      }

#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel
#endif
      {
         HYPRE_Int j_offd;
         HYPRE_Int num_threads, my_thread_num;
         HYPRE_Int len, rest, ns, ne;
         HYPRE_Int pos_diag, pos_offd;
         HYPRE_Int len_diag, len_offd;
         HYPRE_Int row_local;
         HYPRE_Int i, j, ii, n;
         HYPRE_BigInt row;
         HYPRE_Int not_found, size, indx;
         HYPRE_Int *my_offproc_cnt = NULL;

         /* static partition of the nrows input rows over the threads */
         num_threads = hypre_NumActiveThreads();
         my_thread_num = hypre_GetThreadNum();
         len = nrows / num_threads;
         rest = nrows - len * num_threads;
         if (my_thread_num < rest)
         {
            ns = my_thread_num * (len + 1);
            ne = (my_thread_num + 1) * (len + 1);
         }
         else
         {
            ns = my_thread_num * len + rest;
            ne = (my_thread_num + 1) * len + rest;
         }

         for (ii = ns; ii < ne; ii++)
         {
            row = rows[ii];
            n = ncols ? ncols[ii] : 1;
            if (n == 0) /* empty row */
            {
               continue;
            }
            indx = row_indexes[ii];

            if (row >= row_partitioning[0] && row < row_partitioning[1])
            {
               row_local = (HYPRE_Int)(row - row_partitioning[0]);
               /* compute local row number */
               size = diag_i[row_local + 1] - diag_i[row_local]
                      + offd_i[row_local + 1] - offd_i[row_local];
               if (n > size)
               {
                  hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                  #pragma omp atomic
#endif
                  error_flag++;
                  if (print_level)
                  {
                     hypre_printf (" row %b too long! \n", row);
                  }
                  break;
               }

               pos_diag = diag_i[row_local];
               pos_offd = offd_i[row_local];
               len_diag = diag_i[row_local + 1];
               len_offd = offd_i[row_local + 1];
               not_found = 1;

               for (i = 0; i < n; i++)
               {
                  if (cols[indx] < col_0 || cols[indx] > col_n)
                  /* insert into offd */
                  {
                     j_offd = hypre_BigBinarySearch(col_map_offd, cols[indx] - first,
                                                    num_cols_offd);
                     if (j_offd == -1)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                        #pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n",
                                         row, cols[indx]);
                        }
                        break;
                     }
                     for (j = pos_offd; j < len_offd; j++)
                     {
                        if (offd_j[j] == j_offd)
                        {
                           offd_data[j] += values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                        #pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n",
                                         row, cols[indx]);
                        }
                        break;
                     }
                     not_found = 1;
                  }
                  /* diagonal element */
                  else if (cols[indx] == row)
                  {
                     /* the diagonal entry is stored first in each diag row */
                     if (diag_j[pos_diag] != row_local)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                        #pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n",
                                         row, cols[indx]);
                        }
                        break;
                     }
                     diag_data[pos_diag] += values[indx];
                  }
                  else /* insert into diag */
                  {
                     for (j = pos_diag; j < len_diag; j++)
                     {
                        if (diag_j[j] == (HYPRE_Int)(cols[indx] - col_0))
                        {
                           diag_data[j] += values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                        #pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n",
                                         row, cols[indx]);
                        }
                        break;
                     }
                  }
                  indx++;
               }
            }
            /* not my row: save (row index, data offset) in this thread's
               queue and process serially after the parallel region */
            else
            {
               if (!my_offproc_cnt)
               {
                  my_offproc_cnt = hypre_CTAlloc(HYPRE_Int, 200, HYPRE_MEMORY_HOST);
                  offproc_cnt[my_thread_num] = my_offproc_cnt;
                  my_offproc_cnt[0] = 200;
                  my_offproc_cnt[1] = 2;
               }
               i = my_offproc_cnt[1];
               if (i + 2 < my_offproc_cnt[0])
               {
                  my_offproc_cnt[i] = ii;
                  my_offproc_cnt[i + 1] = indx;
                  my_offproc_cnt[1] += 2;
               }
               else
               {
                  size = my_offproc_cnt[0];
                  my_offproc_cnt = hypre_TReAlloc(my_offproc_cnt, HYPRE_Int,
                                                  size + 200, HYPRE_MEMORY_HOST);
                  /* BUG FIX: hypre_TReAlloc may move the buffer; refresh the
                     shared slot so the serial post-processing pass (and its
                     hypre_TFree) does not use a stale pointer */
                  offproc_cnt[my_thread_num] = my_offproc_cnt;
                  my_offproc_cnt[0] += 200;
                  my_offproc_cnt[i] = ii;
                  my_offproc_cnt[i + 1] = indx;
                  my_offproc_cnt[1] += 2;
               }
            }
         }
      } /* end parallel region */
   }
   else /* not assembled */
   {
      aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
      if (aux_matrix)
      {
         current_num_elmts = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
         off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
         off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
         off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
      }
      row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
      row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
      need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
      if (need_aux)
      {
         /* values are accumulated in per-row auxiliary arrays */
         aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
         aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
      }
      else
      {
         /* values go straight into the preallocated CSR structure */
         diag = hypre_ParCSRMatrixDiag(par_matrix);
         diag_i = hypre_CSRMatrixI(diag);
         diag_j = hypre_CSRMatrixJ(diag);
         diag_data = hypre_CSRMatrixData(diag);
         offd = hypre_ParCSRMatrixOffd(par_matrix);
         offd_i = hypre_CSRMatrixI(offd);
         if (num_procs > 1)
         {
            big_offd_j = hypre_CSRMatrixBigJ(offd);
            offd_data = hypre_CSRMatrixData(offd);
            if (!big_offd_j)
            {
               big_offd_j = hypre_CTAlloc(HYPRE_BigInt,
                                          offd_i[hypre_CSRMatrixNumRows(offd)],
                                          hypre_CSRMatrixMemoryLocation(offd));
               hypre_CSRMatrixBigJ(offd) = big_offd_j;
            }
         }
      }

#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel
#endif
      {
         HYPRE_Int num_threads, my_thread_num;
         HYPRE_Int len, rest, ns, ne;
         HYPRE_BigInt *tmp_j = NULL;
         HYPRE_BigInt *local_j = NULL;
         HYPRE_Complex *tmp_data = NULL;
         HYPRE_Complex *local_data = NULL;
         HYPRE_Int tmp_indx;
         HYPRE_Int row_local;
         HYPRE_BigInt row;
         HYPRE_Int i, j, ii, n;
         HYPRE_Int not_found, size, indx;
         HYPRE_Int old_size, space, cnt;
         HYPRE_Int *my_offproc_cnt = NULL;

         /* static partition of the nrows input rows over the threads */
         num_threads = hypre_NumActiveThreads();
         my_thread_num = hypre_GetThreadNum();
         len = nrows / num_threads;
         rest = nrows - len * num_threads;
         if (my_thread_num < rest)
         {
            ns = my_thread_num * (len + 1);
            ne = (my_thread_num + 1) * (len + 1);
         }
         else
         {
            ns = my_thread_num * len + rest;
            ne = (my_thread_num + 1) * len + rest;
         }

         for (ii = ns; ii < ne; ii++)
         {
            row = rows[ii];
            n = ncols ? ncols[ii] : 1;
            if (n == 0) /* empty row */
            {
               continue;
            }
            indx = row_indexes[ii];

            if (row >= row_partitioning[0] && row < row_partitioning[1])
            {
               row_local = (HYPRE_Int)(row - row_partitioning[0]);
               /* compute local row number */
               if (need_aux)
               {
                  local_j = aux_j[row_local];
                  local_data = aux_data[row_local];
                  space = row_space[row_local];
                  old_size = row_length[row_local];
                  size = space - old_size;
                  if (size < n)
                  {
                     /* overflow area for entries that do not fit in the
                        currently allocated row space */
                     size = n - size;
                     tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
                     tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
                  }
                  tmp_indx = 0;
                  not_found = 1;
                  size = old_size;
                  for (i = 0; i < n; i++)
                  {
                     /* add into an existing entry if present */
                     for (j = 0; j < old_size; j++)
                     {
                        if (local_j[j] == cols[indx])
                        {
                           local_data[j] += values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (size < space)
                        {
                           local_j[size] = cols[indx];
                           local_data[size++] = values[indx];
                        }
                        else
                        {
                           tmp_j[tmp_indx] = cols[indx];
                           tmp_data[tmp_indx++] = values[indx];
                        }
                     }
                     not_found = 1;
                     indx++;
                  }

                  row_length[row_local] = size + tmp_indx;

                  if (tmp_indx)
                  {
                     /* grow the row and append the overflow entries */
                     aux_j[row_local] = hypre_TReAlloc(aux_j[row_local], HYPRE_BigInt,
                                                       size + tmp_indx, HYPRE_MEMORY_HOST);
                     aux_data[row_local] = hypre_TReAlloc(aux_data[row_local], HYPRE_Complex,
                                                          size + tmp_indx, HYPRE_MEMORY_HOST);
                     row_space[row_local] = size + tmp_indx;
                     local_j = aux_j[row_local];
                     local_data = aux_data[row_local];
                  }

                  cnt = size;
                  for (i = 0; i < tmp_indx; i++)
                  {
                     local_j[cnt] = tmp_j[i];
                     local_data[cnt++] = tmp_data[i];
                  }

                  if (tmp_j)
                  {
                     hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
                     hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
                  }
               }
               else /* insert immediately into data in ParCSRMatrix structure */
               {
                  HYPRE_Int offd_indx, diag_indx;
                  HYPRE_Int offd_space, diag_space;
                  HYPRE_Int cnt_diag, cnt_offd;

                  offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
                  diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
                  cnt_diag = diag_indx;
                  cnt_offd = offd_indx;
                  diag_space = diag_i[row_local + 1];
                  offd_space = offd_i[row_local + 1];
                  not_found = 1;

                  for (i = 0; i < n; i++)
                  {
                     if (cols[indx] < col_0 || cols[indx] > col_n)
                     /* insert into offd */
                     {
                        for (j = offd_i[row_local]; j < offd_indx; j++)
                        {
                           if (big_offd_j[j] == cols[indx])
                           {
                              offd_data[j] += values[indx];
                              not_found = 0;
                              break;
                           }
                        }
                        if (not_found)
                        {
                           if (cnt_offd < offd_space)
                           {
                              big_offd_j[cnt_offd] = cols[indx];
                              offd_data[cnt_offd++] = values[indx];
                           }
                           else
                           {
                              hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                              #pragma omp atomic
#endif
                              error_flag++;
                              if (print_level)
                              {
                                 hypre_printf("Error in row %b ! Too many elements!\n",
                                              row);
                              }
                              break;
                           }
                        }
                        not_found = 1;
                     }
                     else /* insert into diag */
                     {
                        for (j = diag_i[row_local]; j < diag_indx; j++)
                        {
                           if (diag_j[j] == (HYPRE_Int)(cols[indx] - col_0))
                           {
                              diag_data[j] += values[indx];
                              not_found = 0;
                              break;
                           }
                        }
                        if (not_found)
                        {
                           if (cnt_diag < diag_space)
                           {
                              diag_j[cnt_diag] = (HYPRE_Int)(cols[indx] - col_0);
                              diag_data[cnt_diag++] = values[indx];
                           }
                           else
                           {
                              hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
                              #pragma omp atomic
#endif
                              error_flag++;
                              if (print_level)
                              {
                                 hypre_printf("Error in row %b ! Too many elements !\n",
                                              row);
                              }
                              break;
                           }
                        }
                        not_found = 1;
                     }
                     indx++;
                  }

                  hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
                  hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
               }
            }
            /* not my row: save (row index, data offset) in this thread's
               queue and process serially after the parallel region */
            else
            {
               if (!my_offproc_cnt)
               {
                  my_offproc_cnt = hypre_CTAlloc(HYPRE_Int, 200, HYPRE_MEMORY_HOST);
                  offproc_cnt[my_thread_num] = my_offproc_cnt;
                  my_offproc_cnt[0] = 200;
                  my_offproc_cnt[1] = 2;
               }
               i = my_offproc_cnt[1];
               if (i + 2 < my_offproc_cnt[0])
               {
                  my_offproc_cnt[i] = ii;
                  my_offproc_cnt[i + 1] = indx;
                  my_offproc_cnt[1] += 2;
               }
               else
               {
                  size = my_offproc_cnt[0];
                  my_offproc_cnt = hypre_TReAlloc(my_offproc_cnt, HYPRE_Int,
                                                  size + 200, HYPRE_MEMORY_HOST);
                  /* BUG FIX: hypre_TReAlloc may move the buffer; refresh the
                     shared slot so the serial post-processing pass (and its
                     hypre_TFree) does not use a stale pointer */
                  offproc_cnt[my_thread_num] = my_offproc_cnt;
                  my_offproc_cnt[0] += 200;
                  my_offproc_cnt[i] = ii;
                  my_offproc_cnt[i + 1] = indx;
                  my_offproc_cnt[1] += 2;
               }
            }
         }
      } /* end parallel region */
   }

   if (error_flag)
   {
      return hypre_error_flag;
   }

   /* serial post-processing: move the per-thread queues of off-process
      entries into the auxiliary matrix's off-process storage */
   if (!aux_matrix)
   {
      HYPRE_Int size = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
      hypre_AuxParCSRMatrixCreate(&aux_matrix, size, size, NULL);
      hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }

   for (i1 = 0; i1 < max_num_threads; i1++)
   {
      if (offproc_cnt[i1])
      {
         HYPRE_Int *my_offproc_cnt = offproc_cnt[i1];
         HYPRE_Int i, i2, ii, n, indx;
         HYPRE_BigInt row;

         for (i2 = 2; i2 < my_offproc_cnt[1]; i2 += 2)
         {
            ii = my_offproc_cnt[i2];
            row = rows[ii];
            n = ncols ? ncols[ii] : 1;
            if (n == 0) /* empty row */
            {
               continue;
            }
            indx = my_offproc_cnt[i2 + 1];

            current_num_elmts = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
            max_off_proc_elmts = hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
            off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
            off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
            off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
            off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);

            if (!max_off_proc_elmts)
            {
               /* first off-process element: create the storage */
               max_off_proc_elmts = hypre_max(n, 1000);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix) =
                  hypre_CTAlloc(HYPRE_BigInt, 2 * max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix) =
                  hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcData(aux_matrix) =
                  hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
               off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
               off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            }
            else if (current_num_elmts + n > max_off_proc_elmts)
            {
               /* grow the storage */
               max_off_proc_elmts += 3 * n;
               off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt,
                                           2 * max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt,
                                           max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_data = hypre_TReAlloc(off_proc_data, HYPRE_Complex,
                                              max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
               hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
            }

            /* off_proc_i stores (row, count) pairs */
            off_proc_i[off_proc_i_indx++] = row;
            off_proc_i[off_proc_i_indx++] = n;
            for (i = 0; i < n; i++)
            {
               off_proc_j[current_num_elmts] = cols[indx];
               off_proc_data[current_num_elmts++] = values[indx++];
            }
            hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
            hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix) = current_num_elmts;
         }
         hypre_TFree(offproc_cnt[i1], HYPRE_MEMORY_HOST);
      }
   }
   hypre_TFree(offproc_cnt, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}
GB_unaryop__lnot_uint16_uint16.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_uint16_uint16
// op(A') function:  GB_tran__lnot_uint16_uint16

// C type:   uint16_t
// A type:   uint16_t
// cast:     uint16_t cij = (uint16_t) aij
// unaryop:  cij = !(aij != 0)

// entry type of the input matrix A
#define GB_ATYPE \
    uint16_t

// entry type of the output matrix C
#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: logical NOT; the result is 0 or 1, stored as uint16_t
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting (identity cast here: A and C share the uint16_t type)
#define GB_CASTING(z, aij) \
    uint16_t z = (uint16_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;\
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;  \
    GB_OP (GB_CX (pC), z) ;\
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = !(aij != 0) elementwise to the anz entries of Ax, writing the
// result into Cx.  Cx and Ax may alias (the operation is in-place safe because
// each entry p is read once and then written once).  Returns GrB_NO_VALUE when
// the operator is compiled out via GB_DISABLE, so the caller falls back to the
// generic (non-hard-coded) kernel.
GrB_Info GB_unop__lnot_uint16_uint16
(
    uint16_t *Cx,       // Cx and Ax may be aliased
    uint16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // embarrassingly parallel: one entry per iteration, no dependencies
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Fused transpose+apply: the actual loop body lives in the shared template
// GB_unaryop_transpose.c, instantiated here with the GB_* macros above.
GrB_Info GB_tran__lnot_uint16_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
wand-view.c
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%                 W   W   AAA   N   N  DDDD                                   %
%                 W   W  A   A  NN  N  D   D                                  %
%                 W W W  AAAAA  N N N  D   D                                  %
%                 WW WW  A   A  N  NN  D   D                                  %
%                 W   W  A   A  N   N  DDDD                                   %
%                                                                             %
%                 V   V  IIIII  EEEEE  W   W                                  %
%                 V   V    I    E      W   W                                  %
%                 V   V    I    EEE    W W W                                  %
%                  V V     I    E      WW WW                                  %
%                   V    IIIII  EEEEE  W   W                                  %
%                                                                             %
%                                                                             %
%                        MagickWand Wand View Methods                         %
%                                                                             %
%                              Software Design                                %
%                                   Cristy                                    %
%                                 March 2003                                  %
%                                                                             %
%                                                                             %
%  Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization         %
%  dedicated to making software imaging solutions freely available.           %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may  %
%  obtain a copy of the License at                                            %
%                                                                             %
%    https://imagemagick.org/script/license.php                               %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/

/*
  Include declarations.
*/
#include "MagickWand/studio.h"
#include "MagickWand/MagickWand.h"
#include "MagickWand/magick-wand-private.h"
#include "MagickWand/wand.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/thread-private.h"

/*
  Define declarations.
*/
#define WandViewId  "WandView"

/*
  Typedef declarations.
*/
struct _WandView
{
  size_t
    id;                       /* unique id obtained from AcquireWandId() */

  char
    name[MagickPathExtent],   /* "WandView-<id>", used in log/error output */
    *description;             /* label used by SetImageProgress() reporting */

  RectangleInfo
    extent;                   /* region of the image the view iterates over */

  MagickWand
    *wand;                    /* wand this view was created from (not owned) */

  Image
    *image;                   /* image used by the Pixel{Get,Set}QuantumPixel
                                 calls in the iterators */

  CacheView
    *view;                    /* pixel cache view over wand->images */

  PixelWand
    ***pixel_wands;           /* per-thread rows of pixel wands:
                                 pixel_wands[thread_id][column] */

  ExceptionInfo
    *exception;               /* exception sink owned by the view */

  MagickBooleanType
    debug;                    /* emit WandEvent log messages when true */

  size_t
    signature;                /* MagickWandSignature while the view is live */
};

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e W a n d V i e w                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneWandView() makes a copy of the specified wand view.
%
%  The format of the CloneWandView method is:
%
%      WandView *CloneWandView(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
WandExport WandView *CloneWandView(const WandView *wand_view)
{
  WandView
    *clone_view;

  ssize_t
    i;

  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  if (wand_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
  clone_view=(WandView *) AcquireCriticalMemory(sizeof(*clone_view));
  (void) memset(clone_view,0,sizeof(*clone_view));
  clone_view->id=AcquireWandId();
  (void) FormatLocaleString(clone_view->name,MagickPathExtent,"%s-%.20g",
    WandViewId,(double) clone_view->id);
  clone_view->description=ConstantString(wand_view->description);
  clone_view->image=CloneImage(wand_view->image,0,0,MagickTrue,
    wand_view->exception);
  clone_view->view=CloneCacheView(wand_view->view);
  clone_view->extent=wand_view->extent;
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,wand_view->exception);
  /*
    Bug fix: the pixel_wands table must be allocated before it is indexed;
    the memset() above left it NULL, so the loop below previously
    dereferenced a null pointer.  The table has one row per thread (see
    DestroyPixelsTLS(), which iterates the same thread-resource limit).
  */
  clone_view->pixel_wands=(PixelWand ***) AcquireQuantumMemory((size_t)
    GetMagickResourceLimit(ThreadResource),sizeof(*clone_view->pixel_wands));
  if (clone_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  (void) memset(clone_view->pixel_wands,0,(size_t)
    GetMagickResourceLimit(ThreadResource)*sizeof(*clone_view->pixel_wands));
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **)
      wand_view->pixel_wands[i],wand_view->extent.width);
  clone_view->debug=wand_view->debug;
  if (clone_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name);
  clone_view->signature=MagickWandSignature;
  return(clone_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y W a n d V i e w                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyWandView() deallocates memory associated with a wand view.
%
%  The format of the DestroyWandView method is:
%
%      WandView *DestroyWandView(WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/

/*
  DestroyPixelsTLS() releases the per-thread pixel-wand rows allocated by
  AcquirePixelsTLS(): one row per thread (thread-resource limit), each row
  holding number_wands pixel wands.  NULL rows are tolerated so a partially
  constructed table (allocation failure mid-way) can be torn down safely.
  Always returns NULL so the caller can assign the result back.
*/
static PixelWand ***DestroyPixelsTLS(PixelWand ***pixel_wands,
  const size_t number_wands)
{
  ssize_t
    i;

  assert(pixel_wands != (PixelWand ***) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (pixel_wands[i] != (PixelWand **) NULL)
      pixel_wands[i]=DestroyPixelWands(pixel_wands[i],number_wands);
  pixel_wands=(PixelWand ***) RelinquishMagickMemory(pixel_wands);
  return(pixel_wands);
}

/*
  DestroyWandView() releases everything the view owns (pixel wands, cloned
  image, cache view, exception) and invalidates the signature; the wand
  itself is not owned and is not destroyed.  Returns NULL for convenient
  assignment back to the caller's pointer.

  NOTE(review): wand_view->description (a ConstantString() allocation) does
  not appear to be freed here -- looks like a leak; confirm ownership.
*/
WandExport WandView *DestroyWandView(WandView *wand_view)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  wand_view->pixel_wands=DestroyPixelsTLS(wand_view->pixel_wands,
    wand_view->extent.width);
  wand_view->image=DestroyImage(wand_view->image);
  wand_view->view=DestroyCacheView(wand_view->view);
  wand_view->exception=DestroyExceptionInfo(wand_view->exception);
  wand_view->signature=(~MagickWandSignature);
  RelinquishWandId(wand_view->id);
  wand_view=(WandView *) RelinquishMagickMemory(wand_view);
  return(wand_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D u p l e x T r a n s f e r W a n d V i e w I t e r a t o r               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DuplexTransferWandViewIterator() iterates over three wand views in
%  parallel and calls your transfer method for each scanline of the view.  The
%  source and duplex pixel extent is not confined to the image canvas-- that is
%  you can include negative offsets or widths or heights that exceed the image
%  dimension.  However, the destination wand view is confined to the image
%  canvas-- that is no negative offsets or widths or heights that exceed the
%  image dimension are permitted.
%
%  The callback signature is:
%
%      MagickBooleanType DuplexTransferImageViewMethod(const WandView *source,
%        const WandView *duplex,WandView *destination,const ssize_t y,
%        const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
%
%  The format of the DuplexTransferWandViewIterator method is:
%
%      MagickBooleanType DuplexTransferWandViewIterator(WandView *source,
%        WandView *duplex,WandView *destination,
%        DuplexTransferWandViewMethod transfer,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source wand view.
%
%    o duplex: the duplex wand view.
%
%    o destination: the destination wand view.
%
%    o transfer: the transfer callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType DuplexTransferWandViewIterator(WandView *source,
  WandView *duplex,WandView *destination,DuplexTransferWandViewMethod transfer,
  void *context)
{
  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (WandView *) NULL);
  assert(source->signature == MagickWandSignature);
  if (transfer == (DuplexTransferWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  destination_image=destination->wand->images;
  /* destination is written to, so it must be DirectClass */
  status=SetImageStorageClass(destination_image,DirectClass,
    destination->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,destination_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    const Quantum
      *magick_restrict duplex_pixels,
      *magick_restrict pixels;

    ssize_t
      x;

    Quantum
      *magick_restrict destination_pixels;

    /* a failed row cannot break out of an OpenMP loop; skip instead */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* stage the source scanline into this thread's pixel wands */
    for (x=0; x < (ssize_t) source->extent.width; x++)
    {
      PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]);
      pixels+=GetPixelChannels(source->image);
    }
    duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y,
      duplex->extent.width,1,duplex->exception);
    if (duplex_pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) duplex->extent.width; x++)
    {
      PixelSetQuantumPixel(duplex->image,duplex_pixels,
        duplex->pixel_wands[id][x]);
      duplex_pixels+=GetPixelChannels(duplex->image);
    }
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,
      destination->exception);
    if (destination_pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) destination->extent.width; x++)
    {
      PixelSetQuantumPixel(destination->image,destination_pixels,
        destination->pixel_wands[id][x]);
      destination_pixels+=GetPixelChannels(destination->image);
    }
    /* user callback works on the staged pixel wands */
    if (transfer(source,duplex,destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /*
      Re-fetch the destination scanline and copy the (possibly updated)
      pixel wands back.  NOTE(review): this second fetch is not checked for
      NULL before being written -- presumably it cannot fail after the first
      fetch succeeded; confirm.
    */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,
      destination->exception);
    for (x=0; x < (ssize_t) destination->extent.width; x++)
    {
      PixelGetQuantumPixel(destination->image,destination->pixel_wands[id][x],
        destination_pixels);
      destination_pixels+=GetPixelChannels(destination->image);
    }
    sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t W a n d V i e w E x c e p t i o n                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewException() returns the severity, reason, and description of any
%  error that occurs when utilizing a wand view.
%
%  The format of the GetWandViewException method is:
%
%      char *GetWandViewException(const WandView *wand_view,
%        ExceptionType *severity)
%
%  A description of each parameter follows:
%
%    o wand_view: the pixel wand_view.
%
%    o severity: the severity of the error is returned here.
%
*/
WandExport char *GetWandViewException(const WandView *wand_view,
  ExceptionType *severity)
{
  char
    *description;

  assert(wand_view != (const WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  if (wand_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
  assert(severity != (ExceptionType *) NULL);
  *severity=wand_view->exception->severity;
  /* caller owns the returned string and must free it */
  description=(char *) AcquireQuantumMemory(2UL*MagickPathExtent,
    sizeof(*description));
  if (description == (char *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  *description='\0';
  if (wand_view->exception->reason != (char *) NULL)
    (void) CopyMagickString(description,GetLocaleExceptionMessage(
      wand_view->exception->severity,wand_view->exception->reason),
      MagickPathExtent);
  if (wand_view->exception->description != (char *) NULL)
    {
      (void) ConcatenateMagickString(description," (",MagickPathExtent);
      (void) ConcatenateMagickString(description,GetLocaleExceptionMessage(
        wand_view->exception->severity,wand_view->exception->description),
        MagickPathExtent);
      (void) ConcatenateMagickString(description,")",MagickPathExtent);
    }
  return(description);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t W a n d V i e w E x t e n t                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewExtent() returns the wand view extent.
%
%  The format of the GetWandViewExtent method is:
%
%      RectangleInfo GetWandViewExtent(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
WandExport RectangleInfo GetWandViewExtent(const WandView *wand_view)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  return(wand_view->extent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t W a n d V i e w I t e r a t o r                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewIterator() iterates over the wand view in parallel and calls
%  your get method for each scanline of the view.  The pixel extent is
%  not confined to the image canvas-- that is you can include negative offsets
%  or widths or heights that exceed the image dimension.  Any updates to
%  the pixels in your callback are ignored.
%
%  The callback signature is:
%
%      MagickBooleanType GetImageViewMethod(const WandView *source,
%        const ssize_t y,const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback get method that must be
%  executed by a single thread at a time.
%
%  The format of the GetWandViewIterator method is:
%
%      MagickBooleanType GetWandViewIterator(WandView *source,
%        GetWandViewMethod get,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source wand view.
%
%    o get: the get callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType GetWandViewIterator(WandView *source,
  GetWandViewMethod get,void *context)
{
  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (WandView *) NULL);
  assert(source->signature == MagickWandSignature);
  if (get == (GetWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    const Quantum
      *pixels;

    ssize_t
      x;

    /* a failed row cannot break out of an OpenMP loop; skip instead */
    if (status == MagickFalse)
      continue;
    /* read-only access: virtual pixels, never synced back */
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* stage the scanline into this thread's pixel wands for the callback */
    for (x=0; x < (ssize_t) source->extent.width; x++)
    {
      PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]);
      pixels+=GetPixelChannels(source->image);
    }
    if (get(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t W a n d V i e w P i x e l s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewPixels() returns the wand view pixel_wands.
%
%  The format of the GetWandViewPixels method is:
%
%      PixelWand *GetWandViewPixels(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
/*
  Returns the pixel-wand row belonging to the calling thread; valid only
  inside an iterator callback, where the row holds the current scanline.
*/
WandExport PixelWand **GetWandViewPixels(const WandView *wand_view)
{
  const int
    id = GetOpenMPThreadId();

  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  return(wand_view->pixel_wands[id]);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t W a n d V i e w W a n d                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewWand() returns the magick wand associated with the wand view.
%
%  The format of the GetWandViewWand method is:
%
%      MagickWand *GetWandViewWand(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
WandExport MagickWand *GetWandViewWand(const WandView *wand_view)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  return(wand_view->wand);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s W a n d V i e w                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsWandView() returns MagickTrue if the parameter is verified as a wand
%  view object.
%
%  The format of the IsWandView method is:
%
%      MagickBooleanType IsWandView(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
WandExport MagickBooleanType IsWandView(const WandView *wand_view)
{
  size_t
    length;

  if (wand_view == (const WandView *) NULL)
    return(MagickFalse);
  if (wand_view->signature != MagickWandSignature)
    return(MagickFalse);
  /* the name is stamped with the "WandView" prefix at construction time */
  length=strlen(WandViewId);
  if (LocaleNCompare(wand_view->name,WandViewId,length) != 0)
    return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N e w W a n d V i e w                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewWandView() returns a wand view required for all other methods in the
%  Wand View API.
%
%  The format of the NewWandView method is:
%
%      WandView *NewWandView(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the wand.
%
*/

/*
  AcquirePixelsTLS() allocates one row of number_wands pixel wands for each
  OpenMP thread so iterator callbacks never share wands across threads.
  Returns NULL on allocation failure (after tearing down any rows already
  allocated).
*/
static PixelWand ***AcquirePixelsTLS(const size_t number_wands)
{
  PixelWand
    ***pixel_wands;

  ssize_t
    i;

  size_t
    number_threads;

  number_threads=GetOpenMPMaximumThreads();
  pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_wands));
  if (pixel_wands == (PixelWand ***) NULL)
    return((PixelWand ***) NULL);
  /* zero the table so DestroyPixelsTLS() can cope with partial failure */
  (void) memset(pixel_wands,0,number_threads*sizeof(*pixel_wands));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixel_wands[i]=NewPixelWands(number_wands);
    if (pixel_wands[i] == (PixelWand **) NULL)
      return(DestroyPixelsTLS(pixel_wands,number_wands));
  }
  return(pixel_wands);
}

/*
  NewWandView() builds a view over the full canvas of the wand's current
  image list.  NOTE(review): assumes wand->images is non-NULL (columns/rows
  are read from it) and leaves wand_view->image NULL -- confirm callers set
  it before the iterators dereference source->image.
*/
WandExport WandView *NewWandView(MagickWand *wand)
{
  ExceptionInfo
    *exception;

  WandView
    *wand_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickWandSignature);
  wand_view=(WandView *) AcquireCriticalMemory(sizeof(*wand_view));
  (void) memset(wand_view,0,sizeof(*wand_view));
  wand_view->id=AcquireWandId();
  (void) FormatLocaleString(wand_view->name,MagickPathExtent,"%s-%.20g",
    WandViewId,(double) wand_view->id);
  wand_view->description=ConstantString("WandView");
  wand_view->wand=wand;
  exception=AcquireExceptionInfo();
  wand_view->view=AcquireVirtualCacheView(wand_view->wand->images,exception);
  wand_view->extent.width=wand->images->columns;
  wand_view->extent.height=wand->images->rows;
  wand_view->pixel_wands=AcquirePixelsTLS(wand_view->extent.width);
  wand_view->exception=exception;
  if (wand_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  wand_view->debug=IsEventLogging();
  wand_view->signature=MagickWandSignature;
  return(wand_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N e w W a n d V i e w E x t e n t                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewWandViewExtent() returns a wand view required for all other methods
%  in the Wand View API.
%
%  The format of the NewWandViewExtent method is:
%
%      WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
%        const ssize_t y,const size_t width,const size_t height)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o x,y,columns,rows:  These values define the perimeter of a extent of
%      pixel_wands view.
%
*/
WandExport WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height)
{
  ExceptionInfo
    *exception;

  WandView
    *wand_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickWandSignature);
  wand_view=(WandView *) AcquireCriticalMemory(sizeof(*wand_view));
  (void) memset(wand_view,0,sizeof(*wand_view));
  wand_view->id=AcquireWandId();
  (void) FormatLocaleString(wand_view->name,MagickPathExtent,"%s-%.20g",
    WandViewId,(double) wand_view->id);
  wand_view->description=ConstantString("WandView");
  exception=AcquireExceptionInfo();
  /*
    Bug fix: the wand must be assigned before the cache view is acquired;
    previously AcquireVirtualCacheView() dereferenced wand_view->wand while
    it was still NULL from the memset (compare NewWandView(), which assigns
    the wand first).
  */
  wand_view->wand=wand;
  wand_view->view=AcquireVirtualCacheView(wand_view->wand->images,exception);
  wand_view->extent.width=width;
  wand_view->extent.height=height;
  wand_view->extent.x=x;
  wand_view->extent.y=y;
  wand_view->exception=exception;
  wand_view->pixel_wands=AcquirePixelsTLS(wand_view->extent.width);
  if (wand_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  wand_view->debug=IsEventLogging();
  wand_view->signature=MagickWandSignature;
  return(wand_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t W a n d V i e w D e s c r i p t i o n                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetWandViewDescription() associates a description with an image view.
%
%  The format of the SetWandViewDescription method is:
%
%      void SetWandViewDescription(WandView *image_view,const char *description)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
%    o description: the wand view description.
%
*/
/*
  Replaces the view's description (used in progress reporting).  The view
  owns the string.  NOTE(review): declared MagickExport while every sibling
  uses WandExport -- kept as-is to match the public header, but confirm.
*/
MagickExport void SetWandViewDescription(WandView *wand_view,
  const char *description)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  /*
    Bug fix: release the previous description before overwriting it;
    NewWandView()/NewWandViewExtent() always install one, so the old
    assignment leaked a ConstantString() allocation on every call.
  */
  if (wand_view->description != (char *) NULL)
    wand_view->description=DestroyString(wand_view->description);
  wand_view->description=ConstantString(description);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t W a n d V i e w I t e r a t o r                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetWandViewIterator() iterates over the wand view in parallel and calls
%  your set method for each scanline of the view.  The pixel extent is
%  confined to the image canvas-- that is no negative offsets or widths or
%  heights that exceed the image dimension.  The pixels are initiallly
%  undefined and any settings you make in the callback method are automagically
%  synced back to your image.
%
%  The callback signature is:
%
%      MagickBooleanType SetImageViewMethod(ImageView *destination,
%        const ssize_t y,const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback set method that must be
%  executed by a single thread at a time.
%
%  The format of the SetWandViewIterator method is:
%
%      MagickBooleanType SetWandViewIterator(WandView *destination,
%        SetWandViewMethod set,void *context)
%
%  A description of each parameter follows:
%
%    o destination: the wand view.
%
%    o set: the set callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType SetWandViewIterator(WandView *destination,
  SetWandViewMethod set,void *context)
{
  Image
    *destination_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(destination != (WandView *) NULL);
  assert(destination->signature == MagickWandSignature);
  if (set == (SetWandViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->wand->images;
  /* pixels will be written, so the image must be DirectClass */
  status=SetImageStorageClass(destination_image,DirectClass,
    destination->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=destination->extent.height-destination->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(destination_image,destination_image,height,1)
#endif
  for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    ssize_t
      x;

    Quantum
      *magick_restrict pixels;

    /* a failed row cannot break out of an OpenMP loop; skip instead */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x,
      y,destination->extent.width,1,destination->exception);
    if (pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Unlike the get/update iterators the pixel wands are NOT pre-loaded:
      the callback is expected to define every pixel of the scanline.
    */
    if (set(destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    for (x=0; x < (ssize_t) destination->extent.width; x++)
    {
      PixelGetQuantumPixel(destination->image,destination->pixel_wands[id][x],
        pixels);
      pixels+=GetPixelChannels(destination->image);
    }
    sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(destination_image,destination->description,
          progress,destination->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s f e r W a n d V i e w I t e r a t o r                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransferWandViewIterator() iterates over two wand views in parallel and
%  calls your transfer method for each scanline of the view.  The source pixel
%  extent is not confined to the image canvas-- that is you can include
%  negative offsets or widths or heights that exceed the image dimension.
%  However, the destination wand view is confined to the image canvas-- that
%  is no negative offsets or widths or heights that exceed the image dimension
%  are permitted.
%
%  The callback signature is:
%
%      MagickBooleanType TransferImageViewMethod(const WandView *source,
%        WandView *destination,const ssize_t y,const int thread_id,
%        void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
%
%  The format of the TransferWandViewIterator method is:
%
%      MagickBooleanType TransferWandViewIterator(WandView *source,
%        WandView *destination,TransferWandViewMethod transfer,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source wand view.
%
%    o destination: the destination wand view.
%
%    o transfer: the transfer callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType TransferWandViewIterator(WandView *source,
  WandView *destination,TransferWandViewMethod transfer,void *context)
{
  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (WandView *) NULL);
  assert(source->signature == MagickWandSignature);
  if (transfer == (TransferWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  destination_image=destination->wand->images;
  /* destination is written to, so it must be DirectClass */
  status=SetImageStorageClass(destination_image,DirectClass,
    destination->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,destination_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    const Quantum
      *magick_restrict pixels;

    ssize_t
      x;

    Quantum
      *magick_restrict destination_pixels;

    /* a failed row cannot break out of an OpenMP loop; skip instead */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* stage the source scanline into this thread's pixel wands */
    for (x=0; x < (ssize_t) source->extent.width; x++)
    {
      PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]);
      pixels+=GetPixelChannels(source->image);
    }
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,
      destination->exception);
    if (destination_pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) destination->extent.width; x++)
    {
      PixelSetQuantumPixel(destination->image,destination_pixels,
        destination->pixel_wands[id][x]);
      destination_pixels+=GetPixelChannels(destination->image);
    }
    /* user callback works on the staged pixel wands */
    if (transfer(source,destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /*
      Re-fetch the destination scanline and copy the (possibly updated)
      pixel wands back.  NOTE(review): this second fetch is not checked for
      NULL before being written -- presumably it cannot fail after the first
      fetch succeeded; confirm.
    */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,
      destination->exception);
    for (x=0; x < (ssize_t) destination->extent.width; x++)
    {
      PixelGetQuantumPixel(destination->image,destination->pixel_wands[id][x],
        destination_pixels);
      destination_pixels+=GetPixelChannels(destination->image);
    }
    sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   U p d a t e W a n d V i e w I t e r a t o r                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UpdateWandViewIterator() iterates over the wand view in parallel and calls
%  your update method for each scanline of the view.  The pixel extent is
%  confined to the image canvas-- that is no negative offsets or widths or
%  heights that exceed the image dimension are permitted.  Updates to pixels
%  in your callback are automagically synced back to the image.
%
%  The callback signature is:
%
%      MagickBooleanType UpdateImageViewMethod(WandView *source,const ssize_t y,
%        const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback update method that must be
%  executed by a single thread at a time.
%
%  The format of the UpdateWandViewIterator method is:
%
%      MagickBooleanType UpdateWandViewIterator(WandView *source,
%        UpdateWandViewMethod update,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source wand view.
%
%    o update: the update callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType UpdateWandViewIterator(WandView *source,
  UpdateWandViewMethod update,void *context)
{
  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (WandView *) NULL);
  assert(source->signature == MagickWandSignature);
  if (update == (UpdateWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  /* pixels will be written back, so the image must be DirectClass */
  status=SetImageStorageClass(source_image,DirectClass,source->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    ssize_t
      x;

    Quantum
      *magick_restrict pixels;

    /* a failed row cannot break out of an OpenMP loop; skip instead */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* stage the scanline into this thread's pixel wands ... */
    for (x=0; x < (ssize_t) source->extent.width; x++)
    {
      PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]);
      pixels+=GetPixelChannels(source->image);
    }
    /* ... let the callback update them ... */
    if (update(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    /*
      ... and copy the results back.  NOTE(review): `pixels' was advanced
      past the scanline by the staging loop above, so this write-back starts
      one row's worth of channels beyond the fetched region -- looks like a
      bug (the other iterators re-fetch the scanline first); confirm against
      the upstream implementation.
    */
    for (x=0; x < (ssize_t) source->extent.width; x++)
    {
      PixelGetQuantumPixel(source->image,source->pixel_wands[id][x],pixels);
      pixels+=GetPixelChannels(source->image);
    }
    sync=SyncCacheViewAuthenticPixels(source->view,source->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
BiCGStab.c
#include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <string.h> #include <math.h> #include <mkl_blas.h> #include <mpi.h> #include <hb_io.h> #include <vector> #include "reloj.h" #include "ScalarVectors.h" #include "SparseProduct.h" #include "ToolsMPI.h" #include "matrix.h" #include "common.h" #include "exblas/exdot.h" // ================================================================================ #define DIRECT_ERROR 0 #define PRECOND 1 #define VECTOR_OUTPUT 0 void BiCGStab (SparseMatrix mat, double *x, double *b, int *sizes, int *dspls, int myId) { int size = mat.dim2, sizeR = mat.dim1; int IONE = 1; double DONE = 1.0, DMONE = -1.0, DZERO = 0.0; int n, n_dist, iter, maxiter, nProcs; double beta, tol, tol0, alpha, umbral, rho, omega, tmp; double *s = NULL, *q = NULL, *r = NULL, *p = NULL, *r0 = NULL, *y = NULL, *p_hat = NULL, *q_hat = NULL; double *aux = NULL; double t1, t2, t3, t4; double reduce[2]; #if PRECOND int i, *posd = NULL; double *diags = NULL; #endif MPI_Comm_size(MPI_COMM_WORLD, &nProcs); n = size; n_dist = sizeR; maxiter = 16 * size; umbral = 1.0e-8; CreateDoubles (&s, n_dist); CreateDoubles (&q, n_dist); CreateDoubles (&r, n_dist); CreateDoubles (&r0, n_dist); CreateDoubles (&p, n_dist); CreateDoubles (&y, n_dist); #if DIRECT_ERROR // init exact solution double *res_err = NULL, *x_exact = NULL; CreateDoubles (&x_exact, n_dist); CreateDoubles (&res_err, n_dist); InitDoubles (x_exact, n_dist, DONE, DZERO); #endif // DIRECT_ERROR #if PRECOND CreateInts (&posd, n_dist); CreateDoubles (&p_hat, n_dist); CreateDoubles (&q_hat, n_dist); CreateDoubles (&diags, n_dist); GetDiagonalSparseMatrix2 (mat, dspls[myId], diags, posd); #pragma omp parallel for for (i=0; i<n_dist; i++) diags[i] = DONE / diags[i]; #endif CreateDoubles (&aux, n); #if VECTOR_OUTPUT // write to file for testing purpose FILE *fp; if (myId == 0) { char name[50]; sprintf(name, "exblas-%d.txt", nProcs); fp = fopen(name,"w"); } #endif iter = 0; MPI_Allgatherv (x, sizeR, 
MPI_DOUBLE, aux, sizes, dspls, MPI_DOUBLE, MPI_COMM_WORLD); InitDoubles (s, sizeR, DZERO, DZERO); ProdSparseMatrixVectorByRows (mat, 0, aux, s); // s = A * x dcopy (&n_dist, b, &IONE, r, &IONE); // r = b daxpy (&n_dist, &DMONE, s, &IONE, r, &IONE); // r -= s dcopy (&n_dist, r, &IONE, p, &IONE); // p = r dcopy (&n_dist, r, &IONE, r0, &IONE); // r0 = r // compute tolerance and <r0,r0> std::vector<int64_t> h_superacc(2 * exblas::BIN_COUNT); std::vector<int64_t> h_superacc_tol(exblas::BIN_COUNT); int imin=exblas::IMIN, imax=exblas::IMAX; exblas::cpu::exdot (n_dist, r, r, &h_superacc[0]); // ReproAllReduce -- Begin exblas::cpu::Normalize(&h_superacc[0], imin, imax); if (myId == 0) { MPI_Reduce (MPI_IN_PLACE, &h_superacc[0], exblas::BIN_COUNT, MPI_INT64_T, MPI_SUM, 0, MPI_COMM_WORLD); } else { MPI_Reduce (&h_superacc[0], NULL, exblas::BIN_COUNT, MPI_INT64_T, MPI_SUM, 0, MPI_COMM_WORLD); } if (myId == 0) { rho = exblas::cpu::Round( &h_superacc[0] ); } MPI_Bcast(&rho, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); // ReproAllReduce -- End tol0 = sqrt (rho); tol = tol0; #if DIRECT_ERROR // compute direct error double direct_err; dcopy (&n_dist, x_exact, &IONE, res_err, &IONE); // res_err = x_exact daxpy (&n_dist, &DMONE, x, &IONE, res_err, &IONE); // res_err -= x // compute inf norm direct_err = norm_inf(n_dist, res_err); MPI_Allreduce(MPI_IN_PLACE, &direct_err, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); // // compute euclidean norm // direct_err = ddot (&n_dist, res_err, &IONE, res_err, &IONE); // direct_err = res_err' * res_err // MPI_Allreduce(MPI_IN_PLACE, &direct_err, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); // direct_err = sqrt(direct_err); #endif // DIRECT_ERROR MPI_Barrier(MPI_COMM_WORLD); if (myId == 0) reloj (&t1, &t2); while ((iter < maxiter) && (tol > umbral)) { #if PRECOND VvecDoubles (DONE, diags, p, DZERO, p_hat, n_dist); // p_hat = D^-1 * p #else p_hat = p; #endif MPI_Allgatherv (p_hat, sizeR, MPI_DOUBLE, aux, sizes, dspls, MPI_DOUBLE, MPI_COMM_WORLD); InitDoubles (s, 
sizeR, DZERO, DZERO); ProdSparseMatrixVectorByRows (mat, 0, aux, s); // s = A * p if (myId == 0) #if DIRECT_ERROR printf ("%d \t %a \t %a \n", iter, tol, direct_err); #else printf ("%d \t %a \n", iter, tol); #endif // DIRECT_ERROR exblas::cpu::exdot (n_dist, r0, s, &h_superacc[0]); // alpha = <r_0, r_iter> / <r_0, s> // ReproAllReduce -- Begin exblas::cpu::Normalize(&h_superacc[0], imin, imax); if (myId == 0) { MPI_Reduce (MPI_IN_PLACE, &h_superacc[0], exblas::BIN_COUNT, MPI_INT64_T, MPI_SUM, 0, MPI_COMM_WORLD); } else { MPI_Reduce (&h_superacc[0], NULL, exblas::BIN_COUNT, MPI_INT64_T, MPI_SUM, 0, MPI_COMM_WORLD); } if (myId == 0) { alpha = exblas::cpu::Round( &h_superacc[0] ); } MPI_Bcast(&alpha, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); // ReproAllReduce -- End alpha = rho / alpha; dcopy (&n_dist, r, &IONE, q, &IONE); // q = r tmp = -alpha; daxpy (&n_dist, &tmp, s, &IONE, q, &IONE); // q = r - alpha * s; // second spmv #if PRECOND VvecDoubles (DONE, diags, q, DZERO, q_hat, n_dist); // q_hat = D^-1 * q #else q_hat = q; #endif MPI_Allgatherv (q_hat, sizeR, MPI_DOUBLE, aux, sizes, dspls, MPI_DOUBLE, MPI_COMM_WORLD); InitDoubles (y, sizeR, DZERO, DZERO); ProdSparseMatrixVectorByRows (mat, 0, aux, y); // y = A * q // omega = <q, y> / <y, y> exblas::cpu::exdot (n_dist, q, y, &h_superacc[0]); exblas::cpu::exdot (n_dist, y, y, &h_superacc_tol[0]); // ReproAllReduce -- Begin exblas::cpu::Normalize(&h_superacc[0], imin, imax); exblas::cpu::Normalize(&h_superacc_tol[0], imin, imax); // merge two superaccs into one for reduction for (int i = 0; i < exblas::BIN_COUNT; i++) { h_superacc[exblas::BIN_COUNT + i] = h_superacc_tol[i]; } if (myId == 0) { MPI_Reduce (MPI_IN_PLACE, &h_superacc[0], 2*exblas::BIN_COUNT, MPI_INT64_T, MPI_SUM, 0, MPI_COMM_WORLD); } else { MPI_Reduce (&h_superacc[0], NULL, 2*exblas::BIN_COUNT, MPI_INT64_T, MPI_SUM, 0, MPI_COMM_WORLD); } if (myId == 0) { // split them back for (int i = 0; i < exblas::BIN_COUNT; i++) { h_superacc_tol[i] = 
h_superacc[exblas::BIN_COUNT + i]; } reduce[0] = exblas::cpu::Round( &h_superacc[0] ); reduce[1] = exblas::cpu::Round( &h_superacc_tol[0] ); } MPI_Bcast(reduce, 2, MPI_DOUBLE, 0, MPI_COMM_WORLD); // ReproAllReduce -- End omega = reduce[0] / reduce[1]; // x+1 = x + alpha * p + omega * q daxpy (&n_dist, &alpha, p_hat, &IONE, x, &IONE); daxpy (&n_dist, &omega, q_hat, &IONE, x, &IONE); // r+1 = q - omega * y dcopy (&n_dist, q, &IONE, r, &IONE); // r = q tmp = -omega; daxpy (&n_dist, &tmp, y, &IONE, r, &IONE); // r = q - omega * y; // rho = <r0, r+1> and tolerance // cannot just use <r0, r> as the stopping criteria since it slows the convergence compared to <r, r> exblas::cpu::exdot (n_dist, r0, r, &h_superacc[0]); exblas::cpu::exdot (n_dist, r, r, &h_superacc_tol[0]); // ReproAllReduce -- Begin exblas::cpu::Normalize(&h_superacc[0], imin, imax); exblas::cpu::Normalize(&h_superacc_tol[0], imin, imax); // merge two superaccs into one for reduction for (int i = 0; i < exblas::BIN_COUNT; i++) { h_superacc[exblas::BIN_COUNT + i] = h_superacc_tol[i]; } if (myId == 0) { MPI_Reduce (MPI_IN_PLACE, &h_superacc[0], 2*exblas::BIN_COUNT, MPI_INT64_T, MPI_SUM, 0, MPI_COMM_WORLD); } else { MPI_Reduce (&h_superacc[0], NULL, 2*exblas::BIN_COUNT, MPI_INT64_T, MPI_SUM, 0, MPI_COMM_WORLD); } if (myId == 0) { // split them back for (int i = 0; i < exblas::BIN_COUNT; i++) { h_superacc_tol[i] = h_superacc[exblas::BIN_COUNT + i]; } reduce[0] = exblas::cpu::Round( &h_superacc[0] ); reduce[1] = exblas::cpu::Round( &h_superacc_tol[0] ); } MPI_Bcast(reduce, 2, MPI_DOUBLE, 0, MPI_COMM_WORLD); // ReproAllReduce -- End tmp = reduce[0]; tol = sqrt (reduce[1]) / tol0; // beta = (alpha / omega) * <r0, r+1> / <r0, r> beta = (alpha / omega) * (tmp / rho); rho = tmp; // p+1 = r+1 + beta * (p - omega * s) tmp = -omega; daxpy (&n_dist, &tmp, s, &IONE, p, &IONE); // p -= omega * s dscal (&n_dist, &beta, p, &IONE); // p = beta * p daxpy (&n_dist, &DONE, r, &IONE, p, &IONE); // p += r #if DIRECT_ERROR // 
compute direct error dcopy (&n_dist, x_exact, &IONE, res_err, &IONE); // res_err = x_exact daxpy (&n_dist, &DMONE, x, &IONE, res_err, &IONE); // res_err -= x // compute inf norm direct_err = norm_inf(n_dist, res_err); MPI_Allreduce(MPI_IN_PLACE, &direct_err, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); // // compute euclidean norm // direct_err = ddot (&n_dist, res_err, &IONE, res_err, &IONE); // MPI_Allreduce(MPI_IN_PLACE, &direct_err, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); // direct_err = sqrt(direct_err); #endif // DIRECT_ERROR iter++; } MPI_Barrier(MPI_COMM_WORLD); if (myId == 0) reloj (&t3, &t4); #if VECTOR_OUTPUT // print aux MPI_Allgatherv (x, n_dist, MPI_DOUBLE, aux, sizes, dspls, MPI_DOUBLE, MPI_COMM_WORLD); if (myId == 0) { fprintf(fp, "%d\n", iter); for (int ip = 0; ip < n; ip++) fprintf(fp, "%a\n", aux[ip]); fclose(fp); } #endif if (myId == 0) { printf ("Size: %d \n", n); printf ("Iter: %d \n", iter); printf ("Tol: %a \n", tol); printf ("Time_loop: %20.10e\n", (t3-t1)); printf ("Time_iter: %20.10e\n", (t3-t1)/iter); } RemoveDoubles (&aux); RemoveDoubles (&s); RemoveDoubles (&q); RemoveDoubles (&r); RemoveDoubles (&p); RemoveDoubles (&r0); RemoveDoubles (&y); #if PRECOND RemoveDoubles (&diags); RemoveInts (&posd); RemoveDoubles(&p_hat); RemoveDoubles (&q_hat); #endif } /*********************************************************************************/ int main (int argc, char **argv) { int dim; double *sol1 = NULL, *sol2 = NULL; int index = 0, indexL = 0; SparseMatrix mat = {0, 0, NULL, NULL, NULL}, sym = {0, 0, NULL, NULL, NULL}; int root = 0, myId, nProcs; int dimL, dspL, *vdimL = NULL, *vdspL = NULL; SparseMatrix matL = {0, 0, NULL, NULL, NULL}; double *sol1L = NULL, *sol2L = NULL; int mat_from_file, nodes, size_param, stencil_points; if (argc == 3) { mat_from_file = atoi(argv[2]); } else { mat_from_file = atoi(argv[2]); nodes = atoi(argv[3]); size_param = atoi(argv[4]); stencil_points = atoi(argv[5]); } /***************************************/ 
MPI_Init (&argc, &argv); // Definition of the variables nProcs and myId MPI_Comm_size(MPI_COMM_WORLD, &nProcs); MPI_Comm_rank(MPI_COMM_WORLD, &myId); root = nProcs-1; root = 0; /***************************************/ CreateInts (&vdimL, nProcs); CreateInts (&vdspL, nProcs); if(mat_from_file) { if (myId == root) { // Creating the matrix ReadMatrixHB (argv[1], &sym); TransposeSparseMatrices (sym, 0, &mat, 0); dim = mat.dim1; } // Distributing the matrix dim = DistributeMatrix (mat, index, &matL, indexL, vdimL, vdspL, root, MPI_COMM_WORLD); dimL = vdimL[myId]; dspL = vdspL[myId]; } else { dim = size_param * size_param * size_param; int divL, rstL, i; divL = (dim / nProcs); rstL = (dim % nProcs); for (i=0; i<nProcs; i++) vdimL[i] = divL + (i < rstL); vdspL[0] = 0; for (i=1; i<nProcs; i++) vdspL[i] = vdspL[i-1] + vdimL[i-1]; dimL = vdimL[myId]; dspL = vdspL[myId]; int band_width = size_param * (size_param + 1) + 1; band_width = 100 * nodes; long nnz_here = ((long) (stencil_points + 2 * band_width)) * dimL; printf ("dimL: %d, nodes: %d, size_param: %d, band_width: %d, stencil_points: %d, nnz_here: %ld\n", dimL, nodes, size_param, band_width, stencil_points, nnz_here); allocate_matrix(dimL, dim, nnz_here, &matL); generate_Poisson3D_filled(&matL, size_param, stencil_points, band_width, dspL, dimL, dim); // To generate ill-conditioned matrices // double factor = 1.0e6; // ScaleFirstRowCol(matL, dspL, dimL, myId, root, factor); } MPI_Barrier(MPI_COMM_WORLD); // Creating the vectors CreateDoubles (&sol1, dim); CreateDoubles (&sol2, dim); CreateDoubles (&sol1L, dimL); CreateDoubles (&sol2L, dimL); InitDoubles (sol2, dim, 0.0, 0.0); InitDoubles (sol1L, dimL, 0.0, 0.0); InitDoubles (sol2L, dimL, 0.0, 0.0); /***************************************/ int IONE = 1; double beta = 1.0 / sqrt(dim); if(mat_from_file) { // compute b = A * x_c, x_c = 1/sqrt(nbrows) InitDoubles (sol1, dim, 1.0, 0.0); ProdSparseMatrixVectorByRows (matL, 0, sol1, sol1L); // s = A * x dscal (&dimL, &beta, 
sol1L, &IONE); // s = beta * s } else { InitDoubles (sol1, dim, 0.0, 0.0); int k=0; int *vptrM = matL.vptr; for (int i=0; i < matL.dim1; i++) { for(int j=vptrM[i]; j<vptrM[i+1]; j++) { sol1L[k] += matL.vval[j]; } } } MPI_Scatterv (sol2, vdimL, vdspL, MPI_DOUBLE, sol2L, dimL, MPI_DOUBLE, root, MPI_COMM_WORLD); BiCGStab (matL, sol2L, sol1L, vdimL, vdspL, myId); // Error computation ||b-Ax|| // if(mat_from_file) { MPI_Allgatherv (sol2L, dimL, MPI_DOUBLE, sol2, vdimL, vdspL, MPI_DOUBLE, MPI_COMM_WORLD); InitDoubles (sol2L, dimL, 0, 0); ProdSparseMatrixVectorByRows (matL, 0, sol2, sol2L); double DMONE = -1.0; daxpy (&dimL, &DMONE, sol2L, &IONE, sol1L, &IONE); // ReproAllReduce -- Begin std::vector<int64_t> h_superacc(exblas::BIN_COUNT); exblas::cpu::exdot (dimL, sol1L, sol1L, &h_superacc[0]); int imin=exblas::IMIN, imax=exblas::IMAX; exblas::cpu::Normalize(&h_superacc[0], imin, imax); if (myId == 0) { MPI_Reduce (MPI_IN_PLACE, &h_superacc[0], exblas::BIN_COUNT, MPI_INT64_T, MPI_SUM, 0, MPI_COMM_WORLD); } else { MPI_Reduce (&h_superacc[0], NULL, exblas::BIN_COUNT, MPI_INT64_T, MPI_SUM, 0, MPI_COMM_WORLD); } if (myId == 0) { beta = exblas::cpu::Round( &h_superacc[0] ); } MPI_Bcast(&beta, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); // ReproAllReduce -- End // } else { // // case with x_exact = {1.0} // for (int i=0; i<dimL; i++) // sol2L[i] -= 1.0; // beta = ddot (&dimL, sol2L, &IONE, sol2L, &IONE); // } beta = sqrt(beta); if (myId == 0) printf ("Error: %20.10e\n", beta); /***************************************/ // Freeing memory RemoveDoubles (&sol1); RemoveDoubles (&sol2); RemoveDoubles (&sol1L); RemoveDoubles (&sol2L); RemoveInts (&vdspL); RemoveInts (&vdimL); if (myId == root) { RemoveSparseMatrix (&mat); RemoveSparseMatrix (&sym); } MPI_Finalize (); return 0; }
GB_unop__isfinite_bool_fp32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__isfinite_bool_fp32)
// op(A') function:  GB (_unop_tran__isfinite_bool_fp32)

// C type:   bool
// A type:   float
// cast:     float cij = (aij)
// unaryop:  cij = isfinite (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = isfinite (x) ;

// casting
#define GB_CAST(z, aij) \
    float z = (aij) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    float aij = Ax [pA] ;           \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = (aij) ;               \
    Cx [pC] = isfinite (z) ;        \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISFINITE || GxB_NO_BOOL || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = isfinite (aij) entrywise over anz entries, in parallel with
// nthreads OpenMP threads.  Returns GrB_NO_VALUE if the operator/type pair
// has been compiled out (GB_DISABLE), GrB_SUCCESS otherwise.

GrB_Info GB (_unop_apply__isfinite_bool_fp32)
(
    bool *Cx,                   // output array; Cx and Ax may be aliased
    const float *Ax,            // input values of A
    const int8_t *restrict Ab,  // A->b if A is bitmap; NULL if A is full
    int64_t anz,                // number of entries in Ax/Cx (and Ab if present)
    int nthreads                // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // full case: every position holds an entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = (aij) ;
            Cx [p] = isfinite (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b,
        // so positions with Ab [p] == 0 are skipped (not entries)
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = (aij) ;
            Cx [p] = isfinite (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in the shared template GB_unop_transpose.c,
// which is specialized here via the GB_* macros defined above.

GrB_Info GB (_unop_tran__isfinite_bool_fp32)
(
    GrB_Matrix C,                       // output matrix (already allocated)
    const GrB_Matrix A,                 // input matrix, transposed into C
    int64_t *restrict *Workspaces,      // per-workspace bucket counters
    const int64_t *restrict A_slice,    // how A is sliced across threads
    int nworkspaces,                    // # of workspaces
    int nthreads                        // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
stepwise_tangent.h
#ifndef __MIXTURE_MODEL_OPT_STEPWISE_TANGENT_H #define __MIXTURE_MODEL_OPT_STEPWISE_TANGENT_H #include <vector> #include <functional> #include <numeric> #include <cassert> #include <atomic> #include <algorithm> #include <iomanip> #include <iostream> #include <fstream> #include <string> #include <boost/thread.hpp> #include "../kdtree-eigen/kdtree_eigen.h" #include "../distribution.h" #include "../mixture_model.h" #include "../samples.h" #include "../sphere_volume.h" #include "util.h" // #include "../gsl/gsl" #define TANGENT_DEBUG 0 #define SPLIT_AND_MERGE 0 namespace jmm { template< typename Scalar, int t_meanDims, int t_covDims > struct SDMMParams { using Vectord = Eigen::Matrix<Scalar, t_meanDims, 1>; using Matrixd = Eigen::Matrix<Scalar, t_covDims, t_covDims>; EIGEN_MAKE_ALIGNED_OPERATOR_NEW SDMMParams(int size) : size(size) { heuristicWeight = Scalar(0.f); weights.resize(size, Scalar(0.f)); means.resize(size, Vectord::Zero()); covs.resize(size, Matrixd::Zero()); densitySum.resize(size, Scalar(0.f)); logDensitySum.resize(size, Scalar(0.f)); logPdfSum.resize(size, Scalar(0.f)); } SDMMParams(SDMMParams&& other) = default; SDMMParams(const SDMMParams& other) = default; SDMMParams& operator=(SDMMParams&& other) = default; SDMMParams& operator=(const SDMMParams& other) = default; void setZero() { heuristicWeight = Scalar(0.f); std::fill(weights.begin(), weights.end(), Scalar(0.f)); std::fill(means.begin(), means.end(), Vectord::Zero()); std::fill(covs.begin(), covs.end(), Matrixd::Zero()); std::fill(densitySum.begin(), densitySum.end(), Scalar(0.f)); std::fill(logPdfSum.begin(), logPdfSum.end(), Scalar(0.f)); std::fill(logDensitySum.begin(), logDensitySum.end(), Scalar(0.f)); } void normalize(SDMMParams& out, Scalar invWeight) { auto normalize = [&](const auto& value) { return value * invWeight; }; out.heuristicWeight = normalize(heuristicWeight); std::transform(weights.begin(), weights.end(), out.weights.begin(), normalize); std::transform(means.begin(), means.end(), 
out.means.begin(), normalize); std::transform(covs.begin(), covs.end(), out.covs.begin(), normalize); } SDMMParams& operator*=(Scalar multiplier) { auto multiply = [multiplier](auto& range) { std::transform( range.begin(), range.end(), range.begin(), [multiplier](const auto& value) { return value * multiplier; } ); }; heuristicWeight *= multiplier; multiply(weights); multiply(means); multiply(covs); return *this; } void sumProductInto(SDMMParams& out, Scalar multiplier) { auto sumProduct = [&](const auto& value, const auto& outValue) { return multiplier * value + outValue; }; auto sumProductIntoRange = [&](const auto& range, auto& outRange) { std::transform( range.begin(), range.end(), outRange.begin(), outRange.begin(), sumProduct ); }; out.heuristicWeight = sumProduct(heuristicWeight, out.heuristicWeight); sumProductIntoRange(weights, out.weights); sumProductIntoRange(means, out.means); sumProductIntoRange(covs, out.covs); } void sumErrorStatsInto(SDMMParams& out) { auto sum = [&](const auto& value, const auto& outValue) { return value + outValue; }; auto sumIntoRange = [&](const auto& range, auto& outRange) { std::transform( range.begin(), range.end(), outRange.begin(), outRange.begin(), sum ); }; sumIntoRange(densitySum, out.densitySum); sumIntoRange(logDensitySum, out.logDensitySum); sumIntoRange(logPdfSum, out.logPdfSum); } void calculateError(jmm::aligned_vector<Scalar>& error, Scalar nSamples) { if(error.size() != size) { error.resize(size, 0.f); } for(int component_i = 0; component_i < densitySum.size(); ++component_i) { // error[component_i] = ( // logDensitySum[component_i] - // std::log(densitySum[component_i]) * densitySum[component_i] - // logPdfSum[component_i] // ) / densitySum[component_i]; // error[component_i] /= nSamples; error[component_i] = densitySum[component_i] / nSamples; } } int size; Scalar heuristicWeight; jmm::aligned_vector<Scalar> weights; jmm::aligned_vector<Vectord> means; jmm::aligned_vector<Matrixd> covs; 
jmm::aligned_vector<Scalar> densitySum; jmm::aligned_vector<Scalar> logDensitySum; jmm::aligned_vector<Scalar> logPdfSum; }; template< int t_dims, int t_components, int t_conditionalDims, typename Scalar, template<int, int, typename> class Component_t, template<int, int, typename> class Marginal_t > class StepwiseTangentEM { protected: using MM = MixtureModel<t_dims, t_components, t_conditionalDims, Scalar, Component_t, Marginal_t>; using Vectord = typename MM::Vectord; using Component = typename MM::Component; using JointTangentVectord = typename Component::JointTangentVectord; using Matrixd = typename MM::Matrixd; using TangentSpaced = TangentSpace<t_dims, t_conditionalDims, Scalar>; constexpr static int t_statDims = Component::t_jointTangentDims; int iterationsRun; jmm::aligned_vector<int> iterationsRunForMixture; jmm::aligned_vector<bool> startedTraining; Scalar heuristicTotalWeight; jmm::aligned_vector<Scalar> totalWeightForMixture; SDMMParams<Scalar, t_statDims, t_statDims> statsGlobal; SDMMParams<Scalar, t_statDims, t_statDims> statsGlobalNormalized; SDMMParams<Scalar, t_statDims, t_statDims> newParams; jmm::aligned_vector<TangentSpaced> tangentSpacesNew; jmm::aligned_vector<Scalar> samplesPerComponentGlobal; jmm::aligned_vector<Scalar> samplesPerComponentInIterationGlobal; Scalar sampleCountGlobal; jmm::aligned_vector<Matrixd> bPriors; jmm::aligned_vector<Eigen::Matrix<Scalar, 3, 3>> bDepthPriors; Scalar alpha; Scalar bPrior; Scalar niPriorMinusOne; Scalar epsilon; bool decreasePrior; Scalar trainingBatch = 0; bool jacobianCorrection = false; int trainingCutoff = 32; Scalar minBPrior = 0; // 1e-8f; Scalar minNiPriorMinusOne = 0; // 1e-6; public: // StepwiseTangentEM() : StepwiseTangentEM(0.9, 0.5, 6e-5, 1e-100, true) { } // alpha(0.9), // bPrior(0.5), // niPriorMinusOne(6e-5), // epsilon(1e-100), // decreasePrior(true) {} StepwiseTangentEM( Scalar alpha=0.9, Eigen::Matrix<Scalar, 5, 1> bPrior=Eigen::Matrix<Scalar, 5, 1>::Constant(1e-5), Scalar 
niPriorMinusOne=6e-5, Scalar epsilon=1e-100, bool decreasePrior=true ) : statsGlobal(t_components), statsGlobalNormalized(t_components), newParams(t_components), alpha(alpha), // bPrior(bPrior), niPriorMinusOne(niPriorMinusOne), epsilon(epsilon), decreasePrior(decreasePrior) { iterationsRun = 0; iterationsRunForMixture.resize(t_components, 0); heuristicTotalWeight = 0.f; totalWeightForMixture.resize(t_components, 0.f); startedTraining.resize(t_components, false); samplesPerComponentGlobal.resize(t_components, 0); samplesPerComponentInIterationGlobal.resize(t_components); sampleCountGlobal = 0; Matrixd bPriorMatrix = bPrior.asDiagonal(); bPriors.resize(t_components, bPriorMatrix); bDepthPriors.resize(t_components, Eigen::Matrix<Scalar, 3, 3>::Identity() * epsilon); tangentSpacesNew.resize(t_components); }; void setJacobianCorrection(bool on) { jacobianCorrection = on; } jmm::aligned_vector<Matrixd>& getBPriors() { return bPriors; } auto& getStatsGlobal() { return statsGlobal; } jmm::aligned_vector<Eigen::Matrix<Scalar, 3, 3>>& getBDepthPriors() { return bDepthPriors; } void calculateStats( MM& distribution, Samples<t_dims, Scalar>& samples, bool countSamples, SDMMParams<Scalar, t_statDims, t_statDims>& stats, jmm::aligned_vector<Scalar>& samplesPerComponent, Scalar weightSum ) { Eigen::Matrix<Scalar, Eigen::Dynamic, 1> posterior(distribution.nComponents(), 1); Eigen::Matrix<Scalar, Eigen::Dynamic, 1> pdf(distribution.nComponents(), 1); Eigen::Matrix< Scalar, Component::t_jointTangentDims, Eigen::Dynamic > tangentVectors(Component::t_jointTangentDims, distribution.nComponents()); Scalar weightNormalization = (Scalar) samples.size() / weightSum; #pragma omp for for(int sample_i = 0; sample_i < samples.size(); ++sample_i) { if(!isValidSample(samples, sample_i)) { continue; } if(samples.weights(sample_i) == 0) { continue; } Vectord sample = samples.samples.col(sample_i); Scalar heuristicPosterior = 0; bool useHeuristic = samples.isDiffuse(sample_i); 
distribution.posteriorAndLog( sample, useHeuristic, samples.heuristicPdfs(sample_i), pdf, posterior, tangentVectors, heuristicPosterior ); stats.heuristicWeight += samples.weights(sample_i) * heuristicPosterior; const auto& components = distribution.components(); for(int component_i = 0; component_i < distribution.nComponents(); ++component_i) { if(posterior(component_i) < 1e-10) { // TODO: still calculate marginals and normalization continue; } // Scalar weightAugmented = std::sqrt(samples.weights(sample_i)); Scalar weight = samples.weights(sample_i) * posterior(component_i); #if TANGENT_DEBUG == 1 if(weight == 0.f) { std::cerr << "Zero weight * posterior: " << samples.weights(sample_i) << " * " << posterior(component_i) << " from component " << component_i << ", with weight: " << distribution.weights()[component_i] << ", and covariance determinant: " << components[component_i].cov().determinant() << "\n"; continue; } #endif // TANGENT_DEBUG == 1 stats.weights[component_i] += weight; stats.means[component_i] += weight * tangentVectors.col(component_i); stats.covs[component_i] += weight * tangentVectors.col(component_i) * tangentVectors.col(component_i).transpose(); #if SPLIT_AND_MERGE == 1 if(weight > 0 && pdf(component_i) > 0) { Scalar samplingPdf = samples.samplingPdfs(sample_i); Scalar Li = samples.weights(sample_i) * samplingPdf; Scalar LiNormalized = Li * weightNormalization; stats.densitySum[component_i] += LiNormalized * LiNormalized * posterior(component_i) / (pdf(component_i) * samplingPdf) - 1; // stats.densitySum[component_i] += std::abs(weight * weightNormalization - pdf(component_i)); // stats.logDensitySum[component_i] += std::log(weight * weightNormalization); // stats.logPdfSum[component_i] += weight * weightNormalization * std::log(pdf(component_i)); } #endif // SPLIT_AND_MERGE == 1 } } } void calculateStatsPrune( MM& distribution, Samples<t_dims, Scalar>& samples, bool countSamples, SDMMParams<Scalar, t_statDims, t_statDims>& stats, 
jmm::aligned_vector<Scalar>& samplesPerComponent ) { Eigen::Matrix<Scalar, Eigen::Dynamic, 1> posterior(distribution.nComponents(), 1); Eigen::Matrix< Scalar, Component::t_jointTangentDims, Eigen::Dynamic > tangentVectors(Component::t_jointTangentDims, distribution.nComponents()); Eigen::Matrix<int, Eigen::Dynamic, 1> posteriorIndices(distribution.nComponents(), 1); int posteriorLastIdx; #pragma omp for for(int sample_i = 0; sample_i < samples.size(); ++sample_i) { if(!isValidSample(samples, sample_i)) { continue; } if(samples.weights(sample_i) == 0) { continue; } Vectord sample = samples.samples.col(sample_i); Scalar heuristicPosterior = 0; bool useHeuristic = samples.isDiffuse(sample_i); // distribution.posterior( // sample, // useHeuristic, // samples.heuristicPdfs(sample_i), // posterior, // heuristicPosterior // ); distribution.posteriorPruneAndLog( sample, useHeuristic, samples.heuristicPdfs(sample_i), posterior, tangentVectors, posteriorIndices, posteriorLastIdx, heuristicPosterior ); stats.heuristicWeight += samples.weights(sample_i) * heuristicPosterior; const auto& components = distribution.components(); for(int found_i = 0; found_i < posteriorLastIdx; ++found_i) { Scalar weight = samples.weights(sample_i) * posterior(found_i); int component_i = posteriorIndices(found_i); #if TANGENT_DEBUG == 1 if(weight == 0.f) { std::cerr << "Zero weight * posterior: " << samples.weights(sample_i) << " * " << posterior(component_i) << " from component " << component_i << ", with weight: " << distribution.weights()[component_i] << ", and covariance determinant: " << components[component_i].cov().determinant() << "\n"; continue; } #endif // TANGENT_DEBUG == 1 // JointTangentVectord tangentSample; // Scalar jacobian; // bool logSuccess = components[component_i].tangentSpace().log(sample, tangentSample, jacobian); // if(!logSuccess) { // continue; // } // bool isInside = // components[component_i].isInside(samples.samples.col(sample_i), 0.95); // if(countSamples) { // 
samplesPerComponent[component_i] += isInside ? 1 : 0; // } stats.weights[component_i] += weight; stats.means[component_i] += weight * tangentVectors.col(found_i); stats.covs[component_i] += weight * tangentVectors.col(found_i) * tangentVectors.col(found_i).transpose(); } } } bool isValidSample( const Samples<t_dims, Scalar>& samples, int sample_i, bool warn=true ) { if(std::isfinite(samples.weights(sample_i))) { return true; } if(warn) { std::cerr << "inf or nan sample, id=" << sample_i << ", value=" << samples.weights(sample_i) << '\n'; } return false; } Scalar sumWeights(const Samples<t_dims, Scalar>& samples) { Scalar weightSum = 0.f; #pragma omp parallel num_threads(1) { #pragma omp for reduction(+: weightSum) for(int sample_i = 0; sample_i < samples.size(); ++sample_i) { if(!isValidSample(samples, sample_i)) { continue; } weightSum += samples.weights(sample_i); } } return weightSum; } struct SDMMIndividualParams { Scalar weight; JointTangentVectord mean; Matrixd cov; Component distribution() { return Component(mean, cov); } }; constexpr static Scalar weightSplitWeight = 0.5; std::pair<SDMMIndividualParams, SDMMIndividualParams> splitSVD( const SDMMIndividualParams& gaussian ) { constexpr static Scalar u = 0.5; constexpr static Scalar beta = 0.5; constexpr static int l = 0; constexpr static Scalar mean_j_const = std::sqrt((1 - weightSplitWeight) / weightSplitWeight) * u; constexpr static Scalar mean_k_const = std::sqrt(weightSplitWeight / (1 - weightSplitWeight)) * u; Scalar weight = gaussian.weight; Matrixd cov = gaussian.cov; const Eigen::JacobiSVD<Matrixd> svd(cov, Eigen::ComputeFullU | Eigen::ComputeFullV); Matrixd A = svd.matrixU() * svd.singularValues().cwiseSqrt().asDiagonal(); JointTangentVectord a_l = A.col(l); a_l.topRows(3).setZero(); std::cerr << a_l.transpose() << "\n"; Scalar weight_j = weightSplitWeight * weight; Scalar weight_k = (1 - weightSplitWeight) * weight; Scalar sqrt_k_over_j = std::sqrt(weight_k / weight_j); Scalar sqrt_j_over_k = 
std::sqrt(weight_j / weight_k); JointTangentVectord mean_j = gaussian.mean - mean_j_const * a_l; JointTangentVectord mean_k = gaussian.mean + mean_k_const * a_l; Matrixd cov_j = (1 - weightSplitWeight) / weightSplitWeight * cov + ( (beta - beta * u * u - 1) / weightSplitWeight + 1 ) * a_l * a_l.transpose(); Matrixd cov_k = weightSplitWeight / (1 - weightSplitWeight) * cov + ( (beta * u * u - beta - u * u) / (1 - weightSplitWeight) + 1 ) * a_l * a_l.transpose(); return { {weight_j, mean_j, cov_j}, {weight_k, mean_k, cov_k} }; } void splitStatsSVD(MM& distribution, int statIdx) { std::cerr << "Splitting component " << statIdx << ".\n"; Scalar weight = distribution.weights()[statIdx]; const Component& component = distribution.components()[statIdx]; const auto& splits_pair = splitSVD( {weight, component.tangentMean(), component.cov()} ); SDMMIndividualParams splits[2] = {splits_pair.first, splits_pair.second}; std::cerr << "Increasing nComponents to " << distribution.nComponents() + 1 << '\n'; assert(distribution.nComponents() + 1 < t_components); distribution.setNComponents(distribution.nComponents() + 1); int j = statIdx; int k = distribution.nComponents() - 1; const Scalar weightStat = statsGlobal.weights[statIdx]; statsGlobal.weights[j] = weightSplitWeight * weightStat; statsGlobal.weights[k] = (1 - weightSplitWeight) * weightStat; for(int split_i = 0; split_i < 2; ++split_i) { int component_i = (split_i == 0) ? 
                j : k;
            Scalar decreasedBPrior =
                bPrior / (Scalar) (iterationsRunForMixture[component_i] + 1);
            // Vectord bPriorDiag = Vectord::Constant(decreasedBPrior);
            // Copy the child parameters into the new-parameter buffers.
            newParams.weights[component_i] = splits[split_i].weight;
            newParams.means[component_i] = splits[split_i].mean;
            newParams.covs[component_i] = splits[split_i].cov;
            // newParams.covs[component_i] += (
            //     bPriorDiag *
            //     totalWeightForMixture[component_i] /
            //     statsGlobal.weights[component_i]
            // ).asDiagonal();
            distribution.weights()[component_i] = newParams.weights[component_i];
            std::cerr <<
                "Setting component " << component_i <<
                " to: " << newParams.weights[component_i] << ".\n";
            // Map the tangent-space mean back onto the manifold.
            Vectord embeddedMean;
            Scalar expJacobianDet;
            bool success = distribution.components()[component_i].tangentSpace().exp(
                newParams.means[component_i], embeddedMean, expJacobianDet
            );
            assert(success);
            distribution.components()[component_i].set(
                embeddedMean, newParams.covs[component_i]
            );
            // Rebuild the global sufficient statistics consistent with the
            // child's new weight, mean, and covariance.
            statsGlobal.means[component_i] =
                statsGlobal.weights[component_i] * newParams.means[component_i];
            statsGlobal.covs[component_i] =
                statsGlobal.weights[component_i] * (
                    newParams.covs[component_i] +
                    newParams.means[component_i] *
                    newParams.means[component_i].transpose()
                );
            // compareNewAndStats(component_i);
        }
        // NOTE(review): result is never checked here (createCdf(false) in
        // optimize() IS asserted) — confirm whether this should assert too.
        bool successfulCdfCreation = distribution.createCdf(true);
    }

    // One stepwise-EM update of the mixture from a batch of samples.
    // Accumulates per-component sufficient statistics, blends them into the
    // running global statistics with a decaying step size, and rebuilds each
    // component (with priors, tangent-space re-expansion, and an optional
    // Jacobian correction). `maxError` is currently untouched (the
    // split-and-merge error path is commented out).
    void optimize(
        MM& distribution,
        Samples<t_dims, Scalar>& samples,
        Scalar& maxError
    ) {
        int componentBegin = 0;
        int componentEnd = distribution.nComponents();
        // Sum up the weights
        Scalar weightSum = sumWeights(samples);
        if(weightSum == 0) {
            return;
        }
        // NOTE(review): num_threads(1) — effectively serial; the barriers /
        // criticals below only matter if this is raised again.
        #pragma omp parallel num_threads(1)
        {
#if TANGENT_DEBUG == 1
            #pragma omp critical
            {
                std::cerr << "Optimizer threadID=" << omp_get_thread_num() << "\n";
            }
            #pragma omp single
            {
                std::cerr << "Weights sum: " << weightSum << "\n";
            }
#endif
            // Per-thread scratch: step sizes, local statistics, sample counts.
            jmm::aligned_vector<Scalar> eta_i(t_components);
            Scalar heuristicEta = 0;
            SDMMParams<Scalar, t_statDims, t_statDims> stats(t_components);
            jmm::aligned_vector<Scalar> samplesPerComponent(t_components);
            int iterations = 1;
            // if(iterationsRun < 3) {
            //     iterations = 2;
            // }
            for(int emIt = 0; emIt < iterations; ++emIt) {
                #pragma omp barrier
                #pragma omp single
                {
                    newParams.setZero();
                    std::fill(
                        samplesPerComponentInIterationGlobal.begin(),
                        samplesPerComponentInIterationGlobal.end(),
                        0.f
                    );
                }
                stats.setZero();
                std::fill(
                    samplesPerComponent.begin(), samplesPerComponent.end(), 0.f
                );
                #pragma omp barrier
                // E-step: accumulate this thread's sufficient statistics.
                calculateStats(
                    distribution,
                    samples,
                    emIt == 0,
                    stats,
                    samplesPerComponent,
                    weightSum
                );
                #pragma omp barrier
                // Merge per-thread sample counts into the global counters.
                #pragma omp critical
                {
                    for(int component_i = componentBegin;
                        component_i < componentEnd;
                        ++component_i
                    ) {
                        samplesPerComponentGlobal[component_i] +=
                            samplesPerComponent[component_i];
                        samplesPerComponentInIterationGlobal[component_i] +=
                            samplesPerComponent[component_i];
                        sampleCountGlobal += samplesPerComponent[component_i];
                    }
#if TANGENT_DEBUG == 1
                    std::cerr <<
                        "Thread ID=" << omp_get_thread_num() <<
                        " finished calculating stats." <<
                        " Sample count: " << samplesPerComponent[0] << "\n";
#endif
                }
                #pragma omp barrier
                // Stepwise-EM step size: decays polynomially in the number of
                // optimizer runs.
                Scalar learningRate = 0.2;
                heuristicEta = std::pow(learningRate * iterationsRun + 1, -alpha);
                for(int component_i = componentBegin;
                    component_i < componentEnd;
                    ++component_i
                ) {
                    // If a mixture gets 0 samples in the first iteration,
                    // eta_i interpolates between 0 and the new sufficient stats.
                    // If we add the samples per component before this method here,
                    // we risk that happening.
                    // Simply deciding eta_i based on iterationsRunForMixture also
                    // doesn't work, because then we don't accumulate anything from
                    // previous iterations since (iterationsRunForMixture + 1)^-1 = 1.
                    // TODO: COULD BE THE PROBLEM! Turn off after 3rd iteration or so?
                    eta_i[component_i] =
                        std::pow(learningRate * iterationsRun + 1, -alpha);
                }
                #pragma omp barrier
                #pragma omp single
                {
#if TANGENT_DEBUG == 1
                    std::cerr << "eta_i=" << heuristicEta << '\n';
#endif // TANGENT_DEBUG == 1
                    // Exponentially-decayed running total of sample weight.
                    heuristicTotalWeight *= (1.f - heuristicEta);
                    heuristicTotalWeight += heuristicEta * weightSum;
                    for(int component_i = componentBegin;
                        component_i < componentEnd;
                        ++component_i
                    ) {
                        totalWeightForMixture[component_i] *= (1 - eta_i[component_i]);
                        totalWeightForMixture[component_i] +=
                            eta_i[component_i] * weightSum;
                    }
                }
                #pragma omp barrier
                #pragma omp single
                {
                    // Decay old global statistics before blending in new ones.
                    statsGlobal *= (1.f - heuristicEta);
                }
                #pragma omp barrier
                #pragma omp critical
                {
                    stats.sumProductInto(statsGlobal, heuristicEta);
#if SPLIT_AND_MERGE == 1
                    stats.sumErrorStatsInto(statsGlobal);
#endif // SPLIT_AND_MERGE == 1
                }
                #pragma omp barrier
                // M-step (single thread): rebuild every component from the
                // blended, normalized global statistics.
                #pragma omp single
                {
                    auto& components = distribution.components();
                    // Normalize distribution.
                    distribution.setNormalization(
                        (1.f - heuristicEta) * distribution.normalization() +
                        heuristicEta * weightSum / (Scalar) samples.size()
                    );
                    Scalar invTotalWeight = 1.f / heuristicTotalWeight;
                    statsGlobal.normalize(statsGlobalNormalized, invTotalWeight);
                    int weakGaussiansCount = 0;
                    int degenerateWeightsCount = 0;
                    int degenerateGaussiansCount = 0;
                    int untrainedGaussiansCount = 0;
                    // NOTE(review): defined but never invoked below.
                    auto killComponent = [&](int component_i) -> void {
                        newParams.weights[component_i] = 0;
                        statsGlobal.weights[component_i] = 0;
                    };
                    // Priors are annealed away as training progresses.
                    Scalar invGlobalDecreaseFactor = 1.f / Scalar(std::pow(
                        (Scalar) 3, (Scalar) std::min(trainingCutoff, iterationsRun)
                    ));
                    newParams.heuristicWeight =
                        niPriorMinusOne * invGlobalDecreaseFactor +
                        statsGlobalNormalized.heuristicWeight;
                    // #pragma omp for
                    for(int component_i = componentBegin;
                        component_i < componentEnd;
                        ++component_i
                    ) {
                        Scalar decreasedNiPriorMinusOne = niPriorMinusOne;
                        Scalar decreasedApriorMinusTwo =
                            100.f / (Scalar) distribution.nComponents();
                        Matrixd decreasedBPrior =
                            decreasedApriorMinusTwo * bPriors[component_i];
                        Scalar invMixtureDecreaseFactor = 1.f / std::pow(
                            (Scalar) 2,
                            (Scalar) std::min(trainingCutoff, iterationsRun)
                        );
                        if(decreasePrior) {
                            decreasedBPrior =
                                decreasedBPrior * invMixtureDecreaseFactor;
                            decreasedApriorMinusTwo =
                                decreasedApriorMinusTwo * invMixtureDecreaseFactor;
                            decreasedNiPriorMinusOne =
                                niPriorMinusOne * invGlobalDecreaseFactor;
                        }
                        Scalar invWeightStatGlobal =
                            1.f / statsGlobalNormalized.weights[component_i];
                        Scalar invMatrixNormalization = 1.f / (
                            0.05 * decreasedApriorMinusTwo +
                            statsGlobalNormalized.weights[component_i]
                        );
                        // THIS SHOULD NEVER HAPPEN
                        assert(isfinite(statsGlobalNormalized.weights[component_i]));
                        assert(totalWeightForMixture[component_i] > 0);
                        assert(isfinite(invMatrixNormalization));
                        // Dead components should stay dead:
                        // Zero mixture weight means a zero posterior, a zero mean,
                        // and a zero covariance.
                        // Equivalently, a zero weight component will never get
                        if(distribution.weights()[component_i] == 0.f) {
                            ++degenerateWeightsCount;
                            newParams.weights[component_i] = 0.f;
                            continue;
                        }
                        // Components with a zero weight statistic: keep only the
                        // prior-driven weight, skip the mean/covariance update.
                        if(!std::isfinite(invWeightStatGlobal)) {
                            ++weakGaussiansCount;
#if TANGENT_DEBUG == 1
                            std::cerr <<
                                "!isfinite(invWeightStatGlobal) = 1.f / " <<
                                statsGlobalNormalized.weights[component_i] <<
                                std::endl;
#endif // TANGENT_DEBUG == 1
                            newParams.weights[component_i] =
                                decreasedNiPriorMinusOne +
                                statsGlobalNormalized.weights[component_i];
                            continue;
                        }
                        // Only allow components to wake up in the first few
                        // iterations. Otherwise very likely to be garbage.
                        if(samplesPerComponentGlobal[component_i] < trainingBatch
                            && iterationsRun < 3
                        ) {
                            ++untrainedGaussiansCount;
                            newParams.weights[component_i] =
                                (iterationsRun < trainingCutoff) ?
                                decreasedNiPriorMinusOne +
                                statsGlobalNormalized.weights[component_i] :
                                0;
                            continue;
                        }
                        // MAP update: weight, mean, and covariance from the
                        // normalized statistics.
                        newParams.weights[component_i] =
                            decreasedNiPriorMinusOne +
                            statsGlobalNormalized.weights[component_i];
                        newParams.means[component_i] =
                            statsGlobalNormalized.means[component_i] *
                            invWeightStatGlobal;
                        newParams.covs[component_i] =
                            statsGlobalNormalized.covs[component_i] -
                            statsGlobalNormalized.means[component_i] *
                            newParams.means[component_i].transpose();
                        auto dumpDebugInfo = [&](
                            const std::string& error, int component_i
                        ) {
                            std::cerr << error << ": " << component_i << ":\n" <<
                                "weightStatsGlobal=" <<
                                statsGlobal.weights[component_i] << "\n" <<
                                "statsGlobalNormalized.weights=" <<
                                statsGlobalNormalized.weights[component_i] << "\n" <<
                                "invWeightStatGlobal=" <<
                                invWeightStatGlobal << "\n" <<
                                "invMatrixNormalization=" <<
                                invMatrixNormalization << "\n" <<
                                newParams.covs[component_i] <<
                                "\n decreasedBPrior:\n" <<
                                decreasedBPrior <<
                                "\n = decreasedAPrior: " <<
                                decreasedApriorMinusTwo <<
                                " * bPriors:\n" <<
                                bPriors[component_i] <<
                                "\n + bDepthPriors:\n" <<
                                bDepthPriors[component_i] << '\n';
                        };
#if TANGENT_DEBUG == 1
                        dumpDebugInfo("OPTIMIZATION DEBUG", component_i);
#endif // TANGENT_DEBUG == 1
                        // Regularize the covariance with the (decayed) prior.
                        newParams.covs[component_i] += decreasedBPrior;
                        newParams.covs[component_i] *= invMatrixNormalization;
                        if(t_dims == 6) {
                            newParams.covs[component_i].topLeftCorner(3, 3) +=
                                bDepthPriors[component_i];
                        }
                        // Re-embed the updated tangent mean on the manifold.
                        Vectord embeddedMean;
                        Scalar expJacobianDet;
                        bool success = components[component_i].tangentSpace().exp(
                            newParams.means[component_i],
                            embeddedMean,
                            expJacobianDet
                        );
                        assert(success);
                        Matrixd jointJacobian = Matrixd::Identity();
                        Matrixd jointInvJacobian = Matrixd::Identity();
                        // Optionally transport the covariance from the old
                        // tangent space to the tangent space at the new mean.
                        if(jacobianCorrection) {
                            const TangentSpaced& oldTangentSpace =
                                components[component_i].tangentSpace();
                            TangentSpaced newTangentSpace(embeddedMean);
                            Eigen::Matrix<Scalar, 3, 2> expJacobian =
                                oldTangentSpace.expJacobian(
                                    newParams.means[component_i].bottomRows(2)
                                );
                            Eigen::Matrix<Scalar, 2, 3> logJacobian =
                                newTangentSpace.logJacobian(
                                    embeddedMean.bottomRows(3)
                                );
                            Eigen::Matrix<Scalar, 2, 1> meanNewDir =
                                newParams.means[component_i]
                                    .bottomRows(2).normalized();
                            Eigen::Matrix<Scalar, 2, 1> meanNewPerpDir;
                            meanNewPerpDir << -meanNewDir[1], meanNewDir[0];
                            Eigen::Matrix<Scalar, 2, 2> hackobian =
                                meanNewDir * meanNewDir.transpose() +
                                expJacobianDet *
                                meanNewPerpDir * meanNewPerpDir.transpose();
                            jointJacobian.bottomRightCorner(2, 2) =
                                logJacobian *
                                newTangentSpace.invRotation() *
                                oldTangentSpace.rotation() *
                                expJacobian;
#if TANGENT_DEBUG == 1
                            Eigen::Matrix<Scalar, 2, 3> invExpJacobian =
                                oldTangentSpace.logJacobian(
                                    embeddedMean.bottomRows(3)
                                );
                            Eigen::Matrix<Scalar, 3, 2> invLogJacobian =
                                newTangentSpace.expJacobian(
                                    {0.f, 0.f}
                                );
                            jointInvJacobian.bottomRightCorner(2, 2) =
                                invExpJacobian *
                                oldTangentSpace.invRotation() *
                                newTangentSpace.rotation() *
                                invLogJacobian;
                            std::cerr <<
                                "EXP JACOBIAN VALIDATION:\n" << expJacobian <<
                                "\n vs \n" << invExpJacobian <<
                                "\n=\n" << invExpJacobian * expJacobian << "\n" <<
                                "LOG JACOBIAN VALIDATION:\n" << logJacobian <<
                                "\n vs \n" << invLogJacobian << "\n" <<
                                "JOINT JACOBIAN VALIDATION:\n" << jointJacobian <<
                                "\n vs \n" << jointInvJacobian <<
                                "\n vs \n" << hackobian << "\n"
                            ;
#endif // TANGENT_DEBUG == 1
                        }
                        newParams.covs[component_i] =
                            jointJacobian *
                            newParams.covs[component_i] *
                            jointJacobian.transpose();
#if TANGENT_DEBUG == 1
                        // NOTE(review): `jacobian` is not declared in this scope
                        // — this debug branch likely no longer compiles.
                        std::cerr <<
                            "Mean " << component_i << ": " <<
                            newParams.means[component_i].transpose() <<
                            ", embedded: " << embeddedMean.transpose() <<
                            ", jacobian: " << jacobian <<
                            ", newParams.covs det: " <<
                            newParams.covs[component_i].determinant() << "\n";
                        std::cerr <<
                            "Joint jacobian matrix det: " <<
                            jointJacobian.determinant() <<
                            " vs. " << expJacobianDet << '\n';
#endif // TANGENT_DEBUG == 1
                        // Reject non-positive-definite covariances and kill the
                        // component instead of committing garbage.
                        if(!isPositiveDefinite(newParams.covs[component_i])) {
                            dumpDebugInfo("Non-PD Matrix", component_i);
                            Eigen::Matrix<Scalar, 3, 3> spatial =
                                newParams.covs[component_i].topLeftCorner(3, 3);
                            Eigen::Matrix<Scalar, 2, 2> directional =
                                newParams.covs[component_i].bottomRightCorner(2, 2);
                            if(!isPositiveDefinite(spatial)) {
                                std::cerr <<
                                    "Non-PD Spatial Matrix:\n" <<
                                    spatial << std::endl;
                            }
                            if(!isPositiveDefinite(directional)) {
                                std::cerr <<
                                    "Non-PD Directional Matrix:\n" <<
                                    directional << std::endl;
                            }
                            ++degenerateGaussiansCount;
                            newParams.weights[component_i] = 0.f;
                            continue;
                        }
                        components[component_i].set(
                            embeddedMean, newParams.covs[component_i]
                        );
                        // Recenter the accumulated covariance statistic around
                        // the new mean and transport it with the joint Jacobian.
                        statsGlobalNormalized.covs[component_i] -=
                            statsGlobalNormalized.means[component_i] *
                            newParams.means[component_i].transpose();
                        JointTangentVectord conditionMeanStat =
                            statsGlobalNormalized.means[component_i];
                        conditionMeanStat
                            .template bottomRows<Component::t_tangentDims>()
                            .setZero();
                        JointTangentVectord conditionMeanNew =
                            newParams.means[component_i];
                        conditionMeanNew
                            .template bottomRows<Component::t_tangentDims>()
                            .setZero();
                        statsGlobalNormalized.covs[component_i] +=
                            conditionMeanStat * conditionMeanNew.transpose();
                        statsGlobalNormalized.covs[component_i] =
                            jointJacobian *
                            statsGlobalNormalized.covs[component_i] *
                            jointJacobian.transpose();
                        statsGlobal.covs[component_i] =
                            statsGlobalNormalized.covs[component_i] *
                            totalWeightForMixture[component_i];
                        statsGlobal.means[component_i]
                            .template bottomRows<Component::t_tangentDims>()
                            .setZero();
                    }
                    // Copy new distributions
                    // #if TANGENT_DEBUG == 1
                    if(weakGaussiansCount > 0) {
                        std::cerr <<
                            "weakGaussiansCount=" << weakGaussiansCount << '\n';
                        std::cerr <<
                            "degenerateWeightsCount=" <<
                            degenerateWeightsCount << '\n';
                        std::cerr <<
                            "degenerateGaussiansCount=" <<
                            degenerateGaussiansCount << '\n';
                        std::cerr <<
                            "untrainedGaussiansCount=" <<
                            untrainedGaussiansCount << '\n';
                    }
                    // #endif // TANGENT_DEBUG == 1
                    // NOTE(review): pdfNorm is only used in the debug branch
                    // below, where `heuristicWeightNew` looks undeclared
                    // (newParams.heuristicWeight was probably meant) — verify.
                    Scalar pdfNorm = std::accumulate(
                        std::begin(newParams.weights) + componentBegin,
                        std::begin(newParams.weights) + componentEnd,
                        0.f
                    ) + newParams.heuristicWeight;
#if TANGENT_DEBUG == 1
                    std::cerr <<
                        "heuristicWeightNew=" <<
                        heuristicWeightNew / pdfNorm << std::endl;
#endif // TANGENT_DEBUG == 1
                    // Renormalize the weights and commit them to the mixture.
                    jmm::normalizePdf(
                        std::begin(newParams.weights) + componentBegin,
                        std::begin(newParams.weights) + componentEnd
                    );
                    std::copy(
                        std::begin(newParams.weights) + componentBegin,
                        std::begin(newParams.weights) + componentEnd,
                        std::begin(distribution.weights()) + componentBegin
                    );
#if TANGENT_DEBUG == 1
                    std::cerr << "newParams.weights = [";
                    for(int component_i = componentBegin;
                        component_i < componentEnd;
                        ++component_i
                    ) {
                        std::cerr << newParams.weights[component_i] << ", ";
                    }
                    std::cerr << "]\n";
#endif // TANGENT_DEBUG == 1
                    // if(iterationsRun > 3) {
                    //     jmm::aligned_vector<Scalar> error(t_components);
                    //     statsGlobal.calculateError(error, samples.size());
                    //     maxError = *std::max_element(error.begin(), error.begin() + componentEnd);
                    //     distribution.setModelError(maxError);
                    //     for(int error_i = 0; error_i < componentEnd; ++error_i) {
                    //         if(error[error_i] > 100 && distribution.nComponents() < t_components - 1) {
                    //             splitStatsSVD(distribution, error_i);
                    //         }
                    //     }
                    // }
                    bool successfulCdfCreation = distribution.createCdf(false);
                    distribution.configure();
                    assert(successfulCdfCreation);
                    // Bookkeeping: which components have started training and
                    // how many effective iterations each one has seen.
                    for(int component_i = componentBegin;
                        component_i < componentEnd;
                        ++component_i
                    ) {
                        if(samplesPerComponentGlobal[component_i] > 0) {
                            startedTraining[component_i] = true;
                        }
                    }
                    for(int component_i = componentBegin;
                        component_i < componentEnd;
                        ++component_i
                    ) {
                        if(samplesPerComponentGlobal[component_i] >= trainingBatch) {
                            ++iterationsRunForMixture[component_i];
                        }
                    }
                    ++iterationsRun;
                }
                #pragma omp barrier
            }
        }
    }
};

}

#endif /* __MIXTURE_MODEL_OPT_STEPWISE_TANGENT_H */
declare7.c
/* Example of the inbranch and notinbranch clauses on the declare simd
   directive.

   The inbranch clause tells the compiler to generate a SIMD variant of the
   function do_mult() that must be called conditionally within a SIMD loop.
   The notinbranch clause on the declaration of the do_pow() function tells
   the compiler that there is a SIMD variant of the function that must be
   called unconditionally within a SIMD loop. */

/* do_mult() is only ever invoked under the `if` below, hence inbranch. */
#pragma omp declare simd inbranch
float do_mult(float x) { return (-2.0*x); }

/* do_pow() is defined elsewhere; it is always called unconditionally,
   hence notinbranch. */
#pragma omp declare simd notinbranch
extern float do_pow(float);

/* Vectorized loop mixing a conditional (masked) call and an
   unconditional call. */
void simd_loop_with_branch(float *a, float *b, int n)
{
   #pragma omp simd
   for (int i=0; i<n; i++) {
      if (a[i] < 0.0 ) b[i] = do_mult(a[i]);  /* masked SIMD call */
      b[i] = do_pow(b[i]);                    /* unmasked SIMD call */
   }
   /* --- end simd region --- */
}
SOCRSStorage.h
//
// Created by iskakoff on 28/07/16.
//

#ifndef HUBBARD_SOCRSSTORAGE_H
#define HUBBARD_SOCRSSTORAGE_H

#include <vector>
#include <iomanip>

#ifdef _OPENMP
#include <omp.h>
#endif

#include "Storage.h"

namespace EDLib {
  namespace Storage {

    // CRS-like Hamiltonian storage that only keeps the diagonal values,
    // the column indices, and one sign bit per off-diagonal element; the
    // off-diagonal magnitudes are restored on the fly from the model's
    // T/V hopping terms (see av()). Rows are chunked per OpenMP thread.
    //
    // NOTE(review): throughout this class `sizeof(char)` (always 1) is used
    // where a bits-per-byte constant was apparently intended — as written,
    // `_vind_bit` is always 0 and one sign is stored per byte (bit 0).
    // This is internally consistent between fill()/av()/print(), but uses
    // 8x the intended memory; CHAR_BIT was probably meant — verify.
    template<class ModelType>
    class SOCRSStorage : public Storage < typename ModelType::precision > {
      typedef typename ModelType::precision prec;
    public:
      typedef ModelType Model;
      using Storage < prec >::n;
      using Storage < prec >::ntot;
#ifdef USE_MPI
      SOCRSStorage(alps::params &p, Model &m, alps::mpi::communicator &comm) : Storage < prec >(p, comm),
#else
      SOCRSStorage(alps::params &p, Model &m) : Storage < prec >(p),
#endif
                                                _max_size(p["storage.MAX_SIZE"]),
#ifdef _OPENMP
                                                _nthreads(omp_get_max_threads()),
#else
                                                _nthreads(1),
#endif
                                                _row_offset(_nthreads + 1),
                                                _vind_offset(_nthreads + 1),
                                                _vind(_nthreads),
                                                _vind_byte(_nthreads),
                                                _vind_bit(_nthreads),
                                                _vind_start(_nthreads),
                                                _max_dim(p["storage.MAX_DIM"]),
                                                _model(m) {
        /** init what you need from parameters*/
        col_ind.assign(_max_size, 0);
        // XXX I don't trust myself about this one:
        // (see the sizeof(char) note on the class — this divides by 1)
        signs.assign(std::ceil(_max_size / sizeof(char)), 1);
        dvalues.assign(_max_dim, prec(0.0));
      };

      // Matrix-vector product w = H * v (plus w if clear == false).
      // Each thread walks its own row chunk; the off-diagonal values are
      // reconstructed from the model terms, the stored column indices, and
      // the packed sign bits. `test` is 0/1, so invalid transitions
      // contribute nothing but keep the code branch-free.
      virtual void av(prec *v, prec *w, int n, bool clear = true) {
        _model.symmetry().init();
#ifdef _OPENMP
#pragma omp parallel
        {
          int myid = omp_get_thread_num();
#else
          int myid = 0;
#endif
          // Local cursors into the packed arrays (shadow the member names).
          size_t _vind = _vind_offset[myid];
          size_t _vind_byte = _vind / sizeof(char);
          size_t _vind_bit = _vind % sizeof(char);
          // Iteration over rows.
          for(int i = _row_offset[myid]; (i < _row_offset[myid + 1]) && (i < n); ++i){
            long long nst = _model.symmetry().state_by_index(i);
            // Diagonal contribution.
            w[i] = dvalues[i] * v[i] + (clear ? 0.0 : w[i]);
            // Offdiagonal contribution.
            // Iteration over columns(unordered).
            for (int kkk = 0; kkk < _model.T_states().size(); ++kkk) {
              int test = _model.valid(_model.T_states()[kkk], nst);
              // If transition between states corresponding to row and column is
              // possible, calculate the offdiagonal element.
              w[i] += test * _model.T_states()[kkk].value() * (1 - 2 * ((signs[_vind_byte] >> _vind_bit) & 1)) * v[col_ind[_vind]];
              _vind_bit += test;
              _vind_byte += _vind_bit / sizeof(char);
              _vind_bit %= sizeof(char);
              _vind += test;
            }
            for (int kkk = 0; kkk < _model.V_states().size(); ++kkk) {
              int test = _model.valid(_model.V_states()[kkk], nst);
              // If transition between states corresponding to row and column is
              // possible, calculate the offdiagonal element.
              w[i] += test * _model.V_states()[kkk].value() * (1 - 2 * ((signs[_vind_byte] >> _vind_bit) & 1)) * v[col_ind[_vind]];
              _vind_bit += test;
              _vind_byte += _vind_bit / sizeof(char);
              _vind_bit %= sizeof(char);
              _vind += test;
            }
          }
#ifdef _OPENMP
        }
#endif
      }

      // Re-initialize the model's symmetry iterator.
      void init() {
        _model.symmetry().init();
      }

      // Prepare storage for the current symmetry sector: validate capacity
      // against MAX_DIM/MAX_SIZE and reset the per-thread fill cursors.
      void reset() {
        _model.symmetry().init();
        size_t sector_size = _model.symmetry().sector().size();
        if (sector_size > _max_dim) {
          std::stringstream s;
          s << "New sector request more memory than allocated. Increase MAX_DIM parameter. Requested " << sector_size << ", allocated " << _max_dim << ".";
          throw std::runtime_error(s.str().c_str());
        }
        if (sector_size * (_model.T_states().size() + _model.V_states().size()) > _max_size) {
          std::stringstream s;
          s << "New sector request more memory than allocated. Increase MAX_SIZE parameter. Requested " << sector_size * (_model.T_states().size() + _model.V_states().size()) << ", allocated " << _max_size << ".";
          throw std::runtime_error(s.str().c_str());
        }
        for(int myid = 0; myid < _nthreads; ++myid){
          _vind[myid] = 0;
          _vind_byte[myid] = 0;
          _vind_bit[myid] = 0;
        }
        ntot() = sector_size;
        n() = ntot();
      }

      // Build the compressed Hamiltonian for the current sector: partition
      // the rows evenly across threads, then fill each chunk in parallel.
      void fill() {
        reset();
        // NOTE(review): these three locals are unused here (shadowed or
        // handled inside the helpers).
        int i = 0;
        long long k = 0;
        int isign = 0;
        // Size chunks equally.
        int step = (int)std::floor(_model.symmetry().sector().size() / _nthreads);
        for (int i = 0; i <= _nthreads; i++){
          _row_offset[i] = step * i;
        }
        // Put the rest into some of the first threads.
        // After this, _row_offset[i] = step*i + min(i, more): the first
        // `more` chunks get one extra row each.
        int more = _model.symmetry().sector().size() - _row_offset[_nthreads];
        for (int i = 0; i < more; i++){
          _row_offset[i] += i;
        }
        for (int i = more; i <= _nthreads; i++){
          _row_offset[i] += more;
        }
        // Reserve worst-case space per chunk: every T/V term of every row.
        for(int myid = 0; myid <= _nthreads; ++myid){
          _vind_offset[myid] = (_model.T_states().size() + _model.V_states().size()) * _row_offset[myid];
        }
#ifdef _OPENMP
#pragma omp parallel
        {
          int myid = omp_get_thread_num();
#else
          int myid = 0;
#endif
          // Variant: serial, but more compact.
          // for (int myid = 0; myid < _nthreads; ++myid){
          _vind[myid] = _vind_offset[myid];
          _vind_byte[myid] = _vind[myid] / sizeof(char);
          _vind_bit[myid] = _vind[myid] % sizeof(char);
          for (int i = _row_offset[myid]; i < _row_offset[myid + 1]; ++i) {
            long long nst = _model.symmetry().state_by_index(i);
            // Compute diagonal element for current i state
            addDiagonal(i, _model.diagonal(nst), myid);
            // non-diagonal terms calculation
            off_diagonal < decltype(_model.T_states()) >(nst, i, _model.T_states(), myid);
            off_diagonal < decltype(_model.V_states()) >(nst, i, _model.V_states(), myid);
          }
          // _vind_offset[myid + 1] = _vind[myid];
#ifdef _OPENMP
        }
#endif
        // }
      }

      // Debug helper: reconstructs and prints the dense matrix row by row
      // using the same decoding walk as av().
      void print() {
        // See: av().
        // Each row of the matrix is first restored from the arrays.
        std::vector < prec > line(n(), prec(0.0));
        _model.symmetry().init();
        std::cout << std::setprecision(2) << std::fixed;
        std::cout << "[";
        for (int myid = 0; myid < _nthreads; ++myid) {
          size_t _vind = _vind_offset[myid];
          size_t _vind_byte = _vind / sizeof(char);
          size_t _vind_bit = _vind % sizeof(char);
          for (int i = _row_offset[myid]; i < _row_offset[myid + 1]; ++i) {
            _model.symmetry().next_state();
            long long nst = _model.symmetry().state();
            std::fill(line.begin(), line.end(), prec(0.0));
            line[i] = dvalues[i];
            for (int kkk = 0; kkk < _model.T_states().size(); ++kkk) {
              int test = _model.valid(_model.T_states()[kkk], nst);
              line[col_ind[_vind]] += test * _model.T_states()[kkk].value() * (1 - 2 * ((signs[_vind_byte] >> _vind_bit) & 1));
              _vind_bit += test;
              _vind_byte += _vind_bit / sizeof(char);
              _vind_bit %= sizeof(char);
              _vind += test;
            }
            for (int kkk = 0; kkk < _model.V_states().size(); ++kkk) {
              int test = _model.valid(_model.V_states()[kkk], nst);
              line[col_ind[_vind]] += test * _model.V_states()[kkk].value() * (1 - 2 * ((signs[_vind_byte] >> _vind_bit) & 1));
              _vind_bit += test;
              _vind_byte += _vind_bit / sizeof(char);
              _vind_bit %= sizeof(char);
              _vind += test;
            }
            std::cout << "[";
            for (int j = 0; j < n(); ++j) {
              std::cout << std::setw(6) << line[j] << (j == n() - 1 ? "" : ", ");
            }
            std::cout << "]" << (i == n() - 1 ? "" : ", \n");
          }
          std::cout << "]" << std::endl;
        }
      }

      // Seed a trivial eigenpair: eigenvalue = first diagonal entry,
      // eigenvector = (1).
      virtual void zero_eigenapair() {
        Storage < prec >::eigenvalues().resize(1);
        Storage < prec >::eigenvalues()[0] = dvalues[0];
        Storage < prec >::eigenvectors().assign(1, std::vector < prec >(1, prec(1.0)));
      }

      // Dimension of the vector belonging to `sector`.
      size_t vector_size(typename Model::Sector sector) {
        return sector.size();
      }

#ifdef USE_MPI
      // MPI overload: the communicator is unused, forwards to the serial dot.
      prec vv(const std::vector<prec> & v, const std::vector<prec> & w, MPI_Comm com) {
        return vv(v, w);
      }
#endif

      // Plain dot product <w, v>.
      prec vv(const std::vector<prec> & v, const std::vector<prec> & w) {
        prec alf = prec(0.0);
        for (int k = 0; k < v.size(); ++k) {
          alf += w[k] * v[k];
        }
        return alf;
      }

      // Apply the annihilation (a == true) or creation operator for orbital
      // `iii` to `invec`, writing into `outvec` indexed in `next_sec`.
      void a_adag(int iii, const std::vector < prec > &invec, std::vector < prec > &outvec, const typename Model::Sector& next_sec, bool a) {
        long long k;
        int sign;
        int i = 0;
        while (_model.symmetry().next_state()) {
          long long nst = _model.symmetry().state();
          if (_model.checkState(nst, iii, _model.max_total_electrons()) == (a ? 1 : 0)) {
            if(a) _model.a(iii, nst, k, sign);
            else _model.adag(iii, nst, k, sign);
            int i1 = _model.symmetry().index(k, next_sec);
            outvec[i1] = sign * invec[i];
          }
          ++i;
        };
      }

#ifdef _OPENMP
      int &nprocs() {
        return _nthreads;
      }
#endif

    private:
      // Internal storage structure
      std::vector < prec > dvalues;   // diagonal values, one per row
      std::vector < int > col_ind;    // column index per off-diagonal element
      std::vector < char > signs;     // packed Fermi signs (see class note)
      // the maximum sizes of all the objects
      size_t _max_size;
      size_t _max_dim;
      // number of OMP threads, "1" for serial mode.
      int _nthreads;
      // OMP chunks offset
      std::vector < size_t > _row_offset;
      std::vector < size_t > _vind_offset;
      // internal indicies, only used for filling
      std::vector < size_t > _vind;
      // start of current row, used for checks
      std::vector < size_t > _vind_start;
      // bit and byte of the bitmap corresponding to CRS index
      std::vector < size_t > _vind_bit;
      std::vector < size_t > _vind_byte;
      // Hubbard model parameters
      Model &_model;

      // Record all valid off-diagonal transitions of row `i` generated by
      // the given T or V term list into chunk `chunk`.
      template<typename T_states>
      inline void off_diagonal(long long nst, int i, T_states& states, int chunk) {
        long long k = 0;
        int isign = 0;
        for (int kkk = 0; kkk < states.size(); ++kkk) {
          if (_model.valid(states[kkk], nst)) {
            _model.set(states[kkk], nst, k, isign);
            int k_index = _model.symmetry().index(k);
            addElement(i, k_index, states[kkk].value(), isign, chunk);
          }
        }
      };

      /**
       * Add diagonal H(i,i) element with value v.
       */
      void inline addDiagonal(int i, prec v, int chunk) {
        dvalues[i] = v;
        // Remember where this row's off-diagonal entries begin, for the
        // duplicate check in addElement().
        _vind_start[chunk] = _vind[chunk];
      }

      /**
       * Add off-diagonal H(i,j) element with value t (discarded here, restored in av) and Fermi sign.
       */
      void inline addElement(const int &i, int j, prec t, int sign, int chunk) {
        if (i == j) {
          throw std::logic_error("Attempt to use addElement() to add diagonal element. Use addDiagonal() instead!");
        }
        // It is an error to add the element (i, j) twice.
        for (size_t iii = _vind_start[chunk]; iii < _vind[chunk]; iii++) {
          if (col_ind[iii] == j) {
            throw std::logic_error("Collision. Check a, adag, numState, ninv_value!");
          }
        }
        if (_vind[chunk] >= _vind_offset[chunk+1]) {
          std::stringstream s;
          s << "Current sector request more memory than allocated. Increase MAX_SIZE parameter.";
          throw std::runtime_error(s.str().c_str());
        }
        // Store sign in CRS-like array, one bit per sign.
        col_ind[_vind[chunk]] = j;
        signs[_vind_byte[chunk]] &= ~(1ll << _vind_bit[chunk]);
        signs[_vind_byte[chunk]] |= sign < 0 ? 1ll << _vind_bit[chunk] : 0;
        ++_vind_bit[chunk];
        ++_vind[chunk];
        _vind_byte[chunk] += _vind_bit[chunk] / sizeof(char);
        _vind_bit[chunk] %= sizeof(char);
      }
    };

  }
}

#endif //HUBBARD_SOCRSSTORAGE_H
3.taskloop.c
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include "omp.h"

/* Q1: Execute the program several times and make sure you are able    */
/*     to explain when each thread in the threads team is actually     */
/*     contributing to the execution of work (tasks) generated in the  */
/*     taskloop.                                                       */

/* Simulates a long-running unit of work: sleeps for `value` seconds,
 * logging the executing thread before and after. */
void long_running_task(int value) {
    printf("Thread %d going to sleep for %d seconds\n", omp_get_thread_num(), value);
    sleep(value);
    /* Fixed message typo: "weaking up" -> "waking up". */
    printf("Thread %d waking up after a %d seconds siesta, willing to work ...\n", omp_get_thread_num(), value);
}

/* One iteration of the taskloop body: logs (i, j) and sleeps 1 second. */
void loop_body(int i, int j) {
    printf("Thread %d executing loop body (%d, %d)\n", omp_get_thread_num(), i, j);
    sleep(1);
}

int main(int argc, char *argv[]) {
    /* A single thread creates tasks T1 and T2; T2 creates T3 and T4, and
     * T4 generates the taskloop tasks (TL). Any of the 4 team threads may
     * pick up the generated tasks. */
    #pragma omp parallel num_threads(4)
    #pragma omp single
    {
        printf("I am thread %d and going to create T1 and T2\n", omp_get_thread_num());
        #pragma omp task // Task T1
        long_running_task(5);
        #pragma omp task // Task T2
        {
            #pragma omp task // Task T3
            long_running_task(10); // can execute concurrently
            #pragma omp task // Task T4
            {
                /* nogroup: T4 does not wait for the TL tasks to finish. */
                #pragma omp taskloop grainsize(1) nogroup // Tasks TL
                for (long i = 0; i < 10; i++)
                    for (long j = 0; j < i; j++)
                        loop_body(i, j);
                printf("Thread %d finished the creation of all tasks in taskloop TL\n", omp_get_thread_num());
            }
            printf("Thread %d finished the execution of task creating T3 and T4\n", omp_get_thread_num());
        }
        printf("I am still thread %d after creating T1 and T2, ready to enter in the taskwait\n", omp_get_thread_num());
        /* taskwait only waits for direct children (T1 and T2), not for
         * descendants generated inside them. */
        #pragma omp taskwait
        printf("I am still thread %d, but now after exiting from the taskwait\n", omp_get_thread_num());
    }
    return 0;
}
DRB072-taskdep1-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* Two tasks with depend clause to ensure execution order:
   i is shared for two tasks based on implicit data-sharing attribute rules. */

#include "omprace.h"
#include <omp.h>
#include <assert.h>

int main() {
    omprace_init();
    int i=0;
    #pragma omp parallel
    #pragma omp single
    {
        /* depend(out:i) / depend(in:i) serialize the two writes: the second
           task may only start after the first finishes, so there is no race
           and i ends up 2 (hence "no" in the benchmark name). */
        #pragma omp task depend (out:i)
        i = 1;
        #pragma omp task depend (in:i)
        i = 2;
    }
    assert (i==2);
    omprace_fini();
    return 0;
}
simd-15.c
/* { dg-do run } */
/* { dg-additional-options "-msse2" { target sse2_runtime } } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */

/* Touches b, i, and j so the compiler must honor their linear/private
   clauses; the (*i - *i) and (*j - *j) terms cancel to zero and only the
   linear step x is actually added to *b. */
static inline void
foo (int *b, int *i, int *j, int x)
{
  *b = *b + x + (*i - *i) + (*j - *j);
}

/* Exercises the linear clause on `omp simd` and `omp parallel for simd`
   with various steps, collapse(2), lastprivate, and schedules. Each loop
   checks that b follows its declared linear progression (start 7, step
   per clause); c accumulates violations and must stay 0. */
int
main ()
{
  int i, j, b, c = 0;

  /* linear(b:2), unit-stride loop. */
  i = 4; j = 4; b = 7;
  #pragma omp simd linear(b:2) reduction(+:c)
  for (i = 0; i < 64; i++)
    {
      c = c + (b != 7 + 2 * i);
      foo (&b, &i, &j, 2);
    }
  if (c || i != 64 || b != 7 + 64 * 2)
    __builtin_abort ();

  /* linear(b:3) with loop increment 4: b steps once per iteration. */
  i = 4; j = 4; b = 7;
  #pragma omp simd linear(b:3) reduction(+:c)
  for (i = 0; i < 64; i += 4)
    {
      c = c + (b != 7 + i / 4 * 3);
      foo (&b, &i, &j, 3);
    }
  if (c || i != 64 || b != 7 + 16 * 3)
    __builtin_abort ();

  /* Explicit linear(i) for the induction variable as well. */
  i = 4; j = 4; b = 7;
  #pragma omp simd linear(i) linear(b:2) reduction(+:c)
  for (i = 0; i < 64; i++)
    {
      c = c + (b != 7 + 2 * i);
      foo (&b, &i, &j, 2);
    }
  if (c || i != 64 || b != 7 + 64 * 2)
    __builtin_abort ();

  /* linear(i:4) matching the loop increment. */
  i = 4; j = 4; b = 7;
  #pragma omp simd linear(i:4) linear(b:3) reduction(+:c)
  for (i = 0; i < 64; i += 4)
    {
      c = c + (b != 7 + i / 4 * 3);
      foo (&b, &i, &j, 3);
    }
  if (c || i != 64 || b != 7 + 16 * 3)
    __builtin_abort ();

  /* collapse(2): b is linear over the collapsed 8x8 iteration space. */
  i = 4; j = 4; b = 7;
  #pragma omp simd collapse (2) linear(b:2) reduction(+:c)
  for (i = 0; i < 8; i++)
    for (j = 0; j < 8; j++)
      {
	c = c + (b != 7 + 2 * j + 2 * 8 * i);
	foo (&b, &i, &j, 2);
      }
  if (c || i != 8 || j != 8 || b != 7 + 64 * 2)
    __builtin_abort ();

  /* collapse(2) with explicit lastprivate(i, j). */
  i = 4; j = 4; b = 7;
  #pragma omp simd collapse (2) lastprivate (i, j) linear(b:2) reduction(+:c)
  for (i = 0; i < 8; i++)
    for (j = 0; j < 8; j++)
      {
	c = c + (b != 7 + 2 * j + 2 * 8 * i);
	foo (&b, &i, &j, 2);
      }
  if (c || i != 8 || j != 8 || b != 7 + 64 * 2)
    __builtin_abort ();

  /* Same battery again on the combined parallel for simd construct. */
  i = 4; j = 4; b = 7;
  #pragma omp parallel for simd schedule (static, 4) linear(b:2) reduction(+:c)
  for (i = 0; i < 64; i++)
    {
      c = c + (b != 7 + 2 * i);
      foo (&b, &i, &j, 2);
    }
  if (c || i != 64 || b != 7 + 64 * 2)
    __builtin_abort ();

  i = 4; j = 4; b = 7;
  #pragma omp parallel for simd schedule (static, 4) linear(b:3) reduction(+:c)
  for (i = 0; i < 64; i += 4)
    {
      c = c + (b != 7 + i / 4 * 3);
      foo (&b, &i, &j, 3);
    }
  if (c || i != 64 || b != 7 + 16 * 3)
    __builtin_abort ();

  i = 4; j = 4; b = 7;
  #pragma omp parallel for simd schedule (static, 4) linear(i) linear(b:2) reduction(+:c)
  for (i = 0; i < 64; i++)
    {
      c = c + (b != 7 + 2 * i);
      foo (&b, &i, &j, 2);
    }
  if (c || i != 64 || b != 7 + 64 * 2)
    __builtin_abort ();

  i = 4; j = 4; b = 7;
  #pragma omp parallel for simd schedule (static, 4) linear(i:4) linear(b:3) reduction(+:c)
  for (i = 0; i < 64; i += 4)
    {
      c = c + (b != 7 + i / 4 * 3);
      foo (&b, &i, &j, 3);
    }
  if (c || i != 64 || b != 7 + 16 * 3)
    __builtin_abort ();

  i = 4; j = 4; b = 7;
  #pragma omp parallel for simd lastprivate (i, j) collapse (2) schedule (static, 4) linear(b:2) reduction(+:c)
  for (i = 0; i < 8; i++)
    for (j = 0; j < 8; j++)
      {
	c = c + (b != 7 + 2 * j + 2 * 8 * i);
	foo (&b, &i, &j, 2);
      }
  if (c || i != 8 || j != 8 || b != 7 + 64 * 2)
    __builtin_abort ();

  i = 4; j = 4; b = 7;
  #pragma omp parallel for simd collapse (2) schedule (static, 4) linear(b:2) reduction(+:c)
  for (i = 0; i < 8; i++)
    for (j = 0; j < 8; j++)
      {
	c = c + (b != 7 + 2 * j + 2 * 8 * i);
	foo (&b, &i, &j, 2);
      }
  if (c || i != 8 || j != 8 || b != 7 + 64 * 2)
    __builtin_abort ();

  return 0;
}