source
stringlengths
3
92
c
stringlengths
26
2.25M
convolution_3x3_pack4_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd63_transform_kernel_pack4_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt) { // winograd63 transform kernel Mat kernel_tm; kernel_tm.create(8 * 8, inch, outch); const float ktm[8][3] = { {1.0f, 0.0f, 0.0f}, {-2.0f / 9, -2.0f / 9, -2.0f / 9}, {-2.0f / 9, 2.0f / 9, -2.0f / 9}, {1.0f / 90, 1.0f / 45, 2.0f / 45}, {1.0f / 90, -1.0f / 45, 2.0f / 45}, {1.0f / 45, 1.0f / 90, 1.0f / 180}, {1.0f / 45, -1.0f / 90, 1.0f / 180}, {0.0f, 0.0f, 1.0f} }; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel, transposed const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[8][3]; for (int i = 0; i < 8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // v for (int j = 0; j < 8; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 8; i++) { kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * 
ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 64-inch-outch // dst = 4b-4a-inch/4a-64-outch/4b; kernel_tm_pack4.create(2 * inch / 4, 64, (outch / 4) / 2 + (outch / 4) % 2, (size_t)2u * 16, 16); int q = 0; for (; q + 7 < outch; q += 8) { const Mat k0 = kernel_tm.channel(q); const Mat k1 = kernel_tm.channel(q + 1); const Mat k2 = kernel_tm.channel(q + 2); const Mat k3 = kernel_tm.channel(q + 3); const Mat k4 = kernel_tm.channel(q + 4); const Mat k5 = kernel_tm.channel(q + 5); const Mat k6 = kernel_tm.channel(q + 6); const Mat k7 = kernel_tm.channel(q + 7); Mat g0 = kernel_tm_pack4.channel(q / 8); for (int k = 0; k < 64; k++) { __fp16* g00 = g0.row<__fp16>(k); for (int p = 0; p + 3 < inch; p += 4) { const float* k00 = k0.row(p); const float* k01 = k0.row(p + 1); const float* k02 = k0.row(p + 2); const float* k03 = k0.row(p + 3); const float* k10 = k1.row(p); const float* k11 = k1.row(p + 1); const float* k12 = k1.row(p + 2); const float* k13 = k1.row(p + 3); const float* k20 = k2.row(p); const float* k21 = k2.row(p + 1); const float* k22 = k2.row(p + 2); const float* k23 = k2.row(p + 3); const float* k30 = k3.row(p); const float* k31 = k3.row(p + 1); const float* k32 = k3.row(p + 2); const float* k33 = k3.row(p + 3); const float* k40 = k4.row(p); const float* k41 = k4.row(p + 1); const float* k42 = k4.row(p + 2); const float* k43 = k4.row(p + 3); const float* k50 = k5.row(p); const float* k51 = k5.row(p + 1); const float* k52 = k5.row(p + 2); const float* k53 = k5.row(p + 3); const float* k60 = k6.row(p); const float* k61 = k6.row(p + 1); const float* k62 = k6.row(p + 2); const float* k63 = k6.row(p + 3); const float* k70 = k7.row(p); const float* k71 = k7.row(p + 1); const float* k72 = k7.row(p + 2); const float* k73 = k7.row(p + 3); g00[0] = (__fp16)k00[k]; g00[1] = (__fp16)k10[k]; g00[2] = (__fp16)k20[k]; g00[3] = (__fp16)k30[k]; g00[4] = (__fp16)k40[k]; g00[5] = (__fp16)k50[k]; g00[6] = (__fp16)k60[k]; g00[7] = (__fp16)k70[k]; g00[8] = 
(__fp16)k01[k]; g00[9] = (__fp16)k11[k]; g00[10] = (__fp16)k21[k]; g00[11] = (__fp16)k31[k]; g00[12] = (__fp16)k41[k]; g00[13] = (__fp16)k51[k]; g00[14] = (__fp16)k61[k]; g00[15] = (__fp16)k71[k]; g00[16] = (__fp16)k02[k]; g00[17] = (__fp16)k12[k]; g00[18] = (__fp16)k22[k]; g00[19] = (__fp16)k32[k]; g00[20] = (__fp16)k42[k]; g00[21] = (__fp16)k52[k]; g00[22] = (__fp16)k62[k]; g00[23] = (__fp16)k72[k]; g00[24] = (__fp16)k03[k]; g00[25] = (__fp16)k13[k]; g00[26] = (__fp16)k23[k]; g00[27] = (__fp16)k33[k]; g00[28] = (__fp16)k43[k]; g00[29] = (__fp16)k53[k]; g00[30] = (__fp16)k63[k]; g00[31] = (__fp16)k73[k]; g00 += 32; } } } for (; q + 3 < outch; q += 4) { const Mat k0 = kernel_tm.channel(q); const Mat k1 = kernel_tm.channel(q + 1); const Mat k2 = kernel_tm.channel(q + 2); const Mat k3 = kernel_tm.channel(q + 3); Mat g0 = kernel_tm_pack4.channel(q / 8 + (q % 8) / 4); for (int k = 0; k < 64; k++) { __fp16* g00 = g0.row<__fp16>(k); for (int p = 0; p + 3 < inch; p += 4) { const float* k00 = k0.row(p); const float* k01 = k0.row(p + 1); const float* k02 = k0.row(p + 2); const float* k03 = k0.row(p + 3); const float* k10 = k1.row(p); const float* k11 = k1.row(p + 1); const float* k12 = k1.row(p + 2); const float* k13 = k1.row(p + 3); const float* k20 = k2.row(p); const float* k21 = k2.row(p + 1); const float* k22 = k2.row(p + 2); const float* k23 = k2.row(p + 3); const float* k30 = k3.row(p); const float* k31 = k3.row(p + 1); const float* k32 = k3.row(p + 2); const float* k33 = k3.row(p + 3); g00[0] = (__fp16)k00[k]; g00[1] = (__fp16)k10[k]; g00[2] = (__fp16)k20[k]; g00[3] = (__fp16)k30[k]; g00[4] = (__fp16)k01[k]; g00[5] = (__fp16)k11[k]; g00[6] = (__fp16)k21[k]; g00[7] = (__fp16)k31[k]; g00[8] = (__fp16)k02[k]; g00[9] = (__fp16)k12[k]; g00[10] = (__fp16)k22[k]; g00[11] = (__fp16)k32[k]; g00[12] = (__fp16)k03[k]; g00[13] = (__fp16)k13[k]; g00[14] = (__fp16)k23[k]; g00[15] = (__fp16)k33[k]; g00 += 16; } } } } static void conv3x3s1_winograd63_pack4_fp16sa_neon(const Mat& 
bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); // BEGIN transform input Mat bottom_blob_tm; { int w_tiles = outw / 6; int h_tiles = outh / 6; const int tiles = w_tiles * h_tiles; bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); conv3x3s1_winograd63_transform_input_pack4_fp16sa_neon(bottom_blob_bordered, bottom_blob_tm, opt); } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; // permute // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, 2u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 7 < tiles; i += 8) { __fp16* tm2p = tm2.row<__fp16>(i / 8); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x8 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n" "st1 
{v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3"); r0 += bottom_blob_tm.cstep * 4; } } for (; i + 3 < tiles; i += 4) { __fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x4 asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld4 {v0.4h, v1.4h, v2.4h, v3.4h}, [%0] \n" "st1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%1], #32 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3"); r0 += bottom_blob_tm.cstep * 4; } } for (; i < tiles; i++) { __fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + i % 4); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #64] \n" "ld1 {v0.4h}, [%0] \n" "st1 {v0.4h}, [%1], #8 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0"); r0 += bottom_blob_tm.cstep * 4; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, 2u * elempack, elempack, opt.workspace_allocator); int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 1; remain_outch_start = nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; __fp16* output0_tm = top_blob_tm.channel(p); __fp16* output1_tm = top_blob_tm.channel(p + 1); const Mat kernel01_tm = kernel_tm.channel(pp); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 7 < tiles; i += 8) { const __fp16* r0 = bb2.row<const __fp16>(i / 8); const __fp16* kptr = kernel01_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor 
v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" // r01 r23 r45 r67 "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%4], #64 \n" // k0123 "fmla v24.8h, v4.8h, v0.h[0] \n" "fmla v25.8h, v4.8h, v0.h[1] \n" "fmla v26.8h, v4.8h, v0.h[2] \n" "fmla v27.8h, v4.8h, v0.h[3] \n" "fmla v28.8h, v4.8h, v0.h[4] \n" "fmla v29.8h, v4.8h, v0.h[5] \n" "fmla v30.8h, v4.8h, v0.h[6] \n" "fmla v31.8h, v4.8h, v0.h[7] \n" "fmla v24.8h, v5.8h, v1.h[0] \n" "fmla v25.8h, v5.8h, v1.h[1] \n" "fmla v26.8h, v5.8h, v1.h[2] \n" "fmla v27.8h, v5.8h, v1.h[3] \n" "fmla v28.8h, v5.8h, v1.h[4] \n" "fmla v29.8h, v5.8h, v1.h[5] \n" "fmla v30.8h, v5.8h, v1.h[6] \n" "fmla v31.8h, v5.8h, v1.h[7] \n" "fmla v24.8h, v6.8h, v2.h[0] \n" "fmla v25.8h, v6.8h, v2.h[1] \n" "fmla v26.8h, v6.8h, v2.h[2] \n" "fmla v27.8h, v6.8h, v2.h[3] \n" "fmla v28.8h, v6.8h, v2.h[4] \n" "fmla v29.8h, v6.8h, v2.h[5] \n" "fmla v30.8h, v6.8h, v2.h[6] \n" "fmla v31.8h, v6.8h, v2.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.8h, v7.8h, v3.h[0] \n" "fmla v25.8h, v7.8h, v3.h[1] \n" "fmla v26.8h, v7.8h, v3.h[2] \n" "fmla v27.8h, v7.8h, v3.h[3] \n" "fmla v28.8h, v7.8h, v3.h[4] \n" "fmla v29.8h, v7.8h, v3.h[5] \n" "fmla v30.8h, v7.8h, v3.h[6] \n" "fmla v31.8h, v7.8h, v3.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n" "ext v24.16b, v24.16b, v24.16b, #8 \n" "ext v25.16b, v25.16b, v25.16b, #8 \n" "ext v26.16b, v26.16b, v26.16b, #8 \n" "ext v27.16b, v27.16b, v27.16b, #8 \n" "ext v28.16b, v28.16b, v28.16b, #8 \n" "ext v29.16b, v29.16b, v29.16b, #8 \n" "ext v30.16b, v30.16b, v30.16b, #8 \n" "ext v31.16b, v31.16b, v31.16b, #8 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(kptr) // %4 : "0"(nn), 
"1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4); const __fp16* kptr = kernel01_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" // r01 r23 r45 r67 "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%4], #64 \n" // k0123 "fmla v24.8h, v4.8h, v0.h[0] \n" "fmla v25.8h, v4.8h, v0.h[1] \n" "fmla v26.8h, v4.8h, v0.h[2] \n" "fmla v27.8h, v4.8h, v0.h[3] \n" "fmla v24.8h, v5.8h, v1.h[0] \n" "fmla v25.8h, v5.8h, v1.h[1] \n" "fmla v26.8h, v5.8h, v1.h[2] \n" "fmla v27.8h, v5.8h, v1.h[3] \n" "fmla v24.8h, v6.8h, v2.h[0] \n" "fmla v25.8h, v6.8h, v2.h[1] \n" "fmla v26.8h, v6.8h, v2.h[2] \n" "fmla v27.8h, v6.8h, v2.h[3] \n" "subs %w0, %w0, #1 \n" "fmla v24.8h, v7.8h, v3.h[0] \n" "fmla v25.8h, v7.8h, v3.h[1] \n" "fmla v26.8h, v7.8h, v3.h[2] \n" "fmla v27.8h, v7.8h, v3.h[3] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "ext v24.16b, v24.16b, v24.16b, #8 \n" "ext v25.16b, v25.16b, v25.16b, #8 \n" "ext v26.16b, v26.16b, v26.16b, #8 \n" "ext v27.16b, v27.16b, v27.16b, #8 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(kptr) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v24", "v25", "v26", "v27"); } for (; i < tiles; i++) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4); const __fp16* kptr = kernel01_tm.row<const __fp16>(r); float16x8_t _sum0 = vdupq_n_f16(0.f); for (int q 
= 0; q < inch; q++) { float16x4_t _r0 = vld1_f16(r0); float16x8_t _k0 = vld1q_f16(kptr); float16x8_t _k1 = vld1q_f16(kptr + 8); float16x8_t _k2 = vld1q_f16(kptr + 16); float16x8_t _k3 = vld1q_f16(kptr + 24); _sum0 = vfmaq_lane_f16(_sum0, _k0, _r0, 0); _sum0 = vfmaq_lane_f16(_sum0, _k1, _r0, 1); _sum0 = vfmaq_lane_f16(_sum0, _k2, _r0, 2); _sum0 = vfmaq_lane_f16(_sum0, _k3, _r0, 3); kptr += 32; r0 += 4; } vst1_f16(output0_tm, vget_low_f16(_sum0)); vst1_f16(output1_tm, vget_high_f16(_sum0)); output0_tm += 4; output1_tm += 4; } } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { __fp16* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p / 2 + p % 2); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 7 < tiles; i += 8) { const __fp16* r0 = bb2.row<const __fp16>(i / 8); const __fp16* kptr = kernel0_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r01 r23 r45 r67 "prfm pldl1keep, [%3, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3], #32 \n" // k0123 "fmla v24.4h, v4.4h, v0.h[0] \n" "fmla v25.4h, v4.4h, v0.h[1] \n" "fmla v26.4h, v4.4h, v0.h[2] \n" "fmla v27.4h, v4.4h, v0.h[3] \n" "fmla v28.4h, v4.4h, v0.h[4] \n" "fmla v29.4h, v4.4h, v0.h[5] \n" "fmla v30.4h, v4.4h, v0.h[6] \n" "fmla v31.4h, v4.4h, v0.h[7] \n" "fmla v24.4h, v5.4h, v1.h[0] \n" "fmla v25.4h, v5.4h, v1.h[1] \n" "fmla v26.4h, v5.4h, v1.h[2] \n" "fmla v27.4h, v5.4h, v1.h[3] \n" "fmla v28.4h, v5.4h, v1.h[4] \n" "fmla v29.4h, v5.4h, v1.h[5] \n" "fmla v30.4h, v5.4h, v1.h[6] \n" "fmla v31.4h, 
v5.4h, v1.h[7] \n" "fmla v24.4h, v6.4h, v2.h[0] \n" "fmla v25.4h, v6.4h, v2.h[1] \n" "fmla v26.4h, v6.4h, v2.h[2] \n" "fmla v27.4h, v6.4h, v2.h[3] \n" "fmla v28.4h, v6.4h, v2.h[4] \n" "fmla v29.4h, v6.4h, v2.h[5] \n" "fmla v30.4h, v6.4h, v2.h[6] \n" "fmla v31.4h, v6.4h, v2.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.4h, v7.4h, v3.h[0] \n" "fmla v25.4h, v7.4h, v3.h[1] \n" "fmla v26.4h, v7.4h, v3.h[2] \n" "fmla v27.4h, v7.4h, v3.h[3] \n" "fmla v28.4h, v7.4h, v3.h[4] \n" "fmla v29.4h, v7.4h, v3.h[5] \n" "fmla v30.4h, v7.4h, v3.h[6] \n" "fmla v31.4h, v7.4h, v3.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4); const __fp16* kptr = kernel0_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" // r01 r23 r45 r67 "prfm pldl1keep, [%3, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3], #32 \n" // k0123 "fmla v24.4h, v4.4h, v0.h[0] \n" "fmla v25.4h, v4.4h, v0.h[1] \n" "fmla v26.4h, v4.4h, v0.h[2] \n" "fmla v27.4h, v4.4h, v0.h[3] \n" "fmla v24.4h, v5.4h, v1.h[0] \n" "fmla v25.4h, v5.4h, v1.h[1] \n" "fmla v26.4h, v5.4h, v1.h[2] \n" "fmla v27.4h, v5.4h, v1.h[3] \n" "fmla v24.4h, v6.4h, v2.h[0] \n" "fmla v25.4h, v6.4h, v2.h[1] \n" "fmla v26.4h, v6.4h, v2.h[2] \n" "fmla v27.4h, v6.4h, v2.h[3] \n" "subs %w0, %w0, #1 \n" "fmla v24.4h, v7.4h, v3.h[0] \n" "fmla v25.4h, v7.4h, v3.h[1] \n" "fmla v26.4h, v7.4h, v3.h[2] \n" "fmla 
v27.4h, v7.4h, v3.h[3] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v24", "v25", "v26", "v27"); } for (; i < tiles; i++) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4); const __fp16* kptr = kernel0_tm.row<const __fp16>(r); float16x4_t _sum0 = vdup_n_f16(0.f); for (int q = 0; q < inch; q++) { float16x4_t _r0 = vld1_f16(r0); float16x4_t _k0 = vld1_f16(kptr); float16x4_t _k1 = vld1_f16(kptr + 4); float16x4_t _k2 = vld1_f16(kptr + 8); float16x4_t _k3 = vld1_f16(kptr + 12); _sum0 = vfma_lane_f16(_sum0, _k0, _r0, 0); _sum0 = vfma_lane_f16(_sum0, _k1, _r0, 1); _sum0 = vfma_lane_f16(_sum0, _k2, _r0, 2); _sum0 = vfma_lane_f16(_sum0, _k3, _r0, 3); kptr += 16; r0 += 4; } vst1_f16(output0_tm, _sum0); output0_tm += 4; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, 2u * 4, 4, opt.workspace_allocator); } { conv3x3s1_winograd63_transform_output_pack4_fp16sa_neon(top_blob_tm, top_blob_bordered, bias, opt); } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); } static void conv3x3s1_pack4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const __fp16* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out0 = top_blob.channel(p); float16x4_t _bias0 = bias ? 
vld1_f16(bias + p * 4) : vdup_n_f16((__fp16)0.f); out0.fill(_bias0); int q = 0; for (; q < inch; q++) { __fp16* outptr0 = out0.row<__fp16>(0); const Mat img0 = bottom_blob.channel(q); const __fp16* r0 = img0.row<const __fp16>(0); const __fp16* r1 = img0.row<const __fp16>(1); const __fp16* r2 = img0.row<const __fp16>(2); const __fp16* kptr = kernel.channel(p).row<const __fp16>(q); // 16 * 9 float16x8_t _k00_01 = vld1q_f16(kptr); float16x8_t _k00_23 = vld1q_f16(kptr + 8); float16x8_t _k01_01 = vld1q_f16(kptr + 16); float16x8_t _k01_23 = vld1q_f16(kptr + 24); float16x8_t _k02_01 = vld1q_f16(kptr + 32); float16x8_t _k02_23 = vld1q_f16(kptr + 40); float16x8_t _k10_01 = vld1q_f16(kptr + 48); float16x8_t _k10_23 = vld1q_f16(kptr + 56); float16x8_t _k11_01 = vld1q_f16(kptr + 64); float16x8_t _k11_23 = vld1q_f16(kptr + 72); float16x8_t _k12_01 = vld1q_f16(kptr + 80); float16x8_t _k12_23 = vld1q_f16(kptr + 88); float16x8_t _k20_01 = vld1q_f16(kptr + 96); float16x8_t _k20_23 = vld1q_f16(kptr + 104); float16x8_t _k21_01 = vld1q_f16(kptr + 112); float16x8_t _k21_23 = vld1q_f16(kptr + 120); float16x8_t _k22_01 = vld1q_f16(kptr + 128); float16x8_t _k22_23 = vld1q_f16(kptr + 136); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%0] \n" // sum0 sum1 sum2 sum3 "prfm pldl1keep, [%1, #384] \n" "ld1 {v0.8h, v1.8h, v2.8h}, [%1] \n" // r00 r01 r02 r03 r04 r05 "ext v6.16b, %8.16b, %8.16b, #8 \n" "fmla v10.4h, %8.4h, v0.h[0] \n" "fmla v11.4h, %8.4h, v0.h[4] \n" "fmla v12.4h, %8.4h, v1.h[0] \n" "fmla v13.4h, %8.4h, v1.h[4] \n" "fmla v10.4h, v6.4h, v0.h[1] \n" "fmla v11.4h, v6.4h, v0.h[5] \n" "fmla v12.4h, v6.4h, v1.h[1] \n" "fmla v13.4h, v6.4h, v1.h[5] \n" "ext v7.16b, %9.16b, %9.16b, #8 \n" "fmla v10.4h, %9.4h, v0.h[2] \n" "fmla v11.4h, %9.4h, v0.h[6] \n" "fmla v12.4h, %9.4h, v1.h[2] \n" "fmla v13.4h, %9.4h, v1.h[6] \n" "fmla v10.4h, v7.4h, v0.h[3] \n" "fmla v11.4h, v7.4h, 
v0.h[7] \n" "fmla v12.4h, v7.4h, v1.h[3] \n" "fmla v13.4h, v7.4h, v1.h[7] \n" "ext v8.16b, %10.16b, %10.16b, #8 \n" "fmla v10.4h, %10.4h, v0.h[4] \n" "fmla v11.4h, %10.4h, v1.h[0] \n" "fmla v12.4h, %10.4h, v1.h[4] \n" "fmla v13.4h, %10.4h, v2.h[0] \n" "fmla v10.4h, v8.4h, v0.h[5] \n" "fmla v11.4h, v8.4h, v1.h[1] \n" "fmla v12.4h, v8.4h, v1.h[5] \n" "fmla v13.4h, v8.4h, v2.h[1] \n" "ext v9.16b, %11.16b, %11.16b, #8 \n" "fmla v10.4h, %11.4h, v0.h[6] \n" "fmla v11.4h, %11.4h, v1.h[2] \n" "fmla v12.4h, %11.4h, v1.h[6] \n" "fmla v13.4h, %11.4h, v2.h[2] \n" "fmla v10.4h, v9.4h, v0.h[7] \n" "fmla v11.4h, v9.4h, v1.h[3] \n" "fmla v12.4h, v9.4h, v1.h[7] \n" "fmla v13.4h, v9.4h, v2.h[3] \n" "prfm pldl1keep, [%2, #384] \n" "ld1 {v3.8h, v4.8h, v5.8h}, [%2] \n" // r10 r11 r12 r13 r14 r15 "ext v6.16b, %12.16b, %12.16b, #8 \n" "fmla v10.4h, %12.4h, v1.h[0] \n" "fmla v11.4h, %12.4h, v1.h[4] \n" "fmla v12.4h, %12.4h, v2.h[0] \n" "fmla v13.4h, %12.4h, v2.h[4] \n" "fmla v10.4h, v6.4h, v1.h[1] \n" "fmla v11.4h, v6.4h, v1.h[5] \n" "fmla v12.4h, v6.4h, v2.h[1] \n" "fmla v13.4h, v6.4h, v2.h[5] \n" "ext v7.16b, %13.16b, %13.16b, #8 \n" "fmla v10.4h, %13.4h, v1.h[2] \n" "fmla v11.4h, %13.4h, v1.h[6] \n" "fmla v12.4h, %13.4h, v2.h[2] \n" "fmla v13.4h, %13.4h, v2.h[6] \n" "fmla v10.4h, v7.4h, v1.h[3] \n" "fmla v11.4h, v7.4h, v1.h[7] \n" "fmla v12.4h, v7.4h, v2.h[3] \n" "fmla v13.4h, v7.4h, v2.h[7] \n" "ext v8.16b, %14.16b, %14.16b, #8 \n" "fmla v10.4h, %14.4h, v3.h[0] \n" "fmla v11.4h, %14.4h, v3.h[4] \n" "fmla v12.4h, %14.4h, v4.h[0] \n" "fmla v13.4h, %14.4h, v4.h[4] \n" "fmla v10.4h, v8.4h, v3.h[1] \n" "fmla v11.4h, v8.4h, v3.h[5] \n" "fmla v12.4h, v8.4h, v4.h[1] \n" "fmla v13.4h, v8.4h, v4.h[5] \n" "ext v9.16b, %15.16b, %15.16b, #8 \n" "fmla v10.4h, %15.4h, v3.h[2] \n" "fmla v11.4h, %15.4h, v3.h[6] \n" "fmla v12.4h, %15.4h, v4.h[2] \n" "fmla v13.4h, %15.4h, v4.h[6] \n" "fmla v10.4h, v9.4h, v3.h[3] \n" "fmla v11.4h, v9.4h, v3.h[7] \n" "fmla v12.4h, v9.4h, v4.h[3] \n" "fmla v13.4h, v9.4h, 
v4.h[7] \n" "ext v6.16b, %16.16b, %16.16b, #8 \n" "fmla v10.4h, %16.4h, v3.h[4] \n" "fmla v11.4h, %16.4h, v4.h[0] \n" "fmla v12.4h, %16.4h, v4.h[4] \n" "fmla v13.4h, %16.4h, v5.h[0] \n" "fmla v10.4h, v6.4h, v3.h[5] \n" "fmla v11.4h, v6.4h, v4.h[1] \n" "fmla v12.4h, v6.4h, v4.h[5] \n" "fmla v13.4h, v6.4h, v5.h[1] \n" "ext v7.16b, %17.16b, %17.16b, #8 \n" "fmla v10.4h, %17.4h, v3.h[6] \n" "fmla v11.4h, %17.4h, v4.h[2] \n" "fmla v12.4h, %17.4h, v4.h[6] \n" "fmla v13.4h, %17.4h, v5.h[2] \n" "fmla v10.4h, v7.4h, v3.h[7] \n" "fmla v11.4h, v7.4h, v4.h[3] \n" "fmla v12.4h, v7.4h, v4.h[7] \n" "fmla v13.4h, v7.4h, v5.h[3] \n" "prfm pldl1keep, [%3, #384] \n" "ld1 {v0.8h, v1.8h, v2.8h}, [%3] \n" // r20 r21 r22 r23 r24 r25 "ext v8.16b, %18.16b, %18.16b, #8 \n" "fmla v10.4h, %18.4h, v4.h[0] \n" "fmla v11.4h, %18.4h, v4.h[4] \n" "fmla v12.4h, %18.4h, v5.h[0] \n" "fmla v13.4h, %18.4h, v5.h[4] \n" "fmla v10.4h, v8.4h, v4.h[1] \n" "fmla v11.4h, v8.4h, v4.h[5] \n" "fmla v12.4h, v8.4h, v5.h[1] \n" "fmla v13.4h, v8.4h, v5.h[5] \n" "ext v9.16b, %19.16b, %19.16b, #8 \n" "fmla v10.4h, %19.4h, v4.h[2] \n" "fmla v11.4h, %19.4h, v4.h[6] \n" "fmla v12.4h, %19.4h, v5.h[2] \n" "fmla v13.4h, %19.4h, v5.h[6] \n" "fmla v10.4h, v9.4h, v4.h[3] \n" "fmla v11.4h, v9.4h, v4.h[7] \n" "fmla v12.4h, v9.4h, v5.h[3] \n" "fmla v13.4h, v9.4h, v5.h[7] \n" "ext v6.16b, %20.16b, %20.16b, #8 \n" "fmla v10.4h, %20.4h, v0.h[0] \n" "fmla v11.4h, %20.4h, v0.h[4] \n" "fmla v12.4h, %20.4h, v1.h[0] \n" "fmla v13.4h, %20.4h, v1.h[4] \n" "fmla v10.4h, v6.4h, v0.h[1] \n" "fmla v11.4h, v6.4h, v0.h[5] \n" "fmla v12.4h, v6.4h, v1.h[1] \n" "fmla v13.4h, v6.4h, v1.h[5] \n" "ext v7.16b, %21.16b, %21.16b, #8 \n" "fmla v10.4h, %21.4h, v0.h[2] \n" "fmla v11.4h, %21.4h, v0.h[6] \n" "fmla v12.4h, %21.4h, v1.h[2] \n" "fmla v13.4h, %21.4h, v1.h[6] \n" "fmla v10.4h, v7.4h, v0.h[3] \n" "fmla v11.4h, v7.4h, v0.h[7] \n" "fmla v12.4h, v7.4h, v1.h[3] \n" "fmla v13.4h, v7.4h, v1.h[7] \n" "ext v8.16b, %22.16b, %22.16b, #8 \n" "fmla v10.4h, 
%22.4h, v0.h[4] \n" "fmla v11.4h, %22.4h, v1.h[0] \n" "fmla v12.4h, %22.4h, v1.h[4] \n" "fmla v13.4h, %22.4h, v2.h[0] \n" "fmla v10.4h, v8.4h, v0.h[5] \n" "fmla v11.4h, v8.4h, v1.h[1] \n" "fmla v12.4h, v8.4h, v1.h[5] \n" "fmla v13.4h, v8.4h, v2.h[1] \n" "ext v9.16b, %23.16b, %23.16b, #8 \n" "fmla v10.4h, %23.4h, v0.h[6] \n" "fmla v11.4h, %23.4h, v1.h[2] \n" "fmla v12.4h, %23.4h, v1.h[6] \n" "fmla v13.4h, %23.4h, v2.h[2] \n" "fmla v10.4h, v9.4h, v0.h[7] \n" "fmla v11.4h, v9.4h, v1.h[3] \n" "fmla v12.4h, v9.4h, v1.h[7] \n" "fmla v13.4h, v9.4h, v2.h[3] \n" "ext v6.16b, %24.16b, %24.16b, #8 \n" "fmla v10.4h, %24.4h, v1.h[0] \n" "fmla v11.4h, %24.4h, v1.h[4] \n" "fmla v12.4h, %24.4h, v2.h[0] \n" "fmla v13.4h, %24.4h, v2.h[4] \n" "add %1, %1, #32 \n" "fmla v10.4h, v6.4h, v1.h[1] \n" "fmla v11.4h, v6.4h, v1.h[5] \n" "fmla v12.4h, v6.4h, v2.h[1] \n" "fmla v13.4h, v6.4h, v2.h[5] \n" "ext v7.16b, %25.16b, %25.16b, #8 \n" "fmla v10.4h, %25.4h, v1.h[2] \n" "fmla v11.4h, %25.4h, v1.h[6] \n" "fmla v12.4h, %25.4h, v2.h[2] \n" "fmla v13.4h, %25.4h, v2.h[6] \n" "add %2, %2, #32 \n" "fmla v10.4h, v7.4h, v1.h[3] \n" "fmla v11.4h, v7.4h, v1.h[7] \n" "fmla v12.4h, v7.4h, v2.h[3] \n" "fmla v13.4h, v7.4h, v2.h[7] \n" "add %3, %3, #32 \n" "st1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00_01), // %8 "w"(_k00_23), // %9 "w"(_k01_01), // %10 "w"(_k01_23), // %11 "w"(_k02_01), // %12 "w"(_k02_23), // %13 "w"(_k10_01), // %14 "w"(_k10_23), // %15 "w"(_k11_01), // %16 "w"(_k11_23), // %17 "w"(_k12_01), // %18 "w"(_k12_23), // %19 "w"(_k20_01), // %20 "w"(_k20_23), // %21 "w"(_k21_01), // %22 "w"(_k21_23), // %23 "w"(_k22_01), // %24 "w"(_k22_23) // %25 : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13"); } for (; j + 1 < outw; j += 2) { asm volatile( "prfm pldl1keep, [%1, #256] \n" "ld1 {v0.8h, v1.8h}, [%1] \n" // r00 r01 
r02 r03 "prfm pldl1keep, [%0, #128] \n" "ld1 {v12.4h, v13.4h}, [%0] \n" // sum0 sum1 "ext v4.16b, %8.16b, %8.16b, #8 \n" "fmul v10.4h, %8.4h, v0.h[0] \n" "fmul v11.4h, %8.4h, v0.h[4] \n" "fmla v12.4h, v4.4h, v0.h[1] \n" "fmla v13.4h, v4.4h, v0.h[5] \n" "ext v5.16b, %9.16b, %9.16b, #8 \n" "fmla v10.4h, %9.4h, v0.h[2] \n" "fmla v11.4h, %9.4h, v0.h[6] \n" "fmla v12.4h, v5.4h, v0.h[3] \n" "fmla v13.4h, v5.4h, v0.h[7] \n" "ext v6.16b, %10.16b, %10.16b, #8 \n" "fmla v10.4h, %10.4h, v0.h[4] \n" "fmla v11.4h, %10.4h, v1.h[0] \n" "fmla v12.4h, v6.4h, v0.h[5] \n" "fmla v13.4h, v6.4h, v1.h[1] \n" "ext v7.16b, %11.16b, %11.16b, #8 \n" "fmla v10.4h, %11.4h, v0.h[6] \n" "fmla v11.4h, %11.4h, v1.h[2] \n" "fmla v12.4h, v7.4h, v0.h[7] \n" "fmla v13.4h, v7.4h, v1.h[3] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v2.8h, v3.8h}, [%2] \n" // r10 r11 r12 r13 "ext v8.16b, %12.16b, %12.16b, #8 \n" "fmla v10.4h, %12.4h, v1.h[0] \n" "fmla v11.4h, %12.4h, v1.h[4] \n" "fmla v12.4h, v8.4h, v1.h[1] \n" "fmla v13.4h, v8.4h, v1.h[5] \n" "ext v9.16b, %13.16b, %13.16b, #8 \n" "fmla v10.4h, %13.4h, v1.h[2] \n" "fmla v11.4h, %13.4h, v1.h[6] \n" "fmla v12.4h, v9.4h, v1.h[3] \n" "fmla v13.4h, v9.4h, v1.h[7] \n" "ext v4.16b, %14.16b, %14.16b, #8 \n" "fmla v10.4h, %14.4h, v2.h[0] \n" "fmla v11.4h, %14.4h, v2.h[4] \n" "fmla v12.4h, v4.4h, v2.h[1] \n" "fmla v13.4h, v4.4h, v2.h[5] \n" "ext v5.16b, %15.16b, %15.16b, #8 \n" "fmla v10.4h, %15.4h, v2.h[2] \n" "fmla v11.4h, %15.4h, v2.h[6] \n" "fmla v12.4h, v5.4h, v2.h[3] \n" "fmla v13.4h, v5.4h, v2.h[7] \n" "ext v6.16b, %16.16b, %16.16b, #8 \n" "fmla v10.4h, %16.4h, v2.h[4] \n" "fmla v11.4h, %16.4h, v3.h[0] \n" "fmla v12.4h, v6.4h, v2.h[5] \n" "fmla v13.4h, v6.4h, v3.h[1] \n" "ext v7.16b, %17.16b, %17.16b, #8 \n" "fmla v10.4h, %17.4h, v2.h[6] \n" "fmla v11.4h, %17.4h, v3.h[2] \n" "fmla v12.4h, v7.4h, v2.h[7] \n" "fmla v13.4h, v7.4h, v3.h[3] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.8h, v1.8h}, [%3] \n" // r20 r21 r22 r23 "ext v8.16b, %18.16b, %18.16b, #8 \n" 
"fmla v10.4h, %18.4h, v3.h[0] \n" "fmla v11.4h, %18.4h, v3.h[4] \n" "fmla v12.4h, v8.4h, v3.h[1] \n" "fmla v13.4h, v8.4h, v3.h[5] \n" "ext v9.16b, %19.16b, %19.16b, #8 \n" "fmla v10.4h, %19.4h, v3.h[2] \n" "fmla v11.4h, %19.4h, v3.h[6] \n" "fmla v12.4h, v9.4h, v3.h[3] \n" "fmla v13.4h, v9.4h, v3.h[7] \n" "ext v4.16b, %20.16b, %20.16b, #8 \n" "fmla v10.4h, %20.4h, v0.h[0] \n" "fmla v11.4h, %20.4h, v0.h[4] \n" "fmla v12.4h, v4.4h, v0.h[1] \n" "fmla v13.4h, v4.4h, v0.h[5] \n" "ext v5.16b, %21.16b, %21.16b, #8 \n" "fmla v10.4h, %21.4h, v0.h[2] \n" "fmla v11.4h, %21.4h, v0.h[6] \n" "fmla v12.4h, v5.4h, v0.h[3] \n" "fmla v13.4h, v5.4h, v0.h[7] \n" "ext v6.16b, %22.16b, %22.16b, #8 \n" "fmla v10.4h, %22.4h, v0.h[4] \n" "fmla v11.4h, %22.4h, v1.h[0] \n" "fmla v12.4h, v6.4h, v0.h[5] \n" "fmla v13.4h, v6.4h, v1.h[1] \n" "ext v7.16b, %23.16b, %23.16b, #8 \n" "fmla v10.4h, %23.4h, v0.h[6] \n" "fmla v11.4h, %23.4h, v1.h[2] \n" "fmla v12.4h, v7.4h, v0.h[7] \n" "fmla v13.4h, v7.4h, v1.h[3] \n" "ext v8.16b, %24.16b, %24.16b, #8 \n" "fmla v10.4h, %24.4h, v1.h[0] \n" "fmla v11.4h, %24.4h, v1.h[4] \n" "fmla v12.4h, v8.4h, v1.h[1] \n" "fmla v13.4h, v8.4h, v1.h[5] \n" "ext v9.16b, %25.16b, %25.16b, #8 \n" "fmla v10.4h, %25.4h, v1.h[2] \n" "fmla v11.4h, %25.4h, v1.h[6] \n" "fmla v12.4h, v9.4h, v1.h[3] \n" "fmla v13.4h, v9.4h, v1.h[7] \n" "add %1, %1, #16 \n" "fadd v10.4h, v10.4h, v12.4h \n" "add %2, %2, #16 \n" "fadd v11.4h, v11.4h, v13.4h \n" "add %3, %3, #16 \n" "st1 {v10.4h, v11.4h}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00_01), // %8 "w"(_k00_23), // %9 "w"(_k01_01), // %10 "w"(_k01_23), // %11 "w"(_k02_01), // %12 "w"(_k02_23), // %13 "w"(_k10_01), // %14 "w"(_k10_23), // %15 "w"(_k11_01), // %16 "w"(_k11_23), // %17 "w"(_k12_01), // %18 "w"(_k12_23), // %19 "w"(_k20_01), // %20 "w"(_k20_23), // %21 "w"(_k21_01), // %22 "w"(_k21_23), // %23 "w"(_k22_01), // %24 "w"(_k22_23) // %25 : 
"memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13"); } for (; j < outw; j++) { asm volatile( "prfm pldl1keep, [%1, #192] \n" "ld1 {v0.4h, v1.4h, v2.4h}, [%1] \n" // r00 r01 r02 "prfm pldl1keep, [%0, #64] \n" "ld1 {v13.4h}, [%0] \n" // sum0 "ext v6.16b, %8.16b, %8.16b, #8 \n" "fmul v10.4h, %8.4h, v0.h[0] \n" "fmul v11.4h, v6.4h, v0.h[1] \n" "ext v7.16b, %9.16b, %9.16b, #8 \n" "fmul v12.4h, %9.4h, v0.h[2] \n" "fmla v13.4h, v7.4h, v0.h[3] \n" "ext v8.16b, %10.16b, %10.16b, #8 \n" "fmla v10.4h, %10.4h, v1.h[0] \n" "fmla v11.4h, v8.4h, v1.h[1] \n" "ext v9.16b, %11.16b, %11.16b, #8 \n" "fmla v12.4h, %11.4h, v1.h[2] \n" "fmla v13.4h, v9.4h, v1.h[3] \n" "prfm pldl1keep, [%2, #192] \n" "ld1 {v3.4h, v4.4h, v5.4h}, [%2] \n" // r10 r11 r12 "ext v6.16b, %12.16b, %12.16b, #8 \n" "fmla v10.4h, %12.4h, v2.h[0] \n" "fmla v11.4h, v6.4h, v2.h[1] \n" "ext v7.16b, %13.16b, %13.16b, #8 \n" "fmla v12.4h, %13.4h, v2.h[2] \n" "fmla v13.4h, v7.4h, v2.h[3] \n" "ext v8.16b, %14.16b, %14.16b, #8 \n" "fmla v10.4h, %14.4h, v3.h[0] \n" "fmla v11.4h, v8.4h, v3.h[1] \n" "ext v9.16b, %15.16b, %15.16b, #8 \n" "fmla v12.4h, %15.4h, v3.h[2] \n" "fmla v13.4h, v9.4h, v3.h[3] \n" "ext v6.16b, %16.16b, %16.16b, #8 \n" "fmla v10.4h, %16.4h, v4.h[0] \n" "fmla v11.4h, v6.4h, v4.h[1] \n" "ext v7.16b, %17.16b, %17.16b, #8 \n" "fmla v12.4h, %17.4h, v4.h[2] \n" "fmla v13.4h, v7.4h, v4.h[3] \n" "prfm pldl1keep, [%3, #192] \n" "ld1 {v0.4h, v1.4h, v2.4h}, [%3] \n" // r20 r21 r22 "ext v8.16b, %18.16b, %18.16b, #8 \n" "fmla v10.4h, %18.4h, v5.h[0] \n" "fmla v11.4h, v8.4h, v5.h[1] \n" "ext v9.16b, %19.16b, %19.16b, #8 \n" "fmla v12.4h, %19.4h, v5.h[2] \n" "fmla v13.4h, v9.4h, v5.h[3] \n" "ext v6.16b, %20.16b, %20.16b, #8 \n" "fmla v10.4h, %20.4h, v0.h[0] \n" "fmla v11.4h, v6.4h, v0.h[1] \n" "ext v7.16b, %21.16b, %21.16b, #8 \n" "fmla v12.4h, %21.4h, v0.h[2] \n" "fmla v13.4h, v7.4h, v0.h[3] \n" "ext v8.16b, %22.16b, %22.16b, #8 \n" "fmla v10.4h, %22.4h, v1.h[0] \n" "fmla 
v11.4h, v8.4h, v1.h[1] \n" "ext v9.16b, %23.16b, %23.16b, #8 \n" "fmla v12.4h, %23.4h, v1.h[2] \n" "fmla v13.4h, v9.4h, v1.h[3] \n" "ext v6.16b, %24.16b, %24.16b, #8 \n" "fmla v10.4h, %24.4h, v2.h[0] \n" "fmla v11.4h, v6.4h, v2.h[1] \n" "ext v7.16b, %25.16b, %25.16b, #8 \n" "fmla v12.4h, %25.4h, v2.h[2] \n" "fmla v13.4h, v7.4h, v2.h[3] \n" "fadd v10.4h, v10.4h, v11.4h \n" "add %1, %1, #8 \n" "fadd v12.4h, v12.4h, v13.4h \n" "add %2, %2, #8 \n" "fadd v10.4h, v10.4h, v12.4h \n" "add %3, %3, #8 \n" "st1 {v10.4h}, [%0], #8 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00_01), // %8 "w"(_k00_23), // %9 "w"(_k01_01), // %10 "w"(_k01_23), // %11 "w"(_k02_01), // %12 "w"(_k02_23), // %13 "w"(_k10_01), // %14 "w"(_k10_23), // %15 "w"(_k11_01), // %16 "w"(_k11_23), // %17 "w"(_k12_01), // %18 "w"(_k12_23), // %19 "w"(_k20_01), // %20 "w"(_k20_23), // %21 "w"(_k21_01), // %22 "w"(_k21_23), // %23 "w"(_k22_01), // %24 "w"(_k22_23) // %25 : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13"); } r0 += 8; r1 += 8; r2 += 8; } } } }
dropout-inl.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2015 by Contributors * \file dropout-inl.h * \brief * \author Bing Xu, Da Zheng, Hang Zhang */ #ifndef MXNET_OPERATOR_NN_DROPOUT_INL_H_ #define MXNET_OPERATOR_NN_DROPOUT_INL_H_ #include <dmlc/logging.h> #include <dmlc/parameter.h> #include <mxnet/operator.h> #include <map> #include <vector> #include <string> #include <utility> #include <algorithm> #include "../mxnet_op.h" #include "../mshadow_op.h" #include "../random/sampler.h" #include "../tensor/elemwise_binary_broadcast_op.h" #if (MSHADOW_USE_MKL == 1) && defined(_OPENMP) && !defined(__CUDACC__) #define MXNET_USE_MKL_DROPOUT 1 #endif #if MXNET_USE_MKL_DROPOUT #include <omp.h> #include <mkl_vml_functions.h> #include <mkl_vsl.h> #endif // MXNET_USE_MKL_DROPOUT #define MXNET_USE_CUDNN_DROPOUT MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 7 namespace dropout { enum DropoutOpInputs {kData}; enum DropoutOpOutputs {kOut, kMask}; enum DropoutOpForwardResource {kRandom}; enum DropoutOpMode {kTraining, kAlways}; } // namespace dropout namespace mxnet { namespace op { const int MAX_DIM = 5; struct DropoutParam : public dmlc::Parameter<DropoutParam> { float p; int mode; mxnet::TShape axes; dmlc::optional<bool> cudnn_off; 
DMLC_DECLARE_PARAMETER(DropoutParam) {
    // NOTE: `p` is the drop probability; the op works with pkeep = 1 - p.
    DMLC_DECLARE_FIELD(p).set_default(0.5)
    .set_range(0, 1)
    .describe("Fraction of the input that gets dropped out during training time.");
    DMLC_DECLARE_FIELD(mode)
    .add_enum("training", dropout::kTraining)
    .add_enum("always", dropout::kAlways)
    .set_default(dropout::kTraining)
    .describe("Whether to only turn on dropout during training or to also turn on for inference.");
    DMLC_DECLARE_FIELD(axes).set_default(mxnet::TShape(0, 0))
    .describe("Axes for variational dropout kernel.");
    DMLC_DECLARE_FIELD(cudnn_off).set_default(dmlc::optional<bool>(false))
    .describe("Whether to turn off cudnn in dropout operator. "
              "This option is ignored if axes is specified.");
  }
  // Map a DropoutOpMode enum value back to its string name (used by SetAttrDict).
  std::string Mode2String(int mode) {
    switch (mode) {
      case dropout::kTraining:
        return "training";
      case dropout::kAlways:
        return "always";
      default:
        LOG(FATAL) << "Unknown mode enum " << mode;
    }
    LOG(FATAL) << "should not reach here ";  // unreachable; keeps compilers quiet
    return "";
  }
  // Serialize the parameter values into a string->string dictionary.
  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
    std::ostringstream p_s, mode_s, axes_s, cudnn_off_s;
    p_s << p;
    mode_s << mode;
    axes_s << axes;
    cudnn_off_s << cudnn_off;
    (*dict)["p"] = p_s.str();
    (*dict)["mode"] = Mode2String(mode);
    (*dict)["axes"] = axes_s.str();
    (*dict)["cudnn_off"] = cudnn_off_s.str();
  }
};  // struct DropoutParam

template<typename xpu, typename DType>
class DropoutOp {
#if MXNET_USE_MKL_DROPOUT
  // Fill r[0..n) with Bernoulli(p) draws using MKL VSL, splitting the work
  // across OpenMP threads; each thread skips ahead in the same stream so the
  // threads consume disjoint subsequences of it.
  static void BernoulliGenerate(common::random::RandGenerator<cpu, DType> gen,
                                int n, double p, int* r) {
    typename RandGenerator<xpu, DType>::Impl genImpl(&gen, 1);
    // Seed the VSL stream from the operator's parallel RNG.
    const int seed = 17 + abs(genImpl.rand() % 4096);
    CHECK_GE(seed, 0);
    const int nthr = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
#pragma omp parallel num_threads(nthr)
    {
      const int ithr = omp_get_thread_num();
      // Split [0, n) into nthr nearly equal contiguous chunks.
      const int avg_amount = (n + nthr - 1) / nthr;
      const int my_offset = ithr * avg_amount;
      const int my_amount = std::min(my_offset + avg_amount, n) - my_offset;
      if (my_amount > 0) {
        VSLStreamStatePtr stream;
        vslNewStream(&stream, VSL_BRNG_MCG31, seed);
        vslSkipAheadStream(stream, my_offset);
        viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, stream, my_amount,
                       r + my_offset, p);
        vslDeleteStream(&stream);
      }
    }
  }
  static inline bool MKLAvailable() {
    // BernoulliGenerate expects an array of int, so for types smaller than
    // int the mask buffer would be too small; we can't use MKL in those cases.
    return sizeof(DType) >= sizeof(int);
  }

  // MKL forward pass
  inline void MKLForward(const OpContext &ctx,
                         const std::vector<TBlob> &in_data,
                         const std::vector<TBlob> &out_data) {
    Stream<xpu> *s = ctx.get_stream<xpu>();
    RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>();
    CHECK_NOTNULL(pgen);
    Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s);
    Tensor<xpu, 2, DType> data = in_data[dropout::kData].FlatTo2D<xpu, DType>(s);
    Tensor<xpu, 2, DType> out = out_data[dropout::kOut].FlatTo2D<xpu, DType>(s);
    DType *outptr = out.dptr_;
    DType *dataptr = data.dptr_;
    // The 0/1 Bernoulli draws are produced as ints, by default reusing the
    // mask buffer's storage.
    auto maskptr = reinterpret_cast<int *>(mask.dptr_);
    int count = mask.shape_[0] * mask.shape_[1];
    if (sizeof(DType) > sizeof(int)) {
      // Allocate a separate int buffer to avoid memory overlap between
      // `mask.dptr_` and `maskptr` (int draws narrower than DType slots).
      Tensor<xpu, 1, int> temp = ctx.requested[1].get_space_typed<xpu, 1, int>(Shape1(count), s);
      maskptr = temp.dptr_;
    }
    BernoulliGenerate(*pgen, count, this->pkeep_, maskptr);
    const float pk_1 = 1.0f / this->pkeep_;  // inverted-dropout rescale factor
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
    for (int i = 0; i < count; ++i) {
      const DType maskVal = static_cast<DType>(maskptr[i]) * pk_1;  // 0 or 1/pkeep
      outptr[i] = dataptr[i] * maskVal;
      mask.dptr_[i] = maskVal;  // keep the scaled mask for the backward pass
    }
  }

  // MKL backward pass: dgrad = ograd * saved (already rescaled) mask.
  inline void MKLBackward(const OpContext &ctx,
                          const std::vector<TBlob> &in_grad,
                          const std::vector<TBlob> &out_data,
                          const std::vector<TBlob> &out_grad) {
    Stream<xpu> *s = ctx.get_stream<xpu>();
    Tensor<xpu, 2, DType> grad = out_grad[dropout::kOut].FlatTo2D<xpu, DType>(s);
    Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s);
    Tensor<xpu, 2, DType> gdata = in_grad[dropout::kData].FlatTo2D<xpu, DType>(s);
    DType *ingradptr = gdata.dptr_;
    const DType *outgradptr = grad.dptr_;
    const DType *maskptr = mask.dptr_;
    const int count = mask.shape_[0] * mask.shape_[1];
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
    for (int i = 0; i < count; ++i) {
      ingradptr[i] = outgradptr[i] * maskptr[i];
    }
  }
#endif  // #if MXNET_USE_MKL_DROPOUT

 public:
  /*!
   * \brief Dropout kernel, compute dropout tensor
   */
  struct DropoutKernel {
    /*!
     * \brief Dropout kernel function
     * \param id Thread number (0-based representing count)
     * \param gen Random number generator
     * \param N Total number of items in the output
     * \param step Step between items, related to parallelism
     * \param dropout_out Output dropout values
     * \param mask_out Output mask (is multiplied to create dropout output, may be 0)
     * \param input_data Input data to perform the dropout on
     * \param pkeep Dropout rate (keep when the generated random number is less than this value)
     */
    MSHADOW_XINLINE static void Map(index_t id,
                                    RandGenerator<xpu, DType> gen,
                                    const index_t N,
                                    const index_t step,
                                    DType *dropout_out,
                                    DType *mask_out,
                                    const DType *input_data,
                                    const real_t pkeep) {
      RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, {
        const real_t rand_num = static_cast<real_t>(genImpl.uniform());
        // mask is 1/pkeep when the draw survives, 0 otherwise (inverted dropout).
        mask_out[i] = mshadow_op::threshold_eq::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep);
        dropout_out[i] = input_data[i] * mask_out[i];
      });
    }
  };
  struct BernoulliKernel {
    /*!
\brief Bernoulli kernel for generating mask */
    MSHADOW_XINLINE static void Map(index_t id,
                                    RandGenerator<xpu, DType> gen,
                                    const index_t N,
                                    const index_t step,
                                    DType *mask_out,
                                    const real_t pkeep) {
      RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, {
        const real_t rand_num = static_cast<real_t>(genImpl.uniform());
        // Scaled keep/drop indicator: 1/pkeep or 0 (inverted dropout).
        mask_out[i] = mshadow_op::threshold::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep);
      });
    }
  };

  explicit DropoutOp(const DropoutParam &param, Context ctx) {
    this->pkeep_ = 1.0f - param.p;  // probability of keeping an element
    this->mode_ = static_cast<dropout::DropoutOpMode>(param.mode);
    this->axes_ = param.axes;
    this->dropout_passthrough_ = true;
#if MXNET_USE_CUDNN_DROPOUT
    this->cudnn_off_ = param.cudnn_off && param.cudnn_off.value();
    this->ctx_ = ctx;
    // cuDNN descriptors are created only when the cuDNN path can be taken.
    if (ctx.dev_type == kGPU && this->pkeep_ > 0 && !this->cudnn_off_) {
      dtype_ = mshadow::DataType<DType>::kCudnnFlag;
      CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc_));
      CUDNN_CALL(cudnnCreateTensorDescriptor(&y_desc_));
      CUDNN_CALL(cudnnCreateTensorDescriptor(&dx_desc_));
      CUDNN_CALL(cudnnCreateTensorDescriptor(&dy_desc_));
      CUDNN_CALL(cudnnCreateDropoutDescriptor(&dropout_desc_));
    }
#endif  // MXNET_USE_CUDNN_DROPOUT
  }

  ~DropoutOp() {
#if MXNET_USE_CUDNN_DROPOUT
    // Mirror the conditional creation in the constructor.
    if (this->ctx_.dev_type == kGPU && this->pkeep_ > 0 && !this->cudnn_off_) {
      CUDNN_CALL(cudnnDestroyTensorDescriptor(x_desc_));
      CUDNN_CALL(cudnnDestroyTensorDescriptor(y_desc_));
      CUDNN_CALL(cudnnDestroyTensorDescriptor(dx_desc_));
      CUDNN_CALL(cudnnDestroyTensorDescriptor(dy_desc_));
      CUDNN_CALL(cudnnDestroyDropoutDescriptor(dropout_desc_));
    }
#endif  // MXNET_USE_CUDNN_DROPOUT
  }

#if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
  inline bool CuDNNAvailable() {
    return this->pkeep_ > 0 && !this->cudnn_off_;
  }

  // Forward through cuDNN; `mask` doubles as the cuDNN reserve space.
  inline void CuDNNForward(const OpContext &ctx,
                           const TBlob &in,
                           const TBlob &mask,
                           const TBlob &out) {
    Stream<xpu> *s = ctx.get_stream<xpu>();
    // set dropout state.
    ctx.requested[0].get_cudnn_dropout_desc(&dropout_desc_, s, 1.0f - this->pkeep_);
    // describe input/output tensor
    // The data is presented to cuDNN as a flat 1 x 1 x 1 x Size() blob.
    int dim[4], stride[4];
    dim[0] = 1;
    dim[1] = 1;
    dim[2] = 1;
    dim[3] = out.Size();
    stride[0] = out.Size();
    stride[1] = out.Size();
    stride[2] = out.Size();
    stride[3] = 1;
    CUDNN_CALL(cudnnSetTensorNdDescriptor(x_desc_, dtype_, 4, dim, stride));
    CUDNN_CALL(cudnnSetTensorNdDescriptor(y_desc_, dtype_, 4, dim, stride));
    // perform dropout with cudnn
    CUDNN_CALL(cudnnDropoutGetReserveSpaceSize(x_desc_, &dropout_reserve_byte_));
    // cudnn uses bits to record the positions that are dropped, so reserve bytes is always
    // 1/8 of input size.
    CHECK_GE(mask.Size() * sizeof(DType), dropout_reserve_byte_)
        << "The size of the mask space is smaller than the required cudnn reserved space.";
    CUDNN_CALL(cudnnDropoutForward(s->dnn_handle_,
                                   dropout_desc_,
                                   x_desc_,
                                   in.dptr<DType>(),
                                   y_desc_,
                                   out.dptr<DType>(),
                                   mask.dptr<DType>(),
                                   dropout_reserve_byte_));
  }

  // Backward through cuDNN using the reserve space saved in `mask`.
  inline void CuDNNBackward(const OpContext &ctx,
                            const TBlob &out_grad,
                            const TBlob &mask,
                            const TBlob &in_grad) {
    Stream<xpu> *s = ctx.get_stream<xpu>();
    // describe input/output tensor (same flat 1 x 1 x 1 x Size() view as forward)
    int dim[4], stride[4];
    dim[0] = 1;
    dim[1] = 1;
    dim[2] = 1;
    dim[3] = in_grad.Size();
    stride[0] = in_grad.Size();
    stride[1] = in_grad.Size();
    stride[2] = in_grad.Size();
    stride[3] = 1;
    CUDNN_CALL(cudnnSetTensorNdDescriptor(dy_desc_, dtype_, 4, dim, stride));
    CUDNN_CALL(cudnnSetTensorNdDescriptor(dx_desc_, dtype_, 4, dim, stride));
    // perform dropout with cudnn
    CUDNN_CALL(cudnnDropoutBackward(s->dnn_handle_,
                                    dropout_desc_,
                                    dy_desc_,
                                    out_grad.dptr<DType>(),
                                    dx_desc_,
                                    in_grad.dptr<DType>(),
                                    mask.dptr<DType>(),
                                    dropout_reserve_byte_));
  }
#endif  // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)

  // Forward pass.  Chooses MKL / cuDNN / generic RNG kernels for element-wise
  // dropout, a broadcast-multiplied Bernoulli mask for variational dropout
  // (axes specified), or a plain copy in pass-through mode.
  void Forward(const OpContext &ctx,
               const std::vector<TBlob> &in_data,
               const std::vector<OpReqType> &req,
               const std::vector<TBlob> &out_data) {
    this->dropout_passthrough_ = true;
    if (req[dropout::kOut] != kNullOp) {
      CHECK_EQ(in_data.size(), 1U);
      if (ctx.is_train) {
        CHECK_EQ(out_data.size(), 2U);
      }
      Stream<xpu> *s = ctx.get_stream<xpu>();
      const TBlob &in = in_data[dropout::kData];
      const TBlob &out = out_data[dropout::kOut];
      const TBlob &mask = out_data[dropout::kMask];
      // Only apply dropout when pkeep < 1 and we are training (or mode==always).
      if (this->pkeep_ < 1 && (ctx.is_train || this->mode_ == dropout::kAlways)) {
        this->dropout_passthrough_ = false;
        if (this->axes_.ndim() == 0) {
          // Standard element-wise dropout; try the fast paths first.
#if MXNET_USE_MKL_DROPOUT
          if (MKLAvailable()) {
            MKLForward(ctx, in_data, out_data);
            return;
          }
#endif  // MXNET_USE_MKL_DROPOUT
#if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
          if (CuDNNAvailable()) {
            CuDNNForward(ctx, in, mask, out);
            return;
          }
#endif  // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
          RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>();
          CHECK_NOTNULL(pgen);
          CHECK(req[dropout::kOut] != kAddTo);
          LaunchRNG<DropoutKernel, xpu>(s, pgen, out.Size(),
                                        out.dptr<DType>(),
                                        mask.dptr<DType>(),
                                        in.dptr<DType>(),
                                        this->pkeep_);
          return;
        } else {
          // Variational dropout: generate a (typically smaller) mask and
          // broadcast-multiply it onto the input.
          RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>();
          CHECK_NOTNULL(pgen);
          // initialize the mask
          LaunchRNG<BernoulliKernel, xpu>(s, pgen, mask.Size(),
                                          mask.dptr<DType>(),
                                          this->pkeep_);
          // broadcast mul
          mxnet::TShape new_lshape, new_rshape, new_oshape;
          int ndim = BinaryBroadcastShapeCompact(in.shape_, mask.shape_, out.shape_,
                                                 &new_lshape, &new_rshape, &new_oshape);
          if (!ndim) {
            // Shapes are identical: plain element-wise multiply.
            MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, {
              mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch(
                  s, out.Size(), out.dptr<DType>(), in.dptr<DType>(), mask.dptr<DType>());
            });
          } else {
            BROADCAST_NDIM_SWITCH(ndim, NDim, {
              mshadow::Shape<NDim> oshape = new_oshape.get<NDim>();
              mshadow::Shape<NDim> lstride = mxnet_op::calc_stride(new_lshape.get<NDim>());
              mshadow::Shape<NDim> rstride = mxnet_op::calc_stride(new_rshape.get<NDim>());
              mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<NDim, mshadow_op::mul>, xpu>::
                  template LaunchEx(s, new_oshape.Size(), req[dropout::kOut],
                                    lstride, rstride, oshape,
                                    in.dptr<DType>(),
                                    mask.dptr<DType>(), out.dptr<DType>());
            });
          }
        }
      } else {
        // Pass-through: no dropout applied; copy input unless writing in place.
        if (req[dropout::kOut] == kWriteInplace) return;
        MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, {
          mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch(
              s, out.Size(), out.dptr<DType>(), in.dptr<DType>());
        });
      }
    }
  }

  // Backward pass: multiply the output gradient by the saved mask, using the
  // same MKL / cuDNN / generic / broadcast dispatch as Forward.
  void Backward(const OpContext &ctx,
                const std::vector<TBlob> &out_grad,
                const std::vector<TBlob> &out_data,
                const std::vector<OpReqType> &req,
                const std::vector<TBlob> &in_grad) {
    using namespace mshadow;
    using namespace mshadow::expr;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    if (!this->dropout_passthrough_) {
      this->dropout_passthrough_ = true;
      const TBlob &gdata = in_grad[dropout::kData];
      const TBlob &grad = out_grad[dropout::kOut];
      const TBlob &mask = out_data[dropout::kMask];
      if (this->axes_.ndim() == 0) {
#if MXNET_USE_MKL_DROPOUT
        if (MKLAvailable()) {
          MKLBackward(ctx, in_grad, out_data, out_grad);
          return;
        }
#endif  // MXNET_USE_MKL_DROPOUT
#if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
        if (CuDNNAvailable()) {
          CuDNNBackward(ctx, grad, mask, gdata);
          return;
        }
#endif  // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
        // standard case for dropout
        CHECK_EQ(grad.Size(), mask.Size());
        MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
          mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch(
              s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>());
        });
        return;
      } else {
        // broadcast mul
        mxnet::TShape new_lshape, new_rshape, new_oshape;
        int ndim = BinaryBroadcastShapeCompact(grad.shape_, mask.shape_, gdata.shape_,
                                               &new_lshape, &new_rshape, &new_oshape);
        if (!ndim) {
          MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
            mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch(
                s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>());
          });
        } else {
          BROADCAST_NDIM_SWITCH(ndim, NDim, {
            mshadow::Shape<NDim> oshape = new_oshape.get<NDim>();
            mshadow::Shape<NDim> lstride =
mxnet_op::calc_stride(new_lshape.get<NDim>());
            mshadow::Shape<NDim> rstride = mxnet_op::calc_stride(new_rshape.get<NDim>());
            mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<NDim, mshadow_op::mul>, xpu>::
                template LaunchEx(s, new_oshape.Size(), req[0],
                                  lstride, rstride, oshape,
                                  grad.dptr<DType>(), mask.dptr<DType>(), gdata.dptr<DType>());
          });
        }
      }
    } else {
      // Forward ran in pass-through mode, so the gradient passes through too.
      const TBlob& gdata = in_grad[dropout::kData];
      const TBlob& grad = out_grad[dropout::kOut];
      MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
        mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch(
            s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>());
      });
    }
  }

 private:
  /*! \brief Dropout rate (keep when the generated random number is less than this value) */
  real_t pkeep_;
  /*! \brief Dropout mode */
  dropout::DropoutOpMode mode_;
  /*! \brief Axes on which dropout mask is shared in the form of broadcast multiply */
  mxnet::TShape axes_;
  /*! \brief Flag to record whether forward is executed in pass-through mode */
  bool dropout_passthrough_;
#if MXNET_USE_CUDNN_DROPOUT
  bool cudnn_off_;
  Context ctx_;
  cudnnDataType_t dtype_;
  cudnnDropoutDescriptor_t dropout_desc_;
  // Size of the cuDNN reserve space (bit mask of dropped positions).
  size_t dropout_reserve_byte_;
  cudnnTensorDescriptor_t x_desc_, y_desc_, dx_desc_, dy_desc_;
#endif  // MXNET_USE_CUDNN_DROPOUT
};  // class DropoutOp

// Forward entry point: dispatch on the input dtype and run the stateful op.
template<typename xpu>
void DropoutCompute(const OpStatePtr& state,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    DropoutOp<xpu, DType>& op = state.get_state<DropoutOp<xpu, DType>>();
    op.Forward(ctx, inputs, req, outputs);
  });
}

// Backward entry point: inputs are {output gradient, saved mask}.
template<typename xpu>
void DropoutGradCompute(const OpStatePtr& state,
                        const OpContext& ctx,
                        const std::vector<TBlob>& inputs,
                        const std::vector<OpReqType>& req,
                        const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1);
  CHECK_EQ(req.size(), 1);
  std::vector<TBlob> out_grads(2);
  std::vector<TBlob> out_data(2);
  out_grads[dropout::kOut] = inputs[0];
  out_data[dropout::kMask] = inputs[1];
  MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    DropoutOp<xpu, DType>& op = state.get_state<DropoutOp<xpu, DType>>();
    op.Backward(ctx, out_grads, out_data, req, outputs);
  });
}

}  // namespace op
}  // namespace mxnet
#endif  // MXNET_OPERATOR_NN_DROPOUT_INL_H_
cpu_adam.h
#pragma once

#include <cuda_fp16.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <cassert>
#include "cuda.h"
#include "custom_cuda_layers.h"
#include "simd.h"

// Declares a scalar step routine handling SPAN elements per loop iteration.
// `dev_param` (optional) is a device-side fp16 copy of the parameters that is
// kept in sync; `half_precision` selects fp16 storage for params/grads.
#define STEP(SPAN)                                  \
    void Step_##SPAN(float* _params,                \
                     float* grads,                  \
                     float* _exp_avg,               \
                     float* _exp_avg_sq,            \
                     size_t _param_size,            \
                     __half* dev_param = nullptr,   \
                     bool half_precision = false);

// CPU Adam/AdamW optimizer with AVX-vectorized steps and asynchronous
// copy-back of updated parameters to the GPU via two pinned host buffers.
class Adam_Optimizer {
public:
    Adam_Optimizer(float alpha = 1e-3,
                   float betta1 = 0.9,
                   float betta2 = 0.999,
                   float eps = 1e-8,
                   float weight_decay = 0,
                   bool adamw_mode = true)
        : _alpha(alpha),
          _betta1(betta1),
          _betta2(betta2),
          _eps(eps),
          _weight_decay(weight_decay),
          _betta1_t(1.0),
          _betta2_t(1.0),
          _step(0),
          _buf_index(false),
          _adamw_mode(adamw_mode)
    {
        // Pinned host staging buffers (TILE floats each — TODO confirm TILE
        // is defined in custom_cuda_layers.h) for async H2D parameter copies.
        cudaMallocHost((void**)_doubled_buffer, TILE * sizeof(float));
        cudaMallocHost((void**)(_doubled_buffer + 1), TILE * sizeof(float));

        _streams[0] = Context::Instance().GetCurrentStream();
        _streams[1] = Context::Instance().GetNewStream();
    }
    ~Adam_Optimizer()
    {
        cudaFreeHost(_doubled_buffer[0]);
        cudaFreeHost(_doubled_buffer[1]);
    }
#if defined(__AVX512__) or defined(__AVX256__)
    // Vectorized step over the largest prefix of the parameter array that is
    // a multiple of SIMD_WIDTH * span; writes that prefix length to
    // *rounded_size so the caller can finish the tail with a scalar Step_*.
    template <int span>
    void Step_AVX(size_t* rounded_size,
                  float* _params,
                  float* grads,
                  float* _exp_avg,
                  float* _exp_avg_sq,
                  size_t param_size,
                  __half* dev_param = nullptr,
                  bool half_precision = false);
#endif
    STEP(1)
    STEP(4)
    STEP(8)
    // Wait for both copy-back streams to drain.
    inline void SynchronizeStreams()
    {
        for (int i = 0; i < 2; i++) cudaStreamSynchronize(_streams[i]);
    }
    // Maintain beta1^step / beta2^step incrementally; recompute them from
    // scratch when the betas change or the step counter jumps.
    inline void IncrementStep(size_t step, float beta1, float beta2)
    {
        if (beta1 != _betta1 || beta2 != _betta2) {
            _step = step;
            _betta1 = beta1;
            _betta2 = beta2;
            _betta1_t = std::pow(_betta1, step);
            _betta2_t = std::pow(_betta2, step);
        } else {
            _step++;
            if (_step != step) {
                // Out-of-order step: recompute the powers exactly.
                _betta1_t = std::pow(_betta1, step);
                _betta2_t = std::pow(_betta2, step);
                _step = step;
            } else {
                // Common case: one more multiply per beta.
                _betta1_t *= _betta1;
                _betta2_t *= _betta2;
            }
        }
    }
    // Refresh hyper-parameters before a step.  Note _bias_correction2 stores
    // 1/sqrt(1 - beta2^t), i.e. the factor the update multiplies by directly.
    inline void update_state(float lr, float epsilon, float weight_decay, bool bias_correction)
    {
        _alpha = lr;
        _eps = epsilon;
        _weight_decay = weight_decay;

        _bias_correction1 = 1.0f;
        _bias_correction2 = 1.0f;
        if (bias_correction == 1) {
            _bias_correction1 = 1 - _betta1_t;
            _bias_correction2 = 1 / sqrt(1 - _betta2_t);
        }
    }

private:
    float _alpha;          // learning rate
    float _betta1;         // beta1
    float _betta2;         // beta2
    float _eps;
    float _weight_decay;

    float _betta1_t;       // beta1^step
    float _betta2_t;       // beta2^step
    size_t _step;

    float _bias_correction1;   // 1 - beta1^t
    float _bias_correction2;   // 1/sqrt(1 - beta2^t)

    float* _doubled_buffer[2];  // pinned ping-pong staging buffers
    bool _buf_index;            // which staging buffer/stream is active
    bool _adamw_mode;           // true: decoupled (AdamW) weight decay
    cudaStream_t _streams[2];
};

#if defined(__AVX512__) or defined(__AVX256__)
template <int span>
void Adam_Optimizer::Step_AVX(size_t* rounded_size,
                              float* _params,
                              float* grads,
                              float* _exp_avg,
                              float* _exp_avg_sq,
                              size_t _param_size,
                              __half* dev_params,
                              bool half_precision)
{
    size_t new_rounded_size = 0;

    // Broadcast the scalar hyper-parameters into SIMD registers once.
    AVX_Data betta1_4;
    betta1_4.data = SIMD_SET(_betta1);
    AVX_Data betta2_4;
    betta2_4.data = SIMD_SET(_betta2);

    float betta1_minus1 = 1 - _betta1;
    float betta2_minus1 = 1 - _betta2;
    AVX_Data betta1_minus1_4;
    betta1_minus1_4.data = SIMD_SET(betta1_minus1);
    AVX_Data betta2_minus1_4;
    betta2_minus1_4.data = SIMD_SET(betta2_minus1);

    AVX_Data bias2_sqrt;
    bias2_sqrt.data = SIMD_SET(_bias_correction2);
    AVX_Data eps_4;
    eps_4.data = SIMD_SET(_eps);

    // Negative so the update is a single fused multiply-add below.
    float step_size = -1 * _alpha / _bias_correction1;
    AVX_Data step_size_4;
    step_size_4.data = SIMD_SET(step_size);

    // AdamW applies -lr*wd directly to the parameter; classic Adam adds
    // wd*param to the gradient instead.
    float w_decay = -1 * _alpha * _weight_decay;
    AVX_Data weight_decay4;
    if (_weight_decay > 0)
        weight_decay4.data = (_adamw_mode ? SIMD_SET(w_decay) : SIMD_SET(_weight_decay));
    new_rounded_size = ROUND_DOWN(_param_size, SIMD_WIDTH * span);
    for (size_t t = 0; t < new_rounded_size; t += TILE) {
        size_t copy_size = TILE;
        if ((t + TILE) > new_rounded_size) copy_size = new_rounded_size - t;
        size_t offset = copy_size + t;
        // Before reusing a staging buffer, wait for its in-flight H2D copy
        // (two buffers, so only from the third tile onward).
        if ((t / TILE) >= 2) { cudaStreamSynchronize(_streams[_buf_index]); }
#pragma omp parallel for
        for (size_t i = t; i < offset; i += SIMD_WIDTH * span) {
            AVX_Data grad_4[span];
            simd_load<span>(grad_4, grads + i, half_precision);

            AVX_Data momentum_4[span];
            simd_load<span>(momentum_4, _exp_avg + i, false);

            AVX_Data variance_4[span];
            simd_load<span>(variance_4, _exp_avg_sq + i, false);

            AVX_Data param_4[span];
            simd_load<span>(param_4, _params + i, half_precision);

            // Classic (L2) weight decay folds into the gradient.
            if (_weight_decay > 0 && !_adamw_mode) {
                simd_fma<span>(grad_4, param_4, weight_decay4, grad_4);
            }

            // m = b1*m + (1-b1)*g ;  v = b2*v + (1-b2)*g^2
            simd_mul<span>(momentum_4, momentum_4, betta1_4);
            simd_fma<span>(momentum_4, grad_4, betta1_minus1_4, momentum_4);
            simd_mul<span>(variance_4, variance_4, betta2_4);
            simd_mul<span>(grad_4, grad_4, grad_4);
            simd_fma<span>(variance_4, grad_4, betta2_minus1_4, variance_4);

            // update direction: m / (sqrt(v)/sqrt(1-b2^t) + eps)
            simd_sqrt<span>(grad_4, variance_4);
            simd_fma<span>(grad_4, grad_4, bias2_sqrt, eps_4);
            simd_div<span>(grad_4, momentum_4, grad_4);

            // Decoupled (AdamW) weight decay acts on the parameter itself.
            if (_weight_decay > 0 && _adamw_mode) {
                simd_fma<span>(param_4, param_4, weight_decay4, param_4);
            }
            simd_fma<span>(param_4, grad_4, step_size_4, param_4);

            simd_store<span>(_params + i, param_4, half_precision);
            if (dev_params) {
                // Stage the updated slice for the async device copy below.
                simd_store<span>(_doubled_buffer[_buf_index] + (i - t), param_4, half_precision);
            }
            simd_store<span>(_exp_avg + i, momentum_4, false);
            simd_store<span>(_exp_avg_sq + i, variance_4, false);
        }
        if (dev_params) {
            if (half_precision)
                launch_param_update_half(
                    _doubled_buffer[_buf_index], dev_params + t, copy_size, _streams[_buf_index]);
            else
                launch_param_update(
                    _doubled_buffer[_buf_index], dev_params + t, copy_size, _streams[_buf_index]);

            // Ping-pong to the other staging buffer/stream.
            _buf_index = !_buf_index;
        }
    }
    *rounded_size = new_rounded_size;
}
#endif
GB_unaryop__abs_bool_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_bool_uint32
// op(A') function:  GB_tran__abs_bool_uint32

// C type:   bool
// A type:   uint32_t
// cast:     bool cij = (bool) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (abs is the identity for bool after the cast)
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    bool z = (bool) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    /* aij = Ax [pA] */                     \
    GB_GETA (aij, Ax, pA) ;                 \
    /* Cx [pC] = op (cast (aij)) */         \
    GB_CASTING (x, aij) ;                   \
    GB_OP (GB_CX (pC), x) ;                 \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_BOOL || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__abs_bool_uint32
(
    bool *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Element-wise: Cx [p] = (bool) Ax [p], parallel over the entries.
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__abs_bool_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The transpose work is done by the shared template
    // GB_unaryop_transpose.c, parameterized by the macros above.
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
fx.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF X X % % F X X % % FFF X % % F X X % % F X X % % % % % % MagickCore Image Special Effects Methods % % % % Software Design % % John Cristy % % October 1996 % % % % % % Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/annotate.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/cache.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/composite.h" #include "magick/decorate.h" #include "magick/distort.h" #include "magick/draw.h" #include "magick/effect.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/fx.h" #include "magick/fx-private.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/layer.h" #include "magick/list.h" #include "magick/log.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantum.h" #include "magick/quantum-private.h" #include "magick/random_.h" #include "magick/random-private.h" #include "magick/resample.h" #include "magick/resample-private.h" #include "magick/resize.h" #include "magick/splay-tree.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/transform.h" #include "magick/utility.h" /* Define declarations. 
*/

/*
  Single-byte stand-ins for multi-character operators; AcquireFxInfo()
  rewrites the expression so the evaluator only sees one-byte operators.
*/
#define LeftShiftOperator 0xf5
#define RightShiftOperator 0xf6
#define LessThanEqualOperator 0xf7
#define GreaterThanEqualOperator 0xf8
#define EqualOperator 0xf9
#define NotEqualOperator 0xfa
#define LogicalAndOperator 0xfb
#define LogicalOrOperator 0xfc
#define ExponentialNotation 0xfd

/*
  Private FX evaluator state.
*/
struct _FxInfo
{
  const Image
    *images;          /* image sequence the expression is evaluated against */

  char
    *expression;      /* preprocessed expression text */

  FILE
    *file;            /* debug output stream (stderr by default) */

  SplayTreeInfo
    *colors,
    *symbols;         /* caches keyed by string, values owned by the trees */

  CacheView
    **view;           /* one pixel cache view per image in the sequence */

  RandomInfo
    *random_info;

  ExceptionInfo
    *exception;
};

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   A c q u i r e F x I n f o                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireFxInfo() allocates the FxInfo structure.
%
%  The format of the AcquireFxInfo method is:
%
%      FxInfo *AcquireFxInfo(Image *image,const char *expression)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o expression: the expression.
%
*/
MagickExport FxInfo *AcquireFxInfo(const Image *image,const char *expression)
{
  char
    fx_op[2];

  const Image
    *next;

  FxInfo
    *fx_info;

  register ssize_t
    i;

  fx_info=(FxInfo *) AcquireMagickMemory(sizeof(*fx_info));
  if (fx_info == (FxInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(fx_info,0,sizeof(*fx_info));
  fx_info->exception=AcquireExceptionInfo();
  fx_info->images=image;
  /* Both splay trees relinquish their keys and values on destruction. */
  fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  /* Acquire one cache view per image in the sequence. */
  fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength(
    fx_info->images),sizeof(*fx_info->view));
  if (fx_info->view == (CacheView **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  i=0;
  next=GetFirstImageInList(fx_info->images);
  for ( ; next != (Image *) NULL; next=next->next)
  {
    fx_info->view[i]=AcquireCacheView(next);
    i++;
  }
  fx_info->random_info=AcquireRandomInfo();
  fx_info->expression=ConstantString(expression);
  fx_info->file=stderr;
  (void) SubstituteString(&fx_info->expression," ","");  /* compact string */
  /*
    Force right-to-left associativity for unary negation.
    NOTE(review): this rewrites every '-' (unary AND binary) as "-1.0*";
    presumably the evaluator's grammar relies on that — verify before
    changing the substitution order below.
  */
  (void) SubstituteString(&fx_info->expression,"-","-1.0*");
  /*
    Convert complex to simple operators (each two-character operator is
    replaced by its single-byte token defined above).
  */
  fx_op[1]='\0';
  *fx_op=(char) LeftShiftOperator;
  (void) SubstituteString(&fx_info->expression,"<<",fx_op);
  *fx_op=(char) RightShiftOperator;
  (void) SubstituteString(&fx_info->expression,">>",fx_op);
  *fx_op=(char) LessThanEqualOperator;
  (void) SubstituteString(&fx_info->expression,"<=",fx_op);
  *fx_op=(char) GreaterThanEqualOperator;
  (void) SubstituteString(&fx_info->expression,">=",fx_op);
  *fx_op=(char) EqualOperator;
  (void) SubstituteString(&fx_info->expression,"==",fx_op);
  *fx_op=(char) NotEqualOperator;
  (void) SubstituteString(&fx_info->expression,"!=",fx_op);
  *fx_op=(char) LogicalAndOperator;
  (void) SubstituteString(&fx_info->expression,"&&",fx_op);
  *fx_op=(char) LogicalOrOperator;
  (void) SubstituteString(&fx_info->expression,"||",fx_op);
  *fx_op=(char) ExponentialNotation;
  (void) SubstituteString(&fx_info->expression,"**",fx_op);
  return(fx_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A d d N o i s e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AddNoiseImage() adds random noise to the image.
%
%  The format of the AddNoiseImage method is:
%
%      Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
%        ExceptionInfo *exception)
%      Image *AddNoiseImageChannel(const Image *image,const ChannelType channel,
%        const NoiseType noise_type,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o noise_type:  The type of noise: Uniform, Gaussian, Multiplicative,
%      Impulse, Laplacian, or Poisson.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Convenience wrapper: add noise to all default channels. */
MagickExport Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
  ExceptionInfo *exception)
{
  Image
    *noise_image;

  noise_image=AddNoiseImageChannel(image,DefaultChannels,noise_type,exception);
  return(noise_image);
}

MagickExport Image *AddNoiseImageChannel(const Image *image,
  const ChannelType channel,const NoiseType noise_type,ExceptionInfo *exception)
{
#define AddNoiseImageTag "AddNoise/Image"

  CacheView
    *image_view,
    *noise_view;

  const char
    *option;

  Image
    *noise_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    attenuate;

  RandomInfo
    **restrict random_info;

  ssize_t
    y;

  /*
    Initialize noise image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  noise_image=CloneImage(image,0,0,MagickTrue,exception);
  if (noise_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(noise_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&noise_image->exception);
      noise_image=DestroyImage(noise_image);
      return((Image *) NULL);
    }
  /*
    Add noise in each row.
  */
  attenuate=1.0;
  option=GetImageArtifact(image,"attenuate");  /* optional noise scale factor */
  if (option != (char *) NULL)
    attenuate=StringToDouble(option,(char **) NULL);
  status=MagickTrue;
  progress=0;
  /* one RandomInfo per OpenMP thread so rows can generate noise in parallel */
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireCacheView(image);
  noise_view=AcquireCacheView(noise_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict noise_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
      exception);
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    noise_indexes=GetCacheViewAuthenticIndexQueue(noise_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* perturb only the channels the caller selected */
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(GenerateDifferentialNoise(
          random_info[id],GetPixelRed(p),noise_type,attenuate)));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(GenerateDifferentialNoise(
          random_info[id],GetPixelGreen(p),noise_type,attenuate)));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(GenerateDifferentialNoise(
          random_info[id],GetPixelBlue(p),noise_type,attenuate)));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,ClampToQuantum(GenerateDifferentialNoise(
          random_info[id],GetPixelOpacity(p),noise_type,attenuate)));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(noise_indexes+x,ClampToQuantum(
          GenerateDifferentialNoise(random_info[id],GetPixelIndex(
          indexes+x),noise_type,attenuate)));
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(noise_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_AddNoiseImage)
#endif
        proceed=SetImageProgress(image,AddNoiseImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;  /* progress monitor requested cancellation */
      }
  }
  noise_view=DestroyCacheView(noise_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    noise_image=DestroyImage(noise_image);
  return(noise_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B l u e S h i f t I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BlueShiftImage() mutes the colors of the image to simulate a scene at
%  nighttime in the moonlight.
%
%  The format of the BlueShiftImage method is:
%
%      Image *BlueShiftImage(const Image *image,const double factor,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o factor: the shift factor.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BlueShiftImage(const Image *image,const double factor,
  ExceptionInfo *exception)
{
#define BlueShiftImageTag "BlueShift/Image"

  CacheView
    *image_view,
    *shift_view;

  Image
    *shift_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate blue shift image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  shift_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (shift_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(shift_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&shift_image->exception);
      shift_image=DestroyImage(shift_image);
      return((Image *) NULL);
    }
  /*
    Blue-shift DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  shift_view=AcquireCacheView(shift_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    MagickPixelPacket
      pixel;

    Quantum
      quantum;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(shift_view,0,y,shift_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* first pass: blend each channel toward the minimum RGB component */
      quantum=GetPixelRed(p);
      if (GetPixelGreen(p) < quantum)
        quantum=GetPixelGreen(p);
      if (GetPixelBlue(p) < quantum)
        quantum=GetPixelBlue(p);
      pixel.red=0.5*(GetPixelRed(p)+factor*quantum);
      pixel.green=0.5*(GetPixelGreen(p)+factor*quantum);
      pixel.blue=0.5*(GetPixelBlue(p)+factor*quantum);
      /* second pass: blend the result toward the maximum RGB component */
      quantum=GetPixelRed(p);
      if (GetPixelGreen(p) > quantum)
        quantum=GetPixelGreen(p);
      if (GetPixelBlue(p) > quantum)
        quantum=GetPixelBlue(p);
      pixel.red=0.5*(pixel.red+factor*quantum);
      pixel.green=0.5*(pixel.green+factor*quantum);
      pixel.blue=0.5*(pixel.blue+factor*quantum);
      SetPixelRed(q,ClampToQuantum(pixel.red));
      SetPixelGreen(q,ClampToQuantum(pixel.green));
      SetPixelBlue(q,ClampToQuantum(pixel.blue));
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(shift_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_BlueShiftImage)
#endif
        proceed=SetImageProgress(image,BlueShiftImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  shift_view=DestroyCacheView(shift_view);
  if (status == MagickFalse)
    shift_image=DestroyImage(shift_image);
  return(shift_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C h a r c o a l I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CharcoalImage() creates a new image that is a copy of an existing one with
%  the edge highlighted.  It allocates the memory necessary for the new Image
%  structure and returns a pointer to the new image.
%
%  The format of the CharcoalImage method is:
%
%      Image *CharcoalImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CharcoalImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *blur_image,
    *edge_image,
    *gray_image;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    Charcoal pipeline: grayscale copy -> edge detect -> blur ->
    normalize -> negate -> grayscale.  Each intermediate image is
    destroyed as soon as the next stage has consumed it.
  */
  gray_image=CloneImage(image,0,0,MagickTrue,exception);
  if (gray_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageType(gray_image,GrayscaleType);
  edge_image=EdgeImage(gray_image,radius,exception);
  gray_image=DestroyImage(gray_image);
  if (edge_image == (Image *) NULL)
    return((Image *) NULL);
  blur_image=BlurImage(edge_image,radius,sigma,exception);
  edge_image=DestroyImage(edge_image);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  (void) NormalizeImage(blur_image);
  (void) NegateImage(blur_image,MagickFalse);
  (void) SetImageType(blur_image,GrayscaleType);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o l o r i z e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ColorizeImage() blends the fill color with each pixel in the image.
%  A percentage blend is specified with opacity.  Control the application
%  of different color components by specifying a different percentage for
%  each component (e.g. 90/100/10 is 90% red, 100% green, and 10% blue).
%
%  The format of the ColorizeImage method is:
%
%      Image *ColorizeImage(const Image *image,const char *opacity,
%        const PixelPacket colorize,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o opacity: A character string indicating the level of opacity as a
%      percentage.
%
%    o colorize: A color value.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ColorizeImage(const Image *image,const char *opacity,
  const PixelPacket colorize,ExceptionInfo *exception)
{
#define ColorizeImageTag "Colorize/Image"

  CacheView
    *colorize_view,
    *image_view;

  GeometryInfo
    geometry_info;

  Image
    *colorize_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    pixel;  /* per-channel blend percentages parsed from 'opacity' */

  MagickStatusType
    flags;

  ssize_t
    y;

  /*
    Allocate colorized image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  colorize_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (colorize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(colorize_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&colorize_image->exception);
      colorize_image=DestroyImage(colorize_image);
      return((Image *) NULL);
    }
  /* no opacity string: return an unmodified clone */
  if (opacity == (const char *) NULL)
    return(colorize_image);
  /*
    Determine RGB values of the pen color.  A single rho value applies to
    all channels; sigma/xi/psi override green/blue/opacity respectively.
  */
  flags=ParseGeometry(opacity,&geometry_info);
  pixel.red=geometry_info.rho;
  pixel.green=geometry_info.rho;
  pixel.blue=geometry_info.rho;
  pixel.opacity=(MagickRealType) OpaqueOpacity;
  if ((flags & SigmaValue) != 0)
    pixel.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    pixel.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    pixel.opacity=geometry_info.psi;
  /*
    Colorize DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  colorize_view=AcquireCacheView(colorize_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(colorize_view,0,y,colorize_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* weighted average of source pixel and fill color, per channel */
      SetPixelRed(q,((GetPixelRed(p)*(100.0-pixel.red)+
        colorize.red*pixel.red)/100.0));
      SetPixelGreen(q,((GetPixelGreen(p)*(100.0-pixel.green)+
        colorize.green*pixel.green)/100.0));
      SetPixelBlue(q,((GetPixelBlue(p)*(100.0-pixel.blue)+
        colorize.blue*pixel.blue)/100.0));
      SetPixelOpacity(q,((GetPixelOpacity(p)*(100.0-
        pixel.opacity)+colorize.opacity*pixel.opacity)/100.0));
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(colorize_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ColorizeImage)
#endif
        proceed=SetImageProgress(image,ColorizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  colorize_view=DestroyCacheView(colorize_view);
  if (status == MagickFalse)
    colorize_image=DestroyImage(colorize_image);
  return(colorize_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o l o r M a t r i x I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ColorMatrixImage() applies color transformation
to an image.  This method
%  permits saturation changes, hue rotation, luminance to alpha, and various
%  other effects.  Although variable-sized transformation matrices can be used,
%  typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
%  (or RGBA with offsets).  The matrix is similar to those used by Adobe Flash
%  except offsets are in column 6 rather than 5 (in support of CMYKA images)
%  and offsets are normalized (divide Flash offset by 255).
%
%  The format of the ColorMatrixImage method is:
%
%      Image *ColorMatrixImage(const Image *image,
%        const KernelInfo *color_matrix,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o color_matrix: the color matrix.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ColorMatrixImage(const Image *image,
  const KernelInfo *color_matrix,ExceptionInfo *exception)
{
#define ColorMatrixImageTag "ColorMatrix/Image"

  CacheView
    *color_view,
    *image_view;

  double
    ColorMatrix[6][6] =  /* identity; user values are copied over it below */
    {
      { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 1.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 }
    };

  Image
    *color_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    u,
    v,
    y;

  /*
    Create color matrix: copy the user-supplied kernel into the top-left of
    the 6x6 identity; entries beyond 6x6 are read (to keep i in step with
    color_matrix->values) but discarded.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  i=0;
  for (v=0; v < (ssize_t) color_matrix->height; v++)
    for (u=0; u < (ssize_t) color_matrix->width; u++)
    {
      if ((v < 6) && (u < 6))
        ColorMatrix[v][u]=color_matrix->values[i];
      i++;
    }
  /*
    Initialize color image.
  */
  color_image=CloneImage(image,0,0,MagickTrue,exception);
  if (color_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(color_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&color_image->exception);
      color_image=DestroyImage(color_image);
      return((Image *) NULL);
    }
  if (image->debug != MagickFalse)
    {
      char
        format[MaxTextExtent],
        *message;

      /* log the effective 6x6 matrix, one row per line */
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  ColorMatrix image with color matrix:");
      message=AcquireString("");
      for (v=0; v < 6; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MaxTextExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < 6; u++)
        {
          (void) FormatLocaleString(format,MaxTextExtent,"%+f ",
            ColorMatrix[v][u]);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  /*
    ColorMatrix image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  color_view=AcquireCacheView(color_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickRealType
      pixel;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    register IndexPacket
      *restrict color_indexes;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(color_view,0,y,color_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    color_indexes=GetCacheViewAuthenticIndexQueue(color_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        v;

      size_t
        height;

      height=color_matrix->height > 6 ? 6UL : color_matrix->height;
      for (v=0; v < (ssize_t) height; v++)
      {
        /* row v of the matrix produces output channel v */
        pixel=ColorMatrix[v][0]*GetPixelRed(p)+ColorMatrix[v][1]*
          GetPixelGreen(p)+ColorMatrix[v][2]*GetPixelBlue(p);
        if (image->matte != MagickFalse)
          pixel+=ColorMatrix[v][3]*(QuantumRange-GetPixelOpacity(p));
        if (image->colorspace == CMYKColorspace)
          pixel+=ColorMatrix[v][4]*GetPixelIndex(indexes+x);
        pixel+=QuantumRange*ColorMatrix[v][5];  /* column 6: normalized offset */
        switch (v)
        {
          case 0: SetPixelRed(q,ClampToQuantum(pixel)); break;
          case 1: SetPixelGreen(q,ClampToQuantum(pixel)); break;
          case 2: SetPixelBlue(q,ClampToQuantum(pixel)); break;
          case 3:
          {
            if (image->matte != MagickFalse)
              SetPixelAlpha(q,ClampToQuantum(pixel));
            break;
          }
          case 4:
          {
            if (image->colorspace == CMYKColorspace)
              SetPixelIndex(color_indexes+x,ClampToQuantum(pixel));
            break;
          }
        }
      }
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(color_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ColorMatrixImage)
#endif
        proceed=SetImageProgress(image,ColorMatrixImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  color_view=DestroyCacheView(color_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    color_image=DestroyImage(color_image);
  return(color_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y F x I n f o                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyFxInfo() deallocates memory associated with an FxInfo structure.
%
%  The format of the DestroyFxInfo method is:
%
%      ImageInfo *DestroyFxInfo(ImageInfo *fx_info)
%
%  A description of each parameter follows:
%
%    o fx_info: the fx info.
%
*/
MagickExport FxInfo *DestroyFxInfo(FxInfo *fx_info)
{
  register ssize_t
    i;

  /* release members in reverse order of AcquireFxInfo() */
  fx_info->exception=DestroyExceptionInfo(fx_info->exception);
  fx_info->expression=DestroyString(fx_info->expression);
  fx_info->symbols=DestroySplayTree(fx_info->symbols);
  fx_info->colors=DestroySplayTree(fx_info->colors);
  for (i=(ssize_t) GetImageListLength(fx_info->images)-1; i >= 0; i--)
    fx_info->view[i]=DestroyCacheView(fx_info->view[i]);
  fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view);
  fx_info->random_info=DestroyRandomInfo(fx_info->random_info);
  fx_info=(FxInfo *) RelinquishMagickMemory(fx_info);
  return(fx_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   F x E v a l u a t e C h a n n e l E x p r e s s i o n                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FxEvaluateChannelExpression() evaluates an expression and returns the
%  results.
%
%  The format of the FxEvaluateExpression method is:
%
%      MagickRealType FxEvaluateChannelExpression(FxInfo *fx_info,
%        const ChannelType channel,const ssize_t x,const ssize_t y,
%        MagickRealType *alpha,Exceptioninfo *exception)
%      MagickRealType FxEvaluateExpression(FxInfo *fx_info,
%        MagickRealType *alpha,Exceptioninfo *exception)
%
%  A description of each parameter follows:
%
%    o fx_info: the fx info.
%
%    o channel: the channel.
%
%    o x,y: the pixel position.
%
%    o alpha: the result.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static inline double MagickMax(const double x,const double y)
{
  if (x > y)
    return(x);
  return(y);
}

static inline double MagickMin(const double x,const double y)
{
  if (x < y)
    return(x);
  return(y);
}

/*
  Compute (and cache in fx_info->symbols) a per-channel image statistic
  named by symbol, e.g. "mean", "depth.r", "standard_deviation".  Returns
  the statistic scaled by QuantumScale.
*/
static MagickRealType FxChannelStatistics(FxInfo *fx_info,const Image *image,
  ChannelType channel,const char *symbol,ExceptionInfo *exception)
{
  char
    key[MaxTextExtent],
    statistic[MaxTextExtent];

  const char
    *value;

  register const char
    *p;

  /* an optional ".r"/".g"/... suffix overrides the requested channel */
  for (p=symbol; (*p != '.') && (*p != '\0'); p++) ;
  if (*p == '.')
    switch (*++p)  /* e.g. depth.r */
    {
      case 'r': channel=RedChannel; break;
      case 'g': channel=GreenChannel; break;
      case 'b': channel=BlueChannel; break;
      case 'c': channel=CyanChannel; break;
      case 'm': channel=MagentaChannel; break;
      case 'y': channel=YellowChannel; break;
      case 'k': channel=BlackChannel; break;
      default: break;
    }
  /* cache key is unique per image instance, channel, and symbol */
  (void) FormatLocaleString(key,MaxTextExtent,"%p.%.20g.%s",(void *) image,
    (double) channel,symbol);
  value=(const char *) GetValueFromSplayTree(fx_info->symbols,key);
  if (value != (const char *) NULL)
    return(QuantumScale*StringToDouble(value,(char **) NULL));
  (void) DeleteNodeFromSplayTree(fx_info->symbols,key);
  /*
    NOTE(review): if symbol matches none of the prefixes below, 'statistic'
    is used uninitialized by AddValueToSplayTree/StringToDouble — looks like
    callers only pass known statistic names; confirm and consider
    initializing statistic[0]='\0'.
  */
  if (LocaleNCompare(symbol,"depth",5) == 0)
    {
      size_t
        depth;

      depth=GetImageChannelDepth(image,channel,exception);
      (void) FormatLocaleString(statistic,MaxTextExtent,"%.20g",(double)
        depth);
    }
  if (LocaleNCompare(symbol,"kurtosis",8) == 0)
    {
      double
        kurtosis,
        skewness;

      (void) GetImageChannelKurtosis(image,channel,&kurtosis,&skewness,
        exception);
      (void) FormatLocaleString(statistic,MaxTextExtent,"%g",kurtosis);
    }
  if (LocaleNCompare(symbol,"maxima",6) == 0)
    {
      double
        maxima,
        minima;

      (void) GetImageChannelRange(image,channel,&minima,&maxima,exception);
      (void) FormatLocaleString(statistic,MaxTextExtent,"%g",maxima);
    }
  if (LocaleNCompare(symbol,"mean",4) == 0)
    {
      double
        mean,
        standard_deviation;

      (void) GetImageChannelMean(image,channel,&mean,&standard_deviation,
        exception);
      (void) FormatLocaleString(statistic,MaxTextExtent,"%g",mean);
    }
  if (LocaleNCompare(symbol,"minima",6) == 0)
    {
      double
        maxima,
        minima;

      (void) GetImageChannelRange(image,channel,&minima,&maxima,exception);
      (void) FormatLocaleString(statistic,MaxTextExtent,"%g",minima);
    }
  if (LocaleNCompare(symbol,"skewness",8) == 0)
    {
      double
        kurtosis,
        skewness;

      (void) GetImageChannelKurtosis(image,channel,&kurtosis,&skewness,
        exception);
      (void) FormatLocaleString(statistic,MaxTextExtent,"%g",skewness);
    }
  if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
    {
      double
        mean,
        standard_deviation;

      (void) GetImageChannelMean(image,channel,&mean,&standard_deviation,
        exception);
      (void) FormatLocaleString(statistic,MaxTextExtent,"%g",
        standard_deviation);
    }
  (void) AddValueToSplayTree(fx_info->symbols,ConstantString(key),
    ConstantString(statistic));
  return(QuantumScale*StringToDouble(statistic,(char **) NULL));
}

static MagickRealType
  FxEvaluateSubexpression(FxInfo *,const ChannelType,const ssize_t,
    const ssize_t,const char *,MagickRealType *,ExceptionInfo *);

/* Euclid's algorithm; used by the fx expression evaluator. */
static MagickOffsetType FxGCD(MagickOffsetType alpha,MagickOffsetType beta)
{
  if (beta != 0)
    return(FxGCD(beta,alpha % beta));
  return(alpha);
}

/*
  Return a pointer to the ')' that closes the parenthesized subexpression
  starting at 'expression'; reports UnbalancedParenthesis if the string
  ends first.
*/
static inline const char *FxSubexpression(const char *expression,
  ExceptionInfo *exception)
{
  const char
    *subexpression;

  register ssize_t
    level;

  level=0;
  subexpression=expression;
  while ((*subexpression != '\0') &&
         ((level != 1) || (strchr(")",(int) *subexpression) == (char *) NULL)))
  {
    if (strchr("(",(int) *subexpression) != (char *) NULL)
      level++;
    else
      if (strchr(")",(int) *subexpression) != (char *) NULL)
        level--;
    subexpression++;
  }
  if (*subexpression == '\0')
    (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
      "UnbalancedParenthesis","`%s'",expression);
  return(subexpression);
}

/*
  Resolve a symbol in an fx expression (e.g. "r", "u[2].g", "p{12,34}.b",
  "mean", "hue", a color name, ...) to its numeric value for pixel (x,y).
*/
static MagickRealType FxGetSymbol(FxInfo *fx_info,const ChannelType channel,
  const ssize_t x,const ssize_t y,const char *expression,
  ExceptionInfo *exception)
{
  char
    *q,
    subexpression[MaxTextExtent],
    symbol[MaxTextExtent];

  const char
    *p,
    *value;

  Image
    *image;

  MagickPixelPacket
    pixel;

  MagickRealType
    alpha,
    beta;

  PointInfo
    point;

  register ssize_t
    i;

  size_t
    length;

  size_t
    level;

  p=expression;
  i=GetImageIndexInList(fx_info->images);
  level=0;
  point.x=(double) x;
  point.y=(double) y;
  /*
    Parse an optional image selector ("s", "u", "v", "u[expr]") and pixel
    locator ("p{x,y}" absolute or "p[dx,dy]" relative) prefix.
  */
  if (isalpha((int) *(p+1)) == 0)
    {
      if (strchr("suv",(int) *p) != (char *) NULL)
        {
          switch (*p)
          {
            case 's':
            default:
            {
              i=GetImageIndexInList(fx_info->images);
              break;
            }
            case 'u': i=0; break;
            case 'v': i=1; break;
          }
          p++;
          if (*p == '[')
            {
              /* u[expr]: evaluate the bracketed index expression */
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '[')
                  level++;
                else
                  if (*p == ']')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                &beta,exception);
              i=(ssize_t) (alpha+0.5);  /* round to nearest image index */
              p++;
            }
          if (*p == '.')
            p++;
        }
      if ((isalpha((int) *(p+1)) == 0) && (*p == 'p'))
        {
          p++;
          if (*p == '{')
            {
              /* p{x,y}: absolute pixel coordinates */
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '{')
                  level++;
                else
                  if (*p == '}')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                &beta,exception);
              point.x=alpha;
              point.y=beta;
              p++;
            }
          else
            if (*p == '[')
              {
                /* p[dx,dy]: offsets relative to the current pixel */
                level++;
                q=subexpression;
                for (p++; *p != '\0'; )
                {
                  if (*p == '[')
                    level++;
                  else
                    if (*p == ']')
                      {
                        level--;
                        if (level == 0)
                          break;
                      }
                  *q++=(*p++);
                }
                *q='\0';
                alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                  &beta,exception);
                point.x+=alpha;
                point.y+=beta;
                p++;
              }
          if (*p == '.')
            p++;
        }
    }
  /* wrap the image index into [0,length) */
  length=GetImageListLength(fx_info->images);
  while (i < 0)
    i+=(ssize_t) length;
  i%=length;
  image=GetImageFromList(fx_info->images,i);
  if (image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "NoSuchImage","`%s'",expression);
      return(0.0);
    }
  GetMagickPixelPacket(image,&pixel);
  (void) InterpolateMagickPixelPacket(image,fx_info->view[i],image->interpolate,
    point.x,point.y,&pixel,exception);
  if ((strlen(p) > 2) && (LocaleCompare(p,"intensity") != 0) &&
      (LocaleCompare(p,"luminance") != 0) && (LocaleCompare(p,"hue") != 0) &&
      (LocaleCompare(p,"saturation") != 0) &&
      (LocaleCompare(p,"lightness") != 0))
    {
      char
        name[MaxTextExtent];

      /*
        The symbol may be a color name (e.g. "red.g"): strip a trailing
        ".channel" suffix and look the name up in the color cache.
      */
      (void) CopyMagickString(name,p,MaxTextExtent);
      for (q=name+(strlen(name)-1); q > name; q--)
      {
        if (*q == ')')
          break;
        if (*q == '.')
          {
            *q='\0';
            break;
          }
      }
      if ((strlen(name) > 2) &&
          (GetValueFromSplayTree(fx_info->symbols,name) == (const char *) NULL))
        {
          MagickPixelPacket
            *color;

          color=(MagickPixelPacket *) GetValueFromSplayTree(fx_info->colors,
            name);
          if (color != (MagickPixelPacket *) NULL)
            {
              pixel=(*color);
              p+=strlen(name);
            }
          else
            if (QueryMagickColor(name,&pixel,fx_info->exception) != MagickFalse)
              {
                (void) AddValueToSplayTree(fx_info->colors,ConstantString(name),
                  CloneMagickPixelPacket(&pixel));
                p+=strlen(name);
              }
        }
    }
  (void) CopyMagickString(symbol,p,MaxTextExtent);
  StripString(symbol);
  if (*symbol == '\0')
    {
      /* bare pixel reference: return the requested channel of the pixel */
      switch (channel)
      {
        case RedChannel: return(QuantumScale*pixel.red);
        case GreenChannel: return(QuantumScale*pixel.green);
        case BlueChannel: return(QuantumScale*pixel.blue);
        case OpacityChannel:
        {
          MagickRealType
            alpha;

          if (pixel.matte == MagickFalse)
            return(1.0);
          alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(&pixel));
          return(alpha);
        }
        case IndexChannel:
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                ImageError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.index);
        }
        case DefaultChannels:
        {
          return(QuantumScale*MagickPixelIntensityToQuantum(&pixel));
        }
        default:
          break;
      }
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "UnableToParseExpression","`%s'",p);
      return(0.0);
    }
  /* named symbols, dispatched on the first character */
  switch (*symbol)
  {
    case 'A':
    case 'a':
    {
      if (LocaleCompare(symbol,"a") == 0)
        return((MagickRealType) (QuantumScale*GetPixelAlpha(&pixel)));
      break;
    }
    case 'B':
    case 'b':
    {
      if (LocaleCompare(symbol,"b") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'C':
    case 'c':
    {
      if (LocaleNCompare(symbol,"channel",7) == 0)
        {
          GeometryInfo
            channel_info;

          MagickStatusType
            flags;

          /* channel(r,g,b,...): pick the argument for the active channel */
          flags=ParseGeometry(symbol+7,&channel_info);
          if (image->colorspace == CMYKColorspace)
            switch (channel)
            {
              case CyanChannel:
              {
                if ((flags & RhoValue) == 0)
                  return(0.0);
                return(channel_info.rho);
              }
              case MagentaChannel:
              {
                if ((flags & SigmaValue) == 0)
                  return(0.0);
                return(channel_info.sigma);
              }
              case YellowChannel:
              {
                if ((flags & XiValue) == 0)
                  return(0.0);
                return(channel_info.xi);
              }
              case BlackChannel:
              {
                if ((flags & PsiValue) == 0)
                  return(0.0);
                return(channel_info.psi);
              }
              case OpacityChannel:
              {
                if ((flags & ChiValue) == 0)
                  return(0.0);
                return(channel_info.chi);
              }
              default:
                return(0.0);
            }
          switch (channel)
          {
            case RedChannel:
            {
              if ((flags & RhoValue) == 0)
                return(0.0);
              return(channel_info.rho);
            }
            case GreenChannel:
            {
              if ((flags & SigmaValue) == 0)
                return(0.0);
              return(channel_info.sigma);
            }
            case BlueChannel:
            {
              if ((flags & XiValue) == 0)
                return(0.0);
              return(channel_info.xi);
            }
            case OpacityChannel:
            {
              if ((flags & PsiValue) == 0)
                return(0.0);
              return(channel_info.psi);
            }
            case IndexChannel:
            {
              if ((flags & ChiValue) == 0)
                return(0.0);
              return(channel_info.chi);
            }
            default:
              return(0.0);
          }
          return(0.0);
        }
      if (LocaleCompare(symbol,"c") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'D':
    case 'd':
    {
      if (LocaleNCompare(symbol,"depth",5) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'G':
    case 'g':
    {
      if (LocaleCompare(symbol,"g") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'K':
    case 'k':
    {
      if (LocaleNCompare(symbol,"kurtosis",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"k") == 0)
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.index);
        }
      break;
    }
    case 'H':
    case 'h':
    {
      if (LocaleCompare(symbol,"h") == 0)
        return((MagickRealType) image->rows);
      if (LocaleCompare(symbol,"hue") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(ClampToQuantum(pixel.red),ClampToQuantum(pixel.green),
            ClampToQuantum(pixel.blue),&hue,&saturation,&lightness);
          return(hue);
        }
      break;
    }
    case 'I':
    case 'i':
    {
      if ((LocaleCompare(symbol,"image.depth") == 0) ||
          (LocaleCompare(symbol,"image.minima") == 0) ||
          (LocaleCompare(symbol,"image.maxima") == 0) ||
          (LocaleCompare(symbol,"image.mean") == 0) ||
          (LocaleCompare(symbol,"image.kurtosis") == 0) ||
          (LocaleCompare(symbol,"image.skewness") == 0) ||
          (LocaleCompare(symbol,"image.standard_deviation") == 0))
        return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception));
      if (LocaleCompare(symbol,"image.resolution.x") == 0)
        return(image->x_resolution);
      if (LocaleCompare(symbol,"image.resolution.y") == 0)
        return(image->y_resolution);
      if (LocaleCompare(symbol,"intensity") == 0)
        return(QuantumScale*MagickPixelIntensityToQuantum(&pixel));
      if (LocaleCompare(symbol,"i") == 0)
        return((MagickRealType) x);
      break;
    }
    case 'J':
    case 'j':
    {
      if (LocaleCompare(symbol,"j") == 0)
        return((MagickRealType) y);
      break;
    }
    case 'L':
    case 'l':
    {
      if (LocaleCompare(symbol,"lightness") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(ClampToQuantum(pixel.red),ClampToQuantum(pixel.green),
            ClampToQuantum(pixel.blue),&hue,&saturation,&lightness);
          return(lightness);
        }
      if (LocaleCompare(symbol,"luminance") == 0)
        {
          double
            luminence;

          /* Rec. 709 luma coefficients */
          luminence=0.2126*pixel.red+0.7152*pixel.green+0.0722*pixel.blue;
          return(QuantumScale*luminence);
        }
      break;
    }
    case 'M':
    case 'm':
    {
      if (LocaleNCompare(symbol,"maxima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"mean",4) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"minima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"m") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'N':
    case 'n':
    {
      if (LocaleCompare(symbol,"n") == 0)
        return((MagickRealType) GetImageListLength(fx_info->images));
      break;
    }
    case 'O':
    case 'o':
    {
      if (LocaleCompare(symbol,"o") == 0)
        return(QuantumScale*pixel.opacity);
      break;
    }
    case 'P':
    case 'p':
    {
      if (LocaleCompare(symbol,"page.height") == 0)
        return((MagickRealType) image->page.height);
      if (LocaleCompare(symbol,"page.width") == 0)
        return((MagickRealType) image->page.width);
      if (LocaleCompare(symbol,"page.x") == 0)
        return((MagickRealType) image->page.x);
      if (LocaleCompare(symbol,"page.y") == 0)
        return((MagickRealType) image->page.y);
      break;
    }
    case 'R':
    case 'r':
    {
      if (LocaleCompare(symbol,"resolution.x") == 0)
        return(image->x_resolution);
      if (LocaleCompare(symbol,"resolution.y") == 0)
        return(image->y_resolution);
      if (LocaleCompare(symbol,"r") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'S':
    case 's':
    {
      if (LocaleCompare(symbol,"saturation") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(ClampToQuantum(pixel.red),ClampToQuantum(pixel.green),
            ClampToQuantum(pixel.blue),&hue,&saturation,&lightness);
          return(saturation);
        }
      if (LocaleNCompare(symbol,"skewness",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'T':
    case 't':
    {
      if (LocaleCompare(symbol,"t") == 0)
        return((MagickRealType) GetImageIndexInList(fx_info->images));
      break;
    }
    case 'W':
    case 'w':
    {
      if (LocaleCompare(symbol,"w") == 0)
        return((MagickRealType) image->columns);
      break;
    }
    case 'Y':
    case 'y':
    {
      if (LocaleCompare(symbol,"y") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'Z':
    case 'z':
    {
      if (LocaleCompare(symbol,"z") == 0)
        {
          MagickRealType
            depth;

          depth=(MagickRealType) GetImageChannelDepth(image,channel,
            fx_info->exception);
          return(depth);
        }
      break;
    }
    default:
      break;
  }
  /* fall back to a user-defined symbol stored in the symbol table */
  value=(const char *) GetValueFromSplayTree(fx_info->symbols,symbol);
  if (value != (const char *) NULL)
    return((MagickRealType) StringToDouble(value,(char **) NULL));
  (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
    "UnableToParseExpression","`%s'",symbol);
  return(0.0);
}

static const char *FxOperatorPrecedence(const char *expression,
  ExceptionInfo *exception)
{
  typedef enum
  {
    UndefinedPrecedence,
    NullPrecedence,
    BitwiseComplementPrecedence,
    ExponentPrecedence,
    ExponentialNotationPrecedence,
    MultiplyPrecedence,
    AdditionPrecedence,
    ShiftPrecedence,
    RelationalPrecedence,
    EquivalencyPrecedence,
    BitwiseAndPrecedence,
    BitwiseOrPrecedence,
    LogicalAndPrecedence,
    LogicalOrPrecedence,
    TernaryPrecedence,
    AssignmentPrecedence,
    CommaPrecedence,
    SeparatorPrecedence
  } FxPrecedence;

  FxPrecedence
    precedence,
    target;

  register const char
    *subexpression;

  register int
    c;

  size_t
    level;

  c=0;
  level=0;
  subexpression=(const char *) NULL;
  target=NullPrecedence;
  while (*expression != '\0')
  {
    precedence=UndefinedPrecedence;
    if ((isspace((int) ((char) *expression)) != 0) || (c == (int) '@'))
      {
        expression++;
        continue;
      }
    /* skip over tokens whose letters must not be mistaken for operators */
    switch (*expression)
    {
      case 'A':
      case 'a':
      {
#if defined(MAGICKCORE_HAVE_ACOSH)
        if (LocaleNCompare(expression,"acosh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
        if (LocaleNCompare(expression,"asinh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ATANH)
        if (LocaleNCompare(expression,"atanh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
        if (LocaleNCompare(expression,"atan2",5) == 0)
          {
            expression+=5;
            break;
          }
        break;
      }
      case 'E':
      case 'e':
      {
        if ((LocaleNCompare(expression,"E+",2) == 0) ||
            (LocaleNCompare(expression,"E-",2) == 0))
          {
            expression+=2;  /* scientific notation */
            break;
          }
      }
      case 'J':
      case 'j':
      {
        if ((LocaleNCompare(expression,"j0",2) == 0) ||
            (LocaleNCompare(expression,"j1",2) == 0))
          {
            expression+=2;
            break;
          }
        break;
      }
      case '#':
      {
        /* hex constant: consume its digits */
        while (isxdigit((int) ((unsigned char) *(expression+1))) != 0)
          expression++;
        break;
      }
      default:
        break;
    }
    if ((c == (int) '{') || (c == (int) '['))
      level++;
    else
      if ((c == (int) '}') || (c == (int) ']'))
        level--;
    if (level == 0)
      switch ((unsigned
char) *expression) { case '~': case '!': { precedence=BitwiseComplementPrecedence; break; } case '^': case '@': { precedence=ExponentPrecedence; break; } default: { if (((c != 0) && ((isdigit((int) ((char) c)) != 0) || (strchr(")",c) != (char *) NULL))) && (((islower((int) ((char) *expression)) != 0) || (strchr("(",(int) *expression) != (char *) NULL)) || ((isdigit((int) ((char) c)) == 0) && (isdigit((int) ((char) *expression)) != 0))) && (strchr("xy",(int) *expression) == (char *) NULL)) precedence=MultiplyPrecedence; break; } case '*': case '/': case '%': { precedence=MultiplyPrecedence; break; } case '+': case '-': { if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) || (isalpha(c) != 0)) precedence=AdditionPrecedence; break; } case LeftShiftOperator: case RightShiftOperator: { precedence=ShiftPrecedence; break; } case '<': case LessThanEqualOperator: case GreaterThanEqualOperator: case '>': { precedence=RelationalPrecedence; break; } case EqualOperator: case NotEqualOperator: { precedence=EquivalencyPrecedence; break; } case '&': { precedence=BitwiseAndPrecedence; break; } case '|': { precedence=BitwiseOrPrecedence; break; } case LogicalAndOperator: { precedence=LogicalAndPrecedence; break; } case LogicalOrOperator: { precedence=LogicalOrPrecedence; break; } case ExponentialNotation: { precedence=ExponentialNotationPrecedence; break; } case ':': case '?': { precedence=TernaryPrecedence; break; } case '=': { precedence=AssignmentPrecedence; break; } case ',': { precedence=CommaPrecedence; break; } case ';': { precedence=SeparatorPrecedence; break; } } if ((precedence == BitwiseComplementPrecedence) || (precedence == TernaryPrecedence) || (precedence == AssignmentPrecedence)) { if (precedence > target) { /* Right-to-left associativity. */ target=precedence; subexpression=expression; } } else if (precedence >= target) { /* Left-to-right associativity. 
*/ target=precedence; subexpression=expression; } if (strchr("(",(int) *expression) != (char *) NULL) expression=FxSubexpression(expression,exception); c=(int) (*expression++); } return(subexpression); } static MagickRealType FxEvaluateSubexpression(FxInfo *fx_info, const ChannelType channel,const ssize_t x,const ssize_t y, const char *expression,MagickRealType *beta,ExceptionInfo *exception) { char *q, subexpression[MaxTextExtent]; MagickRealType alpha, gamma; register const char *p; *beta=0.0; if (exception->severity != UndefinedException) return(0.0); while (isspace((int) *expression) != 0) expression++; if (*expression == '\0') { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "MissingExpression","`%s'",expression); return(0.0); } *subexpression='\0'; p=FxOperatorPrecedence(expression,exception); if (p != (const char *) NULL) { (void) CopyMagickString(subexpression,expression,(size_t) (p-expression+1)); alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,beta, exception); switch ((unsigned char) *p) { case '~': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); *beta=(MagickRealType) (~(size_t) *beta); return(*beta); } case '!': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); return(*beta == 0.0 ? 
1.0 : 0.0); } case '^': { *beta=pow((double) alpha,(double) FxEvaluateSubexpression(fx_info, channel,x,y,++p,beta,exception)); return(*beta); } case '*': case ExponentialNotation: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); return(alpha*(*beta)); } case '/': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); if (*beta == 0.0) { if (exception->severity == UndefinedException) (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"DivideByZero","`%s'",expression); return(0.0); } return(alpha/(*beta)); } case '%': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); *beta=fabs(floor(((double) *beta)+0.5)); if (*beta == 0.0) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"DivideByZero","`%s'",expression); return(0.0); } return(fmod((double) alpha,(double) *beta)); } case '+': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); return(alpha+(*beta)); } case '-': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); return(alpha-(*beta)); } case LeftShiftOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); *beta=(MagickRealType) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5)); return(*beta); } case RightShiftOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); *beta=(MagickRealType) ((size_t) (alpha+0.5) >> (size_t) (gamma+0.5)); return(*beta); } case '<': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); return(alpha < *beta ? 1.0 : 0.0); } case LessThanEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); return(alpha <= *beta ? 1.0 : 0.0); } case '>': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); return(alpha > *beta ? 1.0 : 0.0); } case GreaterThanEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); return(alpha >= *beta ? 
1.0 : 0.0); } case EqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); return(fabs(alpha-(*beta)) <= MagickEpsilon ? 1.0 : 0.0); } case NotEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); return(fabs(alpha-(*beta)) > MagickEpsilon ? 1.0 : 0.0); } case '&': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); *beta=(MagickRealType) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5)); return(*beta); } case '|': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); *beta=(MagickRealType) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5)); return(*beta); } case LogicalAndOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); *beta=(alpha > 0.0) && (gamma > 0.0) ? 1.0 : 0.0; return(*beta); } case LogicalOrOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); *beta=(alpha > 0.0) || (gamma > 0.0) ? 1.0 : 0.0; return(*beta); } case '?': { MagickRealType gamma; (void) CopyMagickString(subexpression,++p,MaxTextExtent); q=subexpression; p=StringToken(":",&q); if (q == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); return(0.0); } if (fabs((double) alpha) > MagickEpsilon) gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,beta,exception); else gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q,beta,exception); return(gamma); } case '=': { char numeric[MaxTextExtent]; q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); return(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); (void) FormatLocaleString(numeric,MaxTextExtent,"%g",(double) *beta); (void) DeleteNodeFromSplayTree(fx_info->symbols,subexpression); (void) 
AddValueToSplayTree(fx_info->symbols,ConstantString( subexpression),ConstantString(numeric)); return(*beta); } case ',': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); return(alpha); } case ';': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception); return(*beta); } default: { gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,p,beta, exception); return(gamma); } } } if (strchr("(",(int) *expression) != (char *) NULL) { (void) CopyMagickString(subexpression,expression+1,MaxTextExtent); subexpression[strlen(subexpression)-1]='\0'; gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,beta, exception); return(gamma); } switch (*expression) { case '+': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,beta, exception); return(1.0*gamma); } case '-': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,beta, exception); return(-1.0*gamma); } case '~': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,beta, exception); return((MagickRealType) (~(size_t) (gamma+0.5))); } case 'A': case 'a': { if (LocaleNCompare(expression,"abs",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta, exception); return((MagickRealType) fabs((double) alpha)); } #if defined(MAGICKCORE_HAVE_ACOSH) if (LocaleNCompare(expression,"acosh",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta, exception); return((MagickRealType) acosh((double) alpha)); } #endif if (LocaleNCompare(expression,"acos",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta, exception); return((MagickRealType) acos((double) alpha)); } #if defined(MAGICKCORE_HAVE_J1) if (LocaleNCompare(expression,"airy",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta, exception); if (alpha == 0.0) return(1.0); gamma=2.0*j1((double) (MagickPI*alpha))/(MagickPI*alpha); return(gamma*gamma); } #endif #if defined(MAGICKCORE_HAVE_ASINH) if 
(LocaleNCompare(expression,"asinh",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta, exception); return((MagickRealType) asinh((double) alpha)); } #endif if (LocaleNCompare(expression,"asin",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta, exception); return((MagickRealType) asin((double) alpha)); } if (LocaleNCompare(expression,"alt",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta, exception); return(((ssize_t) alpha) & 0x01 ? -1.0 : 1.0); } if (LocaleNCompare(expression,"atan2",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta, exception); return((MagickRealType) atan2((double) alpha,(double) *beta)); } #if defined(MAGICKCORE_HAVE_ATANH) if (LocaleNCompare(expression,"atanh",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta, exception); return((MagickRealType) atanh((double) alpha)); } #endif if (LocaleNCompare(expression,"atan",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta, exception); return((MagickRealType) atan((double) alpha)); } if (LocaleCompare(expression,"a") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'B': case 'b': { if (LocaleCompare(expression,"b") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'C': case 'c': { if (LocaleNCompare(expression,"ceil",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta, exception); return((MagickRealType) ceil((double) alpha)); } if (LocaleNCompare(expression,"cosh",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta, exception); return((MagickRealType) cosh((double) alpha)); } if (LocaleNCompare(expression,"cos",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta, exception); return((MagickRealType) cos((double) alpha)); } if (LocaleCompare(expression,"c") == 0) 
return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'D': case 'd': { if (LocaleNCompare(expression,"debug",5) == 0) { const char *type; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta, exception); if (fx_info->images->colorspace == CMYKColorspace) switch (channel) { case CyanChannel: type="cyan"; break; case MagentaChannel: type="magenta"; break; case YellowChannel: type="yellow"; break; case OpacityChannel: type="opacity"; break; case BlackChannel: type="black"; break; default: type="unknown"; break; } else switch (channel) { case RedChannel: type="red"; break; case GreenChannel: type="green"; break; case BlueChannel: type="blue"; break; case OpacityChannel: type="opacity"; break; default: type="unknown"; break; } (void) CopyMagickString(subexpression,expression+6,MaxTextExtent); if (strlen(subexpression) > 1) subexpression[strlen(subexpression)-1]='\0'; if (fx_info->file != (FILE *) NULL) (void) FormatLocaleFile(fx_info->file, "%s[%.20g,%.20g].%s: %s=%.*g\n",fx_info->images->filename, (double) x,(double) y,type,subexpression,GetMagickPrecision(), (double) alpha); return(0.0); } if (LocaleNCompare(expression,"drc",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta, exception); return((MagickRealType) (alpha/(*beta*(alpha-1.0)+1.0))); } break; } case 'E': case 'e': { if (LocaleCompare(expression,"epsilon") == 0) return((MagickRealType) MagickEpsilon); if (LocaleNCompare(expression,"exp",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta, exception); return((MagickRealType) exp((double) alpha)); } if (LocaleCompare(expression,"e") == 0) return((MagickRealType) 2.7182818284590452354); break; } case 'F': case 'f': { if (LocaleNCompare(expression,"floor",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta, exception); return((MagickRealType) floor((double) alpha)); } break; } case 'G': case 'g': { if (LocaleNCompare(expression,"gauss",5) == 0) 
{ alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta, exception); gamma=exp((double) (-alpha*alpha/2.0))/sqrt(2.0*MagickPI); return((MagickRealType) gamma); } if (LocaleNCompare(expression,"gcd",3) == 0) { MagickOffsetType gcd; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta, exception); gcd=FxGCD((MagickOffsetType) (alpha+0.5),(MagickOffsetType) (*beta+0.5)); return((MagickRealType) gcd); } if (LocaleCompare(expression,"g") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'H': case 'h': { if (LocaleCompare(expression,"h") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); if (LocaleCompare(expression,"hue") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); if (LocaleNCompare(expression,"hypot",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta, exception); return((MagickRealType) hypot((double) alpha,(double) *beta)); } break; } case 'K': case 'k': { if (LocaleCompare(expression,"k") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'I': case 'i': { if (LocaleCompare(expression,"intensity") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); if (LocaleNCompare(expression,"int",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta, exception); return((MagickRealType) floor(alpha)); } #if defined(MAGICKCORE_HAVE_ISNAN) if (LocaleNCompare(expression,"isnan",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta, exception); return((MagickRealType) !!isnan((double) alpha)); } #endif if (LocaleCompare(expression,"i") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'J': case 'j': { if (LocaleCompare(expression,"j") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); #if defined(MAGICKCORE_HAVE_J0) if (LocaleNCompare(expression,"j0",2) == 0) { 
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,beta, exception); return((MagickRealType) j0((double) alpha)); } #endif #if defined(MAGICKCORE_HAVE_J1) if (LocaleNCompare(expression,"j1",2) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,beta, exception); return((MagickRealType) j1((double) alpha)); } #endif #if defined(MAGICKCORE_HAVE_J1) if (LocaleNCompare(expression,"jinc",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta, exception); if (alpha == 0.0) return(1.0); gamma=(MagickRealType) (2.0*j1((double) (MagickPI*alpha))/ (MagickPI*alpha)); return(gamma); } #endif break; } case 'L': case 'l': { if (LocaleNCompare(expression,"ln",2) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,beta, exception); return((MagickRealType) log((double) alpha)); } if (LocaleNCompare(expression,"logtwo",6) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,beta, exception); return((MagickRealType) log10((double) alpha))/log10(2.0); } if (LocaleNCompare(expression,"log",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta, exception); return((MagickRealType) log10((double) alpha)); } if (LocaleCompare(expression,"lightness") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'M': case 'm': { if (LocaleCompare(expression,"MaxRGB") == 0) return((MagickRealType) QuantumRange); if (LocaleNCompare(expression,"maxima",6) == 0) break; if (LocaleNCompare(expression,"max",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta, exception); return(alpha > *beta ? alpha : *beta); } if (LocaleNCompare(expression,"minima",6) == 0) break; if (LocaleNCompare(expression,"min",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta, exception); return(alpha < *beta ? 
alpha : *beta); } if (LocaleNCompare(expression,"mod",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta, exception); gamma=alpha-floor((double) (alpha/(*beta)))*(*beta); return(gamma); } if (LocaleCompare(expression,"m") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'N': case 'n': { if (LocaleNCompare(expression,"not",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta, exception); return((MagickRealType) (alpha < MagickEpsilon)); } if (LocaleCompare(expression,"n") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'O': case 'o': { if (LocaleCompare(expression,"Opaque") == 0) return(1.0); if (LocaleCompare(expression,"o") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'P': case 'p': { if (LocaleCompare(expression,"phi") == 0) return((MagickRealType) MagickPHI); if (LocaleCompare(expression,"pi") == 0) return((MagickRealType) MagickPI); if (LocaleNCompare(expression,"pow",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta, exception); return((MagickRealType) pow((double) alpha,(double) *beta)); } if (LocaleCompare(expression,"p") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'Q': case 'q': { if (LocaleCompare(expression,"QuantumRange") == 0) return((MagickRealType) QuantumRange); if (LocaleCompare(expression,"QuantumScale") == 0) return((MagickRealType) QuantumScale); break; } case 'R': case 'r': { if (LocaleNCompare(expression,"rand",4) == 0) return((MagickRealType) GetPseudoRandomValue(fx_info->random_info)); if (LocaleNCompare(expression,"round",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta, exception); return((MagickRealType) floor((double) alpha+0.5)); } if (LocaleCompare(expression,"r") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'S': case 's': { if 
(LocaleCompare(expression,"saturation") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); if (LocaleNCompare(expression,"sign",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta, exception); return(alpha < 0.0 ? -1.0 : 1.0); } if (LocaleNCompare(expression,"sinc",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta, exception); if (alpha == 0) return(1.0); gamma=(MagickRealType) (sin((double) (MagickPI*alpha))/ (MagickPI*alpha)); return(gamma); } if (LocaleNCompare(expression,"sinh",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta, exception); return((MagickRealType) sinh((double) alpha)); } if (LocaleNCompare(expression,"sin",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta, exception); return((MagickRealType) sin((double) alpha)); } if (LocaleNCompare(expression,"sqrt",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta, exception); return((MagickRealType) sqrt((double) alpha)); } if (LocaleNCompare(expression,"squish",6) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,beta, exception); return((MagickRealType) (1.0/(1.0+exp((double) (4.0*alpha))))); } if (LocaleCompare(expression,"s") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'T': case 't': { if (LocaleNCompare(expression,"tanh",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta, exception); return((MagickRealType) tanh((double) alpha)); } if (LocaleNCompare(expression,"tan",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta, exception); return((MagickRealType) tan((double) alpha)); } if (LocaleCompare(expression,"Transparent") == 0) return(0.0); if (LocaleNCompare(expression,"trunc",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta, exception); if (alpha >= 0.0) return((MagickRealType) floor((double) alpha)); 
return((MagickRealType) ceil((double) alpha));  /* trunc(): toward zero */
        }
      if (LocaleCompare(expression,"t") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'U':
    case 'u':
    {
      if (LocaleCompare(expression,"u") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'V':
    case 'v':
    {
      if (LocaleCompare(expression,"v") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'W':
    case 'w':
    {
      if (LocaleNCompare(expression,"while",5) == 0)
        {
          /*
            while(cond,expr): re-evaluate the subexpression until its value
            (alpha) falls below MagickEpsilon; the result is *beta.
          */
          do
          {
            alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta,
              exception);
          } while (fabs((double) alpha) >= MagickEpsilon);
          return((MagickRealType) *beta);
        }
      if (LocaleCompare(expression,"w") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'Y':
    case 'y':
    {
      if (LocaleCompare(expression,"y") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'Z':
    case 'z':
    {
      if (LocaleCompare(expression,"z") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    default:
      break;
  }
  /*
    Not a known function name: try a numeric literal with an optional SI
    prefix (e.g. "10K"); if no characters were consumed, fall back to the
    symbol table.
  */
  q=(char *) expression;
  alpha=InterpretSiPrefixValue(expression,&q);
  if (q == expression)
    return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
  return(alpha);
}

/*
  FxEvaluateExpression() evaluates the fx expression for the gray channel at
  pixel (0,0), storing the result in *alpha.
*/
MagickExport MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,
  MagickRealType *alpha,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=FxEvaluateChannelExpression(fx_info,GrayChannel,0,0,alpha,exception);
  return(status);
}

/*
  FxPreprocessExpression() performs a dry-run evaluation with debug output
  suppressed (fx_info->file temporarily cleared) so expression errors surface
  before any per-pixel work is done.
*/
MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info,
  MagickRealType *alpha,ExceptionInfo *exception)
{
  FILE
    *file;

  MagickBooleanType
    status;

  file=fx_info->file;
  fx_info->file=(FILE *) NULL;  /* mute debug() output during the dry run */
  status=FxEvaluateChannelExpression(fx_info,GrayChannel,0,0,alpha,exception);
  fx_info->file=file;
  return(status);
}

/*
  FxEvaluateChannelExpression() evaluates the fx expression for one channel
  at pixel (x,y), storing the result in *alpha.
*/
MagickExport MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
  const ChannelType channel,const ssize_t x,const ssize_t y,
  MagickRealType *alpha,ExceptionInfo *exception)
{
  MagickRealType
beta;

  beta=0.0;
  *alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,&beta,
    exception);
  /* Only a hard OptionError counts as failure; warnings still succeed. */
  return(exception->severity == OptionError ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     F x I m a g e                                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FxImage() applies a mathematical expression to the specified image.
%
%  The format of the FxImage method is:
%
%      Image *FxImage(const Image *image,const char *expression,
%        ExceptionInfo *exception)
%      Image *FxImageChannel(const Image *image,const ChannelType channel,
%        const char *expression,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o expression: A mathematical expression.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  DestroyFxThreadSet() releases every per-thread FxInfo and the array itself;
  always returns NULL so callers can reassign in one step.
*/
static FxInfo **DestroyFxThreadSet(FxInfo **fx_info)
{
  register ssize_t
    i;

  assert(fx_info != (FxInfo **) NULL);
  for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
    if (fx_info[i] != (FxInfo *) NULL)
      fx_info[i]=DestroyFxInfo(fx_info[i]);
  fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
  return(fx_info);
}

/*
  AcquireFxThreadSet() builds one FxInfo per OpenMP thread for the given
  expression; an expression of the form "@filename" is read from that file.
  Returns NULL on allocation failure.
*/
static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
  char
    *fx_expression;

  FxInfo
    **fx_info;

  MagickRealType
    alpha;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=GetOpenMPMaximumThreads();
  fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info));
  if (fx_info == (FxInfo **) NULL)
    return((FxInfo **) NULL);
  (void) ResetMagickMemory(fx_info,0,number_threads*sizeof(*fx_info));
  if (*expression != '@')
    fx_expression=ConstantString(expression);
  else
    fx_expression=FileToString(expression+1,~0,exception);
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    fx_info[i]=AcquireFxInfo(image,fx_expression);
    if (fx_info[i] == (FxInfo *) NULL)
      return(DestroyFxThreadSet(fx_info));
    /* NOTE(review): fx_expression is not freed on the early return above —
       looks like a leak on the AcquireFxInfo failure path; confirm. */
    (void)
FxPreprocessExpression(fx_info[i],&alpha,fx_info[i]->exception);
  }
  fx_expression=DestroyString(fx_expression);
  return(fx_info);
}

/*
  FxImage() is a convenience wrapper: apply the expression to the gray
  channel (i.e. all color channels).
*/
MagickExport Image *FxImage(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
  Image
    *fx_image;

  fx_image=FxImageChannel(image,GrayChannel,expression,exception);
  return(fx_image);
}

/*
  FxImageChannel() clones the input image and overwrites the requested
  channels of the clone with the evaluated expression, pixel by pixel.
*/
MagickExport Image *FxImageChannel(const Image *image,const ChannelType channel,
  const char *expression,ExceptionInfo *exception)
{
#define FxImageTag  "Fx/Image"

  CacheView
    *fx_view;

  FxInfo
    **restrict fx_info;

  Image
    *fx_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    alpha;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  fx_image=CloneImage(image,0,0,MagickTrue,exception);
  if (fx_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(fx_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&fx_image->exception);
      fx_image=DestroyImage(fx_image);
      return((Image *) NULL);
    }
  fx_info=AcquireFxThreadSet(image,expression,exception);
  if (fx_info == (FxInfo **) NULL)
    {
      fx_image=DestroyImage(fx_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /* Validate the expression up front so a malformed one fails fast,
     before the parallel per-pixel loop starts. */
  status=FxPreprocessExpression(fx_info[0],&alpha,exception);
  if (status == MagickFalse)
    {
      fx_image=DestroyImage(fx_image);
      fx_info=DestroyFxThreadSet(fx_info);
      return((Image *) NULL);
    }
  /*
    Fx image.
*/
  status=MagickTrue;
  progress=0;
  fx_view=AcquireCacheView(fx_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) fx_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* selects this thread's private FxInfo */

    MagickRealType
      alpha;

    register IndexPacket
      *restrict fx_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;  /* an earlier row failed: skip remaining iterations */
    q=GetCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    fx_indexes=GetCacheViewAuthenticIndexQueue(fx_view);
    alpha=0.0;
    for (x=0; x < (ssize_t) fx_image->columns; x++)
    {
      /* Evaluate the expression once per requested channel; results are
         scaled from [0,1] back to the quantum range. */
      if ((channel & RedChannel) != 0)
        {
          (void) FxEvaluateChannelExpression(fx_info[id],RedChannel,x,y,
            &alpha,exception);
          SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*
            alpha));
        }
      if ((channel & GreenChannel) != 0)
        {
          (void) FxEvaluateChannelExpression(fx_info[id],GreenChannel,x,y,
            &alpha,exception);
          SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*
            alpha));
        }
      if ((channel & BlueChannel) != 0)
        {
          (void) FxEvaluateChannelExpression(fx_info[id],BlueChannel,x,y,
            &alpha,exception);
          SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*
            alpha));
        }
      if ((channel & OpacityChannel) != 0)
        {
          (void) FxEvaluateChannelExpression(fx_info[id],OpacityChannel,x,y,
            &alpha,exception);
          /* Without a matte the value is opacity directly; with one it is
             interpreted as alpha and inverted. */
          if (image->matte == MagickFalse)
            SetPixelOpacity(q,ClampToQuantum((MagickRealType)
              QuantumRange*alpha));
          else
            SetPixelOpacity(q,ClampToQuantum((MagickRealType)
              (QuantumRange-QuantumRange*alpha)));
        }
      if (((channel & IndexChannel) != 0) &&
          (fx_image->colorspace == CMYKColorspace))
        {
          (void) FxEvaluateChannelExpression(fx_info[id],IndexChannel,x,y,
            &alpha,exception);
          SetPixelIndex(fx_indexes+x,ClampToQuantum((MagickRealType)
            QuantumRange*alpha));
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FxImageChannel) #endif proceed=SetImageProgress(image,FxImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } fx_view=DestroyCacheView(fx_view); fx_info=DestroyFxThreadSet(fx_info); if (status == MagickFalse) fx_image=DestroyImage(fx_image); return(fx_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I m p l o d e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ImplodeImage() creates a new image that is a copy of an existing % one with the image pixels "implode" by the specified percentage. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ImplodeImage method is: % % Image *ImplodeImage(const Image *image,const double amount, % ExceptionInfo *exception) % % A description of each parameter follows: % % o implode_image: Method ImplodeImage returns a pointer to the image % after it is implode. A null image is returned if there is a memory % shortage. % % o image: the image. % % o amount: Define the extent of the implosion. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ImplodeImage(const Image *image,const double amount, ExceptionInfo *exception) { #define ImplodeImageTag "Implode/Image" CacheView *image_view, *implode_view; Image *implode_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket zero; MagickRealType radius; PointInfo center, scale; ssize_t y; /* Initialize implode image attributes. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); implode_image=CloneImage(image,0,0,MagickTrue,exception); if (implode_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(implode_image,DirectClass) == MagickFalse) { InheritException(exception,&implode_image->exception); implode_image=DestroyImage(implode_image); return((Image *) NULL); } if (implode_image->background_color.opacity != OpaqueOpacity) implode_image->matte=MagickTrue; /* Compute scaling factor. */ scale.x=1.0; scale.y=1.0; center.x=0.5*image->columns; center.y=0.5*image->rows; radius=center.x; if (image->columns > image->rows) scale.y=(double) image->columns/(double) image->rows; else if (image->columns < image->rows) { scale.x=(double) image->rows/(double) image->columns; radius=center.y; } /* Implode image. */ status=MagickTrue; progress=0; GetMagickPixelPacket(implode_image,&zero); image_view=AcquireCacheView(image); implode_view=AcquireCacheView(implode_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickPixelPacket pixel; MagickRealType distance; PointInfo delta; register IndexPacket *restrict implode_indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(implode_view,0,y,implode_image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } implode_indexes=GetCacheViewAuthenticIndexQueue(implode_view); delta.y=scale.y*(double) (y-center.y); pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { /* Determine if the pixel is within an ellipse. 
*/ delta.x=scale.x*(double) (x-center.x); distance=delta.x*delta.x+delta.y*delta.y; if (distance < (radius*radius)) { double factor; /* Implode the pixel. */ factor=1.0; if (distance > 0.0) factor=pow(sin((double) (MagickPI*sqrt((double) distance)/ radius/2)),-amount); (void) InterpolateMagickPixelPacket(image,image_view, UndefinedInterpolatePixel,(double) (factor*delta.x/scale.x+ center.x),(double) (factor*delta.y/scale.y+center.y),&pixel, exception); SetPixelPacket(implode_image,&pixel,q,implode_indexes+x); } q++; } if (SyncCacheViewAuthenticPixels(implode_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ImplodeImage) #endif proceed=SetImageProgress(image,ImplodeImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } implode_view=DestroyCacheView(implode_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) implode_image=DestroyImage(implode_image); return(implode_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o r p h I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % The MorphImages() method requires a minimum of two images. The first % image is transformed into the second by a number of intervening images % as specified by frames. % % The format of the MorphImage method is: % % Image *MorphImages(const Image *image,const size_t number_frames, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o number_frames: Define the number of in-between image to generate. % The more in-between frames, the smoother the morph. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *MorphImages(const Image *image, const size_t number_frames,ExceptionInfo *exception) { #define MorphImageTag "Morph/Image" Image *morph_image, *morph_images; MagickBooleanType status; MagickOffsetType scene; MagickRealType alpha, beta; register const Image *next; register ssize_t i; ssize_t y; /* Clone first frame in sequence. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); morph_images=CloneImage(image,0,0,MagickTrue,exception); if (morph_images == (Image *) NULL) return((Image *) NULL); if (GetNextImageInList(image) == (Image *) NULL) { /* Morph single image. */ for (i=1; i < (ssize_t) number_frames; i++) { morph_image=CloneImage(image,0,0,MagickTrue,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } AppendImageToList(&morph_images,morph_image); if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,MorphImageTag,(MagickOffsetType) i, number_frames); if (proceed == MagickFalse) status=MagickFalse; } } return(GetFirstImageInList(morph_images)); } /* Morph image sequence. 
*/ status=MagickTrue; scene=0; next=image; for ( ; GetNextImageInList(next) != (Image *) NULL; next=GetNextImageInList(next)) { for (i=0; i < (ssize_t) number_frames; i++) { CacheView *image_view, *morph_view; beta=(MagickRealType) (i+1.0)/(MagickRealType) (number_frames+1.0); alpha=1.0-beta; morph_image=ResizeImage(next,(size_t) (alpha*next->columns+beta* GetNextImageInList(next)->columns+0.5),(size_t) (alpha* next->rows+beta*GetNextImageInList(next)->rows+0.5), next->filter,next->blur,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } if (SetImageStorageClass(morph_image,DirectClass) == MagickFalse) { InheritException(exception,&morph_image->exception); morph_image=DestroyImage(morph_image); return((Image *) NULL); } AppendImageToList(&morph_images,morph_image); morph_images=GetLastImageInList(morph_images); morph_image=ResizeImage(GetNextImageInList(next),morph_images->columns, morph_images->rows,GetNextImageInList(next)->filter, GetNextImageInList(next)->blur,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } image_view=AcquireCacheView(morph_image); morph_view=AcquireCacheView(morph_images); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) morph_images->rows; y++) { MagickBooleanType sync; register const PixelPacket *restrict p; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,morph_image->columns,1, exception); q=GetCacheViewAuthenticPixels(morph_view,0,y,morph_images->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) morph_images->columns; x++) { SetPixelRed(q,ClampToQuantum(alpha* GetPixelRed(q)+beta*GetPixelRed(p))); SetPixelGreen(q,ClampToQuantum(alpha* 
GetPixelGreen(q)+beta*GetPixelGreen(p))); SetPixelBlue(q,ClampToQuantum(alpha* GetPixelBlue(q)+beta*GetPixelBlue(p))); SetPixelOpacity(q,ClampToQuantum(alpha* GetPixelOpacity(q)+beta*GetPixelOpacity(p))); p++; q++; } sync=SyncCacheViewAuthenticPixels(morph_view,exception); if (sync == MagickFalse) status=MagickFalse; } morph_view=DestroyCacheView(morph_view); image_view=DestroyCacheView(image_view); morph_image=DestroyImage(morph_image); } if (i < (ssize_t) number_frames) break; /* Clone last frame in sequence. */ morph_image=CloneImage(GetNextImageInList(next),0,0,MagickTrue,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } AppendImageToList(&morph_images,morph_image); morph_images=GetLastImageInList(morph_images); if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_MorphImages) #endif proceed=SetImageProgress(image,MorphImageTag,scene, GetImageListLength(image)); if (proceed == MagickFalse) status=MagickFalse; } scene++; } if (GetNextImageInList(next) != (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } return(GetFirstImageInList(morph_images)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P l a s m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PlasmaImage() initializes an image with plasma fractal values. The image % must be initialized with a base color and the random number generator % seeded before this method is called. % % The format of the PlasmaImage method is: % % MagickBooleanType PlasmaImage(Image *image,const SegmentInfo *segment, % size_t attenuate,size_t depth) % % A description of each parameter follows: % % o image: the image. % % o segment: Define the region to apply plasma fractals values. 
% % o attenuate: Define the plasma attenuation factor. % % o depth: Limit the plasma recursion depth. % */ static inline Quantum PlasmaPixel(RandomInfo *random_info, const MagickRealType pixel,const MagickRealType noise) { Quantum plasma; plasma=ClampToQuantum(pixel+noise*GetPseudoRandomValue(random_info)- noise/2.0); return(plasma); } MagickExport MagickBooleanType PlasmaImageProxy(Image *image, CacheView *image_view,RandomInfo *random_info,const SegmentInfo *segment, size_t attenuate,size_t depth) { ExceptionInfo *exception; MagickRealType plasma; PixelPacket u, v; ssize_t x, x_mid, y, y_mid; if (((segment->x2-segment->x1) == 0.0) && ((segment->y2-segment->y1) == 0.0)) return(MagickTrue); if (depth != 0) { SegmentInfo local_info; /* Divide the area into quadrants and recurse. */ depth--; attenuate++; x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5); y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5); local_info=(*segment); local_info.x2=(double) x_mid; local_info.y2=(double) y_mid; (void) PlasmaImageProxy(image,image_view,random_info,&local_info, attenuate,depth); local_info=(*segment); local_info.y1=(double) y_mid; local_info.x2=(double) x_mid; (void) PlasmaImageProxy(image,image_view,random_info,&local_info, attenuate,depth); local_info=(*segment); local_info.x1=(double) x_mid; local_info.y2=(double) y_mid; (void) PlasmaImageProxy(image,image_view,random_info,&local_info, attenuate,depth); local_info=(*segment); local_info.x1=(double) x_mid; local_info.y1=(double) y_mid; return(PlasmaImageProxy(image,image_view,random_info,&local_info, attenuate,depth)); } x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5); y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5); if ((segment->x1 == (double) x_mid) && (segment->x2 == (double) x_mid) && (segment->y1 == (double) y_mid) && (segment->y2 == (double) y_mid)) return(MagickFalse); /* Average pixels and apply plasma. 
*/ exception=(&image->exception); plasma=(MagickRealType) QuantumRange/(2.0*attenuate); if ((segment->x1 != (double) x_mid) || (segment->x2 != (double) x_mid)) { register PixelPacket *restrict q; /* Left pixel. */ x=(ssize_t) ceil(segment->x1-0.5); (void) GetOneCacheViewVirtualPixel(image_view,x,(ssize_t) ceil(segment->y1-0.5),&u,exception); (void) GetOneCacheViewVirtualPixel(image_view,x,(ssize_t) ceil(segment->y2-0.5),&v,exception); q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception); if (q == (PixelPacket *) NULL) return(MagickTrue); SetPixelRed(q,PlasmaPixel(random_info,(MagickRealType) (u.red+v.red)/2.0,plasma)); SetPixelGreen(q,PlasmaPixel(random_info,(MagickRealType) (u.green+v.green)/2.0,plasma)); SetPixelBlue(q,PlasmaPixel(random_info,(MagickRealType) (u.blue+v.blue)/2.0,plasma)); (void) SyncCacheViewAuthenticPixels(image_view,exception); if (segment->x1 != segment->x2) { /* Right pixel. */ x=(ssize_t) ceil(segment->x2-0.5); (void) GetOneCacheViewVirtualPixel(image_view,x,(ssize_t) ceil(segment->y1-0.5),&u,exception); (void) GetOneCacheViewVirtualPixel(image_view,x,(ssize_t) ceil(segment->y2-0.5),&v,exception); q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception); if (q == (PixelPacket *) NULL) return(MagickTrue); SetPixelRed(q,PlasmaPixel(random_info,(MagickRealType) (u.red+v.red)/2.0,plasma)); SetPixelGreen(q,PlasmaPixel(random_info,(MagickRealType) (u.green+v.green)/2.0,plasma)); SetPixelBlue(q,PlasmaPixel(random_info,(MagickRealType) (u.blue+v.blue)/2.0,plasma)); (void) SyncCacheViewAuthenticPixels(image_view,exception); } } if ((segment->y1 != (double) y_mid) || (segment->y2 != (double) y_mid)) { if ((segment->x1 != (double) x_mid) || (segment->y2 != (double) y_mid)) { register PixelPacket *restrict q; /* Bottom pixel. 
*/ y=(ssize_t) ceil(segment->y2-0.5); (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t) ceil(segment->x1-0.5),y,&u,exception); (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t) ceil(segment->x2-0.5),y,&v,exception); q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception); if (q == (PixelPacket *) NULL) return(MagickTrue); SetPixelRed(q,PlasmaPixel(random_info,(MagickRealType) (u.red+v.red)/2.0,plasma)); SetPixelGreen(q,PlasmaPixel(random_info,(MagickRealType) (u.green+v.green)/2.0,plasma)); SetPixelBlue(q,PlasmaPixel(random_info,(MagickRealType) (u.blue+v.blue)/2.0,plasma)); (void) SyncCacheViewAuthenticPixels(image_view,exception); } if (segment->y1 != segment->y2) { register PixelPacket *restrict q; /* Top pixel. */ y=(ssize_t) ceil(segment->y1-0.5); (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t) ceil(segment->x1-0.5),y,&u,exception); (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t) ceil(segment->x2-0.5),y,&v,exception); q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception); if (q == (PixelPacket *) NULL) return(MagickTrue); SetPixelRed(q,PlasmaPixel(random_info,(MagickRealType) (u.red+v.red)/2.0,plasma)); SetPixelGreen(q,PlasmaPixel(random_info,(MagickRealType) (u.green+v.green)/2.0,plasma)); SetPixelBlue(q,PlasmaPixel(random_info,(MagickRealType) (u.blue+v.blue)/2.0,plasma)); (void) SyncCacheViewAuthenticPixels(image_view,exception); } } if ((segment->x1 != segment->x2) || (segment->y1 != segment->y2)) { register PixelPacket *restrict q; /* Middle pixel. 
*/ x=(ssize_t) ceil(segment->x1-0.5); y=(ssize_t) ceil(segment->y1-0.5); (void) GetOneVirtualPixel(image,x,y,&u,exception); x=(ssize_t) ceil(segment->x2-0.5); y=(ssize_t) ceil(segment->y2-0.5); (void) GetOneCacheViewVirtualPixel(image_view,x,y,&v,exception); q=QueueCacheViewAuthenticPixels(image_view,x_mid,y_mid,1,1,exception); if (q == (PixelPacket *) NULL) return(MagickTrue); SetPixelRed(q,PlasmaPixel(random_info,(MagickRealType) (u.red+v.red)/2.0,plasma)); SetPixelGreen(q,PlasmaPixel(random_info,(MagickRealType) (u.green+v.green)/2.0,plasma)); SetPixelBlue(q,PlasmaPixel(random_info,(MagickRealType) (u.blue+v.blue)/2.0,plasma)); (void) SyncCacheViewAuthenticPixels(image_view,exception); } if (((segment->x2-segment->x1) < 3.0) && ((segment->y2-segment->y1) < 3.0)) return(MagickTrue); return(MagickFalse); } MagickExport MagickBooleanType PlasmaImage(Image *image, const SegmentInfo *segment,size_t attenuate,size_t depth) { CacheView *image_view; MagickBooleanType status; RandomInfo *random_info; if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); image_view=AcquireCacheView(image); random_info=AcquireRandomInfo(); status=PlasmaImageProxy(image,image_view,random_info,segment,attenuate,depth); random_info=DestroyRandomInfo(random_info); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P o l a r o i d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PolaroidImage() simulates a Polaroid picture. 
%
%  The format of the PolaroidImage method is:
%
%      Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
%        const double angle,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o angle: Apply the effect along this angle.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
  const double angle,ExceptionInfo *exception)
{
  const char
    *value;

  Image
    *bend_image,
    *caption_image,
    *flop_image,
    *picture_image,
    *polaroid_image,
    *rotate_image,
    *trim_image;

  size_t
    height;

  ssize_t
    quantum;

  /*
    Simulate a Polaroid picture.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    Border width: 1/25th of the longer image dimension, at least 10 pixels.
  */
  quantum=(ssize_t) MagickMax(MagickMax((double) image->columns,(double)
    image->rows)/25.0,10.0);
  height=image->rows+2*quantum;
  caption_image=(Image *) NULL;
  value=GetImageProperty(image,"Caption");
  if (value != (const char *) NULL)
    {
      char
        *caption,
        geometry[MaxTextExtent];

      DrawInfo
        *annotate_info;

      MagickBooleanType
        status;

      ssize_t
        count;

      TypeMetric
        metrics;

      /*
        Generate caption image.
      */
      caption_image=CloneImage(image,image->columns,1,MagickTrue,exception);
      if (caption_image == (Image *) NULL)
        return((Image *) NULL);
      annotate_info=CloneDrawInfo((const ImageInfo *) NULL,draw_info);
      caption=InterpretImageProperties((ImageInfo *) NULL,(Image *) image,
        value);
      (void) CloneString(&annotate_info->text,caption);
      /*
        count is the number of wrapped caption lines; the caption strip is
        sized to hold them all plus one line of slack.
      */
      count=FormatMagickCaption(caption_image,annotate_info,MagickTrue,&metrics,
        &caption);
      status=SetImageExtent(caption_image,image->columns,(size_t)
        ((count+1)*(metrics.ascent-metrics.descent)+0.5));
      if (status == MagickFalse)
        caption_image=DestroyImage(caption_image);
      else
        {
          caption_image->background_color=image->border_color;
          (void) SetImageBackgroundColor(caption_image);
          (void) CloneString(&annotate_info->text,caption);
          (void) FormatLocaleString(geometry,MaxTextExtent,"+0+%g",
            metrics.ascent);
          if (annotate_info->gravity == UndefinedGravity)
            (void) CloneString(&annotate_info->geometry,AcquireString(
              geometry));
          (void) AnnotateImage(caption_image,annotate_info);
          height+=caption_image->rows;
        }
      annotate_info=DestroyDrawInfo(annotate_info);
      caption=DestroyString(caption);
    }
  /*
    Compose the picture: original centered inside a border, caption below.
  */
  picture_image=CloneImage(image,image->columns+2*quantum,height,MagickTrue,
    exception);
  if (picture_image == (Image *) NULL)
    {
      if (caption_image != (Image *) NULL)
        caption_image=DestroyImage(caption_image);
      return((Image *) NULL);
    }
  picture_image->background_color=image->border_color;
  (void) SetImageBackgroundColor(picture_image);
  (void) CompositeImage(picture_image,OverCompositeOp,image,quantum,quantum);
  if (caption_image != (Image *) NULL)
    {
      (void) CompositeImage(picture_image,OverCompositeOp,caption_image,
        quantum,(ssize_t) (image->rows+3*quantum/2));
      caption_image=DestroyImage(caption_image);
    }
  (void) QueryColorDatabase("none",&picture_image->background_color,exception);
  (void) SetImageAlphaChannel(picture_image,OpaqueAlphaChannel);
  /*
    Rotate 90, wave along the (now vertical) long axis, rotate back: gives
    the characteristic curl of a Polaroid print.
  */
  rotate_image=RotateImage(picture_image,90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  bend_image=WaveImage(picture_image,0.01*picture_image->rows,2.0*
    picture_image->columns,exception);
  picture_image=DestroyImage(picture_image);
  if (bend_image == (Image *) NULL)
    return((Image *) NULL);
  InheritException(&bend_image->exception,exception);
  picture_image=bend_image;
  rotate_image=RotateImage(picture_image,-90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  picture_image->background_color=image->background_color;
  polaroid_image=ShadowImage(picture_image,80.0,2.0,quantum/3,quantum/3,
    exception);
  if (polaroid_image == (Image *) NULL)
    {
      /* DestroyImage() returns NULL, so this returns NULL to the caller. */
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  flop_image=FlopImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (flop_image == (Image *) NULL)
    {
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  polaroid_image=flop_image;
  (void) CompositeImage(polaroid_image,OverCompositeOp,picture_image,
    (ssize_t) (-0.01*picture_image->columns/2.0),0L);
  picture_image=DestroyImage(picture_image);
  (void) QueryColorDatabase("none",&polaroid_image->background_color,exception);
  /*
    Final tilt and trim of the transparent margins left by rotation.
  */
  rotate_image=RotateImage(polaroid_image,angle,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=rotate_image;
  trim_image=TrimImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (trim_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=trim_image;
  return(polaroid_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S e p i a T o n e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SepiaToneImage() applies a special effect to the image, similar to the
%  effect achieved in a photo darkroom by sepia toning.  Threshold ranges from
%  0 to QuantumRange and is a measure of the extent of the sepia toning.  A
%  threshold of 80% is a good starting point for a reasonable tone.
%
%  The format of the SepiaToneImage method is:
%
%      Image *SepiaToneImage(const Image *image,const double threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: the tone threshold.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SepiaToneImage(const Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define SepiaToneImageTag  "SepiaTone/Image"

  CacheView
    *image_view,
    *sepia_view;

  Image
    *sepia_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize sepia-toned image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  sepia_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (sepia_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(sepia_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&sepia_image->exception);
      sepia_image=DestroyImage(sepia_image);
      return((Image *) NULL);
    }
  /*
    Tone each row of the image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  sepia_view=AcquireCacheView(sepia_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(sepia_view,0,y,sepia_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        intensity,
        tone;

      /*
        Map intensity to shifted red/green/blue tone curves; the per-channel
        offsets (threshold, 7/6 threshold, 1/6 threshold) produce the warm
        brown cast.
      */
      intensity=(MagickRealType) PixelIntensityToQuantum(p);
      tone=intensity > threshold ? (MagickRealType) QuantumRange : intensity+
        (MagickRealType) QuantumRange-threshold;
      SetPixelRed(q,ClampToQuantum(tone));
      tone=intensity > (7.0*threshold/6.0) ? (MagickRealType) QuantumRange :
        intensity+(MagickRealType) QuantumRange-7.0*threshold/6.0;
      SetPixelGreen(q,ClampToQuantum(tone));
      tone=intensity < (threshold/6.0) ? 0 : intensity-threshold/6.0;
      SetPixelBlue(q,ClampToQuantum(tone));
      /*
        Floor green/blue at threshold/7 so shadows keep a sepia tint.
      */
      tone=threshold/7.0;
      if ((MagickRealType) GetPixelGreen(q) < tone)
        SetPixelGreen(q,ClampToQuantum(tone));
      if ((MagickRealType) GetPixelBlue(q) < tone)
        SetPixelBlue(q,ClampToQuantum(tone));
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(sepia_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_SepiaToneImage)
#endif
        proceed=SetImageProgress(image,SepiaToneImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sepia_view=DestroyCacheView(sepia_view);
  image_view=DestroyCacheView(image_view);
  (void) NormalizeImage(sepia_image);
  (void) ContrastImage(sepia_image,MagickTrue);
  if (status == MagickFalse)
    sepia_image=DestroyImage(sepia_image);
  return(sepia_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S h a d o w I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShadowImage() simulates a shadow from the specified image and returns it.
%
%  The format of the ShadowImage method is:
%
%      Image *ShadowImage(const Image *image,const double opacity,
%        const double sigma,const ssize_t x_offset,const ssize_t y_offset,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o opacity: percentage transparency.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o x_offset: the shadow x-offset.
%
%    o y_offset: the shadow y-offset.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadowImage(const Image *image,const double opacity,
  const double sigma,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define ShadowImageTag  "Shadow/Image"

  CacheView
    *image_view;

  Image
    *border_image,
    *clone_image,
    *shadow_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    border_info;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageVirtualPixelMethod(clone_image,EdgeVirtualPixelMethod);
  clone_image->compose=OverCompositeOp;
  /*
    Pad with a transparent border wide enough to hold the blurred edge
    (2*sigma, rounded).
  */
  border_info.width=(size_t) floor(2.0*sigma+0.5);
  border_info.height=(size_t) floor(2.0*sigma+0.5);
  border_info.x=0;
  border_info.y=0;
  (void) QueryColorDatabase("none",&clone_image->border_color,exception);
  border_image=BorderImage(clone_image,&border_info,exception);
  clone_image=DestroyImage(clone_image);
  if (border_image == (Image *) NULL)
    return((Image *) NULL);
  if (border_image->matte == MagickFalse)
    (void) SetImageAlphaChannel(border_image,OpaqueAlphaChannel);
  /*
    Shadow image: flatten every pixel to the background color, keeping (a
    scaled copy of) the alpha channel as the shadow silhouette.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(border_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) border_image->rows; y++)
  {
    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,border_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) border_image->columns; x++)
    {
      SetPixelRed(q,border_image->background_color.red);
      SetPixelGreen(q,border_image->background_color.green);
      SetPixelBlue(q,border_image->background_color.blue);
      if (border_image->matte == MagickFalse)
        SetPixelOpacity(q,border_image->background_color.opacity);
      else
        /* Scale existing alpha by the requested shadow opacity percentage. */
        SetPixelOpacity(q,ClampToQuantum((MagickRealType)
          (QuantumRange-GetPixelAlpha(q)*opacity/100.0)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ShadowImage)
#endif
        proceed=SetImageProgress(image,ShadowImageTag,progress++,
          border_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Soften the silhouette by blurring only the alpha channel, then shift the
    page geometry so the shadow lands at the requested offset.
  */
  shadow_image=BlurImageChannel(border_image,AlphaChannel,0.0,sigma,exception);
  border_image=DestroyImage(border_image);
  if (shadow_image == (Image *) NULL)
    return((Image *) NULL);
  if (shadow_image->page.width == 0)
    shadow_image->page.width=shadow_image->columns;
  if (shadow_image->page.height == 0)
    shadow_image->page.height=shadow_image->rows;
  shadow_image->page.width+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.height+=y_offset-(ssize_t) border_info.height;
  shadow_image->page.x+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.y+=y_offset-(ssize_t) border_info.height;
  return(shadow_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S k e t c h I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SketchImage() simulates a pencil sketch.  We convolve the image with a
%  Gaussian operator of the given radius and standard deviation (sigma).  For
%  reasonable results, radius should be larger than sigma.  Use a radius of 0
%  and SketchImage() selects a suitable radius for you.  Angle gives the angle
%  of the sketch.
%
%  The format of the SketchImage method is:
%
%    Image *SketchImage(const Image *image,const double radius,
%      const double sigma,const double angle,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting
%      the center pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o angle: Apply the effect along this angle.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SketchImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
  CacheView
    *random_view;

  Image
    *blend_image,
    *blur_image,
    *dodge_image,
    *random_image,
    *sketch_image;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  RandomInfo
    **restrict random_info;

  ssize_t
    y;

  /*
    Sketch image.  Start from a double-resolution canvas of gray noise; the
    noise is later motion-blurred along `angle' to produce pencil strokes.
  */
  random_image=CloneImage(image,image->columns << 1,image->rows << 1,
    MagickTrue,exception);
  if (random_image == (Image *) NULL)
    return((Image *) NULL);
  status=MagickTrue;
  GetMagickPixelPacket(random_image,&zero);
  random_info=AcquireRandomInfoThreadSet();
  random_view=AcquireCacheView(random_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
  for (y=0; y < (ssize_t) random_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* per-thread RNG stream */

    MagickPixelPacket
      pixel;

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(random_view,0,y,random_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(random_view);
    pixel=zero;
    for (x=0; x < (ssize_t) random_image->columns; x++)
    {
      /* gray noise: identical R, G, B (and index for CMYK) per pixel */
      pixel.red=(MagickRealType) (QuantumRange*
        GetPseudoRandomValue(random_info[id]));
      pixel.green=pixel.red;
      pixel.blue=pixel.red;
      if (image->colorspace == CMYKColorspace)
        pixel.index=pixel.red;
      SetPixelPacket(random_image,&pixel,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(random_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  random_view=DestroyCacheView(random_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    {
      /* NOTE(review): returns the just-destroyed (NULL) image pointer,
         i.e. NULL -- intentional idiom, DestroyImage() returns NULL. */
      random_image=DestroyImage(random_image);
      return(random_image);
    }
  blur_image=MotionBlurImage(random_image,radius,sigma,angle,exception);
  random_image=DestroyImage(random_image);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  dodge_image=EdgeImage(blur_image,radius,exception);
  blur_image=DestroyImage(blur_image);
  if (dodge_image == (Image *) NULL)
    return((Image *) NULL);
  (void) NormalizeImage(dodge_image);
  (void) NegateImage(dodge_image,MagickFalse);
  (void) TransformImage(&dodge_image,(char *) NULL,"50%");
  sketch_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sketch_image == (Image *) NULL)
    {
      dodge_image=DestroyImage(dodge_image);
      return((Image *) NULL);
    }
  (void) CompositeImage(sketch_image,ColorDodgeCompositeOp,dodge_image,0,0);
  dodge_image=DestroyImage(dodge_image);
  blend_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blend_image == (Image *) NULL)
    {
      sketch_image=DestroyImage(sketch_image);
      return((Image *) NULL);
    }
  /* blend 20% original / 80% sketch back in for tonal detail */
  (void) SetImageArtifact(blend_image,"compose:args","20x80");
  (void) CompositeImage(sketch_image,BlendCompositeOp,blend_image,0,0);
  blend_image=DestroyImage(blend_image);
  return(sketch_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S o l a r i z e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SolarizeImage() applies a special effect to the image, similar to the
%  effect achieved in a photo darkroom by selectively exposing areas of photo
%  sensitive paper to light.  Threshold ranges from 0 to QuantumRange and is
%  a measure of the extent of the solarization.
%
%  The format of the SolarizeImage method is:
%
%    MagickBooleanType SolarizeImage(Image *image,const double threshold)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: Define the extent of the solarization.
%
*/
MagickExport MagickBooleanType SolarizeImage(Image *image,
  const double threshold)
{
#define SolarizeImageTag  "Solarize/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      /*
        Solarize colormap: invert each channel that exceeds the threshold.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((MagickRealType) image->colormap[i].red > threshold)
          image->colormap[i].red=(Quantum) QuantumRange-image->colormap[i].red;
        if ((MagickRealType) image->colormap[i].green > threshold)
          image->colormap[i].green=(Quantum) QuantumRange-
            image->colormap[i].green;
        if ((MagickRealType) image->colormap[i].blue > threshold)
          image->colormap[i].blue=(Quantum) QuantumRange-
            image->colormap[i].blue;
      }
    }
  /*
    Solarize image: apply the same per-channel inversion to the raster.
    Note the raster pass runs even for PseudoClass images.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((MagickRealType) GetPixelRed(q) > threshold)
        SetPixelRed(q,QuantumRange-GetPixelRed(q));
      if ((MagickRealType) GetPixelGreen(q) > threshold)
        SetPixelGreen(q,QuantumRange-GetPixelGreen(q));
      if ((MagickRealType) GetPixelBlue(q) > threshold)
        SetPixelBlue(q,QuantumRange-GetPixelBlue(q));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_SolarizeImage)
#endif
        proceed=SetImageProgress(image,SolarizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S t e g a n o I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SteganoImage() hides a digital watermark within the image.  Recover
%  the hidden watermark later to prove the authenticity of an image.
%  Offset defines the start position within the image to hide the watermark.
%
%  The format of the SteganoImage method is:
%
%    Image *SteganoImage(const Image *image,Image *watermark,
%      ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o watermark: the watermark image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SteganoImage(const Image *image,const Image *watermark,
  ExceptionInfo *exception)
{
#define GetBit(alpha,i) ((((size_t) (alpha) >> (size_t) (i)) & 0x01) != 0)
#define SetBit(alpha,i,set) (alpha)=(Quantum) ((set) != 0 ? (size_t) (alpha) \
  | (one << (size_t) (i)) : (size_t) (alpha) & ~(one << (size_t) (i)))
#define SteganoImageTag  "Stegano/Image"

  CacheView
    *stegano_view,
    *watermark_view;

  Image
    *stegano_image;

  int
    c;

  MagickBooleanType
    status;

  PixelPacket
    pixel;

  register PixelPacket
    *q;

  register ssize_t
    x;

  size_t
    depth,
    one;

  ssize_t
    i,
    j,
    k,
    y;

  /*
    Initialize steganographic image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(watermark != (const Image *) NULL);
  assert(watermark->signature == MagickSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  one=1UL;
  stegano_image=CloneImage(image,0,0,MagickTrue,exception);
  if (stegano_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(stegano_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&stegano_image->exception);
      stegano_image=DestroyImage(stegano_image);
      return((Image *) NULL);
    }
  stegano_image->depth=MAGICKCORE_QUANTUM_DEPTH;
  /*
    Hide watermark in low-order bits of image.  Watermark intensity bit i
    (scanned MSB-to-LSB) is stored into target bit plane j of successive
    R, G, B channels, cycling through the pixels starting at image->offset.
  */
  c=0;
  i=0;
  j=0;
  depth=stegano_image->depth;
  k=image->offset;
  status=MagickTrue;
  watermark_view=AcquireCacheView(watermark);
  stegano_view=AcquireCacheView(stegano_image);
  for (i=(ssize_t) depth-1; (i >= 0) && (j < (ssize_t) depth); i--)
  {
    for (y=0; (y < (ssize_t) watermark->rows) && (j < (ssize_t) depth); y++)
    {
      for (x=0; (x < (ssize_t) watermark->columns) && (j < (ssize_t) depth); x++)
      {
        (void) GetOneCacheViewVirtualPixel(watermark_view,x,y,&pixel,exception);
        if ((k/(ssize_t) stegano_image->columns) >= (ssize_t)
            stegano_image->rows)
          break;
        q=GetCacheViewAuthenticPixels(stegano_view,k % (ssize_t)
          stegano_image->columns,k/(ssize_t) stegano_image->columns,1,1,
          exception);
        if (q == (PixelPacket *) NULL)
          break;
        switch (c)
        {
          case 0:
          {
            SetBit(GetPixelRed(q),j,GetBit(PixelIntensityToQuantum(
              &pixel),i));
            break;
          }
          case 1:
          {
            SetBit(GetPixelGreen(q),j,GetBit(PixelIntensityToQuantum(
              &pixel),i));
            break;
          }
          case 2:
          {
            SetBit(GetPixelBlue(q),j,GetBit(PixelIntensityToQuantum(
              &pixel),i));
            break;
          }
        }
        if (SyncCacheViewAuthenticPixels(stegano_view,exception) == MagickFalse)
          break;
        c++;
        if (c == 3)
          c=0;
        k++;
        /* NOTE(review): wrap test uses columns*columns -- looks like it
           should be columns*rows; only correct for square images.  Confirm
           against upstream before changing. */
        if (k == (ssize_t) (stegano_image->columns*stegano_image->columns))
          k=0;
        if (k == image->offset)
          j++;
      }
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,SteganoImageTag,(MagickOffsetType)
          (depth-i),depth);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  stegano_view=DestroyCacheView(stegano_view);
  watermark_view=DestroyCacheView(watermark_view);
  if (stegano_image->storage_class == PseudoClass)
    (void) SyncImage(stegano_image);
  if (status == MagickFalse)
    {
      stegano_image=DestroyImage(stegano_image);
      return((Image *) NULL);
    }
  return(stegano_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S t e r e o A n a g l y p h I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  StereoAnaglyphImage() combines two images and produces a single image that
%  is the composite of a left and right image of a stereo pair.  Special
%  red-green stereo glasses are required to view this effect.
%
%  The format of the StereoAnaglyphImage method is:
%
%    Image *StereoImage(const Image *left_image,const Image *right_image,
%      ExceptionInfo *exception)
%    Image *StereoAnaglyphImage(const Image *left_image,
%      const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
%      ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o left_image: the left image.
%
%    o right_image: the right image.
%
%    o exception: return any errors or warnings in this structure.
%
%    o x_offset: amount, in pixels, by which the left image is offset to the
%      right of the right image.
%
%    o y_offset: amount, in pixels, by which the left image is offset to the
%      bottom of the right image.
%
%
*/
MagickExport Image *StereoImage(const Image *left_image,
  const Image *right_image,ExceptionInfo *exception)
{
  /* convenience wrapper: anaglyph with zero offsets */
  return(StereoAnaglyphImage(left_image,right_image,0,0,exception));
}

MagickExport Image *StereoAnaglyphImage(const Image *left_image,
  const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define StereoImageTag  "Stereo/Image"

  const Image
    *image;

  Image
    *stereo_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(left_image != (const Image *) NULL);
  assert(left_image->signature == MagickSignature);
  if (left_image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      left_image->filename);
  assert(right_image != (const Image *) NULL);
  assert(right_image->signature == MagickSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  assert(right_image != (const Image *) NULL);
  image=left_image;
  if ((left_image->columns != right_image->columns) ||
      (left_image->rows != right_image->rows))
    ThrowImageException(ImageError,"LeftAndRightImageSizesDiffer");
  /*
    Initialize stereo image attributes.
  */
  stereo_image=CloneImage(left_image,left_image->columns,left_image->rows,
    MagickTrue,exception);
  if (stereo_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(stereo_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&stereo_image->exception);
      stereo_image=DestroyImage(stereo_image);
      return((Image *) NULL);
    }
  /*
    Copy left image to red channel and right image to blue channel.
  */
  status=MagickTrue;
  for (y=0; y < (ssize_t) stereo_image->rows; y++)
  {
    register const PixelPacket
      *restrict p,
      *restrict q;

    register ssize_t
      x;

    register PixelPacket
      *restrict r;

    p=GetVirtualPixels(left_image,-x_offset,y-y_offset,image->columns,1,
      exception);
    q=GetVirtualPixels(right_image,0,y,right_image->columns,1,exception);
    r=QueueAuthenticPixels(stereo_image,0,y,stereo_image->columns,1,exception);
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL) ||
        (r == (PixelPacket *) NULL))
      break;
    for (x=0; x < (ssize_t) stereo_image->columns; x++)
    {
      /* red from the (offset) left eye, green+blue from the right eye */
      SetPixelRed(r,GetPixelRed(p));
      SetPixelGreen(r,GetPixelGreen(q));
      SetPixelBlue(r,GetPixelBlue(q));
      SetPixelOpacity(r,(GetPixelOpacity(p)+q->opacity)/2);
      p++;
      q++;
      r++;
    }
    if (SyncAuthenticPixels(stereo_image,exception) == MagickFalse)
      break;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,StereoImageTag,(MagickOffsetType) y,
          stereo_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  if (status == MagickFalse)
    {
      stereo_image=DestroyImage(stereo_image);
      return((Image *) NULL);
    }
  return(stereo_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S w i r l I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SwirlImage() swirls the pixels about the center of the image, where
%  degrees indicates the sweep of the arc through which each pixel is moved.
%  You get a more dramatic effect as the degrees move from 1 to 360.
%
%  The format of the SwirlImage method is:
%
%    Image *SwirlImage(const Image *image,double degrees,
%      ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o degrees: Define the tightness of the swirling effect.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SwirlImage(const Image *image,double degrees,
  ExceptionInfo *exception)
{
#define SwirlImageTag  "Swirl/Image"

  CacheView
    *image_view,
    *swirl_view;

  Image
    *swirl_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  MagickRealType
    radius;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize swirl image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  swirl_image=CloneImage(image,0,0,MagickTrue,exception);
  if (swirl_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(swirl_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&swirl_image->exception);
      swirl_image=DestroyImage(swirl_image);
      return((Image *) NULL);
    }
  if (swirl_image->background_color.opacity != OpaqueOpacity)
    swirl_image->matte=MagickTrue;
  /*
    Compute scaling factor: the swirl operates inside the largest ellipse
    inscribed in the image; `scale' maps that ellipse onto a circle.
  */
  center.x=(double) image->columns/2.0;
  center.y=(double) image->rows/2.0;
  radius=MagickMax(center.x,center.y);
  scale.x=1.0;
  scale.y=1.0;
  if (image->columns > image->rows)
    scale.y=(double) image->columns/(double) image->rows;
  else
    if (image->columns < image->rows)
      scale.x=(double) image->rows/(double) image->columns;
  degrees=(double) DegreesToRadians(degrees);
  /*
    Swirl image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(swirl_image,&zero);
  image_view=AcquireCacheView(image);
  swirl_view=AcquireCacheView(swirl_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    MagickRealType
      distance;

    PointInfo
      delta;

    register IndexPacket
      *restrict swirl_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(swirl_view,0,y,swirl_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    swirl_indexes=GetCacheViewAuthenticIndexQueue(swirl_view);
    delta.y=scale.y*(double) (y-center.y);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Determine if the pixel is within an ellipse.
      */
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance < (radius*radius))
        {
          MagickRealType
            cosine,
            factor,
            sine;

          /*
            Swirl the pixel: rotation angle falls off quadratically with
            normalized distance from the center.
          */
          factor=1.0-sqrt((double) distance)/radius;
          sine=sin((double) (degrees*factor*factor));
          cosine=cos((double) (degrees*factor*factor));
          (void) InterpolateMagickPixelPacket(image,image_view,
            UndefinedInterpolatePixel,(double) ((cosine*delta.x-sine*delta.y)/
            scale.x+center.x),(double) ((sine*delta.x+cosine*delta.y)/scale.y+
            center.y),&pixel,exception);
          SetPixelPacket(swirl_image,&pixel,q,swirl_indexes+x);
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(swirl_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_SwirlImage)
#endif
        proceed=SetImageProgress(image,SwirlImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  swirl_view=DestroyCacheView(swirl_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    swirl_image=DestroyImage(swirl_image);
  return(swirl_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     T i n t I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TintImage() applies a color vector to each pixel in the image.  The length
%  of the vector is 0 for black and white and at its maximum for the midtones.
%  The vector weighting function is f(x)=(1-(4.0*((x-0.5)*(x-0.5))))
%
%  The format of the TintImage method is:
%
%    Image *TintImage(const Image *image,const char *opacity,
%      const PixelPacket tint,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o opacity: A color value used for tinting.
%
%    o tint: A color value used for tinting.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TintImage(const Image *image,const char *opacity,
  const PixelPacket tint,ExceptionInfo *exception)
{
#define TintImageTag  "Tint/Image"

  CacheView
    *image_view,
    *tint_view;

  GeometryInfo
    geometry_info;

  Image
    *tint_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    color_vector,
    pixel;

  MagickStatusType
    flags;

  ssize_t
    y;

  /*
    Allocate tint image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  tint_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (tint_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(tint_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&tint_image->exception);
      tint_image=DestroyImage(tint_image);
      return((Image *) NULL);
    }
  if (opacity == (const char *) NULL)
    return(tint_image);  /* no opacity string: return an untinted clone */
  /*
    Determine RGB values of the color.  Missing sigma/xi components default
    to rho (the red value); missing psi defaults to fully opaque.
  */
  flags=ParseGeometry(opacity,&geometry_info);
  pixel.red=geometry_info.rho;
  if ((flags & SigmaValue) != 0)
    pixel.green=geometry_info.sigma;
  else
    pixel.green=pixel.red;
  if ((flags & XiValue) != 0)
    pixel.blue=geometry_info.xi;
  else
    pixel.blue=pixel.red;
  if ((flags & PsiValue) != 0)
    pixel.opacity=geometry_info.psi;
  else
    pixel.opacity=(MagickRealType) OpaqueOpacity;
  color_vector.red=(MagickRealType) (pixel.red*tint.red/100.0-
    PixelIntensity(&tint));
  color_vector.green=(MagickRealType) (pixel.green*tint.green/100.0-
    PixelIntensity(&tint));
  color_vector.blue=(MagickRealType) (pixel.blue*tint.blue/100.0-
    PixelIntensity(&tint));
  /*
    Tint image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  tint_view=AcquireCacheView(tint_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(tint_view,0,y,tint_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickPixelPacket
        pixel;

      MagickRealType
        weight;

      /* midtone weighting f(x)=1-4*(x-0.5)^2: zero at black/white, max at
         mid-gray; applied per channel */
      weight=QuantumScale*GetPixelRed(p)-0.5;
      pixel.red=(MagickRealType) GetPixelRed(p)+color_vector.red*(1.0-(4.0*
        (weight*weight)));
      SetPixelRed(q,ClampToQuantum(pixel.red));
      weight=QuantumScale*GetPixelGreen(p)-0.5;
      pixel.green=(MagickRealType) GetPixelGreen(p)+color_vector.green*(1.0-(4.0*
        (weight*weight)));
      SetPixelGreen(q,ClampToQuantum(pixel.green));
      weight=QuantumScale*GetPixelBlue(p)-0.5;
      pixel.blue=(MagickRealType) GetPixelBlue(p)+color_vector.blue*(1.0-(4.0*
        (weight*weight)));
      SetPixelBlue(q,ClampToQuantum(pixel.blue));
      SetPixelOpacity(q,GetPixelOpacity(p));
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(tint_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_TintImage)
#endif
        proceed=SetImageProgress(image,TintImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  tint_view=DestroyCacheView(tint_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    tint_image=DestroyImage(tint_image);
  return(tint_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     V i g n e t t e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  VignetteImage() softens the edges of the image in vignette style.
%
%  The format of the VignetteImage method is:
%
%    Image *VignetteImage(const Image *image,const double radius,
%      const double sigma,const ssize_t x,const ssize_t y,
%      ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o x, y: Define the x and y ellipse offset.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *VignetteImage(const Image *image,const double radius,
  const double sigma,const ssize_t x,const ssize_t y,ExceptionInfo *exception)
{
  char
    ellipse[MaxTextExtent];

  DrawInfo
    *draw_info;

  Image
    *canvas_image,
    *blur_image,
    *oval_image,
    *vignette_image;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(canvas_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&canvas_image->exception);
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  canvas_image->matte=MagickTrue;
  /*
    Draw a white ellipse on a black canvas; its blur becomes the alpha mask.
  */
  oval_image=CloneImage(canvas_image,canvas_image->columns,
    canvas_image->rows,MagickTrue,exception);
  if (oval_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  (void) QueryColorDatabase("#000000",&oval_image->background_color,exception);
  (void) SetImageBackgroundColor(oval_image);
  draw_info=CloneDrawInfo((const ImageInfo *) NULL,(const DrawInfo *) NULL);
  (void) QueryColorDatabase("#ffffff",&draw_info->fill,exception);
  (void) QueryColorDatabase("#ffffff",&draw_info->stroke,exception);
  (void) FormatLocaleString(ellipse,MaxTextExtent,
    "ellipse %g,%g,%g,%g,0.0,360.0",image->columns/2.0,
    image->rows/2.0,image->columns/2.0-x,image->rows/2.0-y);
  draw_info->primitive=AcquireString(ellipse);
  (void) DrawImage(oval_image,draw_info);
  draw_info=DestroyDrawInfo(draw_info);
  blur_image=BlurImage(oval_image,radius,sigma,exception);
  oval_image=DestroyImage(oval_image);
  if (blur_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  blur_image->matte=MagickFalse;
  (void) CompositeImage(canvas_image,CopyOpacityCompositeOp,blur_image,0,0);
  blur_image=DestroyImage(blur_image);
  vignette_image=MergeImageLayers(canvas_image,FlattenLayer,exception);
  canvas_image=DestroyImage(canvas_image);
  return(vignette_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     W a v e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WaveImage() creates a "ripple" effect in the image by shifting the pixels
%  vertically along a sine wave whose amplitude and wavelength are specified
%  by the given parameters.
%
%  The format of the WaveImage method is:
%
%    Image *WaveImage(const Image *image,const double amplitude,
%      const double wave_length,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o amplitude, wave_length: Define the amplitude and wave length of the
%      sine wave.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *WaveImage(const Image *image,const double amplitude,
  const double wave_length,ExceptionInfo *exception)
{
#define WaveImageTag  "Wave/Image"

  CacheView
    *image_view,
    *wave_view;

  Image
    *wave_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  MagickRealType
    *sine_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Initialize wave image attributes.  The output is taller than the input
    by twice the amplitude so the displaced pixels fit.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  wave_image=CloneImage(image,image->columns,(size_t) (image->rows+2.0*
    fabs(amplitude)),MagickTrue,exception);
  if (wave_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(wave_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&wave_image->exception);
      wave_image=DestroyImage(wave_image);
      return((Image *) NULL);
    }
  if (wave_image->background_color.opacity != OpaqueOpacity)
    wave_image->matte=MagickTrue;
  /*
    Allocate sine map: per-column vertical displacement.
  */
  sine_map=(MagickRealType *) AcquireQuantumMemory((size_t) wave_image->columns,
    sizeof(*sine_map));
  if (sine_map == (MagickRealType *) NULL)
    {
      wave_image=DestroyImage(wave_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  for (i=0; i < (ssize_t) wave_image->columns; i++)
    sine_map[i]=fabs(amplitude)+amplitude*sin((double) ((2.0*MagickPI*i)/
      wave_length));
  /*
    Wave image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(wave_image,&zero);
  image_view=AcquireCacheView(image);
  wave_view=AcquireCacheView(wave_image);
  /* samples outside the source image resolve to the background color */
  (void) SetCacheViewVirtualPixelMethod(image_view,
    BackgroundVirtualPixelMethod);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) wave_image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(wave_view,0,y,wave_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(wave_view);
    pixel=zero;
    for (x=0; x < (ssize_t) wave_image->columns; x++)
    {
      (void) InterpolateMagickPixelPacket(image,image_view,
        UndefinedInterpolatePixel,(double) x,(double) (y-sine_map[x]),&pixel,
        exception);
      SetPixelPacket(wave_image,&pixel,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(wave_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_WaveImage)
#endif
        proceed=SetImageProgress(image,WaveImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  wave_view=DestroyCacheView(wave_view);
  image_view=DestroyCacheView(image_view);
  sine_map=(MagickRealType *) RelinquishMagickMemory(sine_map);
  if (status == MagickFalse)
    wave_image=DestroyImage(wave_image);
  return(wave_image);
}
ms_non.c
/*======================= M A N D E L B R O T =======================*/
// Implementation Based on Rosetta Code Example
// 1) Draws Mandelbrot set for Fc(z)=z*z +c using
//    Mandelbrot algorithm (boolean escape time).
// 2) Technique of creating ppm file is based on
//    the code of Claudio Rocchini. http://en.
//    wikipedia.org/wiki/Image:Color_complex_plot
//    .jpg. Create 24 bit color graphic file,
//    portable pixmap file = PPM, see http://en.
//    wikipedia.org/wiki/Portable_pixmap to see
//    the file use external application (graphic
//    viewer).

// Inclusions
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

// Definitions
#define SHOW_TIMES 1

// Image Structure Definition - one 24-bit RGB pixel
typedef struct
{
  unsigned char color[3];
} image;

// Main
//
// Usage: <output.ppm> <x/y size> <cores> <display>
// Renders a square boolean-escape-time Mandelbrot image into a binary
// PPM (P6) file and reports per-phase wall-clock timings.
// Returns 0 on success, 1 on bad arguments or an allocation/I/O failure.
int main( int argc, char *argv[ ] )
{
  const unsigned int maxColorComponentValue = 255; // Color Component - 24-bit RGB Coded from 0 to 255
  const unsigned int iMax = 200;                   // Maximum Number of Iterations

  unsigned int iY = 0;             // Screen (Integer) Y Coordinate (omp loop variable)
  unsigned int iXMax = 2048;       // Generated Image Width
  unsigned int iYMax = 2048;       // Generated Image Height
  unsigned int cores = 0;          // Number of Cores Utilized in Parallel
  unsigned int display = 0;        // Argument to Display Debug Text
  unsigned int size = 0;           // Size of Generated Image in Pixels

  const double cXMin = -1.5;       // Generated View Window X Minimum
  const double cXMax = 0.5;        // Generated View Window X Maximum
  const double cYMin = -1.0;       // Generated View Window Y Minimum
  const double cYMax = 1.0;        // Generated View Window Y Maximum
  const double escapeRadius = 2.0; // Bail-Out Value - Radius of Circle

  double escapeRadius2 = 4.0;      // Square of EscapeRadius
  double pixelWidth = 0.0;         // Relative Pixel Width from Window and Size Parameters
  double pixelHeight = 0.0;        // Relative Pixel Height from Window and Size Parameters

  char *filename = NULL;           // Filename to Save Mandelbrot Image
  const char *comment = "# ";      // Dynamic File Header Comment
  image *fractal = NULL;           // Image Being Built
  FILE *fp = NULL;                 // Output File

  int sizeArg = 0;                 // Raw size argument (validated before use)
  int coresArg = 0;                // Raw cores argument (validated before use)

  // Phase timers (wall-clock, via OpenMP)
  double jobStart = 0.0;           // Timer for Program Start
  double mandelbrotStart = 0.0;    // Timer for Mandelbrot Fractal Generation Start
  double mandelbrotEnd = 0.0;      // Timer for Mandelbrot Fractal Generation End
  double writeStart = 0.0;         // Timer for Image Write Out Start
  double writeEnd = 0.0;           // Timer for Image Write Out End
  double jobEnd = 0.0;             // Timer for Program Completion
  double mandelbrotTime = 0.0;     // Program Time in Color Search Operation Phase
  double writeTime = 0.0;          // Program Time in Image Write Phase
  double addTime = 0.0;            // Additional Overhead Time
  double totalTime = 0.0;          // Total Program Time

  // Set Job Start Timer
  jobStart = omp_get_wtime( );

  // Display Usage Instructions If Argument Count Incorrect
  if( argc != 5 )
  {
    printf( "\nUsage: %s <output> <x/y> <cores> <display>\n", argv[0] );
    printf( "  Output - a .ppm image to output with the fractal.\n" );
    printf( "  X/Y - width and height of image in pixels.\n" );
    printf( "  Cores - number of cores to utilize for parallel operation.\n" );
    printf( "  Display - 1 displays debug text, 0 just displays time values for raw data tables.\n\n" );
    return 1;
  }

  // Parse and Validate Input Arguments (atoi yields 0 on garbage, and
  // negative values would wrap when stored into the unsigned fields)
  filename = argv[1];
  sizeArg = atoi( argv[2] );
  coresArg = atoi( argv[3] );
  display = (unsigned int) atoi( argv[4] );
  if( ( sizeArg <= 0 ) || ( coresArg <= 0 ) )
  {
    fprintf( stderr, "Error: <x/y> and <cores> must be positive integers.\n" );
    return 1;
  }
  iXMax = (unsigned int) sizeArg;
  iYMax = iXMax;                   // Image is square
  cores = (unsigned int) coresArg;

  // Intro Text
  if( display )
  {
    printf( "\n = = = Mandelbrot Set Generator = = = \n\n" );
  }

  // Early Calculations
  size = iXMax * iYMax;
  pixelWidth = ( cXMax - cXMin ) / iXMax;
  pixelHeight = ( cYMax - cYMin ) / iYMax;
  escapeRadius2 = escapeRadius * escapeRadius;

  // OpenMP Initial Tasks and Tests
  omp_set_num_threads( cores );
  if( display )
  {
    printf( "Using %d Cores of Maximum %d Cores Available\nTesting - Report\n", cores, omp_get_max_threads( ) );
    // One report line per team member
    #pragma omp parallel
    {
      printf( " Core %d of %d Reporting!\n", omp_get_thread_num( ), omp_get_num_threads( ) );
    }
  }

  // Allocate Storage for Image Colors (was previously unchecked)
  fractal = (image *) malloc( (size_t) size * sizeof( *fractal ) );
  if( fractal == NULL )
  {
    fprintf( stderr, "Error: unable to allocate %u pixels.\n", size );
    return 1;
  }

  // Compute Fractal Image.  All per-pixel scratch state is declared inside
  // the loop body, so it is implicitly private; the old function-scope
  // variables needed a fragile private(...) clause to avoid data races.
  if( display )
  {
    printf( "\nGenerating Mandelbrot Set...\n" );
  }
  mandelbrotStart = omp_get_wtime( );
  #pragma omp parallel for schedule( guided, 100 )
  for( iY = 0; iY < iYMax; iY++ )
  {
    unsigned int iX;                          // Screen (Integer) X Coordinate
    double cY = cYMin + iY * pixelHeight;     // World (Double) Y Coordinate
    if( fabs( cY ) < ( pixelHeight / 2 ) )
    {
      cY = 0.0;                               // Main Antenna
    }
    for( iX = 0; iX < iXMax; iX++ )
    {
      double cX = cXMin + iX * pixelWidth;    // World (Double) X Coordinate
      // Initial Value of Orbit - Critical Point Z = 0
      double zX = 0.0;
      double zY = 0.0;
      double zX2 = zX * zX;
      double zY2 = zY * zY;
      unsigned int i;                         // Iteration Number
      for( i = 0; ( i < iMax ) && ( ( zX2 + zY2 ) < escapeRadius2 ); i++ )
      {
        zY = 2 * zX * zY + cY;
        zX = zX2 - zY2 + cX;
        zX2 = zX * zX;
        zY2 = zY * zY;
      }
      // Save Pixel Color.  Row stride is the image WIDTH (iXMax); the
      // original used iYMax, which only worked because width == height.
      unsigned int thisPixelNum = iY * iXMax + iX;
      if( i == iMax )
      {
        // Color for Interior of Mandelbrot Set - Dark Gray
        fractal[thisPixelNum].color[0] = 37;  // Red Component
        fractal[thisPixelNum].color[1] = 37;  // Green Component
        fractal[thisPixelNum].color[2] = 37;  // Blue Component
      }
      else
      {
        // Color for Exterior of Mandelbrot Set - Blue
        fractal[thisPixelNum].color[0] = 0;   // Red Component
        fractal[thisPixelNum].color[1] = 0;   // Green Component
        fractal[thisPixelNum].color[2] = 255; // Blue Component
      }
    } // End iX For
  } // End iY For
  mandelbrotEnd = omp_get_wtime( );

  // Image File Write Phase
  if( display )
  {
    printf( "Writing File Out...\n" );
  }
  writeStart = omp_get_wtime( );
  // Create New File - give it a name and open it in binary mode.
  fp = fopen( filename, "wb" ); // b - Binary Mode
  if( fp == NULL )              // was previously unchecked -> crash on bad path
  {
    fprintf( stderr, "Error: unable to open '%s' for writing.\n", filename );
    free( fractal );
    return 1;
  }
  // Write ASCII Header to the File
  fprintf( fp, "P6\n %s\n %d\n %d\n %d\n", comment, iXMax, iYMax, maxColorComponentValue );
  // Pixel rows are contiguous in memory, so the whole image can be written
  // with a single call instead of one fwrite per pixel.
  if( fwrite( fractal, sizeof( *fractal ), size, fp ) != size )
  {
    fprintf( stderr, "Error: short write to '%s'.\n", filename );
    fclose( fp );
    free( fractal );
    return 1;
  }
  fclose( fp );
  writeEnd = omp_get_wtime( );

  // Final Tasks
  free( fractal );
  if( display )
  {
    printf( "Operation Complete!\n\n" );
  }

  // Timing Calculations
  jobEnd = omp_get_wtime( );
  mandelbrotTime = mandelbrotEnd - mandelbrotStart;
  writeTime = writeEnd - writeStart;
  totalTime = jobEnd - jobStart;
  addTime = totalTime - mandelbrotTime - writeTime;

  // Timing Display
  if( SHOW_TIMES )
  {
    if( display )
    {
      printf( " === Timing Data ===\n Mandelbrot:\t\t" );
    }
    printf( "%0.9lf ", mandelbrotTime );
    if( display )
    {
      printf( "\n Image Write:\t\t" );
      printf( "%0.9lf\n", writeTime );
      printf( " Ending Overhead:\t" );
      printf( "%0.9lf\n", addTime );
      printf( " Total Job Time:\t" );
      printf( "%0.9lf\n\n", totalTime );
    }
  }
  return 0;
}
Friends.h
/****************************************************************/
/* Parallel Combinatorial BLAS Library (for Graph Computations) */
/* version 1.2 -------------------------------------------------*/
/* date: 10/06/2011 --------------------------------------------*/
/* authors: Aydin Buluc (abuluc@lbl.gov), Adam Lugowski --------*/
/****************************************************************/
/*
Copyright (c) 2011, Aydin Buluc

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/

#ifndef _FRIENDS_H_
#define _FRIENDS_H_

#include <iostream>
#include "SpMat.h"	// Best to include the base class first
#include "SpHelper.h"
#include "StackEntry.h"
#include "Isect.h"
#include "Deleter.h"
#include "SpImpl.h"
#include "SpImplNoSR.h"
#include "SpParHelper.h"
#include "Compare.h"
#include "CombBLAS.h"

using namespace std;

// Forward declarations of the sparse storage classes these friend functions operate on.
template <class IU, class NU>
class SpTuples;

template <class IU, class NU>
class SpDCCols;

template <class IU, class NU>
class Dcsc;

/*************************************************************************************************/
/**************************** SHARED ADDRESS SPACE FRIEND FUNCTIONS ******************************/
/****************************** MULTITHREADED LOGIC ALSO GOES HERE *******************************/
/*************************************************************************************************/

//! SpMV with dense vector
//! Accumulates A*x into y on the semiring SR (y[row] = SR::add(y[row], SR::multiply(a, x[col]))
//! via SR::axpy). Serial; walks only the nonzero columns of the DCSC structure.
template <typename SR, typename IU, typename NU, typename RHS, typename LHS>
void dcsc_gespmv (const SpDCCols<IU, NU> & A, const RHS * x, LHS * y)
{
	if(A.nnz > 0)
	{
		for(IU j =0; j<A.dcsc->nzc; ++j)	// for all nonzero columns
		{
			IU colid = A.dcsc->jc[j];
			for(IU i = A.dcsc->cp[j]; i< A.dcsc->cp[j+1]; ++i)
			{
				IU rowid = A.dcsc->ir[i];
				SR::axpy(A.dcsc->numx[i], x[colid], y[rowid]);
				if (SR::returnedSAID())
				{
					// "SAID" (semiring says "stop") short-circuiting is not supported here.
					cout << "the semiring returned SAID but that is not implemented. results will be incorrect." << endl;
					throw string("the semiring returned SAID but that is not implemented. results will be incorrect.");
				}
			}
		}
	}
}

/**
 * Multithreaded SpMV with sparse vector
 * the assembly of outgoing buffers sendindbuf/sendnumbuf are done here
 *
 * Allocates sendindbuf/sendnumbuf/sdispls (caller owns them afterwards) and fills
 * sdispls with per-recipient displacements for p_c recipients.
 * @return total number of output nonzeros (0 when either operand is empty)
 */
template <typename SR, typename IU, typename NUM, typename IVT, typename OVT>
int dcsc_gespmv_threaded (const SpDCCols<IU, NUM> & A, const int32_t * indx, const IVT * numx, int32_t nnzx,
		int32_t * & sendindbuf, OVT * & sendnumbuf, int * & sdispls, int p_c)
{
	// FACTS: Split boundaries (for multithreaded execution) are independent of recipient boundaries
	// Two splits might create output to the same recipient (needs to be merged)
	// However, each split's output is distinct (no duplicate elimination is needed after merge)

	sdispls = new int[p_c]();	// initialize to zero (as all indy might be empty)
	if(A.getnnz() > 0 && nnzx > 0)
	{
		int splits = A.getnsplit();
		if(splits > 0)
		{
			int32_t nlocrows = static_cast<int32_t>(A.getnrow());
			int32_t perpiece = nlocrows / splits;	// rows per split (last split takes the remainder)
			vector< vector< int32_t > > indy(splits);
			vector< vector< OVT > > numy(splits);

			// Parallelize with OpenMP: each split's SpMV is independent
			#ifdef _OPENMP
			#pragma omp parallel for // num_threads(6)
			#endif
			for(int i=0; i<splits; ++i)
			{
				if(i != splits-1)
					SpMXSpV_ForThreading<SR>(*(A.GetDCSC(i)), perpiece, indx, numx, nnzx, indy[i], numy[i], i*perpiece);
				else
					SpMXSpV_ForThreading<SR>(*(A.GetDCSC(i)), nlocrows - perpiece*i, indx, numx, nnzx, indy[i], numy[i], i*perpiece);
			}

			// Prefix-sum of per-split output sizes: accum[i] is where split i's output starts
			vector<int> accum(splits+1, 0);
			for(int i=0; i<splits; ++i)
				accum[i+1] = accum[i] + indy[i].size();

			sendindbuf = new int32_t[accum[splits]];
			sendnumbuf = new OVT[accum[splits]];
			int32_t perproc = nlocrows / p_c;	// rows per recipient processor
			int32_t last_rec = p_c-1;

			// keep recipients of last entries in each split (-1 for an empty split)
			// so that we can delete indy[] and numy[] contents as soon as they are processed
			vector<int32_t> end_recs(splits);
			for(int i=0; i<splits; ++i)
			{
				if(indy[i].empty())
					end_recs[i] = -1;
				else
					end_recs[i] = min(indy[i].back() / perproc, last_rec);
			}

			#ifdef _OPENMP
			#pragma omp parallel for // num_threads(6)
			#endif
			for(int i=0; i<splits; ++i)
			{
				if(!indy[i].empty())	// guarantee that .begin() and .end() are not null
				{
					// FACT: Data is sorted, so if the recipient of begin is the same as the owner of end,
					// then the whole data is sent to the same processor
					int32_t beg_rec = min( indy[i].front() / perproc, last_rec);

					// We have to test the previous "split", to see if we are marking a "recipient head"
					// set displacement markers for the completed (previous) buffers only
					if(i != 0)
					{
						int k = i-1;
						while (k >= 0 && end_recs[k] == -1) k--;	// loop backwards until seeing an non-empty split
						if(k >= 0)	// we found a non-empty split
						{
							fill(sdispls+end_recs[k]+1, sdispls+beg_rec+1, accum[i]);	// last entry to be set is sdispls[beg_rec]
						}
						// else fill sdispls[1...beg_rec] with zero (already done)
					}
					// else set sdispls[0] to zero (already done)

					if(beg_rec == end_recs[i])	// fast case: whole split goes to one recipient
					{
						transform(indy[i].begin(), indy[i].end(), indy[i].begin(), bind2nd(minus<int32_t>(), perproc*beg_rec));
						copy(indy[i].begin(), indy[i].end(), sendindbuf+accum[i]);
						copy(numy[i].begin(), numy[i].end(), sendnumbuf+accum[i]);
					}
					else	// slow case: split spans several recipients
					{
						// FACT: No matter how many splits or threads, there will be only one "recipient head"
						// Therefore there are no race conditions for marking send displacements (sdispls)
						int end = indy[i].size();
						for(int cur=0; cur< end; ++cur)
						{
							int32_t cur_rec = min( indy[i][cur] / perproc, last_rec);
							while(beg_rec != cur_rec)
							{
								sdispls[++beg_rec] = accum[i] + cur;	// first entry to be set is sdispls[beg_rec+1]
							}
							sendindbuf[ accum[i] + cur ] = indy[i][cur] - perproc*beg_rec;	// convert to receiver's local index
							sendnumbuf[ accum[i] + cur ] = numy[i][cur];
						}
					}
					// Release this split's intermediate storage immediately (swap-with-empty idiom)
					vector<int32_t>().swap(indy[i]);
					vector<OVT>().swap(numy[i]);

					bool lastnonzero = true;	// am I the last nonzero split?
					for(int k=i+1; k < splits; ++k)
					{
						if(end_recs[k] != -1)
							lastnonzero = false;
					}
					if(lastnonzero)
						fill(sdispls+end_recs[i]+1, sdispls+p_c, accum[i+1]);
				}	// end_if(!indy[i].empty)
			}	// end parallel for
			return accum[splits];
		}
		else
		{
			cout << "Something is wrong, splits should be nonzero for multithreaded execution" << endl;
			return 0;
		}
	}
	else
	{
		sendindbuf = NULL;
		sendnumbuf = NULL;
		return 0;
	}
}

/**
 * Multithreaded SpMV with sparse vector and preset buffers
 * the assembly of outgoing buffers sendindbuf/sendnumbuf are done here
 * IVT: input vector numerical type
 * OVT: output vector numerical type
 *
 * Unlike dcsc_gespmv_threaded, the caller supplies sendindbuf/sendnumbuf and the
 * per-recipient displacements (dspls); this function only ADDS to cnts.
 */
template <typename SR, typename IU, typename NUM, typename IVT, typename OVT>
void dcsc_gespmv_threaded_setbuffers (const SpDCCols<IU, NUM> & A, const int32_t * indx, const IVT * numx, int32_t nnzx,
		int32_t * sendindbuf, OVT * sendnumbuf, int * cnts, int * dspls, int p_c)
{
	if(A.getnnz() > 0 && nnzx > 0)
	{
		int splits = A.getnsplit();
		if(splits > 0)
		{
			vector< vector<int32_t> > indy(splits);
			vector< vector< OVT > > numy(splits);
			int32_t nlocrows = static_cast<int32_t>(A.getnrow());
			int32_t perpiece = nlocrows / splits;

			// Phase 1: independent per-split SpMVs
			#ifdef _OPENMP
			#pragma omp parallel for
			#endif
			for(int i=0; i<splits; ++i)
			{
				if(i != splits-1)
					SpMXSpV_ForThreading<SR>(*(A.GetDCSC(i)), perpiece, indx, numx, nnzx, indy[i], numy[i], i*perpiece);
				else
					SpMXSpV_ForThreading<SR>(*(A.GetDCSC(i)), nlocrows - perpiece*i, indx, numx, nnzx, indy[i], numy[i], i*perpiece);
			}

			int32_t perproc = nlocrows / p_c;
			int32_t last_rec = p_c-1;

			// keep recipients of last entries in each split (-1 for an empty split)
			// so that we can delete indy[] and numy[] contents as soon as they are processed
			vector<int32_t> end_recs(splits);
			for(int i=0; i<splits; ++i)
			{
				if(indy[i].empty())
					end_recs[i] = -1;
				else
					end_recs[i] = min(indy[i].back() / perproc, last_rec);
			}

			// Phase 2: count, per split and per recipient, how many entries go where
			int ** loc_rec_cnts = new int *[splits];
			#ifdef _OPENMP
			#pragma omp parallel for
			#endif
			for(int i=0; i<splits; ++i)
			{
				loc_rec_cnts[i] = new int[p_c]();	// thread-local recipient data
				if(!indy[i].empty())	// guarantee that .begin() and .end() are not null
				{
					int32_t cur_rec = min( indy[i].front() / perproc, last_rec);
					int32_t lastdata = (cur_rec+1) * perproc;	// one past last entry that goes to this current recipient
					for(typename vector<int32_t>::iterator it = indy[i].begin(); it != indy[i].end(); ++it)
					{
						if( ( (*it) >= lastdata ) && cur_rec != last_rec )
						{
							cur_rec = min( (*it) / perproc, last_rec);
							lastdata = (cur_rec+1) * perproc;
						}
						++loc_rec_cnts[i][cur_rec];
					}
				}
			}

			// Phase 3: scatter each split's output into the caller's buffers
			#ifdef _OPENMP
			#pragma omp parallel for
			#endif
			for(int i=0; i<splits; ++i)
			{
				if(!indy[i].empty())	// guarantee that .begin() and .end() are not null
				{
					// FACT: Data is sorted, so if the recipient of begin is the same as the owner of end,
					// then the whole data is sent to the same processor
					int32_t beg_rec = min( indy[i].front() / perproc, last_rec);
					int32_t alreadysent = 0;	// already sent per recipient (by earlier splits)
					for(int before = i-1; before >= 0; before--)
						alreadysent += loc_rec_cnts[before][beg_rec];

					if(beg_rec == end_recs[i])	// fast case: whole split goes to one recipient
					{
						transform(indy[i].begin(), indy[i].end(), indy[i].begin(), bind2nd(minus<int32_t>(), perproc*beg_rec));
						copy(indy[i].begin(), indy[i].end(), sendindbuf + dspls[beg_rec] + alreadysent);
						copy(numy[i].begin(), numy[i].end(), sendnumbuf + dspls[beg_rec] + alreadysent);
					}
					else	// slow case: split spans several recipients
					{
						int32_t cur_rec = beg_rec;
						int32_t lastdata = (cur_rec+1) * perproc;	// one past last entry that goes to this current recipient
						for(typename vector<int32_t>::iterator it = indy[i].begin(); it != indy[i].end(); ++it)
						{
							if( ( (*it) >= lastdata ) && cur_rec != last_rec )
							{
								cur_rec = min( (*it) / perproc, last_rec);
								lastdata = (cur_rec+1) * perproc;

								// if this split switches to a new recipient after sending some data
								// then it's sure that no data has been sent to that recipient yet
								alreadysent = 0;
							}
							sendindbuf[ dspls[cur_rec] + alreadysent ] = (*it) - perproc*cur_rec;	// convert to receiver's local index
							sendnumbuf[ dspls[cur_rec] + (alreadysent++) ] = *(numy[i].begin() + (it-indy[i].begin()));
						}
					}
				}
			}

			// Deallocated rec counts serially once all threads complete
			for(int i=0; i< splits; ++i)
			{
				for(int j=0; j< p_c; ++j)
					cnts[j] += loc_rec_cnts[i][j];
				delete [] loc_rec_cnts[i];
			}
			delete [] loc_rec_cnts;
		}
		else
		{
			cout << "Something is wrong, splits should be nonzero for multithreaded execution" << endl;
		}
	}
}

//! SpMV with sparse vector
//! MIND: Matrix index type
//! VIND: Vector index type (optimized: int32_t, general: int64_t)
//! Serial path only: if the matrix is split for threading, prints a hint and does nothing.
template <typename SR, typename MIND, typename VIND, typename NUM, typename IVT, typename OVT>
void dcsc_gespmv (const SpDCCols<MIND, NUM> & A, const VIND * indx, const IVT * numx, VIND nnzx, vector<VIND> & indy, vector<OVT> & numy)
{
	if(A.getnnz() > 0 && nnzx > 0)
	{
		if(A.getnsplit() > 0)
		{
			cout << "Call dcsc_gespmv_threaded instead" << endl;
		}
		else
		{
			SpMXSpV<SR>(*(A.GetDCSC()), (VIND) A.getnrow(), indx, numx, nnzx, indy, numy);
		}
	}
}

/** SpMV with sparse vector
 * @param[in] indexisvalue is only used for BFS-like computations, if true then we can call the optimized version that skips SPA
 */
template <typename SR, typename IU, typename NUM, typename IVT, typename OVT>
void dcsc_gespmv (const SpDCCols<IU, NUM> & A, const int32_t * indx, const IVT * numx, int32_t nnzx,
		int32_t * indy, OVT * numy, int * cnts, int * dspls, int p_c, bool indexisvalue)
{
	if(A.getnnz() > 0 && nnzx > 0)
	{
		if(A.getnsplit() > 0)
		{
			SpParHelper::Print("Call dcsc_gespmv_threaded instead\n");
		}
		else
		{
			if(indexisvalue)
			{
				// BFS-style optimization: a bitmap replaces the sparse accumulator (SPA)
				int32_t localm = (int32_t) A.getnrow();
				BitMap * isthere = new BitMap(localm);
				SpMXSpV(*(A.GetDCSC()), localm, indx, numx, nnzx, indy, numy, cnts, dspls, p_c, isthere);
				delete isthere;
			}
			else
				SpMXSpV<SR>(*(A.GetDCSC()), (int32_t) A.getnrow(), indx, numx, nnzx, indy, numy, cnts, dspls, p_c);
		}
	}
}

/**
 * Splits a boolean matrix row-wise into numsplits pieces (for threaded SpMV).
 * Destroys A.dcsc and rebuilds the data as A.dcscarr[0..numsplits-1], where
 * piece i holds rows [i*perpiece, (i+1)*perpiece) with row ids made local.
 */
template<typename IU>
void BooleanRowSplit(SpDCCols<IU, bool> & A, int numsplits)
{
	A.splits = numsplits;
	IU perpiece = A.m / A.splits;
	vector<IU> prevcolids(A.splits, -1);	// previous column id's are set to -1
	vector<IU> nzcs(A.splits, 0);
	vector<IU> nnzs(A.splits, 0);
	vector < vector < pair<IU,IU> > > colrowpairs(A.splits);
	if(A.nnz > 0 && A.dcsc != NULL)
	{
		// Bucket every nonzero into its owning split, counting nonzeros and nonzero columns per split
		for(IU i=0; i< A.dcsc->nzc; ++i)
		{
			for(IU j = A.dcsc->cp[i]; j< A.dcsc->cp[i+1]; ++j)
			{
				IU colid = A.dcsc->jc[i];
				IU rowid = A.dcsc->ir[j];
				IU owner = min(rowid / perpiece, static_cast<IU>(A.splits-1));
				colrowpairs[owner].push_back(make_pair(colid, rowid - owner*perpiece));

				if(prevcolids[owner] != colid)
				{
					prevcolids[owner] = colid;
					++nzcs[owner];
				}
				++nnzs[owner];
			}
		}
	}
	delete A.dcsc;	// claim memory
	//copy(nzcs.begin(), nzcs.end(), ostream_iterator<IU>(cout," " )); cout << endl;
	//copy(nnzs.begin(), nnzs.end(), ostream_iterator<IU>(cout," " )); cout << endl;

	A.dcscarr = new Dcsc<IU,bool>*[A.splits];

	// To be parallelized with OpenMP
	for(int i=0; i< A.splits; ++i)
	{
		sort(colrowpairs[i].begin(), colrowpairs[i].end());	// sort w.r.t. columns
		A.dcscarr[i] = new Dcsc<IU,bool>(nnzs[i],nzcs[i]);
		fill(A.dcscarr[i]->numx, A.dcscarr[i]->numx+nnzs[i], static_cast<bool>(1));	// boolean matrix: all values are "true"
		IU curnzc = 0;	// number of nonzero columns constructed so far
		IU cindex = colrowpairs[i][0].first;
		IU rindex = colrowpairs[i][0].second;

		A.dcscarr[i]->ir[0] = rindex;
		A.dcscarr[i]->jc[curnzc] = cindex;
		A.dcscarr[i]->cp[curnzc++] = 0;

		// Build the DCSC arrays from the column-sorted pairs; a new column starts
		// whenever the column id changes
		for(IU j=1; j<nnzs[i]; ++j)
		{
			cindex = colrowpairs[i][j].first;
			rindex = colrowpairs[i][j].second;

			A.dcscarr[i]->ir[j] = rindex;
			if(cindex != A.dcscarr[i]->jc[curnzc-1])
			{
				A.dcscarr[i]->jc[curnzc] = cindex;
				A.dcscarr[i]->cp[curnzc++] = j;
			}
		}
		A.dcscarr[i]->cp[curnzc] = nnzs[i];
	}
}

/**
 * SpTuples(A*B') (Using OuterProduct Algorithm)
 * Returns the tuples for efficient merging later
 * Support mixed precision multiplication
 * The multiplication is on the specified semiring (passed as parameter)
 *
 * If clearA/clearB are set, the corresponding operand is deleted before returning.
 */
template<class SR, class NUO, class IU, class NU1, class NU2>
SpTuples<IU, NUO> * Tuples_AnXBt (const SpDCCols<IU, NU1> & A, const SpDCCols<IU, NU2> & B, bool clearA = false, bool clearB = false)
{
	IU mdim = A.m;
	IU ndim = B.m;	// B is already transposed
	if(A.isZero() || B.isZero())
	{
		if(clearA) delete const_cast<SpDCCols<IU, NU1> *>(&A);
		if(clearB) delete const_cast<SpDCCols<IU, NU2> *>(&B);
		return new SpTuples< IU, NUO >(0, mdim, ndim);	// just return an empty matrix
	}
	Isect<IU> *isect1, *isect2, *itr1, *itr2, *cols, *rows;
	SpHelper::SpIntersect(*(A.dcsc), *(B.dcsc), cols, rows, isect1, isect2, itr1, itr2);

	IU kisect = static_cast<IU>(itr1-isect1);	// size of the intersection ((itr1-isect1) == (itr2-isect2))
	if(kisect == 0)
	{
		if(clearA) delete const_cast<SpDCCols<IU, NU1> *>(&A);
		if(clearB) delete const_cast<SpDCCols<IU, NU2> *>(&B);
		DeleteAll(isect1, isect2, cols, rows);
		return new SpTuples< IU, NUO >(0, mdim, ndim);
	}

	StackEntry< NUO, pair<IU,IU> > * multstack;
	IU cnz = SpHelper::SpCartesian< SR > (*(A.dcsc), *(B.dcsc), kisect, isect1, isect2, multstack);
	DeleteAll(isect1, isect2, cols, rows);

	if(clearA) delete const_cast<SpDCCols<IU, NU1> *>(&A);
	if(clearB) delete const_cast<SpDCCols<IU, NU2> *>(&B);

	return new SpTuples<IU, NUO> (cnz, mdim, ndim, multstack);
}

/**
 * SpTuples(A*B) (Using ColByCol Algorithm)
 * Returns the tuples for efficient merging later
 * Support mixed precision multiplication
 * The multiplication is on the specified semiring (passed as parameter)
 */
template<class SR, class NUO, class IU, class NU1, class NU2>
SpTuples<IU, NUO> * Tuples_AnXBn (const SpDCCols<IU, NU1> & A, const SpDCCols<IU, NU2> & B, bool clearA = false, bool clearB = false)
{
	IU mdim = A.m;
	IU ndim = B.n;
	if(A.isZero() || B.isZero())
	{
		return new SpTuples<IU, NUO>(0, mdim, ndim);
	}
	StackEntry< NUO, pair<IU,IU> > * multstack;
	IU cnz = SpHelper::SpColByCol< SR > (*(A.dcsc), *(B.dcsc), A.n, multstack);

	if(clearA) delete const_cast<SpDCCols<IU, NU1> *>(&A);
	if(clearB) delete const_cast<SpDCCols<IU, NU2> *>(&B);

	return new SpTuples<IU, NUO> (cnz, mdim, ndim, multstack);
}

//! Unimplemented stub: SpTuples(A'*B'). Prints a message and returns an empty result.
template<class SR, class NUO, class IU, class NU1, class NU2>
SpTuples<IU, NUO> * Tuples_AtXBt (const SpDCCols<IU, NU1> & A, const SpDCCols<IU, NU2> & B, bool clearA = false, bool clearB = false)
{
	IU mdim = A.n;
	IU ndim = B.m;
	cout << "Tuples_AtXBt function has not been implemented yet !" << endl;

	return new SpTuples<IU, NUO> (0, mdim, ndim);
}

//! Unimplemented stub: SpTuples(A'*B). Prints a message and returns an empty result.
template<class SR, class NUO, class IU, class NU1, class NU2>
SpTuples<IU, NUO> * Tuples_AtXBn (const SpDCCols<IU, NU1> & A, const SpDCCols<IU, NU2> & B, bool clearA = false, bool clearB = false)
{
	IU mdim = A.n;
	IU ndim = B.n;
	cout << "Tuples_AtXBn function has not been implemented yet !" << endl;

	return new SpTuples<IU, NUO> (0, mdim, ndim);
}

// Performs a balanced merge of the array of SpTuples
// Assumes the input parameters are already column sorted
// Uses a k-way heap merge; duplicate (row, col) entries are combined with SR::add.
// If delarrs is true, the input SpTuples objects are deleted before returning.
template<class SR, class IU, class NU>
SpTuples<IU,NU> MergeAll( const vector<SpTuples<IU,NU> *> & ArrSpTups, IU mstar = 0, IU nstar = 0, bool delarrs = false )
{
	int hsize = ArrSpTups.size();
	if(hsize == 0)
	{
		return SpTuples<IU,NU>(0, mstar,nstar);
	}
	else
	{
		mstar = ArrSpTups[0]->m;
		nstar = ArrSpTups[0]->n;
	}
	for(int i=1; i< hsize; ++i)
	{
		if((mstar != ArrSpTups[i]->m) || nstar != ArrSpTups[i]->n)
		{
			cerr << "Dimensions do not match on MergeAll()" << endl;
			return SpTuples<IU,NU>(0,0,0);
		}
	}
	if(hsize > 1)
	{
		ColLexiCompare<IU,int> heapcomp;
		tuple<IU, IU, int> * heap = new tuple<IU, IU, int> [hsize];	// (rowindex, colindex, source-id)
		IU * curptr = new IU[hsize];	// per-source cursor into its tuples array
		fill_n(curptr, hsize, static_cast<IU>(0));
		IU estnnz = 0;	// upper bound on merged size (exact if no duplicates)

		for(int i=0; i< hsize; ++i)
		{
			estnnz += ArrSpTups[i]->getnnz();
			heap[i] = make_tuple(get<0>(ArrSpTups[i]->tuples[0]), get<1>(ArrSpTups[i]->tuples[0]), i);
		}
		make_heap(heap, heap+hsize, not2(heapcomp));	// min-heap via negated comparator

		tuple<IU, IU, NU> * ntuples = new tuple<IU,IU,NU>[estnnz];
		IU cnz = 0;

		while(hsize > 0)
		{
			pop_heap(heap, heap + hsize, not2(heapcomp));	// result is stored in heap[hsize-1]
			int source = get<2>(heap[hsize-1]);

			// Same (row, col) as the previously emitted tuple? Combine with semiring add.
			if( (cnz != 0) &&
				((get<0>(ntuples[cnz-1]) == get<0>(heap[hsize-1])) && (get<1>(ntuples[cnz-1]) == get<1>(heap[hsize-1]))) )
			{
				get<2>(ntuples[cnz-1]) = SR::add(get<2>(ntuples[cnz-1]), ArrSpTups[source]->numvalue(curptr[source]++));
			}
			else
			{
				ntuples[cnz++] = ArrSpTups[source]->tuples[curptr[source]++];
			}

			if(curptr[source] != ArrSpTups[source]->getnnz())	// That array has not been depleted
			{
				heap[hsize-1] = make_tuple(get<0>(ArrSpTups[source]->tuples[curptr[source]]),
								get<1>(ArrSpTups[source]->tuples[curptr[source]]), source);
				push_heap(heap, heap+hsize, not2(heapcomp));
			}
			else
			{
				--hsize;
			}
		}
		SpHelper::ShrinkArray(ntuples, cnz);	// trim the overestimate down to cnz
		DeleteAll(heap, curptr);

		if(delarrs)
		{
			for(size_t i=0; i<ArrSpTups.size(); ++i)
				delete ArrSpTups[i];
		}
		return SpTuples<IU,NU> (cnz, mstar, nstar, ntuples);
	}
	else
	{
		// Single input: copy (or steal, if delarrs) it directly
		SpTuples<IU,NU> ret = *ArrSpTups[0];
		if(delarrs)
			delete ArrSpTups[0];
		return ret;
	}
}

/**
 * @param[in] exclude if false,
 * \n then operation is A = A .* B
 * \n else operation is A = A .* not(B)
 **/
template <typename IU, typename NU1, typename NU2>
Dcsc<IU, typename promote_trait<NU1,NU2>::T_promote> EWiseMult(const Dcsc<IU,NU1> & A, const Dcsc<IU,NU2> * B, bool exclude)
{
	typedef typename promote_trait<NU1,NU2>::T_promote N_promote;
	IU estnzc, estnz;
	if(exclude)	// A .* not(B) can keep at most everything in A
	{
		estnzc = A.nzc;
		estnz = A.nz;
	}
	else	// A .* B can keep at most the smaller operand
	{
		estnzc = std::min(A.nzc, B->nzc);
		estnz = std::min(A.nz, B->nz);
	}

	Dcsc<IU,N_promote> temp(estnz, estnzc);

	IU curnzc = 0;
	IU curnz = 0;
	IU i = 0;	// cursor over A's nonzero columns
	IU j = 0;	// cursor over B's nonzero columns
	temp.cp[0] = 0;

	if(!exclude)	// A = A .* B
	{
		// Merge-intersect the two sorted column lists; within matching columns,
		// merge-intersect the sorted row lists and multiply matching entries
		while(i< A.nzc && B != NULL && j<B->nzc)
		{
			if(A.jc[i] > B->jc[j])		++j;
			else if(A.jc[i] < B->jc[j])	++i;
			else
			{
				IU ii = A.cp[i];
				IU jj = B->cp[j];
				IU prevnz = curnz;
				while (ii < A.cp[i+1] && jj < B->cp[j+1])
				{
					if (A.ir[ii] < B->ir[jj])	++ii;
					else if (A.ir[ii] > B->ir[jj])	++jj;
					else
					{
						temp.ir[curnz] = A.ir[ii];
						temp.numx[curnz++] = A.numx[ii++] * B->numx[jj++];
					}
				}
				if(prevnz < curnz)	// at least one nonzero exists in this column
				{
					temp.jc[curnzc++] = A.jc[i];
					temp.cp[curnzc] = temp.cp[curnzc-1] + curnz-prevnz;
				}
				++i; ++j;
			}
		}
	}
	else	// A = A .* not(B)
	{
		// Keep A's entries except those whose (row, col) also appears in B
		while(i< A.nzc && B != NULL && j< B->nzc)
		{
			if(A.jc[i] > B->jc[j])		++j;
			else if(A.jc[i] < B->jc[j])	// column only in A: copy it wholesale
			{
				temp.jc[curnzc++] = A.jc[i++];
				for(IU k = A.cp[i-1]; k< A.cp[i]; k++)
				{
					temp.ir[curnz] = A.ir[k];
					temp.numx[curnz++] = A.numx[k];
				}
				temp.cp[curnzc] = temp.cp[curnzc-1] + (A.cp[i] - A.cp[i-1]);
			}
			else	// column in both: keep only A's rows not present in B
			{
				IU ii = A.cp[i];
				IU jj = B->cp[j];
				IU prevnz = curnz;
				while (ii < A.cp[i+1] && jj < B->cp[j+1])
				{
					if (A.ir[ii] > B->ir[jj])	++jj;
					else if (A.ir[ii] < B->ir[jj])
					{
						temp.ir[curnz] = A.ir[ii];
						temp.numx[curnz++] = A.numx[ii++];
					}
					else	// eliminate those existing nonzeros
					{
						++ii; ++jj;
					}
				}
				while (ii < A.cp[i+1])	// drain A's remaining rows in this column
				{
					temp.ir[curnz] = A.ir[ii];
					temp.numx[curnz++] = A.numx[ii++];
				}
				if(prevnz < curnz)	// at least one nonzero exists in this column
				{
					temp.jc[curnzc++] = A.jc[i];
					temp.cp[curnzc] = temp.cp[curnzc-1] + curnz-prevnz;
				}
				++i; ++j;
			}
		}
		while(i< A.nzc)	// remaining A columns after B ran out: copy them wholesale
		{
			temp.jc[curnzc++] = A.jc[i++];
			for(IU k = A.cp[i-1]; k< A.cp[i]; ++k)
			{
				temp.ir[curnz] = A.ir[k];
				temp.numx[curnz++] = A.numx[k];
			}
			temp.cp[curnzc] = temp.cp[curnzc-1] + (A.cp[i] - A.cp[i-1]);
		}
	}
	temp.Resize(curnzc, curnz);	// shrink from the estimate to the actual counts
	return temp;
}

/**
 * Generalization of EWiseMult above: applies __binary_op instead of multiplication.
 * If notB is true, operates on A .* not(B) with defaultBVal standing in for B's
 * (absent) values; otherwise operates on the intersection A .* B.
 */
template <typename N_promote, typename IU, typename NU1, typename NU2, typename _BinaryOperation>
Dcsc<IU, N_promote> EWiseApply(const Dcsc<IU,NU1> & A, const Dcsc<IU,NU2> * B, _BinaryOperation __binary_op, bool notB, const NU2& defaultBVal)
{
	//typedef typename promote_trait<NU1,NU2>::T_promote N_promote;
	IU estnzc, estnz;
	if(notB)
	{
		estnzc = A.nzc;
		estnz = A.nz;
	}
	else
	{
		estnzc = std::min(A.nzc, B->nzc);
		estnz = std::min(A.nz, B->nz);
	}

	Dcsc<IU,N_promote> temp(estnz, estnzc);

	IU curnzc = 0;
	IU curnz = 0;
	IU i = 0;
	IU j = 0;
	temp.cp[0] = 0;

	if(!notB)	// A = A .* B
	{
		while(i< A.nzc && B != NULL && j<B->nzc)
		{
			if(A.jc[i] > B->jc[j])		++j;
			else if(A.jc[i] < B->jc[j])	++i;
			else
			{
				IU ii = A.cp[i];
				IU jj = B->cp[j];
				IU prevnz = curnz;
				while (ii < A.cp[i+1] && jj < B->cp[j+1])
				{
					if (A.ir[ii] < B->ir[jj])	++ii;
					else if (A.ir[ii] > B->ir[jj])	++jj;
					else
					{
						temp.ir[curnz] = A.ir[ii];
						temp.numx[curnz++] = __binary_op(A.numx[ii++], B->numx[jj++]);
					}
				}
				if(prevnz < curnz)	// at least one nonzero exists in this column
				{
					temp.jc[curnzc++] = A.jc[i];
					temp.cp[curnzc] = temp.cp[curnzc-1] + curnz-prevnz;
				}
				++i; ++j;
			}
		}
	}
	else	// A = A .* not(B)
	{
		while(i< A.nzc && B != NULL && j< B->nzc)
		{
			if(A.jc[i] > B->jc[j])		++j;
			else if(A.jc[i] < B->jc[j])	// column only in A: apply op against defaultBVal
			{
				temp.jc[curnzc++] = A.jc[i++];
				for(IU k = A.cp[i-1]; k< A.cp[i]; k++)
				{
					temp.ir[curnz] = A.ir[k];
					temp.numx[curnz++] = __binary_op(A.numx[k], defaultBVal);
				}
				temp.cp[curnzc] = temp.cp[curnzc-1] + (A.cp[i] - A.cp[i-1]);
			}
			else	// column in both: keep only A's rows not present in B
			{
				IU ii = A.cp[i];
				IU jj = B->cp[j];
				IU prevnz = curnz;
				while (ii < A.cp[i+1] && jj < B->cp[j+1])
				{
					if (A.ir[ii] > B->ir[jj])	++jj;
					else if (A.ir[ii] < B->ir[jj])
					{
						temp.ir[curnz] = A.ir[ii];
						temp.numx[curnz++] = __binary_op(A.numx[ii++], defaultBVal);
					}
					else	// eliminate those existing nonzeros
					{
						++ii; ++jj;
					}
				}
				while (ii < A.cp[i+1])	// drain A's remaining rows in this column
				{
					temp.ir[curnz] = A.ir[ii];
					temp.numx[curnz++] = __binary_op(A.numx[ii++], defaultBVal);
				}
				if(prevnz < curnz)	// at least one nonzero exists in this column
				{
					temp.jc[curnzc++] = A.jc[i];
					temp.cp[curnzc] = temp.cp[curnzc-1] + curnz-prevnz;
				}
				++i; ++j;
			}
		}
		while(i< A.nzc)	// remaining A columns after B ran out
		{
			temp.jc[curnzc++] = A.jc[i++];
			for(IU k = A.cp[i-1]; k< A.cp[i]; ++k)
			{
				temp.ir[curnz] = A.ir[k];
				temp.numx[curnz++] = __binary_op(A.numx[k], defaultBVal);
			}
			temp.cp[curnzc] = temp.cp[curnzc-1] + (A.cp[i] - A.cp[i-1]);
		}
	}
	temp.Resize(curnzc, curnz);
	return temp;
}

//! SpDCCols-level wrapper around the Dcsc EWiseMult above.
//! Returns an empty result when the intersection semantics guarantee emptiness.
template<typename IU, typename NU1, typename NU2>
SpDCCols<IU, typename promote_trait<NU1,NU2>::T_promote > EWiseMult (const SpDCCols<IU,NU1> & A, const SpDCCols<IU,NU2> & B, bool exclude)
{
	typedef typename promote_trait<NU1,NU2>::T_promote N_promote;
	assert(A.m == B.m);
	assert(A.n == B.n);

	Dcsc<IU, N_promote> * tdcsc = NULL;
	if(A.nnz > 0 && B.nnz > 0)
	{
		tdcsc = new Dcsc<IU, N_promote>(EWiseMult(*(A.dcsc), B.dcsc, exclude));
		return SpDCCols<IU, N_promote> (A.m , A.n, tdcsc);
	}
	else if (A.nnz > 0 && exclude)	// && B.nnz == 0 : A .* not(empty) == A
	{
		tdcsc = new Dcsc<IU, N_promote>(EWiseMult(*(A.dcsc), (const Dcsc<IU,NU2>*)NULL, exclude));
		return SpDCCols<IU, N_promote> (A.m , A.n, tdcsc);
	}
	else
	{
		return SpDCCols<IU, N_promote> (A.m , A.n, tdcsc);
	}
}

//! SpDCCols-level wrapper around the Dcsc EWiseApply (notB/defaultBVal variant) above.
template<typename N_promote, typename IU, typename NU1, typename NU2, typename _BinaryOperation>
SpDCCols<IU, N_promote> EWiseApply (const SpDCCols<IU,NU1> & A, const SpDCCols<IU,NU2> & B, _BinaryOperation __binary_op, bool notB, const NU2& defaultBVal)
{
	//typedef typename promote_trait<NU1,NU2>::T_promote N_promote;
	assert(A.m == B.m);
	assert(A.n == B.n);

	Dcsc<IU, N_promote> * tdcsc = NULL;
	if(A.nnz > 0 && B.nnz > 0)
	{
		tdcsc = new Dcsc<IU, N_promote>(EWiseApply<N_promote>(*(A.dcsc), B.dcsc, __binary_op, notB, defaultBVal));
		return SpDCCols<IU, N_promote> (A.m , A.n, tdcsc);
	}
	else if (A.nnz > 0 && notB)	// && B.nnz == 0
	{
		tdcsc = new Dcsc<IU, N_promote>(EWiseApply<N_promote>(*(A.dcsc), (const Dcsc<IU,NU2>*)NULL, __binary_op, notB, defaultBVal));
		return SpDCCols<IU, N_promote> (A.m , A.n, tdcsc);
	}
	else
	{
		return SpDCCols<IU, N_promote> (A.m , A.n, tdcsc);
	}
}

/**
 * Implementation based on operator +=
 * Element wise apply with the following constraints
 * The operation to be performed is __binary_op
 * The operation `c = __binary_op(a, b)` is only performed if `do_op(a, b)` returns true
 * If allowANulls is true, then if A is missing an element that B has, then ANullVal is used
 * In that case the operation becomes c[i,j] = __binary_op(ANullVal, b[i,j])
 * If both allowANulls and allowBNulls is false then the function degenerates into intersection
 */
template <typename RETT, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
Dcsc<IU, RETT> EWiseApply(const Dcsc<IU,NU1> * Ap, const Dcsc<IU,NU2> * Bp, _BinaryOperation __binary_op, _BinaryPredicate do_op, bool allowANulls, bool allowBNulls, const NU1& ANullVal, const NU2& BNullVal, const bool allowIntersect)
{
	if (Ap == NULL && Bp == NULL)
		return Dcsc<IU,RETT>(0, 0);

	if (Ap == NULL && Bp != NULL)
	{
		// A is entirely null: every B entry is paired with ANullVal (if allowed)
		if (!allowANulls)
			return Dcsc<IU,RETT>(0, 0);

		const Dcsc<IU,NU2> & B = *Bp;
		IU estnzc = B.nzc;
		IU estnz = B.nz;
		Dcsc<IU,RETT> temp(estnz, estnzc);

		IU curnzc = 0;
		IU curnz = 0;
		//IU i = 0;
		IU j = 0;
		temp.cp[0] = 0;

		while(j<B.nzc)
		{
			// Based on the if statement below which handles A null values.
			j++;
			IU prevnz = curnz;
			temp.jc[curnzc++] = B.jc[j-1];
			for(IU k = B.cp[j-1]; k< B.cp[j]; ++k)
			{
				if (do_op(ANullVal, B.numx[k], true, false))
				{
					temp.ir[curnz] = B.ir[k];
					temp.numx[curnz++] = __binary_op(ANullVal, B.numx[k], true, false);
				}
			}
			//temp.cp[curnzc] = temp.cp[curnzc-1] + (B.cp[j] - B.cp[j-1]);
			temp.cp[curnzc] = temp.cp[curnzc-1] + curnz-prevnz;
		}
		temp.Resize(curnzc, curnz);
		return temp;
	}

	if (Ap != NULL && Bp == NULL)
	{
		// B is entirely null: every A entry is paired with BNullVal (if allowed)
		if (!allowBNulls)
			return Dcsc<IU,RETT>(0, 0);

		const Dcsc<IU,NU1> & A = *Ap;
		IU estnzc = A.nzc;
		IU estnz = A.nz;
		Dcsc<IU,RETT> temp(estnz, estnzc);

		IU curnzc = 0;
		IU curnz = 0;
		IU i = 0;
		//IU j = 0;
		temp.cp[0] = 0;

		while(i< A.nzc)
		{
			i++;
			IU prevnz = curnz;
			temp.jc[curnzc++] = A.jc[i-1];
			for(IU k = A.cp[i-1]; k< A.cp[i]; k++)
			{
				if (do_op(A.numx[k], BNullVal, false, true))
				{
					temp.ir[curnz] = A.ir[k];
					temp.numx[curnz++] = __binary_op(A.numx[k], BNullVal, false, true);
				}
			}
			//temp.cp[curnzc] = temp.cp[curnzc-1] + (A.cp[i] - A.cp[i-1]);
			temp.cp[curnzc] = temp.cp[curnzc-1] + curnz-prevnz;
		}
		temp.Resize(curnzc, curnz);
		return temp;
	}

	// both A and B are non-NULL at this point
	const Dcsc<IU,NU1> & A = *Ap;
	const Dcsc<IU,NU2> & B = *Bp;

	IU estnzc = A.nzc + B.nzc;	// union upper bound
	IU estnz = A.nz + B.nz;
	Dcsc<IU,RETT> temp(estnz, estnzc);

	IU curnzc = 0;
	IU curnz = 0;
	IU i = 0;
	IU j = 0;
	temp.cp[0] = 0;

	// Merge the two sorted column lists
	while(i< A.nzc && j<B.nzc)
	{
		if(A.jc[i] > B.jc[j])	// column only in B
		{
			j++;
			if (allowANulls)
			{
				IU prevnz = curnz;
				temp.jc[curnzc++] = B.jc[j-1];
				for(IU k = B.cp[j-1]; k< B.cp[j]; ++k)
				{
					if (do_op(ANullVal, B.numx[k], true, false))
					{
						temp.ir[curnz] = B.ir[k];
						temp.numx[curnz++] = __binary_op(ANullVal, B.numx[k], true, false);
					}
				}
				//temp.cp[curnzc] = temp.cp[curnzc-1] + (B.cp[j] - B.cp[j-1]);
				temp.cp[curnzc] = temp.cp[curnzc-1] + curnz-prevnz;
			}
		}
		else if(A.jc[i] < B.jc[j])	// column only in A
		{
			i++;
			if (allowBNulls)
			{
				IU prevnz = curnz;
				temp.jc[curnzc++] = A.jc[i-1];
				for(IU k = A.cp[i-1]; k< A.cp[i]; k++)
				{
					if (do_op(A.numx[k], BNullVal, false, true))
					{
						temp.ir[curnz] = A.ir[k];
						temp.numx[curnz++] = __binary_op(A.numx[k], BNullVal, false, true);
					}
				}
				//temp.cp[curnzc] = temp.cp[curnzc-1] + (A.cp[i] - A.cp[i-1]);
				temp.cp[curnzc] = temp.cp[curnzc-1] + curnz-prevnz;
			}
		}
		else	// column in both: merge the sorted row lists
		{
			temp.jc[curnzc++] = A.jc[i];
			IU ii = A.cp[i];
			IU jj = B.cp[j];
			IU prevnz = curnz;
			while (ii < A.cp[i+1] && jj < B.cp[j+1])
			{
				if (A.ir[ii] < B.ir[jj])	// row only in A
				{
					if (allowBNulls && do_op(A.numx[ii], BNullVal, false, true))
					{
						temp.ir[curnz] = A.ir[ii];
						temp.numx[curnz++] = __binary_op(A.numx[ii++], BNullVal, false, true);
					}
					else
						ii++;
				}
				else if (A.ir[ii] > B.ir[jj])	// row only in B
				{
					if (allowANulls && do_op(ANullVal, B.numx[jj], true, false))
					{
						temp.ir[curnz] = B.ir[jj];
						temp.numx[curnz++] = __binary_op(ANullVal, B.numx[jj++], true, false);
					}
					else
						jj++;
				}
				else	// row in both
				{
					if (allowIntersect && do_op(A.numx[ii], B.numx[jj], false, false))
					{
						temp.ir[curnz] = A.ir[ii];
						temp.numx[curnz++] = __binary_op(A.numx[ii++], B.numx[jj++], false, false);	// might include zeros
					}
					else
					{
						ii++;
						jj++;
					}
				}
			}
			while (ii < A.cp[i+1])	// drain A's remaining rows in this column
			{
				if (allowBNulls && do_op(A.numx[ii], BNullVal, false, true))
				{
					temp.ir[curnz] = A.ir[ii];
					temp.numx[curnz++] = __binary_op(A.numx[ii++], BNullVal, false, true);
				}
				else
					ii++;
			}
			while (jj < B.cp[j+1])	// drain B's remaining rows in this column
			{
				if (allowANulls && do_op(ANullVal, B.numx[jj], true, false))
				{
					temp.ir[curnz] = B.ir[jj];
					temp.numx[curnz++] = __binary_op(ANullVal, B.numx[jj++], true, false);
				}
				else
					jj++;
			}
			temp.cp[curnzc] = temp.cp[curnzc-1] + curnz-prevnz;
			++i; ++j;
		}
	}
	while(allowBNulls && i< A.nzc) // remaining A elements after B ran out
	{
		IU prevnz = curnz;
		temp.jc[curnzc++] = A.jc[i++];
		for(IU k = A.cp[i-1]; k< A.cp[i]; ++k)
		{
			if (do_op(A.numx[k], BNullVal, false, true))
			{
				temp.ir[curnz] = A.ir[k];
				temp.numx[curnz++] = __binary_op(A.numx[k], BNullVal, false, true);
			}
		}
		//temp.cp[curnzc] = temp.cp[curnzc-1] + (A.cp[i] - A.cp[i-1]);
		temp.cp[curnzc] = temp.cp[curnzc-1] + curnz-prevnz;
	}
	while(allowANulls && j < B.nzc) // remaining B elements after A ran out
	{
		IU prevnz = curnz;
		temp.jc[curnzc++] = B.jc[j++];
		for(IU k = B.cp[j-1]; k< B.cp[j]; ++k)
		{
			if (do_op(ANullVal, B.numx[k], true, false))
			{
				temp.ir[curnz] = B.ir[k];
				temp.numx[curnz++] = __binary_op(ANullVal, B.numx[k], true, false);
			}
		}
		//temp.cp[curnzc] = temp.cp[curnzc-1] + (B.cp[j] - B.cp[j-1]);
		temp.cp[curnzc] = temp.cp[curnzc-1] + curnz-prevnz;
	}
	temp.Resize(curnzc, curnz);
	return temp;
}

//! SpDCCols-level wrapper around the null-aware Dcsc EWiseApply above.
template <typename RETT, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
SpDCCols<IU,RETT> EWiseApply (const SpDCCols<IU,NU1> & A, const SpDCCols<IU,NU2> & B, _BinaryOperation __binary_op, _BinaryPredicate do_op, bool allowANulls, bool allowBNulls, const NU1& ANullVal, const NU2& BNullVal, const bool allowIntersect)
{
	assert(A.m == B.m);
	assert(A.n == B.n);

	Dcsc<IU, RETT> * tdcsc = new Dcsc<IU, RETT>(EWiseApply<RETT>(A.dcsc, B.dcsc, __binary_op, do_op, allowANulls, allowBNulls, ANullVal, BNullVal, allowIntersect));
	return SpDCCols<IU, RETT> (A.m , A.n, tdcsc);
}

#endif
J1OrbitalSoA.h
//////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by:
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////
// -*- C++ -*-
#ifndef QMCPLUSPLUS_ONEBODYJASTROW_OPTIMIZED_SOA_H
#define QMCPLUSPLUS_ONEBODYJASTROW_OPTIMIZED_SOA_H
#include "Configuration.h"
#include "QMCWaveFunctions/WaveFunctionComponent.h"
#include "QMCWaveFunctions/Jastrow/DiffOneBodyJastrowOrbital.h"
#include <Utilities/qmc_common.h>
#include <CPU/SIMD/aligned_allocator.hpp>
#include <CPU/SIMD/algorithm.hpp>
#include <map>
#include <numeric>

namespace qmcplusplus
{
/** @ingroup WaveFunctionComponent
 * @brief Specialization for one-body Jastrow function using multiple functors
 */
template<class FT>
struct J1OrbitalSoA : public WaveFunctionComponent
{
  ///alias FuncType
  using FuncType = FT;
  ///type of each component U, dU, d2U;
  using valT = typename FT::real_type;
  ///element position type
  using posT = TinyVector<valT, OHMMS_DIM>;
  ///use the same container
  using DistRow  = DistanceTableData::DistRow;
  using DisplRow = DistanceTableData::DisplRow;
  ///table index
  const int myTableID;
  ///number of ions
  int Nions;
  ///number of electrons
  int Nelec;
  ///number of groups
  int NumGroups;
  ///reference to the sources (ions)
  const ParticleSet& Ions;

  // scratch values for the particle currently being moved (set by ratio/ratioGrad,
  // consumed by acceptMove)
  valT curAt;
  valT curLap;
  posT curGrad;

  ///\f$Vat[i] = sum_(j) u_{i,j}\f$
  Vector<valT> Vat;
  // per-ion work arrays for the active electron: u, du/r, d2u, d3u
  aligned_vector<valT> U, dU, d2U, d3U;
  aligned_vector<valT> DistCompressed;
  aligned_vector<int> DistIndice;
  // per-electron cached gradient and laplacian contributions
  Vector<posT> Grad;
  Vector<valT> Lap;
  ///Container for \f$F[ig*NumGroups+jg]\f$
  std::vector<FT*> F;

  J1OrbitalSoA(const std::string& obj_name, const ParticleSet& ions, ParticleSet& els)
      : WaveFunctionComponent("J1OrbitalSoA",
RealType(1);
    //#pragma omp simd reduction(+:lap)
    // laplacian of u(r): d2u + (D-1)/r * du, with du stored as du/r (see computeU3)
    for (int jat = 0; jat < Nions; ++jat)
      lap += d2u[jat] + lapfac * du[jat];
    for (int idim = 0; idim < OHMMS_DIM; ++idim)
    {
      const valT* restrict dX = displ.data(idim);
      valT s = valT();
      //#pragma omp simd reduction(+:s)
      for (int jat = 0; jat < Nions; ++jat)
        s += du[jat] * dX[jat];
      grad[idim] = s;
    }
    return lap;
  }

  /** compute U, dU and d2U
   * @param P quantum particleset
   * @param iat the moving particle
   * @param dist starting address of the distances of the ions wrt the iat-th particle
   *
   * On exit dU holds (du/dr)/r, so it can be contracted directly with displacements.
   */
  inline void computeU3(ParticleSet& P, int iat, const DistRow& dist)
  {
    if (NumGroups > 0)
    { //ions are grouped
      constexpr valT czero(0);
      std::fill_n(U.data(), Nions, czero);
      std::fill_n(dU.data(), Nions, czero);
      std::fill_n(d2U.data(), Nions, czero);
      for (int jg = 0; jg < NumGroups; ++jg)
      {
        if (F[jg] == nullptr)
          continue;
        F[jg]->evaluateVGL(-1, Ions.first(jg), Ions.last(jg), dist.data(), U.data(), dU.data(), d2U.data(),
                           DistCompressed.data(), DistIndice.data());
      }
    }
    else
    {
      // ungrouped ions: evaluate one at a time
      for (int c = 0; c < Nions; ++c)
      {
        int gid = Ions.GroupID[c];
        if (F[gid] != nullptr)
        {
          U[c] = F[gid]->evaluate(dist[c], dU[c], d2U[c]);
          dU[c] /= dist[c];
        }
      }
    }
  }

  /** compute the gradient during particle-by-particle update
   * @param P quantum particleset
   * @param iat particle index
   */
  GradType evalGrad(ParticleSet& P, int iat) { return GradType(Grad[iat]); }

  /** compute the gradient during particle-by-particle update
   * @param P quantum particleset
   * @param iat particle index
   *
   * Using getTempDists(). curAt, curGrad and curLap are computed.
   */
  PsiValueType ratioGrad(ParticleSet& P, int iat, GradType& grad_iat)
  {
    UpdateMode = ORB_PBYP_PARTIAL;
    computeU3(P, iat, P.getDistTable(myTableID).getTempDists());
    curLap = accumulateGL(dU.data(), d2U.data(), P.getDistTable(myTableID).getTempDispls(), curGrad);
    curAt  = simd::accumulate_n(U.data(), Nions, valT());
    grad_iat += curGrad;
    return std::exp(static_cast<PsiValueType>(Vat[iat] - curAt));
  }

  /** Rejected move. Nothing to do */
  inline void restore(int iat) {}

  /** Accepted move. Update Vat[iat],Grad[iat] and Lap[iat] */
  void acceptMove(ParticleSet& P, int iat, bool safe_to_delay = false)
  {
    // a plain ratio() call did not compute curGrad/curLap; do it now
    if (UpdateMode == ORB_PBYP_RATIO)
    {
      computeU3(P, iat, P.getDistTable(myTableID).getTempDists());
      curLap = accumulateGL(dU.data(), d2U.data(), P.getDistTable(myTableID).getTempDispls(), curGrad);
    }
    LogValue += Vat[iat] - curAt;
    Vat[iat]  = curAt;
    Grad[iat] = curGrad;
    Lap[iat]  = curLap;
  }

  /** register per-walker state (Vat/Grad/Lap) in the walker buffer */
  inline void registerData(ParticleSet& P, WFBufferType& buf)
  {
    if (Bytes_in_WFBuffer == 0)
    {
      Bytes_in_WFBuffer = buf.current();
      buf.add(Vat.begin(), Vat.end());
      buf.add(Grad.begin(), Grad.end());
      buf.add(Lap.begin(), Lap.end());
      Bytes_in_WFBuffer = buf.current() - Bytes_in_WFBuffer;
      // free local space
      Vat.free();
      Grad.free();
      Lap.free();
    }
    else
    {
      buf.forward(Bytes_in_WFBuffer);
    }
  }

  inline LogValueType updateBuffer(ParticleSet& P, WFBufferType& buf, bool fromscratch = false)
  {
    evaluateGL(P, P.G, P.L, false);
    buf.forward(Bytes_in_WFBuffer);
    return LogValue;
  }

  /** re-attach Vat/Grad/Lap to the storage owned by the walker buffer */
  inline void copyFromBuffer(ParticleSet& P, WFBufferType& buf)
  {
    Vat.attachReference(buf.lendReference<valT>(Nelec), Nelec);
    Grad.attachReference(buf.lendReference<posT>(Nelec), Nelec);
    Lap.attachReference(buf.lendReference<valT>(Nelec), Nelec);
  }

  WaveFunctionComponentPtr makeClone(ParticleSet& tqp) const
  {
    J1OrbitalSoA<FT>* j1copy = new J1OrbitalSoA<FT>(myName, Ions, tqp);
    j1copy->Optimizable      = Optimizable;
    for (size_t i = 0, n = F.size(); i < n; ++i)
    {
      if (F[i] != nullptr)
        j1copy->addFunc(i, new FT(*F[i])); // deep-copy each functor; clone owns its copies
    }
    if (dPsi)
    {
      j1copy->dPsi = dPsi->makeClone(tqp);
    }
    return j1copy;
  }

  /**@{ WaveFunctionComponent virtual functions that are not essential for the development */
  void reportStatus(std::ostream& os)
  {
    for (size_t i = 0, n = F.size(); i < n; ++i)
    {
      if (F[i] != nullptr)
        F[i]->myVars.print(os);
    }
  }

  void checkInVariables(opt_variables_type& active)
  {
    myVars.clear();
    for (size_t i = 0, n = F.size(); i < n; ++i)
    {
      if (F[i] != nullptr)
      {
        F[i]->checkInVariables(active);
        F[i]->checkInVariables(myVars);
      }
    }
  }

  void checkOutVariables(const opt_variables_type& active)
  {
    myVars.getIndex(active);
    Optimizable = myVars.is_optimizable();
    for (size_t i = 0, n = F.size(); i < n; ++i)
      if (F[i] != nullptr)
        F[i]->checkOutVariables(active);
    if (dPsi)
      dPsi->checkOutVariables(active);
  }

  void resetParameters(const opt_variables_type& active)
  {
    if (!Optimizable)
      return;
    for (size_t i = 0, n = F.size(); i < n; ++i)
      if (F[i] != nullptr)
        F[i]->resetParameters(active);
    for (int i = 0; i < myVars.size(); ++i)
    {
      int ii = myVars.Index[i];
      if (ii >= 0)
        myVars[i] = active[ii];
    }
    if (dPsi)
      dPsi->resetParameters(active);
  }
  /**@} */

  /** gradient of log(J1) with respect to the position of source ion isrc */
  inline GradType evalGradSource(ParticleSet& P, ParticleSet& source, int isrc)
  {
    GradType g_return(0.0);
    const DistanceTableData& d_ie(P.getDistTable(myTableID));
    for (int iat = 0; iat < Nelec; ++iat)
    {
      const auto& dist  = d_ie.getDistRow(iat);
      const auto& displ = d_ie.getDisplRow(iat);
      int gid           = source.GroupID[isrc];
      RealType r        = dist[isrc];
      RealType rinv     = 1.0 / r;
      PosType dr        = displ[isrc];
      if (F[gid] != nullptr)
      {
        U[isrc] = F[gid]->evaluate(dist[isrc], dU[isrc], d2U[isrc], d3U[isrc]);
        g_return -= dU[isrc] * rinv * dr;
      }
    }
    return g_return;
  }

  /** gradient of log(J1) wrt ion isrc, plus gradients/laplacians of that gradient
   *  wrt electron coordinates (for force estimators)
   */
  inline GradType evalGradSource(ParticleSet& P,
                                 ParticleSet& source,
                                 int isrc,
                                 TinyVector<ParticleSet::ParticleGradient_t, OHMMS_DIM>& grad_grad,
                                 TinyVector<ParticleSet::ParticleLaplacian_t, OHMMS_DIM>& lapl_grad)
  {
    GradType g_return(0.0);
    const DistanceTableData& d_ie(P.getDistTable(myTableID));
    for (int iat = 0; iat < Nelec; ++iat)
    {
      const auto& dist  = d_ie.getDistRow(iat);
      const auto& displ = d_ie.getDisplRow(iat);
      int gid           = source.GroupID[isrc];
      RealType r        = dist[isrc];
      RealType rinv     = 1.0 / r;
      PosType dr        = displ[isrc];
      if (F[gid] != nullptr)
      {
        U[isrc] = F[gid]->evaluate(dist[isrc], dU[isrc], d2U[isrc], d3U[isrc]);
      }
      else
      {
        APP_ABORT("J1OrbitalSoa::evaluateGradSource: F[gid]==nullptr")
      }
      g_return -= dU[isrc] * rinv * dr;
      //The following terms depend only on the radial component r. Thus,
      //we compute them and mix with position vectors to acquire the full
      //cartesian vector objects.
      valT grad_component = (d2U[isrc] - dU[isrc] * rinv);
      valT lapl_component = d3U[isrc] + 2 * rinv * grad_component;
      for (int idim = 0; idim < OHMMS_DIM; idim++)
      {
        grad_grad[idim][iat] += dr[idim] * dr * rinv * rinv * grad_component;
        grad_grad[idim][iat][idim] += rinv * dU[isrc];
        lapl_grad[idim][iat] -= lapl_component * rinv * dr[idim];
      }
    }
    return g_return;
  }
};

} // namespace qmcplusplus
#endif
operator_tune-inl.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef MXNET_OPERATOR_OPERATOR_TUNE_INL_H_ #define MXNET_OPERATOR_OPERATOR_TUNE_INL_H_ #include <dmlc/base.h> #include <dmlc/logging.h> #include <mshadow/base.h> #include <atomic> #include <cstdint> #include <chrono> #include <thread> #include <string> #include <vector> #include <algorithm> #include <list> #include <random> #include <unordered_set> #include "./mxnet_op.h" #include "./operator_tune.h" #if (__GNUC__ >= 4 || (__GNUC__ >= 3 && __GNUC_MINOR__ >= 4)) && !defined(__mips__) # define HAS_CXA_DEMANGLE 1 #else # define HAS_CXA_DEMANGLE 0 #endif #if HAS_CXA_DEMANGLE #include <cxxabi.h> #endif namespace mxnet { namespace op { #ifndef MXNET_NO_INLINE #ifdef _MSC_VER #define MXNET_NO_INLINE __declspec(noinline) #else #define MXNET_NO_INLINE __attribute__((noinline)) #endif #endif // MXNET_NO_INLINE #define OUTSIDE_COUNT_SHIFT 3 namespace tune { /*! 
* \brief Convert TuningMode value to a string representation * \param tm Scalar TuningMode value * \return Character pointer to a string representing the TuningMode value */ inline const char *TuningModeToString(const TuningMode tm) { switch (tm) { case kAuto: return "Auto"; case kNeverOMP: return "NeverOMP"; case kAlwaysOMP: return "AlwaysOMP"; default: CHECK(false) << "Unknown TuningMode type: " << static_cast<int>(tm); return "<unknown>"; } } } // namespace tune /*! * \brief Engine to tune kernel operations * \tparam DType Data type to be used when tuning the kernel operations * \remarks The basic concept here is that we time how long a trivial loop takes with and without * OMP, subtracting the non-OMP run from the OMP run, which gives us the time * that the OMP overhead takes. Times were found to be relatively invariant with * regard ot the number of threads/cores on a given machine. * Secondly, supplied operators are run and timed (for each data type) in order to determine * their individual time cost. * * Knowing the following items, we can determine how long the OMP and non-OMP run * is expected to take: * 1) OMP overhead time * 2) Number of iterations required * 3) Number of threads to be used if we choose the OMP method * 4) The data type * * Therefore, at Kernel::Launch() time, we can estimate whether it is faster to use OMP or not * for the given kernel operator. * * Results and efficiency of the tuning is tested in the gtest OMP_TUNING test suite */ template<typename DType> class OperatorTune : public OperatorTuneByType<DType> { public: using Tick = OperatorTuneBase::Tick; using duration_t = OperatorTuneBase::duration_t; using OperatorTuneByType<DType>::tuning_mode_; /*! * \brief Constructor */ OperatorTune() { TuneAll(); } /*! 
* \brief Initialize the OperatorTune object * \return Whether the OperatorTune object was successfully initialized */ static bool Initialize() { if (!initialized_) { initialized_ = true; // Generate some random data for calling the operator kernels data_set_ = std::unique_ptr<DType[]>(reinterpret_cast<DType*>(new char[0x100 * sizeof(DType)])); std::random_device rd; std::mt19937 gen(rd()); if (!std::is_integral<DType>::value) { std::uniform_real_distribution<> dis(-1, 1); for (int n = 0; n < 0x100; ++n) { const auto val = static_cast<DType>(dis(gen)); // If too close to zero, try again if (std::fabs(static_cast<double>(val)) < 1e-5) { --n; continue; } data_set_[n] = val; } } else { std::uniform_int_distribution<> dis(-128, 127); for (int n = 0; n < 0x100; ++n) { const auto val = static_cast<DType>(dis(gen)); // If zero, try again if (!val) { --n; continue; } data_set_[n] = val; } } // Use this environment variable to generate new tuning statistics // In order to avoid printing too many copies, only the float32 object prints output_tuning_data_ = mshadow::DataType<DType>::kFlag == mshadow::kFloat32 && dmlc::GetEnv("MXNET_OUTPUT_TUNING_DATA", false); // If outputting tuning data, then also output verbose logging info OperatorTuneBase::verbose_tuning_info_ = dmlc::GetEnv("MXNET_VERBOSE_TUNING_INFO", false); OperatorTuneBase::tuning_weight_scale_ = dmlc::GetEnv("MXNET_TUNING_WEIGHT_SCALE", 0.0); // This isn't actually supposed to be multithreaded init, but just to be sure the change is // seen everywhere, using atomic bool. 
if (!OperatorTuneBase::calculated_.load()) { // Not especially concerned with a race condition, since this hsould // run when only one thread is active (static init), just don't cache this variable OperatorTuneBase::calculated_.store(true); std::string config = dmlc::GetEnv("MXNET_USE_OPERATOR_TUNING", std::string()); StringUtil::trim(&config); // disabled if (!config.empty() && ::isdigit(config[0]) && std::atoi(config.c_str()) == 0) { OperatorTuneBase::omp_overhead_ns_ = INT_MAX; } else { OperatorTuneBase::omp_overhead_ns_ = GetOMPLoopOverhead(); } ParseEnablerConfig(config); } if (OperatorTuneBase::verbose_tuning_info_) { LOG(INFO) << "OMP overhead: " << OperatorTuneBase::omp_overhead_ns_ << " nanoseconds"; } } return true; } /*! * \brief Schedule a tuning run * \tparam OP Operator to tune * \param tune_func Function to call which tunes the operator * \return true if the tune operation was scheduled */ template<typename OP> static bool ScheduleTune(void (*tune_func)()) { #ifdef MXNET_USE_OPERATOR_TUNING if (tune_func) { GetTuningList()->push_back(tune_func); operator_names_.insert(demangle(typeid(OP).name())); return true; } return false; #else return true; #endif } /*! * \brief Is the template parameter type a tuned kernel? 
* \tparam OP kernel operator type * \return true if the operator/kernel is tuned */ template<typename OP> static bool IsTuned() { return operator_names_.find(demangle(typeid(OP).name())) != operator_names_.end(); } /*!\ * \brief Tune all registered kernel operators that haven't already been tuned */ static bool TuneAll() { Initialize(); std::list<void (*)()> *tl = GetTuningList(); const size_t size_save = tl->size(); // For checking if anything asynchronous is // adding or removing items, which is forbidden if (output_tuning_data_ && !tl->empty()) { // Only emit this once, use the most common case, 'float32' if (mshadow::DataType<DType>::kFlag == mshadow::kFloat32) { std::cout << "OperatorTuneBase::duration_t " << "OperatorTuneBase::omp_overhead_ns_ = " << OperatorTuneBase::omp_overhead_ns_ << ";" << std::endl << std::flush; } } const Tick start = std::chrono::high_resolution_clock::now(); for (auto i : *tl) { (*i)(); } if (OperatorTuneBase::verbose_tuning_info_) { const duration_t duration = OperatorTune::GetDurationInNanoseconds(start); LOG(INFO) << "Op Tuning for " << type_name<DType>() << " took " << (duration / 1000000) << " ms"; } CHECK_EQ(size_save, tl->size()) << "Tuning list size should not have changed while tuning"; tl->clear(); return true; } /*! * \brief Return set of operator names that were registered to be tuned. Does not imply * that the operator has been tuned. * \return Set of operator/kernel names that were registered for tuning */ static const std::unordered_set<std::string>& TunedOperatorNames() { return operator_names_; } protected: /*! * \brief Get the list of tuning function calls for the operators * \return Pointer to list of tuning function calls */ static std::list<void (*)()> *GetTuningList(); /*! 
* \brief Demangle typeid::name() in order to generate source macros * \param name C++ Mangled name * \return Demangled name as string */ static inline std::string demangle(const char *name) { #if HAS_CXA_DEMANGLE int status = -4; // some arbitrary value to eliminate the compiler warning std::unique_ptr<char, void (*)(void *)> res{ abi::__cxa_demangle(name, nullptr, nullptr, &status), &std::free }; return status ? name : res.get(); #else return name; #endif } /*! * \brief Type name as string * \tparam T Type * \return std::string representing the human-readable demangled type name */ template<typename T> static inline std::string type_name() { return demangle(typeid(T).name()); } /*! \brief Measure OMP overhead for a trivial OMP loop using all cores * \param omp_thread_count - Number of OMP threads to use in the timing test * \returns Duration in nanoseconds for the OMP overhead (time to initiate and close the * OMP session) */ static duration_t GetOMPLoopOverhead(const size_t omp_thread_count) { CHECK_GT(omp_thread_count, 1); // Don't try to use OMP for one thread int wl_count = OperatorTuneBase::WORKLOAD_COUNT; Tick start = std::chrono::high_resolution_clock::now(); // Use two loops in order to simulate OMP outside timing for (size_t i = 0; i < OUTSIDE_COUNT; ++i) { for (int x = 0; x < wl_count; ++x) { // trivial operation volatile_int_ += x; } } const OperatorTuneBase::duration_t no_omp_duration = OperatorTuneBase::GetDurationInNanoseconds(start); // Scale OMP iterations by type calculation complexity double factor; // if tuning_weight_scale_ is a number that looks valid, use it as the factor if (OperatorTuneBase::tuning_weight_scale_ > 0.01) { factor = OperatorTuneBase::tuning_weight_scale_; } else { // These are empirically-determined constants found by balancing between // a desktop (8 & 12 cpu's) and large cloud instances (32 & 64 cpu's) switch (mshadow::DataType<DType>::kFlag) { case mshadow::kUint8: case mshadow::kInt8: factor = 8.5; break; case 
mshadow::kInt32: factor = 4.5; break; case mshadow::kInt64: factor = 2; break; case mshadow::kFloat64: factor = 1.25; break; case mshadow::kFloat32: default: factor = 1.0; break; } } wl_count = static_cast<int>(factor * OperatorTuneBase::WORKLOAD_COUNT * omp_thread_count); start = std::chrono::high_resolution_clock::now(); for (size_t i = 0; i < OUTSIDE_COUNT; ++i) { #pragma omp parallel for num_threads(omp_thread_count) for (int x = 0; x < wl_count; ++x) { // trivial operation volatile_int_ += x; } } const duration_t omp_duration = OperatorTuneBase::GetDurationInNanoseconds(start) - no_omp_duration; return omp_duration >> OUTSIDE_COUNT_SHIFT; } /*! \brief Measure OMP overhead for a trivial OMP loop using all cores * \returns Time in nanoseconds to initialize/cleanup when excuting an OMP block */ static duration_t GetOMPLoopOverhead() { // It was found empirically that OMP times was not heavily tied to number of cores, // so take an average across all core counts const auto max_cores_default = static_cast<size_t>(omp_get_num_procs()) >> 1; const auto max_cores = dmlc::GetEnv("MXNET_USE_NUM_CORES_OPERATOR_TUNING", max_cores_default); if (max_cores >= 2) { std::vector<duration_t> core_times; // Take care of any OMP lazy-init with a throwaway call for (size_t omp_threads = 2; omp_threads <= max_cores; ++omp_threads) { GetOMPLoopOverhead(omp_threads); } std::vector<duration_t> durations; durations.reserve(max_cores - 1); for (size_t omp_threads = 2; omp_threads <= max_cores; ++omp_threads) { const duration_t duration = GetOMPLoopOverhead(omp_threads); if (OperatorTuneBase::verbose_tuning_info_) { LOG(INFO) << "OMP Thread Count: " << omp_threads << ", overhead: " << duration << " ns"; } durations.emplace_back(duration); } // return median std::sort(durations.begin(), durations.end()); return durations[durations.size() >> 1]; } return INT_MAX; // If only one core, then never use OMP (say the overhead is huge) } /*! 
* \brief Some string utility functions that aren't specific to tuning
   */
  struct StringUtil {
    /*!
     * \brief Trim whitespace from beginning and end of string
     * \param s String to trim
     * \return reference to the modified string. This is the same std::string object as what was
     * supplied in the parameters
     */
    static std::string &trim(std::string *s) {
      s->erase(s->begin(), std::find_if(s->begin(), s->end(), [](int ch) {
        return !std::isspace(ch);
      }));
      s->erase(std::find_if(s->rbegin(), s->rend(), [](int ch) {
        return !std::isspace(ch);
      }).base(), s->end());
      return *s;
    }

    /*!
     * \brief Tokenize a string into a list of tokens
     * \param s String to tokenize (comma-separated; each token is trimmed, empties dropped)
     * \return std::list of tokens
     */
    static std::list<std::string> string2list(const std::string &s) {
      std::list<std::string> res;
      std::istringstream iss(s);
      std::string token;
      while (std::getline(iss, token, ',')) {
        trim(&token);
        if (!token.empty()) {
          res.push_back(token);
        }
      }
      return res;
    }
  };

  /*!
   * \brief Get data type from string representation
   * \warning Do not call from a performance-sensitive area
   */
  static int type_from_string(const std::string& type_string) {
    if (type_string == "float32")
      return mshadow::kFloat32;
    if (type_string == "float64")
      return mshadow::kFloat64;
    if (type_string == "float16")
      return mshadow::kFloat16;
    if (type_string == "int8")
      return mshadow::kInt8;
    if (type_string == "uint8")
      return mshadow::kUint8;
    if (type_string == "int32")
      return mshadow::kInt32;
    if (type_string == "int64")
      return mshadow::kInt64;
    return -1;  // invalid
  }

  /*!
   * \brief Parse MXNET_USE_OPERATOR_TUNING environment variable
   * \param config String representation of MXNET_ENABLE_OPERATOR_TUNING environment variable
   * Values:
   * 0=disable all
   * 1=enable all
   * float32, float16, float32=list of types to enable, and disable those not listed
   */
  static void ParseEnablerConfig(std::string config) {
    StringUtil::trim(&config);
    if (!config.empty()) {
      // First disable all
      OperatorTuneByType<float>::set_tuning_mode(tune::kAlwaysOMP);
      OperatorTuneByType<double>::set_tuning_mode(tune::kAlwaysOMP);
      OperatorTuneByType<int8_t>::set_tuning_mode(tune::kAlwaysOMP);
      OperatorTuneByType<uint8_t>::set_tuning_mode(tune::kAlwaysOMP);
      OperatorTuneByType<int32_t>::set_tuning_mode(tune::kAlwaysOMP);
      OperatorTuneByType<int64_t>::set_tuning_mode(tune::kAlwaysOMP);
      // See if it's a non-number (ie type or list of types)
      if (!::isdigit(config[0])) {
        OperatorTuneByType<mshadow::half::half_t>::set_tuning_mode(tune::kAuto);
        std::list<std::string> tokens = StringUtil::string2list(config);
        for (const std::string& stype : tokens) {
          // We don't have an enum for half_t
          const int typ = type_from_string(stype);
          if (typ >= 0) {
            switch (typ) {
              case mshadow::kFloat32:
                OperatorTuneByType<float>::set_tuning_mode(tune::kAuto);
                break;
              case mshadow::kFloat64:
                OperatorTuneByType<double>::set_tuning_mode(tune::kAuto);
                break;
              case mshadow::kFloat16:
                OperatorTuneByType<mshadow::half::half_t>::set_tuning_mode(tune::kAuto);
                break;
              case mshadow::kInt8:
                OperatorTuneByType<int8_t>::set_tuning_mode(tune::kAuto);
                break;
              case mshadow::kUint8:
                OperatorTuneByType<uint8_t>::set_tuning_mode(tune::kAuto);
                break;
              case mshadow::kInt32:
                OperatorTuneByType<int32_t>::set_tuning_mode(tune::kAuto);
                break;
              case mshadow::kInt64:
                OperatorTuneByType<int64_t>::set_tuning_mode(tune::kAuto);
                break;
              default:
                CHECK(false) << "Unsupported tuning data type: " << stype;
                break;
            }
          } else {
            // -1 is error
            LOG(WARNING) << "Unknown data type to be tuned: " << stype;
          }
        }
      } else {
        if (std::atoi(config.c_str()) > 0) {
          OperatorTuneByType<float>::set_tuning_mode(tune::kAuto);
          OperatorTuneByType<double>::set_tuning_mode(tune::kAuto);
          OperatorTuneByType<int8_t>::set_tuning_mode(tune::kAuto);
          OperatorTuneByType<uint8_t>::set_tuning_mode(tune::kAuto);
          OperatorTuneByType<int32_t>::set_tuning_mode(tune::kAuto);
          OperatorTuneByType<int64_t>::set_tuning_mode(tune::kAuto);
          OperatorTuneByType<mshadow::half::half_t>::set_tuning_mode(tune::kAuto);
        }
      }
    }
  }

  /*! \brief Whether this object has been initialized */
  static bool initialized_;
  /*! \brief Number of passes to obtain an average */
  static constexpr duration_t OUTSIDE_COUNT = (1 << OUTSIDE_COUNT_SHIFT);
  /*! \brief Random data for timing operator calls */
  static std::unique_ptr<DType[]> data_set_;
  /*! \brief Operators tuned */
  static std::unordered_set<std::string> operator_names_;
  /*! \brief Arbitrary object to modify in OMP loop */
  static volatile int volatile_int_;
  /*! \brief Output insertable (into code) instantiation+default-value macros */
  static bool output_tuning_data_;
};

/*!
 * \brief Class that tunes unary operators
 * \tparam DType Data type to be used when tuning the kernel operations
 */
template<typename DType>
class UnaryOpTune : public OperatorTune<DType> {
 protected:
  typedef OperatorTune<DType> Super;
  using duration_t = typename Super::duration_t;
  using Tick = typename Super::Tick;

  /*!
* \brief Determine the time it takes a kernel operator to execute WORKLOAD_COUNT iterations
   * Used for kernels that take no arguments (ie set_zero)
   * \tparam OP Kernel operator
   * \return Duration in nanoseconds for the 'WORKLOAD_COUNT' operations
   */
  template<typename OP>
  static duration_t GetBlankWorkload() {
    DType tmp;
    volatile DType *res = &tmp;  // volatile sink keeps the loop from being optimized away
    const Tick start = std::chrono::high_resolution_clock::now();
    for (size_t i = 0; i < Super::WORKLOAD_COUNT; ++i) {
      *res += OP::Map();
    }
    const duration_t omp_duration = Super::GetDurationInNanoseconds(start);
    return omp_duration ? omp_duration : 1;  // never return 0 (workload is used as a weight)
  }

  /*!
   * \brief Determine the time it takes a kernel operator to execute WORKLOAD_COUNT iterations
   * Used for kernels that take one argument (ie sqrt())
   * \tparam OP Kernel operator
   * \return Duration in nanoseconds for the 'WORKLOAD_COUNT' operations
   */
  template<typename OP>
  static duration_t GetUnaryWorkload() {
    DType tmp;
    volatile DType *res = &tmp;
    const Tick start = std::chrono::high_resolution_clock::now();
    for (size_t i = 0; i < Super::WORKLOAD_COUNT; ++i) {
      // Use a logical AND instead of mod to avoid affecting the timing result with a slow divide
      *res = OP::Map(Super::data_set_[i & 0xFF]);
    }
    const duration_t omp_duration = Super::GetDurationInNanoseconds(start);
    return omp_duration ? omp_duration : 1;
  }

  /*!
   * \brief Determine the time it takes a kernel operator to execute WORKLOAD_COUNT iterations
   * Used for kernels that take two arguments (ie elemwise_add())
   * \tparam OP Kernel operator
   * \return Duration in nanoseconds for the 'WORKLOAD_COUNT' operations
   */
  template<typename OP>
  static inline duration_t GetBinaryWorkload() {
    DType tmp;
    volatile DType *res = &tmp;
    const Tick start = std::chrono::high_resolution_clock::now();
    for (size_t i = 0; i < Super::WORKLOAD_COUNT; ++i) {
      // Use a logical AND instead of mod to avoid affecting the timing result with a slow divide
      *res = OP::Map(Super::data_set_[i & 0xFF], Super::data_set_[(i + 1) & 0xFF]);
    }
    const duration_t omp_duration = Super::GetDurationInNanoseconds(start);
    return omp_duration ? omp_duration : 1;
  }

  /*!
   * \brief Determine the time it takes a kernel operator to execute WORKLOAD_COUNT iterations
   * Used for kernels that take three arguments (ie backwards_grad<elemwise_add>())
   * \tparam OP Kernel operator
   * \return Duration in nanoseconds for the 'WORKLOAD_COUNT' operations
   */
  template<typename OP>
  static duration_t GetTertiaryWorkload() {
    DType tmp;
    volatile DType *res = &tmp;
    const Tick start = std::chrono::high_resolution_clock::now();
    for (size_t i = 0; i < Super::WORKLOAD_COUNT; ++i) {
      // Use a logical AND instead of mod to avoid affecting the timing result with a slow divide
      *res = OP::Map(Super::data_set_[i & 0xFF],
                     Super::data_set_[(i + 1) & 0xFF],
                     Super::data_set_[i & 0xFF]);
    }
    const duration_t omp_duration = Super::GetDurationInNanoseconds(start);
    return omp_duration ? omp_duration : 1;
  }

  /*!
   * \brief Determine the time it takes a kernel operator to execute WORKLOAD_COUNT iterations
   * Used for mxnet-like kernels that take no arguments)
   * \tparam OP Kernel operator
   * \return Duration in nanoseconds for the 'WORKLOAD_COUNT' operations
   */
  template<typename OP>
  static duration_t GetBlankWorkloadEx() {
    std::unique_ptr<DType[]> tmp(new DType[Super::WORKLOAD_COUNT]);
    DType *tmp_ptr = tmp.get();
    const Tick start = std::chrono::high_resolution_clock::now();
    for (size_t i = 0; i < Super::WORKLOAD_COUNT; ++i) {
      OP::Map(i, tmp_ptr);
    }
    const duration_t omp_duration = Super::GetDurationInNanoseconds(start);
    return omp_duration ? omp_duration : 1;
  }

 public:
  /*!
   * \brief Tune the specified kernel operator.  Optionally print out C++ macro that defines the
   * tuning data variable and the default tuned value
   * This function tunes an operator which takes no arguments
   * \tparam OP The kernel operator to be tuned
   */
  template<typename OP>
  static void TuneBlankOperator() {
    mxnet::op::mxnet_op::tuned_op<OP, DType>::workload_[0] = GetBlankWorkload<OP>();
    if (Super::output_tuning_data_) {
      std::cout << "IMPLEMENT_UNARY_WORKLOAD_FWD(" << Super::template type_name<OP>()
                << "); // NOLINT()" << std::endl << std::flush;  // For long lines
    }
  }

  /*!
   * \brief Tune the specified kernel operator.  Optionally print out C++ macro that defines the
   * tuning data variable and the default tuned value
   * This function tunes an operator which takes one argument
   * \tparam OP The kernel operator to be tuned
   */
  template<typename OP>
  static void TuneUnaryOperator() {
    mxnet::op::mxnet_op::tuned_op<OP, DType>::workload_[0] = GetUnaryWorkload<OP>();
    if (Super::output_tuning_data_) {
      std::cout << "IMPLEMENT_UNARY_WORKLOAD_FWD(" << Super::template type_name<OP>()
                << "); // NOLINT()" << std::endl << std::flush;  // For long lines
    }
  }

  /*!
   * \brief Tune the specified kernel operator.  Optionally print out C++ macro that defines the
   * tuning data variable and the default tuned value
   * This function tunes a backward operator which takes one argument
   * \tparam OP The kernel operator to be tuned
   */
  template<typename OP>
  static void TuneUnaryBackwardOperator() {
    // backward_grad_tuned<OP> takes (out_grad, input), so time a binary workload
    mxnet::op::mxnet_op::tuned_op<mxnet_op::backward_grad_tuned<OP>, DType>::workload_[0] =
      GetBinaryWorkload<mxnet::op::mxnet_op::backward_grad_tuned<OP>>();
    if (Super::output_tuning_data_) {
      std::cout << "IMPLEMENT_UNARY_WORKLOAD_BWD(" << Super::template type_name<OP>()
                << "); // NOLINT()" << std::endl << std::flush;  // For long lines
    }
  }

  /*!
   * \brief Tune the specified "mxnet_op-type" kernel operator.
   * Optionally print out C++ macro that defines the
   * tuning data variable and the default tuned value
   * This function tunes an operator which takes no arguments
   * \tparam OP The kernel operator to be tuned
   */
  template<typename OP>
  static void TuneBlankOperatorEx() {
    mxnet::op::mxnet_op::tuned_op<OP, DType>::workload_[0] = GetBlankWorkloadEx<OP>();
    if (Super::output_tuning_data_) {
      std::cout << "IMPLEMENT_BLANK_WORKLOAD_FWD(" << Super::template type_name<OP>()
                << "); // NOLINT()" << std::endl << std::flush;  // For long lines
    }
  }

  /*!
   * \brief Determine whether to use OMP based upon both timing and configuration using the
   * given (templated) operator's workload
   * \tparam OP Operator whose workload to use (tuned_op::workload_[0])
   * \param N Number of iterations desired
   * \param thread_count Number of OMP threads available to perform the iterations
   * \returns Whether it's faster to use OMP for these iterations
   */
  template<typename OP>
  inline static bool UseOMP(size_t N, size_t thread_count) {
    return OperatorTune<DType>::UseOMP(N, thread_count,
                                       static_cast<uint64_t>(N) * OP::workload_[0]);
  }
};

/*!
* \brief Class that tunes binary and unary operators * \tparam DType Data type to be used when tuning the kernel operations */ template<typename DType> class BinaryOpTune : public UnaryOpTune<DType> { protected: typedef UnaryOpTune<DType> Super; public: /*! * \brief Tune a generic binary operator * @tparam OP - Operator type */ template<typename OP> static void TuneBinaryOperator() { mxnet_op::tuned_op<OP, DType>::workload_[0] = Super::template GetBinaryWorkload<OP>(); if (Super::Super::output_tuning_data_) { std::cout << "IMPLEMENT_BINARY_WORKLOAD_FWD(" << Super::template type_name<OP>() << "); // NOLINT()" << std::endl << std::flush; // For long lines } } /*! * \brief Tune binary backward operator * \tparam OP - operator */ template<typename OP> static void TuneBinaryBackwardOperator() { mxnet::op::mxnet_op::tuned_op<mxnet_op::backward_grad_tuned<OP>, DType>::workload_[0] = Super::template GetTertiaryWorkload<mxnet::op::mxnet_op::backward_grad_tuned<OP>>(); if (Super::Super::output_tuning_data_) { std::cout << "IMPLEMENT_BINARY_WORKLOAD_BWD(" << Super::template type_name<OP>() << "); // NOLINT()" << std::endl << std::flush; // For long lines } } }; #undef OUTSIDE_COUNT_SHIFT #undef WORKLOAD_COUNT_SHIFT } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_OPERATOR_TUNE_INL_H_
imginputfileconn.h
/** * DeepDetect * Copyright (c) 2014 Emmanuel Benazera * Author: Emmanuel Benazera <beniz@droidnik.fr> * * This file is part of deepdetect. * * deepdetect is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * deepdetect is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with deepdetect. If not, see <http://www.gnu.org/licenses/>. */ #ifndef IMGINPUTFILECONN_H #define IMGINPUTFILECONN_H #include "inputconnectorstrategy.h" #include <opencv2/core/core.hpp> #include <opencv2/imgproc/imgproc.hpp> #include <opencv2/highgui/highgui.hpp> #include "ext/base64/base64.h" #include "utils/apitools.h" #include <random> namespace dd { class DDImg { public: DDImg() {} ~DDImg() {} // base64 detection bool is_within_base64_range(char c) const { if ((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9') || (c == '+' || c=='/' || c=='=')) return true; else return false; } bool possibly_base64(const std::string &s) const { bool ism = is_multiple_four(s); if (!ism) return false; for (char c: s) { bool within_64 = is_within_base64_range(c); if (!within_64) return false; } return true; } bool is_multiple_four(const std::string &s) const { if (s.length() % 4 == 0) return true; else return false; } void scale(const cv::Mat &src, cv::Mat &dst) const { float coef = std::min(static_cast<float>(_scale_max) / std::max(src.rows, src.cols), static_cast<float>(_scale_min) / std::min(src.rows, src.cols)); cv::resize(src, dst, cv::Size(), coef, coef, CV_INTER_CUBIC); } // decode image void decode(const std::string &str) { 
std::vector<unsigned char> vdat(str.begin(),str.end()); cv::Mat img = cv::Mat(cv::imdecode(cv::Mat(vdat,true), _unchanged_data ? CV_LOAD_IMAGE_UNCHANGED : (_bw ? CV_LOAD_IMAGE_GRAYSCALE : CV_LOAD_IMAGE_COLOR))); _imgs_size.push_back(std::pair<int,int>(img.rows,img.cols)); cv::Mat rimg; if (_scaled) scale(img, rimg); else if (_width == 0 || _height == 0) { if (_width == 0 && _height == 0) { // XXX - Do nothing and keep native resolution. May cause issues if batched images are different resolutions rimg = img; } else { // Resize so that the larger dimension is set to whichever (width or height) is non-zero, maintaining aspect ratio // XXX - This may cause issues if batch images are different resolutions size_t currMaxDim = std::max(img.rows, img.cols); double scale = static_cast<double>(std::max(_width, _height)) / static_cast<double>(currMaxDim); cv::resize(img,rimg,cv::Size(),scale,scale,CV_INTER_CUBIC); } } else { // Resize normally to the specified width and height cv::resize(img,rimg,cv::Size(_width,_height),0,0,CV_INTER_CUBIC); } if (_crop_width != 0 && _crop_height != 0) { int widthBorder = (_width - _crop_width)/2; int heightBorder = (_height - _crop_height)/2; rimg = rimg(cv::Rect(widthBorder, heightBorder, _crop_width, _crop_height)); } _imgs.push_back(rimg); } // deserialize image, independent of format void deserialize(std::stringstream &input) { size_t size = 0; input.seekg(0,input.end); size = input.tellg(); input.seekg(0,input.beg); char* data = new char[size]; input.read(data, size); std::string str(data,data+size); delete[]data; decode(str); } // data acquisition int read_file(const std::string &fname) { cv::Mat img = cv::imread(fname, _unchanged_data ? CV_LOAD_IMAGE_UNCHANGED : (_bw ? 
CV_LOAD_IMAGE_GRAYSCALE : CV_LOAD_IMAGE_COLOR)); if (img.empty()) { _logger->error("empty image {}",fname); return -1; } _imgs_size.push_back(std::pair<int,int>(img.rows,img.cols)); cv::Mat rimg; try { if (_scaled) scale(img, rimg); else if (_width == 0 || _height == 0) { if (_width == 0 && _height == 0) { // Do nothing and keep native resolution. May cause issues if batched images are different resolutions rimg = img; } else { // Resize so that the larger dimension is set to whichever (width or height) is non-zero, maintaining aspect ratio // XXX - This may cause issues if batch images are different resolutions size_t currMaxDim = std::max(img.rows, img.cols); double scale = static_cast<double>(std::max(_width, _height)) / static_cast<double>(currMaxDim); cv::resize(img,rimg,cv::Size(),scale,scale,CV_INTER_CUBIC); } } else { // Resize normally to the specified width and height cv::resize(img,rimg,cv::Size(_width,_height),0,0,CV_INTER_CUBIC); } } catch(...) { throw InputConnectorBadParamException("failed resizing image " + fname); } if (_crop_width != 0 && _crop_height != 0) { int widthBorder = (_width - _crop_width)/2; int heightBorder = (_height - _crop_height)/2; try { rimg = rimg(cv::Rect(widthBorder, heightBorder, _crop_width, _crop_height)); } catch(...) 
{ throw InputConnectorBadParamException("failed cropping image " + fname); } } _imgs.push_back(rimg); return 0; } int read_db(const std::string &fname) { _db_fname = fname; return 0; } int read_mem(const std::string &content) { cv::Mat timg; _b64 = possibly_base64(content); if (_b64) { std::string ccontent; Base64::Decode(content,&ccontent); std::stringstream sstr; sstr << ccontent; deserialize(sstr); } else { decode(content); } if (_imgs.at(0).empty()) return -1; return 0; } int read_dir(const std::string &dir) { // list directories in dir std::unordered_set<std::string> subdirs; if (fileops::list_directory(dir,false,true,false,subdirs)) throw InputConnectorBadParamException("failed reading text subdirectories in data directory " + dir); _logger->info("imginputfileconn: list subdirs size={}",subdirs.size()); // list files and classes std::vector<std::pair<std::string,int>> lfiles; // labeled files std::unordered_map<int,std::string> hcorresp; // correspondence class number / class name if (!subdirs.empty()) { int cl = 0; auto uit = subdirs.begin(); while(uit!=subdirs.end()) { std::unordered_set<std::string> subdir_files; if (fileops::list_directory((*uit),true,false,true,subdir_files)) throw InputConnectorBadParamException("failed reading image data sub-directory " + (*uit)); auto fit = subdir_files.begin(); while(fit!=subdir_files.end()) // XXX: re-iterating the file is not optimal { lfiles.push_back(std::pair<std::string,int>((*fit),cl)); ++fit; } ++cl; ++uit; } } else { std::unordered_set<std::string> test_files; fileops::list_directory(dir,true,false,false,test_files); auto fit = test_files.begin(); while(fit!=test_files.end()) { lfiles.push_back(std::pair<std::string,int>((*fit),-1)); // -1 for no class ++fit; } } // read images _imgs.reserve(lfiles.size()); _img_files.reserve(lfiles.size()); _labels.reserve(lfiles.size()); for (std::pair<std::string,int> &p: lfiles) { cv::Mat img = cv::imread(p.first, _unchanged_data ? CV_LOAD_IMAGE_UNCHANGED : (_bw ? 
CV_LOAD_IMAGE_GRAYSCALE : CV_LOAD_IMAGE_COLOR)); _imgs_size.push_back(std::pair<int,int>(img.rows,img.cols)); cv::Mat rimg; try { if (_scaled) scale(img, rimg); else if (_width == 0 || _height == 0) { if (_width == 0 && _height == 0) { // Do nothing and keep native resolution. May cause issues if batched images are different resolutions rimg = img; } else { // Resize so that the larger dimension is set to whichever (width or height) is non-zero, maintaining aspect ratio // XXX - This may cause issues if batch images are different resolutions size_t currMaxDim = std::max(img.rows, img.cols); double scale = static_cast<double>(std::max(_width, _height)) / static_cast<double>(currMaxDim); cv::resize(img,rimg,cv::Size(),scale,scale,CV_INTER_CUBIC); } } else { // Resize normally to the specified width and height cv::resize(img,rimg,cv::Size(_width,_height),0,0,CV_INTER_CUBIC); } } catch(...) { throw InputConnectorBadParamException("failed resizing image " + p.first); } if (_crop_width != 0 && _crop_height != 0) { int widthBorder = (_width - _crop_width)/2; int heightBorder = (_height - _crop_height)/2; try { rimg = rimg(cv::Rect(widthBorder, heightBorder, _crop_width, _crop_height)); } catch(...) 
{ throw InputConnectorBadParamException("failed cropping image " + p.first); } } _imgs.push_back(rimg); _img_files.push_back(p.first); if (p.second >= 0) _labels.push_back(p.second); if (_imgs.size() % 1000 == 0) _logger->info("read {} images",_imgs.size()); } return 0; } std::vector<cv::Mat> _imgs; std::vector<std::string> _img_files; std::vector<std::pair<int,int>> _imgs_size; bool _bw = false; bool _b64 = false; bool _unchanged_data = false; std::vector<int> _labels; int _width = 224; int _height = 224; int _crop_width = 0; int _crop_height = 0; bool _scaled = false; int _scale_min = 600; int _scale_max = 1000; std::string _db_fname; std::shared_ptr<spdlog::logger> _logger; }; class ImgInputFileConn : public InputConnectorStrategy { public: ImgInputFileConn() :InputConnectorStrategy(){} ImgInputFileConn(const ImgInputFileConn &i) :InputConnectorStrategy(i), _width(i._width),_height(i._height), _crop_width(i._crop_width),_crop_height(i._crop_height), _bw(i._bw),_unchanged_data(i._unchanged_data), _mean(i._mean),_has_mean_scalar(i._has_mean_scalar), _scaled(i._scaled), _scale_min(i._scale_min), _scale_max(i._scale_max) {} ~ImgInputFileConn() {} void init(const APIData &ad) { fillup_parameters(ad); } void fillup_parameters(const APIData &ad) { // optional parameters. 
if (ad.has("width")) _width = ad.get("width").get<int>(); if (ad.has("height")) _height = ad.get("height").get<int>(); if (ad.has("crop_width")) { _crop_width = ad.get("crop_width").get<int>(); if (_crop_width > _width) { _logger->error("Crop width must be less than or equal to width"); throw InputConnectorBadParamException("Crop width must be less than or equal to width"); } } if (ad.has("crop_height")) { _crop_height = ad.get("crop_height").get<int>(); if (_crop_height > _height) { _logger->error("Crop height must be less than or equal to height"); throw InputConnectorBadParamException("Crop height must be less than or equal to height"); } } if (ad.has("bw")) _bw = ad.get("bw").get<bool>(); if (ad.has("unchanged_data")) _unchanged_data = ad.get("unchanged_data").get<bool>(); if (ad.has("shuffle")) _shuffle = ad.get("shuffle").get<bool>(); if (ad.has("seed")) _seed = ad.get("seed").get<int>(); if (ad.has("test_split")) _test_split = ad.get("test_split").get<double>(); if (ad.has("mean")) { apitools::get_floats(ad, "mean", _mean); _has_mean_scalar = true; } // Variable size if (ad.has("scaled") || ad.has("scale_min") || ad.has("scale_max")) _scaled = true; if (ad.has("scale_min")) _scale_min = ad.get("scale_min").get<int>(); if (ad.has("scale_max")) _scale_max = ad.get("scale_max").get<int>(); } int feature_size() const { if (_bw || _unchanged_data) { // XXX: only valid for single channels if (_crop_width != 0 && _crop_height != 0) return _crop_width*_crop_height; else return _width*_height; } else { // RGB if (_crop_width != 0 && _crop_height != 0) return _crop_width*_crop_height*3; else return _width*_height*3; } } int batch_size() const { return _images.size(); } int test_batch_size() const { return _test_images.size(); } void transform(const APIData &ad) { get_data(ad); if (ad.has("parameters")) // hotplug of parameters, overriding the defaults { APIData ad_param = ad.getobj("parameters"); if (ad_param.has("input")) { 
fillup_parameters(ad_param.getobj("input")); } } int catch_read = 0; std::string catch_msg; std::vector<std::string> uris; std::vector<std::string> failed_uris; #pragma omp parallel for for (size_t i=0;i<_uris.size();i++) { bool no_img = false; std::string u = _uris.at(i); DataEl<DDImg> dimg; dimg._ctype._bw = _bw; dimg._ctype._unchanged_data = _unchanged_data; dimg._ctype._width = _width; dimg._ctype._height = _height; dimg._ctype._crop_width = _crop_width; dimg._ctype._crop_height = _crop_height; dimg._ctype._scaled = _scaled; dimg._ctype._scale_min = _scale_min; dimg._ctype._scale_max = _scale_max; try { if (dimg.read_element(u,this->_logger)) { _logger->error("no data for image {}",u); no_img = true; } if (!dimg._ctype._db_fname.empty()) _db_fname = dimg._ctype._db_fname; } catch(std::exception &e) { #pragma omp critical { ++catch_read; catch_msg = e.what(); failed_uris.push_back(u); no_img = true; } } if (no_img) continue; if (!_db_fname.empty()) continue; #pragma omp critical { _images.insert(_images.end(), std::make_move_iterator(dimg._ctype._imgs.begin()), std::make_move_iterator(dimg._ctype._imgs.end())); _images_size.insert(_images_size.end(), std::make_move_iterator(dimg._ctype._imgs_size.begin()), std::make_move_iterator(dimg._ctype._imgs_size.end())); if (!dimg._ctype._labels.empty()) _test_labels.insert(_test_labels.end(), std::make_move_iterator(dimg._ctype._labels.begin()), std::make_move_iterator(dimg._ctype._labels.end())); if (!dimg._ctype._b64 && dimg._ctype._imgs.size() == 1) uris.push_back(u); else if (!dimg._ctype._img_files.empty()) uris.insert(uris.end(), std::make_move_iterator(dimg._ctype._img_files.begin()), std::make_move_iterator(dimg._ctype._img_files.end())); else uris.push_back(std::to_string(i)); } } if (catch_read) { for (auto s: failed_uris) _logger->error("failed reading image {}",s); throw InputConnectorBadParamException(catch_msg); } _uris = uris; if (!_db_fname.empty()) return; // db filename is passed to backend // shuffle 
before possible split if (_shuffle) { std::mt19937 g; if (_seed >= 0) g = std::mt19937(_seed); else { std::random_device rd; g = std::mt19937(rd()); } std::shuffle(_images.begin(),_images.end(),g); //XXX beware: labels are not shuffled, i.e. let's not shuffle while testing } // split as required if (_test_split > 0) { int split_size = std::floor(_images.size() * (1.0-_test_split)); auto chit = _images.begin(); auto dchit = chit; int cpos = 0; while(chit!=_images.end()) { if (cpos == split_size) { if (dchit == _images.begin()) dchit = chit; _test_images.push_back((*chit)); } else ++cpos; ++chit; } _images.erase(dchit,_images.end()); _logger->info("data split test size={} / remaining data size={}",_test_images.size(),_images.size()); } if (_images.empty()) throw InputConnectorBadParamException("no image could be found"); } // data std::vector<cv::Mat> _images; std::vector<cv::Mat> _test_images; std::vector<int> _test_labels; std::vector<std::pair<int,int>> _images_size; // image parameters int _width = 224; int _height = 224; int _crop_width = 0; int _crop_height = 0; bool _bw = false; /**< whether to convert to black & white. */ bool _unchanged_data = false; /**< IMREAD_UNCHANGED flag. */ double _test_split = 0.0; /**< auto-split of the dataset. */ int _seed = -1; /**< shuffling seed. */ std::vector<float> _mean; /**< mean image pixels, to be subtracted from images. */ bool _has_mean_scalar = false; /**< whether scalar is set. */ std::string _db_fname; bool _scaled = false; int _scale_min = 600; int _scale_max = 1000; }; } #include "caffeinputconns.h" #ifdef USE_TF #include "backends/tf/tfinputconns.h" #endif #ifdef USE_DLIB #include "backends/dlib/dlibinputconns.h" #endif #ifdef USE_CAFFE2 #include "backends/caffe2/caffe2inputconns.h" #endif #endif
GB_helper.c
//------------------------------------------------------------------------------ // GB_helper.c: helper functions for @GrB interface //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // These functions are only used by the @GrB interface for // SuiteSparse:GraphBLAS. #include "GB_helper.h" //------------------------------------------------------------------------------ // GB_NTHREADS: determine the number of threads to use //------------------------------------------------------------------------------ #define GB_NTHREADS(work) \ int nthreads_max = GB_Global_nthreads_max_get ( ) ; \ double chunk = GB_Global_chunk_get ( ) ; \ int nthreads = GB_nthreads (work, chunk, nthreads_max) ; //------------------------------------------------------------------------------ // GB_ALLOCATE_WORK: allocate per-thread workspace //------------------------------------------------------------------------------ #define GB_ALLOCATE_WORK(work_type) \ size_t Work_size ; \ work_type *Work = GB_MALLOC_WORK (nthreads, work_type, &Work_size) ; \ if (Work == NULL) return (false) ; //------------------------------------------------------------------------------ // GB_FREE_WORKSPACE: free per-thread workspace //------------------------------------------------------------------------------ #define GB_FREE_WORKSPACE \ GB_FREE_WORK (&Work, Work_size) ; //------------------------------------------------------------------------------ // GB_helper0: get the current wall-clock time from OpenMP //------------------------------------------------------------------------------ double GB_helper0 (void) { return (GB_OPENMP_GET_WTIME) ; } //------------------------------------------------------------------------------ // GB_helper1: convert 0-based indices to 1-based for 
gbextracttuples //------------------------------------------------------------------------------ void GB_helper1 // convert zero-based indices to one-based ( double *restrict I_double, // output array const GrB_Index *restrict I, // input array int64_t nvals // size of input and output arrays ) { GB_NTHREADS (nvals) ; int64_t k ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (k = 0 ; k < nvals ; k++) { I_double [k] = (double) (I [k] + 1) ; } } //------------------------------------------------------------------------------ // GB_helper1i: convert 0-based indices to 1-based for gbextracttuples //------------------------------------------------------------------------------ void GB_helper1i // convert zero-based indices to one-based ( int64_t *restrict I, // input/output array int64_t nvals // size of input/output array ) { GB_NTHREADS (nvals) ; int64_t k ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (k = 0 ; k < nvals ; k++) { I [k] ++ ; } } //------------------------------------------------------------------------------ // GB_helper3: convert 1-based indices to 0-based for gb_mxarray_to_list //------------------------------------------------------------------------------ bool GB_helper3 // return true if OK, false on error ( int64_t *restrict List, // size len, output array const double *restrict List_double, // size len, input array int64_t len, int64_t *List_max // also compute the max entry in the list ) { GB_NTHREADS (len) ; ASSERT (List != NULL) ; ASSERT (List_double != NULL) ; ASSERT (List_max != NULL) ; bool ok = true ; int64_t listmax = -1 ; GB_ALLOCATE_WORK (int64_t) ; int tid ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (tid = 0 ; tid < nthreads ; tid++) { bool my_ok = true ; int64_t k1, k2, my_listmax = -1 ; GB_PARTITION (k1, k2, len, tid, nthreads) ; for (int64_t k = k1 ; k < k2 ; k++) { double x = List_double [k] ; int64_t i = (int64_t) x ; my_ok = my_ok && (x == (double) i) ; 
my_listmax = GB_IMAX (my_listmax, i) ; List [k] = i - 1 ; } // rather than create a separate per-thread boolean workspace, just // use a sentinal value of INT64_MIN if non-integer indices appear // in List_double. Work [tid] = my_ok ? my_listmax : INT64_MIN ; } // wrapup for (tid = 0 ; tid < nthreads ; tid++) { listmax = GB_IMAX (listmax, Work [tid]) ; ok = ok && (Work [tid] != INT64_MIN) ; } GB_FREE_WORKSPACE ; (*List_max) = listmax ; return (ok) ; } //------------------------------------------------------------------------------ // GB_helper3i: convert 1-based indices to 0-based for gb_mxarray_to_list //------------------------------------------------------------------------------ bool GB_helper3i // return true if OK, false on error ( int64_t *restrict List, // size len, output array const int64_t *restrict List_int64, // size len, input array int64_t len, int64_t *List_max // also compute the max entry in the list ) { GB_NTHREADS (len) ; int64_t listmax = -1 ; GB_ALLOCATE_WORK (int64_t) ; int tid ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (tid = 0 ; tid < nthreads ; tid++) { int64_t k1, k2, my_listmax = -1 ; GB_PARTITION (k1, k2, len, tid, nthreads) ; for (int64_t k = k1 ; k < k2 ; k++) { int64_t i = List_int64 [k] ; my_listmax = GB_IMAX (my_listmax, i) ; List [k] = i - 1 ; } Work [tid] = my_listmax ; } // wrapup for (tid = 0 ; tid < nthreads ; tid++) { listmax = GB_IMAX (listmax, Work [tid]) ; } GB_FREE_WORKSPACE ; (*List_max) = listmax ; return (true) ; } //------------------------------------------------------------------------------ // GB_helper4: find the max entry in an index list for gbbuild //------------------------------------------------------------------------------ bool GB_helper4 // return true if OK, false on error ( const GrB_Index *restrict I, // array of size len const int64_t len, GrB_Index *List_max // find max (I) + 1 ) { GB_NTHREADS (len) ; GrB_Index listmax = 0 ; GB_ALLOCATE_WORK (GrB_Index) ; int tid ; #pragma 
omp parallel for num_threads(nthreads) schedule(static) for (tid = 0 ; tid < nthreads ; tid++) { int64_t k1, k2 ; GrB_Index my_listmax = 0 ; GB_PARTITION (k1, k2, len, tid, nthreads) ; for (int64_t k = k1 ; k < k2 ; k++) { my_listmax = GB_IMAX (my_listmax, I [k]) ; } Work [tid] = my_listmax ; } // wrapup for (tid = 0 ; tid < nthreads ; tid++) { listmax = GB_IMAX (listmax, Work [tid]) ; } GB_FREE_WORKSPACE ; if (len > 0) listmax++ ; (*List_max) = listmax ; return (true) ; } //------------------------------------------------------------------------------ // GB_helper5: construct pattern of S for gblogassign //------------------------------------------------------------------------------ void GB_helper5 // construct pattern of S ( GrB_Index *restrict Si, // array of size anz GrB_Index *restrict Sj, // array of size anz const GrB_Index *restrict Mi, // array of size mnz, M->i, may be NULL const GrB_Index *restrict Mj, // array of size mnz, const int64_t mvlen, // M->vlen GrB_Index *restrict Ai, // array of size anz, A->i, may be NULL const int64_t avlen, // M->vlen const GrB_Index anz ) { GB_NTHREADS (anz) ; ASSERT (Mj != NULL) ; ASSERT (Si != NULL) ; ASSERT (Sj != NULL) ; int64_t k ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (k = 0 ; k < anz ; k++) { int64_t i = GBI (Ai, k, avlen) ; Si [k] = GBI (Mi, i, mvlen) ; Sj [k] = Mj [i] ; } } //------------------------------------------------------------------------------ // GB_helper7: Kx = uint64 (0:mnz-1), for gblogextract //------------------------------------------------------------------------------ // TODO: use GrB_apply with a positional operator instead void GB_helper7 // Kx = uint64 (0:mnz-1) ( uint64_t *restrict Kx, // array of size mnz const GrB_Index mnz ) { GB_NTHREADS (mnz) ; int64_t k ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (k = 0 ; k < mnz ; k++) { Kx [k] = k ; } } //------------------------------------------------------------------------------ // 
GB_helper8: expand a scalar into an array for gbbuild //------------------------------------------------------------------------------ // TODO: use GrB_assign instead void GB_helper8 ( GB_void *C, // output array of size nvals * s GB_void *A, // input scalar of size s GrB_Index nvals, // size of C size_t s // size of each scalar ) { GB_NTHREADS (nvals) ; int64_t k ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (k = 0 ; k < nvals ; k++) { // C [k] = A [0] memcpy (C + k * s, A, s) ; } } //------------------------------------------------------------------------------ // GB_helper10: compute norm (x-y,p) of two dense FP32 or FP64 vectors //------------------------------------------------------------------------------ // p can be: // 0 or 2: 2-norm, sqrt (sum ((x-y).^2)) // 1: 1-norm, sum (abs (x-y)) // INT64_MAX inf-norm, max (abs (x-y)) // INT64_MIN (-inf)-norm, min (abs (x-y)) // other: p-norm not yet computed double GB_helper10 // norm (x-y,p), or -1 on error ( GB_void *x_arg, // float or double, depending on type parameter bool x_iso, // true if x is iso GB_void *y_arg, // same type as x, treat as zero if NULL bool y_iso, // true if x is iso GrB_Type type, // GrB_FP32 or GrB_FP64 int64_t p, // 0, 1, 2, INT64_MIN, or INT64_MAX GrB_Index n ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- if (!(type == GrB_FP32 || type == GrB_FP64)) { // type of x and y must be GrB_FP32 or GrB_FP64 return ((double) -1) ; } if (n == 0) { return ((double) 0) ; } //-------------------------------------------------------------------------- // allocate workspace and determine # of threads to use //-------------------------------------------------------------------------- GB_NTHREADS (n) ; GB_ALLOCATE_WORK (double) ; #define X(k) x [x_iso ? 0 : k] #define Y(k) y [y_iso ? 
0 : k] //-------------------------------------------------------------------------- // each thread computes its partial norm //-------------------------------------------------------------------------- int tid ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (tid = 0 ; tid < nthreads ; tid++) { int64_t k1, k2 ; GB_PARTITION (k1, k2, n, tid, nthreads) ; if (type == GrB_FP32) { //------------------------------------------------------------------ // FP32 case //------------------------------------------------------------------ float my_s = 0 ; const float *x = (float *) x_arg ; const float *y = (float *) y_arg ; switch (p) { case 0: // Frobenius norm case 2: // 2-norm: sqrt of sum of (x-y).^2 { if (y == NULL) { for (int64_t k = k1 ; k < k2 ; k++) { float t = X (k) ; my_s += (t*t) ; } } else { for (int64_t k = k1 ; k < k2 ; k++) { float t = (X (k) - Y (k)) ; my_s += (t*t) ; } } } break ; case 1: // 1-norm: sum (abs (x-y)) { if (y == NULL) { for (int64_t k = k1 ; k < k2 ; k++) { my_s += fabsf (X (k)) ; } } else { for (int64_t k = k1 ; k < k2 ; k++) { my_s += fabsf (X (k) - Y (k)) ; } } } break ; case INT64_MAX: // inf-norm: max (abs (x-y)) { if (y == NULL) { for (int64_t k = k1 ; k < k2 ; k++) { my_s = fmaxf (my_s, fabsf (X (k))) ; } } else { for (int64_t k = k1 ; k < k2 ; k++) { my_s = fmaxf (my_s, fabsf (X (k) - Y (k))) ; } } } break ; case INT64_MIN: // (-inf)-norm: min (abs (x-y)) { my_s = INFINITY ; if (y == NULL) { for (int64_t k = k1 ; k < k2 ; k++) { my_s = fminf (my_s, fabsf (X (k))) ; } } else { for (int64_t k = k1 ; k < k2 ; k++) { my_s = fminf (my_s, fabsf (X (k) - Y (k))) ; } } } break ; default: ; // p-norm not yet supported } Work [tid] = (double) my_s ; } else { //------------------------------------------------------------------ // FP64 case //------------------------------------------------------------------ double my_s = 0 ; const double *x = (double *) x_arg ; const double *y = (double *) y_arg ; switch (p) { case 0: // Frobenius 
norm case 2: // 2-norm: sqrt of sum of (x-y).^2 { if (y == NULL) { for (int64_t k = k1 ; k < k2 ; k++) { double t = X (k) ; my_s += (t*t) ; } } else { for (int64_t k = k1 ; k < k2 ; k++) { double t = (X (k) - Y (k)) ; my_s += (t*t) ; } } } break ; case 1: // 1-norm: sum (abs (x-y)) { if (y == NULL) { for (int64_t k = k1 ; k < k2 ; k++) { my_s += fabs (X (k)) ; } } else { for (int64_t k = k1 ; k < k2 ; k++) { my_s += fabs (X (k) - Y (k)) ; } } } break ; case INT64_MAX: // inf-norm: max (abs (x-y)) { if (y == NULL) { for (int64_t k = k1 ; k < k2 ; k++) { my_s = fmax (my_s, fabs (X (k))) ; } } else { for (int64_t k = k1 ; k < k2 ; k++) { my_s = fmax (my_s, fabs (X (k) - Y (k))) ; } } } break ; case INT64_MIN: // (-inf)-norm: min (abs (x-y)) { my_s = INFINITY ; if (y == NULL) { for (int64_t k = k1 ; k < k2 ; k++) { my_s = fmin (my_s, fabs (X (k))) ; } } else { for (int64_t k = k1 ; k < k2 ; k++) { my_s = fmin (my_s, fabs (X (k) - Y (k))) ; } } } break ; default: ; // p-norm not yet supported } Work [tid] = my_s ; } } //-------------------------------------------------------------------------- // combine results of each thread //-------------------------------------------------------------------------- double s = 0 ; switch (p) { case 0: // Frobenius norm case 2: // 2-norm: sqrt of sum of (x-y).^2 { for (int64_t tid = 0 ; tid < nthreads ; tid++) { s += Work [tid] ; } s = sqrt (s) ; } break ; case 1: // 1-norm: sum (abs (x-y)) { for (int64_t tid = 0 ; tid < nthreads ; tid++) { s += Work [tid] ; } } break ; case INT64_MAX: // inf-norm: max (abs (x-y)) { for (int64_t tid = 0 ; tid < nthreads ; tid++) { s = fmax (s, Work [tid]) ; } } break ; case INT64_MIN: // (-inf)-norm: min (abs (x-y)) { s = Work [0] ; for (int64_t tid = 1 ; tid < nthreads ; tid++) { s = fmin (s, Work [tid]) ; } } break ; default: // p-norm not yet supported s = -1 ; } //-------------------------------------------------------------------------- // free workspace and return result 
//-------------------------------------------------------------------------- GB_FREE_WORKSPACE ; return (s) ; }
decolorize.c
// convert the input RGB colored image into grayscale #include <stdio.h> #include <stdint.h> #include <string.h> #include "imcore.h" #include "prcore.h" #define RANDOM_SAMPLE_COUNT 1000000 // compute the signed distance between the two color according to // Color2Gray: Salience-Preserving Color Removal float color_distance(struct color_t c1, struct color_t c2, float theta, float alpha) { float L1, a1, b1; float L2, a2, b2; // Observer. = 2°, Illuminant = D65 rgb2lab(c1, &L1, &a1, &b1); rgb2lab(c2, &L2, &a2, &b2); #define crunch(x) (alpha * tanhf((x) / alpha)) float lDiff = fabsf(L1 - L2); float crDiff = square(a1-a2) + square(b1-b2); // if the luminance difference is much higher than the chrominance, use the Luminance difference if(lDiff > crunch(crDiff)) { return lDiff; } // find the angle between the dA and dB float diffSign = (a1 - a2) * cosf(theta) + (b1 - b2) * sinf(theta); if(diffSign > 0) { return crunch(crDiff); } else { return crunch(-crDiff); } #undef crunch } // http://www.cescript.com/2015/10/imge-renksizlestirme-image.html void decolorize(matrix_t *in, uint32_t sample_count, matrix_t *out) { // for loop iterators uint32_t i, j; // allocate out before use it matrix_resize(out, height(in), width(in), 1); // TODO: create in_data pointer based on the input type uint8_t *in_data = data(uint8_t, in); // create two color_t and distance arrays struct color_t *C1 = array_create(struct color_t, sample_count); struct color_t *C2 = array_create(struct color_t, sample_count); float *distance = array_create(float, sample_count); // select #sample_count random pixel difference for(i=0; i < sample_count; i++) { // select two random pixels inside image uint32_t x1 = random_int(0, width(in) - 1); uint32_t x2 = random_int(0, width(in) - 1); uint32_t y1 = random_int(0, height(in) - 1); uint32_t y2 = random_int(0, height(in) - 1); // compute the index of the pixel positions uint32_t idx1 = idx(in, y1, x1, 0); uint32_t idx2 = idx(in, y2, x2, 0); // get the RGB color of the 
input image C1[i] = RGB(in_data[idx1+2], in_data[idx1+1], in_data[idx1+0]); C2[i] = RGB(in_data[idx2+2], in_data[idx2+1], in_data[idx2+0]); // get the distance between the two colors distance[i] = color_distance(C1[i], C2[i], 3.14159f / 4.0f, 15); } // create a color weight table uint32_t table_length = 0; int32_t c1 = 0; int32_t c2 = 0;; float W[3][66] = {0.0f}; for(c1 = 0; c1 < 11; c1++) { // sum of the weights must be less than 1.0 for(c2 = 10 - c1; c2 >= 0; c2--) { // set the weights W[0][table_length] = c1 / 10.0f; W[1][table_length] = c2 / 10.0f; W[2][table_length] = (10 - c1 - c2) / 10.0f; // increase the table length table_length++; } } // find the best coefficients double minEg = INFINITY; float WR = 0.29f; float WG = 0.58f; float WB = 0.11f; for(j = 0; j < table_length; j++) { float wr = W[0][j]; float wg = W[1][j]; float wb = W[2][j]; double Eg = 0; for(i = 0; i < sample_count; i++) { // find the projection float dX = wr*C1[i].red + wg*C1[i].green + wb*C1[i].blue; float dY = wr*C2[i].red + wg*C2[i].green + wb*C2[i].blue; // sum the error Eg += square( (dX - dY) - distance[i] ); } // if the current error is the minimum save it if(Eg < minEg) { minEg = Eg; WR = wr; WG = wg; WB = wb; } } // print the found weights and errors printf("E[%3.2f %3.2f %3.2f]: %3.5f\n", WR, WG, WB, sqrt(minEg) / sample_count); // now convert the rgb image into grayscale using the weight minIdx uint8_t *out_data = data(uint8_t, out); //#pragma omp parallel for for(i = 0; i < width(in)*height(in); i++) { out_data[i] = (uint8_t) clamp( WB*in_data[3*i+0] + WG*in_data[3*i+1] + WR*in_data[3*i+2], 0,255); } // free up teh space array_free(C1); array_free(C2); array_free(distance); } int main(int argc, unsigned char *argv[]) { // read the test image unsigned char filename[256] = "..//data//impression_sunrise.bmp"; if(argc > 1) { strncpy(filename, argv[1], 256); } // read the input image matrix_t *image = imread(filename); // create the gray images matrix_t *gray_image = 
matrix_create(uint8_t); matrix_t *decolorized_image = matrix_create(uint8_t); // convert the input into grascale rgb2gray(image, gray_image); // decolorize the image decolorize(image, RANDOM_SAMPLE_COUNT, decolorized_image); imwrite(gray_image, "gray_image.bmp"); imwrite(decolorized_image, "decolorized_image.bmp"); return 0; }
sieve.c
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>   // required for omp_set_num_threads(); was missing
#endif

/**
 * Count the prime numbers in [2, n] with an OpenMP-parallel
 * Sieve of Eratosthenes.
 *
 * Returns the prime count, or -1 if the sieve buffer cannot be allocated.
 */
int sieveOfEratosthenes(int n)
{
#ifdef _OPENMP
    omp_set_num_threads(2);   // original was missing the ';' — compile error
#endif

    // Create a boolean array "prime[0..n]" and initialize
    // all entries it as true. A value in prime[i] will
    // finally be false if i is Not a prime, else true.
    bool *prime = (bool*) malloc((n + 1) * sizeof(bool));
    if (prime == NULL) {
        return -1;            // allocation failed
    }
    int sqrt_n = (int) sqrt((double) n);
    memset(prime, true, (n + 1) * sizeof(bool));

    // Crossing out multiples: concurrent iterations may store 'false' into
    // the same slot, but every store writes the same value, so the result
    // is the classic parallel sieve. The former inner
    // "#pragma omp parallel for" was removed: nested parallel regions are
    // disabled by default, so it only added region-creation overhead.
    #pragma omp parallel for schedule(dynamic)
    for (int p = 2; p <= sqrt_n; p++) {
        // If prime[p] is not changed, then it is a prime
        if (prime[p] == true) {
            // Update all multiples of p
            for (int i = p * 2; i <= n; i += p)
                prime[i] = false;
        }
    }

    // count prime numbers
    int primes = 0;
    #pragma omp parallel for reduction(+:primes)
    for (int p = 2; p <= n; p++)
        if (prime[p]) primes++;

    free(prime);              // original leaked this buffer
    return primes;
}

int main()
{
    int n = 100000000;
    printf("%d\n", sieveOfEratosthenes(n));
    return 0;
}
GB_unjumbled_template.c
//------------------------------------------------------------------------------ // GB_unjumble_template: unjumble the vectors of a matrix //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ { int tid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { //---------------------------------------------------------------------- // get the task description //---------------------------------------------------------------------- const int64_t kfirst = A_slice [tid] ; const int64_t klast = A_slice [tid+1] ; //---------------------------------------------------------------------- // sort vectors kfirst to klast //---------------------------------------------------------------------- for (int64_t k = kfirst ; k < klast ; k++) { //------------------------------------------------------------------ // check if the vector needs sorting //------------------------------------------------------------------ bool jumbled = false ; const int64_t pA_start = Ap [k] ; const int64_t pA_end = Ap [k+1] ; int64_t ilast = -1 ; for (int64_t pA = pA_start ; pA < pA_end ; pA++) { int64_t i = Ai [pA] ; if (i < ilast) { jumbled = true ; break ; } ilast = i ; } //------------------------------------------------------------------ // sort the vector //------------------------------------------------------------------ if (jumbled) { const int64_t aknz = pA_end - pA_start ; GB_QSORT ; } } } } #undef GB_QSORT
integrator_hermes.c
/**
 * @file integrator_hermes.c
 * @brief HERMES. A WHFAST/IAS15 hybrid integration scheme.
 * @author Ari Silburt <silburt@astro.utoronto.ca>
 * @details This file implements a hybrid integration scheme capable
 * of handling close encounters, simple collisions, and
 * planetesimal forces. Details are describe in Silburt et al (in prep).
 *
 * @section LICENSE
 * Copyright (c) 2016 Ari Silburt
 *
 * This file is part of rebound.
 *
 * rebound is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * rebound is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with rebound.  If not, see <http://www.gnu.org/licenses/>.
 *
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <time.h>
#include <string.h>
#include "rebound.h"
#include "output.h"
#include "integrator_ias15.h"
#include "integrator_whfast.h"

#define MIN(a, b) ((a) > (b) ? (b) : (a))    ///< Returns the minimum of a and b
#define MAX(a, b) ((a) > (b) ? (a) : (b))    ///< Returns the maximum of a and b

static void reb_integrator_hermes_check_for_encounter(struct reb_simulation* r);
static void reb_integrator_hermes_additional_forces_mini(struct reb_simulation* mini);
static void reb_integrator_hermes_apply_forces(const struct reb_simulation* r, double* a);
static void reb_integrator_hermes_autocalc_HSF(struct reb_simulation* r);
static void reb_integrator_hermes_get_ae(struct reb_simulation* r, struct reb_particle com, int index, double* a, double* e);

// First half of a HERMES step: (re)build the IAS15 "mini" simulation with all
// massive particles, detect close encounters (which add further particles to
// mini), record initial planetesimal forces, then run the WHFAST drift.
void reb_integrator_hermes_part1(struct reb_simulation* r){
    r->gravity_ignore_terms = 0;
    // number of massive (active) bodies, excluding variational particles
    const int _N_active = ((r->N_active==-1)?r->N:r->N_active) - r->N_var;
    struct reb_simulation* mini = r->ri_hermes.mini;
    if (mini == NULL){
        // lazily create the high-accuracy sub-simulation on first use
        mini = reb_create_simulation();
        r->ri_hermes.mini = mini;
        mini->visualization = REB_VISUALIZATION_NONE;   // Disable visualiation
        mini->integrator = REB_INTEGRATOR_IAS15;
        mini->gravity = REB_GRAVITY_BASIC;
        mini->dt = r->dt;
        mini->additional_forces = reb_integrator_hermes_additional_forces_mini;
        mini->G = r->G;
        mini->softening = r->softening;
        if(r->collision_resolve_keep_sorted ==0) reb_warning(r,"When using HERMES, the user must set r->collision_resolve_keep_sorted = 1, or else it is likely that the wrong particle will be removed from the simulation during a collision/ejection, leading to energy jumps and other unpredictable behaviour. This warning will only appear once.\n");
    }
    mini->ri_hermes.global = r;    //set to != 0 so that collision.c knows to remove from both
    // mirror the user-facing settings of the global simulation into mini
    mini->testparticle_type = r->testparticle_type;
    mini->collision = r->collision;
    mini->collision_resolve = r->collision_resolve;
    mini->collision_resolve_keep_sorted = r->collision_resolve_keep_sorted;
    mini->track_energy_offset = r->track_energy_offset;
    mini->force_is_velocity_dependent = r->force_is_velocity_dependent;
    mini->post_timestep_modifications = r->post_timestep_modifications;

    // Remove all particles from mini
    mini->t = r->t;
    int mini_previously_active = r->ri_hermes.mini_active;
    mini->N = 0;
    mini->energy_offset = 0.;
    r->ri_hermes.mini_active = 0;
    r->ri_hermes.global_index_from_mini_index_N = 0;
    r->ri_hermes.collision_this_global_dt = 0;

    // grow the per-body acceleration buffers (3 doubles each) if needed
    if (_N_active>r->ri_hermes.a_Nmax){
        r->ri_hermes.a_i = realloc(r->ri_hermes.a_i,sizeof(double)*3*_N_active);
        r->ri_hermes.a_f = realloc(r->ri_hermes.a_f,sizeof(double)*3*_N_active);
        r->ri_hermes.a_Nmax = _N_active;
    }

    //reset is_in_mini
    if (r->N>r->ri_hermes.is_in_mini_Nmax){
        r->ri_hermes.is_in_mini_Nmax = r->N;
        r->ri_hermes.is_in_mini = realloc(r->ri_hermes.is_in_mini,r->N*sizeof(int));
    }
    for(int i=_N_active;i<r->N;i++)r->ri_hermes.is_in_mini[i] = 0;

    // Add all massive particles
    for (int i=0; i<_N_active; i++){
        reb_add(r->ri_hermes.mini, r->particles[i]);
        r->ri_hermes.is_in_mini[i] = 1;
        if (r->ri_hermes.global_index_from_mini_index_N>=r->ri_hermes.global_index_from_mini_index_Nmax){
            // grow the mini->global index map in chunks of 32
            while(r->ri_hermes.global_index_from_mini_index_N>=r->ri_hermes.global_index_from_mini_index_Nmax) r->ri_hermes.global_index_from_mini_index_Nmax += 32;
            r->ri_hermes.global_index_from_mini_index = realloc(r->ri_hermes.global_index_from_mini_index,r->ri_hermes.global_index_from_mini_index_Nmax*sizeof(int));
        }
        r->ri_hermes.global_index_from_mini_index[r->ri_hermes.global_index_from_mini_index_N] = i;
        r->ri_hermes.global_index_from_mini_index_N++;
    }
    r->ri_hermes.mini->N_active = _N_active;

    // Determine HSF
    r->ri_hermes.current_hill_switch_factor = r->ri_hermes.hill_switch_factor;
    if(r->ri_hermes.adaptive_hill_switch_factor){
        reb_integrator_hermes_autocalc_HSF(r);  // increases current_hill_switch_factor is needed
    }

    reb_integrator_hermes_check_for_encounter(r);

    // restart IAS15 internals if mini's particle set changed or it was inactive
    if (r->N != r->ri_hermes.mini->N || mini_previously_active==0) {
        reb_integrator_ias15_clear(r->ri_hermes.mini);
    }

    // record planetesimal->massive-body accelerations at the start of the step
    reb_integrator_hermes_apply_forces(r, r->ri_hermes.a_i);

    r->ri_whfast.coordinates = REB_WHFAST_COORDINATES_DEMOCRATICHELIOCENTRIC;
    reb_integrator_whfast_part1(r);
}

// Second half of a HERMES step: finish the WHFAST kick, record final
// planetesimal forces, and — if any encounter is active — integrate the mini
// simulation up to the global time and copy its particles back.
void reb_integrator_hermes_part2(struct reb_simulation* r){
    reb_integrator_whfast_part2(r);

    // record planetesimal->massive-body accelerations at the end of the step
    reb_integrator_hermes_apply_forces(r, r->ri_hermes.a_f);

    struct reb_simulation* mini = r->ri_hermes.mini;
    r->ri_hermes.steps++;
    if (r->ri_hermes.mini_active){
        r->ri_hermes.steps_miniactive++;
        r->ri_hermes.steps_miniN += mini->N;
        reb_integrate(mini,r->t);
        // copy the accurately-integrated particles back into the global sim
        for (int i=0; i<mini->N; i++){
            r->particles[r->ri_hermes.global_index_from_mini_index[i]] = mini->particles[i];
            r->particles[r->ri_hermes.global_index_from_mini_index[i]].sim = r;
        }

        // Correct for energy jump in collision
        r->energy_offset += r->ri_hermes.mini->energy_offset;
    }
}

// Synchronize the integrator state (delegates to WHFAST).
void reb_integrator_hermes_synchronize(struct reb_simulation* r){
    // Do nothing.
    reb_integrator_whfast_synchronize(r);
}

// Reset all HERMES state: counters, the mini simulation, the index map,
// and the acceleration buffers.
void reb_integrator_hermes_reset(struct reb_simulation* r){
    //r->ri_hermes.timestep_too_large_warning = 0.; //Don't think we want to reset the warning.
    r->ri_hermes.steps = 0;
    r->ri_hermes.steps_miniactive = 0;
    r->ri_hermes.steps_miniN = 0;
    reb_integrator_whfast_reset(r);
    if (r->ri_hermes.mini){
        reb_free_simulation(r->ri_hermes.mini);
        r->ri_hermes.mini = NULL;
    }
    if(r->ri_hermes.global_index_from_mini_index){
        free(r->ri_hermes.global_index_from_mini_index);
        r->ri_hermes.global_index_from_mini_index = NULL;
        r->ri_hermes.global_index_from_mini_index_Nmax = 0;
    }
    if(r->ri_hermes.is_in_mini){
        free(r->ri_hermes.is_in_mini);
        r->ri_hermes.is_in_mini = NULL;
        r->ri_hermes.is_in_mini_Nmax = 0;
    }
    if(r->ri_hermes.a_i){
        free(r->ri_hermes.a_i);
    }
    if(r->ri_hermes.a_f){
        free(r->ri_hermes.a_f);
    }
    r->ri_hermes.a_Nmax = 0;
}

// Scan all particle pairs for close encounters. A pair is "close" when its
// separation is within current_hill_switch_factor mutual Hill radii (or
// solar_switch_factor stellar radii for pairs involving particle 0). Close
// planetesimals are added to the mini simulation. Also warns once if the
// global timestep looks too large to resolve the closest encounter.
static void reb_integrator_hermes_check_for_encounter(struct reb_simulation* global){
    struct reb_simulation* mini = global->ri_hermes.mini;
    const int _N_active = ((global->N_active==-1)?global->N:global->N_active) - global->N_var;
    struct reb_particle* global_particles = global->particles;
    struct reb_particle p0 = global_particles[0];
    // switching distance for encounters with the central body (particle 0)
    double solar_check = global->ri_hermes.solar_switch_factor*p0.r;
    double solar_check2 = solar_check*solar_check;
    double current_hill_switch_factor = global->ri_hermes.current_hill_switch_factor;
    double hill_switch_factor2 = current_hill_switch_factor*current_hill_switch_factor;
    double min_dt_enc2 = INFINITY;
    for (int i=0; i<_N_active; i++){
        struct reb_particle pi = global_particles[i];
        const double dxi = p0.x - pi.x;
        const double dyi = p0.y - pi.y;
        const double dzi = p0.z - pi.z;
        const double r0i2 = dxi*dxi + dyi*dyi + dzi*dzi;
        const double mi = pi.m/(p0.m*3.);
        // Hill radius of body i: |r0i| * (m_i / 3 m_0)^(1/3), computed via
        // a single pow on (m^2 r^6)^(1/6) to avoid a sqrt
        double rhi = pow(mi*mi*r0i2*r0i2*r0i2,1./6.);
        for(int j=i+1;j<global->N;j++){
            struct reb_particle pj = global_particles[j];
            const double dxj = p0.x - pj.x;
            const double dyj = p0.y - pj.y;
            const double dzj = p0.z - pj.z;
            const double r0j2 = dxj*dxj + dyj*dyj + dzj*dzj;
            const double mj = pj.m/(p0.m*3.);
            double rhj = pow(mj*mj*r0j2*r0j2*r0j2,1./6.);
            const double rh_sum = rhi+rhj;
            const double rh_sum2 = rh_sum*rh_sum;

            const double dx = pi.x - pj.x;
            const double dy = pi.y - pj.y;
            const double dz = pi.z - pj.z;
            const double rij2 = dx*dx + dy*dy + dz*dz;

            if((rij2 < hill_switch_factor2*rh_sum2 && i>0) || (rij2 < solar_check2 && i==0)){
                global->ri_hermes.mini_active = 1;

                // Monitor hill radius/relative velocity
                const double dvx = pi.vx - pj.vx;
                const double dvy = pi.vy - pj.vy;
                const double dvz = pi.vz - pj.vz;
                const double vij2 = dvx*dvx + dvy*dvy + dvz*dvz;
                // (encounter crossing time)^2 for this pair
                const double dt_enc2 = hill_switch_factor2*rh_sum2/vij2;
                min_dt_enc2 = MIN(min_dt_enc2,dt_enc2);

                if (j>=_N_active && global->ri_hermes.is_in_mini[j]==0){    //make sure not already added
                    // Add particle to mini simulation
                    reb_add(mini,pj);
                    global->ri_hermes.is_in_mini[j] = 1;
                    if (global->ri_hermes.global_index_from_mini_index_N>=global->ri_hermes.global_index_from_mini_index_Nmax){
                        // grow the mini->global index map in chunks of 32
                        while(global->ri_hermes.global_index_from_mini_index_N>=global->ri_hermes.global_index_from_mini_index_Nmax) global->ri_hermes.global_index_from_mini_index_Nmax += 32;
                        global->ri_hermes.global_index_from_mini_index = realloc(global->ri_hermes.global_index_from_mini_index,global->ri_hermes.global_index_from_mini_index_Nmax*sizeof(int));
                    }
                    global->ri_hermes.global_index_from_mini_index[global->ri_hermes.global_index_from_mini_index_N] = j;
                    global->ri_hermes.global_index_from_mini_index_N++;
                }
            }
        }
    }
    if (global->ri_hermes.adaptive_hill_switch_factor==0 && global->ri_hermes.timestep_too_large_warning==0 && min_dt_enc2 < 16.*global->dt*global->dt){
        global->ri_hermes.timestep_too_large_warning = 1;
        reb_warning(global,"The timestep is likely too large. Close encounters might be missed. Decrease the timestep or increase the switching radius. This warning will appear only once.");
    }
}

//get min encounter time between overlapping orbits
// Adaptive Hill-switch-factor calculation: for each massive-body /
// other-body pair whose radial (pericenter..apocenter) ranges overlap, bound
// the maximum relative velocity analytically (four overlap cases), derive the
// minimum encounter time, and raise current_hill_switch_factor so that the
// switching radius covers at least ~4 global timesteps of relative motion.
// Suffix "p" denotes the massive planet (index i); no suffix is body j.
static void reb_integrator_hermes_autocalc_HSF(struct reb_simulation* r){
    const int _N_active = ((r->N_active==-1)?r->N:r->N_active) - r->N_var;
    struct reb_particle com = reb_get_com(r);
    const double mu = r->G*r->particles[0].m;
    struct reb_particle* particles = r->particles;
    double min_dt_enc2 = INFINITY;
    double m0 = particles[0].m;
    int* is_in_mini = r->ri_hermes.is_in_mini;
    for(int i=1;i<_N_active;i++){                       //run over massive bodies
        double ep, ap;
        reb_integrator_hermes_get_ae(r, com, i, &ap, &ep);
        double rp_min = ap*(1.-ep);                     // planet pericenter
        double rp_max = ap*(1.+ep);                     // planet apocenter
        double np = sqrt(mu/(ap*ap*ap));                // planet mean motion
        for(int j=i+1;j<r->N;j++){                      //run over massive + planetesimal bodies
            if(is_in_mini[j] == 1) continue;            //exclude bodies in mini from Auto HSF calc
            double e, a, n;
            reb_integrator_hermes_get_ae(r, com, j, &a, &e);
            double r_min = a*(1.-e);
            double r_max = a*(1.+e);
            double vphi_max_r=0., vr_max_r=0., global_max_r=0., sinf_max_r=0.;
            double vphi_max_rp=0., vr_max_rp=0., global_max_rp=0., sinf_max_rp=0.;
            if((rp_min<r_min)&&(rp_max>r_max)){         //CASE1: massive planet totally overlaps planetesimal
                n = sqrt(mu/(a*a*a));
                vphi_max_r = n*a*(1.+e)/sqrt(1.-e*e);   //vphi_max is at r_min = a*(1.-e)
                vphi_max_rp = np*ap*ap*(1.-ep*ep)/(a*(1.-e)*sqrt(1.-ep*ep));    //vphi_max_rp @ r_min
                vr_max_r = n*a*e/sqrt(1.-e*e);          //vr_max_r is at r = a*(1.-e^2.)
                global_max_rp = ap*(1.-ep*ep);          //the distance corresponding to the global vr_max_rp
                if((global_max_rp>r_max)||(global_max_rp<r_min)){   //take max of boundaries (r_min and r_max)
                    sinf_max_rp = sqrt(MAX(1.-pow(global_max_rp/(r_min*ep)-1./ep,2.), 1.-pow(global_max_rp/(r_max*ep)-1./ep,2.)));
                    vr_max_rp = np*ap*ep/sqrt(1.-ep*ep) * sinf_max_rp;
                } else {
                    vr_max_rp = np*ap*ep/sqrt(1.-ep*ep);
                }
            } else if((r_min<rp_min)&&(r_max>rp_max)){  //CASE2: planetesimal totally overlaps planet
                n = sqrt(mu/(a*a*a));
                vphi_max_rp = np*ap*(1.+ep)/sqrt(1.-ep*ep);
                vphi_max_r = n*a*a*(1.-e*e)/(ap*(1.-ep)*sqrt(1.-e*e));
                vr_max_rp = np*ap*ep/sqrt(1.-ep*ep);
                global_max_r = a*(1.-e*e);
                if((global_max_r>rp_max)||(global_max_r<rp_min)){   //take max of boundaries (rp_min and rp_max)
                    sinf_max_r = sqrt(MAX(1.-pow(global_max_r/(rp_min*e)-1./e,2.), 1.-pow(global_max_r/(rp_max*e)-1./e,2.)));
                    vr_max_r = n*a*e/sqrt(1.-e*e) * sinf_max_r;
                } else {vr_max_r = n*a*e/sqrt(1.-e*e);}
            } else if((rp_max>r_max)&&(r_max>rp_min)){  //CASE3: partial overlap (planetesimal=inner body), boundaries: inner=rp_min, outer=r_max
                n = sqrt(mu/(a*a*a));
                vphi_max_r = n*a*a*(1.-e*e)/(ap*(1.-ep)*sqrt(1.-e*e));
                vphi_max_rp = np*ap*(1.+ep)/sqrt(1.-ep*ep);
                global_max_r = a*(1.-e*e);
                if(global_max_r<rp_min){                //Since r_max is a minimum of vr, vr_max_r must be at rp_min
                    vr_max_r = n*a*e*sqrt((1.-pow(global_max_r/(rp_min*e)-1./e,2.))/(1.-e*e));
                } else {vr_max_r = n*a*e/sqrt(1.-e*e);}
                global_max_rp = ap*(1.-ep*ep);
                if(global_max_rp>r_max){                //Since rp_min is a minimum of vr, vr_max_rp must be at r_max
                    vr_max_rp = np*ap*ep*sqrt((1.-pow(global_max_rp/(r_max*ep)-1./ep,2.))/(1.-ep*ep));
                } else {vr_max_rp = np*ap*ep/sqrt(1.-ep*ep);}
            } else if((r_max>rp_max)&&(rp_max>r_min)){  //CASE4: partial overlap (planet=inner body), boundaries: inner=r_min, outer=rp_max
                n = sqrt(mu/(a*a*a));
                vphi_max_r = n*a*(1.+e)/sqrt(1.-e*e);
                vphi_max_rp = np*ap*ap*(1.-ep*ep)/(a*(1.-e)*sqrt(1.-ep*ep));
                global_max_r = a*(1.-e*e);
                if(global_max_r>rp_max){                //Since r_min is a minimum of vr, vr_max_r must be at rp_max
                    vr_max_r = n*a*e*sqrt((1.-pow(global_max_r/(rp_max*e)-1./e,2.))/(1.-e*e));
                } else {vr_max_r = n*a*e/sqrt(1.-e*e);}
                global_max_rp = ap*(1.-ep*ep);
                if(global_max_rp<r_min){                //Since rp_max is a minimum of vr, vr_max_rp must be at r_min
                    vr_max_rp = np*ap*ep*sqrt((1.-pow(global_max_rp/(r_min*ep)-1./ep,2.))/(1.-ep*ep));
                } else {vr_max_rp = np*ap*ep/sqrt(1.-ep*ep);}
            }
            //We calculate vrel_max the following way since it can be solved analytically. The correct way to find vrel_max is not easily
            //done (leads to a quartic soln). This estimate is guaranteed to be larger than the correct way, leading to a more conservative
            //estimate of min_dt_enc.
            double vrel_max2 = (vr_max_rp+vr_max_r)*(vr_max_rp+vr_max_r) + (vphi_max_rp-vphi_max_r)*(vphi_max_rp-vphi_max_r);
            if(vrel_max2 > 0.){
                double rhill_sum = ap*pow(particles[i].m/(3.*m0),1./3.) + a*pow(particles[j].m/(3.*m0),1./3.);
                double dt_enc2 = rhill_sum*rhill_sum/vrel_max2;
                min_dt_enc2 = MIN(min_dt_enc2,dt_enc2);
            }
        }
    }
    if(min_dt_enc2 < INFINITY){
        double dt2 = 16.*r->dt*r->dt;   //Factor of 4:hill sphere > 4*length scales for wiggle room
        double HSF_new = sqrt(dt2/min_dt_enc2);
        double base = 1.25;
        // NOTE(review): local 'exp' shadows math.h exp(); harmless here but
        // worth renaming in a behavior-changing cleanup pass
        double exp = ceilf(log10(HSF_new)/log10(base)); //round HSF up to nearest multiple of 1.25
        HSF_new = pow(base,exp);
        r->ri_hermes.current_hill_switch_factor = MAX(r->ri_hermes.current_hill_switch_factor, HSF_new);    // Increase HSF if needed
    }
}

// Compute, for each massive body i, the gravitational acceleration exerted by
// all planetesimals NOT currently in the mini simulation; results are written
// into a[3*i .. 3*i+2]. Used to interpolate these forces inside mini.
static void reb_integrator_hermes_apply_forces(const struct reb_simulation* r, double* a){
    int* is_in_mini = r->ri_hermes.is_in_mini;
    double G = r->G;
    const int _N_active = ((r->N_active==-1)?r->N:r->N_active) - r->N_var;
    for (int i = 0; i<_N_active; i++){
        struct reb_particle pm = r->particles[i];
        double ax = 0.;
        double ay = 0.;
        double az = 0.;
        for (int j = _N_active; j<r->N; j++){
            if (is_in_mini[j] == 0){
                struct reb_particle ps = r->particles[j];
                double dx = ps.x - pm.x;
                double dy = ps.y - pm.y;
                double dz = ps.z - pm.z;
                double d = sqrt(dx*dx + dy*dy + dz*dz);
                // direct Newtonian pull of planetesimal j on massive body i
                ax += ps.m * dx * G/(d*d*d);
                ay += ps.m * dy * G/(d*d*d);
                az += ps.m * dz * G/(d*d*d);
            }
        }
        a[i*3+0] = ax;
        a[i*3+1] = ay;
        a[i*3+2] = az;
    }
}

// This is the current algorithm, interpolating forces
// Extra-forces callback installed on the mini simulation: linearly
// interpolates the global planetesimal accelerations (a_i at step start,
// a_f at step end) to mini's current sub-time and applies them to the
// massive bodies; then chains to the user's additional_forces, if any.
static void reb_integrator_hermes_additional_forces_mini(struct reb_simulation* mini){
    struct reb_simulation* global = mini->ri_hermes.global;
    if (mini->testparticle_type){
        struct reb_particle* mini_particles = mini->particles;
        const double t_prev = global->t - global->dt;
        // fraction of the global step mini has covered so far, in [0,1]
        double timefac = (mini->t - t_prev)/global->dt;
        double* a_i = global->ri_hermes.a_i;
        double* a_f = global->ri_hermes.a_f;
        // TODO: See if the following is good enough and if so why
        // timefac = 0.5;
#pragma omp parallel for schedule(guided)
        for(int i=0;i<mini->N_active;i++){  //massive bodies in mini
            double ax0 = a_i[i*3+0];
            double ay0 = a_i[i*3+1];
            double az0 = a_i[i*3+2];
            double ax1 = a_f[i*3+0];
            double ay1 = a_f[i*3+1];
            double az1 = a_f[i*3+2];
            mini_particles[i].ax += ax0*(1.-timefac) + ax1*timefac;
            mini_particles[i].ay += ay0*(1.-timefac) + ay1*timefac;
            mini_particles[i].az += az0*(1.-timefac) + az1*timefac;
        }
    }
    if(global->additional_forces){
        global->additional_forces(mini);
    }
}

// Compute the osculating semi-major axis (*a) and eccentricity (*e) of
// particle 'index' relative to the given center of mass 'com', using the
// Laplace-Runge-Lenz (eccentricity) vector and the vis-viva relation.
static void reb_integrator_hermes_get_ae(struct reb_simulation* r, struct reb_particle com, int index, double* a, double* e){
    const double G = r->G;
    const double mu = G*r->particles[0].m;
    const double muinv = 1./mu;
    struct reb_particle* particles = r->particles;
    struct reb_particle p = particles[index];

    const double dvx = p.vx-com.vx;
    const double dvy = p.vy-com.vy;
    const double dvz = p.vz-com.vz;
    const double dx = p.x-com.x;
    const double dy = p.y-com.y;
    const double dz = p.z-com.z;

    const double v2 = dvx*dvx + dvy*dvy + dvz*dvz;
    const double d = sqrt(dx*dx + dy*dy + dz*dz);   //distance
    const double dinv = 1./d;
    const double vr = (dx*dvx + dy*dvy + dz*dvz)*dinv;  // radial velocity
    // eccentricity vector components
    const double ex = muinv*( (v2-mu*dinv)*dx - d*vr*dvx );
    const double ey = muinv*( (v2-mu*dinv)*dy - d*vr*dvy );
    const double ez = muinv*( (v2-mu*dinv)*dz - d*vr*dvz );
    *e = sqrt( ex*ex + ey*ey + ez*ez );     //eccentricity
    *a = -mu/(v2 - 2.*mu*dinv);             //semi-major axis (vis-viva)
}
random_forest.h
#ifndef __RANDOM_FOREST_H__
#define __RANDOM_FOREST_H__

// Random forest (bagged decision trees) for classification / regression.
// NOTE(review): FOR, sqr, sign, myAssert and the per-thread RNG array `rng`
// are project utilities pulled in via the headers below -- semantics assumed
// from usage, confirm against utils.h/random.h.
#include "../utils/utils.h"
#include "../utils/random.h"

using namespace RandomNumbers;

namespace RandomForestRelated {

// Per-feature accumulated loss reduction, filled during training.
vector<double> featureImportance;

// Bootstrap sample cap per class (see DecisionTree::train).
int K_OUT_OF_N = 1000;
// Number of random feature groups tried per split.
int RANDOM_FEATURES = 4;
// Number of random split positions tried per feature.
int RANDOM_POSITIONS = 8;

const int CLASSIFICATION = 0xa001;
const int REGRESSION = 0xa002;
const int SURVIVAL = 0xa003;

int TASK_TYPE = CLASSIFICATION; // default

// Entropy (in bits) of a binary distribution with p1 positives out of total.
inline double binaryEntropy(int p1, int total) {
    if (p1 == 0 || p1 == total) {
        return 0;
    }
    double p = p1 / (double)total;
    return - p * log2(p) - (1 - p) * log2(1 - p);
}

// Impurity of the instances in IDs: entropy for classification, mean cubed
// absolute error for regression. SURVIVAL is unimplemented and aborts.
// NOTE(review): IDs is passed by value -- a full copy per call; a const
// reference would avoid it.
inline double calculateLoss(const vector<int> IDs, const vector<double> &labels) {
    if (TASK_TYPE == CLASSIFICATION) {
        // entropy
        unordered_map<int, int> hist;
        FOR (id, IDs) {
            ++ hist[(int)labels[*id]];
        }
        double entropy = 0;
        FOR (iter, hist) {
            double p = iter->second / (double) IDs.size();
            entropy += -p * log(p);
        }
        return entropy;
    } else if (TASK_TYPE == REGRESSION) {
        // mean cube error
        double avg = 0;
        FOR (id, IDs) {
            avg += labels[*id];
        }
        if (IDs.size()) {
            avg /= IDs.size();
        }
        double cubeError = 0;
        FOR (id, IDs) {
            cubeError += sqr(avg - labels[*id]) * fabs(avg - labels[*id]);
        }
        return cubeError / IDs.size();
    } else if (TASK_TYPE == SURVIVAL) {
        cerr << "TODO survival" << endl;
        exit(-1);
    } else {
        myAssert(false, "Unknown Task Type!");
    }
    return 0;
}

// One node of a decision tree. Plain-old-data so it can be serialized with
// a raw fwrite/fread (see DecisionTree::dump/load).
struct TreeNode {
    bool leaf;            // true for leaves; then `result` is the prediction
    int level, feature;   // depth in the tree; split feature index
    double value, result; // split threshold; leaf prediction
    int left, right;      // child indices into DecisionTree::nodes
    TreeNode() {
        leaf = false;
        level = feature = left = right = -1;
        value = result = 0;
    }
};

class DecisionTree {
public:
    // Flat node storage; nodes[0] is the root, children referenced by index.
    vector<TreeNode> nodes;

    // Serialize nodes as raw bytes (count + array).
    // NOTE(review): not portable across architectures/compilers -- layout,
    // endianness and padding of TreeNode must match between dump and load.
    void dump(FILE* out) {
        size_t size = nodes.size();
        fwrite(&size, sizeof(size), 1, out);
        if (size > 0) {
            fwrite(&nodes[0], sizeof(nodes[0]), size, out);
        }
    }

    // Inverse of dump(). NOTE(review): fread return values are ignored, so a
    // truncated file is silently accepted.
    void load(FILE* in) {
        size_t size;
        fread(&size, sizeof(size), 1, in);
        nodes.resize(size);
        if (size > 0) {
            fread(&nodes[0], sizeof(nodes[0]), size, in);
        }
    }

    DecisionTree() {}

    // Grow one tree on a class-balanced bootstrap sample.
    //   features     : one row per instance, one column per feature.
    //   results      : labels; the bootstrap below indexes index[(int)results[i]],
    //                  so training assumes binary 0/1 labels -- TODO confirm.
    //   minNodeSize  : minimum instances allowed in a child node.
    //   maxLevel     : depth cap.
    //   featureNames : optional; names sharing the prefix before '=' are
    //                  grouped and sampled together (one-hot groups).
    // Uses the per-thread RNG rng[omp_get_thread_num()], so it is safe to call
    // from inside an OpenMP parallel for (see RandomForest::train).
    void train(const vector< vector<double> > &features, const vector<double> &results, int minNodeSize, int maxLevel = 18, vector<string> featureNames = vector<string>()) {
        int threadID = omp_get_thread_num();
        if (features.size() == 0) {
            return;
        }
        // Build feature groups: either one group per "name=" prefix, or one
        // singleton group per column.
        vector< vector<int> > featureGroups;
        if (featureNames.size() != 0) {
            myAssert(featureNames.size() == features[0].size(), "[ERROR] feature names and feature dimensions mismatch!");
            unordered_map<string, int> name2id;
            for (int i = 0; i < featureNames.size(); ++ i) {
                string name = featureNames[i];
                // string::npos == (size_t)-1, so this comparison is intentional.
                if (name.find("=") != -1) {
                    name = name.substr(0, name.find("="));
                }
                if (!name2id.count(name)) {
                    name2id[name] = featureGroups.size();
                    featureGroups.push_back(vector<int>());
                }
                featureGroups[name2id[name]].push_back(i);
            }
        } else {
            for (int i = 0; i < features[0].size(); ++ i) {
                featureGroups.push_back(vector<int>(1, i));
            }
        }
        TreeNode root;
        root.level = 0;
        nodes.push_back(root);

        // bootstrapping
        // Class-balanced sampling with replacement: the same number of
        // instances (capped at K_OUT_OF_N) is drawn from each class.
        vector<int> index[2];
        for (int i = 0; i < (int)results.size(); ++ i) {
            index[(int)results[i]].push_back(i);
        }
        vector<int> rootBag;
        for (int type = 0; type < 2; ++ type) {
            int selected = min(K_OUT_OF_N, (int)(min(index[0].size(), index[1].size())));
            for (int i = 0; i < selected; ++ i) {
                int id = index[type][rng[threadID].next(index[type].size())];
                rootBag.push_back(id);
            }
        }
        /*vector<int> rootBag;
        int samplesN = max((int)results.size(), 100);
        for (int i = 0; i < samplesN; ++ i) {
            rootBag.push_back(rng[threadID].next(results.size()));
        }*/
        vector<vector<int>> nodeBags;
        nodeBags.push_back(rootBag);

        // Breadth-first growth: nodes/nodeBags are appended to while the loop
        // runs, so the loop naturally processes every created node.
        for (int curNode = 0; curNode < (int)nodes.size(); ++ curNode) {
            TreeNode &node = nodes[curNode];
            vector<int> &bag = nodeBags[curNode];
            if (bag.size() == 0) {
                // a null tree
                node.leaf = true;
                node.result = 0;
                continue;
            }
            myAssert(bag.size() > 0, "[ERROR] empty node in decision tree!");
            // Stop if all labels in the bag are (numerically) identical.
            bool equal = true;
            double first = results[bag[0]];
            for (int i = 1; i < (int)bag.size(); ++ i) {
                if (sign(results[bag[i]] - first)) {
                    equal = false;
                    break;
                }
            }
            if (equal || (int)bag.size() < minNodeSize * 2 || node.level >= maxLevel) {
                // leaf: predict the mean label of the bag
                node.leaf = true;
                for (int i = 0; i < (int)bag.size(); ++ i) {
                    node.result += results[bag[i]];
                }
                node.result /= bag.size();
                continue;
            }
            myAssert(bag.size() >= minNodeSize, "[ERROR] bag is too small!");
            double bagLoss = calculateLoss(bag, results);

            // Randomized split search: RANDOM_FEATURES feature groups, each
            // probed at RANDOM_POSITIONS thresholds drawn from the bag itself.
            int bestFeature = -1;
            int bestLeft = 0, bestRight = 0; // NOTE(review): never read -- dead
            double bestValue = 0;
            double bestLoss = 1e100;
            vector<int> leftBag, rightBag;
            for (int _ = 0; _ < RANDOM_FEATURES; ++ _) {
                int groupID = rng[threadID].next(featureGroups.size());
                int featureID = featureGroups[groupID][rng[threadID].next(featureGroups[groupID].size())];
                for (int __ = 0; __ < RANDOM_POSITIONS; ++ __) {
                    int instanceID = bag[rng[threadID].next(bag.size())];
                    double splitValue = features[instanceID][featureID];
                    vector<int> currentLeftBag, currentRightBag;
                    for (int i = 0; i < (int)bag.size(); ++ i) {
                        int id = bag[i];
                        if (features[id][featureID] < splitValue) {
                            currentLeftBag.push_back(id);
                        } else {
                            currentRightBag.push_back(id);
                        }
                    }
                    if (currentLeftBag.size() < minNodeSize || currentRightBag.size() < minNodeSize) {
                        continue;
                    }
                    // Size-weighted average impurity of the two children.
                    double currentLoss = (calculateLoss(currentLeftBag, results) * currentLeftBag.size() + calculateLoss(currentRightBag, results) * currentRightBag.size()) / bag.size();
                    if (currentLoss < bestLoss) {
                        bestLoss = currentLoss;
                        bestValue = splitValue;
                        bestFeature = featureID;
                        leftBag = currentLeftBag;
                        rightBag = currentRightBag;
                    }
                }
            }
            if (leftBag.size() < minNodeSize || rightBag.size() < minNodeSize) {
                // leaf: no acceptable split was found
                node.leaf = true;
                for (int i = 0; i < (int)bag.size(); ++ i) {
                    node.result += results[bag[i]];
                }
                node.result /= bag.size();
                continue;
            }
            myAssert(leftBag.size() >= minNodeSize && rightBag.size() >= minNodeSize, "[ERROR] bag is too small");
            // Credit this split's loss reduction to the chosen feature.
            // NOTE(review): concurrent trees update this shared vector without
            // synchronization -- benign-looking but a data race under OpenMP.
            featureImportance[bestFeature] += bagLoss - bestLoss;
            // Place the threshold midway between the split value and the
            // largest feature value that actually went left.
            double nextValue = -1e100;
            for (int i = 0; i < (int)leftBag.size(); ++ i) {
                int id = leftBag[i];
                nextValue = max(nextValue, features[id][bestFeature]);
            }
            TreeNode left, right;
            left.level = right.level = node.level + 1;
            node.feature = bestFeature;
            node.value = (bestValue + nextValue) / 2;
            node.left = nodes.size();
            node.right = nodes.size() + 1;
            nodes.push_back(left);
            nodes.push_back(right);
            nodeBags.push_back(leftBag);
            nodeBags.push_back(rightBag);
        }
        nodes.shrink_to_fit();
    }

    // Predict for one instance by walking from the root to a leaf.
    double estimate(vector<double> &features) {
        TreeNode *current = &nodes[0];
        while (!current->leaf) {
            if (features[current->feature] < current->value) {
                current = &nodes[current->left];
            } else {
                current = &nodes[current->right];
            }
        }
        return current->result;
    }
};

class RandomForest {
    vector<DecisionTree> trees;
    vector< vector<double> > features; // retained copy of the training set
    vector<double> results;
public:
    // Serialize all trees to `filename` (raw binary, see DecisionTree::dump).
    // NOTE(review): fopen result is not checked for NULL.
    void dump(string filename) {
        FILE* out = fopen(filename.c_str(), "wb");
        size_t size = trees.size();
        fwrite(&size, sizeof(size), 1, out);
        for (size_t i = 0; i < trees.size(); ++ i) {
            trees[i].dump(out);
        }
        fclose(out);
    }

    // Inverse of dump().
    void load(string filename) {
        FILE* in = fopen(filename.c_str(), "rb");
        size_t size;
        fread(&size, sizeof(size), 1, in);
        trees.resize(size);
        for (size_t i = 0; i < trees.size(); ++ i) {
            trees[i].load(in);
        }
        fclose(in);
    }

    void clear() {
        features.clear();
        results.clear();
        trees.clear();
    }

    // Train `treesNo` trees in parallel (one OpenMP task per tree).
    // The first call caches the training data and (re)sizes the global
    // featureImportance vector; later calls keep the cached copy.
    void train(vector< vector<double> > &_features, vector<double> _results, int treesNo = 100, int minNodeSize = 100, int maxLevel = 100, vector<string> featureNames = vector<string>()) {
        if (features.size() == 0) {
            features = _features;
            results = _results;
            if (features.size() > 0) {
                featureImportance.clear();
                featureImportance.resize(features[0].size(), 0);
            }
        }
        myAssert(features.size() == results.size(), "[ERROR] wrong training data!");
        trees.resize(treesNo);
        #pragma omp parallel for
        for (int i = 0; i < treesNo; ++ i) {
            trees[i].train(_features, _results, minNodeSize, maxLevel, featureNames);
        }
    }

    // Forest prediction = mean of the individual tree predictions.
    double estimate(vector<double> &features) {
        if (trees.size() == 0) {
            return 0.0;
        }
        double sum = 0;
        # pragma omp parallel for reduction (+:sum)
        for (int i = 0; i < (int)trees.size(); ++ i) {
            sum += trees[i].estimate(features);
        }
        return sum / trees.size();
    }
};

};

#endif
Scalar3DUpdater6.h
/*
 * BCMTools
 *
 * Copyright (C) 2011-2014 Institute of Industrial Science, The University of Tokyo.
 * All rights reserved.
 *
 * Copyright (c) 2012-2014 Advanced Institute for Computational Science, RIKEN.
 * All rights reserved.
 *
 */

///
/// @file Scalar3DUpdater6.h
/// @brief Virtual-cell (ghost-cell) updater for scalar data classes.
///

#ifndef SCALAR_3D_UPDATER6_H
#define SCALAR_3D_UPDATER6_H

#include "BCMTools.h"
#include "VCUpdater.h"
#include "Scalar3D.h"

#ifdef BCMT_NAMESPACE
namespace BCMT_NAMESPACE {
#endif

/// Virtual-cell updater for scalar data classes.
///
/// This variant does not average/interpolate values across refinement levels:
/// a cell is propagated as `target_id` if any of the contributing fine/coarse
/// cells equals `target_id`, and -1 otherwise (a presence mask, not a field).
///
/// @note For simplicity, both L->L+1 and L+1->L transfers interpolate on the
/// sender side before communication.
///
/// @todo implement the interpolation kernels in Fortran
///
///
template <typename T>
class Scalar3DUpdater6 : public VCUpdater {

private:

  Scalar3D<T>* dataClass;  ///< data class whose virtual cells are synchronized

  T* sendBuffer[NUM_FACE][NUM_SUBFACE];   ///< send buffer table
  T* recvBuffer[NUM_FACE][NUM_SUBFACE];   ///< receive buffer table

  Scalar3D<T>* neighborDataClass[NUM_FACE][NUM_SUBFACE];  ///< neighbor data class table

  int nx, ny, nz, vc;  ///< block size and virtual-cell width (set by setDataClass)

  T target_id;  ///< id value that is propagated through interpolation
                ///  NOTE(review): compared with == below; assumes T is an
                ///  integer-like type -- confirm before instantiating with float.

public:

  /// Constructor.
  ///
  ///  @param[in] neighborInfo neighbor information array
  ///  @param[in] comm MPI communicator (default MPI::COMM_WORLD)
  ///
  Scalar3DUpdater6(const NeighborInfo* neighborInfo, const MPI::Comm& comm = MPI::COMM_WORLD)
    : VCUpdater(neighborInfo, comm) {
    clearCommBufferPointer();
    clearNeighbor();
    target_id = 1;
  }

  /// Destructor.
  ~Scalar3DUpdater6() {}

  /// Register the data class to synchronize and cache its geometry.
  void setDataClass(DataClass* dc) {
    dataClass = dynamic_cast<Scalar3D<T>*>(dc);
    nx = dataClass->getSizeX();
    ny = dataClass->getSizeY();
    nz = dataClass->getSizeZ();
    vc = dataClass->getVCSize();
  }

  /// Send buffer byte size (same level).
  size_t getSendBufferByteSize(Face face) const {
    return sizeof(T) * getCommBufferSize(face);
  }

  /// Send buffer byte size (level L+1 -> L): a quarter of a face.
  size_t getSendBufferByteSizeF2C(Face face, Subface subface) const {
    return sizeof(T) * getCommBufferSize(face) / 4;
  }

  /// Send buffer byte size (level L -> L+1).
  size_t getSendBufferByteSizeC2F(Face face, Subface subface) const {
    return sizeof(T) * getCommBufferSize(face);
  }

  /// Receive buffer byte size (same level).
  size_t getRecvBufferByteSize(Face face) const {
    return sizeof(T) * getCommBufferSize(face);
  }

  /// Receive buffer byte size (level L+1 -> L).
  size_t getRecvBufferByteSizeF2C(Face face, Subface subface) const {
    return sizeof(T) * getCommBufferSize(face) / 4;
  }

  /// Receive buffer byte size (level L -> L+1).
  size_t getRecvBufferByteSizeC2F(Face face, Subface subface) const {
    return sizeof(T) * getCommBufferSize(face);
  }

  /// PointerSetter for the send buffer slot (caller owns the returned object).
  PointerSetterBase* getSendBufferPointerSetter(Face face, Subface subface) {
    return new PointerSetter<T>(&sendBuffer[face][subface]);
  }

  /// PointerSetter for the receive buffer slot (caller owns the returned object).
  PointerSetterBase* getRecvBufferPointerSetter(Face face, Subface subface) {
    return new PointerSetter<T>(&recvBuffer[face][subface]);
  }

public:

  /// Register a neighbor data class living in the same MPI rank.
  void setNeighbor(Face face, Subface subface, DataClass* dataClass) {
    neighborDataClass[face][subface] = dynamic_cast<Scalar3D<T>*>(dataClass);
  }

  /// Unregister one neighbor entry.
  void clearNeighbor(Face face, Subface subface) {
    neighborDataClass[face][subface] = 0;
  }

  /// Unregister all neighbor entries.
  void clearNeighbor() {
    for (int i = 0; i < NUM_FACE; ++i) {
      for (int j = 0; j < NUM_SUBFACE; ++j) {
        clearNeighbor(Face(i), Subface(j));
      }
    }
  }

  /// Clear one communication-buffer table entry.
  void clearCommBufferPointer(Face face, Subface subface) {
    sendBuffer[face][subface] = recvBuffer[face][subface] = 0;
  }

  /// Clear the whole communication-buffer table.
  void clearCommBufferPointer() {
    for (int i = 0; i < NUM_FACE; ++i) {
      for (int j = 0; j < NUM_SUBFACE; ++j) {
        clearCommBufferPointer(Face(i), Subface(j));
      }
    }
  }

private:

  /// Number of cells in one ghost layer of the given face.
  size_t getCommBufferSize(Face face) const {
    switch (face) {
      case X_M:
      case X_P:
        return ny * nz * vc;
      case Y_M:
      case Y_P:
        return nz * nx * vc;
      case Z_M:
      case Z_P:
        return nx * ny * vc;
      default:
        Exit(EX_FAILURE);
    }
    /* NOTREACHED */
  }

  /* Disabled reference implementation: true linear interpolation kernels.

  /// Linear interpolation L+1 -> L (fine f(i,j,k) -> coarse c(I,J,K)).
  T interpolateF2C(const Scalar3D<T>& f, int I, int J, int K) {
    int i = 2 * I;
    int j = 2 * J;
    int k = 2 * K;
    return 0.125 * (f(i,j,k) + f(i+1,j,k) + f(i,j+1,k) + f(i+1,j+1,k)
                  + f(i,j,k+1) + f(i+1,j,k+1) + f(i,j+1,k+1) + f(i+1,j+1,k+1));
  }

  /// Linear interpolation L+1 -> L (fine f(i,j,k) -> coarse c(I,J,K)).
  T interpolateF2C(const T* fData, const Index3DS& fIndex, int I, int J, int K) {
    int i = 2 * I;
    int j = 2 * J;
    int k = 2 * K;
    return 0.125 * (fData[fIndex(i  ,j  ,k  )] + fData[fIndex(i+1,j  ,k  )]
                  + fData[fIndex(i  ,j+1,k  )] + fData[fIndex(i+1,j+1,k  )]
                  + fData[fIndex(i  ,j  ,k+1)] + fData[fIndex(i+1,j  ,k+1)]
                  + fData[fIndex(i  ,j+1,k+1)] + fData[fIndex(i+1,j+1,k+1)]);
  }

  /// Linear interpolation L -> L+1 (coarse c(I,J,K) -> fine f(i,j,k)).
  T interpolateC2F(const Scalar3D<T>& c, int i, int j, int k) {
    int I, J, K;
    double r, s, t;
    linearInterpolate(i, nx, I, r);
    linearInterpolate(j, ny, J, s);
    linearInterpolate(k, nz, K, t);
    return (1.0-t)*( (1.0-s)*( (1.0-r)*c(I  ,J  ,K  ) + r*c(I+1,J  ,K  ) )
                        + s*( (1.0-r)*c(I  ,J+1,K  ) + r*c(I+1,J+1,K  ) ) )
              +t*( (1.0-s)*( (1.0-r)*c(I  ,J  ,K+1) + r*c(I+1,J  ,K+1) )
                        + s*( (1.0-r)*c(I  ,J+1,K+1) + r*c(I+1,J+1,K+1) ) );
  }

  /// Linear interpolation L -> L+1 (coarse c(I,J,K) -> fine f(i,j,k)).
  T interpolateC2F(const T* cData, const Index3DS& cIndex, int i, int j, int k) {
    int I, J, K;
    double r, s, t;
    linearInterpolate(i, nx, I, r);
    linearInterpolate(j, ny, J, s);
    linearInterpolate(k, nz, K, t);
    return (1.0-t)*( (1.0-s)*( (1.0-r)*cData[cIndex(I  ,J  ,K  )] + r*cData[cIndex(I+1,J  ,K  )] )
                        + s*( (1.0-r)*cData[cIndex(I  ,J+1,K  )] + r*cData[cIndex(I+1,J+1,K  )] ) )
              +t*( (1.0-s)*( (1.0-r)*cData[cIndex(I  ,J  ,K+1)] + r*cData[cIndex(I+1,J  ,K+1)] )
                        + s*( (1.0-r)*cData[cIndex(I  ,J+1,K+1)] + r*cData[cIndex(I+1,J+1,K+1)] ) );
  }

  /// Interpolation parameters for C2F interpolation.
  ///
  /// @note extrapolation (not interpolation) at the end points
  ///
  void linearInterpolate(int i, int n, int& I, double& r) {
#if 1
    I = std::min(std::max(i/2 - 1 + i%2, 0), n - 2);
    r = -0.25 + 0.5 * i - double(I);
#else
    if (i == 0) {
      // extrapolation
      I = 0;
      r = -0.25;
    }
    else if (i == 2*n-1) {
      // extrapolation
      I = n - 2;
      r = 1.25;
    }
    else if (i%2 == 0) {
      I = i/2 - 1;
      r = 0.75;
    } else {
      I = i/2;
      r = 0.25;
    }
#endif
  }
  */

  /// L+1 -> L transfer (fine f(i,j,k) -> coarse c(I,J,K)): the coarse cell is
  /// target_id if any of its eight fine children is, else -1.
  T interpolateF2C(const T* fData, const Index3DS& fIndex, int I, int J, int K) {
    int i = 2 * I;
    int j = 2 * J;
    int k = 2 * K;
    if( fData[fIndex(i  ,j  ,k  )] == target_id ||
        fData[fIndex(i+1,j  ,k  )] == target_id ||
        fData[fIndex(i  ,j+1,k  )] == target_id ||
        fData[fIndex(i  ,j  ,k+1)] == target_id ||
        fData[fIndex(i  ,j+1,k+1)] == target_id ||
        fData[fIndex(i+1,j  ,k+1)] == target_id ||
        fData[fIndex(i+1,j+1,k  )] == target_id ||
        fData[fIndex(i+1,j+1,k+1)] == target_id ) {
      return target_id;
    }
    return -1;
  }

  /// As interpolateF2C, but only the four children on the i+1 side (X_M face).
  T interpolateF2C_X_M(const T* fData, const Index3DS& fIndex, int I, int J, int K) {
    int i = 2 * I;
    int j = 2 * J;
    int k = 2 * K;
    if( fData[fIndex(i+1,j  ,k  )] == target_id ||
        fData[fIndex(i+1,j  ,k+1)] == target_id ||
        fData[fIndex(i+1,j+1,k  )] == target_id ||
        fData[fIndex(i+1,j+1,k+1)] == target_id ) {
      return target_id;
    }
    return -1;
  }

  /// As interpolateF2C, but only the four children on the i side (X_P face).
  T interpolateF2C_X_P(const T* fData, const Index3DS& fIndex, int I, int J, int K) {
    int i = 2 * I;
    int j = 2 * J;
    int k = 2 * K;
    if( fData[fIndex(i  ,j  ,k  )] == target_id ||
        fData[fIndex(i  ,j+1,k  )] == target_id ||
        fData[fIndex(i  ,j  ,k+1)] == target_id ||
        fData[fIndex(i  ,j+1,k+1)] == target_id ) {
      return target_id;
    }
    return -1;
  }

  /// As interpolateF2C, but only the four children on the j+1 side (Y_M face).
  T interpolateF2C_Y_M(const T* fData, const Index3DS& fIndex, int I, int J, int K) {
    int i = 2 * I;
    int j = 2 * J;
    int k = 2 * K;
    if( fData[fIndex(i  ,j+1,k  )] == target_id ||
        fData[fIndex(i  ,j+1,k+1)] == target_id ||
        fData[fIndex(i+1,j+1,k  )] == target_id ||
        fData[fIndex(i+1,j+1,k+1)] == target_id ) {
      return target_id;
    }
    return -1;
  }

  /// As interpolateF2C, but only the four children on the j side (Y_P face).
  T interpolateF2C_Y_P(const T* fData, const Index3DS& fIndex, int I, int J, int K) {
    int i = 2 * I;
    int j = 2 * J;
    int k = 2 * K;
    if( fData[fIndex(i  ,j  ,k  )] == target_id ||
        fData[fIndex(i+1,j  ,k  )] == target_id ||
        fData[fIndex(i  ,j  ,k+1)] == target_id ||
        fData[fIndex(i+1,j  ,k+1)] == target_id ) {
      return target_id;
    }
    return -1;
  }

  /// As interpolateF2C, but only the four children on the k+1 side (Z_M face).
  T interpolateF2C_Z_M(const T* fData, const Index3DS& fIndex, int I, int J, int K) {
    int i = 2 * I;
    int j = 2 * J;
    int k = 2 * K;
    if( fData[fIndex(i  ,j  ,k+1)] == target_id ||
        fData[fIndex(i  ,j+1,k+1)] == target_id ||
        fData[fIndex(i+1,j  ,k+1)] == target_id ||
        fData[fIndex(i+1,j+1,k+1)] == target_id ) {
      return target_id;
    }
    return -1;
  }

  /// As interpolateF2C, but only the four children on the k side (Z_P face).
  T interpolateF2C_Z_P(const T* fData, const Index3DS& fIndex, int I, int J, int K) {
    int i = 2 * I;
    int j = 2 * J;
    int k = 2 * K;
    if( fData[fIndex(i  ,j  ,k  )] == target_id ||
        fData[fIndex(i+1,j  ,k  )] == target_id ||
        fData[fIndex(i  ,j+1,k  )] == target_id ||
        fData[fIndex(i+1,j+1,k  )] == target_id ) {
      return target_id;
    }
    return -1;
  }

  /// L -> L+1 transfer (coarse c(I,J,K) -> fine f(i,j,k)): piecewise-constant
  /// injection of the target_id mask from the parent cell.
  T interpolateC2F(const T* cData, const Index3DS& cIndex, int i, int j, int k) {
    int I = i/2;
    int J = j/2;
    int K = k/2;
    if( cData[cIndex(I  ,J  ,K  )] == target_id ) {
      return target_id;
    }
    return -1;
  }

  /*
  /// Copy virtual-cell data from a neighbor data class (same level).
  void copyFromNeighbor(Face face);

  /// Copy virtual-cell data from a neighbor data class (level L+1 -> L).
  void copyFromNeighborF2C(Face face, Subface subface);

  /// Copy virtual-cell data from a neighbor data class (level L -> L+1).
  void copyFromNeighborC2F(Face face, Subface subface);

  /// Copy virtual-cell data into the send buffer (same level).
  void copyToCommBuffer(Face face);

  /// Copy virtual-cell data into the send buffer (level L+1 -> L).
  void copyToCommBufferF2C(Face face, Subface subface);

  /// Copy virtual-cell data into the send buffer (level L -> L+1).
  void copyToCommBufferC2F(Face face, Subface subface);

  /// Copy virtual-cell data from the receive buffer (same level).
  void copyFromCommBuffer(Face face);

  /// Copy virtual-cell data from the receive buffer (level L+1 -> L).
  void copyFromCommBufferF2C(Face face, Subface subface);

  /// Copy virtual-cell data from the receive buffer (level L -> L+1).
  void copyFromCommBufferC2F(Face face, Subface subface);

  void copyFromNeighborF2C_0(int nx, int ny, int nz, int vc,
                             Face face, Subface subface,
                             const T* fData, Index3DS fIndex,
                             T* cData, Index3DS cIndex);

  void copyFromNeighborC2F_0(int nx, int ny, int nz, int vc,
                             Face face, Subface subface,
                             const T* cData, Index3DS cIndex,
                             T* fData, Index3DS fIndex);

  void copyToCommBufferC2F_0(int nx, int ny, int nz, int vc,
                             Face face, Subface subface,
                             const T* cData, Index3DS cIndex, T* buffer);

  void copyToCommBufferF2C_0(int nx, int ny, int nz, int vc,
                             Face face, Subface subface,
                             const T* fData, Index3DS fIndex, T* buffer);
  */

  /// Copy virtual-cell data from a neighbor data class (same level).
  void copyFromNeighbor(Face face) {
    Scalar3D<T>* dc = neighborDataClass[face][0];
    if (!dc) return;
    switch (face) {
      case X_M:
        dataClass->copyFromDataClass(-vc, 0, 0, dc->getSizeX()-vc, 0, 0, vc, ny, nz, dc);
        break;
      case X_P:
        dataClass->copyFromDataClass(nx, 0, 0, 0, 0, 0, vc, ny, nz, dc);
        break;
      case Y_M:
        dataClass->copyFromDataClass(0, -vc, 0, 0, dc->getSizeY()-vc, 0, nx, vc, nz, dc);
        break;
      case Y_P:
        dataClass->copyFromDataClass(0, ny, 0, 0, 0, 0, nx, vc, nz, dc);
        break;
      case Z_M:
        dataClass->copyFromDataClass(0, 0, -vc, 0, 0, dc->getSizeZ()-vc, nx, ny, vc, dc);
        break;
      case Z_P:
        dataClass->copyFromDataClass(0, 0, nz, 0, 0, 0, nx, ny, vc, dc);
        break;
      default:
        break;
    }
  }

  /// Copy virtual-cell data from a neighbor data class (level L+1 -> L).
  void copyFromNeighborF2C(Face face, Subface subface) {
    T* cData = dataClass->getData();
    Index3DS cIndex = dataClass->getIndex();
    Scalar3D<T>* f = neighborDataClass[face][subface];
    T* fData = f->getData();
    Index3DS fIndex = f->getIndex();
    copyFromNeighborF2C_0(nx, ny, nz, vc, face, subface, fData, fIndex, cData, cIndex);
  }

  /// Copy virtual-cell data from a neighbor data class (level L -> L+1).
  void copyFromNeighborC2F(Face face, Subface subface) {
    T* fData = dataClass->getData();
    Index3DS fIndex = dataClass->getIndex();
    // NOTE(review): always subface 0 -- a coarse neighbor occupies the whole
    // face; confirm this matches VCUpdater's registration convention.
    Scalar3D<T>* c = neighborDataClass[face][0];
    T* cData = c->getData();
    Index3DS cIndex = c->getIndex();
    copyFromNeighborC2F_0(nx, ny, nz, vc, face, subface, cData, cIndex, fData, fIndex);
  }

  /// Copy virtual-cell data into the send buffer (same level).
  void copyToCommBuffer(Face face) {
    T* buffer = sendBuffer[face][0];
    if (!buffer) return;
    switch (face) {
      case X_M:
        dataClass->copyToBuffer(0, 0, 0, vc, ny, nz, buffer);
        break;
      case X_P:
        dataClass->copyToBuffer(nx-vc, 0, 0, vc, ny, nz, buffer);
        break;
      case Y_M:
        dataClass->copyToBuffer(0, 0, 0, nx, vc, nz, buffer);
        break;
      case Y_P:
        dataClass->copyToBuffer(0, ny-vc, 0, nx, vc, nz, buffer);
        break;
      case Z_M:
        dataClass->copyToBuffer(0, 0, 0, nx, ny, vc, buffer);
        break;
      case Z_P:
        dataClass->copyToBuffer(0, 0, nz-vc, nx, ny, vc, buffer);
        break;
      default:
        break;
    }
  }

  /// Copy virtual-cell data into the send buffer (level L+1 -> L).
  /// NOTE(review): uses sendBuffer[face][0] while copyToCommBufferC2F uses
  /// [subface] -- verify this asymmetry is intentional.
  void copyToCommBufferF2C(Face face, Subface subface) {
    T* buffer = sendBuffer[face][0];
    T* fData = dataClass->getData();
    Index3DS fIndex = dataClass->getIndex();
    copyToCommBufferF2C_0(nx, ny, nz, vc, face, subface, fData, fIndex, buffer);
  }

  /// Copy virtual-cell data into the send buffer (level L -> L+1).
  void copyToCommBufferC2F(Face face, Subface subface) {
    T* cData = dataClass->getData();
    Index3DS cIndex = dataClass->getIndex();
    T* buffer = sendBuffer[face][subface];
    copyToCommBufferC2F_0(nx, ny, nz, vc, face, subface, cData, cIndex, buffer);
  }

  /// Copy virtual-cell data from the receive buffer (same level).
  void copyFromCommBuffer(Face face) {
    T* buffer = recvBuffer[face][0];
    if (!buffer) return;
    switch (face) {
      case X_M:
        dataClass->copyFromBuffer(-vc, 0, 0, vc, ny, nz, buffer);
        break;
      case X_P:
        dataClass->copyFromBuffer(nx, 0, 0, vc, ny, nz, buffer);
        break;
      case Y_M:
        dataClass->copyFromBuffer(0, -vc, 0, nx, vc, nz, buffer);
        break;
      case Y_P:
        dataClass->copyFromBuffer(0, ny, 0, nx, vc, nz, buffer);
        break;
      case Z_M:
        dataClass->copyFromBuffer(0, 0, -vc, nx, ny, vc, buffer);
        break;
      case Z_P:
        dataClass->copyFromBuffer(0, 0, nz, nx, ny, vc, buffer);
        break;
      default:
        break;
    }
  }

  /// Copy virtual-cell data from the receive buffer (level L+1 -> L):
  /// the quarter-face patch lands at the subface's origin in the ghost layer.
  void copyFromCommBufferF2C(Face face, Subface subface) {
    T* buffer = recvBuffer[face][subface];
    switch (face) {
      case X_M:
      {
        int j0 = (ny/2) * subfaceOrigin0(subface);
        int k0 = (nz/2) * subfaceOrigin1(subface);
        dataClass->copyFromBuffer(-vc, j0, k0, vc, ny/2, nz/2, buffer);
        break;
      }
      case X_P:
      {
        int j0 = (ny/2) * subfaceOrigin0(subface);
        int k0 = (nz/2) * subfaceOrigin1(subface);
        dataClass->copyFromBuffer(nx, j0, k0, vc, ny/2, nz/2, buffer);
        break;
      }
      case Y_M:
      {
        int k0 = (nz/2) * subfaceOrigin0(subface);
        int i0 = (nx/2) * subfaceOrigin1(subface);
        dataClass->copyFromBuffer(i0, -vc, k0, nx/2, vc, nz/2, buffer);
        break;
      }
      case Y_P:
      {
        int k0 = (nz/2) * subfaceOrigin0(subface);
        int i0 = (nx/2) * subfaceOrigin1(subface);
        dataClass->copyFromBuffer(i0, ny, k0, nx/2, vc, nz/2, buffer);
        break;
      }
      case Z_M:
      {
        int i0 = (nx/2) * subfaceOrigin0(subface);
        int j0 = (ny/2) * subfaceOrigin1(subface);
        dataClass->copyFromBuffer(i0, j0, -vc, nx/2, ny/2, vc, buffer);
        break;
      }
      case Z_P:
      {
        int i0 = (nx/2) * subfaceOrigin0(subface);
        int j0 = (ny/2) * subfaceOrigin1(subface);
        dataClass->copyFromBuffer(i0, j0, nz, nx/2, ny/2, vc, buffer);
        break;
      }
      default:
        break;
    }
  }

  /// Copy virtual-cell data from the receive buffer (level L -> L+1):
  /// the sender already interpolated, so this is a plain same-level copy.
  void copyFromCommBufferC2F(Face face, Subface subface) {
    copyFromCommBuffer(face);
  }

  /// Kernel: fill this (coarse) block's ghost layer on `face` from the fine
  /// neighbor's boundary cells, via the face-specific F2C mask transfer.
  void copyFromNeighborF2C_0(int nx, int ny, int nz, int vc,
                             Face face, Subface subface,
                             const T* fData, Index3DS fIndex,
                             T* cData, Index3DS cIndex) {
    switch (face) {
      case X_M:
      {
        int j0 = (ny/2) * subfaceOrigin0(subface);
        int k0 = (nz/2) * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int k = 0; k < nz/2; k++) {
          for (int j = 0; j < ny/2; j++) {
            for (int i = 0; i < vc; i++) {
              cData[cIndex(i-vc, j+j0, k+k0)] = interpolateF2C_X_M(fData, fIndex, i+nx/2-vc, j, k);
            }
          }
        }
        break;
      }
      case X_P:
      {
        int j0 = (ny/2) * subfaceOrigin0(subface);
        int k0 = (nz/2) * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int k = 0; k < nz/2; k++) {
          for (int j = 0; j < ny/2; j++) {
            for (int i = 0; i < vc; i++) {
              cData[cIndex(i+nx, j+j0, k+k0)] = interpolateF2C_X_P(fData, fIndex, i, j, k);
            }
          }
        }
        break;
      }
      case Y_M:
      {
        int k0 = (nz/2) * subfaceOrigin0(subface);
        int i0 = (nx/2) * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int k = 0; k < nz/2; k++) {
          for (int j = 0; j < vc; j++) {
            for (int i = 0; i < nx/2; i++) {
              cData[cIndex(i+i0, j-vc, k+k0)] = interpolateF2C_Y_M(fData, fIndex, i, j+ny/2-vc, k);
            }
          }
        }
        break;
      }
      case Y_P:
      {
        int k0 = (nz/2) * subfaceOrigin0(subface);
        int i0 = (nx/2) * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int k = 0; k < nz/2; k++) {
          for (int j = 0; j < vc; j++) {
            for (int i = 0; i < nx/2; i++) {
              cData[cIndex(i+i0, j+ny, k+k0)] = interpolateF2C_Y_P(fData, fIndex, i, j, k);
            }
          }
        }
        break;
      }
      case Z_M:
      {
        int i0 = (nx/2) * subfaceOrigin0(subface);
        int j0 = (ny/2) * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int k = 0; k < vc; k++) {
          for (int j = 0; j < ny/2; j++) {
            for (int i = 0; i < nx/2; i++) {
              cData[cIndex(i+i0, j+j0, k-vc)] = interpolateF2C_Z_M(fData, fIndex, i, j, k+nz/2-vc);
            }
          }
        }
        break;
      }
      case Z_P:
      {
        int i0 = (nx/2) * subfaceOrigin0(subface);
        int j0 = (ny/2) * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int k = 0; k < vc; k++) {
          for (int j = 0; j < ny/2; j++) {
            for (int i = 0; i < nx/2; i++) {
              cData[cIndex(i+i0, j+j0, k+nz)] = interpolateF2C_Z_P(fData, fIndex, i, j, k);
            }
          }
        }
        break;
      }
      default:
        break;
    }
  }

  /// Kernel: fill this (fine) block's ghost layer on `face` from the coarse
  /// neighbor, via piecewise-constant C2F injection.
  void copyFromNeighborC2F_0(int nx, int ny, int nz, int vc,
                             Face face, Subface subface,
                             const T* cData, Index3DS cIndex,
                             T* fData, Index3DS fIndex) {
    switch (face) {
      case X_M:
      {
        int J0 = ny * subfaceOrigin0(subface);
        int K0 = nz * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int K = 0; K < nz; K++) {
          for (int J = 0; J < ny; J++) {
            for (int I = 0; I < vc; I++) {
              fData[fIndex(I-vc, J, K)] = interpolateC2F(cData, cIndex, I+2*nx-vc, J+J0, K+K0);
            }
          }
        }
        break;
      }
      case X_P:
      {
        int J0 = ny * subfaceOrigin0(subface);
        int K0 = nz * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int K = 0; K < nz; K++) {
          for (int J = 0; J < ny; J++) {
            for (int I = 0; I < vc; I++) {
              fData[fIndex(I+nx, J, K)] = interpolateC2F(cData, cIndex, I, J+J0, K+K0);
            }
          }
        }
        break;
      }
      case Y_M:
      {
        int K0 = nz * subfaceOrigin0(subface);
        int I0 = nx * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int K = 0; K < nz; K++) {
          for (int J = 0; J < vc; J++) {
            for (int I = 0; I < nx; I++) {
              fData[fIndex(I, J-vc, K)] = interpolateC2F(cData, cIndex, I+I0, J+2*ny-vc, K+K0);
            }
          }
        }
        break;
      }
      case Y_P:
      {
        int K0 = nz * subfaceOrigin0(subface);
        int I0 = nx * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int K = 0; K < nz; K++) {
          for (int J = 0; J < vc; J++) {
            for (int I = 0; I < nx; I++) {
              fData[fIndex(I, J+ny, K)] = interpolateC2F(cData, cIndex, I+I0, J, K+K0);
            }
          }
        }
        break;
      }
      case Z_M:
      {
        int I0 = nx * subfaceOrigin0(subface);
        int J0 = ny * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int K = 0; K < vc; K++) {
          for (int J = 0; J < ny; J++) {
            for (int I = 0; I < nx; I++) {
              fData[fIndex(I, J, K-vc)] = interpolateC2F(cData, cIndex, I+I0, J+J0, K+2*nz-vc);
            }
          }
        }
        break;
      }
      case Z_P:
      {
        int I0 = nx * subfaceOrigin0(subface);
        int J0 = ny * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int K = 0; K < vc; K++) {
          for (int J = 0; J < ny; J++) {
            for (int I = 0; I < nx; I++) {
              fData[fIndex(I, J, K+nz)] = interpolateC2F(cData, cIndex, I+I0, J+J0, K);
            }
          }
        }
        break;
      }
      default:
        break;
    }
  }

  /// Kernel: pack C2F-interpolated boundary data into a linear send buffer.
  void copyToCommBufferC2F_0(int nx, int ny, int nz, int vc,
                             Face face, Subface subface,
                             const T* cData, Index3DS cIndex, T* buffer) {
    int ii = 0;  // NOTE(review): unused leftover from a pre-OpenMP version
    switch (face) {
      case X_M:
      {
        int J0 = ny * subfaceOrigin0(subface);
        int K0 = nz * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int K = 0; K < nz; K++) {
          for (int J = 0; J < ny; J++) {
            for (int I = 0; I < vc; I++) {
              int m = I + vc*(J + ny*K);
              buffer[m] = interpolateC2F(cData, cIndex, I, J+J0, K+K0);
            }
          }
        }
        break;
      }
      case X_P:
      {
        int J0 = ny * subfaceOrigin0(subface);
        int K0 = nz * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int K = 0; K < nz; K++) {
          for (int J = 0; J < ny; J++) {
            for (int I = 0; I < vc; I++) {
              int m = I + vc*(J + ny*K);
              buffer[m] = interpolateC2F(cData, cIndex, I+2*nx-vc, J+J0, K+K0);
            }
          }
        }
        break;
      }
      case Y_M:
      {
        int K0 = nz * subfaceOrigin0(subface);
        int I0 = nx * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int K = 0; K < nz; K++) {
          for (int J = 0; J < vc; J++) {
            for (int I = 0; I < nx; I++) {
              int m = I + nx*(J + vc*K);
              buffer[m] = interpolateC2F(cData, cIndex, I+I0, J, K+K0);
            }
          }
        }
        break;
      }
      case Y_P:
      {
        int K0 = nz * subfaceOrigin0(subface);
        int I0 = nx * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int K = 0; K < nz; K++) {
          for (int J = 0; J < vc; J++) {
            for (int I = 0; I < nx; I++) {
              int m = I + nx*(J + vc*K);
              buffer[m] = interpolateC2F(cData, cIndex, I+I0, J+2*ny-vc, K+K0);
            }
          }
        }
        break;
      }
      case Z_M:
      {
        int I0 = nx * subfaceOrigin0(subface);
        int J0 = ny * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int K = 0; K < vc; K++) {
          for (int J = 0; J < ny; J++) {
            for (int I = 0; I < nx; I++) {
              int m = I + nx*(J + ny*K);
              buffer[m] = interpolateC2F(cData, cIndex, I+I0, J+J0, K);
            }
          }
        }
        break;
      }
      case Z_P:
      {
        int I0 = nx * subfaceOrigin0(subface);
        int J0 = ny * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int K = 0; K < vc; K++) {
          for (int J = 0; J < ny; J++) {
            for (int I = 0; I < nx; I++) {
              int m = I + nx*(J + ny*K);
              buffer[m] = interpolateC2F(cData, cIndex, I+I0, J+J0, K+2*nz-vc);
            }
          }
        }
        break;
      }
      default:
        break;
    }
  }

  /// Kernel: pack F2C-reduced boundary data into a linear send buffer.
  void copyToCommBufferF2C_0(int nx, int ny, int nz, int vc,
                             Face face, Subface subface,
                             const T* fData, Index3DS fIndex, T* buffer) {
    int ii = 0;  // NOTE(review): unused leftover from a pre-OpenMP version
    switch (face) {
      case X_M:
      {
#pragma omp parallel for collapse(3)
        for (int k = 0; k < nz/2; k++) {
          for (int j = 0; j < ny/2; j++) {
            for (int i = 0; i < vc; i++) {
              int m = i + vc*(j + ny/2*k);
              buffer[m] = interpolateF2C_X_M(fData, fIndex, i, j, k);
            }
          }
        }
        break;
      }
      case X_P:
      {
#pragma omp parallel for collapse(3)
        for (int k = 0; k < nz/2; k++) {
          for (int j = 0; j < ny/2; j++) {
            for (int i = 0; i < vc; i++) {
              int m = i + vc*(j + ny/2*k);
              buffer[m] = interpolateF2C_X_P(fData, fIndex, i+nx/2-vc, j, k);
            }
          }
        }
        break;
      }
      case Y_M:
      {
#pragma omp parallel for collapse(3)
        for (int k = 0; k < nz/2; k++) {
          for (int j = 0; j < vc; j++) {
            for (int i = 0; i < nx/2; i++) {
              int m = i + nx/2*(j + vc*k);
              buffer[m] = interpolateF2C_Y_M(fData, fIndex, i, j, k);
            }
          }
        }
        break;
      }
      case Y_P:
      {
#pragma omp parallel for collapse(3)
        for (int k = 0; k < nz/2; k++) {
          for (int j = 0; j < vc; j++) {
            for (int i = 0; i < nx/2; i++) {
              int m = i + nx/2*(j + vc*k);
              buffer[m] = interpolateF2C_Y_P(fData, fIndex, i, j+ny/2-vc, k);
            }
          }
        }
        break;
      }
      case Z_M:
      {
#pragma omp parallel for collapse(3)
        for (int k = 0; k < vc; k++) {
          for (int j = 0; j < ny/2; j++) {
            for (int i = 0; i < nx/2; i++) {
              int m = i + nx/2*(j + ny/2*k);
              buffer[m] = interpolateF2C_Z_M(fData, fIndex, i, j, k);
            }
          }
        }
        break;
      }
      case Z_P:
      {
#pragma omp parallel for collapse(3)
        for (int k = 0; k < vc; k++) {
          for (int j = 0; j < ny/2; j++) {
            for (int i = 0; i < nx/2; i++) {
              int m = i + nx/2*(j + ny/2*k);
              buffer[m] = interpolateF2C_Z_P(fData, fIndex, i, j, k+nz/2-vc);
            }
          }
        }
        break;
      }
      default:
        break;
    }
  }

};

#ifdef BCMT_NAMESPACE
} // namespace BCMT_NAMESPACE
#endif

#endif // SCALAR_3D_UPDATER_H
GB_unaryop__abs_uint32_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_uint32_uint64
// op(A') function:  GB_tran__abs_uint32_uint64

// C type:   uint32_t
// A type:   uint64_t
// cast:     uint32_t cij = (uint32_t) aij
// unaryop:  cij = aij

// Note: ABS of an unsigned integer is the identity, so the "operator"
// below is a plain copy; only the uint64 -> uint32 cast does any work.

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    uint32_t z = (uint32_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_UINT32 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise, embarrassingly parallel: each of the anz entries of Ax is
// cast and copied into Cx independently (static OpenMP schedule).
GrB_Info GB_unop__abs_uint32_uint64
(
    uint32_t *restrict Cx,
    const uint64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose/apply loop lives in the included template,
// GB_unaryop_transpose.c, instantiated with the macros defined above.
GrB_Info GB_tran__abs_uint32_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
valid.res6.src.h
#pragma once
#include "ukr.h"
#include "omp.h"
#include "transpose.h"
#include "gen_ukr_A6B2gemm_1_128_28_28_128_3_3.h"
#include "gen_ukr_A4B2gemm_1_128_28_28_128_3_3.h"

// Auto-generated convolution driver (see the "push button generated block"
// markers below): presumably a 3x3, 128->128 channel conv on a 28x28 image
// expressed as a tiled GEMM — TODO confirm against the generator.
//
// Phase 1: each OpenMP thread repacks the weights `oriB` into the
//          micro-kernel layout in `B` via 8x8 AVX transposes (16-wide
//          f-panels, 8-wide c*w*h strips).
// Phase 2 (after the barrier): a generated 15-deep tiled loop nest calls the
//          6x2v micro-kernel on full 6-row strips and the 4x2v kernel on the
//          remainder, accumulating into `C`.
// Tile sizes (Tc1, Tf2, Txy3) and problem constants (uNf, uNc, uNw, uNh)
// are defined in the included headers.
void testrun(float* A ,float*B, float*C, float*oriB ){
    int tid = omp_get_thread_num();
    // NOTE(review): Nx/Ny/Nh are never read below (the literals 28/3 are
    // inlined by the generator).
    int Nx = 28;
    int Ny = 28;
    int Nh = 3;
    // Row strides handed to the scatter micro-kernel; temporarily bumped by
    // +2 below to skip across the image-row boundary in the edge case.
    long long Astrides[6] = {0,1,2,3,4,5};
    int b1 = 0;
    // Weight repack: distribute 16-wide f-panels / 8-wide cwh strips over
    // threads (the %1 / /1 factors are degenerate single-thread splits
    // emitted by the generator).
    for (int fpck = (tid%1)*16; fpck < uNf; fpck+=1*16){
        for(int cwh = (tid/1)*8; cwh < uNc*uNw*uNh/8*8; cwh+=8*1){
            transpose8x8_avx(oriB+ (fpck+0)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 0, uNc*uNw*uNh, 16);
            transpose8x8_avx(oriB+ (fpck+8)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 8, uNc*uNw*uNh, 16);
        }
    }
    #pragma omp barrier
    // begin push button generated block
    for(int c5=0;c5<128+0;c5+=128) {
    for(int xy5=0;xy5<784+0;xy5+=784) {
    for(int f5=0;f5<128+0;f5+=128) {
    for(int c4=c5;c4<min(128, 128+c5);c4+=128) {
    for(int xy4=xy5;xy4<min(784, 784+xy5);xy4+=784) {
    for(int f4=f5;f4<min(128, 128+f5);f4+=128) {
    for(int c3=c4;c3<min(128, 128+c4);c3+=Tc1) {
    for(int f3=f4;f3<min(128, 128+f4);f3+=Tf2) {
    for(int xy3=xy4;xy3<min(784, 784+xy4);xy3+=Txy3) {
    for(int xy2=xy3;xy2<min(784, Txy3+xy3);xy2+=6) {
    for(int f2=f3;f2<min(128, Tf2+f3);f2+=16) {
    for(int c2=c3;c2<min(128, Tc1+c3);c2+=Tc1) {
    for(int c1=c2;c1<min(128, Tc1+c2);c1+=Tc1) {
    for(int xy1=xy2;xy1<min(784, 6+xy2);xy1+=6) {
    for(int f1=f2;f1<min(128, 16+f2);f1+=16) {
        // Innermost tile: 6 output pixels x 16 filters x ctile channels.
        int ctile=min(Tc1, 128-c1);
        int x1=xy1/28;          // output row
        int y1=xy1%28/1;        // output column
        int c1_1=c1/1;
        int c1_2=c1%1/1;
        int kf1_1=f1/16;
        int kf1_2=f1%16/1;
        int of1_1=f1/1;
        int of1_2=f1%1/1;
        // Flat offsets into the packed operands (constants baked in by the
        // generator: 115200 = per-batch A, 18432 = per-f-panel B,
        // 100352 = 128*784 per-batch C).
        int offsetA=0+b1*115200+c1_1*900+1*x1*30+1*y1*1+c1_2*1;
        int offsetB=0+kf1_1*18432+c1*144+0*48+0*16+kf1_2*1;
        int offsetC=0+b1*100352+of1_1*784+x1*28+y1*1+of1_2*1;
        if(28-y1>=6){
            // All 6 pixels lie on the same image row: plain strides.
            cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
        }
        else if(28*28-xy1>=6){
            // Strip straddles a row boundary: shift the trailing strides by
            // +2 to jump to the next row, then restore them.
            for(int sti=28-y1;sti<6;sti+=1) { Astrides[sti]+=2; }
            cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
            for(int sti=28-y1;sti<6;sti+=1) { Astrides[sti]-=2; }
        }
        else{
            // Fewer than 6 pixels left in the image: 4-row remainder kernel.
            cnn_ukr_float_scatter_4x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
        }
    } } } } } } } } } } } } } } }
    // end push button generated block
}
GB_AxB_dot4_template.c
//------------------------------------------------------------------------------
// GB_AxB_dot4: C+=A'*B via dot products, where C is dense
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// C+=A'*B where C is a dense matrix and computed in-place.  The monoid of the
// semiring matches the accum operator, and the type of C matches the ztype of
// accum.  That is, no typecasting can be done with C.

// The PAIR operator as the multiplier provides important special cases.

// See Template/GB_AxB_dot_cij.c for details.

// This is a template: GB_GETA/GB_GETB/GB_GETC/GB_PUTC/GB_MULTADD etc. are
// defined by the file that #include's it.

// cij += A(k,i) * B(k,j)
// Merge step for the sparse/sparse dot product: loads cij lazily on the
// first hit, multiplies the matched pair, and advances BOTH pA and pB.
#undef  GB_DOT_MERGE
#define GB_DOT_MERGE                                                \
{                                                                   \
    if (!cij_updated)                                               \
    {                                                               \
        cij_updated = true ;                                        \
        GB_GETC (cij, pC) ;                                         \
    }                                                               \
    GB_GETA (aki, Ax, pA) ;             /* aki = A(k,i) */          \
    GB_GETB (bkj, Bx, pB) ;             /* bkj = B(k,j) */          \
    GB_MULTADD (cij, aki, bkj) ;        /* cij += aki * bkj */      \
    GB_DOT_TERMINAL (cij) ;             /* break if cij == terminal */ \
    pA++ ;                                                          \
    pB++ ;                                                          \
}

{

    //--------------------------------------------------------------------------
    // get A, B, and C
    //--------------------------------------------------------------------------

    GB_CTYPE *GB_RESTRICT Cx = C->x ;
    const int64_t cvlen = C->vlen ;

    const int64_t *GB_RESTRICT Bp = B->p ;
    const int64_t *GB_RESTRICT Bh = B->h ;
    const int64_t *GB_RESTRICT Bi = B->i ;
    // pattern-only multiplies never read the numeric values
    const GB_BTYPE *GB_RESTRICT Bx = B_is_pattern ? NULL : B->x ;
    const int64_t bvlen = B->vlen ;

    const int64_t *GB_RESTRICT Ap = A->p ;
    const int64_t *GB_RESTRICT Ah = A->h ;
    const int64_t *GB_RESTRICT Ai = A->i ;
    const GB_ATYPE *GB_RESTRICT Ax = A_is_pattern ? NULL : A->x ;

    ASSERT (A->vlen == B->vlen) ;

    // tasks form a 2D naslice-by-nbslice grid over slices of A and B
    int ntasks = naslice * nbslice ;

    //--------------------------------------------------------------------------
    // C += A'*B
    //--------------------------------------------------------------------------

    int taskid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the entries in A and B to compute
        //----------------------------------------------------------------------

        int a_taskid = taskid / nbslice ;
        int b_taskid = taskid % nbslice ;

        int64_t akfirst = A_slice [a_taskid] ;
        int64_t aklast  = A_slice [a_taskid+1] ;
        if (akfirst >= aklast) continue ;

        int64_t bkfirst = B_slice [b_taskid] ;
        int64_t bklast  = B_slice [b_taskid+1] ;
        if (bkfirst >= bklast) continue ;

        //----------------------------------------------------------------------
        // C+=A'*B via dot products
        //----------------------------------------------------------------------

        for (int64_t bk = bkfirst ; bk < bklast ; bk++)
        {

            //------------------------------------------------------------------
            // get B(:,j)
            //------------------------------------------------------------------

            // Bh == NULL means B is in non-hypersparse form
            int64_t j = (Bh == NULL) ? bk : Bh [bk] ;
            int64_t pB_start = Bp [bk] ;
            int64_t pB_end   = Bp [bk+1] ;
            int64_t pC_start = j * cvlen ;   // C is dense, column-major
            int64_t bjnz = pB_end - pB_start ;
            if (bjnz == 0) continue ;

            if (bjnz == bvlen)
            {

                //--------------------------------------------------------------
                // B(:,j) is dense
                //--------------------------------------------------------------

                for (int64_t ak = akfirst ; ak < aklast ; ak++)
                {

                    //----------------------------------------------------------
                    // get A(:,i)
                    //----------------------------------------------------------

                    int64_t i = (Ah == NULL) ? ak : Ah [ak] ;
                    int64_t pA     = Ap [ak] ;
                    int64_t pA_end = Ap [ak+1] ;
                    int64_t ainz = pA_end - pA ;
                    if (ainz == 0) continue ;

                    GB_CIJ_DECLARE (cij) ;      // declare the cij scalar
                    int64_t pC = i + pC_start ; // C(i,j) is at Cx [pC]
                    int64_t pB = pB_start ;
                    GB_GETC (cij, pC) ;         // cij = Cx [pC]

                    //----------------------------------------------------------
                    // special cases for the PAIR multiplier
                    //----------------------------------------------------------

                    // Since B(:,j) is dense, C(i,j) += A(:,i)'*B(:,j) is
                    // trivial to compute with the PAIR multiplier.

                    #if GB_IS_PAIR_MULTIPLIER

                        #if GB_IS_ANY_MONOID
                        // ANY monoid: take the first entry found
                        cij = 1 ;
                        #elif GB_IS_EQ_MONOID
                        // A(:,i)'*B(:j) is one, so this result must be
                        // accumulated into cij, as cij += 1, where the
                        // accumulator is the EQ operator.
                        cij = (cij == 1) ;
                        #elif (GB_CTYPE_BITS > 0)
                        // PLUS, XOR monoids: A(:,i)'*B(:,j) is nnz(A(:,i)),
                        // for bool, 8-bit, 16-bit, or 32-bit integer
                        uint64_t t = ((uint64_t) cij) + ainz ;
                        cij = (GB_CTYPE) (t & GB_CTYPE_BITS) ;
                        #else
                        // PLUS monoid for float, double, or 64-bit integers
                        cij += (GB_CTYPE) ainz ;
                        #endif

                    #else

                        //------------------------------------------------------
                        // general case
                        //------------------------------------------------------

                        if (ainz == bvlen)
                        {

                            //--------------------------------------------------
                            // both A(:,i) and B(:,j) are dense
                            //--------------------------------------------------

                            GB_PRAGMA_VECTORIZE_DOT
                            for (int64_t k = 0 ; k < bvlen ; k++)
                            {
                                GB_DOT_TERMINAL (cij) ;   // break if terminal
                                // cij += A(k,i) * B(k,j)
                                GB_GETA (aki, Ax, pA+k) ;     // aki = A(k,i)
                                GB_GETB (bkj, Bx, pB+k) ;     // bkj = B(k,j)
                                GB_MULTADD (cij, aki, bkj) ;  // cij += aki * bkj
                            }

                        }
                        else
                        {

                            //--------------------------------------------------
                            // A(:,i) is sparse and B(:,j) is dense
                            //--------------------------------------------------

                            GB_PRAGMA_VECTORIZE_DOT
                            for (int64_t p = pA ; p < pA_end ; p++)
                            {
                                GB_DOT_TERMINAL (cij) ;   // break if terminal
                                int64_t k = Ai [p] ;
                                // cij += A(k,i) * B(k,j)
                                GB_GETA (aki, Ax, p   ) ;     // aki = A(k,i)
                                GB_GETB (bkj, Bx, pB+k) ;     // bkj = B(k,j)
                                GB_MULTADD (cij, aki, bkj) ;  // cij += aki * bkj
                            }
                        }
                    #endif

                    GB_PUTC (cij, pC) ;     // Cx [pC] = cij
                }

            }
            else
            {

                //--------------------------------------------------------------
                // B(:,j) is sparse
                //--------------------------------------------------------------

                // get the first and last index in B(:,j)
                int64_t ib_first = Bi [pB_start] ;
                int64_t ib_last  = Bi [pB_end-1] ;

                for (int64_t ak = akfirst ; ak < aklast ; ak++)
                {

                    //----------------------------------------------------------
                    // get A(:,i)
                    //----------------------------------------------------------

                    int64_t i = (Ah == NULL) ? ak : Ah [ak] ;
                    int64_t pA     = Ap [ak] ;
                    int64_t pA_end = Ap [ak+1] ;
                    int64_t ainz = pA_end - pA ;
                    if (ainz == 0) continue ;

                    // skip if the index ranges of A(:,i) and B(:,j) are disjoint
                    if (Ai [pA_end-1] < ib_first || ib_last < Ai [pA]) continue;

                    //----------------------------------------------------------
                    // C(i,j) += A(:,i)'*B(:,j)
                    //----------------------------------------------------------

                    GB_CIJ_DECLARE (cij) ;      // declare the cij scalar
                    int64_t pC = i + pC_start ; // C(i,j) is at Cx [pC]
                    int64_t pB = pB_start ;

                    if (ainz == bvlen)
                    {

                        //------------------------------------------------------
                        // A(:,i) is dense and B(:,j) is sparse
                        //------------------------------------------------------

                        GB_GETC (cij, pC) ;     // cij = Cx [pC]

                        #if GB_IS_PAIR_MULTIPLIER

                            #if GB_IS_ANY_MONOID
                            // ANY monoid: take the first entry found
                            cij = 1 ;
                            #elif GB_IS_EQ_MONOID
                            // A(:,i)'*B(:j) is one, so this result must be
                            // accumulated into cij, as cij += 1, where the
                            // accumulator is the EQ operator.
                            cij = (cij == 1) ;
                            #elif (GB_CTYPE_BITS > 0)
                            // PLUS, XOR monoids: A(:,i)'*B(:,j) is nnz(A(:,i)),
                            // for bool, 8-bit, 16-bit, or 32-bit integer
                            uint64_t t = ((uint64_t) cij) + bjnz ;
                            cij = (GB_CTYPE) (t & GB_CTYPE_BITS) ;
                            #else
                            // PLUS monoid for float, double, or 64-bit integers
                            cij += (GB_CTYPE) bjnz ;
                            #endif

                        #else

                            GB_PRAGMA_VECTORIZE_DOT
                            for (int64_t p = pB ; p < pB_end ; p++)
                            {
                                GB_DOT_TERMINAL (cij) ;   // break if terminal
                                int64_t k = Bi [p] ;
                                // cij += A(k,i) * B(k,j)
                                GB_GETA (aki, Ax, pA+k) ;     // aki = A(k,i)
                                GB_GETB (bkj, Bx, p   ) ;     // bkj = B(k,j)
                                GB_MULTADD (cij, aki, bkj) ;  // cij += aki*bkj
                            }
                        #endif

                        GB_PUTC (cij, pC) ;     // Cx [pC] = cij

                    }
                    else if (ainz > 8 * bjnz)
                    {

                        //------------------------------------------------------
                        // B(:,j) is very sparse compared to A(:,i)
                        //------------------------------------------------------

                        // merge join, with binary search to advance through A
                        bool cij_updated = false ;
                        while (pA < pA_end && pB < pB_end)
                        {
                            int64_t ia = Ai [pA] ;
                            int64_t ib = Bi [pB] ;
                            if (ia < ib)
                            {
                                // A(ia,i) appears before B(ib,j)
                                // discard all entries A(ia:ib-1,i)
                                int64_t pleft = pA + 1 ;
                                int64_t pright = pA_end - 1 ;
                                GB_TRIM_BINARY_SEARCH (ib, Ai, pleft, pright) ;
                                ASSERT (pleft > pA) ;
                                pA = pleft ;
                            }
                            else if (ib < ia)
                            {
                                // B(ib,j) appears before A(ia,i)
                                pB++ ;
                            }
                            else // ia == ib == k
                            {
                                // A(k,i) and B(k,j) are next entries to merge
                                GB_DOT_MERGE ;
                            }
                        }
                        if (cij_updated) GB_PUTC (cij, pC) ;

                    }
                    else if (bjnz > 8 * ainz)
                    {

                        //------------------------------------------------------
                        // A(:,i) is very sparse compared to B(:,j)
                        //------------------------------------------------------

                        // merge join, with binary search to advance through B
                        bool cij_updated = false ;
                        while (pA < pA_end && pB < pB_end)
                        {
                            int64_t ia = Ai [pA] ;
                            int64_t ib = Bi [pB] ;
                            if (ia < ib)
                            {
                                // A(ia,i) appears before B(ib,j)
                                pA++ ;
                            }
                            else if (ib < ia)
                            {
                                // B(ib,j) appears before A(ia,i)
                                // discard all entries B(ib:ia-1,j)
                                int64_t pleft = pB + 1 ;
                                int64_t pright = pB_end - 1 ;
                                GB_TRIM_BINARY_SEARCH (ia, Bi, pleft, pright) ;
                                ASSERT (pleft > pB) ;
                                pB = pleft ;
                            }
                            else // ia == ib == k
                            {
                                // A(k,i) and B(k,j) are next entries to merge
                                GB_DOT_MERGE ;
                            }
                        }
                        if (cij_updated) GB_PUTC (cij, pC) ;

                    }
                    else
                    {

                        //------------------------------------------------------
                        // A(:,i) and B(:,j) have about the same sparsity
                        //------------------------------------------------------

                        // plain two-pointer merge join
                        bool cij_updated = false ;
                        while (pA < pA_end && pB < pB_end)
                        {
                            int64_t ia = Ai [pA] ;
                            int64_t ib = Bi [pB] ;
                            if (ia < ib)
                            {
                                // A(ia,i) appears before B(ib,j)
                                pA++ ;
                            }
                            else if (ib < ia)
                            {
                                // B(ib,j) appears before A(ia,i)
                                pB++ ;
                            }
                            else // ia == ib == k
                            {
                                // A(k,i) and B(k,j) are the entries to merge
                                GB_DOT_MERGE ;
                            }
                        }
                        if (cij_updated) GB_PUTC (cij, pC) ;
                    }
                }
            }
        }
    }
}
residual_based_adjoint_bossak_scheme.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: // #if !defined(KRATOS_RESIDUAL_BASED_ADJOINT_BOSSAK_SCHEME_H_INCLUDED) #define KRATOS_RESIDUAL_BASED_ADJOINT_BOSSAK_SCHEME_H_INCLUDED // System includes #include <vector> #include <string> #include <unordered_set> #include <functional> // External includes // Project includes #include "includes/define.h" #include "includes/checks.h" #include "includes/kratos_parameters.h" #include "solving_strategies/schemes/scheme.h" #include "response_functions/adjoint_response_function.h" #include "utilities/variable_utils.h" #include "utilities/indirect_scalar.h" #include "utilities/adjoint_extensions.h" namespace Kratos { ///@name Kratos Classes ///@{ /// A scheme for dynamic adjoint equations, using Bossak time integration. /** * It can be used for either first- or second-order time derivatives. Elements * and conditions must provide a specialization of AdjointExtensions via their * data value container, which allows the scheme to operate independently of * the variable arrangements in the element or condition. */ template <class TSparseSpace, class TDenseSpace> class ResidualBasedAdjointBossakScheme : public Scheme<TSparseSpace, TDenseSpace> { public: ///@name Type Definitions ///@{ KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedAdjointBossakScheme); typedef Scheme<TSparseSpace, TDenseSpace> BaseType; typedef typename BaseType::TSystemMatrixType SystemMatrixType; typedef typename BaseType::TSystemVectorType SystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::DofsArrayType DofsArrayType; ///@} ///@name Life Cycle ///@{ /// Constructor. 
ResidualBasedAdjointBossakScheme( Parameters Settings, AdjointResponseFunction::Pointer pResponseFunction ) : mpResponseFunction(pResponseFunction) { Parameters default_parameters(R"({ "name" : "adjoint_bossak", "scheme_type" : "bossak", "alpha_bossak" : -0.3 })"); Settings.ValidateAndAssignDefaults(default_parameters); mBossak.Alpha = Settings["alpha_bossak"].GetDouble(); } /// Destructor. ~ResidualBasedAdjointBossakScheme() override { } ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ void Initialize(ModelPart& rModelPart) override { KRATOS_TRY; BaseType::Initialize(rModelPart); // Allocate auxiliary memory. int num_threads = OpenMPUtils::GetNumThreads(); mLeftHandSide.resize(num_threads); mResponseGradient.resize(num_threads); mFirstDerivsLHS.resize(num_threads); mFirstDerivsResponseGradient.resize(num_threads); mSecondDerivsLHS.resize(num_threads); mSecondDerivsResponseGradient.resize(num_threads); mAdjointValuesVector.resize(num_threads); mAdjointIndirectVector2.resize(num_threads); mAdjointIndirectVector3.resize(num_threads); mAuxAdjointIndirectVector1.resize(num_threads); InitializeNodeNeighbourCount(rModelPart.Nodes()); rModelPart.GetProcessInfo()[BOSSAK_ALPHA] = mBossak.Alpha; KRATOS_CATCH(""); } void InitializeSolutionStep(ModelPart& rModelPart, SystemMatrixType& rA, SystemVectorType& rDx, SystemVectorType& rb) override { KRATOS_TRY; BaseType::InitializeSolutionStep(rModelPart, rA, rDx, rb); const auto& r_current_process_info = rModelPart.GetProcessInfo(); mBossak = CalculateBossakConstants(mBossak.Alpha, GetTimeStep(r_current_process_info)); this->CalculateNodeNeighbourCount(rModelPart); KRATOS_CATCH(""); } void FinalizeSolutionStep(ModelPart& rModelPart, SystemMatrixType& rA, SystemVectorType& rDx, SystemVectorType& rb) override { KRATOS_TRY; BaseType::FinalizeSolutionStep(rModelPart, rA, rDx, rb); this->UpdateAuxiliaryVariable(rModelPart); KRATOS_CATCH(""); } void Update(ModelPart& rModelPart, DofsArrayType& rDofSet, SystemMatrixType& rA, 
SystemVectorType& rDx, SystemVectorType& rb) override { KRATOS_TRY; // Update degrees of freedom: adjoint variables associated to the // residual of the physical problem. this->mpDofUpdater->UpdateDofs(rDofSet, rDx); // Update adjoint variables associated to time integration. this->UpdateTimeSchemeAdjoints(rModelPart); KRATOS_CATCH(""); } void CalculateSystemContributions(Element& rCurrentElement, LocalSystemMatrixType& rLHS_Contribution, LocalSystemVectorType& rRHS_Contribution, Element::EquationIdVectorType& rEquationId, const ProcessInfo& rCurrentProcessInfo) override { KRATOS_TRY; const auto k = OpenMPUtils::ThisThread(); rCurrentElement.GetValuesVector(mAdjointValuesVector[k]); const auto local_size = mAdjointValuesVector[k].size(); if (rRHS_Contribution.size() != local_size) { rRHS_Contribution.resize(local_size, false); } if (rLHS_Contribution.size1() != local_size || rLHS_Contribution.size2() != local_size) { rLHS_Contribution.resize(local_size, local_size, false); } this->CheckAndResizeThreadStorage(local_size); this->CalculateGradientContributions(rCurrentElement, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo); this->CalculateFirstDerivativeContributions( rCurrentElement, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo); this->CalculateSecondDerivativeContributions( rCurrentElement, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo); this->CalculatePreviousTimeStepContributions( rCurrentElement, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo); this->CalculateResidualLocalContributions( rCurrentElement, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo); rCurrentElement.EquationIdVector(rEquationId, rCurrentProcessInfo); KRATOS_CATCH(""); } void CalculateLHSContribution(Element& rCurrentElement, LocalSystemMatrixType& rLHS_Contribution, Element::EquationIdVectorType& rEquationId, const ProcessInfo& rCurrentProcessInfo) override { KRATOS_TRY; LocalSystemVectorType RHS_Contribution; 
CalculateSystemContributions(rCurrentElement, rLHS_Contribution, RHS_Contribution, rEquationId, rCurrentProcessInfo); KRATOS_CATCH(""); } void CalculateSystemContributions(Condition& rCurrentCondition, LocalSystemMatrixType& rLHS_Contribution, LocalSystemVectorType& rRHS_Contribution, Condition::EquationIdVectorType& rEquationId, const ProcessInfo& rCurrentProcessInfo) override { KRATOS_TRY; // NOT TESTED !!! rCurrentCondition.CalculateLocalSystem( rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo); KRATOS_CATCH(""); } void CalculateLHSContribution(Condition& rCurrentCondition, LocalSystemMatrixType& rLHS_Contribution, Condition::EquationIdVectorType& rEquationId, const ProcessInfo& rCurrentProcessInfo) override { KRATOS_TRY; LocalSystemVectorType RHS_Contribution; CalculateSystemContributions(rCurrentCondition, rLHS_Contribution, RHS_Contribution, rEquationId, rCurrentProcessInfo); KRATOS_CATCH(""); } void Clear() override { this->mpDofUpdater->Clear(); } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { return "ResidualBasedAdjointBossakScheme"; } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << Info(); } /// Print object's data. 
void PrintData(std::ostream& rOStream) const override { rOStream << Info(); } ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: struct BossakConstants { double Alpha; double Beta; double Gamma; double C0; double C1; double C2; double C3; double C4; double C5; double C6; double C7; }; ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ BossakConstants mBossak; typename TSparseSpace::DofUpdaterPointerType mpDofUpdater = TSparseSpace::CreateDofUpdater(); AdjointResponseFunction::Pointer mpResponseFunction; std::vector<LocalSystemMatrixType> mLeftHandSide; std::vector<LocalSystemVectorType> mResponseGradient; std::vector<LocalSystemMatrixType> mFirstDerivsLHS; std::vector<LocalSystemVectorType> mFirstDerivsResponseGradient; std::vector<LocalSystemMatrixType> mSecondDerivsLHS; std::vector<LocalSystemVectorType> mSecondDerivsResponseGradient; std::vector<LocalSystemVectorType> mAdjointValuesVector; std::vector<std::vector<IndirectScalar<double>>> mAdjointIndirectVector2; std::vector<std::vector<IndirectScalar<double>>> mAdjointIndirectVector3; std::vector<std::vector<IndirectScalar<double>>> mAuxAdjointIndirectVector1; ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ void CalculateGradientContributions(Element& rCurrentElement, LocalSystemMatrixType& rLHS_Contribution, LocalSystemVectorType& rRHS_Contribution, const ProcessInfo& rCurrentProcessInfo) { int k = OpenMPUtils::ThisThread(); rCurrentElement.CalculateLeftHandSide(mLeftHandSide[k], rCurrentProcessInfo); this->mpResponseFunction->CalculateGradient( rCurrentElement, mLeftHandSide[k], mResponseGradient[k], rCurrentProcessInfo); noalias(rLHS_Contribution) = 
mLeftHandSide[k]; noalias(rRHS_Contribution) = -1. * mResponseGradient[k]; } void CalculateFirstDerivativeContributions(Element& rCurrentElement, LocalSystemMatrixType& rLHS_Contribution, LocalSystemVectorType& rRHS_Contribution, const ProcessInfo& rCurrentProcessInfo) { int k = OpenMPUtils::ThisThread(); rCurrentElement.CalculateFirstDerivativesLHS(mFirstDerivsLHS[k], rCurrentProcessInfo); mpResponseFunction->CalculateFirstDerivativesGradient( rCurrentElement, mFirstDerivsLHS[k], mFirstDerivsResponseGradient[k], rCurrentProcessInfo); noalias(rLHS_Contribution) += mBossak.C6 * mFirstDerivsLHS[k]; noalias(rRHS_Contribution) -= mBossak.C6 * mFirstDerivsResponseGradient[k]; } void CalculateSecondDerivativeContributions(Element& rCurrentElement, LocalSystemMatrixType& rLHS_Contribution, LocalSystemVectorType& rRHS_Contribution, const ProcessInfo& rCurrentProcessInfo) { int k = OpenMPUtils::ThisThread(); auto& r_response_function = *(this->mpResponseFunction); rCurrentElement.CalculateSecondDerivativesLHS(mSecondDerivsLHS[k], rCurrentProcessInfo); mSecondDerivsLHS[k] *= (1.0 - mBossak.Alpha); r_response_function.CalculateSecondDerivativesGradient( rCurrentElement, mSecondDerivsLHS[k], mSecondDerivsResponseGradient[k], rCurrentProcessInfo); noalias(rLHS_Contribution) += mBossak.C7 * mSecondDerivsLHS[k]; noalias(rRHS_Contribution) -= mBossak.C7 * mSecondDerivsResponseGradient[k]; } void CalculatePreviousTimeStepContributions(Element& rCurrentElement, LocalSystemMatrixType& rLHS_Contribution, LocalSystemVectorType& rRHS_Contribution, const ProcessInfo& rCurrentProcessInfo) { const auto& r_geometry = rCurrentElement.GetGeometry(); const auto k = OpenMPUtils::ThisThread(); auto& r_extensions = *rCurrentElement.GetValue(ADJOINT_EXTENSIONS); unsigned local_index = 0; for (unsigned i_node = 0; i_node < r_geometry.PointsNumber(); ++i_node) { auto& r_node = r_geometry[i_node]; r_extensions.GetFirstDerivativesVector(i_node, mAdjointIndirectVector2[k], 1); 
r_extensions.GetSecondDerivativesVector(i_node, mAdjointIndirectVector3[k], 1); r_extensions.GetAuxiliaryVector(i_node, mAuxAdjointIndirectVector1[k], 1); const double weight = 1.0 / r_node.GetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS); for (unsigned d = 0; d < mAdjointIndirectVector2[k].size(); ++d) { rRHS_Contribution[local_index] += weight * (mBossak.C7 * mAuxAdjointIndirectVector1[k][d] + mBossak.C4 * mAdjointIndirectVector2[k][d] + mBossak.C5 * mAdjointIndirectVector3[k][d]); ++local_index; } } } void CalculateResidualLocalContributions(Element& rCurrentElement, LocalSystemMatrixType& rLHS_Contribution, LocalSystemVectorType& rRHS_Contribution, const ProcessInfo& rCurrentProcessInfo) { int k = OpenMPUtils::ThisThread(); auto& r_residual_adjoint = mAdjointValuesVector[k]; rCurrentElement.GetValuesVector(r_residual_adjoint); noalias(rRHS_Contribution) -= prod(rLHS_Contribution, r_residual_adjoint); } void InitializeNodeNeighbourCount(ModelPart::NodesContainerType& rNodes) { // This loop should not be omp parallel // The operation is not threadsafe if the value is uninitialized for (auto& r_node : rNodes) r_node.SetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS, 0.0); } void CalculateNodeNeighbourCount(ModelPart& rModelPart) { // Calculate number of neighbour elements for each node. 
        // --- tail of the neighbour-element counting routine (its signature is above this chunk) ---

        // Reset the per-node neighbour-element counter on every node.
        const int num_nodes = rModelPart.NumberOfNodes();
#pragma omp parallel for
        for (int i = 0; i < num_nodes; ++i)
        {
            Node<3>& r_node = *(rModelPart.Nodes().begin() + i);
            r_node.SetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS, 0.0);
        }

        // Count, for every node, how many elements reference it.
        const int num_elements = rModelPart.NumberOfElements();
#pragma omp parallel for
        for (int i = 0; i < num_elements; ++i)
        {
            Element& r_element = *(rModelPart.Elements().begin() + i);
            Geometry<Node<3>>& r_geometry = r_element.GetGeometry();
            for (unsigned j = 0; j < r_geometry.PointsNumber(); ++j)
            {
                double& r_num_neighbour =
                    r_geometry[j].GetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS);
                // atomic: elements sharing a node may update it from different threads
#pragma omp atomic
                r_num_neighbour += 1.0;
            }
        }

        // Synchronize the counters across partitions (MPI).
        rModelPart.GetCommunicator().AssembleNonHistoricalData(NUMBER_OF_NEIGHBOUR_ELEMENTS);
    }

    // Updates the time-scheme adjoint variables (the "lambda2"/"lambda3" sets
    // exposed by each element's AdjointExtensions) by looping over elements,
    // building per-element contributions and assembling them node-by-node.
    void UpdateTimeSchemeAdjoints(ModelPart& rModelPart)
    {
        KRATOS_TRY;

        // Collect the variable lists the elements' extensions declare for the
        // first- and second-derivative adjoints.
        auto lambda2_vars = GatherVariables(
            rModelPart.Elements(),
            [](const AdjointExtensions& rExtensions, std::vector<const VariableData*>& rVec) {
                rExtensions.GetFirstDerivativesVariables(rVec);
            });
        auto lambda3_vars = GatherVariables(
            rModelPart.Elements(),
            [](const AdjointExtensions& rExtensions, std::vector<const VariableData*>& rVec) {
                return rExtensions.GetSecondDerivativesVariables(rVec);
            });
        // Zero the nodal unknowns before the scatter-add below.
        SetToZero_AdjointVars(lambda2_vars, rModelPart.Nodes());
        SetToZero_AdjointVars(lambda3_vars, rModelPart.Nodes());

        const int number_of_elements = rModelPart.NumberOfElements();
        const ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
        // Scratch vectors; declared outside the loop and made private per thread.
        Vector adjoint2_aux, adjoint3_aux;
        std::vector<IndirectScalar<double>> adjoint2_old, adjoint3_old;
#pragma omp parallel for private(adjoint2_aux, adjoint3_aux, adjoint2_old, adjoint3_old)
        for (int i = 0; i < number_of_elements; ++i)
        {
            Element& r_element = *(rModelPart.ElementsBegin() + i);
            const int k = OpenMPUtils::ThisThread();

            r_element.GetValuesVector(mAdjointValuesVector[k]);
            this->CheckAndResizeThreadStorage(mAdjointValuesVector[k].size());

            // First-derivative LHS and the corresponding response gradient.
            r_element.CalculateFirstDerivativesLHS(mFirstDerivsLHS[k], r_process_info);
            this->mpResponseFunction->CalculateFirstDerivativesGradient(
                r_element, mFirstDerivsLHS[k], mFirstDerivsResponseGradient[k], r_process_info);

            // Second-derivative LHS, scaled by (1 - alpha) per the Bossak scheme.
            r_element.CalculateSecondDerivativesLHS(mSecondDerivsLHS[k], r_process_info);
            mSecondDerivsLHS[k] *= (1.0 - mBossak.Alpha);
            this->mpResponseFunction->CalculateSecondDerivativesGradient(
                r_element, mSecondDerivsLHS[k], mSecondDerivsResponseGradient[k], r_process_info);

            // adjoint2_aux = -dJ/du' - (dR/du')^T lambda   (elemental part)
            if (adjoint2_aux.size() != mFirstDerivsResponseGradient[k].size())
                adjoint2_aux.resize(mFirstDerivsResponseGradient[k].size(), false);
            noalias(adjoint2_aux) = -mFirstDerivsResponseGradient[k] -
                                    prod(mFirstDerivsLHS[k], mAdjointValuesVector[k]);
            // adjoint3_aux = -dJ/du'' - ((1-alpha) dR/du'')^T lambda (elemental part)
            if (adjoint3_aux.size() != mSecondDerivsResponseGradient[k].size())
                adjoint3_aux.resize(mSecondDerivsResponseGradient[k].size(), false);
            noalias(adjoint3_aux) = -mSecondDerivsResponseGradient[k] -
                                    prod(mSecondDerivsLHS[k], mAdjointValuesVector[k]);

            auto& r_extensions = *r_element.GetValue(ADJOINT_EXTENSIONS);
            // Assemble the contributions to the corresponding nodal unknowns.
            unsigned local_index = 0;
            Geometry<Node<3>>& r_geometry = r_element.GetGeometry();
            for (unsigned i_node = 0; i_node < r_geometry.PointsNumber(); ++i_node)
            {
                // Current-step (index 0) and old-step (index 1) nodal adjoint views.
                r_extensions.GetFirstDerivativesVector(
                    i_node, mAdjointIndirectVector2[k], 0);
                r_extensions.GetSecondDerivativesVector(
                    i_node, mAdjointIndirectVector3[k], 0);
                r_extensions.GetFirstDerivativesVector(i_node, adjoint2_old, 1);
                r_extensions.GetSecondDerivativesVector(i_node, adjoint3_old, 1);
                r_extensions.GetAuxiliaryVector(i_node, mAuxAdjointIndirectVector1[k], 1);
                Node<3>& r_node = r_geometry[i_node];
                // Old-step nodal values are shared between neighbouring elements;
                // weight by 1/(#neighbour elements) so their sum is counted once.
                const double weight = 1.0 / r_node.GetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS);
                // Per-node lock: other threads may assemble into the same node.
                r_node.SetLock();
                for (unsigned d = 0; d < mAdjointIndirectVector2[k].size(); ++d)
                {
                    mAdjointIndirectVector2[k][d] += adjoint2_aux[local_index];
                    mAdjointIndirectVector2[k][d] += mBossak.C0 * weight * adjoint2_old[d];
                    mAdjointIndirectVector2[k][d] += mBossak.C1 * weight * adjoint3_old[d];
                    mAdjointIndirectVector3[k][d] += adjoint3_aux[local_index];
                    mAdjointIndirectVector3[k][d] += mBossak.C2 * weight * adjoint2_old[d];
                    mAdjointIndirectVector3[k][d] += mBossak.C3 * weight * adjoint3_old[d];
                    mAdjointIndirectVector3[k][d] += weight * mAuxAdjointIndirectVector1[k][d];
                    ++local_index;
                }
                r_node.UnSetLock();
            }
        }

        // Finalize global assembly
        Assemble_AdjointVars(lambda2_vars, rModelPart.GetCommunicator());
        Assemble_AdjointVars(lambda3_vars, rModelPart.GetCommunicator());
        KRATOS_CATCH("");
    }

    // Updates the auxiliary adjoint variable exposed by the elements'
    // AdjointExtensions (assembled with a minus sign in the element loop below).
    void UpdateAuxiliaryVariable(ModelPart& rModelPart)
    {
        KRATOS_TRY;

        auto aux_vars = GatherVariables(
            rModelPart.Elements(),
            [](const AdjointExtensions& rExtensions, std::vector<const VariableData*>& rOut) {
                return rExtensions.GetAuxiliaryVariables(rOut);
            });
        SetToZero_AdjointVars(aux_vars, rModelPart.Nodes());

        // Loop over elements to assemble the remaining terms
        const int number_of_elements = rModelPart.NumberOfElements();
        const ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
        Vector aux_adjoint_vector;
#pragma omp parallel for private(aux_adjoint_vector)
for (int i = 0; i < number_of_elements; ++i) { Element& r_element = *(rModelPart.ElementsBegin() + i); const int k = OpenMPUtils::ThisThread(); r_element.GetValuesVector(mAdjointValuesVector[k]); this->CheckAndResizeThreadStorage(mAdjointValuesVector[k].size()); r_element.CalculateSecondDerivativesLHS(mSecondDerivsLHS[k], r_process_info); mSecondDerivsLHS[k] *= mBossak.Alpha; this->mpResponseFunction->CalculateSecondDerivativesGradient( r_element, mSecondDerivsLHS[k], mSecondDerivsResponseGradient[k], r_process_info); if (aux_adjoint_vector.size() != mSecondDerivsLHS[k].size1()) aux_adjoint_vector.resize(mSecondDerivsLHS[k].size1(), false); noalias(aux_adjoint_vector) = prod(mSecondDerivsLHS[k], mAdjointValuesVector[k]) + mSecondDerivsResponseGradient[k]; auto& r_extensions = *r_element.GetValue(ADJOINT_EXTENSIONS); // Assemble the contributions to the corresponding nodal unknowns. unsigned local_index = 0; Geometry<Node<3>>& r_geometry = r_element.GetGeometry(); for (unsigned i_node = 0; i_node < r_geometry.PointsNumber(); ++i_node) { Node<3>& r_node = r_geometry[i_node]; r_extensions.GetAuxiliaryVector(i_node, mAuxAdjointIndirectVector1[k], 0); r_node.SetLock(); for (unsigned d = 0; d < mAuxAdjointIndirectVector1[k].size(); ++d) { mAuxAdjointIndirectVector1[k][d] -= aux_adjoint_vector[local_index]; ++local_index; } r_node.UnSetLock(); } } // Finalize global assembly Assemble_AdjointVars(aux_vars, rModelPart.GetCommunicator()); KRATOS_CATCH(""); } void CheckAndResizeThreadStorage(unsigned SystemSize) { const int k = OpenMPUtils::ThisThread(); if (mLeftHandSide[k].size1() != SystemSize || mLeftHandSide[k].size2() != SystemSize) { mLeftHandSide[k].resize(SystemSize, SystemSize, false); } if (mFirstDerivsLHS[k].size1() != SystemSize || mFirstDerivsLHS[k].size2() != SystemSize) { mFirstDerivsLHS[k].resize(SystemSize, SystemSize, false); } if (mSecondDerivsLHS[k].size1() != SystemSize || mSecondDerivsLHS[k].size2() != SystemSize) { 
mSecondDerivsLHS[k].resize(SystemSize, SystemSize, false); } if (mResponseGradient[k].size() != SystemSize) { mResponseGradient[k].resize(SystemSize, false); } if (mFirstDerivsResponseGradient[k].size() != SystemSize) { mFirstDerivsResponseGradient[k].resize(SystemSize, false); } if (mSecondDerivsResponseGradient[k].size() != SystemSize) { mSecondDerivsResponseGradient[k].resize(SystemSize, false); } } static BossakConstants CalculateBossakConstants(double Alpha, double DeltaTime) { BossakConstants bc; bc.Alpha = Alpha; bc.Beta = 0.25 * (1.0 - bc.Alpha) * (1.0 - bc.Alpha); bc.Gamma = 0.5 - bc.Alpha; bc.C0 = 1.0 - bc.Gamma / bc.Beta; bc.C1 = -1.0 / (bc.Beta * DeltaTime); bc.C2 = (1.0 - 0.5 * bc.Gamma / bc.Beta) * DeltaTime; bc.C3 = (1.0 - 0.5 / bc.Beta); bc.C4 = (bc.Beta - bc.Gamma * (bc.Gamma + 0.5)) / (DeltaTime * bc.Beta * bc.Beta); bc.C5 = -1.0 * (bc.Gamma + 0.5) / (DeltaTime * DeltaTime * bc.Beta * bc.Beta); bc.C6 = bc.Gamma / (bc.Beta * DeltaTime); bc.C7 = 1.0 / (DeltaTime * DeltaTime * bc.Beta); return bc; } static double GetTimeStep(const ProcessInfo& rCurrentProcessInfo) { const ProcessInfo& r_last_process_info = rCurrentProcessInfo.GetPreviousSolutionStepInfo(1); // Note: solution is backwards in time, but we still want a positive // time step // (it is the time step in the "forward" Bossak scheme). double time_step = r_last_process_info.GetValue(TIME) - rCurrentProcessInfo.GetValue(TIME); KRATOS_ERROR_IF(time_step <= 0.0) << "Backwards in time solution is not decreasing time from last " "step." << std::endl; return time_step; } struct Hash { std::size_t operator()(const VariableData* const& p) const { return p->Key(); } }; struct Pred { bool operator()(const VariableData* const l, const VariableData* const r) const { return *l == *r; } }; // Gathers variables needed for assembly. 
static std::vector<const VariableData*> GatherVariables( const ModelPart::ElementsContainerType& rElements, std::function<void(const AdjointExtensions&, std::vector<const VariableData*>&)> GetLocalVars) { KRATOS_TRY; const int num_threads = OpenMPUtils::GetNumThreads(); std::vector<const VariableData*> local_vars; std::vector<std::unordered_set<const VariableData*, Hash, Pred>> thread_vars(num_threads); #pragma omp parallel for private(local_vars) for (int i = 0; i < static_cast<int>(rElements.size()); ++i) { auto& r_element = *(rElements.begin() + i); GetLocalVars(*r_element.GetValue(ADJOINT_EXTENSIONS), local_vars); const int k = OpenMPUtils::ThisThread(); thread_vars[k].insert(local_vars.begin(), local_vars.end()); } std::unordered_set<const VariableData*, Hash, Pred> all_vars; for (int i = 0; i < num_threads; ++i) { all_vars.insert(thread_vars[i].begin(), thread_vars[i].end()); } return std::vector<const VariableData*>{all_vars.begin(), all_vars.end()}; KRATOS_CATCH(""); } static void SetToZero_AdjointVars(const std::vector<const VariableData*>& rVariables, ModelPart::NodesContainerType& rNodes) { KRATOS_TRY; for (auto p_variable_data : rVariables) { if (KratosComponents<Variable<array_1d<double, 3>>>::Has( p_variable_data->Name())) { const auto& r_variable = KratosComponents<Variable<array_1d<double, 3>>>::Get( p_variable_data->Name()); VariableUtils().SetHistoricalVariableToZero(r_variable, rNodes); } else if (KratosComponents<Variable<double>>::Has(p_variable_data->Name())) { const auto& r_variable = KratosComponents<Variable<double>>::Get(p_variable_data->Name()); VariableUtils().SetHistoricalVariableToZero(r_variable, rNodes); } else { KRATOS_ERROR << "Variable \"" << p_variable_data->Name() << "\" not found!\n"; } } KRATOS_CATCH(""); } static void Assemble_AdjointVars(const std::vector<const VariableData*>& rVariables, Communicator& rComm) { KRATOS_TRY; for (auto p_variable_data : rVariables) { if (KratosComponents<Variable<array_1d<double, 3>>>::Has( 
p_variable_data->Name())) { const auto& r_variable = KratosComponents<Variable<array_1d<double, 3>>>::Get( p_variable_data->Name()); rComm.AssembleCurrentData(r_variable); } else if (KratosComponents<Variable<double>>::Has(p_variable_data->Name())) { const auto& r_variable = KratosComponents<Variable<double>>::Get(p_variable_data->Name()); rComm.AssembleCurrentData(r_variable); } else { KRATOS_ERROR << "Variable \"" << p_variable_data->Name() << "\" not found!\n"; } } KRATOS_CATCH(""); } ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; /* Class ResidualBasedAdjointBossakScheme */ ///@} ///@name Type Definitions ///@{ ///@} } /* namespace Kratos.*/ #endif /* KRATOS_RESIDUAL_BASED_ADJOINT_BOSSAK_SCHEME_H_INCLUDED defined */
psd.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP SSSSS DDDD % % P P SS D D % % PPPP SSS D D % % P SS D D % % P SSSSS DDDD % % % % % % Read/Write Adobe Photoshop Image Format % % % % Software Design % % Cristy % % Leonard Rosenthol % % July 1992 % % Dirk Lemstra % % December 2013 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/channel.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/constitute.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/module.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/registry.h" #include "MagickCore/quantum-private.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #ifdef MAGICKCORE_ZLIB_DELEGATE #include <zlib.h> #endif #include "psd-private.h" /* Define declaractions. */ #define MaxPSDChannels 56 #define PSDQuantum(x) (((ssize_t) (x)+1) & -2) /* Enumerated declaractions. */ typedef enum { Raw = 0, RLE = 1, ZipWithoutPrediction = 2, ZipWithPrediction = 3 } PSDCompressionType; typedef enum { BitmapMode = 0, GrayscaleMode = 1, IndexedMode = 2, RGBMode = 3, CMYKMode = 4, MultichannelMode = 7, DuotoneMode = 8, LabMode = 9 } PSDImageType; /* Typedef declaractions. 
*/

typedef struct _ChannelInfo
{
  short
    type;             /* channel identifier; interpreted in SetPSDPixel */

  size_t
    size;             /* byte length of this channel's stored data */
} ChannelInfo;

typedef struct _MaskInfo
{
  Image
    *image;           /* decoded mask raster (may be NULL) */

  RectangleInfo
    page;             /* mask placement relative to the layer */

  unsigned char
    background,       /* mask background value */
    flags;
} MaskInfo;

typedef struct _LayerInfo
{
  ChannelInfo
    channel_info[MaxPSDChannels];

  char
    blendkey[4];      /* 4-char PSD blend-mode key */

  Image
    *image;           /* decoded layer raster */

  MaskInfo
    mask;

  Quantum
    opacity;

  RectangleInfo
    page;             /* layer placement in the canvas */

  size_t
    offset_x,
    offset_y;

  unsigned char
    clipping,
    flags,
    name[257],        /* Pascal-style layer name buffer */
    visible;

  unsigned short
    channels;         /* number of entries used in channel_info */

  StringInfo
    *info;            /* additional layer information blob */
} LayerInfo;

/*
  Forward declarations.
*/
static MagickBooleanType
  WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I s P S D                                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsPSD() returns MagickTrue if the image format type, identified by the
%  magick string, is PSD.
%
%  The format of the IsPSD method is:
%
%      MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
%  A description of each parameter follows:
%
%    o magick: compare image format pattern against these bytes.
%
%    o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
  /* Every PSD/PSB file starts with the 4-byte signature "8BPS". */
  if (length < 4)
    return(MagickFalse);
  if (LocaleNCompare((const char *) magick,"8BPS",4) == 0)
    return(MagickTrue);
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   R e a d P S D I m a g e                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadPSDImage() reads an Adobe Photoshop image file and returns it.  It
%  allocates the memory necessary for the new Image structure and returns a
%  pointer to the new image.
%
%  The format of the ReadPSDImage method is:
%
%      Image *ReadPSDImage(image_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Maps an ImageMagick composite operator to the 4-character PSD blend-mode
   key; on little-endian images the key is stored byte-reversed. */
static const char *CompositeOperatorToPSDBlendMode(Image *image)
{
  switch (image->compose)
  {
    case ColorBurnCompositeOp:
      return(image->endian == LSBEndian ? "vidi" : "idiv");
    case ColorDodgeCompositeOp:
      return(image->endian == LSBEndian ? " vid" : "div ");
    case ColorizeCompositeOp:
      return(image->endian == LSBEndian ? "rloc" : "colr");
    case DarkenCompositeOp:
      return(image->endian == LSBEndian ? "krad" : "dark");
    case DifferenceCompositeOp:
      return(image->endian == LSBEndian ? "ffid" : "diff");
    case DissolveCompositeOp:
      return(image->endian == LSBEndian ? "ssid" : "diss");
    case ExclusionCompositeOp:
      return(image->endian == LSBEndian ? "dums" : "smud");
    case HardLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLh" : "hLit");
    case HardMixCompositeOp:
      return(image->endian == LSBEndian ? "xiMh" : "hMix");
    case HueCompositeOp:
      return(image->endian == LSBEndian ? " euh" : "hue ");
    case LightenCompositeOp:
      return(image->endian == LSBEndian ? "etil" : "lite");
    case LinearBurnCompositeOp:
      return(image->endian == LSBEndian ? "nrbl" : "lbrn");
    case LinearDodgeCompositeOp:
      return(image->endian == LSBEndian ? "gddl" : "lddg");
    case LinearLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLl" : "lLit");
    case LuminizeCompositeOp:
      return(image->endian == LSBEndian ? " mul" : "lum ");
    case MultiplyCompositeOp:
      return(image->endian == LSBEndian ? " lum" : "mul ");
    case OverlayCompositeOp:
      return(image->endian == LSBEndian ? "revo" : "over");
    case PinLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLp" : "pLit");
    case SaturateCompositeOp:
      return(image->endian == LSBEndian ? " tas" : "sat ");
    case ScreenCompositeOp:
      return(image->endian == LSBEndian ? "nrcs" : "scrn");
    case SoftLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLs" : "sLit");
    case VividLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLv" : "vLit");
    case OverCompositeOp:
    default:
      return(image->endian == LSBEndian ? "mron" : "norm");
  }
}

/*
  For some reason Photoshop seems to blend semi-transparent pixels with white.
  This method reverts the blending.  This can be disabled by setting the
  option 'psd:alpha-unblend' to off.
*/
static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info,
  Image *image,ExceptionInfo* exception)
{
  const char
    *option;

  MagickBooleanType
    status;

  ssize_t
    y;

  /* Only applies to blended sRGB images; honor the user option. */
  if ((image->alpha_trait != BlendPixelTrait) ||
      (image->colorspace != sRGBColorspace))
    return(MagickTrue);
  option=GetImageOption(image_info,"psd:alpha-unblend");
  if (IsStringFalse(option) != MagickFalse)
    return(MagickTrue);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        gamma;

      register ssize_t
        i;

      gamma=QuantumScale*GetPixelAlpha(image, q);
      /* Undo the white pre-blend on every non-alpha channel. */
      if (gamma != 0.0 && gamma != 1.0)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            if (channel != AlphaPixelChannel)
              q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma);
          }
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}

/* Maps a PSD compression tag onto ImageMagick's CompressionType. */
static inline CompressionType ConvertPSDCompression(
  PSDCompressionType compression)
{
  switch (compression)
  {
    case RLE:
      return RLECompression;
    case ZipWithPrediction:
    case ZipWithoutPrediction:
      return ZipCompression;
    default:
      return NoCompression;
  }
}

/* Scales (or, when revert is set, un-scales) every pixel's alpha by the
   layer opacity. */
static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity,
  MagickBooleanType revert,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    y;

  if (image->debug != MagickFalse)
    (void)
 LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying layer opacity %.20g", (double) opacity);
  /* Fully opaque layers need no work; otherwise make sure alpha exists. */
  if (opacity == OpaqueAlpha)
    return(MagickTrue);
  if (image->alpha_trait != BlendPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* forward: alpha *= opacity/QuantumRange; revert: inverse scaling */
      if (revert == MagickFalse)
        SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))*
          opacity),q);
      else if (opacity > 0)
        SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/
          (MagickRealType) opacity)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}

/* Multiplies (or, with revert, divides) the image's alpha by the intensity of
   the layer mask, composited over a constant background at the mask's page
   offset. */
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
  Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
  Image
    *complete_mask;

  MagickBooleanType
    status;

  PixelInfo
    color;

  ssize_t
    y;

  if (image->alpha_trait == UndefinedPixelTrait)
    return(MagickTrue);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying opacity mask");
  /* Build a full-canvas mask: background colour with the mask composited in. */
  complete_mask=CloneImage(image,0,0,MagickTrue,exception);
  if (complete_mask == (Image *) NULL)
    return(MagickFalse);
  complete_mask->alpha_trait=BlendPixelTrait;
  GetPixelInfo(complete_mask,&color);
  color.red=(MagickRealType) background;
  (void) SetImageColor(complete_mask,&color,exception);
  status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue,
    mask->page.x-image->page.x,mask->page.y-image->page.y,exception);
  if (status == MagickFalse)
    {
      complete_mask=DestroyImage(complete_mask);
      return(status);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register Quantum
      *p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception);
    if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        alpha,
        intensity;

      alpha=(MagickRealType) GetPixelAlpha(image,q);
      intensity=GetPixelIntensity(complete_mask,p);
      if (revert == MagickFalse)
        SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q);
      else if (intensity > 0)
        SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q);
      q+=GetPixelChannels(image);
      p+=GetPixelChannels(complete_mask);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  complete_mask=DestroyImage(complete_mask);
  return(status);
}

/* Stashes the layer's opacity mask image in the image registry under a random
   key and records that key as the "psd:opacity-mask" artifact so the writer
   can recover it. */
static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info,
  ExceptionInfo *exception)
{
  char
    *key;

  RandomInfo
    *random_info;

  StringInfo
    *key_info;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " preserving opacity mask");
  random_info=AcquireRandomInfo();
  key_info=GetRandomKey(random_info,2+1);
  key=(char *) GetStringInfoDatum(key_info);
  /* NOTE(review): the key is requested with length 2+1 yet indices 8 and 9
     are written below; this looks like an out-of-bounds write or a wrong key
     length -- confirm the intended key size and indices. */
  key[8]=(char) layer_info->mask.background;
  key[9]='\0';
  layer_info->mask.image->page.x+=layer_info->page.x;
  layer_info->mask.image->page.y+=layer_info->page.y;
  (void) SetImageRegistry(ImageRegistryType,(const char *) key,
    layer_info->mask.image,exception);
  (void) SetImageArtifact(layer_info->image,"psd:opacity-mask",
    (const char *) key);
  key_info=DestroyStringInfo(key_info);
  random_info=DestroyRandomInfo(random_info);
}

/* Decodes PSD RLE (PackBits-style) compressed pixels; continues on the next
   source line. */
static ssize_t DecodePSDPixels(const size_t
 number_compact_pixels,
  const unsigned char *compact_pixels,const ssize_t depth,
  const size_t number_pixels,unsigned char *pixels)
{
/* Bail out (returning the pixels decoded so far) when the compressed input
   is exhausted. */
#define CheckNumberCompactPixels \
  if (packets == 0) \
    return(i); \
  packets--

/* Bail out (returning the pixels decoded so far) when the output would
   overflow number_pixels. */
#define CheckNumberPixels(count) \
  if (((ssize_t) i + count) > (ssize_t) number_pixels) \
    return(i); \
  i+=count

  int
    pixel;

  register ssize_t
    i,
    j;

  size_t
    length;

  ssize_t
    packets;

  packets=(ssize_t) number_compact_pixels;
  for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); )
  {
    packets--;
    length=(size_t) (*compact_pixels++);
    if (length == 128)
      continue;  /* 128 is a no-op marker */
    if (length > 128)
      {
        /* Run: one source byte repeated (257 - length) times. */
        length=256-length+1;
        CheckNumberCompactPixels;
        pixel=(*compact_pixels++);
        for (j=0; j < (ssize_t) length; j++)
        {
          /* Sub-byte depths expand each byte into several output samples. */
          switch (depth)
          {
            case 1:
            {
              CheckNumberPixels(8);
              *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 0) & 0x01 ? 0U : 255U;
              break;
            }
            case 2:
            {
              CheckNumberPixels(4);
              *pixels++=(unsigned char) ((pixel >> 6) & 0x03);
              *pixels++=(unsigned char) ((pixel >> 4) & 0x03);
              *pixels++=(unsigned char) ((pixel >> 2) & 0x03);
              *pixels++=(unsigned char) ((pixel & 0x03) & 0x03);
              break;
            }
            case 4:
            {
              CheckNumberPixels(2);
              *pixels++=(unsigned char) ((pixel >> 4) & 0xff);
              *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff);
              break;
            }
            default:
            {
              CheckNumberPixels(1);
              *pixels++=(unsigned char) pixel;
              break;
            }
          }
        }
        continue;
      }
    /* Literal: (length + 1) verbatim source bytes. */
    length++;
    for (j=0; j < (ssize_t) length; j++)
    {
      CheckNumberCompactPixels;
      switch (depth)
      {
        case 1:
        {
          CheckNumberPixels(8);
          *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U;
          break;
        }
        case 2:
        {
          CheckNumberPixels(4);
          *pixels++=(*compact_pixels >> 6) & 0x03;
          *pixels++=(*compact_pixels >> 4) & 0x03;
          *pixels++=(*compact_pixels >> 2) & 0x03;
          *pixels++=(*compact_pixels & 0x03) & 0x03;
          break;
        }
        case 4:
        {
          CheckNumberPixels(2);
          *pixels++=(*compact_pixels >> 4) & 0xff;
          *pixels++=(*compact_pixels & 0x0f) & 0xff;
          break;
        }
        default:
        {
          CheckNumberPixels(1);
          *pixels++=(*compact_pixels);
          break;
        }
      }
      compact_pixels++;
    }
  }
  /* Number of output samples produced. */
  return(i);
}

/* Frees the images and info blobs held by an array of LayerInfo records and
   the array itself; always returns NULL for convenient assignment. */
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
  const ssize_t number_layers)
{
  ssize_t
    i;

  for (i=0; i<number_layers; i++)
  {
    if (layer_info[i].image != (Image *) NULL)
      layer_info[i].image=DestroyImage(layer_info[i].image);
    if (layer_info[i].mask.image != (Image *) NULL)
      layer_info[i].mask.image=DestroyImage(layer_info[i].mask.image);
    if (layer_info[i].info != (StringInfo *) NULL)
      layer_info[i].info=DestroyStringInfo(layer_info[i].info);
  }
  return (LayerInfo *) RelinquishMagickMemory(layer_info);
}

/* Bytes per stored sample: 1, 2 or 4 depending on depth / colormap size. */
static inline size_t GetPSDPacketSize(const Image *image)
{
  if (image->storage_class == PseudoClass)
    {
      if (image->colors > 256)
        return(2);
    }
  if (image->depth > 16)
    return(4);
  if (image->depth > 8)
    return(2);
  return(1);
}

/* Reads a length field: 32-bit for PSD (version 1), 64-bit for PSB. */
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
  if (psd_info->version == 1)
    return((MagickSizeType) ReadBlobLong(image));
  return((MagickSizeType) ReadBlobLongLong(image));
}

/* Bytes per image row; 1-bit data is packed eight samples to the byte. */
static inline size_t GetPSDRowSize(Image *image)
{
  if (image->depth == 1)
    return(((image->columns+7)/8)*GetPSDPacketSize(image));
  else
    return(image->columns*GetPSDPacketSize(image));
}

/* Human-readable name for a PSD colour mode (used for logging). */
static const char *ModeToString(PSDImageType type)
{
  switch (type)
  {
    case BitmapMode: return "Bitmap";
    case GrayscaleMode: return "Grayscale";
    case IndexedMode: return "Indexed";
    case RGBMode: return "RGB";
    case CMYKMode: return "CMYK";
    case
 MultichannelMode: return "Multichannel";
    case DuotoneMode: return "Duotone";
    case LabMode: return "L*A*B";
    default: return "unknown";
  }
}

/* Negates all colour channels (alpha excluded); used for CMYK data, which PSD
   stores inverted. */
static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception)
{
  ChannelType
    channel_mask;

  MagickBooleanType
    status;

  channel_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~
    AlphaChannel));
  status=NegateImage(image,MagickFalse,exception);
  (void) SetImageChannelMask(image,channel_mask);
  return(status);
}

/* Walks the "8BIM" image-resource blocks, extracting resolution (0x03ed) and
   the merged-image flag (0x0421); returns the raw blocks as an 8bim profile
   (NULL when too short to contain one). */
static StringInfo *ParseImageResourceBlocks(PSDInfo *psd_info,Image *image,
  const unsigned char *blocks,size_t length)
{
  const unsigned char
    *p;

  ssize_t
    offset;

  StringInfo
    *profile;

  unsigned char
    name_length;

  unsigned int
    count;

  unsigned short
    id,
    short_sans;

  if (length < 16)
    return((StringInfo *) NULL);
  profile=BlobToStringInfo((const unsigned char *) NULL,length);
  SetStringInfoDatum(profile,blocks);
  SetStringInfoName(profile,"8bim");
  for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); )
  {
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=PushShortPixel(MSBEndian,p,&id);
    /* Pascal name, padded to an even byte count. */
    p=PushCharPixel(p,&name_length);
    if ((name_length % 2) == 0)
      name_length++;
    p+=name_length;
    if (p > (blocks+length-4))
      break;
    p=PushLongPixel(MSBEndian,p,&count);
    offset=(ssize_t) count;
    /* Guard against a resource length pointing outside the buffer. */
    if (((p+offset) < blocks) || ((p+offset) > (blocks+length)))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        unsigned short
          resolution;

        /*
          Resolution info.
        */
        if (offset < 16)
          break;
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.x=(double) resolution;
        (void) FormatImageProperty(image,"tiff:XResolution","%*g",
          GetMagickPrecision(),image->resolution.x);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.y=(double) resolution;
        (void) FormatImageProperty(image,"tiff:YResolution","%*g",
          GetMagickPrecision(),image->resolution.y);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        image->units=PixelsPerInchResolution;
        break;
      }
      case 0x0421:
      {
        /* Version-info resource: byte 4 == 0 means no merged image stored. */
        if ((offset > 4) && (*(p+4) == 0))
          psd_info->has_merged_image=MagickFalse;
        p+=offset;
        break;
      }
      default:
      {
        p+=offset;
        break;
      }
    }
    /* Resource data is padded to an even length. */
    if ((offset & 0x01) != 0)
      p++;
  }
  return(profile);
}

/* Maps a 4-character PSD blend-mode key back onto an ImageMagick composite
   operator; unknown or NULL keys fall through to OverCompositeOp. */
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
  if (mode == (const char *) NULL)
    return(OverCompositeOp);
  if (LocaleNCompare(mode,"norm",4) == 0)
    return(OverCompositeOp);
  if (LocaleNCompare(mode,"mul ",4) == 0)
    return(MultiplyCompositeOp);
  if (LocaleNCompare(mode,"diss",4) == 0)
    return(DissolveCompositeOp);
  if (LocaleNCompare(mode,"diff",4) == 0)
    return(DifferenceCompositeOp);
  if (LocaleNCompare(mode,"dark",4) == 0)
    return(DarkenCompositeOp);
  if (LocaleNCompare(mode,"lite",4) == 0)
    return(LightenCompositeOp);
  if (LocaleNCompare(mode,"hue ",4) == 0)
    return(HueCompositeOp);
  if (LocaleNCompare(mode,"sat ",4) == 0)
    return(SaturateCompositeOp);
  if (LocaleNCompare(mode,"colr",4) == 0)
    return(ColorizeCompositeOp);
  if (LocaleNCompare(mode,"lum ",4) == 0)
    return(LuminizeCompositeOp);
  if (LocaleNCompare(mode,"scrn",4) == 0)
    return(ScreenCompositeOp);
  if (LocaleNCompare(mode,"over",4) == 0)
    return(OverlayCompositeOp);
  if (LocaleNCompare(mode,"hLit",4) == 0)
    return(HardLightCompositeOp);
  if (LocaleNCompare(mode,"sLit",4) == 0)
    return(SoftLightCompositeOp);
  if
 (LocaleNCompare(mode,"smud",4) == 0)
    return(ExclusionCompositeOp);
  if (LocaleNCompare(mode,"div ",4) == 0)
    return(ColorDodgeCompositeOp);
  if (LocaleNCompare(mode,"idiv",4) == 0)
    return(ColorBurnCompositeOp);
  if (LocaleNCompare(mode,"lbrn",4) == 0)
    return(LinearBurnCompositeOp);
  if (LocaleNCompare(mode,"lddg",4) == 0)
    return(LinearDodgeCompositeOp);
  if (LocaleNCompare(mode,"lLit",4) == 0)
    return(LinearLightCompositeOp);
  if (LocaleNCompare(mode,"vLit",4) == 0)
    return(VividLightCompositeOp);
  if (LocaleNCompare(mode,"pLit",4) == 0)
    return(PinLightCompositeOp);
  if (LocaleNCompare(mode,"hMix",4) == 0)
    return(HardMixCompositeOp);
  return(OverCompositeOp);
}

/* Reads length bytes into p; on non-MSB images the bytes are then reversed
   in place (XOR-swap) to undo the endian flip. Returns the blob read count. */
static inline ssize_t ReadPSDString(Image *image,char *p,const size_t length)
{
  ssize_t
    count;

  count=ReadBlob(image,length,(unsigned char *) p);
  if ((count == (ssize_t) length) && (image->endian != MSBEndian))
    {
      char
        *q;

      q=p+length;
      for(--q; p < q; ++p, --q)
      {
        *p = *p ^ *q,
        *q = *p ^ *q,
        *p = *p ^ *q;
      }
    }
  return(count);
}

/* Stores one decoded sample into the pixel q.  For colormapped images the
   sample is a palette index (or, type != 0, a palette alpha); otherwise the
   signed channel `type` selects the target channel (negative values are
   PSD special channels, e.g. -1 = alpha). */
static inline void SetPSDPixel(Image *image,const size_t channels,
  const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q,
  ExceptionInfo *exception)
{
  if (image->storage_class == PseudoClass)
    {
      PixelInfo
        *color;

      Quantum
        index;

      index=pixel;
      if (packet_size == 1)
        index=(Quantum) ScaleQuantumToChar(index);
      index=(Quantum) ConstrainColormapIndex(image,(ssize_t) index,
        exception);
      if (type == 0)
        SetPixelIndex(image,index,q);
      if ((type == 0) && (channels > 1))
        return;
      color=image->colormap+(ssize_t) GetPixelIndex(image,q);
      if (type != 0)
        color->alpha=(MagickRealType) pixel;
      SetPixelViaPixelInfo(image,color,q);
      return;
    }
  switch (type)
  {
    case -1:
    {
      SetPixelAlpha(image,pixel,q);
      break;
    }
    case -2:
    case 0:
    {
      SetPixelRed(image,pixel,q);
      break;
    }
    case -3:
    case 1:
    {
      SetPixelGreen(image,pixel,q);
      break;
    }
    case -4:
    case 2:
    {
      SetPixelBlue(image,pixel,q);
      break;
    }
    case 3:
    {
      /* Channel 3 is black for CMYK, otherwise alpha when present. */
      if (image->colorspace == CMYKColorspace)
        SetPixelBlack(image,pixel,q);
      else
        if (image->alpha_trait != UndefinedPixelTrait)
          SetPixelAlpha(image,pixel,q);
      break;
    }
    case 4:
    {
      if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
          (channels > 3))
        break;
      if (image->alpha_trait != UndefinedPixelTrait)
        SetPixelAlpha(image,pixel,q);
      break;
    }
  }
}

/* Decodes one uncompressed row of channel samples into the image's pixel
   cache; 1-bit rows are expanded bit-by-bit (inverted: set bit -> 0). */
static MagickBooleanType ReadPSDChannelPixels(Image *image,
  const size_t channels,const ssize_t row,const ssize_t type,
  const unsigned char *pixels,ExceptionInfo *exception)
{
  Quantum
    pixel;

  register const unsigned char
    *p;

  register Quantum
    *q;

  register ssize_t
    x;

  size_t
    packet_size;

  p=pixels;
  q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
  if (q == (Quantum *) NULL)
    return MagickFalse;
  packet_size=GetPSDPacketSize(image);
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    /* Fetch the next sample: 8-bit, 16-bit big-endian or 32-bit float. */
    if (packet_size == 1)
      pixel=ScaleCharToQuantum(*p++);
    else
      if (packet_size == 2)
        {
          unsigned short
            nibble;

          p=PushShortPixel(MSBEndian,p,&nibble);
          pixel=ScaleShortToQuantum(nibble);
        }
      else
        {
          MagickFloatType
            nibble;

          p=PushFloatPixel(MSBEndian,p,&nibble);
          pixel=ClampToQuantum((MagickRealType) (QuantumRange*nibble));
        }
    if (image->depth > 1)
      {
        SetPSDPixel(image,channels,type,packet_size,pixel,q,exception);
        q+=GetPixelChannels(image);
      }
    else
      {
        ssize_t
          bit,
          number_bits;

        number_bits=(ssize_t) image->columns-x;
        if (number_bits > 8)
          number_bits=8;
        for (bit = 0; bit < (ssize_t) number_bits; bit++)
        {
          SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
            & (0x01 << (7-bit))) != 0 ?
0 : QuantumRange,q,exception);
          q+=GetPixelChannels(image);
          x++;
        }
        /* step back one so the outer loop's x++ lands on the next unread
           pixel after this packed byte */
        if (x != (ssize_t) image->columns)
          x--;
        continue;
      }
  }
  return(SyncAuthenticPixels(image,exception));
}

/*
  Read an uncompressed (RAW) channel: one row of raw samples per image row,
  decoded via ReadPSDChannelPixels().
*/
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
  const ssize_t type,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RAW");

  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memset(pixels,0,row_size*sizeof(*pixels));

  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    /* assume failure until both the read and the decode succeed */
    status=MagickFalse;

    count=ReadBlob(image,row_size,pixels);
    if (count != (ssize_t) row_size)
      break;

    status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception);
    if (status == MagickFalse)
      break;
  }

  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}

/*
  Read the per-row compressed byte counts that precede RLE channel data.
  Returns NULL on allocation failure.
*/
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
  const PSDInfo *psd_info,const size_t size)
{
  MagickOffsetType
    *sizes;

  ssize_t
    y;

  sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
  if(sizes != (MagickOffsetType *) NULL)
    {
      for (y=0; y < (ssize_t) size; y++)
      {
        /* PSD (version 1) stores 16-bit counts, PSB (version 2) 32-bit */
        if (psd_info->version == 1)
          sizes[y]=(MagickOffsetType) ReadBlobShort(image);
        else
          sizes[y]=(MagickOffsetType) ReadBlobLong(image);
      }
    }
  return sizes;
}

/*
  Read an RLE (PackBits) compressed channel.  `sizes` holds one compressed
  byte count per row; each row is read into a scratch buffer, expanded with
  DecodePSDPixels() and stored via ReadPSDChannelPixels().
*/
static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
  const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    length,
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RLE compressed");

  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);

  /* size the scratch buffer to the largest compressed row */
  length=0;
  for (y=0; y < (ssize_t) image->rows; y++)
    if ((MagickOffsetType) length < sizes[y])
      length=(size_t) sizes[y];

  /* reject wildly oversized row counts from corrupt files */
  if (length > (row_size+2048)) /* arbitrary number */
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"InvalidLength",
        image->filename);
    }

  compact_pixels=(unsigned char *) AcquireQuantumMemory(length,
    sizeof(*pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }

  (void) memset(compact_pixels,0,length*sizeof(*compact_pixels));

  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;

    count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
    if (count != (ssize_t) sizes[y])
      break;

    count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
      (ssize_t) (image->depth == 1 ?
123456 : image->depth),row_size,pixels); if (count != (ssize_t) row_size) break; status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels, exception); if (status == MagickFalse) break; } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } #ifdef MAGICKCORE_ZLIB_DELEGATE static void Unpredict8Bit(unsigned char *pixels,const size_t count) { register unsigned char *p; size_t remaining; p=pixels; remaining=count; while (--remaining) { *(p+1)+=*p; p++; } } static void Unpredict16Bit(const Image *image,unsigned char *pixels, const size_t count, const size_t row_size) { register unsigned char *p; size_t length, remaining; p=pixels; remaining=count; while (remaining > 0) { length=image->columns; while (--length) { p[2]+=p[0]+((p[1]+p[3]) >> 8); p[3]+=p[1]; p+=2; } p+=2; remaining-=row_size; } } static void Unpredict32Bit(const Image *image,unsigned char *pixels, unsigned char *output_pixels,const size_t row_size) { register unsigned char *p, *q; register ssize_t y; size_t offset1, offset2, offset3, remaining; unsigned char *start; offset1=image->columns; offset2=2*offset1; offset3=3*offset1; p=pixels; q=output_pixels; for (y=0; y < (ssize_t) image->rows; y++) { start=p; remaining=row_size; while (--remaining) { *(p+1)+=*p; p++; } p=start; remaining=image->columns; while (remaining--) { *(q++)=*p; *(q++)=*(p+offset1); *(q++)=*(p+offset2); *(q++)=*(p+offset3); p++; } p=start+row_size; } } static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels, const ssize_t type,const PSDCompressionType compression, const size_t compact_size,ExceptionInfo *exception) { MagickBooleanType status; register unsigned char *p; size_t count, packet_size, row_size; register ssize_t y; unsigned char *compact_pixels, *pixels; z_stream stream; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is ZIP compressed"); if 
((MagickSizeType) compact_size > GetBlobSize(image)) ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile", image->filename); compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size, sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); packet_size=GetPSDPacketSize(image); row_size=image->columns*packet_size; count=image->rows*row_size; pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) { compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile", image->filename); } memset(&stream,0,sizeof(stream)); stream.data_type=Z_BINARY; stream.next_in=(Bytef *)compact_pixels; stream.avail_in=(uInt) compact_size; stream.next_out=(Bytef *)pixels; stream.avail_out=(uInt) count; if (inflateInit(&stream) == Z_OK) { int ret; while (stream.avail_out > 0) { ret=inflate(&stream,Z_SYNC_FLUSH); if ((ret != Z_OK) && (ret != Z_STREAM_END)) { (void) inflateEnd(&stream); compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(MagickFalse); } if (ret == Z_STREAM_END) break; } (void) inflateEnd(&stream); } if (compression == ZipWithPrediction) { if (packet_size == 1) Unpredict8Bit(pixels,count); else if (packet_size == 2) Unpredict16Bit(image,pixels,count,row_size); else if (packet_size == 4) { unsigned char *output_pixels; output_pixels=(unsigned char *) AcquireQuantumMemory(count, sizeof(*output_pixels)); if (pixels == (unsigned char *) NULL) { compact_pixels=(unsigned char 
*) RelinquishMagickMemory( compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed",image->filename); } Unpredict32Bit(image,pixels,output_pixels,row_size); pixels=(unsigned char *) RelinquishMagickMemory(pixels); pixels=output_pixels; } } status=MagickTrue; p=pixels; for (y=0; y < (ssize_t) image->rows; y++) { status=ReadPSDChannelPixels(image,channels,y,type,p,exception); if (status == MagickFalse) break; p+=row_size; } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } #endif static MagickBooleanType ReadPSDChannel(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info, const size_t channel,const PSDCompressionType compression, ExceptionInfo *exception) { Image *channel_image, *mask; MagickOffsetType offset; MagickBooleanType status; channel_image=image; mask=(Image *) NULL; if ((layer_info->channel_info[channel].type < -1) && (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0)) { const char *option; /* Ignore mask that is not a user supplied layer mask, if the mask is disabled or if the flags have unsupported values. 
*/
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if ((layer_info->channel_info[channel].type != -2) ||
          (layer_info->mask.flags > 2) ||
          ((layer_info->mask.flags & 0x02) &&
           (IsStringTrue(option) == MagickFalse)))
        {
          /* unusable mask: skip over its pixel data and report success */
          (void) SeekBlob(image,(MagickOffsetType)
            layer_info->channel_info[channel].size-2,SEEK_CUR);
          return(MagickTrue);
        }
      mask=CloneImage(image,layer_info->mask.page.width,
        layer_info->mask.page.height,MagickFalse,exception);
      if (mask != (Image *) NULL)
        {
          (void) ResetImagePixels(mask,exception);
          (void) SetImageType(mask,GrayscaleType,exception);
          /* decode this channel into the mask instead of the layer image */
          channel_image=mask;
        }
    }

  offset=TellBlob(image);
  status=MagickFalse;
  switch(compression)
  {
    case Raw:
      status=ReadPSDChannelRaw(channel_image,psd_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,exception);
      break;
    case RLE:
    {
      MagickOffsetType
        *sizes;

      sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      status=ReadPSDChannelRLE(channel_image,psd_info,
        (ssize_t) layer_info->channel_info[channel].type,sizes,exception);
      sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
    }
    break;
    case ZipWithPrediction:
    case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
      status=ReadPSDChannelZip(channel_image,layer_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,compression,
        layer_info->channel_info[channel].size-2,exception);
#else
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (ZLIB)",image->filename);
#endif
      break;
    default:
      (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
        "CompressionNotSupported","'%.20g'",(double) compression);
      break;
  }

  /* always reposition to the end of this channel, even after a partial
     decode, so the next channel starts at the right offset */
  (void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2,
    SEEK_SET);
  if (status == MagickFalse)
    {
      if (mask != (Image *) NULL)
        (void) DestroyImage(mask);
      ThrowBinaryException(CoderError,"UnableToDecompressImage",
        image->filename);
    }

  if (mask != (Image *) NULL)
    {
      if (layer_info->mask.image != (Image *) NULL)
        layer_info->mask.image=DestroyImage(layer_info->mask.image);
      layer_info->mask.image=mask;
    }

  return(status);
}

/*
  Decode all channels of one layer into layer_info->image, then apply the
  layer opacity and (when present) the opacity mask.  Also records the layer
  position/opacity as image artifacts and the layer name as the "label"
  property.
*/
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
  const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
  char
    message[MagickPathExtent];

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    j;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " setting up new layer image");
  if (psd_info->mode != IndexedMode)
    (void) SetImageBackgroundColor(layer_info->image,exception);
  layer_info->image->compose=PSDBlendModeToCompositeOperator(
    layer_info->blendkey);
  /* hidden layers are kept but excluded from compositing */
  if (layer_info->visible == MagickFalse)
    layer_info->image->compose=NoCompositeOp;
  /*
    Set up some hidden attributes for folks that need them.
  */
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.x);
  (void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.y);
  (void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double)
    layer_info->opacity);
  (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
  (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name,
    exception);

  status=MagickTrue;
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for channel %.20g",(double) j);

    /* each channel is preceded by its own compression type word */
    compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
    layer_info->image->compression=ConvertPSDCompression(compression);

    if (layer_info->channel_info[j].type == -1)
      layer_info->image->alpha_trait=BlendPixelTrait;
    status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,
      (size_t) j,compression,exception);

    if (status == MagickFalse)
      break;
  }

  if (status != MagickFalse)
    status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
      MagickFalse,exception);

  if ((status != MagickFalse) &&
      (layer_info->image->colorspace == CMYKColorspace))
    status=NegateCMYK(layer_info->image,exception);

  if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
    {
      const char
        *option;

      layer_info->mask.image->page.x=layer_info->mask.page.x;
      layer_info->mask.image->page.y=layer_info->mask.page.y;
      /* Do not composite the mask when it is disabled */
      if ((layer_info->mask.flags & 0x02) == 0x02)
        layer_info->mask.image->compose=NoCompositeOp;
      else
        status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
          layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
          exception);
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if (IsStringTrue(option) != MagickFalse)
        PreservePSDOpacityMask(image,layer_info,exception);
      layer_info->mask.image=DestroyImage(layer_info->mask.image);
    }

  return(status);
}

/*
  Validate that a layer's channel set covers the colors required by the image
  mode (red for gray, +green/blue for RGB, +black for CMYK), optionally plus
  an alpha channel.  Returns MagickFalse for inconsistent channel layouts.
*/
static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info,
  LayerInfo *layer_info)
{
  int
    channel_type;

  register ssize_t
    i;

  if (layer_info->channels < psd_info->min_channels)
    return(MagickFalse);
  /* start with the set of required channels and clear each as it is seen */
  channel_type=RedChannel;
  if (psd_info->min_channels >= 3)
    channel_type|=(GreenChannel | BlueChannel);
  if (psd_info->min_channels >= 4)
    channel_type|=BlackChannel;
  for (i=0; i < (ssize_t) layer_info->channels; i++)
  {
    short
      type;

    type=layer_info->channel_info[i].type;
    if ((i == 0) && (psd_info->mode == IndexedMode) && (type != 0))
      return(MagickFalse);
    if (type == -1)
      {
        channel_type|=AlphaChannel;
        continue;
      }
    /* layer-mask channels (< -1) do not count towards color coverage */
    if (type < -1)
      continue;
    if (type == 0)
      channel_type&=~RedChannel;
    else
      if (type == 1)
        channel_type&=~GreenChannel;
      else
        if (type == 2)
          channel_type&=~BlueChannel;
        else
          if (type == 3)
            channel_type&=~BlackChannel;
  }
  if (channel_type == 0)
    return(MagickTrue);
  if ((channel_type == AlphaChannel) &&
      (layer_info->channels >= psd_info->min_channels + 1))
    return(MagickTrue);
  return(MagickFalse);
}

/*
  Compact the layer array (dropping entries without an image), link the
  surviving layer images into the image list after `image`, and release the
  layer_info array.
*/
static void AttachPSDLayers(Image *image,LayerInfo *layer_info,
  ssize_t number_layers)
{
  register ssize_t
    i;

  ssize_t
    j;

  for (i=0; i < number_layers; i++)
  {
    if (layer_info[i].image == (Image *) NULL)
      {
        /* shift the remaining entries down over the empty slot */
        for (j=i; j < number_layers - 1; j++)
          layer_info[j] = layer_info[j+1];
        number_layers--;
        i--;
      }
  }
  if (number_layers == 0)
    {
      layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
      return;
    }
  /* thread the surviving layers into a doubly linked image list */
  for (i=0; i < number_layers; i++)
  {
    if (i > 0)
      layer_info[i].image->previous=layer_info[i-1].image;
    if (i < (number_layers-1))
      layer_info[i].image->next=layer_info[i+1].image;
    layer_info[i].image->page=layer_info[i].page;
  }
  image->next=layer_info[0].image;
  layer_info[0].image->previous=image;
  layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
}

/*
  Decide whether layer `index` falls outside the scene range requested via
  image_info; only meaningful when a merged image exists as a fallback.
*/
static inline MagickBooleanType PSDSkipImage(const PSDInfo *psd_info,
  const ImageInfo *image_info,const size_t index)
{
  if (psd_info->has_merged_image == MagickFalse)
    return(MagickFalse);
  if (image_info->number_scenes == 0)
    return(MagickFalse);
  if (index < image_info->scene)
    return(MagickTrue);
  if (index > image_info->scene+image_info->number_scenes-1)
    return(MagickTrue);
  return(MagickFalse);
}

static void CheckMergedImageAlpha(const PSDInfo *psd_info,Image *image)
{
  /*
    The number of layers cannot be used to determine if the merged image
    contains an alpha channel. So we enable it when we think we should.
*/
  if (((psd_info->mode == GrayscaleMode) && (psd_info->channels > 1)) ||
      ((psd_info->mode == RGBMode) && (psd_info->channels > 3)) ||
      ((psd_info->mode == CMYKMode) && (psd_info->channels > 4)))
    image->alpha_trait=BlendPixelTrait;
}

/*
  Walk the layer's "additional info" blocks (signature, 4-byte key, 4-byte
  big-endian length, payload) and extract the Unicode layer name ("luni")
  into layer_info->name; only ASCII code points are accepted.
*/
static void ParseAdditionalInfo(LayerInfo *layer_info)
{
  char
    key[5];

  size_t
    remaining_length;

  unsigned char
    *p;

  unsigned int
    size;

  p=GetStringInfoDatum(layer_info->info);
  remaining_length=GetStringInfoLength(layer_info->info);
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    /* big-endian 32-bit payload length */
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    if ((size_t) size > remaining_length)
      break;
    if (LocaleNCompare(key,"luni",sizeof(key)) == 0)
      {
        unsigned char
          *name;

        unsigned int
          length;

        /* character count of the UTF-16 name */
        length=(unsigned int) (*p++) << 24;
        length|=(unsigned int) (*p++) << 16;
        length|=(unsigned int) (*p++) << 8;
        length|=(unsigned int) (*p++);
        if (length * 2 > size - 4)
          break;
        if (sizeof(layer_info->name) <= length)
          break;
        name=layer_info->name;
        while (length > 0)
        {
          /* Only ASCII strings are supported */
          if (*p++ != '\0')
            break;
          *name++=*p++;
          length--;
        }
        if (length == 0)
          *name='\0';
        break;
      }
    else
      p+=size;
    remaining_length-=(size_t) size;
  }
}

/*
  Determine the byte length of the layer-info section.  When the regular
  length field is zero, probe for "8BIM" keyed blocks (Mt16/Mt32/Mtrn mark
  merged-image transparency; Lr16/Lr32 carry the actual layer info for
  16/32-bit files).
*/
static MagickSizeType GetLayerInfoSize(const PSDInfo *psd_info,Image *image)
{
  char
    type[4];

  MagickSizeType
    size;

  ssize_t
    count;

  size=GetPSDSize(psd_info,image);
  if (size != 0)
    return(size);
  (void) ReadBlobLong(image);
  count=ReadPSDString(image,type,4);
  if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
    return(0);
  count=ReadPSDString(image,type,4);
  if ((count == 4) && ((LocaleNCompare(type,"Mt16",4) == 0) ||
      (LocaleNCompare(type,"Mt32",4) == 0) ||
      (LocaleNCompare(type,"Mtrn",4) == 0)))
    {
      size=GetPSDSize(psd_info,image);
      /* NOTE(review): a non-zero size here yields 0 (no layer info) --
         presumably the block only flags merged-image transparency; confirm */
      if (size != 0)
        return(0);
      image->alpha_trait=BlendPixelTrait;
      count=ReadPSDString(image,type,4);
      if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
        return(0);
      count=ReadPSDString(image,type,4);
    }
  if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) ||
      (LocaleNCompare(type,"Lr32",4) == 0)))
    size=GetPSDSize(psd_info,image);
  return(size);
}

/*
  Parse the layer-info section: per-layer records (geometry, channels, blend
  key, mask, name, additional info) followed by each layer's channel data.
  When skip_layers is set only the alpha presence is noted.
*/
static MagickBooleanType ReadPSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  char
    type[4];

  LayerInfo
    *layer_info;

  MagickSizeType
    size;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    count,
    index,
    j,
    number_layers;

  size=GetLayerInfoSize(psd_info,image);
  if (size == 0)
    {
      CheckMergedImageAlpha(psd_info,image);
      return(MagickTrue);
    }

  layer_info=(LayerInfo *) NULL;
  number_layers=(ssize_t) ReadBlobSignedShort(image);

  if (number_layers < 0)
    {
      /*
        The first alpha channel in the merged result contains the
        transparency data for the merged result.
      */
      number_layers=MagickAbsoluteValue(number_layers);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " negative layer count corrected for");
      image->alpha_trait=BlendPixelTrait;
    }

  /*
    We only need to know if the image has an alpha channel
  */
  if (skip_layers != MagickFalse)
    return(MagickTrue);

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " image contains %.20g layers",(double) number_layers);

  if (number_layers == 0)
    ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
      image->filename);

  layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
    sizeof(*layer_info));
  if (layer_info == (LayerInfo *) NULL)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " allocation of LayerInfo failed");
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info));

  for (i=0; i < number_layers; i++)
  {
    ssize_t
      top,
      left,
      bottom,
      right;

    if
(image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading layer #%.20g",(double) i+1);
    /* layer rectangle on the canvas: top/left/bottom/right */
    top=(ssize_t) ReadBlobSignedLong(image);
    left=(ssize_t) ReadBlobSignedLong(image);
    bottom=(ssize_t) ReadBlobSignedLong(image);
    right=(ssize_t) ReadBlobSignedLong(image);
    if ((right < left) || (bottom < top))
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    layer_info[i].page.y=top;
    layer_info[i].page.x=left;
    layer_info[i].page.width=(size_t) (right-left);
    layer_info[i].page.height=(size_t) (bottom-top);
    layer_info[i].channels=ReadBlobShort(image);
    if (layer_info[i].channels > MaxPSDChannels)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
          image->filename);
      }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
        (double) layer_info[i].page.x,(double) layer_info[i].page.y,
        (double) layer_info[i].page.height,(double)
        layer_info[i].page.width,(double) layer_info[i].channels);
    /* per-channel records: 2-byte type followed by a size field */
    for (j=0; j < (ssize_t) layer_info[i].channels; j++)
    {
      layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
      if ((layer_info[i].channel_info[j].type < -4) ||
          (layer_info[i].channel_info[j].type > 4))
        {
          layer_info=DestroyLayerInfo(layer_info,number_layers);
          ThrowBinaryException(CorruptImageError,"NoSuchImageChannel",
            image->filename);
        }
      layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
        image);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " channel[%.20g]: type=%.20g, size=%.20g",(double) j,
          (double) layer_info[i].channel_info[j].type,
          (double) layer_info[i].channel_info[j].size);
    }
    if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    count=ReadPSDString(image,type,4);
    if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
      {
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer type was %.4s instead of 8BIM", type);
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    count=ReadPSDString(image,layer_info[i].blendkey,4);
    if (count != 4)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
      ReadBlobByte(image));
    layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
    layer_info[i].flags=(unsigned char) ReadBlobByte(image);
    /* flag bit 0x02 marks the layer as hidden */
    layer_info[i].visible=!(layer_info[i].flags & 0x02);
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
        layer_info[i].blendkey,(double) layer_info[i].opacity,
        layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
        layer_info[i].visible ? "true" : "false");
    (void) ReadBlobByte(image); /* filler */

    size=ReadBlobLong(image);
    if (size != 0)
      {
        MagickSizeType
          combined_length,
          length;

        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer contains additional info");
        length=ReadBlobLong(image);
        combined_length=length+4;
        if (length != 0)
          {
            /*
              Layer mask info.
            */
            layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image);
            layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image);
            layer_info[i].mask.page.height=(size_t)
              (ReadBlobSignedLong(image)-layer_info[i].mask.page.y);
            layer_info[i].mask.page.width=(size_t) (
              ReadBlobSignedLong(image)-layer_info[i].mask.page.x);
            layer_info[i].mask.background=(unsigned char) ReadBlobByte(
              image);
            layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
            if (!(layer_info[i].mask.flags & 0x01))
              {
                /* mask offset is absolute; make it layer-relative */
                layer_info[i].mask.page.y=layer_info[i].mask.page.y-
                  layer_info[i].page.y;
                layer_info[i].mask.page.x=layer_info[i].mask.page.x-
                  layer_info[i].page.x;
              }
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
                (double) layer_info[i].mask.page.x,(double)
                layer_info[i].mask.page.y,(double)
                layer_info[i].mask.page.width,(double)
                layer_info[i].mask.page.height,(double)
                ((MagickOffsetType) length)-18);
            /*
              Skip over the rest of the layer mask information.
            */
            if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        length=ReadBlobLong(image);
        combined_length+=length+4;
        if (length != 0)
          {
            /*
              Layer blending ranges info.
            */
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer blending ranges: length=%.20g",(double)
                ((MagickOffsetType) length));
            if (DiscardBlobBytes(image,length) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        /*
          Layer name.
        */
        length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
        combined_length+=length+1;
        if (length > 0)
          (void) ReadBlob(image,(size_t) length++,layer_info[i].name);
        layer_info[i].name[length]='\0';
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer name: %s",layer_info[i].name);
        /* the Pascal-style name is padded to a multiple of 4 bytes */
        if ((length % 4) != 0)
          {
            length=4-(length % 4);
            combined_length+=length;
            /* Skip over the padding of the layer name */
            if (DiscardBlobBytes(image,length) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        /* whatever remains of `size` is the additional-info payload */
        length=(MagickSizeType) size-combined_length;
        if (length > 0)
          {
            unsigned char
              *info;

            if (length > GetBlobSize(image))
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "InsufficientImageDataInFile",image->filename);
              }
            layer_info[i].info=AcquireStringInfo((const size_t) length);
            info=GetStringInfoDatum(layer_info[i].info);
            (void) ReadBlob(image,(const size_t) length,info);
            ParseAdditionalInfo(&layer_info[i]);
          }
      }
  }

  for (i=0; i < number_layers; i++)
  {
    if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0))
      {
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer data is empty");
        if (layer_info[i].info != (StringInfo *) NULL)
          layer_info[i].info=DestroyStringInfo(layer_info[i].info);
        continue;
      }
    /*
      Allocate layered image.
    */
    layer_info[i].image=CloneImage(image,layer_info[i].page.width,
      layer_info[i].page.height,MagickFalse,exception);
    if (layer_info[i].image == (Image *) NULL)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " allocation of image for layer %.20g failed",(double) i);
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      }
    if (layer_info[i].info != (StringInfo *) NULL)
      {
        (void) SetImageProfile(layer_info[i].image,"psd:additional-info",
          layer_info[i].info,exception);
        layer_info[i].info=DestroyStringInfo(layer_info[i].info);
      }
  }

  if (image_info->ping != MagickFalse)
    {
      AttachPSDLayers(image,layer_info,number_layers);
      return(MagickTrue);
    }

  status=MagickTrue;
  index=0;
  for (i=0; i < number_layers; i++)
  {
    if ((layer_info[i].image == (Image *) NULL) || (PSDSkipImage(psd_info,
        image_info,++index) != MagickFalse))
      {
        /* skipped layer: discard its channel data to stay in sync */
        for (j=0; j < (ssize_t) layer_info[i].channels; j++)
        {
          if (DiscardBlobBytes(image,(MagickSizeType)
              layer_info[i].channel_info[j].size) == MagickFalse)
            {
              layer_info=DestroyLayerInfo(layer_info,number_layers);
              ThrowBinaryException(CorruptImageError,
                "UnexpectedEndOfFile",image->filename);
            }
        }
        continue;
      }

    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for layer %.20g",(double) i);

    status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
      exception);
    if (status == MagickFalse)
      break;

    status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
      (MagickSizeType) number_layers);
    if (status == MagickFalse)
      break;
  }

  if (status != MagickFalse)
    AttachPSDLayers(image,layer_info,number_layers);
  else
    layer_info=DestroyLayerInfo(layer_info,number_layers);

  return(status);
}

/*
  Public entry point: read the layer section unless coder policy forbids
  reading PSD (in which case the layers are silently skipped).
*/
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  PolicyDomain
    domain;

  PolicyRights
    rights;

  domain=CoderPolicyDomain;
rights=ReadPolicyRights;
  if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse)
    return(MagickTrue);
  return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse,
    exception));
}

/*
  Decode the flattened (merged) composite image that follows the layer
  section.  Only Raw and RLE compression are supported here; the channels
  are read planar, in order, with the second channel of a two-channel
  (gray+alpha) image treated as alpha.
*/
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
  Image *image,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  MagickOffsetType
    *sizes;

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  register ssize_t
    i;

  if ((image_info->number_scenes != 0) && (image_info->scene != 0))
    return(MagickTrue);
  compression=(PSDCompressionType) ReadBlobMSBShort(image);
  image->compression=ConvertPSDCompression(compression);

  if (compression != Raw && compression != RLE)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
      return(MagickFalse);
    }

  sizes=(MagickOffsetType *) NULL;
  if (compression == RLE)
    {
      /* RLE stores one compressed-row byte count per row per channel */
      sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }

  status=MagickTrue;
  for (i=0; i < (ssize_t) psd_info->channels; i++)
  {
    ssize_t
      type;

    type=i;
    /* for 2-channel (gray+alpha) images the second channel is alpha */
    if ((type == 1) && (psd_info->channels == 2))
      type=-1;

    if (compression == RLE)
      status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows),
        exception);
    else
      status=ReadPSDChannelRaw(image,psd_info->channels,type,exception);

    if (status != MagickFalse)
      status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
        psd_info->channels);

    if (status == MagickFalse)
      break;
  }

  if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
    status=NegateCMYK(image,exception);

  if (status != MagickFalse)
    status=CorrectPSDAlphaBlend(image_info,image,exception);

  sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);

  return(status);
}

/*
  Top-level PSD/PSB reader: parses the file header, colormap, resources,
  layers and merged image.  (Body continues beyond this view.)
*/
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    skip_layers;

  MagickOffsetType
    offset;

  MagickSizeType
    length;
  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  register ssize_t
    i;

  size_t
    imageListLength;

  ssize_t
    count;

  StringInfo
    *profile;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Read image header.
  */
  image->endian=MSBEndian;
  count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
  psd_info.version=ReadBlobMSBShort(image);
  /* "8BPS" magic; version 1 is PSD, version 2 is PSB (large document). */
  if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
      ((psd_info.version != 1) && (psd_info.version != 2)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  (void) ReadBlob(image,6,psd_info.reserved);
  psd_info.channels=ReadBlobMSBShort(image);
  if (psd_info.channels < 1)
    ThrowReaderException(CorruptImageError,"MissingImageChannel");
  if (psd_info.channels > MaxPSDChannels)
    ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
  psd_info.rows=ReadBlobMSBLong(image);
  psd_info.columns=ReadBlobMSBLong(image);
  /* Version-1 (PSD) files are limited to 30000x30000 pixels. */
  if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
      (psd_info.columns > 30000)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.depth=ReadBlobMSBShort(image);
  if ((psd_info.depth != 1) && (psd_info.depth != 8) &&
      (psd_info.depth != 16) && (psd_info.depth != 32))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.mode=ReadBlobMSBShort(image);
  if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
      (double) psd_info.columns,(double) psd_info.rows,(double)
      psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
      psd_info.mode));
  if (EOFBlob(image) != MagickFalse)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Initialize image.
  */
  image->depth=psd_info.depth;
  image->columns=psd_info.columns;
  image->rows=psd_info.rows;
  status=SetImageExtent(image,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  status=ResetImagePixels(image,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  /* min_channels is the fewest channels this mode can legally carry. */
  psd_info.min_channels=3;
  if (psd_info.mode == LabMode)
    (void) SetImageColorspace(image,LabColorspace,exception);
  if (psd_info.mode == CMYKMode)
    {
      psd_info.min_channels=4;
      (void) SetImageColorspace(image,CMYKColorspace,exception);
    }
  else
    if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) ||
        (psd_info.mode == DuotoneMode))
      {
        if (psd_info.depth != 32)
          {
            status=AcquireImageColormap(image,MagickMin((size_t)
              (psd_info.depth < 16 ? 256 : 65536), MaxColormapSize),exception);
            if (status == MagickFalse)
              ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " Image colormap allocated");
          }
        psd_info.min_channels=1;
        (void) SetImageColorspace(image,GRAYColorspace,exception);
      }
    else
      if (psd_info.mode == IndexedMode)
        psd_info.min_channels=1;
  if (psd_info.channels < psd_info.min_channels)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Read PSD raster colormap only present for indexed and duotone images.
  */
  length=ReadBlobMSBLong(image);
  if ((psd_info.mode == IndexedMode) && (length < 3))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (length != 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading colormap");
      if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32))
        {
          /*
            Duotone image data; the format of this data is undocumented.
            32 bits per pixel; the colormap is ignored.
          */
          (void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR);
        }
      else
        {
          size_t
            number_colors;

          /*
            Read PSD raster colormap.
          */
          number_colors=(size_t) length/3;
          if (number_colors > 65536)
            ThrowReaderException(CorruptImageError,"ImproperImageHeader");
          if (AcquireImageColormap(image,number_colors,exception) == MagickFalse)
            ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
          /* The colormap is stored planar: all reds, then greens, then blues. */
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].red=(MagickRealType) ScaleCharToQuantum(
              (unsigned char) ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].green=(MagickRealType) ScaleCharToQuantum(
              (unsigned char) ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum(
              (unsigned char) ReadBlobByte(image));
          image->alpha_trait=UndefinedPixelTrait;
        }
    }
  if ((image->depth == 1) && (image->storage_class != PseudoClass))
    ThrowReaderException(CorruptImageError, "ImproperImageHeader");
  psd_info.has_merged_image=MagickTrue;
  profile=(StringInfo *) NULL;
  length=ReadBlobMSBLong(image);
  if (length != 0)
    {
      unsigned char
        *blocks;

      /*
        Image resources block.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading image resource blocks - %.20g bytes",(double)
          ((MagickOffsetType) length));
      if (length > GetBlobSize(image))
        ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
      blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
        sizeof(*blocks));
      if (blocks == (unsigned char *) NULL)
        ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
      count=ReadBlob(image,(size_t) length,blocks);
      if ((count != (ssize_t) length) || (length < 4) ||
          (LocaleNCompare((char *) blocks,"8BIM",4) != 0))
        {
          blocks=(unsigned char *) RelinquishMagickMemory(blocks);
          ThrowReaderException(CorruptImageError,"ImproperImageHeader");
        }
      profile=ParseImageResourceBlocks(&psd_info,image,blocks,(size_t) length);
      blocks=(unsigned char *) RelinquishMagickMemory(blocks);
    }
  /*
    Layer and mask block.
  */
  length=GetPSDSize(&psd_info,image);
  if (length == 8)
    {
      length=ReadBlobMSBLong(image);
      length=ReadBlobMSBLong(image);
    }
  offset=TellBlob(image);
  skip_layers=MagickFalse;
  if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
      (psd_info.has_merged_image != MagickFalse))
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " read composite only");
      skip_layers=MagickTrue;
    }
  if (length == 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " image has no layers");
    }
  else
    {
      if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers,
          exception) != MagickTrue)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
      /*
        Skip the rest of the layer and mask information.
      */
      (void) SeekBlob(image,offset+length,SEEK_SET);
    }
  /*
    If we are only "pinging" the image, then we're done - so return.
  */
  if (EOFBlob(image) != MagickFalse)
    {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
    }
  if (image_info->ping != MagickFalse)
    {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      (void) CloseBlob(image);
      return(GetFirstImageInList(image));
    }
  /*
    Read the precombined layer, present for PSD < 4 compatibility.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " reading the precombined layer");
  imageListLength=GetImageListLength(image);
  if ((psd_info.has_merged_image != MagickFalse) || (imageListLength == 1))
    psd_info.has_merged_image=(MagickBooleanType) ReadPSDMergedImage(
      image_info,image,&psd_info,exception);
  /* No usable composite and only one frame: re-read the layers we skipped. */
  if ((psd_info.has_merged_image == MagickFalse) && (imageListLength == 1) &&
      (length != 0))
    {
      (void) SeekBlob(image,offset,SEEK_SET);
      status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse,
        exception);
      if (status != MagickTrue)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
    }
  if (psd_info.has_merged_image == MagickFalse)
    {
      Image
        *merged;

      if (imageListLength == 1)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
        }
      /* No composite was stored: flatten the layers ourselves. */
      image->background_color.alpha=(MagickRealType) TransparentAlpha;
      image->background_color.alpha_trait=BlendPixelTrait;
      (void) SetImageBackgroundColor(image,exception);
      merged=MergeImageLayers(image,FlattenLayer,exception);
      ReplaceImageInList(&image,merged);
    }
  if (profile != (StringInfo *) NULL)
    {
      Image
        *next;

      /* Attach the resource-block profile to every frame we kept. */
      i=0;
      next=image;
      while (next != (Image *) NULL)
      {
        if (PSDSkipImage(&psd_info,image_info,i++) == MagickFalse)
          (void) SetImageProfile(next,GetStringInfoName(profile),profile,
            exception);
        next=next->next;
      }
      profile=DestroyStringInfo(profile);
    }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e g i s t e r P S D I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RegisterPSDImage() adds properties for the PSD image format to
%  the list of supported formats.  The properties include the image format
%  tag, a method to read and/or write the format, whether the format
%  supports the saving of more than one frame to the same file or blob,
%  whether the format supports native in-memory I/O, and a brief
%  description of the format.
%
%  The format of the RegisterPSDImage method is:
%
%      size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
  MagickInfo
    *entry;

  /* PSB: the large-document (version 2) variant of the format. */
  entry=AcquireMagickInfo("PSD","PSB","Adobe Large Document Format");
  entry->decoder=(DecodeImageHandler *) ReadPSDImage;
  entry->encoder=(EncodeImageHandler *) WritePSDImage;
  entry->magick=(IsImageFormatHandler *) IsPSD;
  entry->flags|=CoderDecoderSeekableStreamFlag;
  entry->flags|=CoderEncoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  entry=AcquireMagickInfo("PSD","PSD","Adobe Photoshop bitmap");
  entry->decoder=(DecodeImageHandler *) ReadPSDImage;
  entry->encoder=(EncodeImageHandler *) WritePSDImage;
  entry->magick=(IsImageFormatHandler *) IsPSD;
  entry->flags|=CoderDecoderSeekableStreamFlag;
  entry->flags|=CoderEncoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   U n r e g i s t e r P S D I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnregisterPSDImage() removes format registrations made by the
%  PSD module from the list of supported formats.
%
%  The format of the UnregisterPSDImage method is:
%
%      UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
  (void) UnregisterMagickInfo("PSB");
  (void) UnregisterMagickInfo("PSD");
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e P S D I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
%  The format of the WritePSDImage method is:
%
%      MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image_info: the image info.
%
%    o image:  The image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Write a zero placeholder offset: 16-bit for PSD (v1), 32-bit for PSB. */
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  if (psd_info->version == 1)
    return(WriteBlobMSBShort(image,(unsigned short) offset));
  return(WriteBlobMSBLong(image,(unsigned int) offset));
}

/* Patch a previously reserved offset field at `offset', then restore the
   current blob position. */
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    current_offset;

  ssize_t
    result;

  current_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  if (psd_info->version == 1)
    result=WriteBlobMSBShort(image,(unsigned short) size);
  else
    result=WriteBlobMSBLong(image,(unsigned int) size);
  (void) SeekBlob(image,current_offset,SEEK_SET);
  return(result);
}

/* Write a size field: 32-bit for PSD (v1), 64-bit for PSB. */
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  if (psd_info->version == 1)
    return(WriteBlobLong(image,(unsigned int) size));
  return(WriteBlobLongLong(image,size));
}

/* Patch a previously reserved size field at `offset', then restore the
   current blob position. */
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    current_offset;

  ssize_t
    result;

  current_offset=TellBlob(image);
  (void)
    SeekBlob(image,offset,SEEK_SET);
  result=SetPSDSize(psd_info,image,size);
  (void) SeekBlob(image,current_offset,SEEK_SET);
  return(result);
}

/*
  PSDPackbitsEncodeImage() compresses `length' bytes of `pixels' into
  `compact_pixels' with PackBits run-length encoding and returns the number
  of compressed bytes written (including the trailing EOD marker).
*/
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
  const unsigned char *pixels,unsigned char *compact_pixels,
  ExceptionInfo *exception)
{
  int
    count;

  register ssize_t
    i,
    j;

  register unsigned char
    *q;

  unsigned char
    *packbits;

  /*
    Compress pixels with Packbits encoding.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  for (i=(ssize_t) length; i != 0; )
  {
    switch (i)
    {
      case 1:
      {
        /* Single trailing byte: literal run of one. */
        i--;
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2:
      {
        /* Two trailing bytes: literal run of two. */
        i-=2;
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3:
      {
        i-=3;
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /* Three identical bytes: packed run of three. */
            *q++=(unsigned char) ((256-3)+1);
            *q++=(*pixels);
            break;
          }
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default:
      {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /*
              Packed run.
            */
            count=3;
            while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
            {
              count++;
              if (count >= 127)
                break;
            }
            i-=count;
            *q++=(unsigned char) ((256-count)+1);
            *q++=(*pixels);
            pixels+=count;
            break;
          }
        /*
          Literal run.
        */
        count=0;
        while ((*(pixels+count) != *(pixels+count+1)) ||
               (*(pixels+count+1) != *(pixels+count+2)))
        {
          packbits[count+1]=pixels[count];
          count++;
          if (((ssize_t) count >= (i-3)) || (count >= 127))
            break;
        }
        i-=count;
        *packbits=(unsigned char) (count-1);
        for (j=0; j <= (ssize_t) count; j++)
          *q++=packbits[j];
        pixels+=count;
        break;
      }
    }
  }
  *q++=(unsigned char) 128;  /* EOD marker */
  packbits=(unsigned char *) RelinquishMagickMemory(packbits);
  return((size_t) (q-compact_pixels));
}

/*
  WriteCompressionStart() writes the 2-byte compression tag for a channel
  block and, for RLE, reserves the per-scanline byte-count table (patched
  later with the real counts).  Returns the number of bytes written.
*/
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const CompressionType compression,
  const ssize_t channels)
{
  size_t
    length;

  ssize_t
    i,
    y;

  if (compression == RLECompression)
    {
      length=(size_t) WriteBlobShort(image,RLE);
      /* One scanline byte count per row per channel; patched later. */
      for (i=0; i < channels; i++)
        for (y=0; y < (ssize_t) next_image->rows; y++)
          length+=SetPSDOffset(psd_info,image,0);
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  else
    if (compression == ZipCompression)
      length=(size_t) WriteBlobShort(image,ZipWithoutPrediction);
#endif
  else
    length=(size_t) WriteBlobShort(image,Raw);
  return(length);
}

/*
  WritePSDChannel() writes one channel (selected by `quantum_type') of
  `next_image' to the blob using the requested compression.  Returns the
  number of bytes written, or 0 on failure.
*/
static size_t WritePSDChannel(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  const QuantumType quantum_type, unsigned char *compact_pixels,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  const CompressionType compression,ExceptionInfo *exception)
{
  MagickBooleanType
    monochrome;

  QuantumInfo
    *quantum_info;

  register const Quantum
    *p;

  register ssize_t
    i;

  size_t
    count,
    length;

  ssize_t
    y;

  unsigned char
    *pixels;

#ifdef MAGICKCORE_ZLIB_DELEGATE
  int
    flush,
    level;

  unsigned char
    *compressed_pixels;

  z_stream
    stream;

  compressed_pixels=(unsigned char *) NULL;
  flush=Z_NO_FLUSH;
#endif
  count=0;
  if (separate != MagickFalse)
    {
      /* Separate (layer) channels carry their own compression tag. */
      size_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,1);
    }
  /* PSD has no depths between 8 and 16. */
  if (next_image->depth > 8)
    next_image->depth=16;
  monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
    MagickTrue : MagickFalse;
  quantum_info=AcquireQuantumInfo(image_info,next_image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  pixels=(unsigned char *) GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      compressed_pixels=(unsigned char *) AcquireQuantumMemory(
        MagickMinBufferExtent,sizeof(*compressed_pixels));
      if (compressed_pixels == (unsigned char *) NULL)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
      memset(&stream,0,sizeof(stream));
      stream.data_type=Z_BINARY;
      level=Z_DEFAULT_COMPRESSION;
      /* quality 1-9 maps directly onto the zlib compression level. */
      if ((image_info->quality > 0 && image_info->quality < 10))
        level=(int) image_info->quality;
      if (deflateInit(&stream,level) != Z_OK)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          compressed_pixels=(unsigned char *) RelinquishMagickMemory(
            compressed_pixels);
          return(0);
        }
    }
#endif
  for (y=0; y < (ssize_t) next_image->rows; y++)
  {
    p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
      quantum_type,pixels,exception);
    /* Bitmap-mode PSD stores 1-bit samples inverted. */
    if (monochrome != MagickFalse)
      for (i=0; i < (ssize_t) length; i++)
        pixels[i]=(~pixels[i]);
    if (compression == RLECompression)
      {
        length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels,
          exception);
        count+=WriteBlob(image,length,compact_pixels);
        /* Patch this scanline's byte count into the reserved table. */
        size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
      }
#ifdef MAGICKCORE_ZLIB_DELEGATE
    else
      if (compression == ZipCompression)
        {
          stream.avail_in=(uInt) length;
          stream.next_in=(Bytef *) pixels;
          if (y == (ssize_t) next_image->rows-1)
            flush=Z_FINISH;
          do
          {
            stream.avail_out=(uInt) MagickMinBufferExtent;
            stream.next_out=(Bytef *) compressed_pixels;
            if (deflate(&stream,flush) == Z_STREAM_ERROR)
              break;
            length=(size_t) MagickMinBufferExtent-stream.avail_out;
            if (length > 0)
              count+=WriteBlob(image,length,compressed_pixels);
          } while (stream.avail_out == 0);
        }
#endif
    else
      count+=WriteBlob(image,length,pixels);
  }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      (void) deflateEnd(&stream);
      compressed_pixels=(unsigned char *) RelinquishMagickMemory(
        compressed_pixels);
    }
#endif
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(count);
}

/*
  AcquireCompactPixels() allocates a scanline buffer large enough for the
  worst-case PackBits expansion of one row of `image'; returns NULL (with an
  exception raised) on allocation failure.
*/
static unsigned char *AcquireCompactPixels(const Image *image,
  ExceptionInfo *exception)
{
  size_t
    packet_size;

  unsigned char
    *compact_pixels;

  packet_size=image->depth > 8UL ? 2UL : 1UL;
  compact_pixels=(unsigned char *) AcquireQuantumMemory((9*
    image->columns)+1,packet_size*sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
    }
  return(compact_pixels);
}

/*
  WritePSDChannels() writes every channel of `next_image' (index or
  gray/RGB/CMYK plus optional alpha and opacity mask).  When `separate' is
  MagickTrue the channels belong to a layer record and each carries its own
  compression tag and size field.  Returns bytes written, or 0 on failure.
*/
static size_t WritePSDChannels(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  ExceptionInfo *exception)
{
  CompressionType
    compression;

  Image
    *mask;

  MagickOffsetType
    rows_offset;

  size_t
    channels,
    count,
    length,
    offset_length;

  unsigned char
    *compact_pixels;

  count=0;
  offset_length=0;
  rows_offset=0;
  compact_pixels=(unsigned char *) NULL;
  compression=next_image->compression;
  if (image_info->compression != UndefinedCompression)
    compression=image_info->compression;
  if (compression == RLECompression)
    {
      compact_pixels=AcquireCompactPixels(next_image,exception);
      if (compact_pixels == (unsigned char *) NULL)
        return(0);
    }
  channels=1;
  if (separate == MagickFalse)
    {
      /* Merged image: one compression tag covers all channels. */
      if ((next_image->storage_class != PseudoClass) ||
          (IsImageGray(next_image) != MagickFalse))
        {
          if (IsImageGray(next_image) == MagickFalse)
            channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 :
              3);
          if (next_image->alpha_trait != UndefinedPixelTrait)
            channels++;
        }
      rows_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,
        (ssize_t) channels);
      /* Size of one channel's slice of the RLE byte-count table. */
      offset_length=(next_image->rows*(psd_info->version == 1 ?
        2 : 4));
    }
  size_offset+=2;
  if ((next_image->storage_class == PseudoClass) &&
      (IsImageGray(next_image) == MagickFalse))
    {
      /* Palette image: a single index channel. */
      length=WritePSDChannel(psd_info,image_info,image,next_image,
        IndexQuantum,compact_pixels,rows_offset,separate,compression,
        exception);
      if (separate != MagickFalse)
        size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
      else
        rows_offset+=offset_length;
      count+=length;
    }
  else
    {
      if (IsImageGray(next_image) != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GrayQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
      else
        {
          /* PSD stores CMYK inverted; negate before and after writing. */
          if (next_image->colorspace == CMYKColorspace)
            (void) NegateCMYK(next_image,exception);
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            RedQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GreenQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            BlueQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          if (next_image->colorspace == CMYKColorspace)
            {
              length=WritePSDChannel(psd_info,image_info,image,next_image,
                BlackQuantum,compact_pixels,rows_offset,separate,compression,
                exception);
              if (separate != MagickFalse)
                size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
              else
                rows_offset+=offset_length;
              count+=length;
            }
        }
      if (next_image->alpha_trait != UndefinedPixelTrait)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            AlphaQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
    }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  if (next_image->colorspace == CMYKColorspace)
    (void) NegateCMYK(next_image,exception);
  if (separate != MagickFalse)
    {
      const char
        *property;

      /* A layer may carry an extra opacity-mask channel held in the image
         registry under the "psd:opacity-mask" artifact. */
      property=GetImageArtifact(next_image,"psd:opacity-mask");
      if (property != (const char *) NULL)
        {
          mask=(Image *) GetImageRegistry(ImageRegistryType,property,
            exception);
          if (mask != (Image *) NULL)
            {
              if (compression == RLECompression)
                {
                  compact_pixels=AcquireCompactPixels(mask,exception);
                  if (compact_pixels == (unsigned char *) NULL)
                    return(0);
                }
              length=WritePSDChannel(psd_info,image_info,image,mask,
                RedQuantum,compact_pixels,rows_offset,MagickTrue,compression,
                exception);
              (void) WritePSDSize(psd_info,image,length,size_offset);
              count+=length;
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
            }
        }
    }
  return(count);
}

/*
  WritePascalString() writes `value' as a Pascal string (length byte followed
  by characters) padded with zeros to a multiple of `padding' bytes; returns
  the number of bytes written.
*/
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
  size_t
    count,
    length;

  register ssize_t
    i;

  /*
    Max length is 255.
  */
  count=0;
  length=(strlen(value) > 255UL ) ?
    255UL : strlen(value);
  if (length == 0)
    count+=WriteBlobByte(image,0);
  else
    {
      count+=WriteBlobByte(image,(unsigned char) length);
      count+=WriteBlob(image,length,(const unsigned char *) value);
    }
  /* Account for the length byte, then pad to a multiple of `padding'. */
  length++;
  if ((length % padding) == 0)
    return(count);
  for (i=0; i < (ssize_t) (padding-(length % padding)); i++)
    count+=WriteBlobByte(image,0);
  return(count);
}

/*
  WriteResolutionResourceBlock() writes the 0x03ED resolution image-resource
  block (fixed-point 16.16 DPI values plus display units).
*/
static void WriteResolutionResourceBlock(Image *image)
{
  double
    x_resolution,
    y_resolution;

  unsigned short
    units;

  if (image->units == PixelsPerCentimeterResolution)
    {
      /* Convert pixels/cm to pixels/inch, scaled to 16.16 fixed point. */
      x_resolution=2.54*65536.0*image->resolution.x+0.5;
      y_resolution=2.54*65536.0*image->resolution.y+0.5;
      units=2;
    }
  else
    {
      x_resolution=65536.0*image->resolution.x+0.5;
      y_resolution=65536.0*image->resolution.y+0.5;
      units=1;
    }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);
  (void) WriteBlobMSBShort(image,0);
  (void) WriteBlobMSBLong(image,16); /* resource size */
  (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,units); /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,units); /* height unit */
}

/* Write a channel-info record (id plus zero size placeholder, patched
   later); returns bytes written. */
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  size_t
    count;

  count=(size_t) WriteBlobShort(image,(const unsigned short) channel);
  count+=SetPSDSize(psd_info,image,0);
  return(count);
}

/*
  RemoveICCProfileFromResourceBlock() strips the ICC profile resource
  (id 0x040f) from an 8BIM resource block in place.
*/
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    q=(unsigned char *) p;
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    if (id == 0x0000040f)
      {
        ssize_t
          quantum;

        /* Resource payloads are padded to even length; +12 covers header. */
        quantum=PSDQuantum(count)+12;
        if ((quantum >= 12) && (quantum < (ssize_t) length))
          {
            if ((q+quantum < (datum+length-16)))
              (void) memmove(q,q+quantum,length-quantum-(q-datum));
            SetStringInfoLength(bim_profile,length-quantum);
          }
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)
      p++;
  }
}

/*
  RemoveResolutionFromResourceBlock() strips the resolution resource
  (id 0x03ED) from an 8BIM resource block in place, since the writer emits
  its own resolution block.
*/
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    ssize_t
      cnt;

    q=(unsigned char *) p;
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      return;
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    cnt=PSDQuantum(count);
    if (cnt < 0)
      return;
    if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) &&
        ((ssize_t) length-(cnt+12)-(q-datum)) > 0)
      {
        (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum));
        SetStringInfoLength(bim_profile,length-(cnt+12));
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)
      p++;
  }
}

/*
  GetAdditionalInformation() filters the "psd:additional-info" profile.
  Depending on the identically named image option it returns the whole
  profile ("all"), a copy restricted to the whitelisted keys ("selective"),
  or destroys it (any other value).
*/
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
#define PSDKeySize 5
#define PSDAllowedLength 36

  char
    key[PSDKeySize];

  /*
    Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/
  */
  const char
    allowed[PSDAllowedLength][PSDKeySize] = {
      "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
      "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
      "lfx2", "luni", "lrFX", "lspf",
      "lyid", "lyvr", "mixr", "nvrt", "phfl", "post", "PtFl", "selc", "shpa",
      "sn2P", "SoCo", "thrs", "tsly", "vibA"
    },
    *option;

  const StringInfo
    *info;

  MagickBooleanType
    found;

  register size_t
    i;

  size_t
    remaining_length,
    length;

  StringInfo
    *profile;

  unsigned char
    *p;

  unsigned int
    size;

  info=GetImageProfile(image,"psd:additional-info");
  if (info == (const StringInfo *) NULL)
    return((const StringInfo *) NULL);
  option=GetImageOption(image_info,"psd:additional-info");
  if (LocaleCompare(option,"all") == 0)
    return(info);
  if (LocaleCompare(option,"selective") != 0)
    {
      profile=RemoveImageProfile(image,"psd:additional-info");
      return(DestroyStringInfo(profile));
    }
  length=GetStringInfoLength(info);
  p=GetStringInfoDatum(info);
  remaining_length=length;
  length=0;
  /* Each record: 4-byte signature, 4-byte key, 4-byte size, payload. */
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    if ((size_t) size > remaining_length)
      return((const StringInfo *) NULL);
    found=MagickFalse;
    for (i=0; i < PSDAllowedLength; i++)
    {
      if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
        continue;
      found=MagickTrue;
      break;
    }
    remaining_length-=(size_t) size;
    if (found == MagickFalse)
      {
        /* Squeeze non-whitelisted records out of the buffer in place. */
        if (remaining_length > 0)
          p=(unsigned char *) memmove(p-12,p+size,remaining_length);
        continue;
      }
    length+=(size_t) size+12;
    p+=size;
  }
  profile=RemoveImageProfile(image,"psd:additional-info");
  if (length == 0)
    return(DestroyStringInfo(profile));
  SetStringInfoLength(profile,(const size_t) length);
  (void) SetImageProfile(image,"psd:additional-info",info,exception);
  return(profile);
}

/*
  WritePSDLayersInternal() writes the layer records and layer channel data
  for every image after the first in the list (or the image itself when the
  list has one entry).  If `layers_size' is non-NULL it receives the total
  (unrounded) size of the layer section.
*/
static MagickBooleanType WritePSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,size_t *layers_size,
  ExceptionInfo *exception)
{
  char
    layer_name[MagickPathExtent];

  const char
    *property;

  const StringInfo
    *info;

  Image
    *base_image,
    *next_image;

  MagickBooleanType
    status;

  MagickOffsetType
    *layer_size_offsets,
    size_offset;

  register ssize_t
    i;

  size_t
    layer_count,
    layer_index,
    length,
    name_length,
    rounded_size,
    size;

  status=MagickTrue;
  base_image=GetNextImageInList(image);
  if (base_image == (Image *) NULL)
    base_image=image;
  size=0;
  size_offset=TellBlob(image);
  /* Reserve the layer-section size field; patched at the end. */
  (void) SetPSDSize(psd_info,image,0);
  layer_count=0;
  for (next_image=base_image; next_image != NULL; )
  {
    layer_count++;
    next_image=GetNextImageInList(next_image);
  }
  /* A negative layer count signals that the first alpha channel holds the
     merged-image transparency. */
  if (image->alpha_trait != UndefinedPixelTrait)
    size+=WriteBlobShort(image,-(unsigned short) layer_count);
  else
    size+=WriteBlobShort(image,(unsigned short) layer_count);
  layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory(
    (size_t) layer_count,sizeof(MagickOffsetType));
  if (layer_size_offsets == (MagickOffsetType *) NULL)
    ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
  layer_index=0;
  for (next_image=base_image; next_image != NULL; )
  {
    Image
      *mask;

    unsigned char
      default_color;

    unsigned short
      channels,
      total_channels;

    mask=(Image *) NULL;
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    default_color=0;
    if (property != (const char *) NULL)
      {
        mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception);
        default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0);
      }
    /* Layer bounding box: top, left, bottom, right. */
    size+=WriteBlobSignedLong(image,(signed int) next_image->page.y);
    size+=WriteBlobSignedLong(image,(signed int) next_image->page.x);
    size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+
      next_image->rows));
    size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+
      next_image->columns));
    channels=1;
    if ((next_image->storage_class != PseudoClass) &&
        (IsImageGray(next_image) == MagickFalse))
      channels=(unsigned short) (next_image->colorspace == CMYKColorspace ?
        4 : 3);
    total_channels=channels;
    if (next_image->alpha_trait != UndefinedPixelTrait)
      total_channels++;
    if (mask != (Image *) NULL)
      total_channels++;
    size+=WriteBlobShort(image,total_channels);
    /* Remember where this layer's channel-size records start so the real
       sizes can be patched in after the channel data is written. */
    layer_size_offsets[layer_index++]=TellBlob(image);
    for (i=0; i < (ssize_t) channels; i++)
      size+=WriteChannelSize(psd_info,image,(signed short) i);
    if (next_image->alpha_trait != UndefinedPixelTrait)
      size+=WriteChannelSize(psd_info,image,-1);
    if (mask != (Image *) NULL)
      size+=WriteChannelSize(psd_info,image,-2);
    size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" :"8BIM");
    size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(next_image));
    property=GetImageArtifact(next_image,"psd:layer.opacity");
    if (property != (const char *) NULL)
      {
        Quantum
          opacity;

        opacity=(Quantum) StringToInteger(property);
        size+=WriteBlobByte(image,ScaleQuantumToChar(opacity));
        (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception);
      }
    else
      size+=WriteBlobByte(image,255);
    size+=WriteBlobByte(image,0);
    size+=WriteBlobByte(image,(const unsigned char)
      (next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. */
    size+=WriteBlobByte(image,0);
    info=GetAdditionalInformation(image_info,next_image,exception);
    property=(const char *) GetImageProperty(next_image,"label",exception);
    if (property == (const char *) NULL)
      {
        /* Unlabeled layers get a synthesized "L<index>" name. */
        (void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g",
          (double) layer_index);
        property=layer_name;
      }
    name_length=strlen(property)+1;
    if ((name_length % 4) != 0)
      name_length+=(4-(name_length % 4));
    if (info != (const StringInfo *) NULL)
      name_length+=GetStringInfoLength(info);
    name_length+=8;
    if (mask != (Image *) NULL)
      name_length+=20;
    size+=WriteBlobLong(image,(unsigned int) name_length);
    if (mask == (Image *) NULL)
      size+=WriteBlobLong(image,0);
    else
      {
        if (mask->compose != NoCompositeOp)
          (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum(
            default_color),MagickTrue,exception);
        mask->page.y+=image->page.y;
        mask->page.x+=image->page.x;
        size+=WriteBlobLong(image,20);
        size+=WriteBlobSignedLong(image,(const signed int) mask->page.y);
        size+=WriteBlobSignedLong(image,(const signed int) mask->page.x);
        size+=WriteBlobSignedLong(image,(const signed int) (mask->rows+
          mask->page.y));
        size+=WriteBlobSignedLong(image,(const signed int) (mask->columns+
          mask->page.x));
        size+=WriteBlobByte(image,default_color);
        size+=WriteBlobByte(image,(const unsigned char)
          (mask->compose == NoCompositeOp ? 2 : 0));
        size+=WriteBlobMSBShort(image,0);
      }
    size+=WriteBlobLong(image,0);
    size+=WritePascalString(image,property,4);
    if (info != (const StringInfo *) NULL)
      size+=WriteBlob(image,GetStringInfoLength(info),
        GetStringInfoDatum(info));
    next_image=GetNextImageInList(next_image);
  }
  /*
    Now the image data!
  */
  next_image=base_image;
  layer_index=0;
  while (next_image != NULL)
  {
    length=WritePSDChannels(psd_info,image_info,image,next_image,
      layer_size_offsets[layer_index++],MagickTrue,exception);
    if (length == 0)
      {
        status=MagickFalse;
        break;
      }
    size+=length;
    next_image=GetNextImageInList(next_image);
  }
  /*
    Write the total size
  */
  if (layers_size != (size_t*) NULL)
    *layers_size=size;
  /* The layer-section size is rounded up to an even number of bytes. */
  if ((size/2) != ((size+1)/2))
    rounded_size=size+1;
  else
    rounded_size=size;
  (void) WritePSDSize(psd_info,image,rounded_size,size_offset);
  layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
    layer_size_offsets);
  /*
    Remove the opacity mask from the registry
  */
  next_image=base_image;
  while (next_image != (Image *) NULL)
  {
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    if (property != (const char *) NULL)
      (void) DeleteImageRegistry(property);
    next_image=GetNextImageInList(next_image);
  }
  return(status);
}

/*
  WritePSDLayers() is the policy-checked public entry point for writing the
  layer section; see WritePSDLayersInternal().
*/
ModuleExport MagickBooleanType WritePSDLayers(Image * image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  PolicyDomain
    domain;

  PolicyRights
    rights;

  domain=CoderPolicyDomain;
  rights=WritePolicyRights;
  if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse)
    return(MagickTrue);
  return WritePSDLayersInternal(image,image_info,psd_info,(size_t*) NULL,
    exception);
}

static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  const StringInfo
    *icc_profile;

  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  register ssize_t
    i;

  size_t
    length,
    num_channels,
    packet_size;

  StringInfo
    *bim_profile;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  packet_size=(size_t) (image->depth > 8 ? 6 : 3);
  if (image->alpha_trait != UndefinedPixelTrait)
    packet_size+=image->depth > 8 ? 2 : 1;
  /* Use PSB (version 2) when requested or when the canvas exceeds the
     30000-pixel PSD limit. */
  psd_info.version=1;
  if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
      (image->columns > 30000) || (image->rows > 30000))
    psd_info.version=2;
  (void) WriteBlob(image,4,(const unsigned char *) "8BPS");
  (void) WriteBlobMSBShort(image,psd_info.version); /* version */
  for (i=1; i <= 6; i++)
    (void) WriteBlobByte(image, 0); /* 6 bytes of reserved */
  /* When the image has a color profile it won't be converted to gray scale */
  if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) &&
      (SetImageGray(image,exception) != MagickFalse))
    num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
  else
    if ((image_info->type != TrueColorType) &&
        (image_info->type != TrueColorAlphaType) &&
        (image->storage_class == PseudoClass))
      num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
    else
      {
        if (image->storage_class == PseudoClass)
          (void) SetImageStorageClass(image,DirectClass,exception);
        if (image->colorspace != CMYKColorspace)
          num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL);
        else
          num_channels=(image->alpha_trait != UndefinedPixelTrait ?
5UL : 4UL); } (void) WriteBlobMSBShort(image,(unsigned short) num_channels); (void) WriteBlobMSBLong(image,(unsigned int) image->rows); (void) WriteBlobMSBLong(image,(unsigned int) image->columns); if (IsImageGray(image) != MagickFalse) { MagickBooleanType monochrome; /* Write depth & mode. */ monochrome=IsImageMonochrome(image) && (image->depth == 1) ? MagickTrue : MagickFalse; (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8)); (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? BitmapMode : GrayscaleMode)); } else { (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? 8 : image->depth > 8 ? 16 : 8)); if (((image_info->colorspace != UndefinedColorspace) || (image->colorspace != CMYKColorspace)) && (image_info->colorspace != CMYKColorspace)) { (void) TransformImageColorspace(image,sRGBColorspace,exception); (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? IndexedMode : RGBMode)); } else { if (image->colorspace != CMYKColorspace) (void) TransformImageColorspace(image,CMYKColorspace,exception); (void) WriteBlobMSBShort(image,CMYKMode); } } if ((IsImageGray(image) != MagickFalse) || (image->storage_class == DirectClass) || (image->colors > 256)) (void) WriteBlobMSBLong(image,0); else { /* Write PSD raster colormap. 
*/ (void) WriteBlobMSBLong(image,768); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].red))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].green))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].blue))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); } /* Image resource block. */ length=28; /* 0x03EB */ bim_profile=(StringInfo *) GetImageProfile(image,"8bim"); icc_profile=GetImageProfile(image,"icc"); if (bim_profile != (StringInfo *) NULL) { bim_profile=CloneStringInfo(bim_profile); if (icc_profile != (StringInfo *) NULL) RemoveICCProfileFromResourceBlock(bim_profile); RemoveResolutionFromResourceBlock(bim_profile); length+=PSDQuantum(GetStringInfoLength(bim_profile)); } if (icc_profile != (const StringInfo *) NULL) length+=PSDQuantum(GetStringInfoLength(icc_profile))+12; (void) WriteBlobMSBLong(image,(unsigned int) length); WriteResolutionResourceBlock(image); if (bim_profile != (StringInfo *) NULL) { (void) WriteBlob(image,GetStringInfoLength(bim_profile), GetStringInfoDatum(bim_profile)); bim_profile=DestroyStringInfo(bim_profile); } if (icc_profile != (StringInfo *) NULL) { (void) WriteBlob(image,4,(const unsigned char *) "8BIM"); (void) WriteBlobMSBShort(image,0x0000040F); (void) WriteBlobMSBShort(image,0); (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength( icc_profile)); (void) WriteBlob(image,GetStringInfoLength(icc_profile), GetStringInfoDatum(icc_profile)); if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile))) (void) WriteBlobByte(image,0); } if (status != MagickFalse) { MagickOffsetType size_offset; size_t size; size_offset=TellBlob(image); (void) 
SetPSDSize(&psd_info,image,0); status=WritePSDLayersInternal(image,image_info,&psd_info,&size, exception); size_offset+=WritePSDSize(&psd_info,image,size+ (psd_info.version == 1 ? 8 : 12),size_offset); } (void) WriteBlobMSBLong(image,0); /* user mask data */ /* Write composite image. */ if (status != MagickFalse) { CompressionType compression; compression=image->compression; if (image_info->compression != UndefinedCompression) image->compression=image_info->compression; if (image->compression == ZipCompression) image->compression=RLECompression; if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse, exception) == 0) status=MagickFalse; image->compression=compression; } (void) CloseBlob(image); return(status); }
fib-omp1.c
/* * BSD 2-Clause License * * Copyright (c) 2020, Alessandro Capotondi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /** * @file fibonacci.c * @author Alessandro Capotondi * @date 27 Mar 2020 * @brief Recursive computation of Fibonacci * * @see https://en.wikipedia.org/wiki/Fibonacci_number * @see http://algo.ing.unimo.it/people/andrea/Didattica/HPC/index.html */ #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include "utils.h" #define F_30 832040LL #define F_40 102334155LL #define F_50 12586269025LL #define F_60 1548008755920LL static int N; static int CUTOFF; #define SEPARATOR "------------------------------------\n" // Parse command line arguments to set solver parameters void parse_arguments(int argc, char *argv[]); // Fibonacci Golden Model - DO NOT CHANGE! unsigned long long fibonacci_g(unsigned long long n) { if (n < 2) return n; return fibonacci_g(n - 2) + fibonacci_g(n - 1); } // Run the Fibonacci unsigned long long fib(unsigned long long n) { if (n < 2) return n; unsigned long long x,y; #pragma omp task shared(x) x = fib(n - 2); #pragma omp task shared(y) y = fib(n - 1); #pragma omp taskwait return x+y; } int main(int argc, char *argv[]) { parse_arguments(argc, argv); printf(SEPARATOR); printf("Number: %d\n", N); printf("Cutoff: %d\n", CUTOFF); printf(SEPARATOR); // Run Jacobi solver start_timer(); unsigned long long f_n; #pragma omp parallel shared(f_n) num_threads(NTHREADS) { #pragma omp single nowait { f_n = fib(N); } } stop_timer(); // Check error of final solution unsigned long long g_n; if(N==30) g_n = F_30; else if (N==40) g_n = F_40; else if (N==50) g_n = F_50; else if (N==60) g_n = F_60; else g_n = fibonacci_g(N); unsigned long long err = f_n - g_n; printf(SEPARATOR); printf("F(%d) = %llu\n", N, f_n); printf("Error = %llu\n", err); printf("Runtime = %lf ms\n", elapsed_ns() / 1E6); printf(SEPARATOR); return 0; } int parse_int(const char *str) { char *next; int value = strtoul(str, &next, 10); return strlen(next) ? 
-1 : value; } double parse_double(const char *str) { char *next; double value = strtod(str, &next); return strlen(next) ? -1 : value; } void parse_arguments(int argc, char *argv[]) { // Set default values N = 40; CUTOFF = 20; for (int i = 1; i < argc; i++) { if (!strcmp(argv[i], "--number") || !strcmp(argv[i], "-n")) { if (++i >= argc || (N = parse_int(argv[i])) < 0) { printf("Invalid matrix order\n"); exit(1); } } else if (!strcmp(argv[i], "--cutoff") || !strcmp(argv[i], "-c")) { if (++i >= argc || (CUTOFF = parse_int(argv[i])) < 0) { printf("Invalid seed\n"); exit(1); } } else if (!strcmp(argv[i], "--help") || !strcmp(argv[i], "-h")) { printf("\n"); printf("Usage: ./jacobi [OPTIONS]\n\n"); printf("Options:\n"); printf(" -h --help Print this message\n"); printf(" -c --cutoff C Set task cutoff\n"); printf(" -n --number N Set the Fibonacci number\n"); printf("\n"); exit(0); } else { printf("Unrecognized argument '%s' (try '--help')\n", argv[i]); exit(1); } } }
conv_dw_kernel_mips.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: qtang@openailab.com
 */

#include "conv_dw_kernel_mips.h"

#include <stdint.h>
#include <stdlib.h>
#include <string.h> /* BUGFIX: pad() calls memcpy(), which was used without a declaration */
#include <math.h>

#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))

/* In-place clamp activation over `size` floats:
 * data[i] = max(data[i], 0), additionally capped at `activation` when
 * activation > 0 (e.g. activation == 6 gives ReLU6). */
void relu(float* data, int size, int activation)
{
    for (int i = 0; i < size; i++)
    {
        data[i] = max(data[i], (float)0);
        if (activation > 0)
        {
            data[i] = min(data[i], (float)activation);
        }
    }
}

/* Depthwise 3x3, stride 1, one plane per group, over a pre-padded input.
 * The row-skip arithmetic (r += 2 + w after each output row pair) relies on
 * in_w == out_w + 2; the caller guarantees this by padding first.
 * Rows are processed two at a time so each loaded input row is reused for
 * two output rows; a scalar tail handles an odd final row. */
void convdw3x3s1(float* output, float* input, float* _kernel, float* _bias, int channel, int in_h, int in_w,
                 int out_h, int out_w, int num_thread)
{
    int w = in_w;
    int h = in_h;
    int c_step_in = w * h;

    int outw = out_w;
    int outh = out_h;
    int c_step_out = outw * outh;

    const int group = channel;
    const float* kernel = _kernel;

#pragma omp parallel for num_threads(num_thread)
    for (int g = 0; g < group; g++)
    {
        float* out = output + g * c_step_out;
        float* outptr = out;
        float* outptr2 = outptr + outw; /* second output row of the pair */

        const float bias0 = _bias ? _bias[g] : 0.f;

        const float* kernel0 = kernel + g * 9;
        const float* img0 = input + g * c_step_in;

        const float* r0 = img0;
        const float* r1 = img0 + w;
        const float* r2 = img0 + w * 2;
        const float* r3 = img0 + w * 3;

        const float* k0 = kernel0;
        const float* k1 = kernel0 + 3;
        const float* k2 = kernel0 + 6;

        int i = 0;
        /* two output rows per iteration: rows r1/r2 feed both */
        for (; i + 1 < outh; i += 2)
        {
            int remain = outw;
            for (; remain > 0; remain--)
            {
                float sum = bias0;
                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];

                float sum2 = bias0;
                sum2 += r1[0] * k0[0];
                sum2 += r1[1] * k0[1];
                sum2 += r1[2] * k0[2];
                sum2 += r2[0] * k1[0];
                sum2 += r2[1] * k1[1];
                sum2 += r2[2] * k1[2];
                sum2 += r3[0] * k2[0];
                sum2 += r3[1] * k2[1];
                sum2 += r3[2] * k2[2];

                *outptr = sum;
                *outptr2 = sum2;

                r0++;
                r1++;
                r2++;
                r3++;
                outptr++;
                outptr2++;
            }

            /* skip padding columns and the row already consumed by outptr2 */
            r0 += 2 + w;
            r1 += 2 + w;
            r2 += 2 + w;
            r3 += 2 + w;

            outptr += outw;
            outptr2 += outw;
        }

        /* odd remaining output row */
        for (; i < outh; i++)
        {
            int remain = outw;
            for (; remain > 0; remain--)
            {
                float sum = bias0;
                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];

                *outptr = sum;

                r0++;
                r1++;
                r2++;
                outptr++;
            }

            r0 += 2;
            r1 += 2;
            r2 += 2;
        }
    }
}

/* Depthwise 3x3, stride 2, one plane per group, over a pre-padded input.
 * tailstep advances the row pointers past the padding columns and the row
 * skipped by the vertical stride; the arithmetic assumes the caller sized
 * the padded input consistently with out_w (in_w >= 2*out_w). */
void convdw3x3s2(float* output, float* input, float* _kernel, float* _bias, int channel, int in_h, int in_w,
                 int out_h, int out_w, int num_thread)
{
    int w = in_w;
    int h = in_h;
    int c_step_in = w * h;

    int outw = out_w;
    int outh = out_h;
    int c_step_out = outw * outh;

    const int group = channel;
    const int tailstep = w - 2 * outw + w;
    const float* kernel = _kernel;

#pragma omp parallel for num_threads(num_thread)
    for (int g = 0; g < group; g++)
    {
        float* out = output + g * c_step_out;
        float* outptr = out;

        const float* kernel0 = kernel + g * 9;
        const float bias0 = _bias ? _bias[g] : 0.f;

        const float* img0 = input + g * c_step_in;
        const float* r0 = img0;
        const float* r1 = img0 + w;
        const float* r2 = img0 + w * 2;

        const float* k0 = kernel0;
        const float* k1 = kernel0 + 3;
        const float* k2 = kernel0 + 6;

        int i = 0;
        for (; i < outh; i++)
        {
            int remain = outw;
            for (; remain > 0; remain--)
            {
                float sum = bias0;
                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];

                *outptr = sum;

                r0 += 2; /* horizontal stride 2 */
                r1 += 2;
                r2 += 2;
                outptr++;
            }

            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
        }
    }
}

/* Copy an in_h x in_w plane into an out_h x out_w plane, placed at offset
 * (top, left), filling the surrounding border with constant `v`.
 * Narrow rows (in_w < 12) are copied element-wise; wider rows use memcpy. */
void pad(float* input, float* output, int in_h, int in_w, int out_h, int out_w, int top, int left, float v)
{
    float* ptr = input;
    float* outptr = output;

    int y = 0;
    // fill top border rows
    for (; y < top; y++)
    {
        int x = 0;
        for (; x < out_w; x++)
        {
            outptr[x] = v;
        }
        outptr += out_w;
    }
    // fill center: left border, payload, right border
    for (; y < (top + in_h); y++)
    {
        int x = 0;
        for (; x < left; x++)
        {
            outptr[x] = v;
        }
        if (in_w < 12)
        {
            for (; x < (left + in_w); x++)
            {
                outptr[x] = ptr[x - left];
            }
        }
        else
        {
            memcpy(outptr + left, ptr, in_w * sizeof(float));
            x += in_w;
        }
        for (; x < out_w; x++)
        {
            outptr[x] = v;
        }
        ptr += in_w;
        outptr += out_w;
    }
    // fill bottom border rows
    for (; y < out_h; y++)
    {
        int x = 0;
        for (; x < out_w; x++)
        {
            outptr[x] = v;
        }
        outptr += out_w;
    }
}

/* Depthwise 3x3 convolution entry point (stride 1 or 2, selected by
 * param->stride_h; the call sites only dispatch to the two 3x3 kernels
 * above).  Pads each batch into a scratch buffer when needed, runs the
 * per-group kernel, then applies the optional clamp activation.
 * Returns 0 on success, -1 on allocation failure. */
int conv_dw_run(struct tensor* input_tensor, struct tensor* weight_tensor, struct tensor* bias_tensor,
                struct tensor* output_tensor, struct conv_priv_info* conv_info, struct conv_param* param,
                int num_thread, int cpu_affinity)
{
    float* input = (float*)input_tensor->data;
    float* output = (float*)output_tensor->data;
    float* kernel = (float*)weight_tensor->data;
    float* biases = NULL;
    if (bias_tensor)
        biases = (float*)bias_tensor->data;

    int batch_number = input_tensor->dims[0];
    int inc = input_tensor->dims[1];
    int inh = input_tensor->dims[2];
    int inw = input_tensor->dims[3];
    int in_chw = inc * inh * inw;

    int outc = output_tensor->dims[1];
    int outh = output_tensor->dims[2];
    int outw = output_tensor->dims[3];
    int out_hw = outh * outw;
    int out_chw = out_hw * outc;

    int pad_w = param->pad_w0;
    int pad_h = param->pad_h0;
    int stride_h = param->stride_h;
    int group = param->group;
    int activation = param->activation;

    /* padded input geometry (padding applied symmetrically) */
    int inh_tmp = inh + pad_h + pad_h;
    int inw_tmp = inw + pad_w + pad_w;
    int need_pad = !(inh_tmp == inh && inw_tmp == inw);

    float* input_tmp = NULL;
    if (need_pad)
    {
        input_tmp = (float*)malloc((size_t)inh_tmp * inw_tmp * group * sizeof(float));
        if (input_tmp == NULL) /* BUGFIX: malloc result was not checked */
            return -1;
    }

    /* process each batch */
    for (int n = 0; n < batch_number; n++)
    {
        /* BUGFIX: the original never advanced input/output per batch (and
         * padded only batch 0), so every iteration recomputed image 0. */
        float* in_batch = input + n * in_chw;
        float* out_batch = output + n * out_chw;
        float* in_ptr = in_batch;

        if (need_pad)
        {
            for (int g = 0; g < group; g++)
            {
                float* pad_in = in_batch + g * inh * inw;
                float* pad_out = input_tmp + g * inh_tmp * inw_tmp;
                pad(pad_in, pad_out, inh, inw, inh_tmp, inw_tmp, pad_h, pad_w, 0.f);
            }
            in_ptr = input_tmp;
        }

        if (stride_h == 1)
            convdw3x3s1(out_batch, in_ptr, kernel, biases, group, inh_tmp, inw_tmp, outh, outw, num_thread);
        else
            convdw3x3s2(out_batch, in_ptr, kernel, biases, group, inh_tmp, inw_tmp, outh, outw, num_thread);
    }

    /* activation >= 0 means "apply clamp activation" (see relu()) */
    if (activation >= 0)
        relu(output, batch_number * out_chw, activation);

    if (input_tmp != NULL)
        free(input_tmp);

    return 0;
}
mixed_tentusscher_myo_epi_2004_S1.c
// Scenario 1 - Original Mixed-Model TenTusscher 2004 (Myocardium + Epicardium) #include <stdio.h> #include "mixed_tentusscher_myo_epi_2004_S1.h" GET_CELL_MODEL_DATA(init_cell_model_data) { if(get_initial_v) cell_model->initial_v = INITIAL_V; if(get_neq) cell_model->number_of_ode_equations = NEQ; } SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) { static bool first_call = true; if(first_call) { print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n"); first_call = false; } // Get the mapping array uint32_t *mapping = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; } else { print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n"); } // Initial conditions for TenTusscher myocardium if (mapping[sv_id] == 0) { // Default initial conditions /* sv[0] = INITIAL_V; // V; millivolt sv[1] = 0.f; //M sv[2] = 0.75; //H sv[3] = 0.75f; //J sv[4] = 0.f; //Xr1 sv[5] = 1.f; //Xr2 sv[6] = 0.f; //Xs sv[7] = 1.f; //S sv[8] = 0.f; //R sv[9] = 0.f; //D sv[10] = 1.f; //F sv[11] = 1.f; //FCa sv[12] = 1.f; //G sv[13] = 0.0002; //Cai sv[14] = 0.2f; //CaSR sv[15] = 11.6f; //Nai sv[16] = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633}; for (uint32_t i = 0; i < NEQ; i++) sv[i] = sv_sst[i]; } // Initial conditions for TenTusscher epicardium else { // Default initial conditions /* sv[0] = INITIAL_V; // V; millivolt sv[1] = 0.f; //M sv[2] = 0.75; //H sv[3] = 0.75f; //J sv[4] = 0.f; //Xr1 sv[5] = 1.f; //Xr2 sv[6] = 0.f; //Xs sv[7] = 1.f; //S sv[8] = 0.f; //R sv[9] = 0.f; //D sv[10] = 1.f; //F sv[11] = 1.f; //FCa sv[12] = 1.f; //G sv[13] = 
0.0002; //Cai sv[14] = 0.2f; //CaSR sv[15] = 11.6f; //Nai sv[16] = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.4172552153702,0.00133233093318418,0.775980725003160,0.775871451583533,0.000178484465968596,0.483518904573916,0.00297208335439809,0.999998297825169,1.98274727808946e-08,1.92952362196655e-05,0.999768268008847,1.00667048889468,0.999984854519288,5.50424977684767e-05,0.352485262813812,10.8673127043200,138.860197273148}; for (uint32_t i = 0; i < NEQ; i++) sv[i] = sv_sst[i]; } } SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) { // Get the mapping array uint32_t *mapping = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; } else { print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n"); } uint32_t sv_id; int i; #pragma omp parallel for private(sv_id) for (i = 0; i < num_cells_to_solve; i++) { if(cells_to_solve) sv_id = cells_to_solve[i]; else sv_id = (uint32_t )i; for (int j = 0; j < num_steps; ++j) { if (mapping[i] == 0) solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]); else solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]); } } } void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current) { real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu_myo(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = sv[3]; real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real 
Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Myocardium cell real Gks=0.062; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] Myocardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real 
BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; Irel=A*sd*sg; Ileak=0.00008f*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); 
bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; // [!] 
Myocardium cell R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; } void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current) { real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu_epi(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = sv[3]; real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = 
sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Epicardium cell real Gks=0.245; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] Epicardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real 
TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; 
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; Irel=A*sd*sg; Ileak=0.00008f*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) 
{ AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated version from CellML !!! 
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
vecsort.c
#include <stdlib.h>
#include <unistd.h>
#include <getopt.h>
#include <stdio.h>
#include <ctype.h>
#include <omp.h>
#include <string.h>
#include <sys/time.h>

int TASK_THREADS = 6;
int DATA_THREADS = 2;

/* Ordering of the vector */
typedef enum Ordering {
    ASCENDING, DESCENDING, RANDOM
} Order;

/**
 * Prints the given vector, ten values per line.
 * @param v pointer to vector
 * @param l length of the vector
 */
void print_v(int *v, long l) {
    printf("\n");
    for (long i = 0; i < l; i++) {
        if (i != 0 && (i % 10 == 0)) {
            printf("\n");
        }
        printf("%d ", v[i]);
    }
    printf("\n");
}

/**
 * Print a two dimensional pointer vector, one row per line.
 * @param v vector of rows
 * @param rows number of rows
 * @param row_lengths per-row element counts
 */
void print_2d_v(int **v, long rows, const long *row_lengths) {
    printf("\n");
    for (long i = 0; i < rows; i++) {
        long length = row_lengths[i];
        for (long j = 0; j < length; j++) {
            printf("%d \t", v[i][j]);
        }
        printf("\n");
    }
}

/**
 * Checks whether the given vectors are sorted in ascending order.
 * @param v pointer to the sorted vectors
 * @param rows number of vectors
 * @param row_lengths per-row element counts
 * @return 0 if not sorted, 1 if sorted
 */
int check_result(int **v, long rows, const long *row_lengths) {
    for (long r = 0; r < rows; r++) {
        long l = row_lengths[r];
        if (l == 0) continue; /* FIX: empty row is trivially sorted; old code read v[r][0] */
        int prev = v[r][0];
        for (long i = 1; i < l; i++) {
            if (prev > v[r][i]) {
                printf("warning: vector at row[%ld] is not sorted", r);
                print_v(v[r], l);
                return 0;
            }
            prev = v[r][i];
        }
    }
    return 1;
}

/**
 * Calculates and prints results of sorting. E.g.
 *   Elements   Time in s   Elements/s
 *   10000000   3.134850e-01 3.189945e+07
 *
 * @param tv1 first time value
 * @param tv2 second time value
 * @param rows number of rows sorted
 * @param sum_elements total number of elements sorted
 * @param row_lengths per-row element counts
 * @param v the sorted vector of vectors (verified before printing)
 */
void print_results(struct timeval tv1, struct timeval tv2, long rows, long sum_elements,
                   long *row_lengths, int **v) {
    double time = (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 +
                  (double) (tv2.tv_sec - tv1.tv_sec);
    printf("Output \033[32;1m(%s)\033[0m:\n\n %10s %13s %13s\n ",
           check_result(v, rows, row_lengths) == 0 ? "incorrect" : "correct",
           " Elements", "Time in s", "Elements/s");
    /* FIX: sum_elements is a long, so use %ld (was %zu, which expects size_t) */
    printf("%10ld % .6e % .6e\n", sum_elements, time, (double) sum_elements / time);
}

/**
 * Merges two sorted sub-lists together.
 *
 * Left source half is A[low:mid-1]
 * Right source half is A[mid:high-1]
 * Result is B[low:high-1].
 *
 * @param a source list
 * @param low lower bound
 * @param mid mid bound
 * @param high upper bound
 * @param b destination list
 */
void merge(const int *a, long low, long mid, long high, int *b) {
    long i = low, j = mid;
    /* While there are elements in the left or right list... */
    for (long k = low; k < high; k++) {
        /* If left list head exists and is <= existing right list head. */
        if (i < mid && (j >= high || a[i] <= a[j])) {
            b[k] = a[i];
            i++;
        } else {
            b[k] = a[j];
            j++;
        }
    }
}

/**
 * Sequentially splits the given list and merges the halves (sorted) together.
 * a and b alternate roles at each recursion level; the sorted result for the
 * top-level call ends up in a.
 *
 * @param b scratch list
 * @param low lower bound
 * @param high upper bound
 * @param a destination list
 */
void split_seq(int *b, long low, long high, int *a) {
    if (high - low < 2) return;

    /* recursively split */
    long mid = (low + high) >> 1; /* divide by 2 */
    split_seq(a, low, mid, b);  /* sort left part */
    split_seq(a, mid, high, b); /* sort right part */

    /* merge from b to a */
    merge(b, low, mid, high, a);
}

/**
 * Splits in parallel (OpenMP tasks) the given list and merges the halves
 * (sorted) together. Falls back to split_seq below a size threshold.
 *
 * @param b scratch list
 * @param low lower bound
 * @param high upper bound
 * @param a destination list
 */
void split_parallel(int *b, long low, long high, int *a) {
    /* parallelism threshold: task overhead dominates below this size */
    if (high - low < 1000) {
        split_seq(b, low, high, a);
        return;
    }

    /* recursively split */
    long mid = (low + high) >> 1; /* divide by 2 */
#pragma omp task shared(a, b) firstprivate(low, mid)
    split_parallel(a, low, mid, b);  /* sort left part */
#pragma omp task shared(a, b) firstprivate(mid, high)
    split_parallel(a, mid, high, b); /* sort right part */
#pragma omp taskwait

    /* merge from b to a */
    merge(b, low, mid, high, a);
}

/**
 * Sort the vector of vectors with data parallelism only
 * (one OpenMP thread per subset of rows).
 *
 * @param vector rows to sort (sorted in place)
 * @param rows number of rows
 * @param row_lengths per-row element counts
 * @param sum_elements total element count (for throughput reporting)
 */
void vecsort_datapar(int **vector, long rows, long *row_lengths, long sum_elements) {
    struct timeval tv1, tv2;
    printf("Running parallel - Rows %ld \n", rows);
    printf("Number of threads for data par %d\n", DATA_THREADS);

    /* FIX: was malloc(sizeof(int) * sum_elements) — we need an array of
     * `rows` row POINTERS, not `sum_elements` ints */
    int **b = (int **) malloc(rows * sizeof(int *));
    if (b == NULL) {
        fprintf(stderr, "Malloc failed...\n");
        return;
    }

    /* initialize scratch copies of every row */
    for (long row = 0; row < rows; row++) {
        long length = row_lengths[row];
        b[row] = (int *) malloc(length * sizeof(int));
        memcpy(b[row], vector[row], length * sizeof(int));
    }

    /* start sorting, one thread per row at a time */
    gettimeofday(&tv1, NULL);
#pragma omp parallel for num_threads(DATA_THREADS)
    for (long row = 0; row < rows; row++) {
        long length = row_lengths[row];
        split_seq(b[row], 0, length, vector[row]);
    }
    gettimeofday(&tv2, NULL);

    print_results(tv1, tv2, rows, sum_elements, row_lengths, vector);

    /* FIX: scratch buffers were leaked */
    for (long row = 0; row < rows; row++) free(b[row]);
    free(b);
}

/**
 * Sort the vector of vectors with both data and task parallelism:
 * DATA_THREADS threads share the rows, and each row is sorted by a nested
 * team of TASK_THREADS threads running split_parallel tasks.
 *
 * @param vector rows to sort (sorted in place)
 * @param rows number of rows
 * @param row_lengths per-row element counts
 * @param sum_elements total element count (for throughput reporting)
 */
void vecsort_taskpar(int **vector, long rows, long *row_lengths, long sum_elements) {
    struct timeval tv1, tv2;
    printf("Running parallel - Rows %ld \n", rows);
    printf("Number of threads for data par %d\n", DATA_THREADS);
    printf("Number of threads for task par %d\n", TASK_THREADS);

    /* needed to enable nested parallelism
     * (deprecated since OpenMP 5.0 in favour of omp_set_max_active_levels,
     *  kept here for compatibility with older runtimes) */
    omp_set_nested(1);

    /* FIX: was malloc(sizeof(int) * sum_elements) — see vecsort_datapar */
    int **b = (int **) malloc(rows * sizeof(int *));
    if (b == NULL) {
        fprintf(stderr, "Malloc failed...\n");
        return;
    }

    /* initialize scratch copies of every row */
    for (long row = 0; row < rows; row++) {
        long length = row_lengths[row];
        b[row] = (int *) malloc(length * sizeof(int));
        memcpy(b[row], vector[row], length * sizeof(int));
    }

    /* start sorting: outer team over rows, nested team of tasks per row */
    gettimeofday(&tv1, NULL);
#pragma omp parallel num_threads(DATA_THREADS)
    {
#pragma omp for
        for (long row = 0; row < rows; row++) {
            long length = row_lengths[row];
#pragma omp parallel num_threads(TASK_THREADS)
            {
#pragma omp single
                {
                    split_parallel(b[row], 0, length, vector[row]);
                }
            }
        }
    }
    gettimeofday(&tv2, NULL);

    print_results(tv1, tv2, rows, sum_elements, row_lengths, vector);

    /* FIX: scratch buffers were leaked */
    for (long row = 0; row < rows; row++) free(b[row]);
    free(b);
}

/**
 * Sort the vector of vectors sequentially.
 *
 * @param vector rows to sort (sorted in place)
 * @param rows number of rows
 * @param row_lengths per-row element counts
 * @param sum_elements total element count (for throughput reporting)
 */
void vecsort_seq(int **vector, long rows, long *row_lengths, long sum_elements) {
    struct timeval tv1, tv2;
    printf("Running sequential - %ld Rows\n", rows);

    /* FIX: was malloc(sizeof(int) * sum_elements) — see vecsort_datapar */
    int **b = (int **) malloc(rows * sizeof(int *));
    if (b == NULL) {
        fprintf(stderr, "Malloc failed...\n");
        return;
    }

    /* initialize scratch copies of every row */
    for (long row = 0; row < rows; row++) {
        long length = row_lengths[row];
        b[row] = (int *) malloc(length * sizeof(int));
        memcpy(b[row], vector[row], length * sizeof(int));
    }

    /* start sorting one by one */
    gettimeofday(&tv1, NULL);
    for (long row = 0; row < rows; row++) {
        long length = row_lengths[row];
        split_seq(b[row], 0, length, vector[row]);
    }
    gettimeofday(&tv2, NULL);

    print_results(tv1, tv2, rows, sum_elements, row_lengths, vector);

    /* FIX: scratch buffers were leaked */
    for (long row = 0; row < rows; row++) free(b[row]);
    free(b);
}

/**
 * usage: ./vecsort
 *
 * arguments:
 *  -a      initial order ascending
 *  -d      initial order descending
 *  -r      initial order random
 *  -l {n}  length of each vector; with {-v} this is the upper bound. default 10000
 *  -R {n}  number of vectors to create. default 100
 *  -v      variable length: each row's size is drawn from [l/2, l]
 *  -g      debug mode -> print vector
 *  -s {n}  provide seed for srand
 *  -P      run with data parallelization only
 *  -D {n}  number of threads in data parallel execution. default 2
 *  -T {n}  number of threads in task parallel execution. default 6
 *  -S      executes sequentially
 *
 * Examples:
 *  ./vecsort -r -R 10 -l 10 -g -v
 *  ./vecsort -r -R 10 -l 10 -g -P
 *  ./vecsort -r -R 1000 -l 1000 -P -D 4 -T 4
 */
int main(int argc, char **argv) {
    int c;
    int seed = 42;
    long length = 1e4;
    long rows = 1e2;
    int var_length = 0;
    long *row_lengths;
    long sum_elements = 0;
    int sequential = 0;
    int datapar_only = 0;
    int debug = 0;
    Order order = ASCENDING;

    /* Read command-line options. */
    while ((c = getopt(argc, argv, "adrgl:s:R:D:T:SPv")) != -1) {
        switch (c) {
            case 'a': order = ASCENDING; break;
            case 'd': order = DESCENDING; break;
            case 'r': order = RANDOM; break;
            case 'l': length = atol(optarg); break;
            case 'g': debug = 1; break;
            case 's': seed = atoi(optarg); break;
            case 'R': rows = atol(optarg); break; /* FIX: was atoi for a long */
            case 'S': sequential = 1; break;
            case 'P': datapar_only = 1; break;
            case 'v': var_length = 1; break;
            case 'D': DATA_THREADS = atoi(optarg); break;
            case 'T': TASK_THREADS = atoi(optarg); break;
            case '?':
                /* FIX: R, D and T also take arguments */
                if (optopt == 'l' || optopt == 's' || optopt == 'R' ||
                    optopt == 'D' || optopt == 'T') {
                    fprintf(stderr, "Option -%c requires an argument.\n", optopt);
                } else if (isprint(optopt)) {
                    fprintf(stderr, "Unknown option '-%c'.\n", optopt);
                } else {
                    fprintf(stderr, "Unknown option character '\\x%x'.\n", optopt);
                }
                return -1;
            default:
                return -1;
        }
    }

    row_lengths = (long *) malloc(rows * sizeof(long));
    if (row_lengths == NULL) {
        fprintf(stderr, "Malloc failed...\n");
        return -1;
    }

    /* create a vector that indicates the size of each sub-vector */
    if (var_length) {
        printf("Variable length mode.\n");
        srand(seed); /* FIX: lengths previously ignored the user-provided seed */
        for (long r = 0; r < rows; r++) {
            long l = rand() % (length + 1 - (length / 2)) + (length / 2);
            row_lengths[r] = l;
            sum_elements += l;
        }
    } else {
        printf("Fixed size mode.\n");
        for (long r = 0; r < rows; r++) {
            row_lengths[r] = length;
            sum_elements += length;
        }
    }
    printf("A total of %ld elements will be sorted.\n", sum_elements);

    /* FIX: was malloc(sum_elements * sizeof(int)) — we need `rows` row pointers */
    int **vector = (int **) malloc(rows * sizeof(int *));
    if (vector == NULL) {
        printf("Malloc failed...");
        return -1;
    }

    for (long r = 0; r < rows; r++) {
        /* Seed such that we can always reproduce the same random vector */
        srand(seed + r);

        /* length of this array */
        long l = row_lengths[r];

        /* Allocate vector. */
        int *array = (int *) malloc(l * sizeof(int));
        if (array == NULL) {
            fprintf(stderr, "Malloc failed...\n");
            return -1;
        }

        /* Fill array. */
        switch (order) {
            case ASCENDING:
                for (long i = 0; i < l; i++) array[i] = (int) i;
                break;
            case DESCENDING:
                for (long i = 0; i < l; i++) array[i] = (int) (l - i);
                break;
            case RANDOM:
                for (long i = 0; i < l; i++) array[i] = rand();
                break;
        }

        /* Assign array to vector elems */
        vector[r] = array;
    }

    if (debug) {
        printf("Initial vector ::");
        print_2d_v(vector, rows, row_lengths);
    }

    /* Sort */
    if (sequential) {
        vecsort_seq(vector, rows, row_lengths, sum_elements);
    } else if (datapar_only) {
        vecsort_datapar(vector, rows, row_lengths, sum_elements);
    } else {
        vecsort_taskpar(vector, rows, row_lengths, sum_elements);
    }

    if (debug) {
        printf("Final vector ::");
        print_2d_v(vector, rows, row_lengths);
    }

    /* release all allocations */
    for (long r = 0; r < rows; r++) free(vector[r]);
    free(vector);
    free(row_lengths);
    return 0;
}
nco_rth_utl.c
/* $Header$ */

/* Purpose: Arithmetic controls and utilities */

/* Copyright (C) 1995--present Charlie Zender
   This file is part of NCO, the netCDF Operators. NCO is free software.
   You may redistribute and/or modify NCO under the terms of the
   3-Clause BSD License with exceptions described in the LICENSE file */

#include "nco_rth_utl.h" /* Arithmetic controls and utilities */

nco_rth_prc_rnk_enm /* [enm] Ranked precision of arithmetic type */
nco_rth_prc_rnk /* [fnc] Rank precision of arithmetic type */
(const nc_type nco_typ) /* I [enm] netCDF type of operand */
{
  /* Purpose: Ranked precision of arithmetic type
     Maps each netCDF external type to its corresponding precision-rank enum */
  switch(nco_typ){
  case NC_FLOAT: return nco_rth_prc_rnk_float;
  case NC_DOUBLE: return nco_rth_prc_rnk_double;
  case NC_INT: return nco_rth_prc_rnk_int;
  case NC_SHORT: return nco_rth_prc_rnk_short;
  case NC_CHAR: return nco_rth_prc_rnk_char;
  case NC_BYTE: return nco_rth_prc_rnk_byte;
  case NC_UBYTE: return nco_rth_prc_rnk_ubyte;
  case NC_USHORT: return nco_rth_prc_rnk_ushort;
  case NC_UINT: return nco_rth_prc_rnk_uint;
  case NC_INT64: return nco_rth_prc_rnk_int64;
  case NC_UINT64: return nco_rth_prc_rnk_uint64;
  case NC_STRING: return nco_rth_prc_rnk_string;
  default: nco_dfl_case_nc_type_err(); break;
  } /* end switch */
  /* Some compilers, e.g., SGI cc, need return statement to end non-void functions */
  return (nco_rth_prc_rnk_enm)0;
} /* end nco_rth_prc_rnk() */

void
nco_opr_nrm /* [fnc] Normalization of arithmetic operations for ncra/nces */
(const int nco_op_typ, /* I [enm] Operation type */
 const int nbr_var_prc, /* I [nbr] Number of processed variables */
 X_CST_PTR_CST_PTR_Y(var_sct,var_prc), /* I [sct] Variables in input file */
 X_CST_PTR_CST_PTR_Y(var_sct,var_prc_out), /* I/O [sct] Variables in output file */
 const char * const rec_nm_fll, /* I [sng] Full name of record dimension */
 const trv_tbl_sct * const trv_tbl) /* I [sct] Traversal table */
{
  /* Purpose: Normalize appropriate ncra/nces operation (avg, min, max, ttl, ...) on operands
     Values of var_prc are not altered but are not const because missing values are cast
     Values of var_prc_out are altered (i.e., normalized) */

  int idx=int_CEWI;
  int nbr_var_prc_cpy;
  int nco_op_typ_cpy;

  /* Local copies of the const parameters — presumably so the OpenMP
     shared() clause can name ordinary function-scope variables; confirm
     against project OpenMP conventions */
  nco_op_typ_cpy=nco_op_typ;
  nbr_var_prc_cpy=nbr_var_prc;

#ifdef _OPENMP
#pragma omp parallel for private(idx) shared(nbr_var_prc_cpy,nco_op_typ_cpy,var_prc,var_prc_out)
#endif /* !_OPENMP */
  for(idx=0;idx<nbr_var_prc_cpy;idx++){

    /* In normalizations over record dimension, only normalize those variables that contain current record dimension */
    if(rec_nm_fll){
      nco_bool flg_skp=nco_skp_var(var_prc[idx],rec_nm_fll,trv_tbl);
      if(flg_skp) continue;
    } /* !rec_nm_fll */

    if(var_prc[idx]->is_crd_var){
      /* Return linear averages of coordinates unless computing extrema
	 Prevent coordinate variables from encountering nco_var_nrm_sdn() */
      (void)nco_var_nrm(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc[idx]->tally,var_prc_out[idx]->val);
    }else{ /* !var_prc[idx]->is_crd_var */
      /* First pass: divide accumulated sums by tallies where the operation needs it */
      switch(nco_op_typ_cpy){
      case nco_op_avg: /* Normalize sum by tally to create mean */
      case nco_op_mebs: /* Normalize sum by tally to create mean */
      case nco_op_sqrt: /* Normalize sum by tally to create mean */
      case nco_op_sqravg: /* Normalize sum by tally to create mean */
      case nco_op_rms: /* Normalize sum of squares by tally to create mean square */
      case nco_op_avgsqr: /* Normalize sum of squares by tally to create mean square */
	(void)nco_var_nrm(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc[idx]->tally,var_prc_out[idx]->val);
	break;
      case nco_op_rmssdn: /* Normalize sum of squares by tally-1 to create mean square for sdn */
	(void)nco_var_nrm_sdn(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc[idx]->tally,var_prc_out[idx]->val);
	break;
      case nco_op_min: /* Minimum is already in buffer, do nothing */
      case nco_op_max: /* Maximum is already in buffer, do nothing */
      case nco_op_mibs: /* Minimum absolute value is already in buffer, do nothing */
      case nco_op_mabs: /* Maximum absolute value is already in buffer, do nothing */
	break;
      case nco_op_tabs: /* Total absolute value is already in buffer, stuff missing values into elements with zero tally */
      case nco_op_ttl: /* Total is already in buffer, stuff missing values into elements with zero tally */
	(void)nco_var_tll_zro_mss_val(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc[idx]->tally,var_prc_out[idx]->val);
	break;
      default: nco_dfl_case_generic_err(); break;
      } /* end switch */

      /* A few operations require additional processing */
      switch(nco_op_typ_cpy) {
      case nco_op_rms: /* Take root of mean of sum of squares to create root mean square */
      case nco_op_rmssdn: /* Take root of sdn mean of sum of squares to create root mean square for sdn */
      case nco_op_sqrt: /* Take root of mean to create root mean */
	(void)nco_var_sqrt(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc[idx]->tally,var_prc_out[idx]->val,var_prc_out[idx]->val);
	break;
      case nco_op_sqravg: /* Square mean to create square of the mean (for sdn) */
	(void)nco_var_mlt(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->val,var_prc_out[idx]->val);
	break;
      case nco_op_avg:
      case nco_op_ttl:
      case nco_op_min:
      case nco_op_max:
      case nco_op_mibs:
      case nco_op_mabs:
      case nco_op_mebs:
      case nco_op_tabs:
      case nco_op_avgsqr:
	break;
      default: nco_dfl_case_generic_err(); break;
      } /* end switch */
    } /* !var_prc[idx]->is_crd_var */
  } /* end (OpenMP parallel for) loop over variables */
} /* end nco_opr_nrm() */

void
nco_opr_drv /* [fnc] Intermediate control of arithmetic operations for ncra/nces */
(const long idx_rec, /* I [idx] Index of record (ncra), file (ncfe), or group (ncge) in current operation */
 const int nco_op_typ, /* I [enm] Operation type */
 const var_sct * const var_prc, /* I [sct] Variable in input file */
 var_sct * const var_prc_out) /* I/O [sct] Variable in output file */
{
  /* Purpose: Perform appropriate ncra/nces operation (avg, min, max, ttl, ...) on operands
     nco_opr_drv() is called within the record loop of ncra, and within file loop of nces
     These operations perform some, but not all, of necessary operations for each procedure
     Most arithmetic operations require additional procedures such as normalization be performed after all files/records have been processed
     Some operations require special care at initialization
     This determination is based on the idx_rec variable
     When idx_rec == 0, these operations may perform special initializations
     The exact numeric value of idx_rec does not matter
     What matters is whether it is zero or non-zero */

  /* NCO's paradigm is that coordinate variables represent grid axes
     Reducing such grids to a single-value must be done
     The most representative value of the grid is the average
     The total, min, max, rms, etc. of the grid usually makes no sense
     Users are most interested in the mean grid coordinate
     20130112: The same logic applies to CF-style coordinates, e.g., to variables
     matching CF "bounds", "climatology", and "coordinates" conventions */
  if(var_prc->is_crd_var){
    (void)nco_var_add_tll_ncra(var_prc->type,var_prc->sz,var_prc->has_mss_val,var_prc->mss_val,var_prc->tally,var_prc->wgt_crr,var_prc->wgt_sum,var_prc->val,var_prc_out->val);
    return;
  } /* !var_prc->is_crd_var */

  /* var_prc_out->type and var_prc->type should be equal and thus interchangeable
     var_prc_out->sz and var_prc->sz should be equal and thus interchangeable */
  switch (nco_op_typ){
  case nco_op_min: /* Minimum */
    /* On first loop, simply copy variables from var_prc to var_prc_out */
    if(idx_rec == 0) (void)nco_var_copy(var_prc->type,var_prc->sz,var_prc->val,var_prc_out->val); else (void)nco_var_min_bnr(var_prc_out->type,var_prc_out->sz,var_prc->has_mss_val,var_prc->mss_val,var_prc->val,var_prc_out->val);
    break;
  case nco_op_max: /* Maximum */
    /* On first loop, simply copy variables from var_prc to var_prc_out */
    if(idx_rec == 0) (void)nco_var_copy(var_prc->type,var_prc->sz,var_prc->val,var_prc_out->val); else (void)nco_var_max_bnr(var_prc_out->type,var_prc_out->sz,var_prc->has_mss_val,var_prc->mss_val,var_prc->val,var_prc_out->val);
    break;
  case nco_op_mabs: /* Maximum absolute value */
    /* Always take the absolute value of the fresh input
       Then, on first loop, copy variable from var_prc to var_prc_out like min and max
       Following loops, do comparative maximum after taking absolute */
    (void)nco_var_abs(var_prc->type,var_prc->sz,var_prc->has_mss_val,var_prc->mss_val,var_prc->val);
    if(idx_rec == 0) (void)nco_var_copy(var_prc->type,var_prc->sz,var_prc->val,var_prc_out->val); else (void)nco_var_max_bnr(var_prc_out->type,var_prc_out->sz,var_prc->has_mss_val,var_prc->mss_val,var_prc->val,var_prc_out->val);
    break;
  case nco_op_mebs: /* Mean absolute value */
    /* Always take the absolute value of the fresh input
       Every loop add and increment tally like avg, sqrt, sqravg */
    (void)nco_var_abs(var_prc->type,var_prc->sz,var_prc->has_mss_val,var_prc->mss_val,var_prc->val);
    (void)nco_var_add_tll_ncra(var_prc->type,var_prc->sz,var_prc->has_mss_val,var_prc->mss_val,var_prc->tally,var_prc->wgt_crr,var_prc->wgt_sum,var_prc->val,var_prc_out->val);
    break;
  case nco_op_mibs: /* Mean absolute value */
    /* NOTE(review): comment above says "Mean" but the case computes the
       MINIMUM absolute value, consistent with the code below
       Always take the absolute value of the fresh input
       Then, on first loop, copy variable from var_prc to var_prc_out like min and max
       Following loops, do comparative minimum after taking absolute value */
    (void)nco_var_abs(var_prc->type,var_prc->sz,var_prc->has_mss_val,var_prc->mss_val,var_prc->val);
    if(idx_rec == 0) (void)nco_var_copy(var_prc->type,var_prc->sz,var_prc->val,var_prc_out->val); else (void)nco_var_min_bnr(var_prc_out->type,var_prc_out->sz,var_prc->has_mss_val,var_prc->mss_val,var_prc->val,var_prc_out->val);
    break;
  case nco_op_tabs: /* Total absolute value */
    /* Same as ttl but take absolute first */
    (void)nco_var_abs(var_prc->type,var_prc->sz,var_prc->has_mss_val,var_prc->mss_val,var_prc->val);
    if(idx_rec == 0) (void)nco_var_copy_tll(var_prc->type,var_prc->sz,var_prc->has_mss_val,var_prc->mss_val,var_prc->tally,var_prc->val,var_prc_out->val); else (void)nco_var_add_tll_ncra(var_prc->type,var_prc->sz,var_prc->has_mss_val,var_prc->mss_val,var_prc->tally,var_prc->wgt_crr,var_prc->wgt_sum,var_prc->val,var_prc_out->val);
    break;
  case nco_op_ttl: /* Total */
    /* NB: Copying input to output on first loop for nco_op_ttl, in similar manner to nco_op_[max/min], can work
       However, copying with nco_var_copy() would not change the tally variable, leaving it equal to zero
       Then an extra step would be necessary to set tally equal to one where missing values were not present
       Otherwise, e.g., ensemble averages of one file would never have non-zero tallies
       Hence, use special nco_var_copy_tll() function to copy and change tally only in first loop iteration
       This way, tally is self-consistent with var_prc_out at all times
       Moreover, running total must never be set to missing_value, because subsequent additions
       (with nco_var_add_tll_ncra()) only check new addend (not running sum) against missing value.
       Hence (as of 20120521) nco_var_copy_tll() specifically resets sum to zero rather than to missing value
       Parent function (e.g., ncra.c) must post-process ttl buffers nco_op_ttl with nco_var_tll_zro_mss_val() */
    if(idx_rec == 0) (void)nco_var_copy_tll(var_prc->type,var_prc->sz,var_prc->has_mss_val,var_prc->mss_val,var_prc->tally,var_prc->val,var_prc_out->val); else (void)nco_var_add_tll_ncra(var_prc->type,var_prc->sz,var_prc->has_mss_val,var_prc->mss_val,var_prc->tally,var_prc->wgt_crr,var_prc->wgt_sum,var_prc->val,var_prc_out->val);
    break;
  case nco_op_avg: /* Average */
  case nco_op_sqrt: /* Squareroot will produce the squareroot of the mean */
  case nco_op_sqravg: /* Square of the mean */
    /* These operations all require subsequent normalization, where degenerate tallies are accounted for
       Thus, they all call nco_var_add_tll_ncra() every iteration, without special treatment on first iteration */
    (void)nco_var_add_tll_ncra(var_prc->type,var_prc->sz,var_prc->has_mss_val,var_prc->mss_val,var_prc->tally,var_prc->wgt_crr,var_prc->wgt_sum,var_prc->val,var_prc_out->val);
    break;
  case nco_op_rms: /* Root mean square */
  case nco_op_rmssdn: /* Root mean square normalized by N-1 */
  case nco_op_avgsqr: /* Mean square */
    /* Square values in var_prc first */
    nco_var_mlt(var_prc->type,var_prc->sz,var_prc->has_mss_val,var_prc->mss_val,var_prc->val,var_prc->val);
    /* Sum the squares */
    (void)nco_var_add_tll_ncra(var_prc_out->type,var_prc_out->sz,var_prc->has_mss_val,var_prc->mss_val,var_prc->tally,var_prc->wgt_crr,var_prc->wgt_sum,var_prc->val,var_prc_out->val);
    break;
  default: nco_dfl_case_generic_err(); break; /* [enm] Nil or undefined operation type */
  } /* end switch */
} /* end nco_opr_drv() */

const char * /* O [enm] Arithmetic operation */
nco_op_typ_cf_sng /* [fnc] Convert arithmetic operation type enum to string */
(const int nco_op_typ) /* I [enm] Arithmetic operation type */
{
  /* Purpose: Convert arithmetic operation type enum to string for use in CF Cell Methods
     http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/build/cf-conventions.html#cell-methods */
  switch(nco_op_typ){
  case nco_op_avg: return "mean"; break; /* [enm] Average */
  case nco_op_min: return "minimum"; break; /* [enm] Minimum value */
  case nco_op_max: return "maximum"; break; /* [enm] Maximum value */
  case nco_op_ttl: return "sum"; break; /* [enm] Linear sum */
  case nco_op_tabs: return "sum_absolute_value"; break; /* [enm] Total absolute value */
  case nco_op_mabs: return "maximum_absolute_value"; break; /* [enm] Maximum absolute value */
  case nco_op_mebs: return "mean_absolute_value"; break; /* [enm] Mean absolute value */
  case nco_op_mibs: return "minimum_absolute_value"; break; /* [enm] Minimum absolute value */
  case nco_op_sqravg: return "square_of_mean"; break; /* [enm] Square of mean */
  case nco_op_avgsqr: return "variance"; break; /* [enm] Mean of sum of squares */
  case nco_op_sqrt: return "square_root_of_mean"; break; /* [enm] Square root of mean */
  case nco_op_rms: return "root_mean_square"; break; /* [enm] Root-mean-square (normalized by N) */
  case nco_op_rmssdn: return "root_mean_square_nm1"; break; /* [enm] Root-mean square normalized by N-1 */
  case nco_op_add:
  case nco_op_sbt:
  case nco_op_mlt:
  case nco_op_dvd:
  case nco_op_nil:
  default:
    nco_dfl_case_generic_err();
    return "BROKEN"; /* CEWI */
    break; /* [enm] Nil or undefined operation type */
  } /* end switch */
} /* end nco_op_typ_cf_sng() */

int /* O [enm] Arithmetic operation */
nco_op_typ_get /* [fnc] Convert user-specified operation into operation key */
(const char * const nco_op_sng) /* I [sng] User-specified operation */
{
  /* Purpose: Process '-y' command line argument
     Convert user-specified string to enumerated operation type */
  const char fnc_nm[]="nco_op_typ_get()"; /* [sng] Function name */
  char *nco_prg_nm; /* [sng] Program name */
  int nco_prg_id; /* [enm] Program ID */

  nco_prg_nm=nco_prg_nm_get(); /* [sng] Program name */
  nco_prg_id=nco_prg_id_get(); /* [enm] Program ID */

  if(nco_op_sng == NULL){
    /* If nco_op_typ_get() is called when user-specified option string is NULL,
       then operation type may be implied by program name itself */
    if(!strcmp(nco_prg_nm,"ncadd")) return nco_op_add;
    if(!strcmp(nco_prg_nm,"mpncbo")) return nco_op_sbt;
    if(!strcmp(nco_prg_nm,"mpncdiff")) return nco_op_sbt;
    if(!strcmp(nco_prg_nm,"ncbo")) return nco_op_sbt;
    if(!strcmp(nco_prg_nm,"ncdiff")) return nco_op_sbt;
    if(!strcmp(nco_prg_nm,"ncsub")) return nco_op_sbt;
    if(!strcmp(nco_prg_nm,"ncsubtract")) return nco_op_sbt;
    if(!strcmp(nco_prg_nm,"ncmult")) return nco_op_mlt;
    if(!strcmp(nco_prg_nm,"ncmultiply")) return nco_op_mlt;
    if(!strcmp(nco_prg_nm,"ncdivide")) return nco_op_dvd;
    (void)fprintf(stderr,"%s: ERROR %s reports empty user-specified operation string in conjunction with unknown or ambiguous executable name %s\n",nco_prg_nm,fnc_nm,nco_prg_nm);
    nco_exit(EXIT_FAILURE);
  } /* endif */

  /* Accept each operation under its abbreviation and its long synonyms */
  if(!strcmp(nco_op_sng,"avg") || !strcmp(nco_op_sng,"average") || !strcmp(nco_op_sng,"mean")) return nco_op_avg;
  if(!strcmp(nco_op_sng,"avgsqr")) return nco_op_avgsqr;
  if(!strcmp(nco_op_sng,"mabs") || !strcmp(nco_op_sng,"maximum_absolute_value")) return nco_op_mabs;
  if(!strcmp(nco_op_sng,"mebs") || !strcmp(nco_op_sng,"mean_absolute_value")) return nco_op_mebs;
  if(!strcmp(nco_op_sng,"mibs") || !strcmp(nco_op_sng,"minimum_absolute_value")) return nco_op_mibs;
  if(!strcmp(nco_op_sng,"max") || !strcmp(nco_op_sng,"maximum")) return nco_op_max;
  if(!strcmp(nco_op_sng,"min") || !strcmp(nco_op_sng,"minimum")) return nco_op_min;
  if(!strcmp(nco_op_sng,"rms") || !strcmp(nco_op_sng,"root-mean-square")) return nco_op_rms;
  if(!strcmp(nco_op_sng,"rmssdn")) return nco_op_rmssdn;
  if(!strcmp(nco_op_sng,"sqravg")) return nco_op_sqravg;
  if(!strcmp(nco_op_sng,"sqrt") || !strcmp(nco_op_sng,"square-root")) return nco_op_sqrt;
  if(!strcmp(nco_op_sng,"total") || !strcmp(nco_op_sng,"ttl") || !strcmp(nco_op_sng,"sum")) return nco_op_ttl;
  if(!strcmp(nco_op_sng,"tabs") || !strcmp(nco_op_sng,"ttlabs") || !strcmp(nco_op_sng,"sumabs")) return nco_op_tabs;
  if(!strcmp(nco_op_sng,"add") || !strcmp(nco_op_sng,"+") || !strcmp(nco_op_sng,"addition")) return nco_op_add;
  if(!strcmp(nco_op_sng,"sbt") || !strcmp(nco_op_sng,"-") || !strcmp(nco_op_sng,"dff") || !strcmp(nco_op_sng,"diff") || !strcmp(nco_op_sng,"sub") || !strcmp(nco_op_sng,"subtract") || !strcmp(nco_op_sng,"subtraction")) return nco_op_sbt;
  if(!strcmp(nco_op_sng,"dvd") || !strcmp(nco_op_sng,"/") || !strcmp(nco_op_sng,"divide") || !strcmp(nco_op_sng,"division")) return nco_op_dvd;
  if(!strcmp(nco_op_sng,"mlt") || !strcmp(nco_op_sng,"*") || !strcmp(nco_op_sng,"mult") || !strcmp(nco_op_sng,"multiply") || !strcmp(nco_op_sng,"multiplication")) return nco_op_mlt;

  (void)fprintf(stderr,"%s: ERROR %s reports unknown user-specified operation type \"%s\"\n",nco_prg_nm,fnc_nm,nco_op_sng);
  (void)fprintf(stderr,"%s: HINT Valid operation type (op_typ) choices:\n",nco_prg_nm);
  if(nco_prg_id == ncbo) (void)fprintf(stderr,"addition: add,+,addition\nsubtraction: sbt,-,dff,diff,sub,subtract,subtraction\nmultiplication: mlt,*,mult,multiply,multiplication\ndivision: dvd,/,divide,division\n");
  /* NOTE(review): HINT below says "mibs or maximum_absolute_value" — per the
     parser above, mibs corresponds to minimum_absolute_value; the message
     text looks like a typo but is left unchanged here */
  else (void)fprintf(stderr,"min or minimum, max or maximum, mabs or maximum_absolute_value, mebs or mean_absolute_value, mibs or maximum_absolute_value, tabs or ttlabs or sumabs, ttl or total or sum, avg or average or mean, sqrt or square-root, sqravg, avgsqr, rms or root-mean-square, rmssdn\n");
  nco_exit(EXIT_FAILURE);
  return False; /* Statement should not be reached */
} /* end nco_op_typ_get() */

int /* O [enm] Relational operation */
nco_op_prs_rlt /* [fnc] Convert Fortran abbreviation for relational operator into NCO operation key */
(const char * const op_sng) /* I [sng] Fortran representation of relational operator */
{
  /* Purpose: Convert Fortran
abbreviation for relational operator into NCO operation key */ /* Classify the relation */ if(!strcmp(op_sng,"eq")){ return nco_op_eq; }else if(!strcmp(op_sng,"ne")){ return nco_op_ne; }else if(!strcmp(op_sng,"lt")){ return nco_op_lt; }else if(!strcmp(op_sng,"gt")){ return nco_op_gt; }else if(!strcmp(op_sng,"le")){ return nco_op_le; }else if(!strcmp(op_sng,"ge")){ return nco_op_ge; }else{ (void)fprintf(stdout,"%s: ERROR %s not registered in nco_op_prs_rlt()\n",nco_prg_nm_get(),op_sng); nco_exit(EXIT_FAILURE); } /* end else */ /* Some compilers, e.g., SGI cc, need return statement to end non-void functions */ return False; /* Statement should not be reached */ } /* end nco_op_prs_rlt() */ void vec_set /* [fnc] Fill every value of first operand with value of second operand */ (const nc_type type, /* I [enm] netCDF type of operand */ const long sz, /* I [nbr] size (in elements) of operand */ ptr_unn op1, /* I [sct] Values of first operand */ const double op2) /* I [frc] Value to fill vector with */ { /* Purpose: Fill every value of first operand with value of second operand */ long idx; /* Typecast pointer to values before access */ (void)cast_void_nctype(type,&op1); switch(type){ case NC_FLOAT: for(idx=0;idx<sz;idx++) op1.fp[idx]=(float)op2; break; case NC_DOUBLE: for(idx=0;idx<sz;idx++) op1.dp[idx]=op2; break; case NC_INT: for(idx=0;idx<sz;idx++) op1.ip[idx]=(nco_int)lrint(op2); /* Coerce to avoid C++ compiler assignment warning */ break; case NC_SHORT: for(idx=0;idx<sz;idx++) op1.sp[idx]=(nco_short)lrint(op2); /* Coerce to avoid C++ compiler assignment warning */ break; case NC_USHORT: for(idx=0;idx<sz;idx++) op1.usp[idx]=(nco_ushort)lrint(op2); /* Coerce to avoid C++ compiler assignment warning */ break; case NC_UINT: for(idx=0;idx<sz;idx++) op1.uip[idx]=(nco_uint)lrint(op2); /* Coerce to avoid C++ compiler assignment warning */ break; case NC_INT64: for(idx=0;idx<sz;idx++) op1.i64p[idx]=(nco_int64)llrint(op2); /* Coerce to avoid C++ compiler assignment warning */ 
break; case NC_UINT64: for(idx=0;idx<sz;idx++) op1.ui64p[idx]=(nco_uint64)llrint(op2); /* Coerce to avoid C++ compiler assignment warning */ break; case NC_BYTE: for(idx=0;idx<sz;idx++) op1.bp[idx]=(nco_byte)llrint(op2); /* Coerce to avoid C++ compiler assignment warning */ break; case NC_UBYTE: for(idx=0;idx<sz;idx++) op1.ubp[idx]=(nco_ubyte)llrint(op2); /* Coerce to avoid C++ compiler assignment warning */ break; case NC_CHAR: break; /* Do nothing */ case NC_STRING: break; /* Do nothing */ default: nco_dfl_case_nc_type_err(); break; } /* end switch */ /* NB: it is not neccessary to un-typecast pointers to values after access because we have only operated on local copies of them. */ } /* end vec_set() */ void nco_zero_long /* [fnc] Zero all values of long array */ (const long sz, /* I [nbr] Size (in elements) of operand */ long * restrict const op1) /* I/O [nbr] Array to be zeroed */ { /* Purpose: Zero all values of long array */ /* Presumably this old method used until 20050321, and then again after 20120330, is slower than memset() because of pointer de-referencing. However, it does have the virtue of being correct. */ if(op1 == NULL){ (void)fprintf(stdout,"%s: ERROR nco_zero_long() asked to zero NULL pointer\n",nco_prg_nm_get()); nco_exit(EXIT_FAILURE); } /* endif */ size_t sz_byt; /* [B] Number of bytes in variable buffer */ sz_byt=(size_t)sz*sizeof(long); (void)memset((void *)op1,0,sz_byt); } /* end nco_zero_long() */ void nco_set_long /* [fnc] Set all values of long array */ (const long sz, /* I [nbr] Size (in elements) of operand */ const long val, /* I [] Number to set array to */ long * restrict const op1) /* I/O [nbr] Array to be set */ { /* Purpose: Set all values of long array to input value */ long idx; if(op1 == NULL){ (void)fprintf(stdout,"%s: ERROR nco_set_long() asked to set NULL pointer\n",nco_prg_nm_get()); nco_exit(EXIT_FAILURE); } /* endif */ for(idx=0;idx<sz;idx++) op1[idx]=val; } /* end nco_set_long() */
pi.c
#include <omp.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>

/* Example use:
     printf(" checking error diff ratio \n");
     diff_ratio (error, error_ref, 5); // 6 is better, 7 is very restrictive
*/

/* Verify that val agrees with ref to the requested number of significant
   digits.  Prints a diagnostic and aborts via assert() on failure.
   Returns the relative difference |val - ref| / |ref|. */
double diff_ratio (double val, double ref, int significant_digits)
{
  double rel_err;
  double bound;

  assert (significant_digits >= 1);

  rel_err = fabs(val - ref) / fabs(ref);
  bound = pow (0.1, significant_digits); /* i.e. 10^(-significant_digits) */

  /* report before asserting so the offending values are visible */
  if (rel_err >= bound)
    printf("value :%E ref_value: %E diff_ratio: %E >= upper_limit: %E \n",val, ref, rel_err, bound);
  assert ( rel_err < bound);

  return rel_err;
}

int num_steps = 10000;

/* Midpoint-rule integration of 4/(1+x^2) on [0,1], which equals pi. */
int main()
{
  const double step = 1.0/(double) num_steps;
  double sum = 0.0;
  double pi;
  int i;

#pragma omp parallel for reduction(+:sum) schedule(static)
  for (i = 0; i < num_steps; i++)
  {
    const double x = (i + 0.5) * step; /* midpoint of sub-interval i */
    sum += 4.0 / (1.0 + x * x);
  }

  pi = step * sum;
  printf("%f, diff_ratio=%f\n", pi, diff_ratio (pi, 3.141593,6));
  return 0;
}
GB_unop__identity_uint8_int32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_uint8_int32)
// op(A') function:  GB (_unop_tran__identity_uint8_int32)

// C type:   uint8_t
// A type:   int32_t
// cast:     uint8_t cij = (uint8_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int32_t

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    uint8_t z = (uint8_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint8_t z = (uint8_t) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the identity operator with an int32_t -> uint8_t typecast to each
// of the anz entries of Ax, writing the result into Cx.
GrB_Info GB (_unop_apply__identity_uint8_int32)
(
    uint8_t *Cx,                // Cx and Ax may be aliased
    const int32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // sparse/hyper/full case: every position 0..anz-1 holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int32_t aij = Ax [p] ;
            uint8_t z = (uint8_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions whose bitmap entry is not present
            if (!Ab [p]) continue ;
            int32_t aij = Ax [p] ;
            uint8_t z = (uint8_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel itself lives in GB_unop_transpose.c, which is
// specialized here via the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_uint8_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
laplace2d-04c.c
/*
 *  Copyright 2012 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#include <math.h>
#include <string.h>
#include <stdio.h>
#include <omp.h>

#define NN 4096
#define NM 4096

double A[NN][NM];
double Anew[NN][NM];

/* Jacobi relaxation on an NN x NM grid, offloaded with OpenMP target
   directives.  Iterates until the max per-cell change drops below tol
   or iter_max sweeps have been performed. */
int main(int argc, char** argv)
{
    const int n = NN;
    const int m = NM;
    const int iter_max = 200;
    const double tol = 1.0e-6;

    double error = 1.0;

    /* zero field with a unit Dirichlet boundary along column 0 */
    memset(A, 0, n * m * sizeof(double));
    memset(Anew, 0, n * m * sizeof(double));

    for (int row = 0; row < n; row++)
    {
        A[row][0] = 1.0;
        Anew[row][0] = 1.0;
    }

    printf("Jacobi relaxation Calculation: %d x %d mesh\n", n, m);

    const double t_begin = omp_get_wtime();
    int iter = 0;

    /* keep both grids resident on the device across all sweeps;
       Anew is device-only scratch, A is copied in and back out */
#pragma omp target data map(alloc:Anew) map(A)
    while ( error > tol && iter < iter_max )
    {
        error = 0.0;

        /* stencil sweep: each interior cell becomes the 4-neighbour average */
#pragma omp target teams distribute parallel for collapse(2) reduction(max:error)
        for (int row = 1; row < n-1; row++)
        {
            for (int col = 1; col < m-1; col++)
            {
                Anew[row][col] = 0.25 * ( A[row][col+1] + A[row][col-1]
                                        + A[row-1][col] + A[row+1][col]);
                error = fmax( error, fabs(Anew[row][col] - A[row][col]));
            }
        }

        /* write the updated interior back into A for the next sweep */
#pragma omp target teams distribute parallel for collapse(2)
        for (int row = 1; row < n-1; row++)
        {
            for (int col = 1; col < m-1; col++)
            {
                A[row][col] = Anew[row][col];
            }
        }

        if (iter % 100 == 0) printf("%5d, %0.6f\n", iter, error);

        iter++;
    }

    const double t_end = omp_get_wtime();
    printf(" total: %f s\n", (t_end - t_begin));

    return 0;
}
trans_gain.c
/* Daala video codec Copyright (c) 2013 Daala project contributors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.*/ /* 1D coding gain (dB) ********************** AR p=.95 4x4 8x8 16x16 ------------------------------------------ KLT 7.5825 8.8462 9.4781 DCT 7.5701 8.8259 9.4555 CDF(9/7) 8.4687 9.4592 9.7866 LappedKLT 8.5633 9.4908 9.8951 LappedDCT 8.5523 9.4871 9.8929 Subset 1 4x4 8x8 16x16 ------------------------------------------ KLT original 8.7714 10.2588 11.0039 collapsed 8.7714 10.2588 11.0039 monty 8.7654 10.2628 11.0292 DCT 8.7620 10.2427 10.9861 8.7620 10.2427 10.9861 8.7561 10.2467 11.0115 CDF(9/7) 9.3794 10.5932 11.0685 9.3845 10.5957 11.0825 9.4155 10.6576 11.1965 LappedKLT 9.6276 10.7860 11.3254 9.6277 10.7867 11.3296 9.6295 10.8056 11.3722 LappedDCT 9.6213 10.7832 
11.3230 9.6214 10.7839 11.3272 9.6232 10.8028 11.3698 Subset 3 4x4 8x8 16x16 ------------------------------------------ KLT original 10.5669 12.3711 13.2694 collapsed 10.5669 12.3711 13.2694 monty 10.5495 12.3573 13.2729 DCT 10.5546 12.3532 13.2535 10.5547 12.3532 13.2535 10.5373 12.3395 13.2572 CDF(9/7) 11.3102 12.6838 13.1845 11.3106 12.6871 13.2009 11.3389 12.7764 13.4084 LappedKLT 11.6048 13.0138 13.6488 11.6046 13.0136 13.6491 11.5922 13.0126 13.6790 LappedDCT 11.5970 13.0111 13.6464 11.5968 13.0110 13.6467 11.5844 13.0099 13.6766 */ /* 2D coding gain (dB) ********************** AR p=.95 4x4 8x8 16x16 ------------------------------------------ KLT 15.1649 17.6924 18.9562 DCT 15.1403 17.6518 18.9109 CDF(9/7) 16.9374 18.9183 19.5731 LappedKLT 17.1265 18.9816 19.7902 LappedDCT 17.1047 18.9741 19.7858 Subset 1 4x4 8x8 16x16 ------------------------------------------ KLT original 12.4432 ------- ------- collapsed 12.4428 ------- ------- monty 12.4732 13.6167 14.1170 DCT 12.3695 ------- ------- 12.3698 ------- ------- 12.4182 13.5473 14.0536 CDF(9/7) ------- ------- ------- ------- ------- ------- 13.1425 13.8184 14.0110 LappedKLT 13.2807 ------- ------- 13.2808 ------- ------- 13.3452 14.1273 14.4041 LappedDCT 13.2682 ------- ------- 13.2685 ------- ------- 13.3330 14.1215 14.3981 Subset 3 4x4 8x8 16x16 ------------------------------------------ KLT monty 14.9078 16.2416 16.7839 DCT 14.8313 16.1578 16.7221 CDF(9/7) 15.7553 16.4760 16.6656 LappedKLT 15.9763 16.8549 17.1181 LappedDCT 15.9627 16.8507 17.1152 */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <stdlib.h> #include "od_defs.h" #include "od_filter.h" #include "stats_tools.h" #include "trans_tools.h" #define BLOCKSIZE_LOG (4) #define USE_LAPPING (1) #define USE_KLT (1) #define USE_DCT (0) #define USE_WAVELET (0) #define USE_2D (1) #define USE_FILES (1) #define USE_AR95 (0) #define COMPUTE_NATHAN (1) #define PRINT_COV (0) #define BLOCKSIZE (1<<BLOCKSIZE_LOG) #if USE_WAVELET #if BLOCKSIZE_LOG==1 # 
define SUPPORT (20) #else # if BLOCKSIZE_LOG==2 # define SUPPORT (40) # else # if BLOCKSIZE_LOG==3 # define SUPPORT (80) # else # if BLOCKSIZE_LOG==4 # define SUPPORT (160) # else # error "no support configuration for transform size" # endif # endif # endif #endif #else #if USE_LAPPING||COMPUTE_NATHAN /* larger than needed for 'new' covariance code, but it won't alter the answer, just produce a larger than needed covariance matrix. It is needed to make the boundary conditions of the 'old' covariance code match the trans and trans2d utils */ #define SUPPORT (BLOCKSIZE*2) #else #define SUPPORT (BLOCKSIZE) #endif #endif const int *f; typedef void (*ne_fdct_func_1d)(double *_out,const double *_in,int _in_stride); typedef void (*ne_idct_func_1d)(double *_out,int _out_stride,const double *_in); extern const ne_idct_func_1d OD_IDCT_1D_DOUBLE[OD_NBSIZES]; extern const ne_fdct_func_1d OD_FDCT_1D_DOUBLE[OD_NBSIZES]; #if USE_FILES typedef struct { int sz; u_int64_t *n; u_int64_t *acc_i; u_int64_t *acc_j; u_int64_t *acc_ij; double *cov; } cov_state; static void cov_init(cov_state *_this, int _sz){ _this->sz = _sz; _this->n = calloc(_sz,sizeof(*_this->n)); _this->acc_i = calloc(_sz,sizeof(*_this->acc_i)); _this->acc_j = calloc(_sz,sizeof(*_this->acc_j)); _this->acc_ij= calloc(_sz,sizeof(*_this->acc_ij)); _this->cov = calloc(_sz,sizeof(*_this->cov)); } static void cov_clear(cov_state *_this){ if(_this){ if(_this->n) free(_this->n); if(_this->acc_i) free(_this->acc_i); if(_this->acc_j) free(_this->acc_j); if(_this->acc_ij) free(_this->acc_ij); if(_this->cov) free(_this->cov); } } #if USE_2D /* 1D and 2D could both use the same generalized code, but it would be harder to read */ static void cov_accumulate_2d(cov_state *_this, const unsigned char *_data, int _stride, int _w, int _h){ int x,y,i,j; int sz = sqrt(_this->sz); for(i=0;i<sz;i++){ for(j=0;j<sz;j++){ int ij = i*sz+j; for(y=0;y<_h-i;y++){ const unsigned char *di=_data+y*_stride; const unsigned char 
*dj=_data+(y+i)*_stride+j; for(x=0;x<_w-j;x++){ ++_this->n[ij]; _this->acc_i[ij] += di[x]; _this->acc_j[ij] += dj[x]; _this->acc_ij[ij] += di[x]*dj[x]; } } } } } #else static void cov_accumulate_1d(cov_state *_this, const unsigned char *_data, int _stride, int _n){ int i,j; for(i=0;i<_this->sz;i++){ const unsigned char *di=_data; const unsigned char *dj=_data+i*_stride; for(j=0;j<_n-i;j++){ ++_this->n[i]; _this->acc_i[i] += di[j*_stride]; _this->acc_j[i] += dj[j*_stride]; _this->acc_ij[i] += di[j*_stride]*dj[j*_stride]; } } } #endif static void cov_combine(cov_state *_a,const cov_state *_b){ int i; for(i=0;i<_a->sz;i++){ _a->acc_i[i] += _b->acc_i[i]; _a->acc_j[i] += _b->acc_j[i]; _a->acc_ij[i] += _b->acc_ij[i]; _a->n[i] += _b->n[i]; } } static void cov_compute(cov_state *_this){ int i; for(i=0;i<_this->sz;i++) _this->cov[i] = ((double)_this->acc_ij[i] - (double)_this->acc_i[i]* _this->acc_j[i]/_this->n[i])/_this->n[i]; for(i=1;i<_this->sz;i++) _this->cov[i] /= _this->cov[0]; _this->cov[0]=1.; } static void process_files(trans_ctx *_ctx, cov_state *_cov, int _argc, const char *_argv[]){ int ai; #pragma omp parallel for schedule(dynamic) for(ai=1;ai<_argc;ai++){ FILE *fin; video_input vid; video_input_info info; video_input_ycbcr ycbcr; int tid; cov_state *cov; int x0,y0,x1,y1; fin=fopen(_argv[ai],"rb"); if(fin==NULL){ fprintf(stderr,"Could not open '%s' for reading.\n",_argv[ai]); continue; } if(video_input_open(&vid,fin)<0){ fprintf(stderr,"Error reading video info from '%s'.\n",_argv[ai]); continue; } video_input_get_info(&vid,&info); if(video_input_fetch_frame(&vid,ycbcr,NULL)<0){ fprintf(stderr,"Error reading first frame from '%s'.\n",_argv[ai]); continue; } tid=OD_OMP_GET_THREAD; cov=_cov+tid; x0 = info.pic_x; y0 = info.pic_y; x1 = x0 + info.pic_w; y1 = y0 + info.pic_h; fprintf(stderr,"%s\n",_argv[ai]); /* map */ { int stride=ycbcr[0].stride; const unsigned char *data=ycbcr[0].data; #if COMPUTE_NATHAN /* block-based full covariance computation (unlord style) */ 
int nxblocks=info.pic_w>>BLOCKSIZE_LOG; int nyblocks=info.pic_h>>BLOCKSIZE_LOG; trans_ctx *ctx=_ctx+tid; # if USE_2D unsigned char buf[SUPPORT][SUPPORT]; int x,y,i,j; image_ctx_init(&ctx->img,_argv[ai],nxblocks,nyblocks); for(y=0;y<nyblocks*BLOCKSIZE-SUPPORT+1;y++){ for(x=0;x<nxblocks*BLOCKSIZE-SUPPORT+1;x++){ for(j=0;j<SUPPORT;j++){ for(i=0;i<SUPPORT;i++){ buf[j][i]=data[(y0+y+j)*stride+(x0+x+i)]; } } trans_data_add(&ctx->td,(unsigned char *)buf); } } # else unsigned char buf[SUPPORT]; int x,y,z; image_ctx_init(&ctx->img,_argv[ai],nxblocks,nyblocks); /* add the rows */ for(y=0;y<nyblocks*BLOCKSIZE;y++){ for(x=0;x<nxblocks*BLOCKSIZE-SUPPORT+1;x++){ for(z=0;z<SUPPORT;z++){ buf[z]=data[(y+y0)*stride+x+x0+z]; } trans_data_add(&ctx->td,buf); } } /* add the columns */ for(y=0;y<nyblocks*BLOCKSIZE-SUPPORT+1;y++){ for(x=0;x<nxblocks*BLOCKSIZE;x++){ for(z=0;z<SUPPORT;z++){ buf[z]=data[(y0+y+z)*stride+x+x0]; } trans_data_add(&ctx->td,buf); } } # endif #endif /* Direct computation of collapsed covariance matrix (monty style) */ #if USE_2D cov_accumulate_2d(cov,data+y0*stride+x0,stride,x1-x0,y1-y0); #else { int x,y; for(y=y0;y<y1;y++) cov_accumulate_1d(cov,data+y*stride+x0,1,x1-x0); for(x=x0;x<x1;x++) cov_accumulate_1d(cov,data+y0*stride+x,stride,y1-y0); } #endif } video_input_close(&vid); } } #endif #if USE_WAVELET /* some lifting CDF (9/7) wavelet code from Google Code's axonlib */ /* http://code.google.com/p/axonlib/source/browse/trunk/extern/dwt97.c?spec=svn19&r=19 */ /* single stage of decomposition */ static void fwt97_i(double* x,int n){ double temp[SUPPORT]; double a; int i; /* Predict 1 */ a=-1.586134342; for (i=1;i<n-2;i+=2) x[i]+=a*(x[i-1]+x[i+1]); x[n-1]+=2*a*x[n-2]; /* Update 1 */ a=-0.05298011854; for (i=2;i<n;i+=2) x[i]+=a*(x[i-1]+x[i+1]); x[0]+=2*a*x[1]; /* Predict 2 */ a=0.8829110762; for (i=1;i<n-2;i+=2) x[i]+=a*(x[i-1]+x[i+1]); x[n-1]+=2*a*x[n-2]; /* Update 2 */ a=0.4435068522; for (i=2;i<n;i+=2) x[i]+=a*(x[i-1]+x[i+1]); x[0]+=2*a*x[1]; /* Scale */ 
a=1/1.149604398; for (i=0;i<n;i++) { if (i%2) x[i]*=a; else x[i]/=a; } /* Pack */ for (i=0;i<n;i++){ if (i%2==0) temp[i/2]=x[i]; else temp[n/2+i/2]=x[i]; } for (i=0;i<n;i++) x[i]=temp[i]; } /* single stage of reconstruction */ void iwt97_i(double* x,int n){ double temp[SUPPORT]; double a; int i; /* Unpack */ for (i=0;i<n/2;i++){ temp[i*2]=x[i]; temp[i*2+1]=x[i+n/2]; } for (i=0;i<n;i++) x[i]=temp[i]; /* Undo scale */ a=1.149604398; for (i=0;i<n;i++) { if (i%2) x[i]*=a; else x[i]/=a; } /* Undo update 2 */ a=-0.4435068522; for (i=2;i<n;i+=2) x[i]+=a*(x[i-1]+x[i+1]); x[0]+=2*a*x[1]; /* Undo predict 2 */ a=-0.8829110762; for (i=1;i<n-2;i+=2) x[i]+=a*(x[i-1]+x[i+1]); x[n-1]+=2*a*x[n-2]; /* Undo update 1 */ a=0.05298011854; for (i=2;i<n;i+=2) x[i]+=a*(x[i-1]+x[i+1]); x[0]+=2*a*x[1]; /* Undo predict 1 */ a=1.586134342; for (i=1;i<n-2;i+=2) x[i]+=a*(x[i-1]+x[i+1]); x[n-1]+=2*a*x[n-2]; } /* multistage decomposition */ void fwt97(double *out, int n, double *in, int support){ int i=n,j=support,k; while((i&1)==0){ fwt97_i(in,j); i>>=1; for(k=0;k<i;k++) out[i+k] = in[((((j*3)>>1)-i)>>1) + k]; j>>=1; } for(k=0;k<i;k++) out[k] = in[((j-i)>>1) + k]; } /* multistage reconstruction */ void iwt97(double *out, int support, double *in, int n){ int i=n,j=support,k; for(k=0;k<support;k++) out[k]=0; while((i&1)==0){ i>>=1; for(k=0;k<i;k++) out[((((j*3)>>1)-i)>>1) + k]=in[i+k]; j>>=1; } for(k=0;k<i;k++) out[((j-i)>>1) + k]=in[k]; i<<=1; j<<=1; while(j<=support){ iwt97_i(out,j); i<<=1; j<<=1; } } #endif #if USE_KLT void symeigen(double *out, double *cov, int support){ int i; int j; int k; for(i=0;i<support;i++) for(j=0;j<support;j++) out[i*support+j]=i==j; for(;;){ double mod=0.; for(i=0,j=0,k=0;k<support;k++){ int m; for(m=k+1;m<support;m++){ double q; q=fabs(cov[k*support+m]); if(q>mod){ mod=q; i=k; j=m; } } } if(mod<1E-11)break; { double th=0.5*atan2(2*cov[i*support+j],cov[i*support+i]-cov[j*support+j]); double c=cos(th); double s=sin(th); for(k=0;k<support;k++){ double t; 
t=c*cov[k*support+i]+s*cov[k*support+j]; cov[k*support+j]=-s*cov[k*support+i]+c*cov[k*support+j]; cov[k*support+i]=t; } for(k=0;k<support;k++){ double t; t=c*cov[i*support+k]+s*cov[j*support+k]; cov[j*support+k]=-s*cov[i*support+k]+c*cov[j*support+k]; cov[i*support+k]=t; } for(k=0;k<support;k++){ double t; t=c*out[i*support+k]+s*out[j*support+k]; out[j*support+k]=-s*out[i*support+k]+c*out[j*support+k]; out[i*support+k]=t; } } } /* for(j=0;j<BLOCKSIZE;j++)eigenvalue[j]=cov[j][j]; don't need eigenvalues */ } void flap_2d(double out[BLOCKSIZE][BLOCKSIZE], double in[SUPPORT][SUPPORT], const int _f[]){ int i,j; #if USE_LAPPING #if BLOCKSIZE_LOG>=OD_LOG_BSIZE0&&BLOCKSIZE_LOG<OD_LOG_BSIZE0+OD_NBSIZES /* columns */ for(i=SUPPORT/2-BLOCKSIZE;i<SUPPORT/2+BLOCKSIZE;i++){ double work[BLOCKSIZE*2]; for(j=0;j<BLOCKSIZE*2;j++) work[j]=in[j+SUPPORT/2-BLOCKSIZE][i]; (*NE_PRE_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0]) (&work[0],&work[0],_f); (*NE_PRE_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0]) (&work[BLOCKSIZE],&work[BLOCKSIZE],_f); for(j=0;j<BLOCKSIZE*2;j++) in[j+SUPPORT/2-BLOCKSIZE][i]=work[j]; } /* rows */ for(i=SUPPORT/2-BLOCKSIZE;i<SUPPORT/2+BLOCKSIZE;i++){ (*NE_PRE_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0]) (&in[i][SUPPORT/2-BLOCKSIZE],&in[i][SUPPORT/2-BLOCKSIZE],_f); (*NE_PRE_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0]) (&in[i][SUPPORT/2],&in[i][SUPPORT/2],_f); } #else # error "Need a prefilter implementation for this block size." 
#endif #endif for(i=0;i<BLOCKSIZE;i++) for(j=0;j<BLOCKSIZE;j++) out[i][j]=in[i+SUPPORT/2-BLOCKSIZE/2][j+SUPPORT/2-BLOCKSIZE/2]; } void ilap_2d(double out[SUPPORT][SUPPORT], double in[BLOCKSIZE][BLOCKSIZE], const int _f[]){ int i,j; for(i=0;i<SUPPORT;i++) for(j=0;j<SUPPORT;j++) out[i][j]=0; for(i=0;i<BLOCKSIZE;i++) for(j=0;j<BLOCKSIZE;j++) out[i+SUPPORT/2-BLOCKSIZE/2][j+SUPPORT/2-BLOCKSIZE/2]=in[i][j]; #if USE_LAPPING #if BLOCKSIZE_LOG>=OD_LOG_BSIZE0&&BLOCKSIZE_LOG<OD_LOG_BSIZE0+OD_NBSIZES /* columns */ for(i=SUPPORT/2-BLOCKSIZE;i<SUPPORT/2+BLOCKSIZE;i++){ double work[BLOCKSIZE*2]; for(j=0;j<BLOCKSIZE*2;j++) work[j]=out[j+SUPPORT/2-BLOCKSIZE][i]; (*NE_POST_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0]) (&work[0],&work[0],_f); (*NE_POST_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0]) (&work[BLOCKSIZE],&work[BLOCKSIZE],_f); for(j=0;j<BLOCKSIZE*2;j++) out[j+SUPPORT/2-BLOCKSIZE][i]=work[j]; } /* rows */ for(i=SUPPORT/2-BLOCKSIZE;i<SUPPORT/2+BLOCKSIZE;i++){ (*NE_POST_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0]) (&out[i][SUPPORT/2-BLOCKSIZE],&out[i][SUPPORT/2-BLOCKSIZE],_f); (*NE_POST_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0]) (&out[i][SUPPORT/2],&out[i][SUPPORT/2],_f); } #else # error "Need a prefilter implementation for this block size." 
#endif #endif } void flap_4d(double out[BLOCKSIZE*BLOCKSIZE][BLOCKSIZE*BLOCKSIZE], double in[SUPPORT][SUPPORT][SUPPORT][SUPPORT], const int _f[]){ int i,j,k,l; #if USE_LAPPING #if BLOCKSIZE_LOG>=OD_LOG_BSIZE0&&BLOCKSIZE_LOG<OD_LOG_BSIZE0+OD_NBSIZES for(i=SUPPORT/2-BLOCKSIZE;i<SUPPORT/2+BLOCKSIZE;i++){ for(j=SUPPORT/2-BLOCKSIZE;j<SUPPORT/2+BLOCKSIZE;j++){ for(k=SUPPORT/2-BLOCKSIZE;k<SUPPORT/2+BLOCKSIZE;k++){ double work[BLOCKSIZE*2]; /* [ ][i][j][k] */ for(l=0;l<BLOCKSIZE*2;l++) work[l]=in[l+SUPPORT/2-BLOCKSIZE][i][j][k]; (*NE_PRE_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0]) (&work[0],&work[0],_f); (*NE_PRE_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0]) (&work[BLOCKSIZE],&work[BLOCKSIZE],_f); for(l=0;l<BLOCKSIZE*2;l++) in[l+SUPPORT/2-BLOCKSIZE][i][j][k]=work[l]; } } } for(i=SUPPORT/2-BLOCKSIZE;i<SUPPORT/2+BLOCKSIZE;i++){ for(j=SUPPORT/2-BLOCKSIZE;j<SUPPORT/2+BLOCKSIZE;j++){ for(k=SUPPORT/2-BLOCKSIZE;k<SUPPORT/2+BLOCKSIZE;k++){ double work[BLOCKSIZE*2]; /* [i][ ][j][k] */ for(l=0;l<BLOCKSIZE*2;l++) work[l]=in[i][l+SUPPORT/2-BLOCKSIZE][j][k]; (*NE_PRE_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0]) (&work[0],&work[0],_f); (*NE_PRE_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0]) (&work[BLOCKSIZE],&work[BLOCKSIZE],_f); for(l=0;l<BLOCKSIZE*2;l++) in[i][l+SUPPORT/2-BLOCKSIZE][j][k]=work[l]; } } } for(i=SUPPORT/2-BLOCKSIZE;i<SUPPORT/2+BLOCKSIZE;i++){ for(j=SUPPORT/2-BLOCKSIZE;j<SUPPORT/2+BLOCKSIZE;j++){ for(k=SUPPORT/2-BLOCKSIZE;k<SUPPORT/2+BLOCKSIZE;k++){ double work[BLOCKSIZE*2]; /* [i][j][ ][k] */ for(l=0;l<BLOCKSIZE*2;l++) work[l]=in[i][j][l+SUPPORT/2-BLOCKSIZE][k]; (*NE_PRE_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0]) (&work[0],&work[0],_f); (*NE_PRE_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0]) (&work[BLOCKSIZE],&work[BLOCKSIZE],_f); for(l=0;l<BLOCKSIZE*2;l++) in[i][j][l+SUPPORT/2-BLOCKSIZE][k]=work[l]; } } } for(i=SUPPORT/2-BLOCKSIZE;i<SUPPORT/2+BLOCKSIZE;i++){ for(j=SUPPORT/2-BLOCKSIZE;j<SUPPORT/2+BLOCKSIZE;j++){ for(k=SUPPORT/2-BLOCKSIZE;k<SUPPORT/2+BLOCKSIZE;k++){ /* [i][j][k][ 
] */ (*NE_PRE_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0]) (&in[i][j][k][SUPPORT/2-BLOCKSIZE],&in[i][j][k][SUPPORT/2-BLOCKSIZE],_f); (*NE_PRE_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0]) (&in[i][j][k][SUPPORT/2],&in[i][j][k][SUPPORT/2],_f); } } } #else # error "Need a prefilter implementation for this block size." #endif #endif for(i=0;i<BLOCKSIZE;i++) for(j=0;j<BLOCKSIZE;j++) for(k=0;k<BLOCKSIZE;k++) for(l=0;l<BLOCKSIZE;l++) out[i*BLOCKSIZE+j][k*BLOCKSIZE+l]=in [i+SUPPORT/2-BLOCKSIZE/2] [j+SUPPORT/2-BLOCKSIZE/2] [k+SUPPORT/2-BLOCKSIZE/2] [l+SUPPORT/2-BLOCKSIZE/2]; } void gklt_1d(double klt[BLOCKSIZE][BLOCKSIZE], double cov[SUPPORT][SUPPORT], const int *_f){ static double workA[SUPPORT][SUPPORT]; static double workB[BLOCKSIZE][BLOCKSIZE]; int i,j; for(i=0;i<SUPPORT;i++) for(j=0;j<SUPPORT;j++) workA[i][j]=cov[i][j]; flap_2d(workB,workA,_f); symeigen(&klt[0][0],&workB[0][0],BLOCKSIZE); } void gklt_2d(double klt[BLOCKSIZE*BLOCKSIZE][BLOCKSIZE*BLOCKSIZE], double cov[SUPPORT][SUPPORT][SUPPORT][SUPPORT], const int *_f){ static double workA[SUPPORT][SUPPORT][SUPPORT][SUPPORT]; static double workB[BLOCKSIZE*BLOCKSIZE][BLOCKSIZE*BLOCKSIZE]; int i,j,k,l; for(i=0;i<SUPPORT;i++) for(j=0;j<SUPPORT;j++) for(k=0;k<SUPPORT;k++) for(l=0;l<SUPPORT;l++) workA[i][j][k][l]=cov[i][j][k][l]; flap_4d(workB,workA,_f); symeigen(&klt[0][0],&workB[0][0],BLOCKSIZE*BLOCKSIZE); } void gklt_1d_collapsed(double klt[BLOCKSIZE][BLOCKSIZE], double cov[SUPPORT], const int *_f){ static double workA[SUPPORT][SUPPORT]; static double workB[BLOCKSIZE][BLOCKSIZE]; int i,j; for(i=0;i<SUPPORT;i++) for(j=0;j<SUPPORT;j++) workA[i][j]=cov[abs(i-j)]; flap_2d(workB,workA,_f); symeigen(&klt[0][0],&workB[0][0],BLOCKSIZE); } void gklt_2d_collapsed(double klt[BLOCKSIZE*BLOCKSIZE][BLOCKSIZE*BLOCKSIZE], double cov[SUPPORT][SUPPORT], const int *_f){ static double workA[SUPPORT][SUPPORT][SUPPORT][SUPPORT]; static double workB[BLOCKSIZE*BLOCKSIZE][BLOCKSIZE*BLOCKSIZE]; int i,j,k,l; for(i=0;i<SUPPORT;i++) 
for(j=0;j<SUPPORT;j++) for(k=0;k<SUPPORT;k++) for(l=0;l<SUPPORT;l++) workA[i][j][k][l]=cov[abs(i-k)][abs(j-l)]; flap_4d(workB,workA,_f); symeigen(&klt[0][0],&workB[0][0],BLOCKSIZE*BLOCKSIZE); } void fklt(double *out, double *in, double *klt, int support){ int i,j; for(i=0;i<support;i++){ double acc=0.; for(j=0;j<support;j++) acc += klt[i*support+j]*in[j]; out[i]=acc; } } void iklt(double *out, double *in, double *klt, int support){ int i,j; for(i=0;i<support;i++){ double acc=0.; for(j=0;j<support;j++) acc+=klt[j*support+i]*in[j]; out[i]=acc; } } #endif void b_analysis_1d(double *_out,int _out_stride,const double *_in,int _in_stride, const int *_f, double _klt[BLOCKSIZE][BLOCKSIZE]){ int j; double t[SUPPORT]; double w[BLOCKSIZE]; for(j=0;j<SUPPORT;j++) t[j]=_in[j*_in_stride]; #if USE_WAVELET fwt97(w,BLOCKSIZE,t,SUPPORT); for(j=0;j<BLOCKSIZE;j++){ _out[j*_out_stride]=w[j]; } #else # if USE_LAPPING # if BLOCKSIZE_LOG>=OD_LOG_BSIZE0&&BLOCKSIZE_LOG<OD_LOG_BSIZE0+OD_NBSIZES (*NE_PRE_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0]) (&t[SUPPORT/2-BLOCKSIZE],&t[SUPPORT/2-BLOCKSIZE],_f); (*NE_PRE_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0]) (&t[SUPPORT/2],&t[SUPPORT/2],_f); # else # error "Need a prefilter implementation for this block size." # endif # endif # if USE_KLT fklt(&w[0],&t[SUPPORT/2-BLOCKSIZE/2],&_klt[0][0],BLOCKSIZE); # elif USE_DCT # if BLOCKSIZE_LOG>=OD_LOG_BSIZE0&&BLOCKSIZE_LOG<OD_LOG_BSIZE0+OD_NBSIZES (*OD_FDCT_1D_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0]) (w,&t[SUPPORT/2-BLOCKSIZE/2],1); # else # error "Need an fDCT implementation for this block size." 
# endif # else for(j=0;j<BLOCKSIZE;j++) w[j]=t[j+SUPPORT/2-BLOCKSIZE/2]; # endif for(j=0;j<BLOCKSIZE;j++) _out[j*_out_stride]=w[j]; #endif } void b_analysis_2d(double *_out,int _out_stride_i,int _out_stride_j, const double *_in,int _in_stride_i,int _in_stride_j, const int *_f, double _klt[BLOCKSIZE*BLOCKSIZE][BLOCKSIZE*BLOCKSIZE]){ #if USE_KLT /* KLT is a non-separable 2D transform */ double lap[SUPPORT][SUPPORT]; double work[BLOCKSIZE][BLOCKSIZE]; double temp[BLOCKSIZE][BLOCKSIZE]; int i,j; for(i=0;i<SUPPORT;i++) for(j=0;j<SUPPORT;j++) lap[i][j]=*(_in+i*_in_stride_i+j*_in_stride_j); flap_2d(work,lap,_f); fklt(&temp[0][0],&work[0][0],&_klt[0][0],BLOCKSIZE*BLOCKSIZE); for(i=0;i<BLOCKSIZE;i++) for(j=0;j<BLOCKSIZE;j++) *(_out+i*_out_stride_i+j*_out_stride_j)=temp[i][j]; #else double work[SUPPORT][BLOCKSIZE]; int i; /* DCT and DWT are separable 1D transforms */ /* lapping performed inside b_analysis */ for(i=0;i<SUPPORT;i++) b_analysis_1d(&work[i][0],1,_in+i*_in_stride_i,_in_stride_j,_f,NULL); for(i=0;i<BLOCKSIZE;i++) b_analysis_1d(_out+_out_stride_i*i,_out_stride_j,&work[0][i],BLOCKSIZE,_f,NULL); #endif } void b_synthesis_1d(double *_out,int _out_stride,const double *_in,int _in_stride, const int *_f, double _klt[BLOCKSIZE][BLOCKSIZE]){ int j; double w[SUPPORT]; double t[SUPPORT]; for(j=0;j<SUPPORT;j++){ t[j]=0; w[j]=0; } #if USE_WAVELET for(j=0;j<BLOCKSIZE;j++) w[j]=_in[j*_in_stride]; iwt97(t,SUPPORT,w,BLOCKSIZE); #else for(j=0;j<BLOCKSIZE;j++){ w[SUPPORT/2-BLOCKSIZE/2+j]=_in[j*_in_stride]; } # if USE_KLT iklt(&t[SUPPORT/2-BLOCKSIZE/2],&w[SUPPORT/2-BLOCKSIZE/2],&_klt[0][0],BLOCKSIZE); # elif USE_DCT # if BLOCKSIZE_LOG>=OD_LOG_BSIZE0&&BLOCKSIZE_LOG<OD_LOG_BSIZE0+OD_NBSIZES (*OD_IDCT_1D_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0]) (&t[SUPPORT/2-BLOCKSIZE/2],1,&w[SUPPORT/2-BLOCKSIZE/2]); # else # error "Need an iDCT implementation for this block size." 
# endif # else for(j=0;j<SUPPORT;j++) t[j]=w[j]; # endif # if USE_LAPPING # if BLOCKSIZE_LOG>=OD_LOG_BSIZE0&&BLOCKSIZE_LOG<OD_LOG_BSIZE0+OD_NBSIZES (*NE_POST_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0]) (&t[SUPPORT/2-BLOCKSIZE],&t[SUPPORT/2-BLOCKSIZE],_f); (*NE_POST_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0]) (&t[SUPPORT/2],&t[SUPPORT/2],_f); # else # error "Need a postfilter implementation for this block size." # endif # endif #endif for(j=0;j<SUPPORT;j++) _out[j*_out_stride]=t[j]; } void b_synthesis_2d(double *_out,int _out_stride_i,int _out_stride_j, const double *_in,int _in_stride_i,int _in_stride_j, const int *_f, double _klt[BLOCKSIZE*BLOCKSIZE][BLOCKSIZE*BLOCKSIZE]){ #if USE_KLT /* KLT is a non-separable 2D transform */ double temp[BLOCKSIZE][BLOCKSIZE]; double work[BLOCKSIZE][BLOCKSIZE]; double lap[SUPPORT][SUPPORT]; int i,j; for(i=0;i<BLOCKSIZE;i++) for(j=0;j<BLOCKSIZE;j++) temp[i][j]=*(_in+i*_in_stride_i+j*_in_stride_j); iklt(&work[0][0],&temp[0][0],&_klt[0][0],BLOCKSIZE*BLOCKSIZE); ilap_2d(lap,work,_f); for(i=0;i<SUPPORT;i++) for(j=0;j<SUPPORT;j++) *(_out+i*_out_stride_i+j*_out_stride_j)=lap[i][j]; #else double work[SUPPORT][BLOCKSIZE]; int i; /* DCT and DWT are separable 1D transforms */ /* lapping performed inside b_analysis */ for(i=0;i<BLOCKSIZE;i++) b_synthesis_1d(&work[0][i],BLOCKSIZE,_in+i*_in_stride_i,_in_stride_j,_f,NULL); for(i=0;i<SUPPORT;i++) b_synthesis_1d(_out+_out_stride_i*i,_out_stride_j,&work[i][0],1,_f,NULL); #endif } #if USE_2D static double cg_2d_i(double rggt[SUPPORT][SUPPORT][BLOCKSIZE][BLOCKSIZE], const int *_f, double _klt[BLOCKSIZE*BLOCKSIZE][BLOCKSIZE*BLOCKSIZE]){ double r[BLOCKSIZE][BLOCKSIZE][BLOCKSIZE][BLOCKSIZE]; double s[SUPPORT][BLOCKSIZE]; double ggrggt[BLOCKSIZE][BLOCKSIZE][BLOCKSIZE][BLOCKSIZE]; double cg=0; int i; int j; int v; int u; int k; int l; /* G1*P*G2*R*(G2*P*G1)^T */ for(v=0;v<BLOCKSIZE;v++) for(j=0;j<BLOCKSIZE;j++) b_analysis_2d(&ggrggt[v][j][0][0], 1,BLOCKSIZE, &rggt[0][0][v][j], BLOCKSIZE*BLOCKSIZE*SUPPORT, 
BLOCKSIZE*BLOCKSIZE, f,_klt); /* H1*P*H2 */ for(i=0;i<BLOCKSIZE;i++) for(j=0;j<BLOCKSIZE;j++) for(k=0;k<BLOCKSIZE;k++) for(l=0;l<BLOCKSIZE;l++) r[i][j][k][l] = (i*BLOCKSIZE+j==k*BLOCKSIZE+l)?1:0; for(i=0;i<BLOCKSIZE;i++) for(j=0;j<BLOCKSIZE;j++) b_synthesis_2d(&rggt[0][0][i][j], BLOCKSIZE*BLOCKSIZE, SUPPORT*BLOCKSIZE*BLOCKSIZE, &r[i][j][0][0], BLOCKSIZE,1, _f,_klt); /* ((H1*P*H2)^T*H1*P*H2)_ii */ for(i=0;i<BLOCKSIZE;i++){ for(j=0;j<BLOCKSIZE;j++){ s[i][j]=0; for(u=0;u<SUPPORT;u++){ for(v=0;v<SUPPORT;v++){ s[i][j]+=rggt[u][v][i][j]*rggt[u][v][i][j]; } } } } /* (G1*P*G2*R*(G1*P*G2)^T)_ii * ((H1*P*H2)^T*H1*P*H2)_ii */ for(i=0;i<BLOCKSIZE;i++) for(j=0;j<BLOCKSIZE;j++) cg-=10*log10(ggrggt[i][j][i][j]*s[i][j]); return cg/(BLOCKSIZE*BLOCKSIZE); } double cg_2d(double _in[SUPPORT][SUPPORT][SUPPORT][SUPPORT], const int *_f){ int v; int j; double ret; double (*rggt)[SUPPORT][BLOCKSIZE][BLOCKSIZE] = malloc(SUPPORT*SUPPORT*BLOCKSIZE*BLOCKSIZE*sizeof(****rggt)); double klt[BLOCKSIZE*BLOCKSIZE][BLOCKSIZE*BLOCKSIZE]; #if USE_KLT gklt_2d(klt,_in,_f); #endif /* R*(G2*P*G1)^T */ for(v=0;v<SUPPORT;v++) for(j=0;j<SUPPORT;j++) b_analysis_2d(&rggt[v][j][0][0], 1,BLOCKSIZE, &_in[0][0][v][j], SUPPORT*SUPPORT*SUPPORT, SUPPORT*SUPPORT, _f,klt); ret = cg_2d_i(rggt,f,klt); free(rggt); return ret; } double cg_2d_collapsed(double _in[SUPPORT][SUPPORT],const int *_f){ int v; int u; int j; int i; double ret; double r[SUPPORT][SUPPORT]; double (*rggt)[SUPPORT][BLOCKSIZE][BLOCKSIZE] = malloc(SUPPORT*SUPPORT*BLOCKSIZE*BLOCKSIZE*sizeof(****rggt)); double klt[BLOCKSIZE*BLOCKSIZE][BLOCKSIZE*BLOCKSIZE]; #if USE_KLT gklt_2d_collapsed(klt,_in,_f); #endif /* R*(G2*P*G1)^T */ for(v=0;v<SUPPORT;v++){ for(j=0;j<SUPPORT;j++){ for(u=0;u<SUPPORT;u++) for(i=0;i<SUPPORT;i++) r[u][i]=_in[abs(u-v)][abs(i-j)]; b_analysis_2d(&rggt[v][j][0][0], 1,BLOCKSIZE, &r[0][0],SUPPORT,1,_f,klt); } } ret = cg_2d_i(rggt,f,klt); free(rggt); return ret; } #else static double cg_1d_i(double rgt[SUPPORT][BLOCKSIZE], const int *_f, 
double klt[BLOCKSIZE][BLOCKSIZE]){ int j; int i; double r[BLOCKSIZE]; double grgt[BLOCKSIZE][BLOCKSIZE]; double cg=0; /* G*R*G^T */ for(i=0;i<BLOCKSIZE;i++) b_analysis_1d(&grgt[0][i],BLOCKSIZE,&rgt[0][i],BLOCKSIZE,_f,klt); /* H */ for(j=0;j<BLOCKSIZE;j++){ for(i=0;i<BLOCKSIZE;i++){ r[i]=i==j?1:0; } b_synthesis_1d(&rgt[0][j],BLOCKSIZE,r,1,_f,klt); } /* (G*R*G^T)_ii * (H^T*H)_ii */ for(j=0;j<BLOCKSIZE;j++){ double h=0; for(i=0;i<SUPPORT;i++){ h+=rgt[i][j]*rgt[i][j]; } cg-=10*log10(grgt[j][j]*h); } return cg/BLOCKSIZE; } double cg_1d(double in[SUPPORT][SUPPORT],const int *_f){ int j; double rgt[SUPPORT][BLOCKSIZE]; double klt[BLOCKSIZE][BLOCKSIZE]; #if USE_KLT gklt_1d(klt,in,_f); #endif /* R*G^T */ for(j=0;j<SUPPORT;j++){ b_analysis_1d(&rgt[j][0],1,in[j],1,_f,klt); } return cg_1d_i(rgt,f,klt); } double cg_1d_collapsed(double in[SUPPORT],const int *_f){ int j; int i; double r[SUPPORT]; double rgt[SUPPORT][BLOCKSIZE]; double klt[BLOCKSIZE][BLOCKSIZE]; #if USE_KLT gklt_1d_collapsed(klt,in,_f); #endif /* R*G^T */ for(j=0;j<SUPPORT;j++){ for(i=0;i<SUPPORT;i++){ r[i]=in[abs(i-j)]; } b_analysis_1d(&rgt[j][0],1,r,1,_f,klt); } return cg_1d_i(rgt,f,klt); } #endif #if USE_FILES int main(int _argc,const char *_argv[]){ cov_state cvs[NUM_PROCS]; #if COMPUTE_NATHAN trans_ctx ctx[NUM_PROCS]; double r[SUPPORT*SUPPORT]; /* maximum for 2d */ #else trans_ctx *ctx=NULL; #endif int i; #if BLOCKSIZE==4 f=OD_FILTER_PARAMS4; #elif BLOCKSIZE==8 f=OD_FILTER_PARAMS8; #elif BLOCKSIZE==16 f=OD_FILTER_PARAMS16; #else # error "Need filter params for this block size." 
#endif for(i=0;i<NUM_PROCS;i++){ #if USE_2D cov_init(&cvs[i],SUPPORT*SUPPORT); #else cov_init(&cvs[i],SUPPORT); #endif } #if COMPUTE_NATHAN for(i=0;i<NUM_PROCS;i++){ #if USE_2D trans_data_init(&ctx[i].td,SUPPORT*SUPPORT); #else trans_data_init(&ctx[i].td,SUPPORT); #endif } #endif OD_OMP_SET_THREADS(NUM_PROCS); process_files(ctx,cvs,_argc,_argv); for(i=1;i<NUM_PROCS;i++) cov_combine(&cvs[0],&cvs[i]); cov_compute(&cvs[0]); #if COMPUTE_NATHAN for(i=1;i<NUM_PROCS;i++) trans_data_combine(&ctx[0].td,&ctx[i].td); trans_data_normalize(&ctx[0].td); #endif #if PRINT_COV { int i,j; fprintf(stdout,"collapsed_cov=\n"); for(j=0;j<cvs[0].sz/SUPPORT;j++){ for(i=0;i<SUPPORT;i++){ fprintf(stdout,"%s %- 12.6G",i>0?",":"",cvs[0].cov[j*SUPPORT+i]); } fprintf(stdout,"\n"); } } #endif #if USE_2D #if COMPUTE_NATHAN fprintf(stdout,"original cg=%-24.16G\n", cg_2d((double(*)[SUPPORT][SUPPORT][SUPPORT])ctx[0].td.cov,f)); trans_data_collapse(&ctx[0].td,SUPPORT,r); fprintf(stdout,"collapse cg=%-24.16G\n", cg_2d_collapsed((double(*)[SUPPORT])r,f)); #endif fprintf(stdout,"monty cg=%-24.16G\n", cg_2d_collapsed((double(*)[SUPPORT])cvs[0].cov,f)); #else #if COMPUTE_NATHAN fprintf(stdout,"original cg=%-24.16G\n", cg_1d((double (*)[SUPPORT])ctx[0].td.cov,f)); trans_data_collapse(&ctx[0].td,1,r); fprintf(stdout,"collapse cg=%-24.16G\n", cg_1d_collapsed(r,f)); #endif fprintf(stdout,"monty cg=%-24.16G\n", cg_1d_collapsed(cvs[0].cov,f)); #endif for(i=0;i<NUM_PROCS;i++) cov_clear(&cvs[i]); #if COMPUTE_NATHAN for(i=0;i<NUM_PROCS;i++) trans_data_clear(&ctx[i].td); #endif return EXIT_SUCCESS; } #else int main(int _argc,const char *_argv[]){ #if USE_2D double cov[SUPPORT][SUPPORT]; double *r=&cov[0][0]; #else double cov[SUPPORT]; double *r=&cov[0]; #endif #if BLOCKSIZE==4 f=OD_FILTER_PARAMS4; #elif BLOCKSIZE==8 f=OD_FILTER_PARAMS8; #elif BLOCKSIZE==16 f=OD_FILTER_PARAMS16; #else # error "Need filter params for this block size." 
#endif # if USE_2D auto_regressive_collapsed(r,SUPPORT*SUPPORT,SUPPORT,0.95); fprintf(stdout,"AR p=.95 cg=%-24.18G\n",cg_2d_collapsed(cov,f)); # else auto_regressive_collapsed(r,SUPPORT,1,0.95); fprintf(stdout,"AR p=.95 cg=%-24.18G\n",cg_1d_collapsed(cov,f)); # endif return EXIT_SUCCESS; } #endif
BucketOP.h
#ifndef BucketOP #define BucketOP /* * BucketOP.h: * a bucket operation, for padding mainly * usually an inputleaf node, degree = 0 * * Created on: Apr 21, 2017 * Author: mszhang */ #include "Eigen/Dense" #include "MyLib.h" #include "Node.h" #include "Graph.h" using namespace Eigen; class BucketNode : public Node { public: BucketNode() : Node() { node_type = "bucket"; } public: virtual inline void clearValue() { //Node::clearValue(); loss = 0; degree = 0; if (drop_value > 0)drop_mask = 1; parents.clear(); } virtual inline void init(int ndim, dtype dropout) { Node::init(ndim, -1); } public: void forward(Graph *cg, dtype value) { val = value; loss = 0; degree = 0; cg->addNode(this); } //value already assigned void forward(Graph *cg) { loss = 0; degree = 0; cg->addNode(this); } inline void compute() { } inline void backward() { } public: inline PExecute generate(bool bTrain, dtype cur_drop_factor); // better to rewrite for deep understanding inline bool typeEqual(PNode other) { return Node::typeEqual(other); } }; class BucketExecute : public Execute { public: bool bTrain; public: inline void forward() { int count = batch.size(); //#pragma omp parallel for for (int idx = 0; idx < count; idx++) { batch[idx]->forward_drop(bTrain, drop_factor); } } inline void backward() { int count = batch.size(); //#pragma omp parallel for for (int idx = 0; idx < count; idx++) { batch[idx]->backward_drop(); } } }; inline PExecute BucketNode::generate(bool bTrain, dtype cur_drop_factor) { BucketExecute* exec = new BucketExecute(); exec->batch.push_back(this); exec->bTrain = bTrain; exec->drop_factor = cur_drop_factor; return exec; } #endif
GB_unop__identity_uint64_int8.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_uint64_int8
// op(A') function:  GB_unop_tran__identity_uint64_int8

// C type:   uint64_t
// A type:   int8_t
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int8_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    uint64_t z = (uint64_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int8_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint64_t z = (uint64_t) aij ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
// (0 here: int8_t -> uint64_t requires a cast, so no memcpy fast path)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__identity_uint64_int8
(
    uint64_t *Cx,               // Cx and Ax may be aliased
    const int8_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // sparse/hypersparse/full case: every entry of Ax is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (int8_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int8_t aij = Ax [p] ;
            uint64_t z = (uint64_t) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        // (entries with Ab [p] == 0 are absent and skipped)
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int8_t aij = Ax [p] ;
            uint64_t z = (uint64_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__identity_uint64_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is shared via textual inclusion; it uses the
    // GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
common.h
/*! * Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_UTILS_COMMON_H_ #define LIGHTGBM_UTILS_COMMON_H_ #if ((defined(sun) || defined(__sun)) && (defined(__SVR4) || defined(__svr4__))) #include <LightGBM/utils/common_legacy_solaris.h> #endif #include <LightGBM/utils/log.h> #include <LightGBM/utils/openmp_wrapper.h> #include <limits> #include <string> #include <algorithm> #include <chrono> #include <cmath> #include <cstdint> #include <cstdio> #include <cstring> #include <functional> #include <iomanip> #include <iterator> #include <map> #include <memory> #include <sstream> #include <type_traits> #include <unordered_map> #include <utility> #include <vector> #if (!((defined(sun) || defined(__sun)) && (defined(__SVR4) || defined(__svr4__)))) #define FMT_HEADER_ONLY #include "LightGBM/fmt/format.h" #endif #include "LightGBM/fast_double_parser.h" #ifdef _MSC_VER #include <intrin.h> #pragma intrinsic(_BitScanReverse) #endif #if defined(_MSC_VER) #include <malloc.h> #elif MM_MALLOC #include <mm_malloc.h> // https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html // https://www.oreilly.com/library/view/mac-os-x/0596003560/ch05s01s02.html #elif defined(__GNUC__) && defined(HAVE_MALLOC_H) #include <malloc.h> #define _mm_malloc(a, b) memalign(b, a) #define _mm_free(a) free(a) #else #include <stdlib.h> #define _mm_malloc(a, b) malloc(a) #define _mm_free(a) free(a) #endif namespace LightGBM { namespace Common { /*! * Imbues the stream with the C locale. 
*/ static void C_stringstream(std::stringstream &ss) { ss.imbue(std::locale::classic()); } inline static char tolower(char in) { if (in <= 'Z' && in >= 'A') return in - ('Z' - 'z'); return in; } inline static std::string Trim(std::string str) { if (str.empty()) { return str; } str.erase(str.find_last_not_of(" \f\n\r\t\v") + 1); str.erase(0, str.find_first_not_of(" \f\n\r\t\v")); return str; } inline static std::string RemoveQuotationSymbol(std::string str) { if (str.empty()) { return str; } str.erase(str.find_last_not_of("'\"") + 1); str.erase(0, str.find_first_not_of("'\"")); return str; } inline static bool StartsWith(const std::string& str, const std::string prefix) { if (str.substr(0, prefix.size()) == prefix) { return true; } else { return false; } } inline static std::vector<std::string> Split(const char* c_str, char delimiter) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { if (str[pos] == delimiter) { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } ++pos; i = pos; } else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } inline static std::vector<std::string> SplitBrackets(const char* c_str, char left_delimiter, char right_delimiter) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; bool open = false; while (pos < str.length()) { if (str[pos] == left_delimiter) { open = true; ++pos; i = pos; } else if (str[pos] == right_delimiter && open) { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } open = false; ++pos; } else { ++pos; } } return ret; } inline static std::vector<std::string> SplitLines(const char* c_str) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { if (str[pos] == '\n' || str[pos] == '\r') { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } // skip the line endings while (str[pos] == '\n' || str[pos] == '\r') ++pos; // new begin i = pos; 
} else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } inline static std::vector<std::string> Split(const char* c_str, const char* delimiters) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { bool met_delimiters = false; for (int j = 0; delimiters[j] != '\0'; ++j) { if (str[pos] == delimiters[j]) { met_delimiters = true; break; } } if (met_delimiters) { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } ++pos; i = pos; } else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } template<typename T> inline static const char* Atoi(const char* p, T* out) { int sign; T value; while (*p == ' ') { ++p; } sign = 1; if (*p == '-') { sign = -1; ++p; } else if (*p == '+') { ++p; } for (value = 0; *p >= '0' && *p <= '9'; ++p) { value = value * 10 + (*p - '0'); } *out = static_cast<T>(sign * value); while (*p == ' ') { ++p; } return p; } template<typename T> inline static double Pow(T base, int power) { if (power < 0) { return 1.0 / Pow(base, -power); } else if (power == 0) { return 1; } else if (power % 2 == 0) { return Pow(base*base, power / 2); } else if (power % 3 == 0) { return Pow(base*base*base, power / 3); } else { return base * Pow(base, power - 1); } } inline static const char* Atof(const char* p, double* out) { int frac; double sign, value, scale; *out = NAN; // Skip leading white space, if any. while (*p == ' ') { ++p; } // Get sign, if any. sign = 1.0; if (*p == '-') { sign = -1.0; ++p; } else if (*p == '+') { ++p; } // is a number if ((*p >= '0' && *p <= '9') || *p == '.' || *p == 'e' || *p == 'E') { // Get digits before decimal point or exponent, if any. for (value = 0.0; *p >= '0' && *p <= '9'; ++p) { value = value * 10.0 + (*p - '0'); } // Get digits after decimal point, if any. 
if (*p == '.') { double right = 0.0; int nn = 0; ++p; while (*p >= '0' && *p <= '9') { right = (*p - '0') + right * 10.0; ++nn; ++p; } value += right / Pow(10.0, nn); } // Handle exponent, if any. frac = 0; scale = 1.0; if ((*p == 'e') || (*p == 'E')) { uint32_t expon; // Get sign of exponent, if any. ++p; if (*p == '-') { frac = 1; ++p; } else if (*p == '+') { ++p; } // Get digits of exponent, if any. for (expon = 0; *p >= '0' && *p <= '9'; ++p) { expon = expon * 10 + (*p - '0'); } if (expon > 308) expon = 308; // Calculate scaling factor. while (expon >= 50) { scale *= 1E50; expon -= 50; } while (expon >= 8) { scale *= 1E8; expon -= 8; } while (expon > 0) { scale *= 10.0; expon -= 1; } } // Return signed and scaled floating point result. *out = sign * (frac ? (value / scale) : (value * scale)); } else { size_t cnt = 0; while (*(p + cnt) != '\0' && *(p + cnt) != ' ' && *(p + cnt) != '\t' && *(p + cnt) != ',' && *(p + cnt) != '\n' && *(p + cnt) != '\r' && *(p + cnt) != ':') { ++cnt; } if (cnt > 0) { std::string tmp_str(p, cnt); std::transform(tmp_str.begin(), tmp_str.end(), tmp_str.begin(), Common::tolower); if (tmp_str == std::string("na") || tmp_str == std::string("nan") || tmp_str == std::string("null")) { *out = NAN; } else if (tmp_str == std::string("inf") || tmp_str == std::string("infinity")) { *out = sign * 1e308; } else { Log::Fatal("Unknown token %s in data file", tmp_str.c_str()); } p += cnt; } } while (*p == ' ') { ++p; } return p; } inline static bool AtoiAndCheck(const char* p, int* out) { const char* after = Atoi(p, out); if (*after != '\0') { return false; } return true; } inline static bool AtofAndCheck(const char* p, double* out) { const char* after = Atof(p, out); if (*after != '\0') { return false; } return true; } inline static const char* SkipSpaceAndTab(const char* p) { while (*p == ' ' || *p == '\t') { ++p; } return p; } inline static const char* SkipReturn(const char* p) { while (*p == '\n' || *p == '\r' || *p == ' ') { ++p; } return p; } 
template<typename T, typename T2> inline static std::vector<T2> ArrayCast(const std::vector<T>& arr) { std::vector<T2> ret(arr.size()); for (size_t i = 0; i < arr.size(); ++i) { ret[i] = static_cast<T2>(arr[i]); } return ret; } template<typename T, bool is_float> struct __StringToTHelper { T operator()(const std::string& str) const { T ret = 0; Atoi(str.c_str(), &ret); return ret; } }; template<typename T> struct __StringToTHelper<T, true> { T operator()(const std::string& str) const { return static_cast<T>(std::stod(str)); } }; template<typename T> inline static std::vector<T> StringToArray(const std::string& str, char delimiter) { std::vector<std::string> strs = Split(str.c_str(), delimiter); std::vector<T> ret; ret.reserve(strs.size()); __StringToTHelper<T, std::is_floating_point<T>::value> helper; for (const auto& s : strs) { ret.push_back(helper(s)); } return ret; } template<typename T> inline static std::vector<std::vector<T>> StringToArrayofArrays( const std::string& str, char left_bracket, char right_bracket, char delimiter) { std::vector<std::string> strs = SplitBrackets(str.c_str(), left_bracket, right_bracket); std::vector<std::vector<T>> ret; for (const auto& s : strs) { ret.push_back(StringToArray<T>(s, delimiter)); } return ret; } template<typename T> inline static std::vector<T> StringToArray(const std::string& str, int n) { if (n == 0) { return std::vector<T>(); } std::vector<std::string> strs = Split(str.c_str(), ' '); CHECK_EQ(strs.size(), static_cast<size_t>(n)); std::vector<T> ret; ret.reserve(strs.size()); __StringToTHelper<T, std::is_floating_point<T>::value> helper; for (const auto& s : strs) { ret.push_back(helper(s)); } return ret; } template<typename T, bool is_float> struct __StringToTHelperFast { const char* operator()(const char*p, T* out) const { return Atoi(p, out); } }; template<typename T> struct __StringToTHelperFast<T, true> { const char* operator()(const char*p, T* out) const { double tmp = 0.0f; auto ret = Atof(p, &tmp); *out = 
static_cast<T>(tmp); return ret; } }; template<typename T> inline static std::vector<T> StringToArrayFast(const std::string& str, int n) { if (n == 0) { return std::vector<T>(); } auto p_str = str.c_str(); __StringToTHelperFast<T, std::is_floating_point<T>::value> helper; std::vector<T> ret(n); for (int i = 0; i < n; ++i) { p_str = helper(p_str, &ret[i]); } return ret; } template<typename T> inline static std::string Join(const std::vector<T>& strs, const char* delimiter, const bool force_C_locale = false) { if (strs.empty()) { return std::string(""); } std::stringstream str_buf; if (force_C_locale) { C_stringstream(str_buf); } str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2); str_buf << strs[0]; for (size_t i = 1; i < strs.size(); ++i) { str_buf << delimiter; str_buf << strs[i]; } return str_buf.str(); } template<> inline std::string Join<int8_t>(const std::vector<int8_t>& strs, const char* delimiter, const bool force_C_locale) { if (strs.empty()) { return std::string(""); } std::stringstream str_buf; if (force_C_locale) { C_stringstream(str_buf); } str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2); str_buf << static_cast<int16_t>(strs[0]); for (size_t i = 1; i < strs.size(); ++i) { str_buf << delimiter; str_buf << static_cast<int16_t>(strs[i]); } return str_buf.str(); } template<typename T> inline static std::string Join(const std::vector<T>& strs, size_t start, size_t end, const char* delimiter, const bool force_C_locale = false) { if (end - start <= 0) { return std::string(""); } start = std::min(start, static_cast<size_t>(strs.size()) - 1); end = std::min(end, static_cast<size_t>(strs.size())); std::stringstream str_buf; if (force_C_locale) { C_stringstream(str_buf); } str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2); str_buf << strs[start]; for (size_t i = start + 1; i < end; ++i) { str_buf << delimiter; str_buf << strs[i]; } return str_buf.str(); } inline static int64_t Pow2RoundUp(int64_t 
x) { int64_t t = 1; for (int i = 0; i < 64; ++i) { if (t >= x) { return t; } t <<= 1; } return 0; } /*! * \brief Do inplace softmax transformation on p_rec * \param p_rec The input/output vector of the values. */ inline static void Softmax(std::vector<double>* p_rec) { std::vector<double> &rec = *p_rec; double wmax = rec[0]; for (size_t i = 1; i < rec.size(); ++i) { wmax = std::max(rec[i], wmax); } double wsum = 0.0f; for (size_t i = 0; i < rec.size(); ++i) { rec[i] = std::exp(rec[i] - wmax); wsum += rec[i]; } for (size_t i = 0; i < rec.size(); ++i) { rec[i] /= static_cast<double>(wsum); } } inline static void Softmax(const double* input, double* output, int len) { double wmax = input[0]; for (int i = 1; i < len; ++i) { wmax = std::max(input[i], wmax); } double wsum = 0.0f; for (int i = 0; i < len; ++i) { output[i] = std::exp(input[i] - wmax); wsum += output[i]; } for (int i = 0; i < len; ++i) { output[i] /= static_cast<double>(wsum); } } template<typename T> std::vector<const T*> ConstPtrInVectorWrapper(const std::vector<std::unique_ptr<T>>& input) { std::vector<const T*> ret; for (auto t = input.begin(); t !=input.end(); ++t) { ret.push_back(t->get()); } return ret; } template<typename T1, typename T2> inline static void SortForPair(std::vector<T1>* keys, std::vector<T2>* values, size_t start, bool is_reverse = false) { std::vector<std::pair<T1, T2>> arr; auto& ref_key = *keys; auto& ref_value = *values; for (size_t i = start; i < keys->size(); ++i) { arr.emplace_back(ref_key[i], ref_value[i]); } if (!is_reverse) { std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) { return a.first < b.first; }); } else { std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) { return a.first > b.first; }); } for (size_t i = start; i < arr.size(); ++i) { ref_key[i] = arr[i].first; ref_value[i] = arr[i].second; } } template <typename T> inline static std::vector<T*> 
Vector2Ptr(std::vector<std::vector<T>>* data) {  // NOTE(review): signature begins on the previous chunk line.
  // Returns one raw data() pointer per inner vector; pointers are invalidated
  // if the inner vectors reallocate.
  std::vector<T*> ptr(data->size());
  auto& ref_data = *data;
  for (size_t i = 0; i < data->size(); ++i) {
    ptr[i] = ref_data[i].data();
  }
  return ptr;
}

/*! \brief Return the size of each inner vector, as int. */
template <typename T>
inline static std::vector<int> VectorSize(const std::vector<std::vector<T>>& data) {
  std::vector<int> ret(data.size());
  for (size_t i = 0; i < data.size(); ++i) {
    ret[i] = static_cast<int>(data[i].size());
  }
  return ret;
}

/*! \brief Clamp x into [-1e300, 1e300]; map NaN to 0.0. */
inline static double AvoidInf(double x) {
  if (std::isnan(x)) {
    return 0.0;
  } else if (x >= 1e300) {
    return 1e300;
  } else if (x <= -1e300) {
    return -1e300;
  } else {
    return x;
  }
}

/*! \brief float overload: clamp into [-1e38, 1e38]; map NaN to 0.0f. */
inline static float AvoidInf(float x) {
  if (std::isnan(x)) {
    return 0.0f;
  } else if (x >= 1e38) {
    return 1e38f;
  } else if (x <= -1e38) {
    return -1e38f;
  } else {
    return x;
  }
}

// Tag helper: yields a null pointer typed as the iterator's value_type,
// used only to deduce _VTRanIt in ParallelSort below (never dereferenced).
template<typename _Iter> inline
static typename std::iterator_traits<_Iter>::value_type* IteratorValType(_Iter) {
  return (0);
}

/*!
 * \brief Parallel merge sort: sort chunks with one OpenMP thread each,
 *        then merge pairwise (merge phase continues on the next chunk line).
 *        Falls back to std::sort for small inputs or a single thread.
 */
template<typename _RanIt, typename _Pr, typename _VTRanIt> inline
static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred, _VTRanIt*) {
  size_t len = _Last - _First;
  const size_t kMinInnerLen = 1024;
  int num_threads = OMP_NUM_THREADS();
  if (len <= kMinInnerLen || num_threads <= 1) {
    std::sort(_First, _Last, _Pred);
    return;
  }
  // Chunk size per thread, at least kMinInnerLen; recompute thread count.
  size_t inner_size = (len + num_threads - 1) / num_threads;
  inner_size = std::max(inner_size, kMinInnerLen);
  num_threads = static_cast<int>((len + inner_size - 1) / inner_size);
#pragma omp parallel for schedule(static, 1)
  for (int i = 0; i < num_threads; ++i) {
    size_t left = inner_size*i;
    size_t right = left + inner_size;
    right = std::min(right, len);
    if (right > left) {
      std::sort(_First + left, _First + right, _Pred);
    }
  }
  // Buffer for merge.
  std::vector<_VTRanIt> temp_buf(len);  // NOTE(review): merge phase of ParallelSort, whose first half is on the previous chunk line.
  _RanIt buf = temp_buf.begin();  // NOTE(review): assigns a vector iterator to _RanIt — presumably only instantiated with compatible iterator/pointer types; verify at call sites.
  size_t s = inner_size;
  // Recursive merge: double the sorted-run length each pass, merging
  // neighbouring runs in parallel.
  while (s < len) {
    int loop_size = static_cast<int>((len + s * 2 - 1) / (s * 2));
    #pragma omp parallel for schedule(static, 1)
    for (int i = 0; i < loop_size; ++i) {
      size_t left = i * 2 * s;
      size_t mid = left + s;
      size_t right = mid + s;
      right = std::min(len, right);
      if (mid >= right) { continue; }
      // Copy the left run aside, then merge it with the right run in place.
      std::copy(_First + left, _First + mid, buf + left);
      std::merge(buf + left, buf + mid, _First + mid, _First + right, _First + left, _Pred);
    }
    s *= 2;
  }
}

/*! \brief Convenience overload: deduces the value type via IteratorValType. */
template<typename _RanIt, typename _Pr> inline
static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred) {
  return ParallelSort(_First, _Last, _Pred, IteratorValType(_First));
}

// Check that all y[] are in interval [ymin, ymax] (end points included); throws error if not
// Processes elements two at a time: within each pair only the smaller element
// needs the lower-bound check and only the larger needs the upper-bound check.
template <typename T>
inline static void CheckElementsIntervalClosed(const T *y, T ymin, T ymax, int ny, const char *callername) {
  auto fatal_msg = [&y, &ymin, &ymax, &callername](int i) {
    std::ostringstream os;
    os << "[%s]: does not tolerate element [#%i = " << y[i] << "] outside [" << ymin << ", " << ymax << "]";
    Log::Fatal(os.str().c_str(), callername, i);
  };
  for (int i = 1; i < ny; i += 2) {
    if (y[i - 1] < y[i]) {
      if (y[i - 1] < ymin) {
        fatal_msg(i - 1);
      } else if (y[i] > ymax) {
        fatal_msg(i);
      }
    } else {
      if (y[i - 1] > ymax) {
        fatal_msg(i - 1);
      } else if (y[i] < ymin) {
        fatal_msg(i);
      }
    }
  }
  if (ny & 1) {  // odd
    // The final unpaired element gets both checks.
    if (y[ny - 1] < ymin || y[ny - 1] > ymax) {
      fatal_msg(ny - 1);
    }
  }
}

// One-pass scan over array w with nw elements: find min, max and sum of elements;
// this is useful for checking weight requirements.
template <typename T1, typename T2> inline static void ObtainMinMaxSum(const T1 *w, int nw, T1 *mi, T1 *ma, T2 *su) { T1 minw; T1 maxw; T1 sumw; int i; if (nw & 1) { // odd minw = w[0]; maxw = w[0]; sumw = w[0]; i = 2; } else { // even if (w[0] < w[1]) { minw = w[0]; maxw = w[1]; } else { minw = w[1]; maxw = w[0]; } sumw = w[0] + w[1]; i = 3; } for (; i < nw; i += 2) { if (w[i - 1] < w[i]) { minw = std::min(minw, w[i - 1]); maxw = std::max(maxw, w[i]); } else { minw = std::min(minw, w[i]); maxw = std::max(maxw, w[i - 1]); } sumw += w[i - 1] + w[i]; } if (mi != nullptr) { *mi = minw; } if (ma != nullptr) { *ma = maxw; } if (su != nullptr) { *su = static_cast<T2>(sumw); } } inline static std::vector<uint32_t> EmptyBitset(int n) { int size = n / 32; if (n % 32 != 0) ++size; return std::vector<uint32_t>(size); } template<typename T> inline static void InsertBitset(std::vector<uint32_t>* vec, const T val) { auto& ref_v = *vec; int i1 = val / 32; int i2 = val % 32; if (static_cast<int>(vec->size()) < i1 + 1) { vec->resize(i1 + 1, 0); } ref_v[i1] |= (1 << i2); } template<typename T> inline static std::vector<uint32_t> ConstructBitset(const T* vals, int n) { std::vector<uint32_t> ret; for (int i = 0; i < n; ++i) { int i1 = vals[i] / 32; int i2 = vals[i] % 32; if (static_cast<int>(ret.size()) < i1 + 1) { ret.resize(i1 + 1, 0); } ret[i1] |= (1 << i2); } return ret; } template<typename T> inline static bool FindInBitset(const uint32_t* bits, int n, T pos) { int i1 = pos / 32; if (i1 >= n) { return false; } int i2 = pos % 32; return (bits[i1] >> i2) & 1; } inline static bool CheckDoubleEqualOrdered(double a, double b) { double upper = std::nextafter(a, INFINITY); return b <= upper; } inline static double GetDoubleUpperBound(double a) { return std::nextafter(a, INFINITY); } inline static size_t GetLine(const char* str) { auto start = str; while (*str != '\0' && *str != '\n' && *str != '\r') { ++str; } return str - start; } inline static const char* SkipNewLine(const char* str) 
{  // NOTE(review): body of SkipNewLine, declared on the previous chunk line.
  // Skips one line terminator: "\r", "\n" or "\r\n".
  if (*str == '\r') {
    ++str;
  }
  if (*str == '\n') {
    ++str;
  }
  return str;
}

/*! \brief Sign of x: -1, 0 or +1. */
template <typename T>
static int Sign(T x) {
  return (x > T(0)) - (x < T(0));
}

/*! \brief log(x) for x > 0, otherwise -infinity (avoids NaN from log of non-positives). */
template <typename T>
static T SafeLog(T x) {
  if (x > 0) {
    return std::log(x);
  } else {
    return -INFINITY;
  }
}

/*!
 * \brief True if s contains none of the JSON structural characters
 *        (so it can be embedded in JSON output without escaping).
 */
inline bool CheckAllowedJSON(const std::string& s) {
  unsigned char char_code;
  for (auto c : s) {
    char_code = static_cast<unsigned char>(c);
    if (char_code == 34      // "
        || char_code == 44   // ,
        || char_code == 58   // :
        || char_code == 91   // [
        || char_code == 93   // ]
        || char_code == 123  // {
        || char_code == 125  // }
        ) {
      return false;
    }
  }
  return true;
}

// NOTE(review): truncation of x + 0.5 only rounds-half-up for x >= 0; for
// negative x this is off by one vs. conventional rounding (e.g. -2.4 -> -1).
// Presumably only called with non-negative values — verify at call sites.
inline int RoundInt(double x) {
  return static_cast<int>(x + 0.5f);
}

/*!
 * \brief Minimal C++03-style allocator returning N-byte aligned storage
 *        via _mm_malloc/_mm_free (default 32-byte alignment for AVX).
 * NOTE(review): `adress` (sic) is a misspelling of the allocator member
 * `address`; kept because the name is part of this class's interface.
 * `throw()` specifications are the pre-C++11 spelling of noexcept.
 */
template <typename T, std::size_t N = 32>
class AlignmentAllocator {
 public:
  typedef T value_type;
  typedef std::size_t size_type;
  typedef std::ptrdiff_t difference_type;
  typedef T* pointer;
  typedef const T* const_pointer;
  typedef T& reference;
  typedef const T& const_reference;
  inline AlignmentAllocator() throw() {}
  template <typename T2>
  inline AlignmentAllocator(const AlignmentAllocator<T2, N>&) throw() {}
  inline ~AlignmentAllocator() throw() {}
  inline pointer adress(reference r) {
    return &r;
  }
  inline const_pointer adress(const_reference r) const {
    return &r;
  }
  inline pointer allocate(size_type n) {
    return (pointer)_mm_malloc(n * sizeof(value_type), N);
  }
  inline void deallocate(pointer p, size_type) {
    _mm_free(p);
  }
  inline void construct(pointer p, const value_type& wert) {
    new (p) value_type(wert);
  }
  inline void destroy(pointer p) {
    p->~value_type();
  }
  inline size_type max_size() const throw() {
    return size_type(-1) / sizeof(value_type);
  }
  template <typename T2>
  struct rebind {
    typedef AlignmentAllocator<T2, N> other;
  };
  bool operator!=(const AlignmentAllocator<T, N>& other) const {
    return !(*this == other);
  }
  // Returns true if and only if storage allocated from *this
  // can be deallocated from other, and vice versa.
  // Always returns true for stateless allocators.
  bool operator==(const AlignmentAllocator<T, N>&) const {  // NOTE(review): closes AlignmentAllocator, begun on the previous chunk line.
    return true;
  }
};

/*!
 * \brief Per-thread accumulating timer. All functionality is compiled in only
 *        when TIMETAG is defined; otherwise Start/Stop are no-ops.
 *        Totals are printed (summed across threads) on destruction.
 */
class Timer {
 public:
  Timer() {
#ifdef TIMETAG
    int num_threads = OMP_NUM_THREADS();
    start_time_.resize(num_threads);
    stats_.resize(num_threads);
#endif  // TIMETAG
  }
  ~Timer() {
    Print();
  }
#ifdef TIMETAG
  /*! \brief Record the start timestamp of `name` for the calling thread. */
  void Start(const std::string& name) {
    auto tid = omp_get_thread_num();
    start_time_[tid][name] = std::chrono::steady_clock::now();
  }
  /*! \brief Accumulate the elapsed time since the matching Start(name). */
  void Stop(const std::string& name) {
    auto cur_time = std::chrono::steady_clock::now();
    auto tid = omp_get_thread_num();
    if (stats_[tid].find(name) == stats_[tid].end()) {
      stats_[tid][name] = std::chrono::duration<double, std::milli>(0);
    }
    stats_[tid][name] += cur_time - start_time_[tid][name];
  }
#else
  void Start(const std::string&) {}
  void Stop(const std::string&) {}
#endif  // TIMETAG
  /*! \brief Sum per-thread stats and log one line per timer name, sorted by name. */
  void Print() const {
#ifdef TIMETAG
    std::unordered_map<std::string, std::chrono::duration<double, std::milli>> stats(stats_[0].begin(), stats_[0].end());
    for (size_t i = 1; i < stats_.size(); ++i) {
      for (auto it = stats_[i].begin(); it != stats_[i].end(); ++it) {
        if (stats.find(it->first) == stats.end()) {
          stats[it->first] = it->second;
        } else {
          stats[it->first] += it->second;
        }
      }
    }
    std::map<std::string, std::chrono::duration<double, std::milli>> ordered(
        stats.begin(), stats.end());
    for (auto it = ordered.begin(); it != ordered.end(); ++it) {
      // NOTE(review): it->second * 1e-3 is still a chrono duration being
      // passed where %f expects a double — relies on the duration's layout
      // matching a double through varargs; verify Log::Info's expectations.
      Log::Info("%s costs:\t %f", it->first.c_str(), it->second * 1e-3);
    }
#endif  // TIMETAG
  }
#ifdef TIMETAG
  // Per-thread open timestamps, indexed by omp thread id then timer name.
  std::vector<
      std::unordered_map<std::string, std::chrono::steady_clock::time_point>>
      start_time_;
  // Per-thread accumulated durations, same indexing.
  std::vector<std::unordered_map<std::string,
                                 std::chrono::duration<double, std::milli>>>
      stats_;
#endif  // TIMETAG
};

// Note: this class is not thread-safe, don't use it inside omp blocks
// RAII scope timer: Start on construction, Stop on destruction.
class FunctionTimer {
 public:
#ifdef TIMETAG
  FunctionTimer(const std::string& name, Timer& timer) : timer_(timer) {
    timer.Start(name);
    name_ = name;
  }
  ~FunctionTimer() {
    timer_.Stop(name_);
  }

 private:
  std::string name_;
  Timer& timer_;
#else
  FunctionTimer(const
  std::string&, Timer&) {}  // NOTE(review): no-op non-TIMETAG ctor; class begun on the previous chunk line.
#endif  // TIMETAG
};

}  // namespace Common

extern Common::Timer global_timer;

/*!
 * Provides locale-independent alternatives to Common's methods.
 * Essential to make models robust to locale settings.
 */
namespace CommonC {

/*! \brief Locale-independent Join: forwards to Common::Join with force_C_locale = true. */
template<typename T>
inline static std::string Join(const std::vector<T>& strs, const char* delimiter) {
  return LightGBM::Common::Join(strs, delimiter, true);
}

/*! \brief Locale-independent ranged Join over [start, end). */
template<typename T>
inline static std::string Join(const std::vector<T>& strs, size_t start, size_t end, const char* delimiter) {
  return LightGBM::Common::Join(strs, start, end, delimiter, true);
}

/*! \brief Parse a double; returns a pointer just past the consumed text. */
inline static const char* Atof(const char* p, double* out) {
  return LightGBM::Common::Atof(p, out);
}

// Primary template: non-floating types are parsed as integers.
template<typename T, bool is_float>
struct __StringToTHelperFast {
  const char* operator()(const char*p, T* out) const {
    return LightGBM::Common::Atoi(p, out);
  }
};

/*!
 * \warning Beware that ``Common::Atof`` in ``__StringToTHelperFast``,
 * has **less** floating point precision than ``__StringToTHelper``.
 * Both versions are kept to maintain bit-for-bit the "legacy" LightGBM behaviour in terms of precision.
 * Check ``StringToArrayFast`` and ``StringToArray`` for more details on this.
 */
template<typename T>
struct __StringToTHelperFast<T, true> {
  const char* operator()(const char*p, T* out) const {
    double tmp = 0.0f;
    auto ret = Atof(p, &tmp);
    *out = static_cast<T>(tmp);
    return ret;
  }
};

// Primary template: whole-string integer parse (higher-precision family).
template<typename T, bool is_float>
struct __StringToTHelper {
  T operator()(const std::string& str) const {
    T ret = 0;
    LightGBM::Common::Atoi(str.c_str(), &ret);
    return ret;
  }
};

/*!
 * \warning Beware that ``Common::Atof`` in ``__StringToTHelperFast``,
 * has **less** floating point precision than ``__StringToTHelper``.
 * Both versions are kept to maintain bit-for-bit the "legacy" LightGBM behaviour in terms of precision.
 * Check ``StringToArrayFast`` and ``StringToArray`` for more details on this.
 * \note It is possible that ``fast_double_parser::parse_number`` is faster than ``Common::Atof``.
 */
template<typename T>
struct __StringToTHelper<T, true> {
  T operator()(const std::string& str) const {
    double tmp;

    // Fast (common) path: For numeric inputs in RFC 7159 format:
    const bool fast_parse_succeeded = fast_double_parser::parse_number(str.c_str(), &tmp);

    // Rare path: Not in RFC 7159 format. Possible "inf", "nan", etc. Fallback to standard library:
    if (!fast_parse_succeeded) {
      std::stringstream ss;
      Common::C_stringstream(ss);  // "C" locale so parsing ignores the process locale
      ss << str;
      ss >> tmp;
    }

    return static_cast<T>(tmp);
  }
};

/*!
 * \warning Beware that due to internal use of ``Common::Atof`` in ``__StringToTHelperFast``,
 * this method has less precision for floating point numbers than ``StringToArray``,
 * which calls ``__StringToTHelper``.
 * As such, ``StringToArrayFast`` and ``StringToArray`` are not equivalent!
 * Both versions were kept to maintain bit-for-bit the "legacy" LightGBM behaviour in terms of precision.
 */
template<typename T>
inline static std::vector<T> StringToArrayFast(const std::string& str, int n) {
  if (n == 0) {
    return std::vector<T>();
  }
  auto p_str = str.c_str();
  __StringToTHelperFast<T, std::is_floating_point<T>::value> helper;
  std::vector<T> ret(n);
  for (int i = 0; i < n; ++i) {
    p_str = helper(p_str, &ret[i]);  // advances past each parsed token
  }
  return ret;
}

/*!
 * \warning Do not replace calls to this method by ``StringToArrayFast``.
 * This method is more precise for floating point numbers.
 * Check ``StringToArrayFast`` for more details.
 */
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, int n) {
  if (n == 0) {
    return std::vector<T>();
  }
  std::vector<std::string> strs = LightGBM::Common::Split(str.c_str(), ' ');
  CHECK_EQ(strs.size(), static_cast<size_t>(n));  // hard-fails on token-count mismatch
  std::vector<T> ret;
  ret.reserve(strs.size());
  __StringToTHelper<T, std::is_floating_point<T>::value> helper;
  for (const auto& s : strs) {
    ret.push_back(helper(s));
  }
  return ret;
}

/*!
 * \warning Do not replace calls to this method by ``StringToArrayFast``.
 * This method is more precise for floating point numbers.
 * Check ``StringToArrayFast`` for more details.
 */
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, char delimiter) {
  std::vector<std::string> strs = LightGBM::Common::Split(str.c_str(), delimiter);
  std::vector<T> ret;
  ret.reserve(strs.size());
  __StringToTHelper<T, std::is_floating_point<T>::value> helper;
  for (const auto& s : strs) {
    ret.push_back(helper(s));
  }
  return ret;
}

#if (!((defined(sun) || defined(__sun)) && (defined(__SVR4) || defined(__svr4__))))

/*!
 * Safely formats a value onto a buffer according to a format string and null-terminates it.
 *
 * \note It checks that the full value was written or forcefully aborts.
 * This safety check serves to prevent incorrect internal API usage.
 * Correct usage will never incur in this problem:
 * - The received buffer size shall be sufficient at all times for the input format string and value.
 */
template <typename T>
inline static void format_to_buf(char* buffer, const size_t buf_len, const char* format, const T value) {
  auto result = fmt::format_to_n(buffer, buf_len, format, value);
  if (result.size >= buf_len) {
    Log::Fatal("Numerical conversion failed. Buffer is too small.");
  }
  buffer[result.size] = '\0';
}

// Primary template: default {} formatting for non-floating types.
template<typename T, bool is_float, bool high_precision>
struct __TToStringHelper {
  void operator()(T value, char* buffer, size_t buf_len) const {
    format_to_buf(buffer, buf_len, "{}", value);
  }
};

// Floating point, normal precision: {:g}.
template<typename T>
struct __TToStringHelper<T, true, false> {
  void operator()(T value, char* buffer, size_t buf_len) const {
    format_to_buf(buffer, buf_len, "{:g}", value);
  }
};

// Floating point, round-trip precision: {:.17g}.
template<typename T>
struct __TToStringHelper<T, true, true> {
  void operator()(T value, char* buffer, size_t buf_len) const {
    format_to_buf(buffer, buf_len, "{:.17g}", value);
  }
};

/*!
 * Converts an array to a string with values separated by the space character.
 * This method replaces Common's ``ArrayToString`` and ``ArrayToStringFast`` functionality
 * and is locale-independent.
 *
 * \note If ``high_precision_output`` is set to true,
 * floating point values are output with more digits of precision.
 */
template<bool high_precision_output = false, typename T>
inline static std::string ArrayToString(const std::vector<T>& arr, size_t n) {
  if (arr.empty() || n == 0) {
    return std::string("");
  }
  __TToStringHelper<T, std::is_floating_point<T>::value, high_precision_output> helper;
  // Scratch buffer reused for every element; 32 bytes covers {:.17g} output.
  const size_t buf_len = high_precision_output ? 32 : 16;
  std::vector<char> buffer(buf_len);
  std::stringstream str_buf;
  Common::C_stringstream(str_buf);  // "C" locale for locale-independent output
  helper(arr[0], buffer.data(), buf_len);
  str_buf << buffer.data();
  // Emit at most n elements, clamped to the array size.
  for (size_t i = 1; i < std::min(n, arr.size()); ++i) {
    helper(arr[i], buffer.data(), buf_len);
    str_buf << ' ' << buffer.data();
  }
  return str_buf.str();
}

#endif  // (!((defined(sun) || defined(__sun)) && (defined(__SVR4) || defined(__svr4__))))

}  // namespace CommonC

}  // namespace LightGBM

#endif   // LightGBM_UTILS_COMMON_FUN_H_
dpado.202001071517.push_back_ShortIndex.h
// // Created by Zhen Peng on 1/6/20. // #ifndef PADO_DPADO_H #define PADO_DPADO_H #include <vector> //#include <unordered_map> #include <map> #include <algorithm> #include <iostream> #include <limits.h> //#include <xmmintrin.h> #include <immintrin.h> #include <bitset> #include <math.h> #include <fstream> #include <omp.h> #include "globals.h" #include "dglobals.h" #include "dgraph.h" namespace PADO { template <VertexID BATCH_SIZE = 1024> class DistBVCPLL { private: static const VertexID BITPARALLEL_SIZE = 50; const inti THRESHOLD_PARALLEL = 80; // Structure for the type of label struct IndexType { struct Batch { VertexID batch_id; // Batch ID VertexID start_index; // Index to the array distances where the batch starts VertexID size; // Number of distances element in this batch Batch() = default; Batch(VertexID batch_id_, VertexID start_index_, VertexID size_): batch_id(batch_id_), start_index(start_index_), size(size_) { } }; struct DistanceIndexType { VertexID start_index; // Index to the array vertices where the same-ditance vertices start VertexID size; // Number of the same-distance vertices UnweightedDist dist; // The real distance DistanceIndexType() = default; DistanceIndexType(VertexID start_index_, VertexID size_, UnweightedDist dist_): start_index(start_index_), size(size_), dist(dist_) { } }; // Bit-parallel Labels UnweightedDist bp_dist[BITPARALLEL_SIZE]; uint64_t bp_sets[BITPARALLEL_SIZE][2]; // [0]: S^{-1}, [1]: S^{0} std::vector<Batch> batches; // Batch info std::vector<DistanceIndexType> distances; // Distance info std::vector<VertexID> vertices; // Vertices in the label, presented as temporary ID size_t size() const { return sizeof(bp_dist) + sizeof(bp_sets) + batches.size() * sizeof(Batch) + distances.size() * sizeof(DistanceIndexType) + vertices.size() * sizeof(VertexID); } }; //__attribute__((aligned(64))); struct ShortIndex { // I use BATCH_SIZE + 1 bit for indicator bit array. 
// The v.indicator[BATCH_SIZE] is set if in current batch v has got any new labels already. // In this way, it helps update_label_indices() and can be reset along with other indicator elements. // std::bitset<BATCH_SIZE + 1> indicator; // Global indicator, indicator[r] (0 <= r < BATCH_SIZE) is set means root r once selected as candidate already std::vector<uint8_t> indicator = std::vector<uint8_t>(BATCH_SIZE + 1, 0); // Use a queue to store candidates // std::vector<VertexID> candidates_que = std::vector<VertexID>(BATCH_SIZE); // VertexID end_candidates_que = 0; // // Cannot used for multiple-thread inserting. std::vector<VertexID> candidates_que; std::vector<uint8_t> is_candidate = std::vector<uint8_t>(BATCH_SIZE, 0); void indicator_reset() { std::fill(indicator.begin(), indicator.end(), 0); } }; //__attribute__((aligned(64))); // Type of Bit-Parallel Label struct BPLabelType { UnweightedDist bp_dist[BITPARALLEL_SIZE] = { 0 }; uint64_t bp_sets[BITPARALLEL_SIZE][2] = { {0} }; // [0]: S^{-1}, [1]: S^{0} }; // Type of Label Message Unit, for initializing distance table struct LabelTableUnit { VertexID root_id; VertexID label_global_id; UnweightedDist dist; LabelTableUnit() = default; LabelTableUnit(VertexID r, VertexID l, UnweightedDist d) : root_id(r), label_global_id(l), dist(d) {} }; // Type of BitParallel Label Message Unit for initializing bit-parallel labels struct MsgBPLabel { VertexID r_root_id; UnweightedDist bp_dist[BITPARALLEL_SIZE]; uint64_t bp_sets[BITPARALLEL_SIZE][2]; MsgBPLabel() = default; MsgBPLabel(VertexID r, const UnweightedDist dist[], const uint64_t sets[][2]) : r_root_id(r) { memcpy(bp_dist, dist, sizeof(bp_dist)); memcpy(bp_sets, sets, sizeof(bp_sets)); } }; VertexID num_v = 0; VertexID num_masters = 0; // VertexID BATCH_SIZE = 0; int host_id = 0; int num_hosts = 0; MPI_Datatype V_ID_Type; std::vector<IndexType> L; inline void bit_parallel_push_labels( const DistGraph &G, VertexID v_global, // std::vector<VertexID> &tmp_que, // VertexID 
&end_tmp_que, // std::vector< std::pair<VertexID, VertexID> > &sibling_es, // VertexID &num_sibling_es, // std::vector< std::pair<VertexID, VertexID> > &child_es, // VertexID &num_child_es, std::vector<VertexID> &tmp_q, VertexID &size_tmp_q, std::vector< std::pair<VertexID, VertexID> > &tmp_sibling_es, VertexID &size_tmp_sibling_es, std::vector< std::pair<VertexID, VertexID> > &tmp_child_es, VertexID &size_tmp_child_es, const VertexID &offset_tmp_q, std::vector<UnweightedDist> &dists, UnweightedDist iter); inline void bit_parallel_labeling( const DistGraph &G, std::vector<uint8_t> &used_bp_roots); // inline void bit_parallel_push_labels( // const DistGraph &G, // VertexID v_global, // std::vector<VertexID> &tmp_que, // VertexID &end_tmp_que, // std::vector< std::pair<VertexID, VertexID> > &sibling_es, // VertexID &num_sibling_es, // std::vector< std::pair<VertexID, VertexID> > &child_es, // VertexID &num_child_es, // std::vector<UnweightedDist> &dists, // UnweightedDist iter); // inline void bit_parallel_labeling( // const DistGraph &G, //// std::vector<IndexType> &L, // std::vector<uint8_t> &used_bp_roots); inline void batch_process( const DistGraph &G, const VertexID b_id, const VertexID roots_start, const VertexID roots_size, const std::vector<uint8_t> &used_bp_roots, std::vector<VertexID> &active_queue, VertexID &end_active_queue, std::vector<VertexID> &got_candidates_queue, VertexID &end_got_candidates_queue, std::vector<ShortIndex> &short_index, std::vector< std::vector<UnweightedDist> > &dist_table, std::vector< std::vector<VertexID> > &recved_dist_table, std::vector<BPLabelType> &bp_labels_table, std::vector<uint8_t> &got_candidates, // std::vector<bool> &got_candidates, std::vector<uint8_t> &is_active, // std::vector<bool> &is_active, std::vector<VertexID> &once_candidated_queue, VertexID &end_once_candidated_queue, std::vector<uint8_t> &once_candidated); // std::vector<bool> &once_candidated); inline VertexID initialization( const DistGraph &G, 
std::vector<ShortIndex> &short_index, std::vector< std::vector<UnweightedDist> > &dist_table, std::vector< std::vector<VertexID> > &recved_dist_table, std::vector<BPLabelType> &bp_labels_table, std::vector<VertexID> &active_queue, VertexID &end_active_queue, std::vector<VertexID> &once_candidated_queue, VertexID &end_once_candidated_queue, std::vector<uint8_t> &once_candidated, // std::vector<bool> &once_candidated, VertexID b_id, VertexID roots_start, VertexID roots_size, // std::vector<VertexID> &roots_master_local, const std::vector<uint8_t> &used_bp_roots); // inline void push_single_label( // VertexID v_head_global, // VertexID label_root_id, // VertexID roots_start, // const DistGraph &G, // std::vector<ShortIndex> &short_index, // std::vector<VertexID> &got_candidates_queue, // VertexID &end_got_candidates_queue, // std::vector<bool> &got_candidates, // std::vector<VertexID> &once_candidated_queue, // VertexID &end_once_candidated_queue, // std::vector<bool> &once_candidated, // const std::vector<BPLabelType> &bp_labels_table, // const std::vector<uint8_t> &used_bp_roots, // UnweightedDist iter); inline void schedule_label_pushing_para( const DistGraph &G, const VertexID roots_start, const std::vector<uint8_t> &used_bp_roots, const std::vector<VertexID> &active_queue, const VertexID global_start, const VertexID global_size, const VertexID local_size, // const VertexID start_active_queue, // const VertexID size_active_queue, std::vector<VertexID> &got_candidates_queue, VertexID &end_got_candidates_queue, std::vector<ShortIndex> &short_index, const std::vector<BPLabelType> &bp_labels_table, std::vector<uint8_t> &got_candidates, std::vector<uint8_t> &is_active, std::vector<VertexID> &once_candidated_queue, VertexID &end_once_candidated_queue, std::vector<uint8_t> &once_candidated, const UnweightedDist iter); inline void local_push_labels_seq( VertexID v_head_global, EdgeID start_index, EdgeID bound_index, VertexID roots_start, const std::vector<VertexID> 
&labels_buffer, const DistGraph &G, std::vector<ShortIndex> &short_index, std::vector<VertexID> &got_candidates_queue, VertexID &end_got_candidates_queue, std::vector<uint8_t> &got_candidates, // std::vector<bool> &got_candidates, std::vector<VertexID> &once_candidated_queue, VertexID &end_once_candidated_queue, std::vector<uint8_t> &once_candidated, // std::vector<bool> &once_candidated, const std::vector<BPLabelType> &bp_labels_table, const std::vector<uint8_t> &used_bp_roots, const UnweightedDist iter); inline void local_push_labels_para( const VertexID v_head_global, const EdgeID start_index, const EdgeID bound_index, const VertexID roots_start, const std::vector<VertexID> &labels_buffer, const DistGraph &G, std::vector<ShortIndex> &short_index, // std::vector<VertexID> &got_candidates_queue, // VertexID &end_got_candidates_queue, std::vector<VertexID> &tmp_got_candidates_queue, VertexID &size_tmp_got_candidates_queue, const VertexID offset_tmp_queue, std::vector<uint8_t> &got_candidates, // std::vector<VertexID> &once_candidated_queue, // VertexID &end_once_candidated_queue, std::vector<VertexID> &tmp_once_candidated_queue, VertexID &size_tmp_once_candidated_queue, std::vector<uint8_t> &once_candidated, const std::vector<BPLabelType> &bp_labels_table, const std::vector<uint8_t> &used_bp_roots, const UnweightedDist iter); // inline void local_push_labels( // VertexID v_head_local, // VertexID roots_start, // const DistGraph &G, // std::vector<ShortIndex> &short_index, // std::vector<VertexID> &got_candidates_queue, // VertexID &end_got_candidates_queue, // std::vector<bool> &got_candidates, // std::vector<VertexID> &once_candidated_queue, // VertexID &end_once_candidated_queue, // std::vector<bool> &once_candidated, // const std::vector<BPLabelType> &bp_labels_table, // const std::vector<uint8_t> &used_bp_roots, // UnweightedDist iter); inline bool distance_query( VertexID cand_root_id, VertexID v_id, VertexID roots_start, // const std::vector<IndexType> &L, 
const std::vector< std::vector<UnweightedDist> > &dist_table, UnweightedDist iter); inline void insert_label_only_seq( VertexID cand_root_id, VertexID v_id, VertexID roots_start, VertexID roots_size, const DistGraph &G, // std::vector< std::vector<UnweightedDist> > &dist_table, std::vector< std::pair<VertexID, VertexID> > &buffer_send); // UnweightedDist iter); inline void insert_label_only_para( VertexID cand_root_id, VertexID v_id_local, VertexID roots_start, VertexID roots_size, const DistGraph &G, // std::vector< std::pair<VertexID, VertexID> > &buffer_send) std::vector< std::pair<VertexID, VertexID> > &tmp_buffer_send, EdgeID &size_tmp_buffer_send, const EdgeID offset_tmp_buffer_send); inline void update_label_indices( VertexID v_id, VertexID inserted_count, // std::vector<IndexType> &L, std::vector<ShortIndex> &short_index, VertexID b_id, UnweightedDist iter); inline void reset_at_end( // const DistGraph &G, // VertexID roots_start, // const std::vector<VertexID> &roots_master_local, std::vector< std::vector<UnweightedDist> > &dist_table, std::vector< std::vector<VertexID> > &recved_dist_table, std::vector<BPLabelType> &bp_labels_table); // template <typename E_T, typename F> // inline void every_host_bcasts_buffer_and_proc( // std::vector<E_T> &buffer_send, // F &fun); template <typename E_T> inline void one_host_bcasts_buffer_to_buffer( int root, std::vector<E_T> &buffer_send, std::vector<E_T> &buffer_recv); // // Function: get the destination host id which is i hop from this host. // // For example, 1 hop from host 2 is host 0 (assume total 3 hosts); // // -1 hop from host 0 is host 2. // int hop_2_me_host_id(int hop) const // { // assert(hop >= -(num_hosts - 1) && hop < num_hosts && hop != 0); // return (host_id + hop + num_hosts) % num_hosts; // } // // Function: get the destination host id which is i hop from the root. // // For example, 1 hop from host 2 is host 0 (assume total 3 hosts); // // -1 hop from host 0 is host 2. 
//    int hop_2_root_host_id(int hop, int root) const
//    {
//        assert(hop >= -(num_hosts - 1) && hop < num_hosts && hop != 0);
//        assert(root >= 0 && root < num_hosts);
//        return (root + hop + num_hosts) % num_hosts;
//    }

    // Function: total size of the local (master-owned) part of the label index:
    // the sum of L[v].size() over all masters.
    // NOTE(review): the unit is whatever IndexType::size() reports -- presumably
    // label entries rather than bytes, despite the variable name; confirm
    // against IndexType before using this as a byte count.
    size_t get_index_size()
    {
        size_t bytes = 0;
        for (VertexID v_i = 0; v_i < num_masters; ++v_i) {
            bytes += L[v_i].size();
        }
        return bytes;
    }

    // Test only
//    uint64_t normal_hit_count = 0;
//    uint64_t bp_hit_count = 0;
//    uint64_t total_check_count = 0;
//    uint64_t normal_check_count = 0;
//    uint64_t total_candidates_num = 0;
//    uint64_t set_candidates_num = 0;
//    double initializing_time = 0;
//    double candidating_time = 0;
//    double adding_time = 0;
//    double distance_query_time = 0;
//    double init_index_time = 0;
//    double init_dist_matrix_time = 0;
//    double init_start_reset_time = 0;
//    double init_indicators_time = 0;
    //L2CacheMissRate cache_miss;
    // Wall-clock accumulators (in WallTimer units, presumably seconds -- see the
    // "%.2f seconds" print below) for the phases of index construction; they are
    // updated throughout the class and reported at the end of the constructor.
    double message_time = 0;
    double bp_labeling_time = 0;
    double initializing_time = 0;
    double scatter_time = 0;
    double gather_time = 0;
    double clearup_time = 0;
//    TotalInstructsExe candidating_ins_count;
//    TotalInstructsExe adding_ins_count;
//    TotalInstructsExe bp_labeling_ins_count;
//    TotalInstructsExe bp_checking_ins_count;
//    TotalInstructsExe dist_query_ins_count;
    // End test

public:
//    std::pair<uint64_t, uint64_t> length_larger_than_16 = std::make_pair(0, 0);
    DistBVCPLL() = default;
    explicit DistBVCPLL(
            const DistGraph &G);
//    UnweightedDist dist_distance_query_pair(
//            VertexID a_global,
//            VertexID b_global,
//            const DistGraph &G);
}; // class DistBVCPLL

// Constructor: builds the whole distributed index for graph G.
// All hosts execute this collectively (it contains matching MPI collectives):
//   1. bit_parallel_labeling() selects the bit-parallel roots and fills every
//      master's bp_dist/bp_sets in L.
//   2. Vertices are processed in batches of BATCH_SIZE (plus one final partial
//      batch of `remainer` vertices) via batch_process(), which appends the
//      canonical labels to L.  All per-batch queues/tables are allocated once
//      here and reused across batches.
//   3. Label-count and timing statistics are reduced over MPI and printed.
template <VertexID BATCH_SIZE>
DistBVCPLL<BATCH_SIZE>::
DistBVCPLL(
        const DistGraph &G)
{
    num_v = G.num_v;
    assert(num_v >= BATCH_SIZE);
    num_masters = G.num_masters;
    host_id = G.host_id;
//    {
//        if (1 == host_id) {
//            volatile int i = 0;
//            while (i == 0) {
//                sleep(5);
//            }
//        }
//    }
    num_hosts = G.num_hosts;
    V_ID_Type = G.V_ID_Type;
//    L.resize(num_v);
    // The label index only holds this host's masters, indexed by local ID.
    L.resize(num_masters);
    // Number of leftover vertices after the full batches ("remainder").
    VertexID remainer = num_v % BATCH_SIZE;
    VertexID b_i_bound = num_v / BATCH_SIZE;
    std::vector<uint8_t> used_bp_roots(num_v, 0); // used_bp_roots[v]: v was consumed as a bit-parallel root/neighbor, skip it later.
    //cache_miss.measure_start();
    double time_labeling = -WallTimer::get_time_mark();

    // Phase 1: bit-parallel labeling (collective).
    bp_labeling_time -= WallTimer::get_time_mark();
    bit_parallel_labeling(G,
            used_bp_roots);
    bp_labeling_time += WallTimer::get_time_mark();
    {//test
//#ifdef DEBUG_MESSAGES_ON
        if (0 == host_id) {
            printf("host_id: %u bp_labeling_finished.\n", host_id);
        }
//#endif
    }

    // Shared scratch structures reused by every call to batch_process().
    // Queues are paired with a uint8_t membership flag array (uint8_t instead
    // of vector<bool> so elements can be addressed independently by threads).
    std::vector<VertexID> active_queue(num_masters); // Any vertex v who is active should be put into this queue.
    VertexID end_active_queue = 0;
    std::vector<uint8_t> is_active(num_masters, false);// is_active[v] is true means vertex v is in the active queue.
//    std::vector<bool> is_active(num_masters, false);// is_active[v] is true means vertex v is in the active queue.
    std::vector<VertexID> got_candidates_queue(num_masters); // Any vertex v who got candidates should be put into this queue.
    VertexID end_got_candidates_queue = 0;
    std::vector<uint8_t> got_candidates(num_masters, false); // got_candidates[v] is true means vertex v is in the queue got_candidates_queue
//    std::vector<bool> got_candidates(num_masters, false); // got_candidates[v] is true means vertex v is in the queue got_candidates_queue
    std::vector<ShortIndex> short_index(num_masters);
    // dist_table[r][v]: current distance from batch root r to every global v.
    std::vector< std::vector<UnweightedDist> > dist_table(BATCH_SIZE, std::vector<UnweightedDist>(num_v, MAX_UNWEIGHTED_DIST));

    std::vector<VertexID> once_candidated_queue(num_masters); // if short_index[v].indicator.any() is true, v is in the queue.
    // Used mainly for resetting short_index[v].indicator.
    VertexID end_once_candidated_queue = 0;
    std::vector<uint8_t> once_candidated(num_masters, false);
//    std::vector<bool> once_candidated(num_masters, false);
    std::vector< std::vector<VertexID> > recved_dist_table(BATCH_SIZE); // Some distances are from other hosts. This is used to reset the dist_table.
    std::vector<BPLabelType> bp_labels_table(BATCH_SIZE); // All roots' bit-parallel labels
    //printf("b_i_bound: %u\n", b_i_bound);//test
    // Phase 2: process the full batches.
    for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
//#ifdef DEBUG_MESSAGES_ON
        if (0 == host_id) {
            printf("b_i: %u\n", b_i);//test
        }
//#endif
        batch_process(
                G,
                b_i,
                b_i * BATCH_SIZE,
                BATCH_SIZE,
//                L,
                used_bp_roots,
                active_queue,
                end_active_queue,
                got_candidates_queue,
                end_got_candidates_queue,
                short_index,
                dist_table,
                recved_dist_table,
                bp_labels_table,
                got_candidates,
                is_active,
                once_candidated_queue,
                end_once_candidated_queue,
                once_candidated);
//        exit(EXIT_SUCCESS); //test
    }
    // The trailing partial batch, if num_v is not a multiple of BATCH_SIZE.
    if (remainer != 0) {
//#ifdef DEBUG_MESSAGES_ON
        if (0 == host_id) {
            printf("b_i: %u\n", b_i_bound);//test
        }
//#endif
        batch_process(
                G,
                b_i_bound,
                b_i_bound * BATCH_SIZE,
                remainer,
//                L,
                used_bp_roots,
                active_queue,
                end_active_queue,
                got_candidates_queue,
                end_got_candidates_queue,
                short_index,
                dist_table,
                recved_dist_table,
                bp_labels_table,
                got_candidates,
                is_active,
                once_candidated_queue,
                end_once_candidated_queue,
                once_candidated);
    }
    time_labeling += WallTimer::get_time_mark();
    //cache_miss.measure_stop();

    // Phase 3: statistics reporting.
    // Test
    setlocale(LC_NUMERIC, "");
    if (0 == host_id) {
        printf("BATCH_SIZE: %u\n", BATCH_SIZE);
        printf("BP_Size: %u\n", BITPARALLEL_SIZE);
    }
    // Total Number of Labels: count this host's masters, then reduce globally.
    EdgeID local_num_labels = 0;
    for (VertexID v_global = 0; v_global < num_v; ++v_global) {
        if (G.get_master_host_id(v_global) != host_id) {
            continue;
        }
        local_num_labels += L[G.get_local_vertex_id(v_global)].vertices.size();
    }
    EdgeID global_num_labels;
    MPI_Allreduce(&local_num_labels,
            &global_num_labels,
            1,
            MPI_Instance::get_mpi_datatype<EdgeID>(),
            MPI_SUM,
            MPI_COMM_WORLD);
//    printf("host_id: %u local_num_labels: %lu %.2f%%\n", host_id, local_num_labels, 100.0 * local_num_labels / global_num_labels);
    MPI_Barrier(MPI_COMM_WORLD);
    if (0 == host_id) {
        printf("Global_num_labels: %lu average: %f\n", global_num_labels, 1.0 * global_num_labels / num_v);
    }
//    printf("BP_labeling: %f %.2f%%\n", bp_labeling_time, bp_labeling_time / time_labeling * 100);
//    printf("Initializing: %f %.2f%%\n", initializing_time, initializing_time / time_labeling * 100);
//    printf("\tinit_start_reset_time: %f (%f%%)\n", init_start_reset_time, init_start_reset_time / initializing_time * 100);
//    printf("\tinit_index_time: %f (%f%%)\n", init_index_time, init_index_time / initializing_time * 100);
//    printf("\t\tinit_indicators_time: %f (%f%%)\n", init_indicators_time, init_indicators_time / init_index_time * 100);
//    printf("\tinit_dist_matrix_time: %f (%f%%)\n", init_dist_matrix_time, init_dist_matrix_time / initializing_time * 100);
//    printf("Candidating: %f %.2f%%\n", candidating_time, candidating_time / time_labeling * 100);
//    printf("Adding: %f %.2f%%\n", adding_time, adding_time / time_labeling * 100);
//    printf("distance_query_time: %f %.2f%%\n", distance_query_time, distance_query_time / time_labeling * 100);
//    uint64_t total_check_count = bp_hit_count + normal_check_count;
//    printf("total_check_count: %'llu\n", total_check_count);
//    printf("bp_hit_count: %'llu %.2f%%\n",
//                    bp_hit_count,
//                    bp_hit_count * 100.0 / total_check_count);
//    printf("normal_check_count: %'llu %.2f%%\n", normal_check_count, normal_check_count * 100.0 / total_check_count);
//    printf("total_candidates_num: %'llu set_candidates_num: %'llu %.2f%%\n",
//                        total_candidates_num,
//                        set_candidates_num,
//                        set_candidates_num * 100.0 / total_candidates_num);
//    printf("\tnormal_hit_count (to total_check, to normal_check): %llu (%f%%, %f%%)\n",
//            normal_hit_count,
//            normal_hit_count * 100.0 / total_check_count,
//            normal_hit_count * 100.0 / (total_check_count - bp_hit_count));
    //cache_miss.print();
//    printf("Candidating: "); candidating_ins_count.print();
//    printf("Adding: "); adding_ins_count.print();
//    printf("BP_Labeling: "); bp_labeling_ins_count.print();
//    printf("BP_Checking: "); bp_checking_ins_count.print();
//    printf("distance_query: "); dist_query_ins_count.print();

    // Per-host timing breakdown (printed by every host, not only host 0).
    printf("num_hosts: %u host_id: %u\n"
           "Local_labeling_time: %.2f seconds\n"
           "bp_labeling_time: %.2f %.2f%%\n"
           "initializing_time: %.2f %.2f%%\n"
           "scatter_time: %.2f %.2f%%\n"
           "gather_time: %.2f %.2f%%\n"
           "clearup_time: %.2f %.2f%%\n"
           "message_time: %.2f %.2f%%\n",
            num_hosts,
            host_id,
            time_labeling,
            bp_labeling_time, 100.0 * bp_labeling_time / time_labeling,
            initializing_time, 100.0 * initializing_time / time_labeling,
            scatter_time, 100.0 * scatter_time / time_labeling,
            gather_time, 100.0 * gather_time / time_labeling,
            clearup_time, 100.0 * clearup_time / time_labeling,
            message_time, 100.0 * message_time / time_labeling);
    // Global labeling time = the slowest host (MPI_MAX).
    double global_time_labeling;
    MPI_Allreduce(&time_labeling,
            &global_time_labeling,
            1,
            MPI_DOUBLE,
            MPI_MAX,
            MPI_COMM_WORLD);
    MPI_Barrier(MPI_COMM_WORLD);
    if (0 == host_id) {
        printf("Global_labeling_time: %.2f seconds\n", global_time_labeling);
    }
    // End test
}

//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::bit_parallel_labeling(
//        const DistGraph &G,
//        std::vector<uint8_t> &used_bp_roots)
//{
////    VertexID num_v = G.num_v;
//    EdgeID num_e = G.num_e;
//
//    std::vector<UnweightedDist> tmp_d(num_v); // distances from the root to every v
//    std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0}
//    std::vector<VertexID> que(num_v); // active queue
//    std::vector<std::pair<VertexID, VertexID> > sibling_es(num_e); // siblings, their distances to the root are equal (have difference of 0)
//    std::vector<std::pair<VertexID, VertexID> > child_es(num_e); // child and father, their distances to the root have difference of 1.
// // VertexID r = 0; // root r // for (VertexID i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) { // while (r < num_v && used_bp_roots[r]) { // ++r; // } // if (r == num_v) { // for (VertexID v = 0; v < num_v; ++v) { // L[v].bp_dist[i_bpspt] = MAX_UNWEIGHTED_DIST; // } // continue; // } // used_bp_roots[r] = true; // // fill(tmp_d.begin(), tmp_d.end(), MAX_UNWEIGHTED_DIST); // fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0)); // // VertexID que_t0 = 0, que_t1 = 0, que_h = 0; // que[que_h++] = r; // tmp_d[r] = 0; // que_t1 = que_h; // // int ns = 0; // number of selected neighbor, default 64 // // the edge of one vertex in G is ordered decreasingly to rank, lower rank first, so here need to traverse edges backward // // There was a bug cost countless time: the unsigned iterator i might decrease to zero and then flip to the INF. //// VertexID i_bound = G.vertices[r] - 1; //// VertexID i_start = i_bound + G.out_degrees[r]; //// for (VertexID i = i_start; i > i_bound; --i) { // //int i_bound = G.vertices[r]; // //int i_start = i_bound + G.out_degrees[r] - 1; // //for (int i = i_start; i >= i_bound; --i) { // VertexID d_i_bound = G.local_out_degrees[r]; // EdgeID i_start = G.vertices_idx[r] + d_i_bound - 1; // for (VertexID d_i = 0; d_i < d_i_bound; ++d_i) { // EdgeID i = i_start - d_i; // VertexID v = G.out_edges[i]; // if (!used_bp_roots[v]) { // used_bp_roots[v] = true; // // Algo3:line4: for every v in S_r, (dist[v], S_r^{-1}[v], S_r^{0}[v]) <- (1, {v}, empty_set) // que[que_h++] = v; // tmp_d[v] = 1; // tmp_s[v].first = 1ULL << ns; // if (++ns == 64) break; // } // } // //} //// } // // for (UnweightedDist d = 0; que_t0 < que_h; ++d) { // VertexID num_sibling_es = 0, num_child_es = 0; // // for (VertexID que_i = que_t0; que_i < que_t1; ++que_i) { // VertexID v = que[que_i]; //// bit_parallel_push_labels(G, //// v, //// que, //// que_h, //// sibling_es, //// num_sibling_es, //// child_es, //// num_child_es, //// tmp_d, //// d); // EdgeID i_start = 
G.vertices_idx[v];
//            EdgeID i_bound = i_start + G.local_out_degrees[v];
//            for (EdgeID i = i_start; i < i_bound; ++i) {
//                VertexID tv = G.out_edges[i];
//                UnweightedDist td = d + 1;
//
//                if (d > tmp_d[tv]) {
//                    ;
//                }
//                else if (d == tmp_d[tv]) {
//                    if (v < tv) { // ??? Why need v < tv !!! Because it's a undirected graph.
//                        sibling_es[num_sibling_es].first = v;
//                        sibling_es[num_sibling_es].second = tv;
//                        ++num_sibling_es;
//                    }
//                } else { // d < tmp_d[tv]
//                    if (tmp_d[tv] == MAX_UNWEIGHTED_DIST) {
//                        que[que_h++] = tv;
//                        tmp_d[tv] = td;
//                    }
//                    child_es[num_child_es].first = v;
//                    child_es[num_child_es].second = tv;
//                    ++num_child_es;
//                }
//            }
//        }
//
//        for (VertexID i = 0; i < num_sibling_es; ++i) {
//            VertexID v = sibling_es[i].first, w = sibling_es[i].second;
//            tmp_s[v].second |= tmp_s[w].first;
//            tmp_s[w].second |= tmp_s[v].first;
//        }
//        for (VertexID i = 0; i < num_child_es; ++i) {
//            VertexID v = child_es[i].first, c = child_es[i].second;
//            tmp_s[c].first |= tmp_s[v].first;
//            tmp_s[c].second |= tmp_s[v].second;
//        }
//
//        {// test
//            printf("iter %u @%u host_id: %u num_sibling_es: %u num_child_es: %u\n", d, __LINE__, host_id, num_sibling_es, num_child_es);
////            if (4 == d) {
////                exit(EXIT_SUCCESS);
////            }
//        }
//
//        que_t0 = que_t1;
//        que_t1 = que_h;
//    }
//
//    for (VertexID v = 0; v < num_v; ++v) {
//        L[v].bp_dist[i_bpspt] = tmp_d[v];
//        L[v].bp_sets[i_bpspt][0] = tmp_s[v].first; // S_r^{-1}
//        L[v].bp_sets[i_bpspt][1] = tmp_s[v].second & ~tmp_s[v].first; // Only need those r's neighbors who are not already in S_r^{-1}
//    }
//    }
//
//}

// Function: relax all local out-neighbors of v_global for one level of a
// bit-parallel BFS.  Designed to be called from an OpenMP parallel loop:
// caller i owns the private slice starting at offset_tmp_q inside tmp_q /
// tmp_sibling_es / tmp_child_es and bumps only its own size counters, so no
// two callers write the same slots.  `dists` (indexed by LOCAL vertex ID) is
// the only shared mutable state; an undiscovered vertex is claimed exactly
// once via CAS.
//   - same-level neighbor (dists == iter): record a sibling edge, once per
//     undirected edge (enforced by v_global < tv_global);
//   - next-level neighbor (dists == MAX): CAS MAX_UNWEIGHTED_DIST -> iter+1
//     claims it and enqueues it into tmp_q; the parent-child edge is recorded
//     unconditionally.
// NOTE(review): the reads of dists[tv_local] in the if-chain are not
// synchronized with concurrent CASes by other threads; presumably benign
// because a slot only ever drops from MAX to iter+1 within one level --
// confirm.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
bit_parallel_push_labels(
        const DistGraph &G,
        const VertexID v_global,
//        std::vector<VertexID> &tmp_que,
//        VertexID &end_tmp_que,
//        std::vector< std::pair<VertexID, VertexID> > &sibling_es,
//        VertexID &num_sibling_es,
//        std::vector< std::pair<VertexID, VertexID> > &child_es,
//        VertexID &num_child_es,
        std::vector<VertexID> &tmp_q,
        VertexID &size_tmp_q,
        std::vector< std::pair<VertexID, VertexID> > &tmp_sibling_es,
        VertexID &size_tmp_sibling_es,
        std::vector< std::pair<VertexID, VertexID> > &tmp_child_es,
        VertexID &size_tmp_child_es,
        const VertexID &offset_tmp_q,
        std::vector<UnweightedDist> &dists,
        const UnweightedDist iter)
{
    EdgeID i_start = G.vertices_idx[v_global];
    EdgeID i_bound = i_start + G.local_out_degrees[v_global];
//    {//test
//        printf("host_id: %u local_out_degrees[%u]: %u\n", host_id, v_global, G.local_out_degrees[v_global]);
//    }
    for (EdgeID i = i_start; i < i_bound; ++i) {
        VertexID tv_global = G.out_edges[i];
        VertexID tv_local = G.get_local_vertex_id(tv_global);
        UnweightedDist td = iter + 1;

        if (iter > dists[tv_local]) {
            ; // Neighbor already settled at a smaller level: nothing to do.
        } else if (iter == dists[tv_local]) {
            if (v_global < tv_global) { // ??? Why need v < tv !!! Because it's a undirected graph.
                tmp_sibling_es[offset_tmp_q + size_tmp_sibling_es].first = v_global;
                tmp_sibling_es[offset_tmp_q + size_tmp_sibling_es].second = tv_global;
                ++size_tmp_sibling_es;
//                sibling_es[num_sibling_es].first = v_global;
//                sibling_es[num_sibling_es].second = tv_global;
//                ++num_sibling_es;
            }
        } else { // iter < dists[tv]
            if (dists[tv_local] == MAX_UNWEIGHTED_DIST) {
                // CAS so that exactly one thread claims tv and enqueues it.
                if (CAS(dists.data() + tv_local, MAX_UNWEIGHTED_DIST, td)) {
                    tmp_q[offset_tmp_q + size_tmp_q++] = tv_global;
                }
            }
//            if (dists[tv_local] == MAX_UNWEIGHTED_DIST) {
//                tmp_que[end_tmp_que++] = tv_global;
//                dists[tv_local] = td;
//            }
            tmp_child_es[offset_tmp_q + size_tmp_child_es].first = v_global;
            tmp_child_es[offset_tmp_q + size_tmp_child_es].second = tv_global;
            ++size_tmp_child_es;
//            child_es[num_child_es].first = v_global;
//            child_es[num_child_es].second = tv_global;
//            ++num_child_es;
        }
    }
}

// Function: distributed construction of the BITPARALLEL_SIZE bit-parallel
// labels.  All hosts execute this collectively.  Each round:
//   1. Host 0 picks the next unused root r_global and broadcasts it.
//   2. Up to 64 of r's neighbors are selected: every host sends its local
//      neighbor list of r to host 0, host 0 merges them, picks the first 64
//      unused ones, and sends the selection back.  Each selected neighbor
//      v_i is tracked by bit (1ULL << v_i) in the (S_r^{-1}, S_r^{0}) pair
//      tmp_s[v] (indexed by GLOBAL ID; tmp_d/que are per-master, local IDs).
//   3. A level-synchronous BFS runs: each level, active masters are broadcast
//      host-by-host via one_host_bcasts_buffer_to_buffer(); mirrors relax
//      edges in parallel (bit_parallel_push_labels), sibling/child edges merge
//      the bit sets with atomic ORs, updated sets are re-broadcast, and
//      MPI_Allreduce of queue sizes decides termination.
//   4. Every master stores its distance and the two 64-bit sets into L.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
bit_parallel_labeling(
        const DistGraph &G,
//        std::vector<IndexType> &L,
        std::vector<uint8_t> &used_bp_roots)
{
    // Class type of Bit-Parallel label message unit: one active master's
    // global ID and its two 64-bit sets.
    struct MsgUnitBP {
        VertexID v_global;
        uint64_t S_n1;
        uint64_t S_0;

        MsgUnitBP() = default;
//        MsgUnitBP(MsgUnitBP&& other) = default;
//        MsgUnitBP(MsgUnitBP& other) = default;
//        MsgUnitBP& operator=(const MsgUnitBP& other) = default;
//        MsgUnitBP& operator=(MsgUnitBP&& other) = default;
        MsgUnitBP(VertexID v, uint64_t sn1, uint64_t s0)
                : v_global(v), S_n1(sn1), S_0(s0) { }
    };
//    VertexID num_v = G.num_v;
//    EdgeID num_e = G.num_e;
    EdgeID local_num_edges = G.num_edges_local;

    std::vector<UnweightedDist> tmp_d(num_masters); // distances from the root to every v
    std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0}
    std::vector<VertexID> que(num_masters); // active queue
    VertexID end_que = 0;
    std::vector<VertexID> tmp_que(num_masters); // temporary queue, to be swapped with que
    VertexID end_tmp_que = 0;
    std::vector<std::pair<VertexID, VertexID> > sibling_es(local_num_edges); // siblings, their distances to the root are equal (have difference of 0)
    std::vector<std::pair<VertexID, VertexID> > child_es(local_num_edges); // child and father, their distances to the root have difference of 1.

    VertexID r_global = 0; // root r
    for (VertexID i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) {
//        {// test
//            if (0 == host_id) {
//                printf("i_bpsp: %u\n", i_bpspt);
//            }
//        }
        // Select the root r_global
        if (0 == host_id) {
            while (r_global < num_v && used_bp_roots[r_global]) {
                ++r_global;
            }
            if (r_global == num_v) {
                // NOTE(review): this path runs on host 0 only; it skips the
                // MPI_Bcast below that every other host is blocked in, and it
                // indexes L[v] for all global v although L holds only
                // num_masters entries (see the constructor's L.resize).
                // Looks like a latent hang/out-of-bounds on the (rare)
                // all-roots-used path; confirm and fix upstream.
                for (VertexID v = 0; v < num_v; ++v) {
                    L[v].bp_dist[i_bpspt] = MAX_UNWEIGHTED_DIST;
                }
                continue;
            }
        }
        // Broadcast the r here.
        message_time -= WallTimer::get_time_mark();
        MPI_Bcast(&r_global,
                1,
                V_ID_Type,
                0,
                MPI_COMM_WORLD);
        message_time += WallTimer::get_time_mark();
        used_bp_roots[r_global] = 1;
//#ifdef DEBUG_MESSAGES_ON
//        {//test
//            if (0 == host_id) {
//                printf("r_global: %u i_bpspt: %u\n", r_global, i_bpspt);
//            }
//        }
//#endif

//        VertexID que_t0 = 0, que_t1 = 0, que_h = 0;
        fill(tmp_d.begin(), tmp_d.end(), MAX_UNWEIGHTED_DIST);
        fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0));

        // Mark the r_global: only its master host seeds it into the frontier.
        if (G.get_master_host_id(r_global) == host_id) {
            tmp_d[G.get_local_vertex_id(r_global)] = 0;
            que[end_que++] = r_global;
        }

        // Select the r_global's 64 neighbors
        {
            // Get r_global's neighbors into buffer_send, rank from high to low.
            VertexID local_degree = G.local_out_degrees[r_global];
            std::vector<VertexID> buffer_send(local_degree);
            if (local_degree) {
                // Edges are walked backward, reversing the stored edge order.
                EdgeID e_i_start = G.vertices_idx[r_global] + local_degree - 1;
                for (VertexID d_i = 0; d_i < local_degree; ++d_i) {
                    EdgeID e_i = e_i_start - d_i;
                    buffer_send[d_i] = G.out_edges[e_i];
                }
            }
            // Get selected neighbors (up to 64)
            std::vector<VertexID> selected_nbrs;
            if (0 != host_id) {
                // Every host other than 0 sends neighbors to host 0
                message_time -= WallTimer::get_time_mark();
                MPI_Instance::send_buffer_2_dst(buffer_send,
                        0,
                        SENDING_ROOT_NEIGHBORS,
                        SENDING_SIZE_ROOT_NEIGHBORS);
                // Receive selected neighbors from host 0
                MPI_Instance::recv_buffer_from_src(selected_nbrs,
                        0,
                        SENDING_SELECTED_NEIGHBORS,
                        SENDING_SIZE_SELETED_NEIGHBORS);
                message_time += WallTimer::get_time_mark();
            } else {
                // Host 0
                // Host 0 receives neighbors from others
                std::vector<VertexID> all_nbrs(buffer_send);
                std::vector<VertexID > buffer_recv;
                for (int loc = 0; loc < num_hosts - 1; ++loc) {
                    message_time -= WallTimer::get_time_mark();
                    MPI_Instance::recv_buffer_from_any(buffer_recv,
                            SENDING_ROOT_NEIGHBORS,
                            SENDING_SIZE_ROOT_NEIGHBORS);
                    message_time += WallTimer::get_time_mark();
                    if (buffer_recv.empty()) {
                        continue;
                    }
                    // buffer_send is reused as the merge destination; after the
                    // merge, all_nbrs is the union so far.
                    // NOTE(review): std::merge requires both input ranges to be
                    // sorted ascending by the default comparator -- this relies
                    // on each host's reversed edge walk producing an ascending
                    // sequence; confirm against DistGraph's edge ordering.
                    buffer_send.resize(buffer_send.size() + buffer_recv.size());
                    std::merge(buffer_recv.begin(), buffer_recv.end(), all_nbrs.begin(), all_nbrs.end(), buffer_send.begin());
                    all_nbrs.resize(buffer_send.size());
                    all_nbrs.assign(buffer_send.begin(), buffer_send.end());
                }
                assert(all_nbrs.size() == G.get_global_out_degree(r_global));
                // Select 64 (or less) neighbors
                VertexID ns = 0; // number of selected neighbor, default 64
                for (VertexID v_global : all_nbrs) {
                    if (used_bp_roots[v_global]) {
                        continue;
                    }
                    used_bp_roots[v_global] = 1;
                    selected_nbrs.push_back(v_global);
                    if (++ns == 64) {
                        break;
                    }
                }
                // Send selected neighbors to other hosts
                message_time -= WallTimer::get_time_mark();
                for (int dest = 1; dest < num_hosts; ++dest) {
                    MPI_Instance::send_buffer_2_dst(selected_nbrs,
                            dest,
                            SENDING_SELECTED_NEIGHBORS,
                            SENDING_SIZE_SELETED_NEIGHBORS);
                }
                message_time += WallTimer::get_time_mark();
            }
//            {//test
//                printf("host_id: %u selected_nbrs.size(): %lu\n", host_id, selected_nbrs.size());
//            }

            // Synchronize the used_bp_roots.
            for (VertexID v_global : selected_nbrs) {
                used_bp_roots[v_global] = 1;
            }

            // Mark selected neighbors: neighbor v_i gets bit (1ULL << v_i) in
            // S_r^{-1}; only its master seeds it into the next frontier.
            for (VertexID v_i = 0; v_i < selected_nbrs.size(); ++v_i) {
                VertexID v_global = selected_nbrs[v_i];
                if (host_id != G.get_master_host_id(v_global)) {
                    continue;
                }
                tmp_que[end_tmp_que++] = v_global;
                tmp_d[G.get_local_vertex_id(v_global)] = 1;
                tmp_s[v_global].first = 1ULL << v_i;
            }
        }

        // Reduce the global number of active vertices
        VertexID global_num_actives = 1;
        UnweightedDist d = 0;
        while (global_num_actives) {
//#ifdef DEBUG_MESSAGES_ON
//            {//test
//                if (0 == host_id) {
//                    printf("d: %u que_size: %u\n", d, global_num_actives);
//                }
//            }
//#endif
//        for (UnweightedDist d = 0; que_t0 < que_h; ++d) {
            VertexID num_sibling_es = 0, num_child_es = 0;

            // Send active masters to mirrors
            {
                std::vector<MsgUnitBP> buffer_send(end_que);
                for (VertexID que_i = 0; que_i < end_que; ++que_i) {
                    VertexID v_global = que[que_i];
                    buffer_send[que_i] = MsgUnitBP(v_global, tmp_s[v_global].first, tmp_s[v_global].second);
                }
//                {// test
//                    printf("host_id: %u buffer_send.size(): %lu\n", host_id, buffer_send.size());
//                }

                for (int root = 0; root < num_hosts; ++root) {
                    std::vector<MsgUnitBP> buffer_recv;
                    one_host_bcasts_buffer_to_buffer(root,
                            buffer_send,
                            buffer_recv);
                    if (buffer_recv.empty()) {
                        continue;
                    }
                    // For parallel adding to queue: each message i gets a
                    // private slice of size local_out_degrees[v] (prefix sums
                    // turn the degrees into slice offsets).
                    VertexID size_buffer_recv = buffer_recv.size();
                    std::vector<VertexID> offsets_tmp_q(size_buffer_recv);
#pragma omp parallel for
                    for (VertexID i_q = 0; i_q < size_buffer_recv; ++i_q) {
                        offsets_tmp_q[i_q] = G.local_out_degrees[buffer_recv[i_q].v_global];
                    }
                    VertexID num_neighbors = PADO::prefix_sum_for_offsets(offsets_tmp_q);
                    std::vector<VertexID> tmp_q(num_neighbors);
                    std::vector<VertexID> sizes_tmp_q(size_buffer_recv, 0);
                    // For parallel adding to sibling_es
                    std::vector< std::pair<VertexID, VertexID> > tmp_sibling_es(num_neighbors);
                    std::vector<VertexID> sizes_tmp_sibling_es(size_buffer_recv, 0);
                    // For parallel adding to child_es
                    std::vector< std::pair<VertexID, VertexID> > tmp_child_es(num_neighbors);
                    std::vector<VertexID> sizes_tmp_child_es(size_buffer_recv, 0);
#pragma omp parallel for
//                    for (const MsgUnitBP &m : buffer_recv) {
                    for (VertexID i_m = 0; i_m < size_buffer_recv; ++i_m) {
                        const MsgUnitBP &m = buffer_recv[i_m];
                        VertexID v_global = m.v_global;
                        if (!G.local_out_degrees[v_global]) {
                            continue;
                        }
                        // Adopt the sender's sets for this mirror, then relax
                        // its local out-edges into the private slices.
                        tmp_s[v_global].first = m.S_n1;
                        tmp_s[v_global].second = m.S_0;
                        // Push labels
                        bit_parallel_push_labels(G,
                                v_global,
                                tmp_q,
                                sizes_tmp_q[i_m],
                                tmp_sibling_es,
                                sizes_tmp_sibling_es[i_m],
                                tmp_child_es,
                                sizes_tmp_child_es[i_m],
                                offsets_tmp_q[i_m],
//                                tmp_que,
//                                end_tmp_que,
//                                sibling_es,
//                                num_sibling_es,
//                                child_es,
//                                num_child_es,
                                tmp_d,
                                d);
                    }
                    // Compact the sparse per-message slices into the dense
                    // shared arrays.
                    {// From tmp_sibling_es to sibling_es
                        idi total_size_tmp = PADO::prefix_sum_for_offsets(sizes_tmp_sibling_es);
                        PADO::collect_into_queue(tmp_sibling_es,
                                offsets_tmp_q,
                                sizes_tmp_sibling_es,
                                total_size_tmp,
                                sibling_es,
                                num_sibling_es);
                    }
                    {// From tmp_child_es to child_es
                        idi total_size_tmp = PADO::prefix_sum_for_offsets(sizes_tmp_child_es);
                        PADO::collect_into_queue(tmp_child_es,
                                offsets_tmp_q,
                                sizes_tmp_child_es,
                                total_size_tmp,
                                child_es,
                                num_child_es);
                    }
                    {// From tmp_q to tmp_que
                        idi total_size_tmp = PADO::prefix_sum_for_offsets(sizes_tmp_q);
                        PADO::collect_into_queue(tmp_q,
                                offsets_tmp_q,
                                sizes_tmp_q,
                                total_size_tmp,
                                tmp_que,
                                end_tmp_que);
                    }
//                    {// test
//                        printf("host_id: %u root: %u done push.\n", host_id, root);
//                    }
                }
            }

            // Update the sets in tmp_s
            {
                // Sibling edges: each endpoint's S^0 absorbs the other's
                // S^{-1}.  Atomic ORs because one vertex can appear in many
                // edges processed by different threads.
#pragma omp parallel for
                for (VertexID i = 0; i < num_sibling_es; ++i) {
                    VertexID v = sibling_es[i].first, w = sibling_es[i].second;
                    __atomic_or_fetch(&tmp_s[v].second, tmp_s[w].first, __ATOMIC_SEQ_CST);
                    __atomic_or_fetch(&tmp_s[w].second, tmp_s[v].first, __ATOMIC_SEQ_CST);
//                    tmp_s[v].second |= tmp_s[w].first; // !!! Need to send back!!!
//                    tmp_s[w].second |= tmp_s[v].first;
                }
                // Put into the buffer sending to others
                std::vector< std::pair<VertexID, uint64_t> > buffer_send(2 * num_sibling_es);
#pragma omp parallel for
                for (VertexID i = 0; i < num_sibling_es; ++i) {
                    VertexID v = sibling_es[i].first;
                    VertexID w = sibling_es[i].second;
                    buffer_send[2 * i] = std::make_pair(v, tmp_s[v].second);
                    buffer_send[2 * i + 1] = std::make_pair(w, tmp_s[w].second);
                }
                // Send the messages: merge every host's sibling updates.
                for (int root = 0; root < num_hosts; ++root) {
                    std::vector< std::pair<VertexID, uint64_t> > buffer_recv;
                    one_host_bcasts_buffer_to_buffer(root,
                            buffer_send,
                            buffer_recv);
                    if (buffer_recv.empty()) {
                        continue;
                    }
                    size_t i_m_bound = buffer_recv.size();
#pragma omp parallel for
                    for (size_t i_m = 0; i_m < i_m_bound; ++i_m) {
                        const auto &m = buffer_recv[i_m];
                        __atomic_or_fetch(&tmp_s[m.first].second, m.second, __ATOMIC_SEQ_CST);
                    }
//                    for (const std::pair<VertexID, uint64_t> &m : buffer_recv) {
//                        tmp_s[m.first].second |= m.second;
//                    }
                }

                // Child edges: the child inherits both of the parent's sets.
#pragma omp parallel for
                for (VertexID i = 0; i < num_child_es; ++i) {
                    VertexID v = child_es[i].first, c = child_es[i].second;
                    __atomic_or_fetch(&tmp_s[c].first, tmp_s[v].first, __ATOMIC_SEQ_CST);
                    __atomic_or_fetch(&tmp_s[c].second, tmp_s[v].second, __ATOMIC_SEQ_CST);
//                    tmp_s[c].first |= tmp_s[v].first;
//                    tmp_s[c].second |= tmp_s[v].second;
                }
            }
//#ifdef DEBUG_MESSAGES_ON
//        {// test
//            VertexID global_num_sibling_es;
//            VertexID global_num_child_es;
//            MPI_Allreduce(&num_sibling_es,
//                    &global_num_sibling_es,
//                    1,
//                    V_ID_Type,
//                    MPI_SUM,
//                    MPI_COMM_WORLD);
//            MPI_Allreduce(&num_child_es,
//                    &global_num_child_es,
//                    1,
//                    V_ID_Type,
//                    MPI_SUM,
//                    MPI_COMM_WORLD);
//            if (0 == host_id) {
//                printf("iter: %u num_sibling_es: %u num_child_es: %u\n", d, global_num_sibling_es, global_num_child_es);
//            }
//
////            printf("iter %u @%u host_id: %u num_sibling_es: %u num_child_es: %u\n", d, __LINE__, host_id, num_sibling_es, num_child_es);
////            if (0 == d) {
////                exit(EXIT_SUCCESS);
////            }
//        }
//#endif
            // Swap que and tmp_que; terminate when no host has actives left.
            tmp_que.swap(que);
            end_que = end_tmp_que;
            end_tmp_que = 0;
            MPI_Allreduce(&end_que,
                    &global_num_actives,
                    1,
                    V_ID_Type,
                    MPI_SUM,
                    MPI_COMM_WORLD);
//        }
            ++d;
        }

        // Store this root's results into every master's label.
#pragma omp parallel for
        for (VertexID v_local = 0; v_local < num_masters; ++v_local) {
            VertexID v_global = G.get_global_vertex_id(v_local);
            L[v_local].bp_dist[i_bpspt] = tmp_d[v_local];
            L[v_local].bp_sets[i_bpspt][0] = tmp_s[v_global].first; // S_r^{-1}
            L[v_local].bp_sets[i_bpspt][1] = tmp_s[v_global].second & ~tmp_s[v_global].first; // Only need those r's neighbors who are not already in S_r^{-1}
        }
    }
}

//template <VertexID BATCH_SIZE>
//inline void DistBVCPLL<BATCH_SIZE>::
//bit_parallel_push_labels(
//        const DistGraph &G,
//        const VertexID v_global,
//        std::vector<VertexID> &tmp_que,
//        VertexID &end_tmp_que,
//        std::vector< std::pair<VertexID, VertexID> > &sibling_es,
//        VertexID &num_sibling_es,
//        std::vector< std::pair<VertexID, VertexID> > &child_es,
//        VertexID &num_child_es,
//        std::vector<UnweightedDist> &dists,
//        const UnweightedDist iter)
//{
//    EdgeID i_start = G.vertices_idx[v_global];
//    EdgeID i_bound = i_start + G.local_out_degrees[v_global];
////    {//test
////        
printf("host_id: %u local_out_degrees[%u]: %u\n", host_id, v_global, G.local_out_degrees[v_global]); //// } // for (EdgeID i = i_start; i < i_bound; ++i) { // VertexID tv_global = G.out_edges[i]; // VertexID tv_local = G.get_local_vertex_id(tv_global); // UnweightedDist td = iter + 1; // // if (iter > dists[tv_local]) { // ; // } else if (iter == dists[tv_local]) { // if (v_global < tv_global) { // ??? Why need v < tv !!! Because it's a undirected graph. // sibling_es[num_sibling_es].first = v_global; // sibling_es[num_sibling_es].second = tv_global; // ++num_sibling_es; // } // } else { // iter < dists[tv] // if (dists[tv_local] == MAX_UNWEIGHTED_DIST) { // tmp_que[end_tmp_que++] = tv_global; // dists[tv_local] = td; // } // child_es[num_child_es].first = v_global; // child_es[num_child_es].second = tv_global; // ++num_child_es; //// { //// printf("host_id: %u num_child_es: %u v_global: %u tv_global: %u\n", host_id, num_child_es, v_global, tv_global);//test //// } // } // } // //} // //template <VertexID BATCH_SIZE> //inline void DistBVCPLL<BATCH_SIZE>:: //bit_parallel_labeling( // const DistGraph &G, //// std::vector<IndexType> &L, // std::vector<uint8_t> &used_bp_roots) //{ // // Class type of Bit-Parallel label message unit. 
// struct MsgUnitBP { // VertexID v_global; // uint64_t S_n1; // uint64_t S_0; // // MsgUnitBP() = default; //// MsgUnitBP(MsgUnitBP&& other) = default; //// MsgUnitBP(MsgUnitBP& other) = default; //// MsgUnitBP& operator=(const MsgUnitBP& other) = default; //// MsgUnitBP& operator=(MsgUnitBP&& other) = default; // MsgUnitBP(VertexID v, uint64_t sn1, uint64_t s0) // : v_global(v), S_n1(sn1), S_0(s0) { } // }; //// VertexID num_v = G.num_v; //// EdgeID num_e = G.num_e; // EdgeID local_num_edges = G.num_edges_local; // // std::vector<UnweightedDist> tmp_d(num_masters); // distances from the root to every v // std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0} // std::vector<VertexID> que(num_masters); // active queue // VertexID end_que = 0; // std::vector<VertexID> tmp_que(num_masters); // temporary queue, to be swapped with que // VertexID end_tmp_que = 0; // std::vector<std::pair<VertexID, VertexID> > sibling_es(local_num_edges); // siblings, their distances to the root are equal (have difference of 0) // std::vector<std::pair<VertexID, VertexID> > child_es(local_num_edges); // child and father, their distances to the root have difference of 1. // //// std::vector<UnweightedDist> tmp_d(num_v); // distances from the root to every v //// std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0} //// std::vector<VertexID> que(num_v); // active queue //// std::vector<std::pair<VertexID, VertexID> > sibling_es(num_e); // siblings, their distances to the root are equal (have difference of 0) //// std::vector<std::pair<VertexID, VertexID> > child_es(num_e); // child and father, their distances to the root have difference of 1. 
// // VertexID r_global = 0; // root r // for (VertexID i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) { // // Select the root r_global // if (0 == host_id) { // while (r_global < num_v && used_bp_roots[r_global]) { // ++r_global; // } // if (r_global == num_v) { // for (VertexID v = 0; v < num_v; ++v) { // L[v].bp_dist[i_bpspt] = MAX_UNWEIGHTED_DIST; // } // continue; // } // } // // Broadcast the r here. // message_time -= WallTimer::get_time_mark(); // MPI_Bcast(&r_global, // 1, // V_ID_Type, // 0, // MPI_COMM_WORLD); // message_time += WallTimer::get_time_mark(); // used_bp_roots[r_global] = 1; //#ifdef DEBUG_MESSAGES_ON // {//test // if (0 == host_id) { // printf("r_global: %u i_bpspt: %u\n", r_global, i_bpspt); // } // } //#endif // //// VertexID que_t0 = 0, que_t1 = 0, que_h = 0; // fill(tmp_d.begin(), tmp_d.end(), MAX_UNWEIGHTED_DIST); // fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0)); // // // Mark the r_global // if (G.get_master_host_id(r_global) == host_id) { // tmp_d[G.get_local_vertex_id(r_global)] = 0; // que[end_que++] = r_global; // } // // Select the r_global's 64 neighbors // { // // Get r_global's neighbors into buffer_send, rank from low to high. 
// VertexID local_degree = G.local_out_degrees[r_global]; // std::vector<VertexID> buffer_send(local_degree); // if (local_degree) { // EdgeID e_i_start = G.vertices_idx[r_global] + local_degree - 1; // for (VertexID d_i = 0; d_i < local_degree; ++d_i) { // EdgeID e_i = e_i_start - d_i; // buffer_send[d_i] = G.out_edges[e_i]; // } // } // // // Get selected neighbors (up to 64) // std::vector<VertexID> selected_nbrs; // if (0 != host_id) { // // Every host other than 0 sends neighbors to host 0 // message_time -= WallTimer::get_time_mark(); // MPI_Instance::send_buffer_2_dst(buffer_send, // 0, // SENDING_ROOT_NEIGHBORS, // SENDING_SIZE_ROOT_NEIGHBORS); // // Receive selected neighbors from host 0 // MPI_Instance::recv_buffer_from_src(selected_nbrs, // 0, // SENDING_SELECTED_NEIGHBORS, // SENDING_SIZE_SELETED_NEIGHBORS); // message_time += WallTimer::get_time_mark(); // } else { // // Host 0 // // Host 0 receives neighbors from others // std::vector<VertexID> all_nbrs(buffer_send); // std::vector<VertexID > buffer_recv; // for (int loc = 0; loc < num_hosts - 1; ++loc) { // message_time -= WallTimer::get_time_mark(); // MPI_Instance::recv_buffer_from_any(buffer_recv, // SENDING_ROOT_NEIGHBORS, // SENDING_SIZE_ROOT_NEIGHBORS); //// MPI_Instance::receive_dynamic_buffer_from_any(buffer_recv, //// num_hosts, //// SENDING_ROOT_NEIGHBORS); // message_time += WallTimer::get_time_mark(); // if (buffer_recv.empty()) { // continue; // } // // buffer_send.resize(buffer_send.size() + buffer_recv.size()); // std::merge(buffer_recv.begin(), buffer_recv.end(), all_nbrs.begin(), all_nbrs.end(), buffer_send.begin()); // all_nbrs.resize(buffer_send.size()); // all_nbrs.assign(buffer_send.begin(), buffer_send.end()); // } // assert(all_nbrs.size() == G.get_global_out_degree(r_global)); // // Select 64 (or less) neighbors // VertexID ns = 0; // number of selected neighbor, default 64 // for (VertexID v_global : all_nbrs) { // if (used_bp_roots[v_global]) { // continue; // } // 
used_bp_roots[v_global] = 1; // selected_nbrs.push_back(v_global); // if (++ns == 64) { // break; // } // } // // Send selected neighbors to other hosts // message_time -= WallTimer::get_time_mark(); // for (int dest = 1; dest < num_hosts; ++dest) { // MPI_Instance::send_buffer_2_dst(selected_nbrs, // dest, // SENDING_SELECTED_NEIGHBORS, // SENDING_SIZE_SELETED_NEIGHBORS); // } // message_time += WallTimer::get_time_mark(); // } //// {//test //// printf("host_id: %u selected_nbrs.size(): %lu\n", host_id, selected_nbrs.size()); //// } // // // Synchronize the used_bp_roots. // for (VertexID v_global : selected_nbrs) { // used_bp_roots[v_global] = 1; // } // // // Mark selected neighbors // for (VertexID v_i = 0; v_i < selected_nbrs.size(); ++v_i) { // VertexID v_global = selected_nbrs[v_i]; // if (host_id != G.get_master_host_id(v_global)) { // continue; // } // tmp_que[end_tmp_que++] = v_global; // tmp_d[G.get_local_vertex_id(v_global)] = 1; // tmp_s[v_global].first = 1ULL << v_i; // } // } // // // Reduce the global number of active vertices // VertexID global_num_actives = 1; // UnweightedDist d = 0; // while (global_num_actives) { //// for (UnweightedDist d = 0; que_t0 < que_h; ++d) { // VertexID num_sibling_es = 0, num_child_es = 0; // // // // Send active masters to mirrors // { // std::vector<MsgUnitBP> buffer_send(end_que); // for (VertexID que_i = 0; que_i < end_que; ++que_i) { // VertexID v_global = que[que_i]; // buffer_send[que_i] = MsgUnitBP(v_global, tmp_s[v_global].first, tmp_s[v_global].second); // } //// {// test //// printf("host_id: %u buffer_send.size(): %lu\n", host_id, buffer_send.size()); //// } // // for (int root = 0; root < num_hosts; ++root) { // std::vector<MsgUnitBP> buffer_recv; // one_host_bcasts_buffer_to_buffer(root, // buffer_send, // buffer_recv); // if (buffer_recv.empty()) { // continue; // } // for (const MsgUnitBP &m : buffer_recv) { // VertexID v_global = m.v_global; // if (!G.local_out_degrees[v_global]) { // continue; // } 
// tmp_s[v_global].first = m.S_n1; // tmp_s[v_global].second = m.S_0; // // Push labels // bit_parallel_push_labels(G, // v_global, // tmp_que, // end_tmp_que, // sibling_es, // num_sibling_es, // child_es, // num_child_es, // tmp_d, // d); // } //// {// test //// printf("host_id: %u root: %u done push.\n", host_id, root); //// } // } // } // // // Update the sets in tmp_s // { // // for (VertexID i = 0; i < num_sibling_es; ++i) { // VertexID v = sibling_es[i].first, w = sibling_es[i].second; // tmp_s[v].second |= tmp_s[w].first; // !!! Need to send back!!! // tmp_s[w].second |= tmp_s[v].first; // // } // // Put into the buffer sending to others // std::vector< std::pair<VertexID, uint64_t> > buffer_send(2 * num_sibling_es); //// std::vector< std::vector<MPI_Request> > requests_list(num_hosts - 1); // for (VertexID i = 0; i < num_sibling_es; ++i) { // VertexID v = sibling_es[i].first; // VertexID w = sibling_es[i].second; //// buffer_send.emplace_back(v, tmp_s[v].second); //// buffer_send.emplace_back(w, tmp_s[w].second); // buffer_send[2 * i] = std::make_pair(v, tmp_s[v].second); // buffer_send[2 * i + 1] = std::make_pair(w, tmp_s[w].second); // } // // Send the messages // for (int root = 0; root < num_hosts; ++root) { // std::vector< std::pair<VertexID, uint64_t> > buffer_recv; // one_host_bcasts_buffer_to_buffer(root, // buffer_send, // buffer_recv); // if (buffer_recv.empty()) { // continue; // } // for (const std::pair<VertexID, uint64_t> &m : buffer_recv) { // tmp_s[m.first].second |= m.second; // } // } // for (VertexID i = 0; i < num_child_es; ++i) { // VertexID v = child_es[i].first, c = child_es[i].second; // tmp_s[c].first |= tmp_s[v].first; // tmp_s[c].second |= tmp_s[v].second; // } // } ////#ifdef DEBUG_MESSAGES_ON // {// test // VertexID global_num_sibling_es; // VertexID global_num_child_es; // MPI_Allreduce(&num_sibling_es, // &global_num_sibling_es, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // MPI_Allreduce(&num_child_es, // 
&global_num_child_es, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // if (0 == host_id) { // printf("iter: %u num_sibling_es: %u num_child_es: %u\n", d, global_num_sibling_es, global_num_child_es); // } // } ////#endif // // // Swap que and tmp_que // tmp_que.swap(que); // end_que = end_tmp_que; // end_tmp_que = 0; // MPI_Allreduce(&end_que, // &global_num_actives, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // //// } // ++d; // } // // for (VertexID v_local = 0; v_local < num_masters; ++v_local) { // VertexID v_global = G.get_global_vertex_id(v_local); // L[v_local].bp_dist[i_bpspt] = tmp_d[v_local]; // L[v_local].bp_sets[i_bpspt][0] = tmp_s[v_global].first; // S_r^{-1} // L[v_local].bp_sets[i_bpspt][1] = tmp_s[v_global].second & ~tmp_s[v_global].first; // Only need those r's neighbors who are not already in S_r^{-1} // } // } //} //// Function bit parallel checking: //// return false if shortest distance exits in bp labels, return true if bp labels cannot cover the distance //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //inline bool DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::bit_parallel_checking( // VertexID v_id, // VertexID w_id, // const std::vector<IndexType> &L, // UnweightedDist iter) //{ // // Bit Parallel Checking: if label_real_id to v_tail has shorter distance already // const IndexType &Lv = L[v_id]; // const IndexType &Lw = L[w_id]; // // _mm_prefetch(&Lv.bp_dist[0], _MM_HINT_T0); // _mm_prefetch(&Lv.bp_sets[0][0], _MM_HINT_T0); // _mm_prefetch(&Lw.bp_dist[0], _MM_HINT_T0); // _mm_prefetch(&Lw.bp_sets[0][0], _MM_HINT_T0); // for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) { // VertexID td = Lv.bp_dist[i] + Lw.bp_dist[i]; // Use type VertexID in case of addition of two INF. // if (td - 2 <= iter) { // td += // (Lv.bp_sets[i][0] & Lw.bp_sets[i][0]) ? -2 : // ((Lv.bp_sets[i][0] & Lw.bp_sets[i][1]) | // (Lv.bp_sets[i][1] & Lw.bp_sets[i][0])) // ? 
-1 : 0; // if (td <= iter) { //// ++bp_hit_count; // return false; // } // } // } // return true; //} // Function for initializing at the begin of a batch // For a batch, initialize the temporary labels and real labels of roots; // traverse roots' labels to initialize distance buffer; // unset flag arrays is_active and got_labels template <VertexID BATCH_SIZE> inline VertexID DistBVCPLL<BATCH_SIZE>:: initialization( const DistGraph &G, std::vector<ShortIndex> &short_index, std::vector< std::vector<UnweightedDist> > &dist_table, std::vector< std::vector<VertexID> > &recved_dist_table, std::vector<BPLabelType> &bp_labels_table, std::vector<VertexID> &active_queue, VertexID &end_active_queue, std::vector<VertexID> &once_candidated_queue, VertexID &end_once_candidated_queue, std::vector<uint8_t> &once_candidated, VertexID b_id, VertexID roots_start, VertexID roots_size, // std::vector<VertexID> &roots_master_local, const std::vector<uint8_t> &used_bp_roots) { // Get the roots_master_local, containing all local roots. 
    // ---------------------------------------------------------------------
    // Batch initialization, in five steps:
    //   1. Collect this host's master roots for the batch (skipping vertices
    //      already consumed as bit-parallel roots).
    //   2. Reset short_index/once_candidated flags; mark each local root as
    //      having its own label.
    //   3. Append the trivial self-label (dist 0) to each local root's real
    //      index L[r].
    //   4. Unpack local roots' labels, broadcast them host-by-host, and fill
    //      dist_table / recved_dist_table; likewise broadcast the roots'
    //      bit-parallel labels into bp_labels_table.
    //   5. Enqueue local roots as active and MPI_Allreduce the global count.
    // Returns the global number of active vertices (same value on all hosts).
    // Blocks guarded by THRESHOLD_PARALLEL switch between an OpenMP-parallel
    // path and a plain sequential path doing the same work.
    // ---------------------------------------------------------------------
    std::vector<VertexID> roots_master_local;
    VertexID roots_bound = roots_start + roots_size;
    for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) {
        // Only roots mastered on this host and not already used as BP roots.
        if (G.get_master_host_id(r_global) == host_id && !used_bp_roots[r_global]) {
            roots_master_local.push_back(G.get_local_vertex_id(r_global));
        }
    }
    VertexID size_roots_master_local = roots_master_local.size();

    // Short_index: reset per-vertex candidate indicators left over from the
    // previous batch, then mark each local root as "is itself" + "got labels".
    {
        if (end_once_candidated_queue >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
            for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) {
                VertexID v_local = once_candidated_queue[v_i];
                short_index[v_local].indicator_reset();
                once_candidated[v_local] = 0;
            }
        } else {
            for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) {
                VertexID v_local = once_candidated_queue[v_i];
                short_index[v_local].indicator_reset();
                once_candidated[v_local] = 0;
            }
        }
        end_once_candidated_queue = 0;

        if (size_roots_master_local >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
            for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                VertexID r_local = roots_master_local[i_r];
                short_index[r_local].indicator[G.get_global_vertex_id(r_local) - roots_start] = 1; // v itself
                short_index[r_local].indicator[BATCH_SIZE] = 1; // v got labels
//                short_index[r_local].indicator.set(G.get_global_vertex_id(r_local) - roots_start); // v itself
//                short_index[r_local].indicator.set(BATCH_SIZE); // v got labels
            }
        } else {
            for (VertexID r_local : roots_master_local) {
                short_index[r_local].indicator[G.get_global_vertex_id(r_local) - roots_start] = 1; // v itself
                short_index[r_local].indicator[BATCH_SIZE] = 1; // v got labels
//                short_index[r_local].indicator.set(G.get_global_vertex_id(r_local) - roots_start); // v itself
//                short_index[r_local].indicator.set(BATCH_SIZE); // v got labels
            }
        }
    }
//
    // Real Index: each local root gets one batch record, one distance record
    // (dist 0), and itself (batch-relative id) as the single label vertex.
    {
        if (size_roots_master_local >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
            for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                VertexID r_local = roots_master_local[i_r];
                IndexType &Lr = L[r_local];
                Lr.batches.emplace_back(
                        b_id, // Batch ID
                        Lr.distances.size(), // start_index
                        1); // size
                Lr.distances.emplace_back(
                        Lr.vertices.size(), // start_index
                        1, // size
                        0); // dist
                Lr.vertices.push_back(G.get_global_vertex_id(r_local) - roots_start);
            }
        } else {
            for (VertexID r_local : roots_master_local) {
                IndexType &Lr = L[r_local];
                Lr.batches.emplace_back(
                        b_id, // Batch ID
                        Lr.distances.size(), // start_index
                        1); // size
                Lr.distances.emplace_back(
                        Lr.vertices.size(), // start_index
                        1, // size
                        0); // dist
                Lr.vertices.push_back(G.get_global_vertex_id(r_local) - roots_start);
            }
        }
    }

    // Dist Table: unpack every local root's label index into flat
    // (root_id, label_global_id, dist) triples, broadcast them to all hosts,
    // and record them in dist_table (with recved_dist_table remembering which
    // entries to reset after the batch).
    {
//        struct LabelTableUnit {
//            VertexID root_id;
//            VertexID label_global_id;
//            UnweightedDist dist;
//
//            LabelTableUnit() = default;
//
//            LabelTableUnit(VertexID r, VertexID l, UnweightedDist d) :
//                    root_id(r), label_global_id(l), dist(d) {}
//        };
        std::vector<LabelTableUnit> buffer_send; // buffer for sending
        // Dist_matrix
        {
            // Deprecated Old method: unpack the IndexType structure before sending.
            // Okay, it's back.
            if (size_roots_master_local >= THRESHOLD_PARALLEL) {
                // Offsets for adding labels to buffer_send in parallel
                // (note: "beffer" is a long-standing spelling of "buffer" here).
                std::vector<VertexID> offsets_beffer_send(size_roots_master_local);
#pragma omp parallel for
                for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                    VertexID r_local = roots_master_local[i_r];
                    offsets_beffer_send[i_r] = L[r_local].vertices.size();
                }
                // Exclusive prefix sum: turns per-root label counts into start
                // offsets and returns the total, so each root writes a disjoint
                // slice of buffer_send in parallel.
                EdgeID size_labels = PADO::prefix_sum_for_offsets(offsets_beffer_send);
                buffer_send.resize(size_labels);
#pragma omp parallel for
                for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                    VertexID r_local = roots_master_local[i_r];
                    VertexID top_location = 0; // write cursor within this root's slice
                    IndexType &Lr = L[r_local];
                    VertexID r_root_id = G.get_global_vertex_id(r_local) - roots_start;
                    VertexID b_i_bound = Lr.batches.size();
                    _mm_prefetch(&Lr.batches[0], _MM_HINT_T0);
                    _mm_prefetch(&Lr.distances[0], _MM_HINT_T0);
                    _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0);
                    // Traverse batches array
                    for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
                        VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE;
                        VertexID dist_start_index = Lr.batches[b_i].start_index;
                        VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size;
                        // Traverse distances array
                        for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
                            VertexID v_start_index = Lr.distances[dist_i].start_index;
                            VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size;
                            UnweightedDist dist = Lr.distances[dist_i].dist;
                            // Traverse vertices array
                            for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) {
                                // Write into the dist_table
//                                buffer_send.emplace_back(r_root_id, Lr.vertices[v_i] + id_offset,
//                                                         dist); // buffer for sending
                                buffer_send[offsets_beffer_send[i_r] + top_location++] =
                                        LabelTableUnit(r_root_id, Lr.vertices[v_i] + id_offset, dist);
                            }
                        }
                    }
                }
            } else {
                for (VertexID r_local : roots_master_local) {
                    // The distance table.
                    IndexType &Lr = L[r_local];
                    VertexID r_root_id = G.get_global_vertex_id(r_local) - roots_start;
                    VertexID b_i_bound = Lr.batches.size();
                    _mm_prefetch(&Lr.batches[0], _MM_HINT_T0);
                    _mm_prefetch(&Lr.distances[0], _MM_HINT_T0);
                    _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0);
                    // Traverse batches array
                    for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
                        VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE;
                        VertexID dist_start_index = Lr.batches[b_i].start_index;
                        VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size;
                        // Traverse distances array
                        for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
                            VertexID v_start_index = Lr.distances[dist_i].start_index;
                            VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size;
                            UnweightedDist dist = Lr.distances[dist_i].dist;
                            // Traverse vertices array
                            for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) {
                                // Write into the dist_table
//                                dist_table[r_root_id][Lr.vertices[v_i] + id_offset] = dist; // distance table
                                buffer_send.emplace_back(r_root_id, Lr.vertices[v_i] + id_offset,
                                                         dist); // buffer for sending
                            }
                        }
                    }
                }
            }
        }
        // Broadcast local roots labels: every host takes a turn as broadcaster.
        for (int root = 0; root < num_hosts; ++root) {
            std::vector<LabelTableUnit> buffer_recv;
            one_host_bcasts_buffer_to_buffer(root,
                                             buffer_send,
                                             buffer_recv);
            if (buffer_recv.empty()) {
                continue;
            }
            EdgeID size_buffer_recv = buffer_recv.size();
            if (size_buffer_recv >= THRESHOLD_PARALLEL) {
                std::vector<VertexID> sizes_recved_root_labels(roots_size, 0);
#pragma omp parallel for
                for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) {
                    const LabelTableUnit &l = buffer_recv[i_l];
                    VertexID root_id = l.root_id;
                    VertexID label_global_id = l.label_global_id;
                    UnweightedDist dist = l.dist;
                    dist_table[root_id][label_global_id] = dist;
                    // Record root_id's number of its received label, for later adding to recved_dist_table
                    __atomic_add_fetch(sizes_recved_root_labels.data() + root_id, 1, __ATOMIC_SEQ_CST);
//                    recved_dist_table[root_id].push_back(label_global_id);
                }
                // Record the received label in recved_dist_table, for later reset
                // NOTE(review): resize(size) sets the vector's length to only this
                // broadcast round's count, and the counters are then reset to 0 so
                // TS_enqueue writes from index 0 — if the same root_id also received
                // labels in an earlier iteration of the host loop, those entries
                // appear to be truncated/overwritten, whereas the sequential branch
                // below push_back-appends. Presumably each label belongs to exactly
                // one broadcasting host so a root never receives across rounds on
                // the same host — TODO confirm, otherwise this should grow the
                // vector and enqueue past the old end.
#pragma omp parallel for
                for (VertexID root_id = 0; root_id < roots_size; ++root_id) {
                    VertexID &size = sizes_recved_root_labels[root_id];
                    if (size) {
                        recved_dist_table[root_id].resize(size);
                        size = 0;
                    }
                }
#pragma omp parallel for
                for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) {
                    const LabelTableUnit &l = buffer_recv[i_l];
                    VertexID root_id = l.root_id;
                    VertexID label_global_id = l.label_global_id;
                    // TS_enqueue: presumably a thread-safe append that atomically
                    // claims slot sizes_recved_root_labels[root_id]++ — verify.
                    PADO::TS_enqueue(recved_dist_table[root_id], sizes_recved_root_labels[root_id], label_global_id);
                }
            } else {
                for (const LabelTableUnit &l : buffer_recv) {
                    VertexID root_id = l.root_id;
                    VertexID label_global_id = l.label_global_id;
                    UnweightedDist dist = l.dist;
                    dist_table[root_id][label_global_id] = dist;
                    // Record the received label in recved_dist_table, for later reset
                    recved_dist_table[root_id].push_back(label_global_id);
                }
            }
        }
    }

    // Build the Bit-Parallel Labels Table: gather each local root's BP label
    // (distances + two bit sets), broadcast, and copy into bp_labels_table so
    // every host holds the BP labels of all roots in this batch.
    {
//        struct MsgBPLabel {
//            VertexID r_root_id;
//            UnweightedDist bp_dist[BITPARALLEL_SIZE];
//            uint64_t bp_sets[BITPARALLEL_SIZE][2];
//
//            MsgBPLabel() = default;
//            MsgBPLabel(VertexID r, const UnweightedDist dist[], const uint64_t sets[][2])
//                    : r_root_id(r)
//            {
//                memcpy(bp_dist, dist, sizeof(bp_dist));
//                memcpy(bp_sets, sets, sizeof(bp_sets));
//            }
//        };
//        std::vector<MPI_Request> requests_send(num_hosts - 1);
        std::vector<MsgBPLabel> buffer_send;
        std::vector<VertexID> roots_queue;
        for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) {
            if (G.get_master_host_id(r_global) != host_id) {
                continue;
            }
            roots_queue.push_back(r_global);
        }
        VertexID size_roots_queue = roots_queue.size();
        if (size_roots_queue >= THRESHOLD_PARALLEL) {
            buffer_send.resize(size_roots_queue);
#pragma omp parallel for
            for (VertexID i_r = 0; i_r < size_roots_queue; ++i_r) {
                VertexID r_global = roots_queue[i_r];
                VertexID r_local = G.get_local_vertex_id(r_global);
                VertexID r_root = r_global - roots_start;
                // Prepare for sending
//                buffer_send.emplace_back(r_root, L[r_local].bp_dist, L[r_local].bp_sets);
                buffer_send[i_r] = MsgBPLabel(r_root, L[r_local].bp_dist, L[r_local].bp_sets);
            }
        } else {
//            for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) {
//                if (G.get_master_host_id(r_global) != host_id) {
//                    continue;
//                }
            for (VertexID r_global : roots_queue) {
                VertexID r_local = G.get_local_vertex_id(r_global);
                VertexID r_root = r_global - roots_start;
                // Local roots
//                memcpy(bp_labels_table[r_root].bp_dist, L[r_local].bp_dist, sizeof(bp_labels_table[r_root].bp_dist));
//                memcpy(bp_labels_table[r_root].bp_sets, L[r_local].bp_sets, sizeof(bp_labels_table[r_root].bp_sets));
                // Prepare for sending
                buffer_send.emplace_back(r_root, L[r_local].bp_dist, L[r_local].bp_sets);
            }
        }

        // All-to-all exchange of BP labels; local roots come back through the
        // host's own broadcast round, so no separate local copy is needed.
        for (int root = 0; root < num_hosts; ++root) {
            std::vector<MsgBPLabel> buffer_recv;
            one_host_bcasts_buffer_to_buffer(root,
                                             buffer_send,
                                             buffer_recv);
            if (buffer_recv.empty()) {
                continue;
            }
            VertexID size_buffer_recv = buffer_recv.size();
            if (size_buffer_recv >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
                for (VertexID i_m = 0; i_m < size_buffer_recv; ++i_m) {
                    const MsgBPLabel &m = buffer_recv[i_m];
                    VertexID r_root = m.r_root_id;
                    memcpy(bp_labels_table[r_root].bp_dist, m.bp_dist, sizeof(bp_labels_table[r_root].bp_dist));
                    memcpy(bp_labels_table[r_root].bp_sets, m.bp_sets, sizeof(bp_labels_table[r_root].bp_sets));
                }
            } else {
                for (const MsgBPLabel &m : buffer_recv) {
                    VertexID r_root = m.r_root_id;
                    memcpy(bp_labels_table[r_root].bp_dist, m.bp_dist, sizeof(bp_labels_table[r_root].bp_dist));
                    memcpy(bp_labels_table[r_root].bp_sets, m.bp_sets, sizeof(bp_labels_table[r_root].bp_sets));
                }
            }
        }
    }

    // Active_queue: seed the traversal with this host's local roots, then
    // agree on the global active count across hosts.
    VertexID global_num_actives = 0; // global number of active vertices.
    {
        if (size_roots_master_local >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
            for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                VertexID r_local = roots_master_local[i_r];
                active_queue[i_r] = r_local;
            }
            end_active_queue = size_roots_master_local;
        } else {
            for (VertexID r_local : roots_master_local) {
                active_queue[end_active_queue++] = r_local;
            }
        }
        // Get the global number of active vertices;
        // message_time brackets the collective so communication is accounted
        // separately from computation.
        message_time -= WallTimer::get_time_mark();
        MPI_Allreduce(&end_active_queue,
                      &global_num_actives,
                      1,
                      V_ID_Type,
                      MPI_SUM,
                      MPI_COMM_WORLD);
        message_time += WallTimer::get_time_mark();
    }

    return global_num_actives;
}
// Sequential Version
//// Function for initializing at the begin of a batch
//// For a batch, initialize the temporary labels and real labels of roots;
//// traverse roots' labels to initialize distance buffer;
//// unset flag arrays is_active and got_labels
//template <VertexID BATCH_SIZE>
//inline VertexID DistBVCPLL<BATCH_SIZE>::
//initialization(
//        const DistGraph &G,
//        std::vector<ShortIndex> &short_index,
//        std::vector< std::vector<UnweightedDist> > &dist_table,
//        std::vector< std::vector<VertexID> > &recved_dist_table,
//        std::vector<BPLabelType> &bp_labels_table,
//        std::vector<VertexID> &active_queue,
//        VertexID &end_active_queue,
//        std::vector<VertexID> &once_candidated_queue,
//        VertexID &end_once_candidated_queue,
//        std::vector<uint8_t> &once_candidated,
//        VertexID b_id,
//        VertexID roots_start,
//        VertexID roots_size,
////        std::vector<VertexID> &roots_master_local,
//        const std::vector<uint8_t> &used_bp_roots)
//{
//    // Get the roots_master_local, containing all local roots.
// std::vector<VertexID> roots_master_local; // VertexID roots_bound = roots_start + roots_size; // for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) { // if (G.get_master_host_id(r_global) == host_id && !used_bp_roots[r_global]) { // roots_master_local.push_back(G.get_local_vertex_id(r_global)); // } // } // // Short_index // { // for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) { // VertexID v_local = once_candidated_queue[v_i]; // short_index[v_local].indicator_reset(); // once_candidated[v_local] = 0; // } // end_once_candidated_queue = 0; // for (VertexID r_local : roots_master_local) { // short_index[r_local].indicator[G.get_global_vertex_id(r_local) - roots_start] = 1; // v itself // short_index[r_local].indicator[BATCH_SIZE] = 1; // v got labels //// short_index[r_local].indicator.set(G.get_global_vertex_id(r_local) - roots_start); // v itself //// short_index[r_local].indicator.set(BATCH_SIZE); // v got labels // } // } //// // // Real Index // { // for (VertexID r_local : roots_master_local) { // IndexType &Lr = L[r_local]; // Lr.batches.emplace_back( // b_id, // Batch ID // Lr.distances.size(), // start_index // 1); // size // Lr.distances.emplace_back( // Lr.vertices.size(), // start_index // 1, // size // 0); // dist // Lr.vertices.push_back(G.get_global_vertex_id(r_local) - roots_start); // } // } // // // Dist Table // { //// struct LabelTableUnit { //// VertexID root_id; //// VertexID label_global_id; //// UnweightedDist dist; //// //// LabelTableUnit() = default; //// //// LabelTableUnit(VertexID r, VertexID l, UnweightedDist d) : //// root_id(r), label_global_id(l), dist(d) {} //// }; // std::vector<LabelTableUnit> buffer_send; // buffer for sending // // Dist_matrix // { // // Deprecated Old method: unpack the IndexType structure before sending. // for (VertexID r_local : roots_master_local) { // // The distance table. 
// IndexType &Lr = L[r_local]; // VertexID r_root_id = G.get_global_vertex_id(r_local) - roots_start; // VertexID b_i_bound = Lr.batches.size(); // _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); // _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); // _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); // // Traverse batches array // for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) { // VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; // VertexID dist_start_index = Lr.batches[b_i].start_index; // VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size; // // Traverse distances array // for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { // VertexID v_start_index = Lr.distances[dist_i].start_index; // VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size; // UnweightedDist dist = Lr.distances[dist_i].dist; // // Traverse vertices array // for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) { // // Write into the dist_table //// dist_table[r_root_id][Lr.vertices[v_i] + id_offset] = dist; // distance table // buffer_send.emplace_back(r_root_id, Lr.vertices[v_i] + id_offset, // dist); // buffer for sending // } // } // } // } // } // // Broadcast local roots labels // for (int root = 0; root < num_hosts; ++root) { // std::vector<LabelTableUnit> buffer_recv; // one_host_bcasts_buffer_to_buffer(root, // buffer_send, // buffer_recv); // if (buffer_recv.empty()) { // continue; // } // for (const LabelTableUnit &l : buffer_recv) { // VertexID root_id = l.root_id; // VertexID label_global_id = l.label_global_id; // UnweightedDist dist = l.dist; // dist_table[root_id][label_global_id] = dist; // // Record the received label in recved_dist_table, for later reset // recved_dist_table[root_id].push_back(label_global_id); // } // } // } // // // Build the Bit-Parallel Labels Table // { //// struct MsgBPLabel { //// VertexID r_root_id; //// UnweightedDist bp_dist[BITPARALLEL_SIZE]; //// uint64_t bp_sets[BITPARALLEL_SIZE][2]; 
//// //// MsgBPLabel() = default; //// MsgBPLabel(VertexID r, const UnweightedDist dist[], const uint64_t sets[][2]) //// : r_root_id(r) //// { //// memcpy(bp_dist, dist, sizeof(bp_dist)); //// memcpy(bp_sets, sets, sizeof(bp_sets)); //// } //// }; //// std::vector<MPI_Request> requests_send(num_hosts - 1); // std::vector<MsgBPLabel> buffer_send; // for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) { // if (G.get_master_host_id(r_global) != host_id) { // continue; // } // VertexID r_local = G.get_local_vertex_id(r_global); // VertexID r_root = r_global - roots_start; // // Local roots //// memcpy(bp_labels_table[r_root].bp_dist, L[r_local].bp_dist, sizeof(bp_labels_table[r_root].bp_dist)); //// memcpy(bp_labels_table[r_root].bp_sets, L[r_local].bp_sets, sizeof(bp_labels_table[r_root].bp_sets)); // // Prepare for sending // buffer_send.emplace_back(r_root, L[r_local].bp_dist, L[r_local].bp_sets); // } // // for (int root = 0; root < num_hosts; ++root) { // std::vector<MsgBPLabel> buffer_recv; // one_host_bcasts_buffer_to_buffer(root, // buffer_send, // buffer_recv); // if (buffer_recv.empty()) { // continue; // } // for (const MsgBPLabel &m : buffer_recv) { // VertexID r_root = m.r_root_id; // memcpy(bp_labels_table[r_root].bp_dist, m.bp_dist, sizeof(bp_labels_table[r_root].bp_dist)); // memcpy(bp_labels_table[r_root].bp_sets, m.bp_sets, sizeof(bp_labels_table[r_root].bp_sets)); // } // } // } // // // TODO: parallel enqueue // // Active_queue // VertexID global_num_actives = 0; // global number of active vertices. 
// { // for (VertexID r_local : roots_master_local) { // active_queue[end_active_queue++] = r_local; // } // // Get the global number of active vertices; // message_time -= WallTimer::get_time_mark(); // MPI_Allreduce(&end_active_queue, // &global_num_actives, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // message_time += WallTimer::get_time_mark(); // } // // return global_num_actives; //} //// Function: push v_head_global's newly added labels to its all neighbors. //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //push_single_label( // VertexID v_head_global, // VertexID label_root_id, // VertexID roots_start, // const DistGraph &G, // std::vector<ShortIndex> &short_index, // std::vector<VertexID> &got_candidates_queue, // VertexID &end_got_candidates_queue, // std::vector<bool> &got_candidates, // std::vector<VertexID> &once_candidated_queue, // VertexID &end_once_candidated_queue, // std::vector<bool> &once_candidated, // const std::vector<BPLabelType> &bp_labels_table, // const std::vector<uint8_t> &used_bp_roots, // UnweightedDist iter) //{ // const BPLabelType &L_label = bp_labels_table[label_root_id]; // VertexID label_global_id = label_root_id + roots_start; // EdgeID e_i_start = G.vertices_idx[v_head_global]; // EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global]; // for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) { // VertexID v_tail_global = G.out_edges[e_i]; // if (used_bp_roots[v_tail_global]) { // continue; // } // if (v_tail_global < roots_start) { // all remaining v_tail_global has higher rank than any roots, then no roots can push new labels to it. 
// return; // } // // VertexID v_tail_local = G.get_local_vertex_id(v_tail_global); // const IndexType &L_tail = L[v_tail_local]; // if (v_tail_global <= label_global_id) { // // remaining v_tail_global has higher rank than the label // return; // } // ShortIndex &SI_v_tail = short_index[v_tail_local]; // if (SI_v_tail.indicator[label_root_id]) { // // The label is already selected before // continue; // } // // Record label_root_id as once selected by v_tail_global // SI_v_tail.indicator.set(label_root_id); // // Add into once_candidated_queue // // if (!once_candidated[v_tail_local]) { // // If v_tail_global is not in the once_candidated_queue yet, add it in // once_candidated[v_tail_local] = true; // once_candidated_queue[end_once_candidated_queue++] = v_tail_local; // } // // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already // // ++total_check_count; //// const IndexType &L_label = L[label_global_id]; //// _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0); //// _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0); //// bp_checking_ins_count.measure_start(); // bool no_need_add = false; // for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) { // VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i]; // if (td - 2 <= iter) { // td += // (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 : // ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) | // (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0])) // ? 
-1 : 0;
//                if (td <= iter) {
//                    no_need_add = true;
////                    ++bp_hit_count;
//                    break;
//                }
//            }
//        }
//        if (no_need_add) {
////            bp_checking_ins_count.measure_stop();
//            continue;
//        }
////        bp_checking_ins_count.measure_stop();
//        if (SI_v_tail.is_candidate[label_root_id]) {
//            continue;
//        }
//        SI_v_tail.is_candidate[label_root_id] = true;
//        SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id;
//
//        if (!got_candidates[v_tail_local]) {
//            // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate)
//            got_candidates[v_tail_local] = true;
//            got_candidates_queue[end_got_candidates_queue++] = v_tail_local;
//        }
//    }
////    {// Just for the complain from the compiler
////        assert(iter >= iter);
////    }
//}

// Function: parallel scheduling of one round of label pushing.
// Each host packs the labels most recently inserted for its active master vertices
// (the last element of every master's Lv.distances) into two send buffers
// (per-vertex indices + flat label array), broadcasts them host by host via
// one_host_bcasts_buffer_to_buffer(), and pushes every received label to local
// out-neighbors through local_push_labels_para(). Per-vertex temporary queue
// segments are finally merged into got_candidates_queue / once_candidated_queue.
// Side effects: clears is_active flags of processed masters; fills/extends
// got_candidates_queue, once_candidated_queue and their end counters; sets
// got_candidates / once_candidated flags and per-vertex ShortIndex state.
template<VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
schedule_label_pushing_para(
        const DistGraph &G,
        const VertexID roots_start,          // global ID of the first root in this batch
        const std::vector<uint8_t> &used_bp_roots,
        const std::vector<VertexID> &active_queue,
        const VertexID global_start,         // start offset into active_queue for this host's slice
        const VertexID global_size,          // remaining global work size
        const VertexID local_size,           // number of locally available active vertices
//        const VertexID start_active_queue,
//        const VertexID size_active_queue,
        std::vector<VertexID> &got_candidates_queue,
        VertexID &end_got_candidates_queue,
        std::vector<ShortIndex> &short_index,
        const std::vector<BPLabelType> &bp_labels_table,
        std::vector<uint8_t> &got_candidates,
        std::vector<uint8_t> &is_active,
        std::vector<VertexID> &once_candidated_queue,
        VertexID &end_once_candidated_queue,
        std::vector<uint8_t> &once_candidated,
        const UnweightedDist iter)           // current BFS level (distance of labels being pushed)
{
    std::vector<std::pair<VertexID, VertexID> > buffer_send_indices;
        //.first: Vertex ID
        //.second: size of labels
    std::vector<VertexID> buffer_send_labels;
    if (local_size) {
        // This host only processes min(global_size, local_size) entries of its slice.
        const VertexID start_active_queue = global_start;
        const VertexID size_active_queue = global_size <= local_size ?
                global_size : local_size;
        const VertexID bound_active_queue = start_active_queue + size_active_queue;
        buffer_send_indices.resize(size_active_queue);
        // Prepare offset for inserting: one slot count per active master
        // (size of its most recent distance group).
        std::vector<VertexID> offsets_buffer_locs(size_active_queue);
#pragma omp parallel for
        for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) {
            VertexID v_head_local = active_queue[i_q];
            is_active[v_head_local] = 0; // reset is_active
            const IndexType &Lv = L[v_head_local];
            offsets_buffer_locs[i_q - start_active_queue] = Lv.distances.rbegin()->size;
        }
        // prefix_sum_for_offsets turns per-vertex counts into write offsets and
        // returns the total number of labels to send.
        EdgeID size_buffer_send_labels = PADO::prefix_sum_for_offsets(offsets_buffer_locs);
//        {// test
//            if (0 == host_id) {
//                double memtotal = 0;
//                double memfree = 0;
//                double bytes_buffer_send_labels = size_buffer_send_labels * sizeof(VertexID);
//                PADO::Utils::system_memory(memtotal, memfree);
//                printf("bytes_buffer_send_labels: %fGB memtotal: %fGB memfree: %fGB\n",
//                        bytes_buffer_send_labels / (1 << 30), memtotal / 1024, memfree / 1024);
//            }
//        }
        buffer_send_labels.resize(size_buffer_send_labels);
//        {// test
//            if (0 == host_id) {
//                printf("buffer_send_labels created.\n");
//            }
//        }

        // Build buffer_send_labels by parallel inserting
#pragma omp parallel for
        for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) {
            VertexID tmp_i_q = i_q - start_active_queue;
            VertexID v_head_local = active_queue[i_q];
            is_active[v_head_local] = 0; // reset is_active (already reset in the first pass; redundant but harmless)
            VertexID v_head_global = G.get_global_vertex_id(v_head_local);
            const IndexType &Lv = L[v_head_local];
            // Prepare the buffer_send_indices
            buffer_send_indices[tmp_i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size);
            // These 2 index are used for traversing v_head's last inserted labels
            VertexID l_i_start = Lv.distances.rbegin()->start_index;
            VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size;
            VertexID top_labels = offsets_buffer_locs[tmp_i_q];
            for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
                VertexID label_root_id =
Lv.vertices[l_i];
                buffer_send_labels[top_labels++] = label_root_id;
//                buffer_send_labels.push_back(label_root_id);
            }
        }
    }
////////////////////////////////////////////////
////
//    const VertexID bound_active_queue = start_active_queue + size_active_queue;
//    std::vector<std::pair<VertexID, VertexID> > buffer_send_indices(size_active_queue);
//        //.first: Vertex ID
//        //.second: size of labels
//    std::vector<VertexID> buffer_send_labels;
//    // Prepare masters' newly added labels for sending
//    // Parallel Version
//    // Prepare offset for inserting
//    std::vector<VertexID> offsets_buffer_locs(size_active_queue);
//#pragma omp parallel for
//    for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) {
//        VertexID v_head_local = active_queue[i_q];
//        is_active[v_head_local] = 0; // reset is_active
//        const IndexType &Lv = L[v_head_local];
//        offsets_buffer_locs[i_q - start_active_queue] = Lv.distances.rbegin()->size;
//    }
//    EdgeID size_buffer_send_labels = PADO::prefix_sum_for_offsets(offsets_buffer_locs);
////    {// test
////        if (0 == host_id) {
////            double memtotal = 0;
////            double memfree = 0;
////            double bytes_buffer_send_labels = size_buffer_send_labels * sizeof(VertexID);
////            PADO::Utils::system_memory(memtotal, memfree);
////            printf("bytes_buffer_send_labels: %fGB memtotal: %fGB memfree: %fGB\n",
////                    bytes_buffer_send_labels / (1 << 30), memtotal / 1024, memfree / 1024);
////        }
////    }
//    buffer_send_labels.resize(size_buffer_send_labels);
////    {// test
////        if (0 == host_id) {
////            printf("buffer_send_labels created.\n");
////        }
////    }
//
//    // Build buffer_send_labels by parallel inserting
//#pragma omp parallel for
//    for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) {
//        VertexID tmp_i_q = i_q - start_active_queue;
//        VertexID v_head_local = active_queue[i_q];
//        is_active[v_head_local] = 0; // reset is_active
//        VertexID v_head_global = G.get_global_vertex_id(v_head_local);
//        const IndexType &Lv = L[v_head_local];
//        // Prepare the buffer_send_indices
//        buffer_send_indices[tmp_i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size);
//        // These 2 index are used for traversing v_head's last inserted labels
//        VertexID l_i_start = Lv.distances.rbegin()->start_index;
//        VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size;
//        VertexID top_labels = offsets_buffer_locs[tmp_i_q];
//        for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
//            VertexID label_root_id = Lv.vertices[l_i];
//            buffer_send_labels[top_labels++] = label_root_id;
////            buffer_send_labels.push_back(label_root_id);
//        }
//    }
////    end_active_queue = 0;
////
////////////////////////////////////////////////
    // Every host takes a turn as broadcast root; all hosts (including the root
    // itself) then process the received labels against their local neighbors.
    for (int root = 0; root < num_hosts; ++root) {
        // Get the indices
        std::vector<std::pair<VertexID, VertexID> > indices_buffer;
        one_host_bcasts_buffer_to_buffer(root,
                buffer_send_indices,
                indices_buffer);
        if (indices_buffer.empty()) {
            continue;
        }
        // Get the labels
        std::vector<VertexID> labels_buffer;
        one_host_bcasts_buffer_to_buffer(root,
                buffer_send_labels,
                labels_buffer);
        VertexID size_indices_buffer = indices_buffer.size();
        // Prepare the offsets for reading indices_buffer
        std::vector<EdgeID> starts_locs_index(size_indices_buffer);
#pragma omp parallel for
        for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
            const std::pair<VertexID, VertexID> &e = indices_buffer[i_i];
            starts_locs_index[i_i] = e.second;
        }
        EdgeID total_recved_labels = PADO::prefix_sum_for_offsets(starts_locs_index);
        // Prepare the offsets for inserting v_tails into queue: each head vertex
        // can enqueue at most local_out_degrees[head] neighbors.
        std::vector<VertexID> offsets_tmp_queue(size_indices_buffer);
#pragma omp parallel for
        for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
            const std::pair<VertexID, VertexID> &e = indices_buffer[i_i];
            offsets_tmp_queue[i_i] = G.local_out_degrees[e.first];
        }
        EdgeID num_ngbrs = PADO::prefix_sum_for_offsets(offsets_tmp_queue);
        std::vector<VertexID> tmp_got_candidates_queue(num_ngbrs);
        std::vector<VertexID> sizes_tmp_got_candidates_queue(size_indices_buffer, 0);
        std::vector<VertexID> tmp_once_candidated_queue(num_ngbrs);
        std::vector<VertexID> sizes_tmp_once_candidated_queue(size_indices_buffer, 0);
#pragma omp parallel for
        for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
            VertexID v_head_global = indices_buffer[i_i].first;
            // [start_index, bound_index) is this head's slice of labels_buffer.
            EdgeID start_index = starts_locs_index[i_i];
            EdgeID bound_index = i_i != size_indices_buffer - 1
                    ? starts_locs_index[i_i + 1]
                    : total_recved_labels;
            if (G.local_out_degrees[v_head_global]) {
                local_push_labels_para(
                        v_head_global,
                        start_index,
                        bound_index,
                        roots_start,
                        labels_buffer,
                        G,
                        short_index,
//                        std::vector<VertexID> &got_candidates_queue,
//                        VertexID &end_got_candidates_queue,
                        tmp_got_candidates_queue,
                        sizes_tmp_got_candidates_queue[i_i],
                        offsets_tmp_queue[i_i],
                        got_candidates,
//                        std::vector<VertexID> &once_candidated_queue,
//                        VertexID &end_once_candidated_queue,
                        tmp_once_candidated_queue,
                        sizes_tmp_once_candidated_queue[i_i],
                        once_candidated,
                        bp_labels_table,
                        used_bp_roots,
                        iter);
            }
        }
        {// Collect elements from tmp_got_candidates_queue to got_candidates_queue
            VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_got_candidates_queue);
            PADO::collect_into_queue(
                    tmp_got_candidates_queue,
                    offsets_tmp_queue, // the locations for reading tmp_got_candidate_queue
                    sizes_tmp_got_candidates_queue, // the locations for writing got_candidate_queue
                    total_new,
                    got_candidates_queue,
                    end_got_candidates_queue);
        }
        {// Collect elements from tmp_once_candidated_queue to once_candidated_queue
            VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_once_candidated_queue);
            PADO::collect_into_queue(
                    tmp_once_candidated_queue,
                    offsets_tmp_queue, // the locations for reading tmp_once_candidats_queue
                    sizes_tmp_once_candidated_queue, // the locations for writing once_candidated_queue
                    total_new,
                    once_candidated_queue,
                    end_once_candidated_queue);
        }
    }
}

// Function: pushes v_head's labels to v_head's every (master) neighbor
template <VertexID
BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
local_push_labels_para(
        const VertexID v_head_global,
        const EdgeID start_index,    // start (inclusive) of v_head's labels in labels_buffer
        const EdgeID bound_index,    // end (exclusive) of v_head's labels in labels_buffer
        const VertexID roots_start,
        const std::vector<VertexID> &labels_buffer,
        const DistGraph &G,
        std::vector<ShortIndex> &short_index,
//        std::vector<VertexID> &got_candidates_queue,
//        VertexID &end_got_candidates_queue,
        std::vector<VertexID> &tmp_got_candidates_queue,
        VertexID &size_tmp_got_candidates_queue,     // this thread's private size counter
        const VertexID offset_tmp_queue,             // this head's private segment offset
        std::vector<uint8_t> &got_candidates,
//        std::vector<VertexID> &once_candidated_queue,
//        VertexID &end_once_candidated_queue,
        std::vector<VertexID> &tmp_once_candidated_queue,
        VertexID &size_tmp_once_candidated_queue,
        std::vector<uint8_t> &once_candidated,
        const std::vector<BPLabelType> &bp_labels_table,
        const std::vector<uint8_t> &used_bp_roots,
        const UnweightedDist iter)
{
    // Parallel-safe variant of local_push_labels_seq: shared per-tail state is
    // claimed with CAS so multiple threads can push to the same tail vertex;
    // queue insertions go into this head's private segment
    // [offset_tmp_queue, offset_tmp_queue + degree) and are merged by the caller.
    // Traverse v_head's every neighbor v_tail
    EdgeID e_i_start = G.vertices_idx[v_head_global];
    EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global];
    for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) {
        VertexID v_tail_global = G.out_edges[e_i];
        if (used_bp_roots[v_tail_global]) {
            continue;
        }
        if (v_tail_global < roots_start) { // v_tail_global has higher rank than any roots, then no roots can push new labels to it.
            return;
        }
        VertexID v_tail_local = G.get_local_vertex_id(v_tail_global);
        const IndexType &L_tail = L[v_tail_local];
        ShortIndex &SI_v_tail = short_index[v_tail_local];
        // Traverse v_head's last inserted labels
        for (VertexID l_i = start_index; l_i < bound_index; ++l_i) {
            VertexID label_root_id = labels_buffer[l_i];
            VertexID label_global_id = label_root_id + roots_start;
            if (v_tail_global <= label_global_id) {
                // v_tail_global has higher rank than the label
                continue;
            }
//            if (SI_v_tail.indicator[label_root_id]) {
//                // The label is already selected before
//                continue;
//            }
//            // Record label_root_id as once selected by v_tail_global
//            SI_v_tail.indicator[label_root_id] = 1;
            {// Deal with race condition: only the thread whose CAS succeeds
             // claims this (tail, label) pair; all others skip it.
                if (!PADO::CAS(SI_v_tail.indicator.data() + label_root_id,
                        static_cast<uint8_t>(0),
                        static_cast<uint8_t>(1))) {
                    // The label is already selected before
                    continue;
                }
            }
            // Add into once_candidated_queue
            if (!once_candidated[v_tail_local]) {
                // If v_tail_global is not in the once_candidated_queue yet, add it in
                // (the CAS makes the test-and-enqueue atomic across threads)
                if (PADO::CAS(once_candidated.data() + v_tail_local,
                        static_cast<uint8_t>(0),
                        static_cast<uint8_t>(1))) {
                    tmp_once_candidated_queue[offset_tmp_queue + size_tmp_once_candidated_queue++] = v_tail_local;
                }
//                once_candidated[v_tail_local] = 1;
//                once_candidated_queue[end_once_candidated_queue++] = v_tail_local;
            }
            // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already
//            const IndexType &L_label = L[label_global_id];
//            _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0);
//            _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0);
            const BPLabelType &L_label = bp_labels_table[label_root_id];
            bool no_need_add = false;
            for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) {
                VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i];
                // NOTE(review): if VertexID is unsigned and td < 2, td - 2 wraps
                // and this shortcut test is skipped — pruning is missed but no
                // wrong labels are added; confirm VertexID signedness.
                if (td - 2 <= iter) {
                    td +=
                            (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 :
                            ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) |
                             (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0]))
                            ?
                            -1 : 0;
                    if (td <= iter) {
                        no_need_add = true;
                        break;
                    }
                }
            }
            if (no_need_add) {
                continue;
            }
//            if (SI_v_tail.is_candidate[label_root_id]) {
//                continue;
//            }
//            SI_v_tail.is_candidate[label_root_id] = 1;
//            SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id;
            if (!SI_v_tail.is_candidate[label_root_id]) {
                if (CAS(SI_v_tail.is_candidate.data() + label_root_id,
                        static_cast<uint8_t>(0),
                        static_cast<uint8_t>(1))) {
                    // Thread-safe enqueue into the tail's shared candidate queue.
                    PADO::TS_enqueue(SI_v_tail.candidates_que, SI_v_tail.end_candidates_que, label_root_id);
                }
            }
            // Add into got_candidates queue
//            if (!got_candidates[v_tail_local]) {
//                // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate)
//                got_candidates[v_tail_local] = 1;
//                got_candidates_queue[end_got_candidates_queue++] = v_tail_local;
//            }
            if (!got_candidates[v_tail_local]) {
                if (CAS(got_candidates.data() + v_tail_local,
                        static_cast<uint8_t>(0),
                        static_cast<uint8_t>(1))) {
                    tmp_got_candidates_queue[offset_tmp_queue + size_tmp_got_candidates_queue++] = v_tail_local;
                }
            }
        }
    }
//    {
//        assert(iter >= iter);
//    }
}

// Function: pushes v_head's labels to v_head's every (master) neighbor
// Sequential counterpart of local_push_labels_para: no CAS, writes directly
// into the shared got_candidates_queue / once_candidated_queue.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
local_push_labels_seq(
        VertexID v_head_global,
        EdgeID start_index,
        EdgeID bound_index,
        VertexID roots_start,
        const std::vector<VertexID> &labels_buffer,
        const DistGraph &G,
        std::vector<ShortIndex> &short_index,
        std::vector<VertexID> &got_candidates_queue,
        VertexID &end_got_candidates_queue,
        std::vector<uint8_t> &got_candidates,
        std::vector<VertexID> &once_candidated_queue,
        VertexID &end_once_candidated_queue,
        std::vector<uint8_t> &once_candidated,
        const std::vector<BPLabelType> &bp_labels_table,
        const std::vector<uint8_t> &used_bp_roots,
        const UnweightedDist iter)
{
    // Traverse v_head's every neighbor v_tail
    EdgeID e_i_start = G.vertices_idx[v_head_global];
    EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global];
    for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) {
        VertexID v_tail_global = G.out_edges[e_i];
        if (used_bp_roots[v_tail_global]) {
            continue;
        }
        if (v_tail_global < roots_start) { // v_tail_global has higher rank than any roots, then no roots can push new labels to it.
            return;
        }
        // Traverse v_head's last inserted labels
        for (VertexID l_i = start_index; l_i < bound_index; ++l_i) {
            VertexID label_root_id = labels_buffer[l_i];
            VertexID label_global_id = label_root_id + roots_start;
            if (v_tail_global <= label_global_id) {
                // v_tail_global has higher rank than the label
                continue;
            }
            // NOTE(review): v_tail_local / L_tail / SI_v_tail are loop-invariant
            // w.r.t. l_i; the parallel variant hoists them out of this loop.
            VertexID v_tail_local = G.get_local_vertex_id(v_tail_global);
            const IndexType &L_tail = L[v_tail_local];
            ShortIndex &SI_v_tail = short_index[v_tail_local];
            if (SI_v_tail.indicator[label_root_id]) {
                // The label is already selected before
                continue;
            }
            // Record label_root_id as once selected by v_tail_global
            SI_v_tail.indicator[label_root_id] = 1;
//            SI_v_tail.indicator.set(label_root_id);
            // Add into once_candidated_queue
            if (!once_candidated[v_tail_local]) {
                // If v_tail_global is not in the once_candidated_queue yet, add it in
                once_candidated[v_tail_local] = 1;
                once_candidated_queue[end_once_candidated_queue++] = v_tail_local;
            }
            // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already
//            const IndexType &L_label = L[label_global_id];
//            _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0);
//            _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0);
            const BPLabelType &L_label = bp_labels_table[label_root_id];
            bool no_need_add = false;
            for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) {
                VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i];
                // NOTE(review): if VertexID is unsigned and td < 2, td - 2 wraps
                // and the shortcut check is skipped (missed pruning only).
                if (td - 2 <= iter) {
                    td +=
                            (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 :
                            ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) |
                             (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0]))
                            ?
                            -1 : 0;
                    if (td <= iter) {
                        no_need_add = true;
                        break;
                    }
                }
            }
            if (no_need_add) {
                continue;
            }
            if (SI_v_tail.is_candidate[label_root_id]) {
                continue;
            }
            SI_v_tail.is_candidate[label_root_id] = 1;
            SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id;
            if (!got_candidates[v_tail_local]) {
                // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate)
                got_candidates[v_tail_local] = 1;
                got_candidates_queue[end_got_candidates_queue++] = v_tail_local;
            }
        }
    }
//    {
//        assert(iter >= iter);
//    }
}

//// Function: pushes v_head's labels to v_head's every (master) neighbor
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::
//local_push_labels(
//        VertexID v_head_local,
//        VertexID roots_start,
//        const DistGraph &G,
//        std::vector<ShortIndex> &short_index,
//        std::vector<VertexID> &got_candidates_queue,
//        VertexID &end_got_candidates_queue,
//        std::vector<bool> &got_candidates,
//        std::vector<VertexID> &once_candidated_queue,
//        VertexID &end_once_candidated_queue,
//        std::vector<bool> &once_candidated,
//        const std::vector<BPLabelType> &bp_labels_table,
//        const std::vector<uint8_t> &used_bp_roots,
//        UnweightedDist iter)
//{
//    // The data structure of a message
////    std::vector< LabelUnitType > buffer_recv;
//    const IndexType &Lv = L[v_head_local];
//    // These 2 index are used for traversing v_head's last inserted labels
//    VertexID l_i_start = Lv.distances.rbegin() -> start_index;
//    VertexID l_i_bound = l_i_start + Lv.distances.rbegin() -> size;
//    // Traverse v_head's every neighbor v_tail
//    VertexID v_head_global = G.get_global_vertex_id(v_head_local);
//    EdgeID e_i_start = G.vertices_idx[v_head_global];
//    EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global];
//    for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) {
//        VertexID v_tail_global = G.out_edges[e_i];
//        if (used_bp_roots[v_tail_global]) {
//            continue;
//        }
//        if (v_tail_global < roots_start) { //
v_tail_global has higher rank than any roots, then no roots can push new labels to it. // return; // } // // // Traverse v_head's last inserted labels // for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) { // VertexID label_root_id = Lv.vertices[l_i]; // VertexID label_global_id = label_root_id + roots_start; // if (v_tail_global <= label_global_id) { // // v_tail_global has higher rank than the label // continue; // } // VertexID v_tail_local = G.get_local_vertex_id(v_tail_global); // const IndexType &L_tail = L[v_tail_local]; // ShortIndex &SI_v_tail = short_index[v_tail_local]; // if (SI_v_tail.indicator[label_root_id]) { // // The label is already selected before // continue; // } // // Record label_root_id as once selected by v_tail_global // SI_v_tail.indicator.set(label_root_id); // // Add into once_candidated_queue // // if (!once_candidated[v_tail_local]) { // // If v_tail_global is not in the once_candidated_queue yet, add it in // once_candidated[v_tail_local] = true; // once_candidated_queue[end_once_candidated_queue++] = v_tail_local; // } // // // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already // // ++total_check_count; //// const IndexType &L_label = L[label_global_id]; //// _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0); //// _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0); //// bp_checking_ins_count.measure_start(); // const BPLabelType &L_label = bp_labels_table[label_root_id]; // bool no_need_add = false; // for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) { // VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i]; // if (td - 2 <= iter) { // td += // (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 : // ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) | // (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0])) // ? 
-1 : 0; // if (td <= iter) { // no_need_add = true; //// ++bp_hit_count; // break; // } // } // } // if (no_need_add) { //// bp_checking_ins_count.measure_stop(); // continue; // } //// bp_checking_ins_count.measure_stop(); // if (SI_v_tail.is_candidate[label_root_id]) { // continue; // } // SI_v_tail.is_candidate[label_root_id] = true; // SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id; // // if (!got_candidates[v_tail_local]) { // // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate) // got_candidates[v_tail_local] = true; // got_candidates_queue[end_got_candidates_queue++] = v_tail_local; // } // } // } // // { // assert(iter >= iter); // } //} //// DEPRECATED Function: in the scatter phase, synchronize local masters to mirrors on other hosts //// Has some mysterious problem: when I call this function, some hosts will receive wrong messages; when I copy all //// code of this function into the caller, all messages become right. //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //sync_masters_2_mirrors( // const DistGraph &G, // const std::vector<VertexID> &active_queue, // VertexID end_active_queue, // std::vector< std::pair<VertexID, VertexID> > &buffer_send, // std::vector<MPI_Request> &requests_send //) //{ //// std::vector< std::pair<VertexID, VertexID> > buffer_send; // // pair.first: Owener vertex ID of the label // // pair.first: label vertex ID of the label // // Prepare masters' newly added labels for sending // for (VertexID i_q = 0; i_q < end_active_queue; ++i_q) { // VertexID v_head_local = active_queue[i_q]; // VertexID v_head_global = G.get_global_vertex_id(v_head_local); // const IndexType &Lv = L[v_head_local]; // // These 2 index are used for traversing v_head's last inserted labels // VertexID l_i_start = Lv.distances.rbegin()->start_index; // VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size; // for (VertexID l_i = 
l_i_start; l_i < l_i_bound; ++l_i) { // VertexID label_root_id = Lv.vertices[l_i]; // buffer_send.emplace_back(v_head_global, label_root_id); //// {//test //// if (1 == host_id) { //// printf("@%u host_id: %u v_head_global: %u\n", __LINE__, host_id, v_head_global);// //// } //// } // } // } // { // if (!buffer_send.empty()) { // printf("@%u host_id: %u sync_masters_2_mirrors: buffer_send.size: %lu buffer_send[0]:(%u %u)\n", __LINE__, host_id, buffer_send.size(), buffer_send[0].first, buffer_send[0].second); // } // assert(!requests_send.empty()); // } // // // Send messages // for (int loc = 0; loc < num_hosts - 1; ++loc) { // int dest_host_id = G.buffer_send_list_loc_2_master_host_id(loc); // MPI_Isend(buffer_send.data(), // MPI_Instance::get_sending_size(buffer_send), // MPI_CHAR, // dest_host_id, // SENDING_MASTERS_TO_MIRRORS, // MPI_COMM_WORLD, // &requests_send[loc]); // { // if (!buffer_send.empty()) { // printf("@%u host_id: %u dest_host_id: %u buffer_send.size: %lu buffer_send[0]:(%u %u)\n", __LINE__, host_id, dest_host_id, buffer_send.size(), buffer_send[0].first, buffer_send[0].second); // } // } // } //} // Function for distance query; // traverse vertex v_id's labels; // return false if shorter distance exists already, return true if the cand_root_id can be added into v_id's label. 
template <VertexID BATCH_SIZE>
inline bool DistBVCPLL<BATCH_SIZE>::
distance_query(
        VertexID cand_root_id,   // candidate label, as offset from roots_start
        VertexID v_id_local,     // local ID of the vertex being labeled
        VertexID roots_start,
//        const std::vector<IndexType> &L,
        const std::vector< std::vector<UnweightedDist> > &dist_table,
        UnweightedDist iter)     // current BFS level; a path of length <= iter prunes the candidate
{
    VertexID cand_real_id = cand_root_id + roots_start;
    const IndexType &Lv = L[v_id_local];
    // Traverse v_id's all existing labels
    VertexID b_i_bound = Lv.batches.size();
    _mm_prefetch(&Lv.batches[0], _MM_HINT_T0);
    _mm_prefetch(&Lv.distances[0], _MM_HINT_T0);
    _mm_prefetch(&Lv.vertices[0], _MM_HINT_T0);
    //_mm_prefetch(&dist_table[cand_root_id][0], _MM_HINT_T0);
    for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
        // Labels are stored relative to their batch; recover global IDs via id_offset.
        VertexID id_offset = Lv.batches[b_i].batch_id * BATCH_SIZE;
        VertexID dist_start_index = Lv.batches[b_i].start_index;
        VertexID dist_bound_index = dist_start_index + Lv.batches[b_i].size;
        // Traverse dist_table
        for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
            UnweightedDist dist = Lv.distances[dist_i].dist;
            if (dist >= iter) { // In a batch, the labels' distances are increasingly ordered.
                // If the half path distance is already greater than their targeted distance, jump to next batch
                break;
            }
            VertexID v_start_index = Lv.distances[dist_i].start_index;
            VertexID v_bound_index = v_start_index + Lv.distances[dist_i].size;
//            _mm_prefetch(&dist_table[cand_root_id][0], _MM_HINT_T0);
            _mm_prefetch(reinterpret_cast<const char *>(dist_table[cand_root_id].data()), _MM_HINT_T0);
            for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) {
                VertexID v = Lv.vertices[v_i] + id_offset; // v is a label hub of v_id
                if (v >= cand_real_id) {
                    // Vertex cand_real_id cannot have labels whose ranks are lower than it,
                    // in which case dist_table[cand_root_id][v] does not exist.
                    continue;
                }
                // Path v_id -> v -> cand_real_id; prune if it is no longer than iter.
                VertexID d_tmp = dist + dist_table[cand_root_id][v];
                if (d_tmp <= iter) {
                    return false;
                }
            }
        }
    }
    return true;
}

//// Sequential version
// Function inserts candidate cand_root_id into vertex v_id's labels;
// update the distance buffer dist_table;
// but it only update the v_id's labels' vertices array;
// (the distance-table update itself is deferred: it is recorded in buffer_send
// and applied after the inter-host exchange)
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
insert_label_only_seq(
        VertexID cand_root_id,
        VertexID v_id_local,
        VertexID roots_start,
        VertexID roots_size,
        const DistGraph &G,
//        std::vector< std::vector<UnweightedDist> > &dist_table,
        std::vector< std::pair<VertexID, VertexID> > &buffer_send)
//        UnweightedDist iter)
{
    L[v_id_local].vertices.push_back(cand_root_id);
    // Update the distance buffer if v_id is a root
    VertexID v_id_global = G.get_global_vertex_id(v_id_local);
    VertexID v_root_id = v_id_global - roots_start;
    // (v_root_id may wrap if v_id_global < roots_start; the short-circuit below
    // guarantees it is only used when v_id_global >= roots_start)
    if (v_id_global >= roots_start && v_root_id < roots_size) {
        VertexID cand_real_id = cand_root_id + roots_start;
//        dist_table[v_root_id][cand_real_id] = iter;
        // Put the update into the buffer_send for later sending
        buffer_send.emplace_back(v_root_id, cand_real_id);
    }
}

//// Parallel Version
// Function inserts candidate cand_root_id into vertex v_id's labels;
// update the distance buffer dist_table;
// but it only update the v_id's labels' vertices array;
// (parallel variant: the pending update is written into this thread's private
// segment of tmp_buffer_send instead of a shared vector)
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
insert_label_only_para(
        VertexID cand_root_id,
        VertexID v_id_local,
        VertexID roots_start,
        VertexID roots_size,
        const DistGraph &G,
//        std::vector< std::pair<VertexID, VertexID> > &buffer_send)
        std::vector< std::pair<VertexID, VertexID> > &tmp_buffer_send,
        EdgeID &size_tmp_buffer_send,       // this thread's private size counter
        const EdgeID offset_tmp_buffer_send) // this thread's private segment offset
{
    L[v_id_local].vertices.push_back(cand_root_id);
    // Update the distance buffer if v_id is a root
    VertexID v_id_global = G.get_global_vertex_id(v_id_local);
    VertexID v_root_id = v_id_global - roots_start;
    if (v_id_global >= roots_start && v_root_id < roots_size) {
        VertexID
cand_real_id = cand_root_id + roots_start;
        // Put the update into the buffer_send for later sending
//        buffer_send.emplace_back(v_root_id, cand_real_id);
        tmp_buffer_send[offset_tmp_buffer_send + size_tmp_buffer_send++] = std::make_pair(v_root_id, cand_real_id);
    }
}

// Function updates those index arrays in v_id's label only if v_id has been inserted new labels
// Appends one distance element covering the inserted_count labels just pushed
// into Lv.vertices, creating a new batch entry for batch b_id when this is the
// vertex's first insertion in the current batch.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
update_label_indices(
        VertexID v_id_local,
        VertexID inserted_count, // number of labels appended to Lv.vertices this round
//        std::vector<IndexType> &L,
        std::vector<ShortIndex> &short_index,
        VertexID b_id,           // current batch ID
        UnweightedDist iter)     // distance of the newly inserted labels
{
    IndexType &Lv = L[v_id_local];
    // indicator[BATCH_SIZE] is true, means v got some labels already in this batch
    if (short_index[v_id_local].indicator[BATCH_SIZE]) {
        // Increase the batches' last element's size because a new distance element need to be added
        ++(Lv.batches.rbegin() -> size);
    } else {
        short_index[v_id_local].indicator[BATCH_SIZE] = 1;
//        short_index[v_id_local].indicator.set(BATCH_SIZE);
        // Insert a new Batch with batch_id, start_index, and size because a new distance element need to be added
        Lv.batches.emplace_back(
                b_id, // batch id
                Lv.distances.size(), // start index
                1); // size
    }
    // Insert a new distance element with start_index, size, and dist
    Lv.distances.emplace_back(
            Lv.vertices.size() - inserted_count, // start index
            inserted_count, // size
            iter); // distance
}

// Function to reset dist_table the distance buffer to INF
// Traverse every root's labels to reset its distance buffer elements to INF.
// In this way to reduce the cost of initialization of the next batch.
template <VertexID BATCH_SIZE> inline void DistBVCPLL<BATCH_SIZE>:: reset_at_end( // const DistGraph &G, // VertexID roots_start, // const std::vector<VertexID> &roots_master_local, std::vector< std::vector<UnweightedDist> > &dist_table, std::vector< std::vector<VertexID> > &recved_dist_table, std::vector<BPLabelType> &bp_labels_table) { // // Reset dist_table according to local masters' labels // for (VertexID r_local_id : roots_master_local) { // IndexType &Lr = L[r_local_id]; // VertexID r_root_id = G.get_global_vertex_id(r_local_id) - roots_start; // VertexID b_i_bound = Lr.batches.size(); // _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); // _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); // _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); // for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) { // VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; // VertexID dist_start_index = Lr.batches[b_i].start_index; // VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size; // // Traverse dist_table // for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { // VertexID v_start_index = Lr.distances[dist_i].start_index; // VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size; // for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) { // dist_table[r_root_id][Lr.vertices[v_i] + id_offset] = MAX_UNWEIGHTED_DIST; // } // } // } // } // Reset dist_table according to received masters' labels from other hosts for (VertexID r_root_id = 0; r_root_id < BATCH_SIZE; ++r_root_id) { for (VertexID cand_real_id : recved_dist_table[r_root_id]) { dist_table[r_root_id][cand_real_id] = MAX_UNWEIGHTED_DIST; } recved_dist_table[r_root_id].clear(); } // Reset bit-parallel labels table for (VertexID r_root_id = 0; r_root_id < BATCH_SIZE; ++r_root_id) { memset(bp_labels_table[r_root_id].bp_dist, 0, sizeof(bp_labels_table[r_root_id].bp_dist)); memset(bp_labels_table[r_root_id].bp_sets, 0, sizeof(bp_labels_table[r_root_id].bp_sets)); } } template 
// (Continues the `template` keyword at the end of the previous line.)
// Processes one batch of roots [roots_start, roots_start + roots_size):
// runs the batched pruned-landmark-labeling rounds. Each round (= distance
// `iter`) has two phases:
//   scatter - active vertices push their newly gained labels to neighbors,
//             producing label candidates (chunked to bound peak memory);
//   gather  - candidates are distance-checked and inserted, then the roots'
//             distance-table updates are broadcast to all hosts.
// The loop ends when no host has active vertices; finally the shared buffers
// are reset for the next batch via reset_at_end().
<VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
batch_process(
        const DistGraph &G,
        const VertexID b_id,
        const VertexID roots_start, // start id of roots
        const VertexID roots_size, // how many roots in the batch
        const std::vector<uint8_t> &used_bp_roots,
        std::vector<VertexID> &active_queue,
        VertexID &end_active_queue,
        std::vector<VertexID> &got_candidates_queue,
        VertexID &end_got_candidates_queue,
        std::vector<ShortIndex> &short_index,
        std::vector< std::vector<UnweightedDist> > &dist_table,
        std::vector< std::vector<VertexID> > &recved_dist_table,
        std::vector<BPLabelType> &bp_labels_table,
        std::vector<uint8_t> &got_candidates,
//        std::vector<bool> &got_candidates,
        std::vector<uint8_t> &is_active,
//        std::vector<bool> &is_active,
        std::vector<VertexID> &once_candidated_queue,
        VertexID &end_once_candidated_queue,
        std::vector<uint8_t> &once_candidated)
//        std::vector<bool> &once_candidated)
{
    // At the beginning of a batch, initialize the labels L and distance buffer dist_table;
    initializing_time -= WallTimer::get_time_mark();
    VertexID global_num_actives = initialization(G,
            short_index,
            dist_table,
            recved_dist_table,
            bp_labels_table,
            active_queue,
            end_active_queue,
            once_candidated_queue,
            end_once_candidated_queue,
            once_candidated,
            b_id,
            roots_start,
            roots_size,
//            roots_master_local,
            used_bp_roots);
    initializing_time += WallTimer::get_time_mark();
    UnweightedDist iter = 0; // The iterator, also the distance for current iteration
//    {//test
//        if (0 == host_id) {
//            printf("host_id: %u initialization finished.\n", host_id);
//        }
//    }

    while (global_num_actives) {
        ++iter;
//#ifdef DEBUG_MESSAGES_ON
        {//test
            // Per-iteration progress / memory report.
            // NOTE(review): this diagnostic printf runs on EVERY host every
            // iteration (both the DEBUG_MESSAGES_ON guard and the host_id
            // guard are commented out); consider re-enabling a guard for
            // production runs.
//            if (0 == host_id) {
            double memtotal = 0;
            double memfree = 0;
            PADO::Utils::system_memory(memtotal, memfree);
            printf("iter: %u "
                   "host_id: %d "
                   "global_num_actives: %u "
                   "L.size(): %.2fGB "
                   "memtotal: %.2fGB "
                   "memfree: %.2fGB\n",
                   iter,
                   host_id,
                   global_num_actives,
                   get_index_size() * 1.0 / (1 << 30),
                   memtotal / 1024,
                   memfree / 1024);
//            }
        }
//#endif
        // Traverse active vertices to push their labels as candidates
        // Send masters' newly added labels to other hosts
        {
            scatter_time -= WallTimer::get_time_mark();
            // Divide the pushing into many-time runs.
            // Chunking bounds the peak size of the per-round send buffers.
            const VertexID chunk_size = 1 << 20;
            VertexID remainder = global_num_actives % chunk_size;
            VertexID bound_global_i = global_num_actives - remainder;
//            VertexID remainder = end_active_queue % chunk_size;
//            VertexID bound_active_queue = end_active_queue - remainder;
            VertexID local_size;
            // NOTE(review): the loop bound is the GLOBAL active count, so all
            // hosts execute the same number of chunk rounds even when this
            // host has fewer (or zero) local actives — presumably because
            // schedule_label_pushing_para() contains collective communication
            // every host must join; confirm against its definition.
            for (VertexID global_i = 0; global_i < bound_global_i; global_i += chunk_size) {
                if (global_i < end_active_queue) {
                    // Remaining local actives from this offset; may exceed
                    // chunk_size — the callee is presumably expected to clamp
                    // it against chunk_size. TODO confirm.
                    local_size = end_active_queue - global_i;
                } else {
                    local_size = 0;
                }
                schedule_label_pushing_para(
                        G,
                        roots_start,
                        used_bp_roots,
                        active_queue,
                        global_i,
                        chunk_size,
                        local_size,
                        got_candidates_queue,
                        end_got_candidates_queue,
                        short_index,
                        bp_labels_table,
                        got_candidates,
                        is_active,
                        once_candidated_queue,
                        end_once_candidated_queue,
                        once_candidated,
                        iter);
            }
            if (remainder) {
                // Final partial chunk.
                if (bound_global_i < end_active_queue) {
                    local_size = end_active_queue - bound_global_i;
                } else {
                    local_size = 0;
                }
                schedule_label_pushing_para(
                        G,
                        roots_start,
                        used_bp_roots,
                        active_queue,
                        bound_global_i,
                        remainder,
                        local_size,
                        got_candidates_queue,
                        end_got_candidates_queue,
                        short_index,
                        bp_labels_table,
                        got_candidates,
                        is_active,
                        once_candidated_queue,
                        end_once_candidated_queue,
                        once_candidated,
                        iter);
            }
//
//            schedule_label_pushing_para(
//                    G,
//                    roots_start,
//                    used_bp_roots,
//                    active_queue,
//                    0,
//                    end_active_queue,
//                    got_candidates_queue,
//                    end_got_candidates_queue,
//                    short_index,
//                    bp_labels_table,
//                    got_candidates,
//                    is_active,
//                    once_candidated_queue,
//                    end_once_candidated_queue,
//                    once_candidated,
//                    iter);
            end_active_queue = 0;
            scatter_time += WallTimer::get_time_mark();
        }
//// For Backup
//        {
//            scatter_time -= WallTimer::get_time_mark();
//            std::vector<std::pair<VertexID, VertexID> > buffer_send_indices(end_active_queue);
//                //.first: Vertex ID
//                //.second: size of labels
//            std::vector<VertexID> buffer_send_labels;
//            // Prepare masters' newly added labels for sending
//            if (end_active_queue >= THRESHOLD_PARALLEL) {
//                // Parallel Version
//                // Prepare offset for inserting
//                std::vector<VertexID> offsets_buffer_locs(end_active_queue);
//#pragma omp parallel for
//                for (VertexID i_q = 0; i_q < end_active_queue; ++i_q) {
//                    VertexID v_head_local = active_queue[i_q];
//                    is_active[v_head_local] = 0; // reset is_active
//                    const IndexType &Lv = L[v_head_local];
//                    offsets_buffer_locs[i_q] = Lv.distances.rbegin()->size;
//                }
//                EdgeID size_buffer_send_labels = PADO::prefix_sum_for_offsets(offsets_buffer_locs);
//                buffer_send_labels.resize(size_buffer_send_labels);
//#pragma omp parallel for
//                for (VertexID i_q = 0; i_q < end_active_queue; ++i_q) {
//                    VertexID top_labels = 0;
//                    VertexID v_head_local = active_queue[i_q];
//                    is_active[v_head_local] = 0; // reset is_active
//                    VertexID v_head_global = G.get_global_vertex_id(v_head_local);
//                    const IndexType &Lv = L[v_head_local];
//                    // Prepare the buffer_send_indices
//                    buffer_send_indices[i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size);
//                    // These 2 index are used for traversing v_head's last inserted labels
//                    VertexID l_i_start = Lv.distances.rbegin()->start_index;
//                    VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size;
//                    for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
//                        VertexID label_root_id = Lv.vertices[l_i];
//                        buffer_send_labels[offsets_buffer_locs[i_q] + top_labels++] = label_root_id;
////                        buffer_send_labels.push_back(label_root_id);
//                    }
//                }
//            } else {
//                // Sequential Version
//                for (VertexID i_q = 0; i_q < end_active_queue; ++i_q) {
//                    VertexID v_head_local = active_queue[i_q];
//                    is_active[v_head_local] = 0; // reset is_active
//                    VertexID v_head_global = G.get_global_vertex_id(v_head_local);
//                    const IndexType &Lv = L[v_head_local];
//                    // Prepare the buffer_send_indices
//                    buffer_send_indices[i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size);
//                    // These 2 index are used for traversing v_head's last inserted labels
//                    VertexID l_i_start = Lv.distances.rbegin()->start_index;
//                    VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size;
//                    for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
//                        VertexID label_root_id = Lv.vertices[l_i];
//                        buffer_send_labels.push_back(label_root_id);
//                    }
//                }
//            }
//            end_active_queue = 0;
//
//            for (int root = 0; root < num_hosts; ++root) {
//                // Get the indices
//                std::vector< std::pair<VertexID, VertexID> > indices_buffer;
//                one_host_bcasts_buffer_to_buffer(root,
//                        buffer_send_indices,
//                        indices_buffer);
//                if (indices_buffer.empty()) {
//                    continue;
//                }
//                // Get the labels
//                std::vector<VertexID> labels_buffer;
//                one_host_bcasts_buffer_to_buffer(root,
//                        buffer_send_labels,
//                        labels_buffer);
//
//                VertexID size_indices_buffer = indices_buffer.size();
//                if (size_indices_buffer >= THRESHOLD_PARALLEL) {
//                    // Prepare the offsets for reading indices_buffer
//                    std::vector<EdgeID> starts_locs_index(size_indices_buffer);
//#pragma omp parallel for
//                    for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
//                        const std::pair<VertexID, VertexID> &e = indices_buffer[i_i];
//                        starts_locs_index[i_i] = e.second;
//                    }
//                    EdgeID total_recved_labels = PADO::prefix_sum_for_offsets(starts_locs_index);
//
//                    // Prepare the offsets for inserting v_tails into queue
//                    std::vector<VertexID> offsets_tmp_queue(size_indices_buffer);
//#pragma omp parallel for
//                    for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
//                        const std::pair<VertexID, VertexID> &e = indices_buffer[i_i];
//                        offsets_tmp_queue[i_i] = G.local_out_degrees[e.first];
//                    }
//                    EdgeID num_ngbrs = PADO::prefix_sum_for_offsets(offsets_tmp_queue);
//                    std::vector<VertexID> tmp_got_candidates_queue(num_ngbrs);
//                    std::vector<VertexID> sizes_tmp_got_candidates_queue(size_indices_buffer, 0);
//                    std::vector<VertexID> tmp_once_candidated_queue(num_ngbrs);
//                    std::vector<VertexID> sizes_tmp_once_candidated_queue(size_indices_buffer, 0);
//#pragma omp parallel for
//                    for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
//                        VertexID v_head_global = indices_buffer[i_i].first;
//                        EdgeID start_index = starts_locs_index[i_i];
//                        EdgeID bound_index = i_i != size_indices_buffer - 1 ?
//                                starts_locs_index[i_i + 1] : total_recved_labels;
//                        if (G.local_out_degrees[v_head_global]) {
//                            local_push_labels_para(
//                                    v_head_global,
//                                    start_index,
//                                    bound_index,
//                                    roots_start,
//                                    labels_buffer,
//                                    G,
//                                    short_index,
////                                    std::vector<VertexID> &got_candidates_queue,
////                                    VertexID &end_got_candidates_queue,
//                                    tmp_got_candidates_queue,
//                                    sizes_tmp_got_candidates_queue[i_i],
//                                    offsets_tmp_queue[i_i],
//                                    got_candidates,
////                                    std::vector<VertexID> &once_candidated_queue,
////                                    VertexID &end_once_candidated_queue,
//                                    tmp_once_candidated_queue,
//                                    sizes_tmp_once_candidated_queue[i_i],
//                                    once_candidated,
//                                    bp_labels_table,
//                                    used_bp_roots,
//                                    iter);
//                        }
//                    }
//
//                    {// Collect elements from tmp_got_candidates_queue to got_candidates_queue
//                        VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_got_candidates_queue);
//                        PADO::collect_into_queue(
//                                tmp_got_candidates_queue,
//                                offsets_tmp_queue, // the locations for reading tmp_got_candidate_queue
//                                sizes_tmp_got_candidates_queue, // the locations for writing got_candidate_queue
//                                total_new,
//                                got_candidates_queue,
//                                end_got_candidates_queue);
//                    }
//                    {// Collect elements from tmp_once_candidated_queue to once_candidated_queue
//                        VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_once_candidated_queue);
//                        PADO::collect_into_queue(
//                                tmp_once_candidated_queue,
//                                offsets_tmp_queue, // the locations for reading tmp_once_candidats_queue
//                                sizes_tmp_once_candidated_queue, // the locations for writing once_candidated_queue
//                                total_new,
//                                once_candidated_queue,
//                                end_once_candidated_queue);
//                    }
//                } else {
//                    // Sequential Version
//                    // Push those labels
//                    EdgeID start_index = 0;
//                    for (const std::pair<VertexID, VertexID> &e : indices_buffer) {
//                        VertexID v_head_global = e.first;
//                        EdgeID bound_index = start_index + e.second;
//                        if (G.local_out_degrees[v_head_global]) {
//                            local_push_labels_seq(
//                                    v_head_global,
//                                    start_index,
//                                    bound_index,
//                                    roots_start,
//                                    labels_buffer,
//                                    G,
//                                    short_index,
//                                    got_candidates_queue,
//                                    end_got_candidates_queue,
//                                    got_candidates,
//                                    once_candidated_queue,
//                                    end_once_candidated_queue,
//                                    once_candidated,
//                                    bp_labels_table,
//                                    used_bp_roots,
//                                    iter);
//                        }
//                        start_index = bound_index;
//                    }
//                }
//            }
//            scatter_time += WallTimer::get_time_mark();
//        }

//        {//test
//            if (0 == host_id) {
//                printf("iter: %u pushing labels finished.\n", iter);
//            }
//        }

        // Traverse vertices in the got_candidates_queue to insert labels
        {
            gather_time -= WallTimer::get_time_mark();
            std::vector< std::pair<VertexID, VertexID> > buffer_send; // For sync elements in the dist_table
                // pair.first: root id
                // pair.second: label (global) id of the root
//            if (true) {
            if (end_got_candidates_queue >= THRESHOLD_PARALLEL) {
                // Prepare for parallel active_queue
                // Don't need offsets_tmp_active_queue here, because the index i_queue is the offset already.
                // Actually we still need offsets_tmp_active_queue, because collect_into_queue() needs it.
                std::vector<VertexID> offsets_tmp_active_queue(end_got_candidates_queue);
#pragma omp parallel for
                for (VertexID i_q = 0; i_q < end_got_candidates_queue; ++i_q) {
                    offsets_tmp_active_queue[i_q] = i_q;
                }
                std::vector<VertexID> tmp_active_queue(end_got_candidates_queue);
                std::vector<VertexID> sizes_tmp_active_queue(end_got_candidates_queue, 0);
                    // Size will only be 0 or 1, but it will become offsets eventually.
                // Prepare for parallel buffer_send: per-vertex upper bound of
                // labels this root vertex could contribute to the broadcast.
                std::vector<EdgeID> offsets_tmp_buffer_send(end_got_candidates_queue);
#pragma omp parallel for
                for (VertexID i_q = 0; i_q < end_got_candidates_queue; ++i_q) {
                    VertexID v_id_local = got_candidates_queue[i_q];
                    VertexID v_global_id = G.get_global_vertex_id(v_id_local);
                    if (v_global_id >= roots_start && v_global_id < roots_start + roots_size) {
                        // If v_global_id is root, its new labels should be put into buffer_send
                        offsets_tmp_buffer_send[i_q] = short_index[v_id_local].end_candidates_que;
                    } else {
                        offsets_tmp_buffer_send[i_q] = 0;
                    }
                }
                EdgeID total_send_labels = PADO::prefix_sum_for_offsets(offsets_tmp_buffer_send);
//                {// test
//                    if (0 == host_id) {
//                        double memtotal = 0;
//                        double memfree = 0;
//                        double bytes_buffer_send = total_send_labels * sizeof(VertexID);
//                        PADO::Utils::system_memory(memtotal, memfree);
//                        printf("bytes_tmp_buffer_send: %fGB memtotal: %fGB memfree: %fGB\n",
//                                bytes_buffer_send / (1 << 30), memtotal / 1024, memfree / 1024);
//                    }
//                }
                std::vector< std::pair<VertexID, VertexID> > tmp_buffer_send(total_send_labels);
//                {// test
//                    if (0 == host_id) {
//                        printf("tmp_buffer_send created.\n");
//                    }
//                }
                std::vector<EdgeID> sizes_tmp_buffer_send(end_got_candidates_queue, 0);
                // Parallel candidate check-and-insert: every thread writes
                // only into slots derived from its own i_queue, so no locks
                // are needed here.
#pragma omp parallel for
                for (VertexID i_queue = 0; i_queue < end_got_candidates_queue; ++i_queue) {
                    VertexID v_id_local = got_candidates_queue[i_queue];
                    VertexID inserted_count = 0; //recording number of v_id's truly inserted candidates
                    got_candidates[v_id_local] = 0; // reset got_candidates
                    // Traverse v_id's all candidates
                    VertexID bound_cand_i = short_index[v_id_local].end_candidates_que;
                    for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) {
                        VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i];
                        short_index[v_id_local].is_candidate[cand_root_id] = 0;
                        // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance
                        if (distance_query(
                                cand_root_id,
                                v_id_local,
                                roots_start,
//                                L,
                                dist_table,
                                iter)) {
                            if (!is_active[v_id_local]) {
                                is_active[v_id_local] = 1;
//                                active_queue[end_active_queue++] = v_id_local;
                                tmp_active_queue[i_queue + sizes_tmp_active_queue[i_queue]++] = v_id_local;
                            }
                            ++inserted_count;
                            // The candidate cand_root_id needs to be added into v_id's label
                            insert_label_only_para(
                                    cand_root_id,
                                    v_id_local,
                                    roots_start,
                                    roots_size,
                                    G,
                                    tmp_buffer_send,
                                    sizes_tmp_buffer_send[i_queue],
                                    offsets_tmp_buffer_send[i_queue]);
//                                    buffer_send);
                        }
                    }
                    short_index[v_id_local].end_candidates_que = 0;
                    if (0 != inserted_count) {
                        // Update other arrays in L[v_id] if new labels were inserted in this iteration
                        update_label_indices(
                                v_id_local,
                                inserted_count,
//                                L,
                                short_index,
                                b_id,
                                iter);
                    }
                }
                {// Collect elements from tmp_active_queue to active_queue
                    VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_active_queue);
                    PADO::collect_into_queue(
                            tmp_active_queue,
                            offsets_tmp_active_queue,
                            sizes_tmp_active_queue,
                            total_new,
                            active_queue,
                            end_active_queue);
                }
                {// Collect elements from tmp_buffer_send to buffer_send
                    EdgeID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_buffer_send);
//                    {// test
//                        if (0 == host_id) {
//                            double memtotal = 0;
//                            double memfree = 0;
//                            double bytes_buffer_send = total_new * sizeof(VertexID);
//                            PADO::Utils::system_memory(memtotal, memfree);
//                            printf("bytes_buffer_send: %fGB memtotal: %fGB memfree: %fGB\n",
//                                    bytes_buffer_send / (1 << 30), memtotal / 1024, memfree / 1024);
//                        }
//                    }
                    buffer_send.resize(total_new);
//                    {// test
//                        if (0 == host_id) {
//                            printf("buffer_send created.\n");
//                        }
//                    }
                    EdgeID zero_size = 0;
                    PADO::collect_into_queue(
                            tmp_buffer_send,
                            offsets_tmp_buffer_send,
                            sizes_tmp_buffer_send,
                            total_new,
                            buffer_send,
                            zero_size);
//                    {//test
//                        if (iter == 6) {
//                            for (VertexID i_b = 0; i_b < total_new; ++i_b) {
//                                const auto &e = buffer_send[i_b];
//                                VertexID root_id = e.first;
//                                VertexID cand_real_id = e.second;
//                                if (root_id > 1024) {
//                                    printf("total_new: %lu "
//                                           "buffer_send[%u]: "
//                                           "root_id: %u "
//                                           "cand_real_id: %u\n",
//                                           total_new,
//                                           i_b,
//                                           root_id,
//                                           cand_real_id);
//                                    exit(1);
//                                }
//                            }
//                        }
//                    }
                }
            } else {
                // Sequential candidate check-and-insert (small queues).
                for (VertexID i_queue = 0; i_queue < end_got_candidates_queue; ++i_queue) {
                    VertexID v_id_local = got_candidates_queue[i_queue];
                    VertexID inserted_count = 0; //recording number of v_id's truly inserted candidates
                    got_candidates[v_id_local] = 0; // reset got_candidates
                    // Traverse v_id's all candidates
                    VertexID bound_cand_i = short_index[v_id_local].end_candidates_que;
                    for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) {
                        VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i];
                        short_index[v_id_local].is_candidate[cand_root_id] = 0;
                        // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance
                        if (distance_query(
                                cand_root_id,
                                v_id_local,
                                roots_start,
//                                L,
                                dist_table,
                                iter)) {
                            if (!is_active[v_id_local]) {
                                is_active[v_id_local] = 1;
                                active_queue[end_active_queue++] = v_id_local;
                            }
                            ++inserted_count;
                            // The candidate cand_root_id needs to be added into v_id's label
                            insert_label_only_seq(
                                    cand_root_id,
                                    v_id_local,
                                    roots_start,
                                    roots_size,
                                    G,
//                                    dist_table,
                                    buffer_send);
//                                    iter);
                        }
                    }
                    short_index[v_id_local].end_candidates_que = 0;
                    if (0 != inserted_count) {
                        // Update other arrays in L[v_id] if new labels were inserted in this iteration
                        update_label_indices(
                                v_id_local,
                                inserted_count,
//                                L,
                                short_index,
                                b_id,
                                iter);
                    }
                }
            }
//            {//test
//                printf("host_id: %u gather: buffer_send.size(); %lu bytes: %lu\n", host_id, buffer_send.size(), MPI_Instance::get_sending_size(buffer_send));
//            }
            end_got_candidates_queue = 0; // Set the got_candidates_queue empty
//            {//test
//                if (iter == 6) {
//                    for (VertexID i_b = 0; i_b < buffer_send.size(); ++i_b) {
//                        const auto &e = buffer_send[i_b];
//                        VertexID root_id = e.first;
//                        VertexID cand_real_id = e.second;
//                        if (root_id > 1024) {
//                            printf("buffer_send.size(): %lu "
//                                   "buffer_send[%u]: "
//                                   "root_id: %u "
//                                   "cand_real_id: %u\n",
//                                   buffer_send.size(),
//                                   i_b,
//                                   root_id,
//                                   cand_real_id);
//                            exit(1);
//                        }
//                    }
//                }
//            }
            // Sync the dist_table
            for (int root = 0; root < num_hosts; ++root) {
                std::vector<std::pair<VertexID, VertexID>> buffer_recv;
//                {//test
////                if (iter == 6) {
//                    if (buffer_send.size() == 66) {
//                        printf("L%u: "
//                               "iter: %u\n",
//                               __LINE__,
//                               iter);
//                        exit(1);
//                        for (VertexID i_b = 0; i_b < buffer_send.size(); ++i_b) {
//                            const auto &e = buffer_send[i_b];
//                            VertexID root_id = e.first;
//                            VertexID cand_real_id = e.second;
//                            if (root_id > 1024) {
//                                printf("buffer_send.size(): %lu "
//                                       "buffer_send[%u]: "
//                                       "root_id: %u "
//                                       "cand_real_id: %u\n",
//                                       buffer_send.size(),
//                                       i_b,
//                                       root_id,
//                                       cand_real_id);
//                                fflush(stdout);
//                                exit(1);
//                            }
//                        }
//                    }
////                    MPI_Barrier(MPI_COMM_WORLD);
//                }
                one_host_bcasts_buffer_to_buffer(root,
                        buffer_send,
                        buffer_recv);
                if (buffer_recv.empty()) {
                    continue;
                }
                EdgeID size_buffer_recv = buffer_recv.size();
                {//test
                    // NOTE(review): leftover LIVE debugging code — on the
                    // hard-coded condition iter == 6 && size_buffer_recv == 66
                    // it scans the buffer and calls exit(1) when root_id >
                    // 1024, killing the whole run. Also "%lu" assumes EdgeID
                    // is unsigned long. Remove or guard behind a debug macro
                    // before production use.
//                    if (6 == (VertexID) iter && size_buffer_recv == 66) {
                    if (iter == 6 && size_buffer_recv == 66) {
                        for (VertexID i_b = 0; i_b < size_buffer_recv; ++i_b) {
                            const auto &e = buffer_recv[i_b];
                            VertexID root_id = e.first;
                            VertexID cand_real_id = e.second;
                            if (root_id > 1024) {
                                printf("size_buffer_recv: %lu "
                                       "buffer_recv[%u]: "
                                       "root_id: %u "
                                       "cand_real_id: %u\n",
                                       size_buffer_recv,
                                       i_b,
                                       root_id,
                                       cand_real_id);
                                exit(1);
                            }
                        }
                    }
                }
                if (size_buffer_recv >= THRESHOLD_PARALLEL) {
                    // Get label number for every root
                    std::vector<VertexID> sizes_recved_root_labels(roots_size, 0);
#pragma omp parallel for
                    for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) {
                        const std::pair<VertexID, VertexID> &e = buffer_recv[i_l];
                        VertexID root_id = e.first;
                        // Atomic increment: multiple threads may count labels
                        // for the same root concurrently.
                        __atomic_add_fetch(sizes_recved_root_labels.data() + root_id, 1, __ATOMIC_SEQ_CST);
                    }
                    // Resize the recved_dist_table for every root
#pragma omp parallel for
                    for (VertexID root_id = 0; root_id < roots_size; ++root_id) {
                        VertexID old_size = recved_dist_table[root_id].size();
                        VertexID tmp_size = sizes_recved_root_labels[root_id];
                        if (tmp_size) {
                            recved_dist_table[root_id].resize(old_size + tmp_size);
                            sizes_recved_root_labels[root_id] = old_size; // sizes_recved_root_labels now records old_size
                        }
                        // If tmp_size == 0, root_id has no received labels.
//                        sizes_recved_root_labels[root_id] = old_size; // sizes_recved_root_labels now records old_size
                    }
                    // Record received labels in recved_dist_table
#pragma omp parallel for
                    for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) {
                        const std::pair<VertexID, VertexID> &e = buffer_recv[i_l];
                        VertexID root_id = e.first;
                        VertexID cand_real_id = e.second;
                        dist_table[root_id][cand_real_id] = iter;
                        // TS_enqueue presumably appends thread-safely at the
                        // slot reserved above — confirm its definition.
                        PADO::TS_enqueue(recved_dist_table[root_id], sizes_recved_root_labels[root_id], cand_real_id);
                    }
                } else {
                    for (const std::pair<VertexID, VertexID> &e : buffer_recv) {
                        VertexID root_id = e.first;
                        VertexID cand_real_id = e.second;
                        dist_table[root_id][cand_real_id] = iter;
                        // Record the received element, for future reset
                        recved_dist_table[root_id].push_back(cand_real_id);
                    }
                }
            }

            // Sync the global_num_actives
            // NOTE(review): MPI_MAX here, while the retired sequential
            // version below used MPI_SUM. Presumably MAX is intentional so
            // that every host performs the same number of chunked scatter
            // rounds above (the chunk loop is driven by this value); confirm
            // before changing.
            MPI_Allreduce(&end_active_queue,
                    &global_num_actives,
                    1,
                    V_ID_Type,
                    MPI_MAX,
//                    MPI_SUM,
                    MPI_COMM_WORLD);
            gather_time += WallTimer::get_time_mark();
        }
//        {//test
//            if (0 == host_id) {
//                printf("iter: %u inserting labels finished.\n", iter);
//            }
//        }
    }

    // Reset the dist_table
    clearup_time -= WallTimer::get_time_mark();
    reset_at_end(
//            G,
//            roots_start,
//            roots_master_local,
            dist_table,
            recved_dist_table,
            bp_labels_table);
    clearup_time += WallTimer::get_time_mark();
//    {//test
//        if (0 == host_id) {
//            printf("host_id: %u resetting finished.\n", host_id);
//        }
//    }
}

//// Sequential Version
//template <VertexID BATCH_SIZE>
//inline void DistBVCPLL<BATCH_SIZE>::
//batch_process(
//        const DistGraph &G,
//        VertexID b_id,
//        VertexID roots_start, // start id of roots
//        VertexID roots_size, // how many roots in the batch
//        const std::vector<uint8_t> &used_bp_roots,
//        std::vector<VertexID> &active_queue,
//        VertexID &end_active_queue,
//        std::vector<VertexID> &got_candidates_queue,
//        VertexID &end_got_candidates_queue,
//        std::vector<ShortIndex> &short_index,
//        std::vector< std::vector<UnweightedDist> > &dist_table,
//        std::vector< std::vector<VertexID> > &recved_dist_table,
//        std::vector<BPLabelType> &bp_labels_table,
//        std::vector<uint8_t> &got_candidates,
////        std::vector<bool> &got_candidates,
//        std::vector<uint8_t> &is_active,
////        std::vector<bool> &is_active,
//        std::vector<VertexID> &once_candidated_queue,
//        VertexID &end_once_candidated_queue,
//        std::vector<uint8_t> &once_candidated)
////        std::vector<bool> &once_candidated)
//{
//    // At the beginning of a batch, initialize the labels L and distance buffer dist_table;
//    initializing_time -= WallTimer::get_time_mark();
//    VertexID global_num_actives = initialization(G,
//            short_index,
//            dist_table,
//            recved_dist_table,
//            bp_labels_table,
//            active_queue,
//            end_active_queue,
//            once_candidated_queue,
//            end_once_candidated_queue,
//            once_candidated,
//            b_id,
//            roots_start,
//            roots_size,
////            roots_master_local,
//            used_bp_roots);
//    initializing_time += WallTimer::get_time_mark();
//    UnweightedDist iter = 0; // The iterator, also the distance for current iteration
////    {//test
////        printf("host_id: %u initialization finished.\n", host_id);
////    }
//
//
//    while (global_num_actives) {
//////#ifdef DEBUG_MESSAGES_ON
////        {//
////            if (0 == host_id) {
////                printf("iter: %u global_num_actives: %u\n", iter, global_num_actives);
////            }
////        }
//////#endif
//        ++iter;
//        // Traverse active vertices to push their labels as candidates
//        // Send masters' newly added labels to other hosts
//        {
//            scatter_time -= WallTimer::get_time_mark();
//            std::vector<std::pair<VertexID, VertexID> > buffer_send_indices(end_active_queue);
//                //.first: Vertex ID
//                //.second: size of labels
//            std::vector<VertexID> buffer_send_labels;
//            // Prepare masters' newly added labels for sending
//            for (VertexID i_q = 0; i_q < end_active_queue; ++i_q) {
//                VertexID v_head_local = active_queue[i_q];
//                is_active[v_head_local] = 0; // reset is_active
//                VertexID v_head_global = G.get_global_vertex_id(v_head_local);
//                const IndexType &Lv = L[v_head_local];
//                // Prepare the buffer_send_indices
//                buffer_send_indices[i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size);
//                // These 2 index are used for traversing v_head's last inserted labels
//                VertexID l_i_start = Lv.distances.rbegin()->start_index;
//                VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size;
//                for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
//                    VertexID label_root_id = Lv.vertices[l_i];
//                    buffer_send_labels.push_back(label_root_id);
//                }
//            }
//            end_active_queue = 0;
//
//            for (int root = 0; root < num_hosts; ++root) {
//                // Get the indices
//                std::vector< std::pair<VertexID, VertexID> > indices_buffer;
//                one_host_bcasts_buffer_to_buffer(root,
//                        buffer_send_indices,
//                        indices_buffer);
//                if (indices_buffer.empty()) {
//                    continue;
//                }
//                // Get the labels
//                std::vector<VertexID> labels_buffer;
//                one_host_bcasts_buffer_to_buffer(root,
//                        buffer_send_labels,
//                        labels_buffer);
//                // Push those labels
//                EdgeID start_index = 0;
//                for (const std::pair<VertexID, VertexID> e : indices_buffer) {
//                    VertexID v_head_global = e.first;
//                    EdgeID bound_index = start_index + e.second;
//                    if (G.local_out_degrees[v_head_global]) {
//                        local_push_labels(
//                                v_head_global,
//                                start_index,
//                                bound_index,
//                                roots_start,
//                                labels_buffer,
//                                G,
//                                short_index,
//                                got_candidates_queue,
//                                end_got_candidates_queue,
//                                got_candidates,
//                                once_candidated_queue,
//                                end_once_candidated_queue,
//                                once_candidated,
//                                bp_labels_table,
//                                used_bp_roots,
//                                iter);
//                    }
//                    start_index = bound_index;
//                }
//            }
//            scatter_time += WallTimer::get_time_mark();
//        }
//
//        // Traverse vertices in the got_candidates_queue to insert labels
//        {
//            gather_time -= WallTimer::get_time_mark();
//            std::vector< std::pair<VertexID, VertexID> > buffer_send; // For sync elements in the dist_table
//                // pair.first: root id
//                // pair.second: label (global) id of the root
//            for (VertexID i_queue = 0; i_queue < end_got_candidates_queue; ++i_queue) {
//                VertexID v_id_local = got_candidates_queue[i_queue];
//                VertexID inserted_count = 0; //recording number of v_id's truly inserted candidates
//                got_candidates[v_id_local] = 0; // reset got_candidates
//                // Traverse v_id's all candidates
//                VertexID bound_cand_i = short_index[v_id_local].end_candidates_que;
//                for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) {
//                    VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i];
//                    short_index[v_id_local].is_candidate[cand_root_id] = 0;
//                    // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance
//                    if ( distance_query(
//                            cand_root_id,
//                            v_id_local,
//                            roots_start,
////                            L,
//                            dist_table,
//                            iter) ) {
//                        if (!is_active[v_id_local]) {
//                            is_active[v_id_local] = 1;
//                            active_queue[end_active_queue++] = v_id_local;
//                        }
//                        ++inserted_count;
//                        // The candidate cand_root_id needs to be added into v_id's label
//                        insert_label_only(
//                                cand_root_id,
//                                v_id_local,
//                                roots_start,
//                                roots_size,
//                                G,
////                                dist_table,
//                                buffer_send);
////                                iter);
//                    }
//                }
//                short_index[v_id_local].end_candidates_que = 0;
//                if (0 != inserted_count) {
//                    // Update other arrays in L[v_id] if new labels were inserted in this iteration
//                    update_label_indices(
//                            v_id_local,
//                            inserted_count,
////                            L,
//                            short_index,
//                            b_id,
//                            iter);
//                }
//            }
////            {//test
////                printf("host_id: %u gather: buffer_send.size(); %lu bytes: %lu\n", host_id, buffer_send.size(), MPI_Instance::get_sending_size(buffer_send));
////            }
//            end_got_candidates_queue = 0; // Set the got_candidates_queue empty
//            // Sync the dist_table
//            for (int root = 0; root < num_hosts; ++root) {
//                std::vector<std::pair<VertexID, VertexID>> buffer_recv;
//                one_host_bcasts_buffer_to_buffer(root,
//                        buffer_send,
//                        buffer_recv);
//                if (buffer_recv.empty()) {
//                    continue;
//                }
//                for (const std::pair<VertexID, VertexID> &e : buffer_recv) {
//                    VertexID root_id = e.first;
//                    VertexID cand_real_id = e.second;
//                    dist_table[root_id][cand_real_id] = iter;
//                    // Record the received element, for future reset
//                    recved_dist_table[root_id].push_back(cand_real_id);
//                }
//            }
//
//            // Sync the global_num_actives
//            MPI_Allreduce(&end_active_queue,
//                    &global_num_actives,
//                    1,
//                    V_ID_Type,
//                    MPI_SUM,
//                    MPI_COMM_WORLD);
//            gather_time += WallTimer::get_time_mark();
//        }
//    }
//
//    // Reset the dist_table
//    clearup_time -= WallTimer::get_time_mark();
//    reset_at_end(
////            G,
////            roots_start,
////            roots_master_local,
//            dist_table,
//            recved_dist_table,
//            bp_labels_table);
//    clearup_time += WallTimer::get_time_mark();
//}

//// Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer.
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//template <typename E_T, typename F>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::
//every_host_bcasts_buffer_and_proc(
//        std::vector<E_T> &buffer_send,
//        F &fun)
//{
//    // Every host h_i broadcast to others
//    for (int root = 0; root < num_hosts; ++root) {
//        std::vector<E_T> buffer_recv;
//        one_host_bcasts_buffer_to_buffer(root,
//                buffer_send,
//                buffer_recv);
//        if (buffer_recv.empty()) {
//            continue;
//        }
////        uint64_t size_buffer_send = buffer_send.size();
////        // Sync the size_buffer_send.
////        message_time -= WallTimer::get_time_mark();
////        MPI_Bcast(&size_buffer_send,
////                1,
////                MPI_UINT64_T,
////                root,
////                MPI_COMM_WORLD);
////        message_time += WallTimer::get_time_mark();
//////        {// test
//////            printf("host_id: %u h_i: %u bcast_buffer_send.size(): %lu\n", host_id, h_i, size_buffer_send);
//////        }
////        if (!size_buffer_send) {
////            continue;
////        }
////        message_time -= WallTimer::get_time_mark();
////        std::vector<E_T> buffer_recv(size_buffer_send);
////        if (host_id == root) {
////            buffer_recv.assign(buffer_send.begin(), buffer_send.end());
////        }
////        uint64_t bytes_buffer_send = size_buffer_send * ETypeSize;
////        if (bytes_buffer_send < static_cast<size_t>(INT_MAX)) {
////            // Only need 1 broadcast
////
////            MPI_Bcast(buffer_recv.data(),
////                    bytes_buffer_send,
////                    MPI_CHAR,
////                    root,
////                    MPI_COMM_WORLD);
////        } else {
////            const uint32_t num_unit_buffers = ((bytes_buffer_send - 1) / static_cast<size_t>(INT_MAX)) + 1;
////            const uint64_t unit_buffer_size = ((size_buffer_send - 1) / num_unit_buffers) + 1;
////            size_t offset = 0;
////            for (uint64_t b_i = 0; b_i < num_unit_buffers; ++b_i) {
//////                size_t offset = b_i * unit_buffer_size;
////                size_t size_unit_buffer = b_i == num_unit_buffers - 1
////                        ? size_buffer_send - offset
////                        : unit_buffer_size;
////                MPI_Bcast(buffer_recv.data() + offset,
////                        size_unit_buffer * ETypeSize,
////                        MPI_CHAR,
////                        root,
////                        MPI_COMM_WORLD);
////                offset += unit_buffer_size;
////            }
////        }
////        message_time += WallTimer::get_time_mark();
//        for (const E_T &e : buffer_recv) {
//            fun(e);
//        }
//    }
//}

//// Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer.
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//template <typename E_T, typename F>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::
//every_host_bcasts_buffer_and_proc(
//        std::vector<E_T> &buffer_send,
//        F &fun)
//{
//    // Host processes locally.
//    for (const E_T &e : buffer_send) {
//        fun(e);
//    }
//
//    // Every host sends to others
//    for (int src = 0; src < num_hosts; ++src) {
//        if (host_id == src) {
//            // Send from src
//            message_time -= WallTimer::get_time_mark();
//            for (int hop = 1; hop < num_hosts; ++hop) {
//                int dst = hop_2_root_host_id(hop, host_id);
//                MPI_Instance::send_buffer_2_dst(buffer_send,
//                        dst,
//                        SENDING_BUFFER_SEND,
//                        SENDING_SIZE_BUFFER_SEND);
//            }
//            message_time += WallTimer::get_time_mark();
//        } else {
//            // Receive from src
//            for (int hop = 1; hop < num_hosts; ++hop) {
//                int dst = hop_2_root_host_id(hop, src);
//                if (host_id == dst) {
//                    message_time -= WallTimer::get_time_mark();
//                    std::vector<E_T> buffer_recv;
//                    MPI_Instance::recv_buffer_from_src(buffer_recv,
//                            src,
//                            SENDING_BUFFER_SEND,
//                            SENDING_SIZE_BUFFER_SEND);
//                    message_time += WallTimer::get_time_mark();
//                    // Process
//                    for (const E_T &e : buffer_recv) {
//                        fun(e);
//                    }
//                }
//            }
//        }
//    }
//}

//// Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer.
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//template <typename E_T, typename F>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::
//every_host_bcasts_buffer_and_proc(
//        std::vector<E_T> &buffer_send,
//        F &fun)
//{
//    // Host processes locally.
//    for (const E_T &e : buffer_send) {
//        fun(e);
//    }
//    // Every host sends (num_hosts - 1) times
//    for (int hop = 1; hop < num_hosts; ++hop) {
//        int src = hop_2_me_host_id(-hop);
//        int dst = hop_2_me_host_id(hop);
//        if (src != dst) { // Normal case
//            // When host_id is odd, first receive, then send.
//            if (static_cast<uint32_t>(host_id) & 1U) {
//                message_time -= WallTimer::get_time_mark();
//                // Receive first.
//                std::vector<E_T> buffer_recv;
//                MPI_Instance::recv_buffer_from_src(buffer_recv,
//                        src,
//                        SENDING_BUFFER_SEND,
//                        SENDING_SIZE_BUFFER_SEND);
//                {//test
//                    printf("host_id: %u recved_from: %u\n", host_id, src);
//                }
//                // Send then.
//                MPI_Instance::send_buffer_2_dst(buffer_send,
//                        dst,
//                        SENDING_BUFFER_SEND,
//                        SENDING_SIZE_BUFFER_SEND);
//                {//test
//                    printf("host_id: %u send_to: %u\n", host_id, dst);
//                }
//                message_time += WallTimer::get_time_mark();
//                // Process
//                if (buffer_recv.empty()) {
//                    continue;
//                }
//                for (const E_T &e : buffer_recv) {
//                    fun(e);
//                }
//            } else { // When host_id is even, first send, then receive.
//                // Send first.
//                message_time -= WallTimer::get_time_mark();
//                MPI_Instance::send_buffer_2_dst(buffer_send,
//                        dst,
//                        SENDING_BUFFER_SEND,
//                        SENDING_SIZE_BUFFER_SEND);
//                {//test
//                    printf("host_id: %u send_to: %u\n", host_id, dst);
//                }
//                // Receive then.
//                std::vector<E_T> buffer_recv;
//                MPI_Instance::recv_buffer_from_src(buffer_recv,
//                        src,
//                        SENDING_BUFFER_SEND,
//                        SENDING_SIZE_BUFFER_SEND);
//                {//test
//                    printf("host_id: %u recved_from: %u\n", host_id, src);
//                }
//                message_time += WallTimer::get_time_mark();
//                // Process
//                if (buffer_recv.empty()) {
//                    continue;
//                }
//                for (const E_T &e : buffer_recv) {
//                    fun(e);
//                }
//            }
//        } else { // If host_id is higher than dst, first send, then receive
//            // This is a special case. It only happens when the num_hosts is even and hop equals to num_hosts/2.
// if (host_id < dst) { // // Send // message_time -= WallTimer::get_time_mark(); // MPI_Instance::send_buffer_2_dst(buffer_send, // dst, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // // Receive // std::vector<E_T> buffer_recv; // MPI_Instance::recv_buffer_from_src(buffer_recv, // src, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // message_time += WallTimer::get_time_mark(); // // Process // if (buffer_recv.empty()) { // continue; // } // for (const E_T &e : buffer_recv) { // fun(e); // } // } else { // Otherwise, if host_id is lower than dst, first receive, then send // // Receive // message_time -= WallTimer::get_time_mark(); // std::vector<E_T> buffer_recv; // MPI_Instance::recv_buffer_from_src(buffer_recv, // src, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // // Send // MPI_Instance::send_buffer_2_dst(buffer_send, // dst, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // message_time += WallTimer::get_time_mark(); // // Process // if (buffer_recv.empty()) { // continue; // } // for (const E_T &e : buffer_recv) { // fun(e); // } // } // } // } //} //// DEPRECATED version Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer. //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //template <typename E_T, typename F> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //every_host_bcasts_buffer_and_proc( // std::vector<E_T> &buffer_send, // F &fun) //{ // const uint32_t UNIT_BUFFER_SIZE = 16U << 20U; // // Every host h_i broadcast to others // for (int h_i = 0; h_i < num_hosts; ++h_i) { // uint64_t size_buffer_send = buffer_send.size(); // // Sync the size_buffer_send. 
// message_time -= WallTimer::get_time_mark(); // MPI_Bcast(&size_buffer_send, // 1, // MPI_UINT64_T, // h_i, // MPI_COMM_WORLD); // message_time += WallTimer::get_time_mark(); //// {// test //// printf("host_id: %u h_i: %u bcast_buffer_send.size(): %lu\n", host_id, h_i, size_buffer_send); //// } // if (!size_buffer_send) { // continue; // } // uint32_t num_unit_buffers = (size_buffer_send + UNIT_BUFFER_SIZE - 1) / UNIT_BUFFER_SIZE; // // // Broadcast the buffer_send // for (uint32_t b_i = 0; b_i < num_unit_buffers; ++b_i) { // // Prepare the unit buffer // message_time -= WallTimer::get_time_mark(); // size_t offset = b_i * UNIT_BUFFER_SIZE; // size_t size_unit_buffer = b_i == num_unit_buffers - 1 // ? size_buffer_send - offset // : UNIT_BUFFER_SIZE; // std::vector<E_T> unit_buffer(size_unit_buffer); // // Copy the messages from buffer_send to unit buffer. // if (host_id == h_i) { // unit_buffer.assign(buffer_send.begin() + offset, buffer_send.begin() + offset + size_unit_buffer); // } // // Broadcast the unit buffer // MPI_Bcast(unit_buffer.data(), // MPI_Instance::get_sending_size(unit_buffer), // MPI_CHAR, // h_i, // MPI_COMM_WORLD); // message_time += WallTimer::get_time_mark(); // // Process every element of unit_buffer // for (const E_T &e : unit_buffer) { // fun(e); // } // } // } //} // Function: Host root broadcasts its sending buffer to a receiving buffer. template <VertexID BATCH_SIZE> template <typename E_T> inline void DistBVCPLL<BATCH_SIZE>:: one_host_bcasts_buffer_to_buffer( int root, std::vector<E_T> &buffer_send, std::vector<E_T> &buffer_recv) { const size_t ETypeSize = sizeof(E_T); uint64_t size_buffer_send = buffer_send.size(); // Sync the size_buffer_send. 
message_time -= WallTimer::get_time_mark(); MPI_Bcast(&size_buffer_send, 1, MPI_UINT64_T, root, MPI_COMM_WORLD); message_time += WallTimer::get_time_mark(); buffer_recv.resize(size_buffer_send); if (!size_buffer_send) { return; } // Broadcast the buffer_send message_time -= WallTimer::get_time_mark(); if (host_id == root) { buffer_recv.assign(buffer_send.begin(), buffer_send.end()); } uint64_t bytes_buffer_send = size_buffer_send * ETypeSize; if (bytes_buffer_send <= static_cast<size_t>(INT_MAX)) { // Only need 1 broadcast MPI_Bcast(buffer_recv.data(), bytes_buffer_send, MPI_CHAR, root, MPI_COMM_WORLD); } else { const uint32_t num_unit_buffers = ((bytes_buffer_send - 1) / static_cast<size_t>(INT_MAX)) + 1; const uint64_t unit_buffer_size = ((size_buffer_send - 1) / num_unit_buffers) + 1; size_t offset = 0; for (uint64_t b_i = 0; b_i < num_unit_buffers; ++b_i) { size_t size_unit_buffer = b_i == num_unit_buffers - 1 ? size_buffer_send - offset : unit_buffer_size; MPI_Bcast(buffer_recv.data() + offset, size_unit_buffer * ETypeSize, MPI_CHAR, root, MPI_COMM_WORLD); offset += unit_buffer_size; } } message_time += WallTimer::get_time_mark(); } //// DEPRECATED Function: Host root broadcasts its sending buffer to a receiving buffer. //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //template <typename E_T> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //one_host_bcasts_buffer_to_buffer( // int root, // std::vector<E_T> &buffer_send, // std::vector<E_T> &buffer_recv) //{ // const uint32_t UNIT_BUFFER_SIZE = 16U << 20U; // uint64_t size_buffer_send = buffer_send.size(); // // Sync the size_buffer_send. 
// message_time -= WallTimer::get_time_mark(); // MPI_Bcast(&size_buffer_send, // 1, // MPI_UINT64_T, // root, // MPI_COMM_WORLD); // message_time += WallTimer::get_time_mark(); // buffer_recv.resize(size_buffer_send); // if (!size_buffer_send) { // return; // } // uint32_t num_unit_buffers = (size_buffer_send + UNIT_BUFFER_SIZE - 1) / UNIT_BUFFER_SIZE; // // // Broadcast the buffer_send // message_time -= WallTimer::get_time_mark(); // for (uint32_t b_i = 0; b_i < num_unit_buffers; ++b_i) { // // Prepare the unit buffer // size_t offset = b_i * UNIT_BUFFER_SIZE; // size_t size_unit_buffer = b_i == num_unit_buffers - 1 // ? size_buffer_send - offset // : UNIT_BUFFER_SIZE; // std::vector<E_T> unit_buffer(size_unit_buffer); // // Copy the messages from buffer_send to unit buffer. // if (host_id == root) { // unit_buffer.assign(buffer_send.begin() + offset, buffer_send.begin() + offset + size_unit_buffer); // } // // Broadcast the unit buffer // MPI_Bcast(unit_buffer.data(), // MPI_Instance::get_sending_size(unit_buffer), // MPI_CHAR, // root, // MPI_COMM_WORLD); // // Copy unit buffer to buffer_recv // std::copy(unit_buffer.begin(), unit_buffer.end(), buffer_recv.begin() + offset); // } // message_time += WallTimer::get_time_mark(); //} //// Function: Distance query of a pair of vertices, used for distrubuted version. 
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //inline UnweightedDist DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //dist_distance_query_pair( // VertexID a_input, // VertexID b_input, // const DistGraph &G) //{ // struct TmpMsgBPLabel { // UnweightedDist bp_dist[BITPARALLEL_SIZE]; // uint64_t bp_sets[BITPARALLEL_SIZE][2]; // // TmpMsgBPLabel() = default; // TmpMsgBPLabel(const UnweightedDist dist[], const uint64_t sets[][2]) // { // memcpy(bp_dist, dist, sizeof(bp_dist)); // memcpy(bp_sets, sets, sizeof(bp_sets)); // } // }; // // VertexID a_global = G.rank[a_input]; // VertexID b_global = G.rank[b_input]; // int a_host_id = G.get_master_host_id(a_global); // int b_host_id = G.get_master_host_id(b_global); // UnweightedDist min_d = MAX_UNWEIGHTED_DIST; // // // Both local // if (a_host_id == host_id && b_host_id == host_id) { // VertexID a_local = G.get_local_vertex_id(a_global); // VertexID b_local = G.get_local_vertex_id(b_global); // // Check Bit-Parallel Labels first // { // const IndexType &La = L[a_local]; // const IndexType &Lb = L[b_local]; // for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) { // VertexID td = La.bp_dist[i] + Lb.bp_dist[i]; // if (td - 2 <= min_d) { // td += // (La.bp_sets[i][0] & Lb.bp_sets[i][0]) ? -2 : // ((La.bp_sets[i][0] & Lb.bp_sets[i][1]) | // (La.bp_sets[i][1] & Lb.bp_sets[i][0])) // ? 
-1 : 0; // if (td < min_d) { // min_d = td; // } // } // } // } // // std::map<VertexID, UnweightedDist> markers; // // Traverse a's labels // { // const IndexType &Lr = L[a_local]; // VertexID b_i_bound = Lr.batches.size(); // _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); // _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); // _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); // // Traverse batches array // for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) { // VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; // VertexID dist_start_index = Lr.batches[b_i].start_index; // VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size; // // Traverse distances array // for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { // VertexID v_start_index = Lr.distances[dist_i].start_index; // VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size; // UnweightedDist dist = Lr.distances[dist_i].dist; // // Traverse vertices array // for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) { // VertexID label_id = Lr.vertices[v_i] + id_offset; // markers[label_id] = dist; // } // } // } // } // // Traverse b's labels // { // const IndexType &Lr = L[b_local]; // VertexID b_i_bound = Lr.batches.size(); // _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); // _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); // _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); // // Traverse batches array // for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) { // VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; // VertexID dist_start_index = Lr.batches[b_i].start_index; // VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size; // // Traverse distances array // for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { // VertexID v_start_index = Lr.distances[dist_i].start_index; // VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size; // UnweightedDist dist = Lr.distances[dist_i].dist; // // Traverse vertices array 
// for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) { // VertexID label_id = Lr.vertices[v_i] + id_offset; // const auto &tmp_l = markers.find(label_id); // if (tmp_l == markers.end()) { // continue; // } // int d = tmp_l->second + dist; // if (d < min_d) { // min_d = d; // } // } // } // } // } // } else { // // Host b_host_id sends to host a_host_id, then host a_host_id do the query // if (host_id == b_host_id) { // VertexID b_local = G.get_local_vertex_id(b_global); // const IndexType &Lr = L[b_local]; // // Bit-Parallel Labels // { // TmpMsgBPLabel msg_send(Lr.bp_dist, Lr.bp_sets); // MPI_Send(&msg_send, // sizeof(msg_send), // MPI_CHAR, // a_host_id, // SENDING_QUERY_BP_LABELS, // MPI_COMM_WORLD); // } // // Normal Labels // { // std::vector<std::pair<VertexID, UnweightedDist> > buffer_send; // VertexID b_i_bound = Lr.batches.size(); // _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); // _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); // _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); // // Traverse batches array // for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) { // VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; // VertexID dist_start_index = Lr.batches[b_i].start_index; // VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size; // // Traverse distances array // for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { // VertexID v_start_index = Lr.distances[dist_i].start_index; // VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size; // UnweightedDist dist = Lr.distances[dist_i].dist; // // Traverse vertices array // for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) { // VertexID label_id = Lr.vertices[v_i] + id_offset; // buffer_send.emplace_back(label_id, dist); // } // } // } // // MPI_Instance::send_buffer_2_dst(buffer_send, // a_host_id, // SENDING_QUERY_LABELS, // SENDING_SIZE_QUERY_LABELS); //// ///////////////////////////////////////////////// //// // //// 
std::vector<MPI_Request> requests_list; //// MPI_Instance::send_buffer_2_dest(buffer_send, //// requests_list, //// a_host_id, //// SENDING_QUERY_LABELS, //// SENDING_SIZE_QUERY_LABELS); //// MPI_Waitall(requests_list.size(), //// requests_list.data(), //// MPI_STATUSES_IGNORE); //// // //// ///////////////////////////////////////////////// // } // } else if (host_id == a_host_id) { // VertexID a_local = G.get_local_vertex_id(a_global); // const IndexType &Lr = L[a_local]; // // Receive BP labels // { // TmpMsgBPLabel msg_recv; // MPI_Recv(&msg_recv, // sizeof(msg_recv), // MPI_CHAR, // b_host_id, // SENDING_QUERY_BP_LABELS, // MPI_COMM_WORLD, // MPI_STATUS_IGNORE); // for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) { // VertexID td = Lr.bp_dist[i] + msg_recv.bp_dist[i]; // if (td - 2 <= min_d) { // td += // (Lr.bp_sets[i][0] & msg_recv.bp_sets[i][0]) ? -2 : // ((Lr.bp_sets[i][0] & msg_recv.bp_sets[i][1]) | // (Lr.bp_sets[i][1] & msg_recv.bp_sets[i][0])) // ? -1 : 0; // if (td < min_d) { // min_d = td; // } // } // } // } // std::map<VertexID, UnweightedDist> markers; // // Traverse a's labels // { // VertexID b_i_bound = Lr.batches.size(); // _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); // _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); // _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); // // Traverse batches array // for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) { // VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; // VertexID dist_start_index = Lr.batches[b_i].start_index; // VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size; // // Traverse distances array // for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { // VertexID v_start_index = Lr.distances[dist_i].start_index; // VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size; // UnweightedDist dist = Lr.distances[dist_i].dist; // // Traverse vertices array // for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) { // VertexID label_id = 
Lr.vertices[v_i] + id_offset; // markers[label_id] = dist; // } // } // } // } // // Receive b's labels // { // std::vector<std::pair<VertexID, UnweightedDist> > buffer_recv; // MPI_Instance::recv_buffer_from_src(buffer_recv, // b_host_id, // SENDING_QUERY_LABELS, // SENDING_SIZE_QUERY_LABELS); //// MPI_Instance::recv_buffer_from_source(buffer_recv, //// b_host_id, //// SENDING_QUERY_LABELS, //// SENDING_SIZE_QUERY_LABELS); // // for (const auto &l : buffer_recv) { // VertexID label_id = l.first; // const auto &tmp_l = markers.find(label_id); // if (tmp_l == markers.end()) { // continue; // } // int d = tmp_l->second + l.second; // if (d < min_d) { // min_d = d; // } // } // } // } // } // MPI_Allreduce(MPI_IN_PLACE, // &min_d, // 1, // MPI_Instance::get_mpi_datatype<UnweightedDist>(), // MPI_MIN, // MPI_COMM_WORLD); // return min_d; //} } #endif //PADO_DPADO_H
for-7.c
/* { dg-do compile } */
/* { dg-options "-fopenmp -fdump-tree-ompexp" } */

/* Verify that a worksharing loop with schedule(nonmonotonic:runtime) is
   lowered to the GOMP_loop_nonmonotonic_runtime_* libgomp entry points
   (checked by the scan-tree-dump-times directives below), rather than the
   monotonic runtime variants.  */

extern void bar(int);

void foo (int n)
{
  int i;
#pragma omp for schedule(nonmonotonic:runtime)
  for (i = 0; i < n; ++i)
    bar(i);
}

/* { dg-final { scan-tree-dump-times "GOMP_loop_nonmonotonic_runtime_start" 1 "ompexp" } } */
/* { dg-final { scan-tree-dump-times "GOMP_loop_nonmonotonic_runtime_next" 1 "ompexp" } } */
Parallelizer.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H

namespace Eigen {

namespace internal {

/** \internal
  * Stores (SetAction) or retrieves (GetAction) the maximum number of threads
  * Eigen may use, through *v.  The setting is kept in a function-local static
  * so it persists across calls and is shared by all callers.  When no limit
  * has been set (m_maxThreads <= 0) and OpenMP is available, GetAction
  * reports omp_get_max_threads(); without OpenMP it always reports 1. */
inline void manage_multi_threading(Action action, int* v)
{
  static EIGEN_UNUSED int m_maxThreads = -1;

  if(action==SetAction)
  {
    eigen_internal_assert(v!=0);
    m_maxThreads = *v;
  }
  else if(action==GetAction)
  {
    eigen_internal_assert(v!=0);
    #ifdef EIGEN_HAS_OPENMP
    if(m_maxThreads>0)
      *v = m_maxThreads;
    else
      *v = omp_get_max_threads();
    #else
    *v = 1;
    #endif
  }
  else
  {
    eigen_internal_assert(false);
  }
}

}

/** Must be called first when calling Eigen from multiple threads */
inline void initParallel()
{
  // Touch the thread-count and cache-size statics so their initialization
  // happens here rather than racing inside a parallel region.
  int nbt;
  internal::manage_multi_threading(GetAction, &nbt);
  std::ptrdiff_t l1, l2, l3;
  internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
}

/** \returns the max number of threads reserved for Eigen
  * \sa setNbThreads */
inline int nbThreads()
{
  int ret;
  internal::manage_multi_threading(GetAction, &ret);
  return ret;
}

/** Sets the max number of threads reserved for Eigen
  * \sa nbThreads */
inline void setNbThreads(int v)
{
  internal::manage_multi_threading(SetAction, &v);
}

namespace internal {

/** \internal Per-thread bookkeeping for a parallel GEMM.
  * sync/users are volatile since they are read/written by multiple threads;
  * lhs_start/lhs_length record the row range of the LHS owned by a thread. */
template<typename Index> struct GemmParallelInfo
{
  GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {}

  Index volatile sync;
  int volatile users;

  Index lhs_start;
  Index lhs_length;
};

/** \internal Runs func over the rows/cols product space, split across OpenMP
  * threads when Condition holds and the problem is large enough; otherwise
  * performs a single sequential call func(0,rows, 0,cols). */
template<bool Condition, typename Functor, typename Index>
void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth, bool transpose)
{
  // TODO when EIGEN_USE_BLAS is defined,
  // we should still enable OMP for other scalar types
#if !(defined (EIGEN_HAS_OPENMP)) || defined (EIGEN_USE_BLAS)
  // FIXME the transpose variable is only needed to properly split
  // the matrix product when multithreading is enabled. This is a temporary
  // fix to support row-major destination matrices. This whole
  // parallelizer mechanism has to be redesigned anyway.
  EIGEN_UNUSED_VARIABLE(depth);
  EIGEN_UNUSED_VARIABLE(transpose);
  func(0,rows, 0,cols);
#else

  // Dynamically check whether we should enable or disable OpenMP.
  // The conditions are:
  // - the max number of threads we can create is greater than 1
  // - we are not already in a parallel code
  // - the sizes are large enough

  // compute the maximal number of threads from the size of the product:
  // This first heuristic takes into account that the product kernel is fully optimized when working with nr columns at once.
  Index size = transpose ? rows : cols;
  Index pb_max_threads = std::max<Index>(1,size / Functor::Traits::nr);

  // compute the maximal number of threads from the total amount of work:
  double work = static_cast<double>(rows) * static_cast<double>(cols) *
      static_cast<double>(depth);
  double kMinTaskSize = 50000;  // FIXME improve this heuristic.
  pb_max_threads = std::max<Index>(1, std::min<Index>(pb_max_threads, work / kMinTaskSize));

  // compute the number of threads we are going to use
  Index threads = std::min<Index>(nbThreads(), pb_max_threads);

  // if multi-threading is explicitely disabled, not useful, or if we already are in a parallel session,
  // then abort multi-threading
  // FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
  if((!Condition) || (threads==1) || (omp_get_num_threads()>1))
    return func(0,rows, 0,cols);

  Eigen::initParallel();
  func.initParallelSession(threads);

  if(transpose)
    std::swap(rows,cols);

  ei_declare_aligned_stack_constructed_variable(GemmParallelInfo<Index>,info,threads,0);

  #pragma omp parallel num_threads(threads)
  {
    Index i = omp_get_thread_num();
    // Note that the actual number of threads might be lower than the number of request ones.
    Index actual_threads = omp_get_num_threads();

    // Column block rounded down to a multiple of 4; row block rounded down to
    // a multiple of the kernel's register blocking (mr).
    Index blockCols = (cols / actual_threads) & ~Index(0x3);
    Index blockRows = (rows / actual_threads);
    blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr;

    Index r0 = i*blockRows;
    // The last thread absorbs the rounding remainder of rows/cols.
    Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;

    Index c0 = i*blockCols;
    Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols;

    info[i].lhs_start = r0;
    info[i].lhs_length = actualBlockRows;

    if(transpose) func(c0, actualBlockCols, 0, rows, info);
    else          func(0, rows, c0, actualBlockCols, info);
  }
#endif
}

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_PARALLELIZER_H
tutorial_region_prof.c
/* * Copyright (c) 2015 - 2021, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY LOG OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <stdlib.h> #include <stdio.h> #include <time.h> #include <math.h> #include <stdint.h> #include <mpi.h> #ifdef _OPENMP #include <omp.h> #endif #include <geopm.h> #include "tutorial_region.h" #ifdef _OPENMP static int stream_profiled_omp(uint64_t region_id, size_t num_stream, double scalar, double *a, double *b, double *c) { const size_t block = 256; const size_t num_block = num_stream / block; const size_t num_remain = num_stream % block; int err = 0; #pragma omp parallel for for (size_t i = 0; i < num_block; ++i) { for (size_t j = 0; j < block; ++j) { a[i * block + j] = b[i * block + j] + scalar * c[i * block + j]; } geopm_tprof_post(); } #pragma omp parallel for for (size_t j = 0; j < num_remain; ++j) { a[num_block * block + j] = b[num_block * block + j] + scalar * c[num_block * block + j]; } return err; } #endif static int stream_profiled_serial(uint64_t region_id, size_t num_stream, double scalar, double *a, double *b, double *c) { const size_t block = 256; const size_t num_block = num_stream / block; const size_t num_remain = num_stream % block; geopm_tprof_init(num_block); for (size_t i = 0; i < num_block; ++i) { for (size_t j = 0; j < block; ++j) { a[i * block + j] = b[i * block + j] + scalar * c[i * block + j]; } geopm_tprof_post(); } for (size_t j = 0; j < num_remain; ++j) { a[num_block * block + j] = b[num_block * block + j] + scalar * c[num_block * block + j]; } return 0; } int tutorial_stream_profiled(double big_o, int do_report) { int err = 0; if (big_o != 0.0) { size_t cline_size = 64; size_t num_stream = (size_t)big_o * 500000000; size_t mem_size = sizeof(double) * num_stream; double *a = NULL; double *b = NULL; double *c = NULL; double scalar = 3.0; uint64_t stream_rid; if (!err) { err = geopm_prof_region("tutorial_stream", GEOPM_REGION_HINT_MEMORY, &stream_rid); } err = posix_memalign((void *)&a, cline_size, mem_size); if (!err) { err = posix_memalign((void *)&b, cline_size, mem_size); } if (!err) { err = posix_memalign((void *)&c, 
cline_size, mem_size); } if (!err) { #pragma omp parallel for for (int i = 0; i < num_stream; i++) { a[i] = 0.0; b[i] = 1.0; c[i] = 2.0; } if (do_report) { printf("Executing profiled STREAM triad on length %d vectors.\n", num_stream); fflush(stdout); } err = geopm_prof_enter(stream_rid); } if (!err) { #ifdef _OPENMP err = stream_profiled_omp(stream_rid, num_stream, scalar, a, b, c); #else err = stream_profiled_serial(stream_rid, num_stream, scalar, a, b, c); #endif } if (!err) { err = geopm_prof_exit(stream_rid); } if (!err) { free(c); free(b); free(a); } } }
GB_unaryop__lnot_uint64_int8.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__lnot_uint64_int8
// op(A') function: GB_tran__lnot_uint64_int8

// C type: uint64_t
// A type: int8_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = !(aij != 0)

#define GB_ATYPE \
    int8_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, x) \
    uint64_t z = (uint64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;\
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;  \
    GB_OP (GB_CX (pC), x) ;\
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_UINT64 || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise: Cx [p] = !(Ax [p] != 0), parallelized with a static schedule
// since every iteration does the same constant amount of work.
GrB_Info GB_unop__lnot_uint64_int8
(
    uint64_t *restrict Cx,
    const int8_t *restrict Ax,
    int64_t anz,            // number of entries in Ax
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body is shared across all generated operators and is
// pulled in from GB_unaryop_transpose.c, specialized via the macros above.
GrB_Info GB_tran__lnot_uint64_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
OpenmpUtil.h
#ifndef OPENMP_UTILITIES
#define OPENMP_UTILITIES

//#include <stdio.h>
#include <omp.h>
#include <assert.h>

// Returns the number of OpenMP threads that a default parallel region
// actually spawns, as observed by thread 0 via omp_get_num_threads().
// Asserts (in debug builds) that the result is in {1, ..., 1024}.
//
// Fix: marked inline -- this is a function *definition* in a header, so
// without inline (or static) every translation unit that includes this file
// would emit its own external definition and the link would fail with
// multiple-definition errors.
inline int get_CPU_num_threads() {
    // Fix: initialize procs so the assert below sees a deterministic value
    // even if OpenMP is disabled and the pragma is ignored (previously the
    // variable could be read uninitialized in that configuration).
    int procs = 0;
    #pragma omp parallel shared(procs)
    {
        int th_id = omp_get_thread_num();
        if(th_id == 0) {
            // Only the master thread records the team size; the implicit
            // barrier at the end of the parallel region makes it visible here.
            procs = omp_get_num_threads();
        }
    }
    bool valid_procs = (procs > 0) && (procs <= 1024);
    assert(valid_procs && "Number of threads NOT in {1, ..., 1024}");
    return procs;
}

#endif //OPENMP_UTILITIES
omp-mat-mul.c
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <omp.h>

#define NUM_THREADS 4
#define ROWS 1600
#define COLS 1600
#define tasks 1600

/* Print a rows x cols matrix (row-major, contiguous) to stdout.
 * Debugging aid only; the calls in main are commented out. */
void printArray(double* a, int rows, int cols)
{
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {
            printf("%.2f ", *(a + i*cols + j));
        }
        printf("\n");
    }
    printf("\n\n\n");
}

/* Allocate and fill a rows x cols matrix.
 * Element (r,c) = rows*c + c, i.e. the value depends only on the column.
 * NOTE(review): if a row-dependent pattern was intended this should be
 * rows*r + c -- confirm against the benchmark's expected output.
 * Exits on allocation failure instead of dereferencing NULL. */
double* makeArray(int rows, int cols)
{
    double* arr = (double*) malloc(rows*cols*sizeof(double));
    if (arr == NULL) {
        fprintf(stderr, "makeArray: allocation of %dx%d doubles failed\n", rows, cols);
        exit(EXIT_FAILURE);
    }
    for (int r = 0; r < rows; r++) {
        for (int c = 0; c < cols; c++) {
            *(arr + r*cols + c) = (double) (rows*c + c);
        }
    }
    return arr;
}

/* Minimum of two ints (used to clamp the last stripe to ROWS). */
int min(int i, int j)
{
    return i < j ? i : j;
}

int main(int argc, char *argv[])
{
    omp_set_num_threads(NUM_THREADS);

    /* Integer division: with tasks == COLS this is 1 row per task. */
    const int stripeSize = COLS/tasks;

    double* a = makeArray(ROWS, COLS);
    double* b = makeArray(ROWS, COLS);
    double* c = makeArray(ROWS, COLS);

    int t, i, j, k;
    double timer = -omp_get_wtime();

    /* c = a * b, parallelized over row stripes; each task owns a disjoint
     * set of output rows, so no synchronization is needed on c. */
#pragma omp parallel for shared(a,b,c) private(i,j,k,t)
    for (t = 0; t < tasks; t++) {
        for (i = t*stripeSize; i < min(t*stripeSize+stripeSize, ROWS); i++) {
            for (j = 0; j < COLS; j++) {
                double comp = 0.;
                for (k = 0; k < COLS; k++) {
                    comp += *(a + i*COLS + k) * *(b + k*COLS + j);
                }
                *(c + i*COLS + j) = comp;
            }
        }
    }

    timer += omp_get_wtime();
    printf("time taken for matrix multiply: %f\n", timer);

    // printArray(a, ROWS, COLS);
    // printArray(b, ROWS, COLS);
    // printArray(c, ROWS, COLS);

    /* The original leaked all three matrices. */
    free(a);
    free(b);
    free(c);
    return 0;
}
dynmat.c
/* Copyright (C) 2015 Atsushi Togo */ /* All rights reserved. */ /* This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. 
*/ #include <math.h> #include <stdlib.h> #include <dynmat.h> #define PI 3.14159265358979323846 static void get_dynmat_ij(double *dynamical_matrix, const int num_patom, const int num_satom, const double *fc, const double q[3], PHPYCONST double (*svecs)[27][3], const int *multi, const double *mass, const int *s2p_map, const int *p2s_map, PHPYCONST double (*charge_sum)[3][3], const int i, const int j); static void get_dm(double dm_real[3][3], double dm_imag[3][3], const int num_patom, const int num_satom, const double *fc, const double q[3], PHPYCONST double (*svecs)[27][3], const int *multi, const int *p2s_map, PHPYCONST double (*charge_sum)[3][3], const int i, const int j, const int k); static double get_dielectric_part(const double q_cart[3], PHPYCONST double dielectric[3][3]); static void get_KK(double *dd_part, /* [natom, 3, natom, 3, (real,imag)] */ PHPYCONST double (*G_list)[3], /* [num_G, 3] */ const int num_G, const int num_patom, const double q_cart[3], const double *q_direction_cart, PHPYCONST double dielectric[3][3], PHPYCONST double (*pos)[3], /* [num_patom, 3] */ const double lambda, const double tolerance); static void make_Hermitian(double *mat, const int num_band); static void multiply_borns(double *dd, const double *dd_in, const int num_patom, PHPYCONST double (*born)[3][3]); int dym_get_dynamical_matrix_at_q(double *dynamical_matrix, const int num_patom, const int num_satom, const double *fc, const double q[3], PHPYCONST double (*svecs)[27][3], const int *multi, const double *mass, const int *s2p_map, const int *p2s_map, PHPYCONST double (*charge_sum)[3][3], const int with_openmp) { int i, j, ij; if (with_openmp) { #pragma omp parallel for for (ij = 0; ij < num_patom * num_patom ; ij++) { get_dynmat_ij(dynamical_matrix, num_patom, num_satom, fc, q, svecs, multi, mass, s2p_map, p2s_map, charge_sum, ij / num_patom, /* i */ ij % num_patom); /* j */ } } else { for (i = 0; i < num_patom; i++) { for (j = 0; j < num_patom; j++) { 
get_dynmat_ij(dynamical_matrix, num_patom, num_satom, fc, q, svecs, multi, mass, s2p_map, p2s_map, charge_sum, i, j); } } } make_Hermitian(dynamical_matrix, num_patom * 3); return 0; } void dym_get_recip_dipole_dipole(double *dd, /* [natom, 3, natom, 3, (real,imag)] */ const double *dd_q0, /* [natom, 3, 3, (real,imag)] */ PHPYCONST double (*G_list)[3], /* [num_G, 3] */ const int num_G, const int num_patom, const double q_cart[3], const double *q_direction_cart, /* must be pointer */ PHPYCONST double (*born)[3][3], PHPYCONST double dielectric[3][3], PHPYCONST double (*pos)[3], /* [num_patom, 3] */ const double factor, /* 4pi/V*unit-conv */ const double lambda, const double tolerance) { int i, k, l, adrs, adrs_sum; double *dd_tmp; dd_tmp = NULL; dd_tmp = (double*) malloc(sizeof(double) * num_patom * num_patom * 18); for (i = 0; i < num_patom * num_patom * 18; i++) { dd[i] = 0; dd_tmp[i] = 0; } get_KK(dd_tmp, G_list, num_G, num_patom, q_cart, q_direction_cart, dielectric, pos, lambda, tolerance); multiply_borns(dd, dd_tmp, num_patom, born); for (i = 0; i < num_patom; i++) { for (k = 0; k < 3; k++) { /* alpha */ for (l = 0; l < 3; l++) { /* beta */ adrs = i * num_patom * 9 + k * num_patom * 3 + i * 3 + l; adrs_sum = i * 9 + k * 3 + l; dd[adrs * 2] -= dd_q0[adrs_sum * 2]; dd[adrs * 2 + 1] -= dd_q0[adrs_sum * 2 + 1]; } } } for (i = 0; i < num_patom * num_patom * 18; i++) { dd[i] *= factor; } /* This may not be necessary. 
*/ /* make_Hermitian(dd, num_patom * 3); */ free(dd_tmp); dd_tmp = NULL; } void dym_get_recip_dipole_dipole_q0(double *dd_q0, /* [natom, 3, 3, (real,imag)] */ PHPYCONST double (*G_list)[3], /* [num_G, 3] */ const int num_G, const int num_patom, PHPYCONST double (*born)[3][3], PHPYCONST double dielectric[3][3], PHPYCONST double (*pos)[3], /* [num_patom, 3] */ const double lambda, const double tolerance) { int i, j, k, l, adrs_tmp, adrs, adrsT; double zero_vec[3]; double *dd_tmp1, *dd_tmp2; dd_tmp1 = NULL; dd_tmp1 = (double*) malloc(sizeof(double) * num_patom * num_patom * 18); dd_tmp2 = NULL; dd_tmp2 = (double*) malloc(sizeof(double) * num_patom * num_patom * 18); for (i = 0; i < num_patom * num_patom * 18; i++) { dd_tmp1[i] = 0; dd_tmp2[i] = 0; } zero_vec[0] = 0; zero_vec[1] = 0; zero_vec[2] = 0; get_KK(dd_tmp1, G_list, num_G, num_patom, zero_vec, NULL, dielectric, pos, lambda, tolerance); multiply_borns(dd_tmp2, dd_tmp1, num_patom, born); for (i = 0; i < num_patom * 18; i++) { dd_q0[i] = 0; } for (i = 0; i < num_patom; i++) { for (k = 0; k < 3; k++) { /* alpha */ for (l = 0; l < 3; l++) { /* beta */ adrs = i * 9 + k * 3 + l; for (j = 0; j < num_patom; j++) { adrs_tmp = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l ; dd_q0[adrs * 2] += dd_tmp2[adrs_tmp * 2]; dd_q0[adrs * 2 + 1] += dd_tmp2[adrs_tmp * 2 + 1]; } } } } /* Summation over another atomic index */ /* for (j = 0; j < num_patom; j++) { */ /* for (k = 0; k < 3; k++) { /\* alpha *\/ */ /* for (l = 0; l < 3; l++) { /\* beta *\/ */ /* adrs = j * 9 + k * 3 + l; */ /* for (i = 0; i < num_patom; i++) { */ /* adrs_tmp = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l ; */ /* dd_q0[adrs * 2] += dd_tmp2[adrs_tmp * 2]; */ /* dd_q0[adrs * 2 + 1] += dd_tmp2[adrs_tmp * 2 + 1]; */ /* } */ /* } */ /* } */ /* } */ for (i = 0; i < num_patom; i++) { for (k = 0; k < 3; k++) { /* alpha */ for (l = 0; l < 3; l++) { /* beta */ adrs = i * 9 + k * 3 + l; adrsT = i * 9 + l * 3 + k; dd_q0[adrs * 2] += dd_q0[adrsT * 2]; 
dd_q0[adrs * 2] /= 2; dd_q0[adrsT * 2] = dd_q0[adrs * 2]; dd_q0[adrs * 2 + 1] -= dd_q0[adrsT * 2 + 1]; dd_q0[adrs * 2 + 1] /= 2; dd_q0[adrsT * 2 + 1] = -dd_q0[adrs * 2 + 1]; } } } free(dd_tmp1); dd_tmp1 = NULL; free(dd_tmp2); dd_tmp2 = NULL; } void dym_get_charge_sum(double (*charge_sum)[3][3], const int num_patom, const double factor, /* 4pi/V*unit-conv and denominator */ const double q_cart[3], PHPYCONST double (*born)[3][3]) { int i, j, k, a, b; double (*q_born)[3]; q_born = (double (*)[3]) malloc(sizeof(double[3]) * num_patom); for (i = 0; i < num_patom; i++) { for (j = 0; j < 3; j++) { q_born[i][j] = 0; } } for (i = 0; i < num_patom; i++) { for (j = 0; j < 3; j++) { for (k = 0; k < 3; k++) { q_born[i][j] += q_cart[k] * born[i][k][j]; } } } for (i = 0; i < num_patom; i++) { for (j = 0; j < num_patom; j++) { for (a = 0; a < 3; a++) { for (b = 0; b < 3; b++) { charge_sum[i * num_patom + j][a][b] = q_born[i][a] * q_born[j][b] * factor; } } } } free(q_born); q_born = NULL; } /* fc[num_patom, num_satom, 3, 3] */ /* dm[num_comm_points, num_patom * 3, num_patom *3] */ /* comm_points[num_satom, num_patom, 27, 3] */ /* shortest_vectors[num_satom, num_patom, 27, 3] */ /* multiplicities[num_satom, num_patom] */ void dym_transform_dynmat_to_fc(double *fc, const double *dm, PHPYCONST double (*comm_points)[3], PHPYCONST double (*shortest_vectors)[27][3], const int *multiplicities, const double *masses, const int *s2pp_map, const int *fc_index_map, const int num_patom, const int num_satom) { int i, j, k, l, m, N, adrs, multi; double coef, phase, cos_phase, sin_phase; N = num_satom / num_patom; for (i = 0; i < num_patom * num_satom * 9; i++) { fc[i] = 0; } for (i = 0; i < num_patom; i++) { for (j = 0; j < num_satom; j++) { coef = sqrt(masses[i] * masses[s2pp_map[j]]) / N; for (k = 0; k < N; k++) { cos_phase = 0; sin_phase = 0; multi = multiplicities[j * num_patom + i]; for (l = 0; l < multi; l++) { phase = 0; for (m = 0; m < 3; m++) { phase -= comm_points[k][m] * 
shortest_vectors[j * num_patom + i][l][m]; } cos_phase += cos(phase * 2 * PI); sin_phase += sin(phase * 2 * PI); } cos_phase /= multi; sin_phase /= multi; for (l = 0; l < 3; l++) { for (m = 0; m < 3; m++) { adrs = k * num_patom * num_patom * 18 + i * num_patom * 18 + l * num_patom * 6 + s2pp_map[j] * 6 + m * 2; fc[fc_index_map[i] * num_satom * 9 + j * 9 + l * 3 + m] += (dm[adrs] * cos_phase - dm[adrs + 1] * sin_phase) * coef; } } } } } } static void get_dynmat_ij(double *dynamical_matrix, const int num_patom, const int num_satom, const double *fc, const double q[3], PHPYCONST double (*svecs)[27][3], const int *multi, const double *mass, const int *s2p_map, const int *p2s_map, PHPYCONST double (*charge_sum)[3][3], const int i, const int j) { int k, l, adrs; double mass_sqrt; double dm_real[3][3], dm_imag[3][3]; mass_sqrt = sqrt(mass[i] * mass[j]); for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { dm_real[k][l] = 0; dm_imag[k][l] = 0; } } for (k = 0; k < num_satom; k++) { /* Lattice points of right index of fc */ if (s2p_map[k] != p2s_map[j]) { continue; } get_dm(dm_real, dm_imag, num_patom, num_satom, fc, q, svecs, multi, p2s_map, charge_sum, i, j, k); } for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { adrs = (i * 3 + k) * num_patom * 3 + j * 3 + l; dynamical_matrix[adrs * 2] = dm_real[k][l] / mass_sqrt; dynamical_matrix[adrs * 2 + 1] = dm_imag[k][l] / mass_sqrt; } } } static void get_dm(double dm_real[3][3], double dm_imag[3][3], const int num_patom, const int num_satom, const double *fc, const double q[3], PHPYCONST double (*svecs)[27][3], const int *multi, const int *p2s_map, PHPYCONST double (*charge_sum)[3][3], const int i, const int j, const int k) { int l, m; double phase, cos_phase, sin_phase, fc_elem; cos_phase = 0; sin_phase = 0; for (l = 0; l < multi[k * num_patom + i]; l++) { phase = 0; for (m = 0; m < 3; m++) { phase += q[m] * svecs[k * num_patom + i][l][m]; } cos_phase += cos(phase * 2 * PI) / multi[k * num_patom + i]; sin_phase += sin(phase * 2 * 
PI) / multi[k * num_patom + i]; } for (l = 0; l < 3; l++) { for (m = 0; m < 3; m++) { if (charge_sum) { fc_elem = (fc[p2s_map[i] * num_satom * 9 + k * 9 + l * 3 + m] + charge_sum[i * num_patom + j][l][m]); } else { fc_elem = fc[p2s_map[i] * num_satom * 9 + k * 9 + l * 3 + m]; } dm_real[l][m] += fc_elem * cos_phase; dm_imag[l][m] += fc_elem * sin_phase; } } } static double get_dielectric_part(const double q_cart[3], PHPYCONST double dielectric[3][3]) { int i, j; double x[3]; double sum; for (i = 0; i < 3; i++) { x[i] = 0; for (j = 0; j < 3; j++) { x[i] += dielectric[i][j] * q_cart[j]; } } sum = 0; for (i = 0; i < 3; i++) { sum += q_cart[i] * x[i]; } return sum; } static void get_KK(double *dd_part, /* [natom, 3, natom, 3, (real,imag)] */ PHPYCONST double (*G_list)[3], /* [num_G, 3] */ const int num_G, const int num_patom, const double q_cart[3], const double *q_direction_cart, PHPYCONST double dielectric[3][3], PHPYCONST double (*pos)[3], /* [num_patom, 3] */ const double lambda, const double tolerance) { int i, j, k, l, g, adrs; double q_K[3]; double norm, cos_phase, sin_phase, phase, dielectric_part, exp_damp, L2; double KK[3][3]; L2 = 4 * lambda * lambda; /* sum over K = G + q and over G (i.e. q=0) */ /* q_direction has values for summation over K at Gamma point. 
*/ /* q_direction is NULL for summation over G */ for (g = 0; g < num_G; g++) { norm = 0; for (i = 0; i < 3; i++) { q_K[i] = G_list[g][i] + q_cart[i]; norm += q_K[i] * q_K[i]; } if (sqrt(norm) < tolerance) { if (!q_direction_cart) { continue; } else { dielectric_part = get_dielectric_part(q_direction_cart, dielectric); for (i = 0; i < 3; i++) { for (j = 0; j < 3; j++) { KK[i][j] = q_direction_cart[i] * q_direction_cart[j] / dielectric_part; } } } } else { dielectric_part = get_dielectric_part(q_K, dielectric); exp_damp = exp(-dielectric_part / L2); for (i = 0; i < 3; i++) { for (j = 0; j < 3; j++) { KK[i][j] = q_K[i] * q_K[j] / dielectric_part * exp_damp; } } } for (i = 0; i < num_patom; i++) { for (j = 0; j < num_patom; j++) { phase = 0; for (k = 0; k < 3; k++) { /* For D-type dynamical matrix */ /* phase += (pos[i][k] - pos[j][k]) * q_K[k]; */ /* For C-type dynamical matrix */ phase += (pos[i][k] - pos[j][k]) * G_list[g][k]; } phase *= 2 * PI; cos_phase = cos(phase); sin_phase = sin(phase); for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { adrs = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l; dd_part[adrs * 2] += KK[k][l] * cos_phase; dd_part[adrs * 2 + 1] += KK[k][l] * sin_phase; } } } } } } static void make_Hermitian(double *mat, const int num_band) { int i, j, adrs, adrsT; for (i = 0; i < num_band; i++) { for (j = i; j < num_band; j++) { adrs = i * num_band + j * 1; adrs *= 2; adrsT = j * num_band + i * 1; adrsT *= 2; /* real part */ mat[adrs] += mat[adrsT]; mat[adrs] /= 2; /* imaginary part */ mat[adrs + 1] -= mat[adrsT+ 1]; mat[adrs + 1] /= 2; /* store */ mat[adrsT] = mat[adrs]; mat[adrsT + 1] = -mat[adrs + 1]; } } } static void multiply_borns(double *dd, const double *dd_in, const int num_patom, PHPYCONST double (*born)[3][3]) { int i, j, k, l, m, n, adrs, adrs_in; double zz; for (i = 0; i < num_patom; i++) { for (j = 0; j < num_patom; j++) { for (k = 0; k < 3; k++) { /* alpha */ for (l = 0; l < 3; l++) { /* beta */ adrs = i * num_patom * 9 + k * 
num_patom * 3 + j * 3 + l; for (m = 0; m < 3; m++) { /* alpha' */ for (n = 0; n < 3; n++) { /* beta' */ adrs_in = i * num_patom * 9 + m * num_patom * 3 + j * 3 + n ; zz = born[i][m][k] * born[j][n][l]; dd[adrs * 2] += dd_in[adrs_in * 2] * zz; dd[adrs * 2 + 1] += dd_in[adrs_in * 2 + 1] * zz; } } } } } } }
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 4; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; 
t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
softmax.h
// Copyright (C) 2018 Intel Corporation // // SPDX-License-Identifier: Apache-2.0 // #pragma once #define USE_FAST_EXP 0 #if USE_FAST_EXP #include "fast_exp.h" #else #include "opt_exp.h" #endif #include <cmath> #include <omp.h> #include "defs.h" static inline void softmax_many_batches(const float *src_data, float *dst_data, int B, int C, int H, int W) { #pragma omp parallel for schedule(static) for (int i = 0; i < B * H * W; i++) { const float *psrc = src_data + (i / (H * W)) * C * H * W - (i / (H * W)) * H * W; float *pdst = dst_data + (i / (H * W)) * C * H * W - (i / (H * W)) * H * W; float max = psrc[i]; for (int c = 0; c < C; c++) { float val = psrc[c * H * W + i]; if (val > max) max = val; } float expSum = 0; for (int c = 0; c < C; c++) { pdst[c * H * W + i] = exp(psrc[c * H * W + i] - max); expSum += pdst[c * H * W + i]; } for (int c = 0; c < C; c++) { pdst[c * H * W + i] = pdst[c * H * W + i] / expSum; } } } static inline void softmax_generic(const float *src_data, float *dst_data, int B, int C, int H, int W) { for (int b = 0; b < B; b++) { #if defined(HAVE_AVX2) #pragma omp parallel for schedule(static) for (int i = 0; i <= H*W - 8; i += 8) { __m256 vmax = _mm256_loadu_ps(src_data + b*C*H*W + i); for (int c = 0; c < C; c++) { __m256 vval = _mm256_loadu_ps(src_data + b*C*H*W + c*H*W + i); __m256 vmask = _mm256_cmp_ps(vval, vmax, _CMP_GT_OS); vmax = _mm256_blendv_ps(vmax, vval, vmask); } __m256 vexpSum = _mm256_setzero_ps(); for (int c = 0; c < C; c++) { __m256 vval = _mm256_loadu_ps(src_data + b*C*H*W + c*H*W + i); #if USE_FAST_EXP __m256 vres = _avx_fast_exp_ps(_mm256_sub_ps(vval, vmax)); #else __m256 vres = _avx_opt_exp_ps(_mm256_sub_ps(vval, vmax)); #endif vexpSum = _mm256_add_ps(vexpSum, vres); _mm256_storeu_ps(dst_data + b*C*H*W + c*H*W + i, vres); } for (int c = 0; c < C; c++) { __m256 vval = _mm256_loadu_ps(dst_data + b*C*H*W + c*H*W + i); _mm256_storeu_ps(dst_data + b*C*H*W + c*H*W + i, _mm256_div_ps(vval, vexpSum)); } } #elif defined(HAVE_SSE) 
#pragma omp parallel for schedule(static) for (int i = 0; i <= H*W - 4; i += 4) { __m128 vmax = _mm_loadu_ps(src_data + b*C*H*W + i); for (int c = 0; c < C; c++) { __m128 vval = _mm_loadu_ps(src_data + b*C*H*W + c*H*W + i); __m128 vmask = _mm_cmpgt_ps(vval, vmax); vmax = _mm_blendv_ps(vmax, vval, vmask); } __m128 vexpSum = _mm_setzero_ps(); for (int c = 0; c < C; c++) { __m128 vval = _mm_loadu_ps(src_data + b*C*H*W + c*H*W + i); #if USE_FAST_EXP __m128 vres = _sse_fast_exp_ps(_mm_sub_ps(vval, vmax)); #else __m128 vres = _sse_opt_exp_ps(_mm_sub_ps(vval, vmax)); #endif vexpSum = _mm_add_ps(vexpSum, vres); _mm_storeu_ps(dst_data + b*C*H*W + c*H*W + i, vres); } for (int c = 0; c < C; c++) { __m128 vval = _mm_loadu_ps(dst_data + b*C*H*W + c*H*W + i); _mm_storeu_ps(dst_data + b*C*H*W + c*H*W + i, _mm_div_ps(vval, vexpSum)); } } #endif #if defined(HAVE_AVX2) int start = (H*W / 8) * 8; #elif defined(HAVE_SSE) int start = (H*W / 4) * 4; #else int start = 0; #endif for (int i = start; i < H * W; i++) { float max = src_data[b * C * H * W + i]; for (int c = 0; c < C; c++) { float val = src_data[b * C * H * W + c * H * W + i]; if (val > max) max = val; } float expSum = 0; for (int c = 0; c < C; c++) { dst_data[b * C * H * W + c * H * W + i] = exp(src_data[b * C * H * W + c * H * W + i] - max); expSum += dst_data[b * C * H * W + c * H * W + i]; } for (int c = 0; c < C; c++) { dst_data[b * C * H * W + c * H * W + i] = dst_data[b * C * H * W + c * H * W + i] / expSum; } } } }
feature.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF EEEEE AAA TTTTT U U RRRR EEEEE % % F E A A T U U R R E % % FFF EEE AAAAA T U U RRRR EEE % % F E A A T U U R R E % % F EEEEE A A T UUU R R EEEEE % % % % % % MagickCore Image Feature Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/animate.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/compress.h" #include "MagickCore/constitute.h" #include "MagickCore/display.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/feature.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/list.h" #include "MagickCore/image-private.h" #include "MagickCore/magic.h" #include "MagickCore/magick.h" #include "MagickCore/matrix.h" #include "MagickCore/memory_.h" #include "MagickCore/module.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/morphology-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/semaphore.h" #include "MagickCore/signature-private.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/timer.h" #include "MagickCore/utility.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C a n n y E d g e I m a g e % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CannyEdgeImage() uses a multi-stage algorithm to detect a wide range of % edges in images. % % The format of the CannyEdgeImage method is: % % Image *CannyEdgeImage(const Image *image,const double radius, % const double sigma,const double lower_percent, % const double upper_percent,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the gaussian smoothing filter. % % o sigma: the sigma of the gaussian smoothing filter. % % o lower_percent: percentage of edge pixels in the lower threshold. % % o upper_percent: percentage of edge pixels in the upper threshold. % % o exception: return any errors or warnings in this structure. % */ typedef struct _CannyInfo { double magnitude, intensity; int orientation; ssize_t x, y; } CannyInfo; static inline MagickBooleanType IsAuthenticPixel(const Image *image, const ssize_t x,const ssize_t y) { if ((x < 0) || (x >= (ssize_t) image->columns)) return(MagickFalse); if ((y < 0) || (y >= (ssize_t) image->rows)) return(MagickFalse); return(MagickTrue); } static MagickBooleanType TraceEdges(Image *edge_image,CacheView *edge_view, MatrixInfo *canny_cache,const ssize_t x,const ssize_t y, const double lower_threshold,ExceptionInfo *exception) { CannyInfo edge, pixel; MagickBooleanType status; register Quantum *q; register ssize_t i; q=GetCacheViewAuthenticPixels(edge_view,x,y,1,1,exception); if (q == (Quantum *) NULL) return(MagickFalse); *q=QuantumRange; status=SyncCacheViewAuthenticPixels(edge_view,exception); if (status == MagickFalse) return(MagickFalse); if (GetMatrixElement(canny_cache,0,0,&edge) == MagickFalse) return(MagickFalse); edge.x=x; edge.y=y; if (SetMatrixElement(canny_cache,0,0,&edge) == MagickFalse) return(MagickFalse); for (i=1; i != 0; ) { ssize_t v; i--; status=GetMatrixElement(canny_cache,i,0,&edge); if (status == MagickFalse) return(MagickFalse); for (v=(-1); v <= 
1; v++) {
    ssize_t
      u;

    for (u=(-1); u <= 1; u++)
    {
      /* Skip the center pixel; it is the edge currently being traced. */
      if ((u == 0) && (v == 0))
        continue;
      if (IsAuthenticPixel(edge_image,edge.x+u,edge.y+v) == MagickFalse)
        continue;
      /*
        Not an edge if gradient value is below the lower threshold.
      */
      q=GetCacheViewAuthenticPixels(edge_view,edge.x+u,edge.y+v,1,1,
        exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      status=GetMatrixElement(canny_cache,edge.x+u,edge.y+v,&pixel);
      if (status == MagickFalse)
        return(MagickFalse);
      if ((GetPixelIntensity(edge_image,q) == 0.0) &&
          (pixel.intensity >= lower_threshold))
        {
          /*
            Weak edge connected to a strong edge: promote it to full
            intensity and push its coordinates onto the work queue (stored in
            row 0 of canny_cache) so its neighbors are examined too.
          */
          *q=QuantumRange;
          status=SyncCacheViewAuthenticPixels(edge_view,exception);
          if (status == MagickFalse)
            return(MagickFalse);
          edge.x+=u;
          edge.y+=v;
          status=SetMatrixElement(canny_cache,i,0,&edge);
          if (status == MagickFalse)
            return(MagickFalse);
          i++;
        }
    }
  }
}
return(MagickTrue);
}

/*
  CannyEdgeImage() detects edges with the classic Canny pipeline: Gaussian
  noise filtering, gradient estimation, non-maxima suppression, and
  hysteresis thresholding via TraceEdges().  Returns a new grayscale edge
  image (caller owns it), or NULL on failure.

  Parameters:
    image         - source image.
    radius, sigma - geometry of the Gaussian blur used to suppress noise.
    lower_percent, upper_percent - hysteresis thresholds expressed as a
                    fraction of the observed gradient-magnitude range.
    exception     - receives errors and warnings.
*/
MagickExport Image *CannyEdgeImage(const Image *image,const double radius,
  const double sigma,const double lower_percent,const double upper_percent,
  ExceptionInfo *exception)
{
#define CannyEdgeImageTag  "CannyEdge/Image"

  CacheView
    *edge_view;

  CannyInfo
    element;

  char
    geometry[MagickPathExtent];

  double
    lower_threshold,
    max,
    min,
    upper_threshold;

  Image
    *edge_image;

  KernelInfo
    *kernel_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MatrixInfo
    *canny_cache;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Filter out noise: two directional blurs (0 and 90 degrees) applied as a
    single compound convolution kernel.
  */
  (void) FormatLocaleString(geometry,MagickPathExtent,
    "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
  kernel_info=AcquireKernelInfo(geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  edge_image=MorphologyImage(image,ConvolveMorphology,1,kernel_info,
    exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  if (edge_image == (Image *) NULL)
    return((Image *) NULL);
  if (TransformImageColorspace(edge_image,GRAYColorspace,exception) == MagickFalse)
    {
      edge_image=DestroyImage(edge_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(edge_image,OffAlphaChannel,exception);
  /*
    Find the intensity gradient of the image.  Per-pixel magnitude and
    quantized orientation are stored off-heap in a matrix cache.
  */
  canny_cache=AcquireMatrixInfo(edge_image->columns,edge_image->rows,
    sizeof(CannyInfo),exception);
  if (canny_cache == (MatrixInfo *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  edge_view=AcquireVirtualCacheView(edge_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(edge_image,edge_image,edge_image->rows,1)
#endif
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* Fetch a 2-row window (columns+1 wide) for the 2x2 gradient operator. */
    p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns+1,2,
      exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        pixel;

      double
        dx,
        dy;

      register const Quantum
        *magick_restrict kernel_pixels;

      ssize_t
        v;

      /* 2x2 Roberts-style difference operators for x and y gradients. */
      static double
        Gx[2][2] =
        {
          { -1.0, +1.0 },
          { -1.0, +1.0 }
        },
        Gy[2][2] =
        {
          { +1.0, +1.0 },
          { -1.0, -1.0 }
        };

      (void) ResetMagickMemory(&pixel,0,sizeof(pixel));
      dx=0.0;
      dy=0.0;
      kernel_pixels=p;
      for (v=0; v < 2; v++)
      {
        ssize_t
          u;

        for (u=0; u < 2; u++)
        {
          double
            intensity;

          intensity=GetPixelIntensity(edge_image,kernel_pixels+u);
          dx+=0.5*Gx[v][u]*intensity;
          dy+=0.5*Gy[v][u]*intensity;
        }
        /*
          NOTE(review): kernel_pixels advances by quanta, not pixels; the
          +u above likewise.  This is only correct if
          GetPixelChannels(edge_image) == 1 -- confirm against the pixel
          channel map after TransformImageColorspace/SetImageAlphaChannel.
        */
        kernel_pixels+=edge_image->columns+1;
      }
      pixel.magnitude=hypot(dx,dy);
      /*
        Quantize gradient direction into 4 bins using the tangents of
        22.5/67.5 degrees (0.4142.../2.4142...) as slope cut points.
      */
      pixel.orientation=0;
      if (fabs(dx) > MagickEpsilon)
        {
          double
            slope;

          slope=dy/dx;
          if (slope < 0.0)
            {
              if (slope < -2.41421356237)
                pixel.orientation=0;
              else
                if (slope < -0.414213562373)
                  pixel.orientation=1;
                else
                  pixel.orientation=2;
            }
          else
            {
              if (slope > 2.41421356237)
                pixel.orientation=0;
              else
                if (slope > 0.414213562373)
                  pixel.orientation=3;
                else
                  pixel.orientation=2;
            }
        }
      if (SetMatrixElement(canny_cache,x,y,&pixel) == MagickFalse)
        continue;
      p+=GetPixelChannels(edge_image);
    }
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Non-maxima suppression, remove pixels that are not considered to be part
    of an edge.  Also tracks the global min/max suppressed intensity to
    derive the hysteresis thresholds below.
  */
  progress=0;
  (void) GetMatrixElement(canny_cache,0,0,&element);
  max=element.intensity;
  min=element.intensity;
  edge_view=AcquireAuthenticCacheView(edge_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(edge_image,edge_image,edge_image->rows,1)
#endif
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(edge_view,0,y,edge_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        alpha_pixel,
        beta_pixel,
        pixel;

      /* Compare each pixel against its two neighbors along the gradient. */
      (void) GetMatrixElement(canny_cache,x,y,&pixel);
      switch (pixel.orientation)
      {
        case 0:
        default:
        {
          /*
            0 degrees, north and south.
          */
          (void) GetMatrixElement(canny_cache,x,y-1,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x,y+1,&beta_pixel);
          break;
        }
        case 1:
        {
          /*
            45 degrees, northwest and southeast.
          */
          (void) GetMatrixElement(canny_cache,x-1,y-1,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x+1,y+1,&beta_pixel);
          break;
        }
        case 2:
        {
          /*
            90 degrees, east and west.
          */
          (void) GetMatrixElement(canny_cache,x-1,y,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x+1,y,&beta_pixel);
          break;
        }
        case 3:
        {
          /*
            135 degrees, northeast and southwest.
          */
          (void) GetMatrixElement(canny_cache,x+1,y-1,&beta_pixel);
          (void) GetMatrixElement(canny_cache,x-1,y+1,&alpha_pixel);
          break;
        }
      }
      /* Keep the magnitude only where it is a local maximum. */
      pixel.intensity=pixel.magnitude;
      if ((pixel.magnitude < alpha_pixel.magnitude) ||
          (pixel.magnitude < beta_pixel.magnitude))
        pixel.intensity=0;
      (void) SetMatrixElement(canny_cache,x,y,&pixel);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_CannyEdgeImage)
#endif
      {
        if (pixel.intensity < min)
          min=pixel.intensity;
        if (pixel.intensity > max)
          max=pixel.intensity;
      }
      *q=0;  /* blank the edge image; TraceEdges() re-lights edge pixels */
      q+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(edge_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Estimate hysteresis threshold from the observed intensity range.
  */
  lower_threshold=lower_percent*(max-min)+min;
  upper_threshold=upper_percent*(max-min)+min;
  /*
    Hysteresis threshold.  This pass is intentionally serial: TraceEdges()
    mutates the shared work queue in canny_cache.  Note a MagickFalse status
    from GetMatrixElement/TraceEdges short-circuits all remaining pixels.
  */
  edge_view=AcquireAuthenticCacheView(edge_image,exception);
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        pixel;

      register const Quantum
        *magick_restrict p;

      /*
        Edge if pixel gradient higher than upper threshold.
      */
      p=GetCacheViewVirtualPixels(edge_view,x,y,1,1,exception);
      if (p == (const Quantum *) NULL)
        continue;
      status=GetMatrixElement(canny_cache,x,y,&pixel);
      if (status == MagickFalse)
        continue;
      if ((GetPixelIntensity(edge_image,p) == 0.0) &&
          (pixel.intensity >= upper_threshold))
        status=TraceEdges(edge_image,edge_view,canny_cache,x,y,
          lower_threshold,exception);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_CannyEdgeImage)
#endif
        proceed=SetImageProgress(image,CannyEdgeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Free resources.
  */
  canny_cache=DestroyMatrixInfo(canny_cache);
  return(edge_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t   I m a g e   F e a t u r e s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageFeatures() returns features for each channel in the image in
%  each of four directions (horizontal, vertical, left and right diagonals)
%  for the specified distance.  The features include the angular second
%  moment, contrast, correlation, sum of squares: variance, inverse difference
%  moment, sum average, sum variance, sum entropy, entropy, difference variance,
%  difference entropy, information measures of correlation 1, information
%  measures of correlation 2, and maximum correlation coefficient.  You can
%  access the red channel contrast, for example, like this:
%
%      channel_features=GetImageFeatures(image,1,exception);
%      contrast=channel_features[RedPixelChannel].contrast[0];
%
%  Use MagickRelinquishMemory() to free the features buffer.
%
%  The format of the GetImageFeatures method is:
%
%      ChannelFeatures *GetImageFeatures(const Image *image,
%        const size_t distance,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o distance: the distance.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  MagickLog10() returns log10(|x|), clamping |x| below to Log10Epsilon so a
  zero (or near-zero) probability never yields -infinity in the entropy and
  correlation sums below.
*/
static inline double MagickLog10(const double x)
{
#define Log10Epsilon  (1.0e-11)

  if (fabs(x) < Log10Epsilon)
    return(log10(Log10Epsilon));
  return(log10(fabs(x)));
}

MagickExport ChannelFeatures *GetImageFeatures(const Image *image,
  const size_t distance,ExceptionInfo *exception)
{
  /* Per-channel accumulator, one slot per co-occurrence direction. */
  typedef struct _ChannelStatistics
  {
    PixelInfo
      direction[4];  /* horizontal, vertical, left and right diagonals */
  } ChannelStatistics;

  CacheView
    *image_view;

  ChannelFeatures
    *channel_features;

  ChannelStatistics
    **cooccurrence,
    correlation,
    *density_x,
    *density_xy,
    *density_y,
    entropy_x,
    entropy_xy,
    entropy_xy1,
    entropy_xy2,
    entropy_y,
    mean,
    **Q,
    *sum,
    sum_squares,
    variance;

  PixelPacket
    gray,
    *grays;

  MagickBooleanType
    status;

  register ssize_t
    i,
    r;

  size_t
    length;

  unsigned int
    number_grays;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* The co-occurrence offsets need at least distance+1 rows and columns. */
  if ((image->columns < (distance+1)) || (image->rows < (distance+1)))
    return((ChannelFeatures *) NULL);
  /* One zeroed ChannelFeatures per pixel channel; caller frees the buffer. */
  length=MaxPixelChannels+1UL;
  channel_features=(ChannelFeatures *) AcquireQuantumMemory(length,
    sizeof(*channel_features));
  if (channel_features == (ChannelFeatures *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(channel_features,0,length*
    sizeof(*channel_features));
  /*
    Form grays.
*/ grays=(PixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*grays)); if (grays == (PixelPacket *) NULL) { channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } for (i=0; i <= (ssize_t) MaxMap; i++) { grays[i].red=(~0U); grays[i].green=(~0U); grays[i].blue=(~0U); grays[i].alpha=(~0U); grays[i].black=(~0U); } status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (r=0; r < (ssize_t) image->rows; r++) { register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,r,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { grays[ScaleQuantumToMap(GetPixelRed(image,p))].red= ScaleQuantumToMap(GetPixelRed(image,p)); grays[ScaleQuantumToMap(GetPixelGreen(image,p))].green= ScaleQuantumToMap(GetPixelGreen(image,p)); grays[ScaleQuantumToMap(GetPixelBlue(image,p))].blue= ScaleQuantumToMap(GetPixelBlue(image,p)); if (image->colorspace == CMYKColorspace) grays[ScaleQuantumToMap(GetPixelBlack(image,p))].black= ScaleQuantumToMap(GetPixelBlack(image,p)); if (image->alpha_trait != UndefinedPixelTrait) grays[ScaleQuantumToMap(GetPixelAlpha(image,p))].alpha= ScaleQuantumToMap(GetPixelAlpha(image,p)); p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); if (status == MagickFalse) { grays=(PixelPacket *) RelinquishMagickMemory(grays); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); return(channel_features); } (void) ResetMagickMemory(&gray,0,sizeof(gray)); for (i=0; i <= (ssize_t) MaxMap; i++) { if (grays[i].red != ~0U) 
grays[gray.red++].red=grays[i].red; if (grays[i].green != ~0U) grays[gray.green++].green=grays[i].green; if (grays[i].blue != ~0U) grays[gray.blue++].blue=grays[i].blue; if (image->colorspace == CMYKColorspace) if (grays[i].black != ~0U) grays[gray.black++].black=grays[i].black; if (image->alpha_trait != UndefinedPixelTrait) if (grays[i].alpha != ~0U) grays[gray.alpha++].alpha=grays[i].alpha; } /* Allocate spatial dependence matrix. */ number_grays=gray.red; if (gray.green > number_grays) number_grays=gray.green; if (gray.blue > number_grays) number_grays=gray.blue; if (image->colorspace == CMYKColorspace) if (gray.black > number_grays) number_grays=gray.black; if (image->alpha_trait != UndefinedPixelTrait) if (gray.alpha > number_grays) number_grays=gray.alpha; cooccurrence=(ChannelStatistics **) AcquireQuantumMemory(number_grays, sizeof(*cooccurrence)); density_x=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1), sizeof(*density_x)); density_xy=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1), sizeof(*density_xy)); density_y=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1), sizeof(*density_y)); Q=(ChannelStatistics **) AcquireQuantumMemory(number_grays,sizeof(*Q)); sum=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(*sum)); if ((cooccurrence == (ChannelStatistics **) NULL) || (density_x == (ChannelStatistics *) NULL) || (density_xy == (ChannelStatistics *) NULL) || (density_y == (ChannelStatistics *) NULL) || (Q == (ChannelStatistics **) NULL) || (sum == (ChannelStatistics *) NULL)) { if (Q != (ChannelStatistics **) NULL) { for (i=0; i < (ssize_t) number_grays; i++) Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]); Q=(ChannelStatistics **) RelinquishMagickMemory(Q); } if (sum != (ChannelStatistics *) NULL) sum=(ChannelStatistics *) RelinquishMagickMemory(sum); if (density_y != (ChannelStatistics *) NULL) density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y); if (density_xy != 
(ChannelStatistics *) NULL) density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy); if (density_x != (ChannelStatistics *) NULL) density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x); if (cooccurrence != (ChannelStatistics **) NULL) { for (i=0; i < (ssize_t) number_grays; i++) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory( cooccurrence); } grays=(PixelPacket *) RelinquishMagickMemory(grays); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } (void) ResetMagickMemory(&correlation,0,sizeof(correlation)); (void) ResetMagickMemory(density_x,0,2*(number_grays+1)*sizeof(*density_x)); (void) ResetMagickMemory(density_xy,0,2*(number_grays+1)*sizeof(*density_xy)); (void) ResetMagickMemory(density_y,0,2*(number_grays+1)*sizeof(*density_y)); (void) ResetMagickMemory(&mean,0,sizeof(mean)); (void) ResetMagickMemory(sum,0,number_grays*sizeof(*sum)); (void) ResetMagickMemory(&sum_squares,0,sizeof(sum_squares)); (void) ResetMagickMemory(density_xy,0,2*number_grays*sizeof(*density_xy)); (void) ResetMagickMemory(&entropy_x,0,sizeof(entropy_x)); (void) ResetMagickMemory(&entropy_xy,0,sizeof(entropy_xy)); (void) ResetMagickMemory(&entropy_xy1,0,sizeof(entropy_xy1)); (void) ResetMagickMemory(&entropy_xy2,0,sizeof(entropy_xy2)); (void) ResetMagickMemory(&entropy_y,0,sizeof(entropy_y)); (void) ResetMagickMemory(&variance,0,sizeof(variance)); for (i=0; i < (ssize_t) number_grays; i++) { cooccurrence[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays, sizeof(**cooccurrence)); Q[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(**Q)); if ((cooccurrence[i] == (ChannelStatistics *) NULL) || (Q[i] == (ChannelStatistics *) NULL)) break; (void) 
ResetMagickMemory(cooccurrence[i],0,number_grays* sizeof(**cooccurrence)); (void) ResetMagickMemory(Q[i],0,number_grays*sizeof(**Q)); } if (i < (ssize_t) number_grays) { for (i--; i >= 0; i--) { if (Q[i] != (ChannelStatistics *) NULL) Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]); if (cooccurrence[i] != (ChannelStatistics *) NULL) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); } Q=(ChannelStatistics **) RelinquishMagickMemory(Q); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence); sum=(ChannelStatistics *) RelinquishMagickMemory(sum); density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y); density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy); density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x); grays=(PixelPacket *) RelinquishMagickMemory(grays); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } /* Initialize spatial dependence matrix. */ status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); for (r=0; r < (ssize_t) image->rows; r++) { register const Quantum *magick_restrict p; register ssize_t x; ssize_t offset, u, v; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-(ssize_t) distance,r,image->columns+ 2*distance,distance+2,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } p+=distance*GetPixelChannels(image);; for (x=0; x < (ssize_t) image->columns; x++) { for (i=0; i < 4; i++) { switch (i) { case 0: default: { /* Horizontal adjacency. */ offset=(ssize_t) distance; break; } case 1: { /* Vertical adjacency. */ offset=(ssize_t) (image->columns+2*distance); break; } case 2: { /* Right diagonal adjacency. */ offset=(ssize_t) ((image->columns+2*distance)-distance); break; } case 3: { /* Left diagonal adjacency. 
*/ offset=(ssize_t) ((image->columns+2*distance)+distance); break; } } u=0; v=0; while (grays[u].red != ScaleQuantumToMap(GetPixelRed(image,p))) u++; while (grays[v].red != ScaleQuantumToMap(GetPixelRed(image,p+offset*GetPixelChannels(image)))) v++; cooccurrence[u][v].direction[i].red++; cooccurrence[v][u].direction[i].red++; u=0; v=0; while (grays[u].green != ScaleQuantumToMap(GetPixelGreen(image,p))) u++; while (grays[v].green != ScaleQuantumToMap(GetPixelGreen(image,p+offset*GetPixelChannels(image)))) v++; cooccurrence[u][v].direction[i].green++; cooccurrence[v][u].direction[i].green++; u=0; v=0; while (grays[u].blue != ScaleQuantumToMap(GetPixelBlue(image,p))) u++; while (grays[v].blue != ScaleQuantumToMap(GetPixelBlue(image,p+offset*GetPixelChannels(image)))) v++; cooccurrence[u][v].direction[i].blue++; cooccurrence[v][u].direction[i].blue++; if (image->colorspace == CMYKColorspace) { u=0; v=0; while (grays[u].black != ScaleQuantumToMap(GetPixelBlack(image,p))) u++; while (grays[v].black != ScaleQuantumToMap(GetPixelBlack(image,p+offset*GetPixelChannels(image)))) v++; cooccurrence[u][v].direction[i].black++; cooccurrence[v][u].direction[i].black++; } if (image->alpha_trait != UndefinedPixelTrait) { u=0; v=0; while (grays[u].alpha != ScaleQuantumToMap(GetPixelAlpha(image,p))) u++; while (grays[v].alpha != ScaleQuantumToMap(GetPixelAlpha(image,p+offset*GetPixelChannels(image)))) v++; cooccurrence[u][v].direction[i].alpha++; cooccurrence[v][u].direction[i].alpha++; } } p+=GetPixelChannels(image); } } grays=(PixelPacket *) RelinquishMagickMemory(grays); image_view=DestroyCacheView(image_view); if (status == MagickFalse) { for (i=0; i < (ssize_t) number_grays; i++) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), 
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } /* Normalize spatial dependence matrix. */ for (i=0; i < 4; i++) { double normalize; register ssize_t y; switch (i) { case 0: default: { /* Horizontal adjacency. */ normalize=2.0*image->rows*(image->columns-distance); break; } case 1: { /* Vertical adjacency. */ normalize=2.0*(image->rows-distance)*image->columns; break; } case 2: { /* Right diagonal adjacency. */ normalize=2.0*(image->rows-distance)*(image->columns-distance); break; } case 3: { /* Left diagonal adjacency. */ normalize=2.0*(image->rows-distance)*(image->columns-distance); break; } } normalize=PerceptibleReciprocal(normalize); for (y=0; y < (ssize_t) number_grays; y++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { cooccurrence[x][y].direction[i].red*=normalize; cooccurrence[x][y].direction[i].green*=normalize; cooccurrence[x][y].direction[i].blue*=normalize; if (image->colorspace == CMYKColorspace) cooccurrence[x][y].direction[i].black*=normalize; if (image->alpha_trait != UndefinedPixelTrait) cooccurrence[x][y].direction[i].alpha*=normalize; } } } /* Compute texture features. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { register ssize_t y; for (y=0; y < (ssize_t) number_grays; y++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Angular second moment: measure of homogeneity of the image. 
*/ channel_features[RedPixelChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].red* cooccurrence[x][y].direction[i].red; channel_features[GreenPixelChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].green* cooccurrence[x][y].direction[i].green; channel_features[BluePixelChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].blue* cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].black* cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].alpha* cooccurrence[x][y].direction[i].alpha; /* Correlation: measure of linear-dependencies in the image. */ sum[y].direction[i].red+=cooccurrence[x][y].direction[i].red; sum[y].direction[i].green+=cooccurrence[x][y].direction[i].green; sum[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) sum[y].direction[i].black+=cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) sum[y].direction[i].alpha+=cooccurrence[x][y].direction[i].alpha; correlation.direction[i].red+=x*y*cooccurrence[x][y].direction[i].red; correlation.direction[i].green+=x*y* cooccurrence[x][y].direction[i].green; correlation.direction[i].blue+=x*y* cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) correlation.direction[i].black+=x*y* cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) correlation.direction[i].alpha+=x*y* cooccurrence[x][y].direction[i].alpha; /* Inverse Difference Moment. 
*/ channel_features[RedPixelChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].red/((y-x)*(y-x)+1); channel_features[GreenPixelChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].green/((y-x)*(y-x)+1); channel_features[BluePixelChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].blue/((y-x)*(y-x)+1); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].black/((y-x)*(y-x)+1); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].alpha/((y-x)*(y-x)+1); /* Sum average. */ density_xy[y+x+2].direction[i].red+= cooccurrence[x][y].direction[i].red; density_xy[y+x+2].direction[i].green+= cooccurrence[x][y].direction[i].green; density_xy[y+x+2].direction[i].blue+= cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) density_xy[y+x+2].direction[i].black+= cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) density_xy[y+x+2].direction[i].alpha+= cooccurrence[x][y].direction[i].alpha; /* Entropy. 
*/ channel_features[RedPixelChannel].entropy[i]-= cooccurrence[x][y].direction[i].red* MagickLog10(cooccurrence[x][y].direction[i].red); channel_features[GreenPixelChannel].entropy[i]-= cooccurrence[x][y].direction[i].green* MagickLog10(cooccurrence[x][y].direction[i].green); channel_features[BluePixelChannel].entropy[i]-= cooccurrence[x][y].direction[i].blue* MagickLog10(cooccurrence[x][y].direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].entropy[i]-= cooccurrence[x][y].direction[i].black* MagickLog10(cooccurrence[x][y].direction[i].black); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].entropy[i]-= cooccurrence[x][y].direction[i].alpha* MagickLog10(cooccurrence[x][y].direction[i].alpha); /* Information Measures of Correlation. */ density_x[x].direction[i].red+=cooccurrence[x][y].direction[i].red; density_x[x].direction[i].green+=cooccurrence[x][y].direction[i].green; density_x[x].direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->alpha_trait != UndefinedPixelTrait) density_x[x].direction[i].alpha+= cooccurrence[x][y].direction[i].alpha; if (image->colorspace == CMYKColorspace) density_x[x].direction[i].black+= cooccurrence[x][y].direction[i].black; density_y[y].direction[i].red+=cooccurrence[x][y].direction[i].red; density_y[y].direction[i].green+=cooccurrence[x][y].direction[i].green; density_y[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) density_y[y].direction[i].black+= cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) density_y[y].direction[i].alpha+= cooccurrence[x][y].direction[i].alpha; } mean.direction[i].red+=y*sum[y].direction[i].red; sum_squares.direction[i].red+=y*y*sum[y].direction[i].red; mean.direction[i].green+=y*sum[y].direction[i].green; sum_squares.direction[i].green+=y*y*sum[y].direction[i].green; mean.direction[i].blue+=y*sum[y].direction[i].blue; 
sum_squares.direction[i].blue+=y*y*sum[y].direction[i].blue; if (image->colorspace == CMYKColorspace) { mean.direction[i].black+=y*sum[y].direction[i].black; sum_squares.direction[i].black+=y*y*sum[y].direction[i].black; } if (image->alpha_trait != UndefinedPixelTrait) { mean.direction[i].alpha+=y*sum[y].direction[i].alpha; sum_squares.direction[i].alpha+=y*y*sum[y].direction[i].alpha; } } /* Correlation: measure of linear-dependencies in the image. */ channel_features[RedPixelChannel].correlation[i]= (correlation.direction[i].red-mean.direction[i].red* mean.direction[i].red)/(sqrt(sum_squares.direction[i].red- (mean.direction[i].red*mean.direction[i].red))*sqrt( sum_squares.direction[i].red-(mean.direction[i].red* mean.direction[i].red))); channel_features[GreenPixelChannel].correlation[i]= (correlation.direction[i].green-mean.direction[i].green* mean.direction[i].green)/(sqrt(sum_squares.direction[i].green- (mean.direction[i].green*mean.direction[i].green))*sqrt( sum_squares.direction[i].green-(mean.direction[i].green* mean.direction[i].green))); channel_features[BluePixelChannel].correlation[i]= (correlation.direction[i].blue-mean.direction[i].blue* mean.direction[i].blue)/(sqrt(sum_squares.direction[i].blue- (mean.direction[i].blue*mean.direction[i].blue))*sqrt( sum_squares.direction[i].blue-(mean.direction[i].blue* mean.direction[i].blue))); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].correlation[i]= (correlation.direction[i].black-mean.direction[i].black* mean.direction[i].black)/(sqrt(sum_squares.direction[i].black- (mean.direction[i].black*mean.direction[i].black))*sqrt( sum_squares.direction[i].black-(mean.direction[i].black* mean.direction[i].black))); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].correlation[i]= (correlation.direction[i].alpha-mean.direction[i].alpha* mean.direction[i].alpha)/(sqrt(sum_squares.direction[i].alpha- (mean.direction[i].alpha*mean.direction[i].alpha))*sqrt( 
sum_squares.direction[i].alpha-(mean.direction[i].alpha* mean.direction[i].alpha))); } /* Compute more texture features. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { register ssize_t x; for (x=2; x < (ssize_t) (2*number_grays); x++) { /* Sum average. */ channel_features[RedPixelChannel].sum_average[i]+= x*density_xy[x].direction[i].red; channel_features[GreenPixelChannel].sum_average[i]+= x*density_xy[x].direction[i].green; channel_features[BluePixelChannel].sum_average[i]+= x*density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].sum_average[i]+= x*density_xy[x].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].sum_average[i]+= x*density_xy[x].direction[i].alpha; /* Sum entropy. */ channel_features[RedPixelChannel].sum_entropy[i]-= density_xy[x].direction[i].red* MagickLog10(density_xy[x].direction[i].red); channel_features[GreenPixelChannel].sum_entropy[i]-= density_xy[x].direction[i].green* MagickLog10(density_xy[x].direction[i].green); channel_features[BluePixelChannel].sum_entropy[i]-= density_xy[x].direction[i].blue* MagickLog10(density_xy[x].direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].sum_entropy[i]-= density_xy[x].direction[i].black* MagickLog10(density_xy[x].direction[i].black); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].sum_entropy[i]-= density_xy[x].direction[i].alpha* MagickLog10(density_xy[x].direction[i].alpha); /* Sum variance. 
*/ channel_features[RedPixelChannel].sum_variance[i]+= (x-channel_features[RedPixelChannel].sum_entropy[i])* (x-channel_features[RedPixelChannel].sum_entropy[i])* density_xy[x].direction[i].red; channel_features[GreenPixelChannel].sum_variance[i]+= (x-channel_features[GreenPixelChannel].sum_entropy[i])* (x-channel_features[GreenPixelChannel].sum_entropy[i])* density_xy[x].direction[i].green; channel_features[BluePixelChannel].sum_variance[i]+= (x-channel_features[BluePixelChannel].sum_entropy[i])* (x-channel_features[BluePixelChannel].sum_entropy[i])* density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].sum_variance[i]+= (x-channel_features[BlackPixelChannel].sum_entropy[i])* (x-channel_features[BlackPixelChannel].sum_entropy[i])* density_xy[x].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].sum_variance[i]+= (x-channel_features[AlphaPixelChannel].sum_entropy[i])* (x-channel_features[AlphaPixelChannel].sum_entropy[i])* density_xy[x].direction[i].alpha; } } /* Compute more texture features. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { register ssize_t y; for (y=0; y < (ssize_t) number_grays; y++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Sum of Squares: Variance */ variance.direction[i].red+=(y-mean.direction[i].red+1)* (y-mean.direction[i].red+1)*cooccurrence[x][y].direction[i].red; variance.direction[i].green+=(y-mean.direction[i].green+1)* (y-mean.direction[i].green+1)*cooccurrence[x][y].direction[i].green; variance.direction[i].blue+=(y-mean.direction[i].blue+1)* (y-mean.direction[i].blue+1)*cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) variance.direction[i].black+=(y-mean.direction[i].black+1)* (y-mean.direction[i].black+1)*cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) variance.direction[i].alpha+=(y-mean.direction[i].alpha+1)* (y-mean.direction[i].alpha+1)* cooccurrence[x][y].direction[i].alpha; /* Sum average / Difference Variance. */ density_xy[MagickAbsoluteValue(y-x)].direction[i].red+= cooccurrence[x][y].direction[i].red; density_xy[MagickAbsoluteValue(y-x)].direction[i].green+= cooccurrence[x][y].direction[i].green; density_xy[MagickAbsoluteValue(y-x)].direction[i].blue+= cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) density_xy[MagickAbsoluteValue(y-x)].direction[i].black+= cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) density_xy[MagickAbsoluteValue(y-x)].direction[i].alpha+= cooccurrence[x][y].direction[i].alpha; /* Information Measures of Correlation. 
*/ entropy_xy.direction[i].red-=cooccurrence[x][y].direction[i].red* MagickLog10(cooccurrence[x][y].direction[i].red); entropy_xy.direction[i].green-=cooccurrence[x][y].direction[i].green* MagickLog10(cooccurrence[x][y].direction[i].green); entropy_xy.direction[i].blue-=cooccurrence[x][y].direction[i].blue* MagickLog10(cooccurrence[x][y].direction[i].blue); if (image->colorspace == CMYKColorspace) entropy_xy.direction[i].black-=cooccurrence[x][y].direction[i].black* MagickLog10(cooccurrence[x][y].direction[i].black); if (image->alpha_trait != UndefinedPixelTrait) entropy_xy.direction[i].alpha-= cooccurrence[x][y].direction[i].alpha*MagickLog10( cooccurrence[x][y].direction[i].alpha); entropy_xy1.direction[i].red-=(cooccurrence[x][y].direction[i].red* MagickLog10(density_x[x].direction[i].red*density_y[y].direction[i].red)); entropy_xy1.direction[i].green-=(cooccurrence[x][y].direction[i].green* MagickLog10(density_x[x].direction[i].green* density_y[y].direction[i].green)); entropy_xy1.direction[i].blue-=(cooccurrence[x][y].direction[i].blue* MagickLog10(density_x[x].direction[i].blue*density_y[y].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_xy1.direction[i].black-=( cooccurrence[x][y].direction[i].black*MagickLog10( density_x[x].direction[i].black*density_y[y].direction[i].black)); if (image->alpha_trait != UndefinedPixelTrait) entropy_xy1.direction[i].alpha-=( cooccurrence[x][y].direction[i].alpha*MagickLog10( density_x[x].direction[i].alpha*density_y[y].direction[i].alpha)); entropy_xy2.direction[i].red-=(density_x[x].direction[i].red* density_y[y].direction[i].red*MagickLog10(density_x[x].direction[i].red* density_y[y].direction[i].red)); entropy_xy2.direction[i].green-=(density_x[x].direction[i].green* density_y[y].direction[i].green*MagickLog10(density_x[x].direction[i].green* density_y[y].direction[i].green)); entropy_xy2.direction[i].blue-=(density_x[x].direction[i].blue* 
density_y[y].direction[i].blue*MagickLog10(density_x[x].direction[i].blue* density_y[y].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_xy2.direction[i].black-=(density_x[x].direction[i].black* density_y[y].direction[i].black*MagickLog10( density_x[x].direction[i].black*density_y[y].direction[i].black)); if (image->alpha_trait != UndefinedPixelTrait) entropy_xy2.direction[i].alpha-=(density_x[x].direction[i].alpha* density_y[y].direction[i].alpha*MagickLog10( density_x[x].direction[i].alpha*density_y[y].direction[i].alpha)); } } channel_features[RedPixelChannel].variance_sum_of_squares[i]= variance.direction[i].red; channel_features[GreenPixelChannel].variance_sum_of_squares[i]= variance.direction[i].green; channel_features[BluePixelChannel].variance_sum_of_squares[i]= variance.direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].variance_sum_of_squares[i]= variance.direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].variance_sum_of_squares[i]= variance.direction[i].alpha; } /* Compute more texture features. */ (void) ResetMagickMemory(&variance,0,sizeof(variance)); (void) ResetMagickMemory(&sum_squares,0,sizeof(sum_squares)); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Difference variance. 
*/ variance.direction[i].red+=density_xy[x].direction[i].red; variance.direction[i].green+=density_xy[x].direction[i].green; variance.direction[i].blue+=density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) variance.direction[i].black+=density_xy[x].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) variance.direction[i].alpha+=density_xy[x].direction[i].alpha; sum_squares.direction[i].red+=density_xy[x].direction[i].red* density_xy[x].direction[i].red; sum_squares.direction[i].green+=density_xy[x].direction[i].green* density_xy[x].direction[i].green; sum_squares.direction[i].blue+=density_xy[x].direction[i].blue* density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) sum_squares.direction[i].black+=density_xy[x].direction[i].black* density_xy[x].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) sum_squares.direction[i].alpha+=density_xy[x].direction[i].alpha* density_xy[x].direction[i].alpha; /* Difference entropy. */ channel_features[RedPixelChannel].difference_entropy[i]-= density_xy[x].direction[i].red* MagickLog10(density_xy[x].direction[i].red); channel_features[GreenPixelChannel].difference_entropy[i]-= density_xy[x].direction[i].green* MagickLog10(density_xy[x].direction[i].green); channel_features[BluePixelChannel].difference_entropy[i]-= density_xy[x].direction[i].blue* MagickLog10(density_xy[x].direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].difference_entropy[i]-= density_xy[x].direction[i].black* MagickLog10(density_xy[x].direction[i].black); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].difference_entropy[i]-= density_xy[x].direction[i].alpha* MagickLog10(density_xy[x].direction[i].alpha); /* Information Measures of Correlation. 
*/ entropy_x.direction[i].red-=(density_x[x].direction[i].red* MagickLog10(density_x[x].direction[i].red)); entropy_x.direction[i].green-=(density_x[x].direction[i].green* MagickLog10(density_x[x].direction[i].green)); entropy_x.direction[i].blue-=(density_x[x].direction[i].blue* MagickLog10(density_x[x].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_x.direction[i].black-=(density_x[x].direction[i].black* MagickLog10(density_x[x].direction[i].black)); if (image->alpha_trait != UndefinedPixelTrait) entropy_x.direction[i].alpha-=(density_x[x].direction[i].alpha* MagickLog10(density_x[x].direction[i].alpha)); entropy_y.direction[i].red-=(density_y[x].direction[i].red* MagickLog10(density_y[x].direction[i].red)); entropy_y.direction[i].green-=(density_y[x].direction[i].green* MagickLog10(density_y[x].direction[i].green)); entropy_y.direction[i].blue-=(density_y[x].direction[i].blue* MagickLog10(density_y[x].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_y.direction[i].black-=(density_y[x].direction[i].black* MagickLog10(density_y[x].direction[i].black)); if (image->alpha_trait != UndefinedPixelTrait) entropy_y.direction[i].alpha-=(density_y[x].direction[i].alpha* MagickLog10(density_y[x].direction[i].alpha)); } /* Difference variance. 
*/ channel_features[RedPixelChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].red)- (variance.direction[i].red*variance.direction[i].red))/ ((double) number_grays*number_grays*number_grays*number_grays); channel_features[GreenPixelChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].green)- (variance.direction[i].green*variance.direction[i].green))/ ((double) number_grays*number_grays*number_grays*number_grays); channel_features[BluePixelChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].blue)- (variance.direction[i].blue*variance.direction[i].blue))/ ((double) number_grays*number_grays*number_grays*number_grays); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].black)- (variance.direction[i].black*variance.direction[i].black))/ ((double) number_grays*number_grays*number_grays*number_grays); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].alpha)- (variance.direction[i].alpha*variance.direction[i].alpha))/ ((double) number_grays*number_grays*number_grays*number_grays); /* Information Measures of Correlation. */ channel_features[RedPixelChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].red-entropy_xy1.direction[i].red)/ (entropy_x.direction[i].red > entropy_y.direction[i].red ? entropy_x.direction[i].red : entropy_y.direction[i].red); channel_features[GreenPixelChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].green-entropy_xy1.direction[i].green)/ (entropy_x.direction[i].green > entropy_y.direction[i].green ? 
entropy_x.direction[i].green : entropy_y.direction[i].green); channel_features[BluePixelChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].blue-entropy_xy1.direction[i].blue)/ (entropy_x.direction[i].blue > entropy_y.direction[i].blue ? entropy_x.direction[i].blue : entropy_y.direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].black-entropy_xy1.direction[i].black)/ (entropy_x.direction[i].black > entropy_y.direction[i].black ? entropy_x.direction[i].black : entropy_y.direction[i].black); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].alpha-entropy_xy1.direction[i].alpha)/ (entropy_x.direction[i].alpha > entropy_y.direction[i].alpha ? entropy_x.direction[i].alpha : entropy_y.direction[i].alpha); channel_features[RedPixelChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].red- entropy_xy.direction[i].red))))); channel_features[GreenPixelChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].green- entropy_xy.direction[i].green))))); channel_features[BluePixelChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].blue- entropy_xy.direction[i].blue))))); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].black- entropy_xy.direction[i].black))))); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].alpha- entropy_xy.direction[i].alpha))))); } /* Compute more texture features. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { ssize_t z; for (z=0; z < (ssize_t) number_grays; z++) { register ssize_t y; ChannelStatistics pixel; (void) ResetMagickMemory(&pixel,0,sizeof(pixel)); for (y=0; y < (ssize_t) number_grays; y++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Contrast: amount of local variations present in an image. */ if (((y-x) == z) || ((x-y) == z)) { pixel.direction[i].red+=cooccurrence[x][y].direction[i].red; pixel.direction[i].green+=cooccurrence[x][y].direction[i].green; pixel.direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) pixel.direction[i].black+=cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) pixel.direction[i].alpha+= cooccurrence[x][y].direction[i].alpha; } /* Maximum Correlation Coefficient. */ Q[z][y].direction[i].red+=cooccurrence[z][x].direction[i].red* cooccurrence[y][x].direction[i].red/density_x[z].direction[i].red/ density_y[x].direction[i].red; Q[z][y].direction[i].green+=cooccurrence[z][x].direction[i].green* cooccurrence[y][x].direction[i].green/ density_x[z].direction[i].green/density_y[x].direction[i].red; Q[z][y].direction[i].blue+=cooccurrence[z][x].direction[i].blue* cooccurrence[y][x].direction[i].blue/density_x[z].direction[i].blue/ density_y[x].direction[i].blue; if (image->colorspace == CMYKColorspace) Q[z][y].direction[i].black+=cooccurrence[z][x].direction[i].black* cooccurrence[y][x].direction[i].black/ density_x[z].direction[i].black/density_y[x].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) Q[z][y].direction[i].alpha+= cooccurrence[z][x].direction[i].alpha* cooccurrence[y][x].direction[i].alpha/ density_x[z].direction[i].alpha/ density_y[x].direction[i].alpha; } } channel_features[RedPixelChannel].contrast[i]+=z*z* pixel.direction[i].red; 
channel_features[GreenPixelChannel].contrast[i]+=z*z* pixel.direction[i].green; channel_features[BluePixelChannel].contrast[i]+=z*z* pixel.direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].contrast[i]+=z*z* pixel.direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].contrast[i]+=z*z* pixel.direction[i].alpha; } /* Maximum Correlation Coefficient. Future: return second largest eigenvalue of Q. */ channel_features[RedPixelChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); channel_features[GreenPixelChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); channel_features[BluePixelChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); } /* Relinquish resources. 
*/ sum=(ChannelStatistics *) RelinquishMagickMemory(sum); for (i=0; i < (ssize_t) number_grays; i++) Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]); Q=(ChannelStatistics **) RelinquishMagickMemory(Q); density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y); density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy); density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x); for (i=0; i < (ssize_t) number_grays; i++) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence); return(channel_features); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % H o u g h L i n e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Use HoughLineImage() in conjunction with any binary edge extracted image (we % recommand Canny) to identify lines in the image. The algorithm accumulates % counts for every white pixel for every possible orientation (for angles from % 0 to 179 in 1 degree increments) and distance from the center of the image to % the corner (in 1 px increments) and stores the counts in an accumulator matrix % of angle vs distance. The size of the accumulator is 180x(diagonal/2). Next % it searches this space for peaks in counts and converts the locations of the % peaks to slope and intercept in the normal x,y input image space. Use the % slope/intercepts to find the endpoints clipped to the bounds of the image. The % lines are then drawn. The counts are a measure of the length of the lines % % The format of the HoughLineImage method is: % % Image *HoughLineImage(const Image *image,const size_t width, % const size_t height,const size_t threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width, height: find line pairs as local maxima in this neighborhood. 
% % o threshold: the line count threshold. % % o exception: return any errors or warnings in this structure. % */ static inline double MagickRound(double x) { /* Round the fraction to nearest integer. */ if ((x-floor(x)) < (ceil(x)-x)) return(floor(x)); return(ceil(x)); } static Image *RenderHoughLines(const ImageInfo *image_info,const size_t columns, const size_t rows,ExceptionInfo *exception) { #define BoundingBox "viewbox" DrawInfo *draw_info; Image *image; MagickBooleanType status; /* Open image. */ image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } image->columns=columns; image->rows=rows; draw_info=CloneDrawInfo(image_info,(DrawInfo *) NULL); draw_info->affine.sx=image->resolution.x == 0.0 ? 1.0 : image->resolution.x/ DefaultResolution; draw_info->affine.sy=image->resolution.y == 0.0 ? 1.0 : image->resolution.y/ DefaultResolution; image->columns=(size_t) (draw_info->affine.sx*image->columns); image->rows=(size_t) (draw_info->affine.sy*image->rows); status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); if (SetImageBackgroundColor(image,exception) == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Render drawing. 
*/ if (GetBlobStreamData(image) == (unsigned char *) NULL) draw_info->primitive=FileToString(image->filename,~0UL,exception); else { draw_info->primitive=(char *) AcquireMagickMemory((size_t) GetBlobSize(image)+1); if (draw_info->primitive != (char *) NULL) { (void) CopyMagickMemory(draw_info->primitive,GetBlobStreamData(image), (size_t) GetBlobSize(image)); draw_info->primitive[GetBlobSize(image)]='\0'; } } (void) DrawImage(image,draw_info,exception); draw_info=DestroyDrawInfo(draw_info); (void) CloseBlob(image); return(GetFirstImageInList(image)); } MagickExport Image *HoughLineImage(const Image *image,const size_t width, const size_t height,const size_t threshold,ExceptionInfo *exception) { #define HoughLineImageTag "HoughLine/Image" CacheView *image_view; char message[MagickPathExtent], path[MagickPathExtent]; const char *artifact; double hough_height; Image *lines_image = NULL; ImageInfo *image_info; int file; MagickBooleanType status; MagickOffsetType progress; MatrixInfo *accumulator; PointInfo center; register ssize_t y; size_t accumulator_height, accumulator_width, line_count; /* Create the accumulator. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); accumulator_width=180; hough_height=((sqrt(2.0)*(double) (image->rows > image->columns ? image->rows : image->columns))/2.0); accumulator_height=(size_t) (2.0*hough_height); accumulator=AcquireMatrixInfo(accumulator_width,accumulator_height, sizeof(double),exception); if (accumulator == (MatrixInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); if (NullMatrix(accumulator) == MagickFalse) { accumulator=DestroyMatrixInfo(accumulator); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Populate the accumulator. 
*/ status=MagickTrue; progress=0; center.x=(double) image->columns/2.0; center.y=(double) image->rows/2.0; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelIntensity(image,p) > (QuantumRange/2.0)) { register ssize_t i; for (i=0; i < 180; i++) { double count, radius; radius=(((double) x-center.x)*cos(DegreesToRadians((double) i)))+ (((double) y-center.y)*sin(DegreesToRadians((double) i))); (void) GetMatrixElement(accumulator,i,(ssize_t) MagickRound(radius+hough_height),&count); count++; (void) SetMatrixElement(accumulator,i,(ssize_t) MagickRound(radius+hough_height),&count); } } p+=GetPixelChannels(image); } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CannyEdgeImage) #endif proceed=SetImageProgress(image,CannyEdgeImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); if (status == MagickFalse) { accumulator=DestroyMatrixInfo(accumulator); return((Image *) NULL); } /* Generate line segments from accumulator. 
*/ file=AcquireUniqueFileResource(path); if (file == -1) { accumulator=DestroyMatrixInfo(accumulator); return((Image *) NULL); } (void) FormatLocaleString(message,MagickPathExtent, "# Hough line transform: %.20gx%.20g%+.20g\n",(double) width, (double) height,(double) threshold); if (write(file,message,strlen(message)) != (ssize_t) strlen(message)) status=MagickFalse; (void) FormatLocaleString(message,MagickPathExtent, "viewbox 0 0 %.20g %.20g\n",(double) image->columns,(double) image->rows); if (write(file,message,strlen(message)) != (ssize_t) strlen(message)) status=MagickFalse; line_count=image->columns > image->rows ? image->columns/4 : image->rows/4; if (threshold != 0) line_count=threshold; for (y=0; y < (ssize_t) accumulator_height; y++) { register ssize_t x; for (x=0; x < (ssize_t) accumulator_width; x++) { double count; (void) GetMatrixElement(accumulator,x,y,&count); if (count >= (double) line_count) { double maxima; SegmentInfo line; ssize_t v; /* Is point a local maxima? */ maxima=count; for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++) { ssize_t u; for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++) { if ((u != 0) || (v !=0)) { (void) GetMatrixElement(accumulator,x+u,y+v,&count); if (count > maxima) { maxima=count; break; } } } if (u < (ssize_t) (width/2)) break; } (void) GetMatrixElement(accumulator,x,y,&count); if (maxima > count) continue; if ((x >= 45) && (x <= 135)) { /* y = (r-x cos(t))/sin(t) */ line.x1=0.0; line.y1=((double) (y-(accumulator_height/2.0))-((line.x1- (image->columns/2.0))*cos(DegreesToRadians((double) x))))/ sin(DegreesToRadians((double) x))+(image->rows/2.0); line.x2=(double) image->columns; line.y2=((double) (y-(accumulator_height/2.0))-((line.x2- (image->columns/2.0))*cos(DegreesToRadians((double) x))))/ sin(DegreesToRadians((double) x))+(image->rows/2.0); } else { /* x = (r-y cos(t))/sin(t) */ line.y1=0.0; line.x1=((double) (y-(accumulator_height/2.0))-((line.y1- 
(image->rows/2.0))*sin(DegreesToRadians((double) x))))/ cos(DegreesToRadians((double) x))+(image->columns/2.0); line.y2=(double) image->rows; line.x2=((double) (y-(accumulator_height/2.0))-((line.y2- (image->rows/2.0))*sin(DegreesToRadians((double) x))))/ cos(DegreesToRadians((double) x))+(image->columns/2.0); } (void) FormatLocaleString(message,MagickPathExtent, "line %g,%g %g,%g # %g\n",line.x1,line.y1,line.x2,line.y2,maxima); if (write(file,message,strlen(message)) != (ssize_t) strlen(message)) status=MagickFalse; } } } (void) close(file); /* Render lines to image canvas. */ image_info=AcquireImageInfo(); image_info->background_color=image->background_color; (void) FormatLocaleString(image_info->filename,MagickPathExtent,"%s",path); artifact=GetImageArtifact(image,"background"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"background",artifact); artifact=GetImageArtifact(image,"fill"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"fill",artifact); artifact=GetImageArtifact(image,"stroke"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"stroke",artifact); artifact=GetImageArtifact(image,"strokewidth"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"strokewidth",artifact); lines_image=RenderHoughLines(image_info,image->columns,image->rows,exception); artifact=GetImageArtifact(image,"hough-lines:accumulator"); if ((lines_image != (Image *) NULL) && (IsStringTrue(artifact) != MagickFalse)) { Image *accumulator_image; accumulator_image=MatrixToImage(accumulator,exception); if (accumulator_image != (Image *) NULL) AppendImageToList(&lines_image,accumulator_image); } /* Free resources. 
*/ accumulator=DestroyMatrixInfo(accumulator); image_info=DestroyImageInfo(image_info); (void) RelinquishUniqueFileResource(path); return(GetFirstImageInList(lines_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M e a n S h i f t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MeanShiftImage() delineate arbitrarily shaped clusters in the image. For % each pixel, it visits all the pixels in the neighborhood specified by % the window centered at the pixel and excludes those that are outside the % radius=(window-1)/2 surrounding the pixel. From those pixels, it finds those % that are within the specified color distance from the current mean, and % computes a new x,y centroid from those coordinates and a new mean. This new % x,y centroid is used as the center for a new window. This process iterates % until it converges and the final mean is replaces the (original window % center) pixel value. It repeats this process for the next pixel, etc., % until it processes all pixels in the image. Results are typically better with % colorspaces other than sRGB. We recommend YIQ, YUV or YCbCr. % % The format of the MeanShiftImage method is: % % Image *MeanShiftImage(const Image *image,const size_t width, % const size_t height,const double color_distance, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width, height: find pixels in this neighborhood. % % o color_distance: the color distance. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *MeanShiftImage(const Image *image,const size_t width,
  const size_t height,const double color_distance,ExceptionInfo *exception)
{
#define MaxMeanShiftIterations  100
#define MeanShiftImageTag  "MeanShift/Image"

  CacheView
    *image_view,
    *mean_view,
    *pixel_view;

  Image
    *mean_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  mean_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (mean_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(mean_image,DirectClass,exception) == MagickFalse)
    {
      mean_image=DestroyImage(mean_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  pixel_view=AcquireVirtualCacheView(image,exception);
  mean_view=AcquireAuthenticCacheView(mean_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status,progress) \
    magick_threads(mean_image,mean_image,mean_image->rows,1)
#endif
  for (y=0; y < (ssize_t) mean_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(mean_view,0,y,mean_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) mean_image->columns; x++)
    {
      PixelInfo
        mean_pixel,
        previous_pixel;

      PointInfo
        mean_location,
        previous_location;

      register ssize_t
        i;

      /*
        Seed the mean with the pixel itself, then iterate the mean-shift
        until the window center and mean color stop moving.
      */
      GetPixelInfo(image,&mean_pixel);
      GetPixelInfoPixel(image,p,&mean_pixel);
      mean_location.x=(double) x;
      mean_location.y=(double) y;
      for (i=0; i < MaxMeanShiftIterations; i++)
      {
        double
          distance,
          gamma;

        PixelInfo
          sum_pixel;

        PointInfo
          sum_location;

        ssize_t
          count,
          v;

        sum_location.x=0.0;
        sum_location.y=0.0;
        GetPixelInfo(image,&sum_pixel);
        previous_location=mean_location;
        previous_pixel=mean_pixel;
        count=0;
        for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++)
        {
          ssize_t
            u;

          for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++)
          {
            /*
              Restrict the window to an ellipse inscribed in width x height.
            */
            if ((v*v+u*u) <= (ssize_t) ((width/2)*(height/2)))
              {
                PixelInfo
                  pixel;

                status=GetOneCacheViewVirtualPixelInfo(pixel_view,(ssize_t)
                  MagickRound(mean_location.x+u),(ssize_t) MagickRound(
                  mean_location.y+v),&pixel,exception);
                distance=(mean_pixel.red-pixel.red)*(mean_pixel.red-pixel.red)+
                  (mean_pixel.green-pixel.green)*(mean_pixel.green-pixel.green)+
                  (mean_pixel.blue-pixel.blue)*(mean_pixel.blue-pixel.blue);
                if (distance <= (color_distance*color_distance))
                  {
                    sum_location.x+=mean_location.x+u;
                    sum_location.y+=mean_location.y+v;
                    sum_pixel.red+=pixel.red;
                    sum_pixel.green+=pixel.green;
                    sum_pixel.blue+=pixel.blue;
                    sum_pixel.alpha+=pixel.alpha;
                    count++;
                  }
              }
          }
        }
        /*
          If no pixel in the window is within the color distance, the mean
          cannot move: stop iterating.  (The original divided 1.0/count here,
          producing NaNs when count was zero.)
        */
        if (count == 0)
          break;
        gamma=1.0/(double) count;
        mean_location.x=gamma*sum_location.x;
        mean_location.y=gamma*sum_location.y;
        mean_pixel.red=gamma*sum_pixel.red;
        mean_pixel.green=gamma*sum_pixel.green;
        mean_pixel.blue=gamma*sum_pixel.blue;
        mean_pixel.alpha=gamma*sum_pixel.alpha;
        /*
          Converged when the window moved less than ~sqrt(3) in the combined
          spatial + (255-scaled) color space.
        */
        distance=(mean_location.x-previous_location.x)*
          (mean_location.x-previous_location.x)+
          (mean_location.y-previous_location.y)*
          (mean_location.y-previous_location.y)+
          255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)*
          255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)+
          255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)*
          255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)+
          255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue)*
          255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue);
        if (distance <= 3.0)
          break;
      }
      SetPixelRed(mean_image,ClampToQuantum(mean_pixel.red),q);
      SetPixelGreen(mean_image,ClampToQuantum(mean_pixel.green),q);
      SetPixelBlue(mean_image,ClampToQuantum(mean_pixel.blue),q);
      SetPixelAlpha(mean_image,ClampToQuantum(mean_pixel.alpha),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(mean_image);
    }
    if (SyncCacheViewAuthenticPixels(mean_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_MeanShiftImage)
#endif
        proceed=SetImageProgress(image,MeanShiftImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  mean_view=DestroyCacheView(mean_view);
  pixel_view=DestroyCacheView(pixel_view);
  image_view=DestroyCacheView(image_view);
  return(mean_image);
}
base_ptr_ref_count.c
// RUN: %libomptarget-compile-aarch64-unknown-linux-gnu && env LIBOMPTARGET_DEBUG=1 %libomptarget-run-aarch64-unknown-linux-gnu 2>&1 | %fcheck-aarch64-unknown-linux-gnu // RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu && env LIBOMPTARGET_DEBUG=1 %libomptarget-run-powerpc64-ibm-linux-gnu 2>&1 | %fcheck-powerpc64-ibm-linux-gnu // RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu && env LIBOMPTARGET_DEBUG=1 %libomptarget-run-powerpc64le-ibm-linux-gnu 2>&1 | %fcheck-powerpc64le-ibm-linux-gnu // RUN: %libomptarget-compile-x86_64-pc-linux-gnu && env LIBOMPTARGET_DEBUG=1 %libomptarget-run-x86_64-pc-linux-gnu 2>&1 | %fcheck-x86_64-pc-linux-gnu // RUN: %libomptarget-compile-nvptx64-nvidia-cuda && env LIBOMPTARGET_DEBUG=1 %libomptarget-run-nvptx64-nvidia-cuda 2>&1 | %fcheck-nvptx64-nvidia-cuda // REQUIRES: libomptarget-debug #include <stdlib.h> #include <stdio.h> int *allocate(size_t n) { int *ptr = malloc(sizeof(int) * n); #pragma omp target enter data map(to : ptr[:n]) return ptr; } void deallocate(int *ptr, size_t n) { #pragma omp target exit data map(delete : ptr[:n]) free(ptr); } #pragma omp declare target int *cnt; void foo() { ++(*cnt); } #pragma omp end declare target int main(void) { int *A = allocate(10); int *V = allocate(10); deallocate(A, 10); deallocate(V, 10); // CHECK-NOT: RefCount=2 cnt = malloc(sizeof(int)); *cnt = 0; #pragma omp target data map(cnt[:1]) #pragma omp target foo(); printf("Cnt = %d.\n", *cnt); // CHECK: Cnt = 1. free(cnt); return 0; }
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 16; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < 
Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) + coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
main.c
/**
 @file
 @brief Main function for APPFS ex10. Reads in data, starts worker threads and outputs results.
 @author Tri-Peter Shrive
*/

#include "ex10.h"

/** Reads a .gph edge list, builds the undirected adjacency arrays, then
 *  computes a Steiner tree heuristic starting from up to
 *  max_num_start_terminals prime-numbered terminals (optionally in
 *  parallel), keeping the cheapest tree that connects all terminals. */
int main( int argc, const char* const* const argv )
{
   printf( "\n" );

   if( 2 > argc || 4 < argc )
   {
      fprintf( stderr, "USAGE: %s *.gph ( number of terminals to start from ) ( -s )\n", argv[0] );
      exit( EXIT_FAILURE );
   }

   int i;
   int return_value = 0;
   unsigned int s_flag = 0;
   unsigned int max_num_start_terminals = 0;

   for( i = 2; i < argc; i++ )
   {
      // optionally print steiner tree
      if( 0 == strcmp( argv[i], "-s" ) )
         s_flag = 1;
      // set number of terminals to start from
      else if( 2 == i )
         return_value = sscanf( argv[i], "%u", &max_num_start_terminals );
   }
   // default is 100
   if( 1 != return_value )
      max_num_start_terminals = 100;

   // open file for reading
   FILE* fp = fopen( argv[1], "r" );
   assert( fp );

   unsigned int line_number = 0;
   char line[MAX_LINE_LENGTH];

   // BUG FIX: the result of the first fgets() was ignored; bail out on an
   // empty file instead of parsing an indeterminate buffer.
   if( NULL == fgets( line, sizeof( line ), fp ) )
   {
      fprintf( stderr, "ERROR: could not read first line of %s\n", argv[1] );
      exit( EXIT_FAILURE );
   }
   line_number++;

   // remove comments and anything after newline
   char* character = strpbrk( line, "#\n" );
   if( NULL != character )
      *character = '\0';

   // skip initial spaces
   for( character = &line[0]; isspace( *character ); character++ )
      ;
   assert( '\0' != *character );

   unsigned int num_nodes;
   unsigned int num_edges;

   // first line holds "<num_nodes> <num_edges>"
   return_value = sscanf( character, "%u %u", &num_nodes, &num_edges );
   // BUG FIX: previously unchecked; garbage counts would propagate everywhere
   assert( 2 == return_value );

   // undirected graph: every edge is stored in both directions
   num_edges += num_edges;
   // node 0 may not exist
   num_nodes++;

   unsigned int array_next = 0;
   unsigned int array_length = EXTENSION_LENGTH;

   unsigned int* tails = NULL;
   unsigned int* heads = NULL;
   unsigned long long int* edge_weights = NULL;
   unsigned int* num_neighbours = NULL;

   tails = malloc( array_length * sizeof( unsigned int ) );
   heads = malloc( array_length * sizeof( unsigned int ) );
   edge_weights = malloc( array_length * sizeof( unsigned long long int ) );
   num_neighbours = calloc( num_nodes, sizeof( unsigned int ) );
   assert( tails );
   assert( heads );
   assert( edge_weights );
   assert( num_neighbours );

   unsigned int temp_tail = INT_MAX;
   unsigned int temp_head = INT_MAX;
   unsigned long long int temp_edge_weight = LLONG_MAX;

   while( NULL != fgets( line, sizeof(line), fp ) )
   {
      line_number++;

      // remove comments and anything after newline
      character = strpbrk( line, "#\n" );
      if( NULL != character )
         *character = '\0';

      // skip initial spaces
      for( character = &line[0]; isspace( *character ); character++ )
         ;

      // skip line if is empty
      if( '\0' == *character )
         continue;

      // read edge entries
      return_value = sscanf( character, "%u %u %llu", &temp_tail, &temp_head, &temp_edge_weight );
      if( 3 != return_value )
      {
         fprintf( stderr, "\nWARNING: line %u, sscanf returned %u != 3\n\n", line_number, return_value );
         continue;
      }
      if( num_nodes <= temp_tail )
      {
         fprintf( stderr, "\nWARNING: line %u, tail > number of nodes\n\n", line_number );
         continue;
      }
      if( num_nodes <= temp_head )
      {
         fprintf( stderr, "\nWARNING: line %u, head > number of nodes\n\n", line_number );
         continue;
      }
      if( 2000000000 <= temp_edge_weight )
      {
         fprintf( stderr, "\nWARNING: line %u, edge_weight >= 2000000000\n\n", line_number );
         continue;
      }

      // check there's enough space; if not enlarge arrays
      if( array_next == array_length )
      {
         array_length += EXTENSION_LENGTH;
         tails = realloc( tails, array_length * sizeof( unsigned int ) );
         heads = realloc( heads, array_length * sizeof( unsigned int ) );
         edge_weights = realloc( edge_weights, array_length * sizeof( unsigned long long int ) );
         assert( tails );
         assert( heads );
         assert( edge_weights );
      }
      // store edge attributes in memory
      tails[array_next] = temp_tail;
      heads[array_next] = temp_head;
      edge_weights[array_next] = temp_edge_weight;
      array_next++;
      num_neighbours[temp_tail]++;

      // repeat with head and tail inverted for undirected graph
      if( array_next == array_length )
      {
         array_length += EXTENSION_LENGTH;
         tails = realloc( tails, array_length * sizeof( unsigned int ) );
         heads = realloc( heads, array_length * sizeof( unsigned int ) );
         edge_weights = realloc( edge_weights, array_length * sizeof( unsigned long long int ) );
         assert( tails );
         assert( heads );
         assert( edge_weights );
      }
      // store reversed edge attributes in memory
      tails[array_next] = temp_head;
      heads[array_next] = temp_tail;
      edge_weights[array_next] = temp_edge_weight;
      array_next++;
      num_neighbours[temp_head]++;
   }
   // BUG FIX: fclose() used to be called inside assert(), so the file was
   // never closed in NDEBUG builds; keep the side effect outside the assert.
   return_value = fclose( fp );
   assert( 0 == return_value );

   assert( array_next == num_edges );

   // calculate index of first neighbour for sorted lists (prefix sums)
   unsigned int j;
   unsigned int* first_neighbours_index = NULL;
   first_neighbours_index = calloc( num_nodes, sizeof( unsigned int ) );
   assert( first_neighbours_index );

   for( j = 1; j < num_nodes; j++ )
      first_neighbours_index[j] = first_neighbours_index[j - 1] + num_neighbours[j - 1];

   // sort heads and weights by tails (counting sort into CSR-style arrays)
   unsigned int* num_neighbours_found = NULL;
   unsigned int* sorted_heads = NULL;
   unsigned int* sorted_tails = NULL;
   unsigned long long int* sorted_weights = NULL;

   num_neighbours_found = calloc( num_nodes, sizeof( unsigned int ) );
   sorted_heads = malloc( num_edges * sizeof( unsigned int ) );
   sorted_tails = malloc( num_edges * sizeof( unsigned int ) );
   sorted_weights = malloc( num_edges * sizeof( unsigned long long int ) );
   assert( num_neighbours_found );
   assert( sorted_heads );
   assert( sorted_tails );
   assert( sorted_weights );

   unsigned int k;
   unsigned int l;
   unsigned long long m;
   unsigned int index;

   for( j = 0; j < num_edges; j++ )
   {
      k = tails[j];
      l = heads[j];
      m = edge_weights[j];
      assert( k < num_nodes );
      assert( l < num_nodes );
      index = first_neighbours_index[k] + num_neighbours_found[k];
      sorted_weights[index] = m;
      sorted_heads[index] = l;
      sorted_tails[index] = k;
      num_neighbours_found[k]++;
   }
   free( tails );
   free( heads );
   free( edge_weights );

#ifndef NDEBUG
   // make sure we still have all edges
   unsigned int total_neighbours = 0;
   for( j = 0; j < num_nodes; j++ )
   {
      total_neighbours += num_neighbours_found[j];
   }
   assert( num_edges == total_neighbours );
#endif
   free( num_neighbours_found );

   // calculate prime numbers: the prime-numbered nodes are the terminals
   unsigned int* is_prime = NULL;
   is_prime = calloc( num_nodes, sizeof( unsigned int ) );
   assert( is_prime );

   unsigned int num_terminals = get_primes( is_prime, num_nodes );

   unsigned int* terminals = NULL;
   terminals = malloc( num_terminals * sizeof( unsigned int ) );
   assert( terminals );

   k = 0;
   for( j = 0; j < num_nodes; j++ )
      if( 1 == is_prime[j] )
      {
         terminals[k] = j;
         k++;
      }
   assert( k == num_terminals );

   printf( "NODES: %u\n", num_nodes );
   printf( "EDGES: %u\n", num_edges/2 );
   printf( "TERMINALS: %u\n",num_terminals );
   printf( "\n" );

   // we don't want to attempt to start from more terminals than there are
   if( num_terminals < max_num_start_terminals )
      max_num_start_terminals = num_terminals;

   // start wall clock
   struct timeval start_wall;
   unsigned int fail = 1;
   fail = gettimeofday( &start_wall, NULL );
   assert( 0 == fail );

   // start counting cpu clocks
   double start_cpu = ( double ) clock() / ( double ) CLOCKS_PER_SEC;

   // calculate steiner tree
   unsigned int* tree = NULL;
   unsigned int* prev = NULL;
   tree = calloc( num_edges, sizeof( unsigned int ) );
   prev = malloc( num_nodes * sizeof( unsigned int ) );
   assert( prev );
   assert( tree );

   // BUG FIX: obj_value is unsigned long long; the sentinel was LLONG_MAX,
   // use the correct maximum for the type.
   unsigned long long int obj_value = ULLONG_MAX;

   unsigned int* temp_tree = NULL;
   unsigned int* prev_edge_index = NULL;

#ifdef THREADS
#pragma omp parallel for default( none ) shared( prev, tree, obj_value, is_prime, terminals, num_terminals, max_num_start_terminals, first_neighbours_index, num_neighbours, num_nodes, num_edges, sorted_weights, sorted_heads, sorted_tails ) private( j, k, temp_tree, prev_edge_index ) num_threads( THREADS )
#endif
   for( j = 0; j < max_num_start_terminals; j++ )
   {
      temp_tree = calloc( num_edges, sizeof( unsigned int ) );
      prev_edge_index = malloc( num_nodes * sizeof( unsigned int ) );
      assert( temp_tree );
      assert( prev_edge_index );

      unsigned int terminals_connected = 0;
      unsigned int source = terminals[j];

      terminals_connected = get_steiner_tree( is_prime, terminals, num_terminals, num_nodes, num_edges, first_neighbours_index, num_neighbours, sorted_weights, sorted_heads, sorted_tails, temp_tree, prev_edge_index, source );

      // BUG FIX: the accumulator was a 32-bit unsigned int; with edge
      // weights up to 2e9 a handful of tree edges overflow it.  Use the
      // same 64-bit type as obj_value.
      unsigned long long int temp_obj_value = 0;
      for( k = 0; k < num_edges; k++ )
      {
         if( 1 == temp_tree[k] )
         {
            temp_obj_value += sorted_weights[k];
         }
      }
#ifdef THREADS
#pragma omp critical ( objective_value )
#endif
      // keep the cheapest tree that connects all terminals
      if( temp_obj_value < obj_value && terminals_connected == num_terminals )
      {
         obj_value = temp_obj_value;
         memcpy( tree, temp_tree, num_edges * sizeof( unsigned int ) );
         memcpy( prev, prev_edge_index, num_nodes * sizeof( unsigned int ) );
      }
      free( temp_tree );
      free( prev_edge_index );
   }

   // stop wall clock
   struct timeval stop_wall;
   fail = 1;
   fail = gettimeofday( &stop_wall, NULL );
   assert( 0 == fail );

   // stop counting cpu clocks
   double stop_cpu = ( double ) clock() / ( double ) CLOCKS_PER_SEC;

   // calculate durations of both
   double duration_wall = ( stop_wall.tv_sec + stop_wall.tv_usec * 0.000001 ) - ( start_wall.tv_sec + start_wall.tv_usec * 0.000001 );
   double duration_cpu = stop_cpu - start_cpu;

   if( s_flag )
   {
      printf( "TREE: " );
      for( j = 0; j < num_edges; j++ )
         if( tree[j] )
            printf( "(%u,%u) ", sorted_tails[j], sorted_heads[j] );
      printf( "\n\n" );
   }
   printf( "TLEN: %llu\n", obj_value );
   printf( "TIME: %lf sec\n", duration_cpu );
   printf( "WALL: %lf sec\n", duration_wall );
   printf( "\n" );

   int tree_valid = 0;
   tree_valid = is_tree_valid( sorted_heads, sorted_tails, prev, terminals, num_terminals, num_nodes, num_edges);
   ( 1 == tree_valid ) ? printf( "TREE: TRUE\n" ) : printf( "TREE: FALSE\n" );
   printf( "\n" );

   free( tree );
   free( is_prime );
   free( terminals );
   free( first_neighbours_index );
   free( num_neighbours );
   free( sorted_weights );
   free( sorted_heads );
   free( sorted_tails );
   free( prev );

   return 0;
}
task_tied_scheduling.c
// RUN: %libomp-compile && env KMP_ABT_NUM_ESS=4 %libomp-run // REQUIRES: abt #include "omp_testsuite.h" #include "bolt_scheduling_util.h" int test_task_tied_scheduling() { int i, vals[6]; memset(vals, 0, sizeof(int) * 6); timeout_barrier_t barrier; timeout_barrier_init(&barrier); #pragma omp parallel num_threads(4) { // 6 barrier_waits in tasks and 2 barrier_waits in threads #pragma omp master { check_num_ess(4); for (i = 0; i < 6; i++) { #pragma omp task firstprivate(i) { timeout_barrier_wait(&barrier, 4); vals[i] = 1; } } } if (omp_get_thread_num() < 2) { timeout_barrier_wait(&barrier, 4); } } for (i = 0; i < 6; i++) { if (vals[i] != 1) { printf("vals[%d] == %d\n", i, vals[i]); return 0; } } return 1; } int main() { int i, num_failed = 0; for (i = 0; i < REPETITIONS; i++) { if (!test_task_tied_scheduling(i)) { num_failed++; } } return num_failed; }
gimple.h
/* Gimple IR definitions. Copyright 2007, 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc. Contributed by Aldy Hernandez <aldyh@redhat.com> This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #ifndef GCC_GIMPLE_H #define GCC_GIMPLE_H #include "pointer-set.h" #include "vec.h" #include "vecprim.h" #include "vecir.h" #include "ggc.h" #include "basic-block.h" #include "tree-ssa-operands.h" #include "tree-ssa-alias.h" #include "internal-fn.h" struct gimple_seq_node_d; typedef struct gimple_seq_node_d *gimple_seq_node; typedef const struct gimple_seq_node_d *const_gimple_seq_node; /* For each block, the PHI nodes that need to be rewritten are stored into these vectors. */ typedef VEC(gimple, heap) *gimple_vec; DEF_VEC_P (gimple_vec); DEF_VEC_ALLOC_P (gimple_vec, heap); enum gimple_code { #define DEFGSCODE(SYM, STRING, STRUCT) SYM, #include "gimple.def" #undef DEFGSCODE LAST_AND_UNUSED_GIMPLE_CODE }; extern const char *const gimple_code_name[]; extern const unsigned char gimple_rhs_class_table[]; /* Error out if a gimple tuple is addressed incorrectly. 
*/ #if defined ENABLE_GIMPLE_CHECKING #define gcc_gimple_checking_assert(EXPR) gcc_assert (EXPR) extern void gimple_check_failed (const_gimple, const char *, int, \ const char *, enum gimple_code, \ enum tree_code) ATTRIBUTE_NORETURN; #define GIMPLE_CHECK(GS, CODE) \ do { \ const_gimple __gs = (GS); \ if (gimple_code (__gs) != (CODE)) \ gimple_check_failed (__gs, __FILE__, __LINE__, __FUNCTION__, \ (CODE), ERROR_MARK); \ } while (0) #else /* not ENABLE_GIMPLE_CHECKING */ #define gcc_gimple_checking_assert(EXPR) ((void)(0 && (EXPR))) #define GIMPLE_CHECK(GS, CODE) (void)0 #endif /* Class of GIMPLE expressions suitable for the RHS of assignments. See get_gimple_rhs_class. */ enum gimple_rhs_class { GIMPLE_INVALID_RHS, /* The expression cannot be used on the RHS. */ GIMPLE_TERNARY_RHS, /* The expression is a ternary operation. */ GIMPLE_BINARY_RHS, /* The expression is a binary operation. */ GIMPLE_UNARY_RHS, /* The expression is a unary operation. */ GIMPLE_SINGLE_RHS /* The expression is a single object (an SSA name, a _DECL, a _REF, etc. */ }; /* Specific flags for individual GIMPLE statements. These flags are always stored in gimple_statement_base.subcode and they may only be defined for statement codes that do not use sub-codes. Values for the masks can overlap as long as the overlapping values are never used in the same statement class. The maximum mask value that can be defined is 1 << 15 (i.e., each statement code can hold up to 16 bitflags). Keep this list sorted. */ enum gf_mask { GF_ASM_INPUT = 1 << 0, GF_ASM_VOLATILE = 1 << 1, GF_CALL_FROM_THUNK = 1 << 0, GF_CALL_RETURN_SLOT_OPT = 1 << 1, GF_CALL_TAILCALL = 1 << 2, GF_CALL_VA_ARG_PACK = 1 << 3, GF_CALL_NOTHROW = 1 << 4, GF_CALL_ALLOCA_FOR_VAR = 1 << 5, GF_CALL_INTERNAL = 1 << 6, GF_OMP_PARALLEL_COMBINED = 1 << 0, /* True on an GIMPLE_OMP_RETURN statement if the return does not require a thread synchronization via some sort of barrier. 
The exact barrier that would otherwise be emitted is dependent on the OMP statement with which this return is associated. */ GF_OMP_RETURN_NOWAIT = 1 << 0, GF_OMP_SECTION_LAST = 1 << 0, GF_OMP_ATOMIC_NEED_VALUE = 1 << 0, GF_PREDICT_TAKEN = 1 << 15 }; /* Currently, there are only two types of gimple debug stmt. Others are envisioned, for example, to enable the generation of is_stmt notes in line number information, to mark sequence points, etc. This subcode is to be used to tell them apart. */ enum gimple_debug_subcode { GIMPLE_DEBUG_BIND = 0, GIMPLE_DEBUG_SOURCE_BIND = 1 }; /* Masks for selecting a pass local flag (PLF) to work on. These masks are used by gimple_set_plf and gimple_plf. */ enum plf_mask { GF_PLF_1 = 1 << 0, GF_PLF_2 = 1 << 1 }; /* A node in a gimple_seq_d. */ struct GTY((chain_next ("%h.next"), chain_prev ("%h.prev"))) gimple_seq_node_d { gimple stmt; struct gimple_seq_node_d *prev; struct gimple_seq_node_d *next; }; /* A double-linked sequence of gimple statements. */ struct GTY ((chain_next ("%h.next_free"))) gimple_seq_d { /* First and last statements in the sequence. */ gimple_seq_node first; gimple_seq_node last; /* Sequences are created/destroyed frequently. To minimize allocation activity, deallocated sequences are kept in a pool of available sequences. This is the pointer to the next free sequence in the pool. */ gimple_seq next_free; }; /* Return the first node in GIMPLE sequence S. */ static inline gimple_seq_node gimple_seq_first (const_gimple_seq s) { return s ? s->first : NULL; } /* Return the first statement in GIMPLE sequence S. */ static inline gimple gimple_seq_first_stmt (const_gimple_seq s) { gimple_seq_node n = gimple_seq_first (s); return (n) ? n->stmt : NULL; } /* Return the last node in GIMPLE sequence S. */ static inline gimple_seq_node gimple_seq_last (const_gimple_seq s) { return s ? s->last : NULL; } /* Return the last statement in GIMPLE sequence S. 
*/ static inline gimple gimple_seq_last_stmt (const_gimple_seq s) { gimple_seq_node n = gimple_seq_last (s); return (n) ? n->stmt : NULL; } /* Set the last node in GIMPLE sequence S to LAST. */ static inline void gimple_seq_set_last (gimple_seq s, gimple_seq_node last) { s->last = last; } /* Set the first node in GIMPLE sequence S to FIRST. */ static inline void gimple_seq_set_first (gimple_seq s, gimple_seq_node first) { s->first = first; } /* Return true if GIMPLE sequence S is empty. */ static inline bool gimple_seq_empty_p (const_gimple_seq s) { return s == NULL || s->first == NULL; } void gimple_seq_add_stmt (gimple_seq *, gimple); /* Link gimple statement GS to the end of the sequence *SEQ_P. If *SEQ_P is NULL, a new sequence is allocated. This function is similar to gimple_seq_add_stmt, but does not scan the operands. During gimplification, we need to manipulate statement sequences before the def/use vectors have been constructed. */ void gimple_seq_add_stmt_without_update (gimple_seq *, gimple); /* Allocate a new sequence and initialize its first element with STMT. */ static inline gimple_seq gimple_seq_alloc_with_stmt (gimple stmt) { gimple_seq seq = NULL; gimple_seq_add_stmt (&seq, stmt); return seq; } /* Returns the sequence of statements in BB. */ static inline gimple_seq bb_seq (const_basic_block bb) { return (!(bb->flags & BB_RTL) && bb->il.gimple) ? bb->il.gimple->seq : NULL; } /* Sets the sequence of statements in BB to SEQ. */ static inline void set_bb_seq (basic_block bb, gimple_seq seq) { gcc_checking_assert (!(bb->flags & BB_RTL)); bb->il.gimple->seq = seq; } /* Iterator object for GIMPLE statement sequences. */ typedef struct { /* Sequence node holding the current statement. */ gimple_seq_node ptr; /* Sequence and basic block holding the statement. These fields are necessary to handle edge cases such as when statement is added to an empty basic block or when the last statement of a block/sequence is removed. 
*/ gimple_seq seq; basic_block bb; } gimple_stmt_iterator; /* Data structure definitions for GIMPLE tuples. NOTE: word markers are for 64 bit hosts. */ struct GTY(()) gimple_statement_base { /* [ WORD 1 ] Main identifying code for a tuple. */ ENUM_BITFIELD(gimple_code) code : 8; /* Nonzero if a warning should not be emitted on this tuple. */ unsigned int no_warning : 1; /* Nonzero if this tuple has been visited. Passes are responsible for clearing this bit before using it. */ unsigned int visited : 1; /* Nonzero if this tuple represents a non-temporal move. */ unsigned int nontemporal_move : 1; /* Pass local flags. These flags are free for any pass to use as they see fit. Passes should not assume that these flags contain any useful value when the pass starts. Any initial state that the pass requires should be set on entry to the pass. See gimple_set_plf and gimple_plf for usage. */ unsigned int plf : 2; /* Nonzero if this statement has been modified and needs to have its operands rescanned. */ unsigned modified : 1; /* Nonzero if this statement contains volatile operands. */ unsigned has_volatile_ops : 1; /* Padding to get subcode to 16 bit alignment. */ unsigned pad : 1; /* The SUBCODE field can be used for tuple-specific flags for tuples that do not require subcodes. Note that SUBCODE should be at least as wide as tree codes, as several tuples store tree codes in there. */ unsigned int subcode : 16; /* UID of this statement. This is used by passes that want to assign IDs to statements. It must be assigned and used by each pass. By default it should be assumed to contain garbage. */ unsigned uid; /* [ WORD 2 ] Locus information for debug info. */ location_t location; /* Number of operands in this tuple. */ unsigned num_ops; /* [ WORD 3 ] Basic block holding this statement. */ struct basic_block_def *bb; /* [ WORD 4 ] Lexical block holding this statement. */ tree block; }; /* Base structure for tuples with operands. 
*/ struct GTY(()) gimple_statement_with_ops_base { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5-6 ] SSA operand vectors. NOTE: It should be possible to amalgamate these vectors with the operand vector OP. However, the SSA operand vectors are organized differently and contain more information (like immediate use chaining). */ struct def_optype_d GTY((skip (""))) *def_ops; struct use_optype_d GTY((skip (""))) *use_ops; }; /* Statements that take register operands. */ struct GTY(()) gimple_statement_with_ops { /* [ WORD 1-6 ] */ struct gimple_statement_with_ops_base opbase; /* [ WORD 7 ] Operand vector. NOTE! This must always be the last field of this structure. In particular, this means that this structure cannot be embedded inside another one. */ tree GTY((length ("%h.opbase.gsbase.num_ops"))) op[1]; }; /* Base for statements that take both memory and register operands. */ struct GTY(()) gimple_statement_with_memory_ops_base { /* [ WORD 1-6 ] */ struct gimple_statement_with_ops_base opbase; /* [ WORD 7-8 ] Virtual operands for this statement. The GC will pick them up via the ssa_names array. */ tree GTY((skip (""))) vdef; tree GTY((skip (""))) vuse; }; /* Statements that take both memory and register operands. */ struct GTY(()) gimple_statement_with_memory_ops { /* [ WORD 1-8 ] */ struct gimple_statement_with_memory_ops_base membase; /* [ WORD 9 ] Operand vector. NOTE! This must always be the last field of this structure. In particular, this means that this structure cannot be embedded inside another one. */ tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1]; }; /* Call statements that take both memory and register operands. 
*/ struct GTY(()) gimple_statement_call { /* [ WORD 1-8 ] */ struct gimple_statement_with_memory_ops_base membase; /* [ WORD 9-12 ] */ struct pt_solution call_used; struct pt_solution call_clobbered; /* [ WORD 13 ] */ union GTY ((desc ("%1.membase.opbase.gsbase.subcode & GF_CALL_INTERNAL"))) { tree GTY ((tag ("0"))) fntype; enum internal_fn GTY ((tag ("GF_CALL_INTERNAL"))) internal_fn; } u; /* [ WORD 14 ] Operand vector. NOTE! This must always be the last field of this structure. In particular, this means that this structure cannot be embedded inside another one. */ tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1]; }; /* OpenMP statements (#pragma omp). */ struct GTY(()) gimple_statement_omp { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] */ gimple_seq body; }; /* GIMPLE_BIND */ struct GTY(()) gimple_statement_bind { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] Variables declared in this scope. */ tree vars; /* [ WORD 6 ] This is different than the BLOCK field in gimple_statement_base, which is analogous to TREE_BLOCK (i.e., the lexical block holding this statement). This field is the equivalent of BIND_EXPR_BLOCK in tree land (i.e., the lexical scope defined by this bind). See gimple-low.c. */ tree block; /* [ WORD 7 ] */ gimple_seq body; }; /* GIMPLE_CATCH */ struct GTY(()) gimple_statement_catch { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] */ tree types; /* [ WORD 6 ] */ gimple_seq handler; }; /* GIMPLE_EH_FILTER */ struct GTY(()) gimple_statement_eh_filter { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] Filter types. */ tree types; /* [ WORD 6 ] Failure actions. 
*/ gimple_seq failure; }; /* GIMPLE_EH_ELSE */ struct GTY(()) gimple_statement_eh_else { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5,6 ] */ gimple_seq n_body, e_body; }; /* GIMPLE_EH_MUST_NOT_THROW */ struct GTY(()) gimple_statement_eh_mnt { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] Abort function decl. */ tree fndecl; }; /* GIMPLE_PHI */ struct GTY(()) gimple_statement_phi { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] */ unsigned capacity; unsigned nargs; /* [ WORD 6 ] */ tree result; /* [ WORD 7 ] */ struct phi_arg_d GTY ((length ("%h.nargs"))) args[1]; }; /* GIMPLE_RESX, GIMPLE_EH_DISPATCH */ struct GTY(()) gimple_statement_eh_ctrl { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] Exception region number. */ int region; }; /* GIMPLE_TRY */ struct GTY(()) gimple_statement_try { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] Expression to evaluate. */ gimple_seq eval; /* [ WORD 6 ] Cleanup expression. */ gimple_seq cleanup; }; /* Kind of GIMPLE_TRY statements. */ enum gimple_try_flags { /* A try/catch. */ GIMPLE_TRY_CATCH = 1 << 0, /* A try/finally. */ GIMPLE_TRY_FINALLY = 1 << 1, GIMPLE_TRY_KIND = GIMPLE_TRY_CATCH | GIMPLE_TRY_FINALLY, /* Analogous to TRY_CATCH_IS_CLEANUP. */ GIMPLE_TRY_CATCH_IS_CLEANUP = 1 << 2 }; /* GIMPLE_WITH_CLEANUP_EXPR */ struct GTY(()) gimple_statement_wce { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* Subcode: CLEANUP_EH_ONLY. True if the cleanup should only be executed if an exception is thrown, not on normal exit of its scope. This flag is analogous to the CLEANUP_EH_ONLY flag in TARGET_EXPRs. */ /* [ WORD 5 ] Cleanup expression. */ gimple_seq cleanup; }; /* GIMPLE_ASM */ struct GTY(()) gimple_statement_asm { /* [ WORD 1-8 ] */ struct gimple_statement_with_memory_ops_base membase; /* [ WORD 9 ] __asm__ statement. */ const char *string; /* [ WORD 10 ] Number of inputs, outputs, clobbers, labels. 
*/ unsigned char ni; unsigned char no; unsigned char nc; unsigned char nl; /* [ WORD 11 ] Operand vector. NOTE! This must always be the last field of this structure. In particular, this means that this structure cannot be embedded inside another one. */ tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1]; }; /* GIMPLE_OMP_CRITICAL */ struct GTY(()) gimple_statement_omp_critical { /* [ WORD 1-5 ] */ struct gimple_statement_omp omp; /* [ WORD 6 ] Critical section name. */ tree name; }; struct GTY(()) gimple_omp_for_iter { /* Condition code. */ enum tree_code cond; /* Index variable. */ tree index; /* Initial value. */ tree initial; /* Final value. */ tree final; /* Increment. */ tree incr; }; /* GIMPLE_OMP_FOR */ struct GTY(()) gimple_statement_omp_for { /* [ WORD 1-5 ] */ struct gimple_statement_omp omp; /* [ WORD 6 ] */ tree clauses; /* [ WORD 7 ] Number of elements in iter array. */ size_t collapse; /* [ WORD 8 ] */ struct gimple_omp_for_iter * GTY((length ("%h.collapse"))) iter; /* [ WORD 9 ] Pre-body evaluated before the loop body begins. */ gimple_seq pre_body; }; /* GIMPLE_OMP_PARALLEL */ struct GTY(()) gimple_statement_omp_parallel { /* [ WORD 1-5 ] */ struct gimple_statement_omp omp; /* [ WORD 6 ] Clauses. */ tree clauses; /* [ WORD 7 ] Child function holding the body of the parallel region. */ tree child_fn; /* [ WORD 8 ] Shared data argument. */ tree data_arg; }; /* GIMPLE_OMP_TASK */ struct GTY(()) gimple_statement_omp_task { /* [ WORD 1-8 ] */ struct gimple_statement_omp_parallel par; /* [ WORD 9 ] Child function holding firstprivate initialization if needed. */ tree copy_fn; /* [ WORD 10-11 ] Size and alignment in bytes of the argument data block. */ tree arg_size; tree arg_align; }; /* GIMPLE_OMP_SECTION */ /* Uses struct gimple_statement_omp. 
*/ /* GIMPLE_OMP_SECTIONS */ struct GTY(()) gimple_statement_omp_sections { /* [ WORD 1-5 ] */ struct gimple_statement_omp omp; /* [ WORD 6 ] */ tree clauses; /* [ WORD 7 ] The control variable used for deciding which of the sections to execute. */ tree control; }; /* GIMPLE_OMP_CONTINUE. Note: This does not inherit from gimple_statement_omp, because we do not need the body field. */ struct GTY(()) gimple_statement_omp_continue { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] */ tree control_def; /* [ WORD 6 ] */ tree control_use; }; /* GIMPLE_OMP_SINGLE */ struct GTY(()) gimple_statement_omp_single { /* [ WORD 1-5 ] */ struct gimple_statement_omp omp; /* [ WORD 6 ] */ tree clauses; }; /* GIMPLE_OMP_ATOMIC_LOAD. Note: This is based on gimple_statement_base, not g_s_omp, because g_s_omp contains a sequence, which we don't need here. */ struct GTY(()) gimple_statement_omp_atomic_load { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5-6 ] */ tree rhs, lhs; }; /* GIMPLE_OMP_ATOMIC_STORE. See note on GIMPLE_OMP_ATOMIC_LOAD. */ struct GTY(()) gimple_statement_omp_atomic_store { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] */ tree val; }; /* GIMPLE_TRANSACTION. */ /* Bits to be stored in the GIMPLE_TRANSACTION subcode. */ /* The __transaction_atomic was declared [[outer]] or it is __transaction_relaxed. */ #define GTMA_IS_OUTER (1u << 0) #define GTMA_IS_RELAXED (1u << 1) #define GTMA_DECLARATION_MASK (GTMA_IS_OUTER | GTMA_IS_RELAXED) /* The transaction is seen to not have an abort. */ #define GTMA_HAVE_ABORT (1u << 2) /* The transaction is seen to have loads or stores. */ #define GTMA_HAVE_LOAD (1u << 3) #define GTMA_HAVE_STORE (1u << 4) /* The transaction MAY enter serial irrevocable mode in its dynamic scope. */ #define GTMA_MAY_ENTER_IRREVOCABLE (1u << 5) /* The transaction WILL enter serial irrevocable mode. 
An irrevocable block post-dominates the entire transaction, such that all invocations of the transaction will go serial-irrevocable. In such case, we don't bother instrumenting the transaction, and tell the runtime that it should begin the transaction in serial-irrevocable mode. */ #define GTMA_DOES_GO_IRREVOCABLE (1u << 6) struct GTY(()) gimple_statement_transaction { /* [ WORD 1-10 ] */ struct gimple_statement_with_memory_ops_base gsbase; /* [ WORD 11 ] */ gimple_seq body; /* [ WORD 12 ] */ tree label; }; #define DEFGSSTRUCT(SYM, STRUCT, HAS_TREE_OP) SYM, enum gimple_statement_structure_enum { #include "gsstruct.def" LAST_GSS_ENUM }; #undef DEFGSSTRUCT /* Define the overall contents of a gimple tuple. It may be any of the structures declared above for various types of tuples. */ union GTY ((desc ("gimple_statement_structure (&%h)"), variable_size)) gimple_statement_d { struct gimple_statement_base GTY ((tag ("GSS_BASE"))) gsbase; struct gimple_statement_with_ops GTY ((tag ("GSS_WITH_OPS"))) gsops; struct gimple_statement_with_memory_ops_base GTY ((tag ("GSS_WITH_MEM_OPS_BASE"))) gsmembase; struct gimple_statement_with_memory_ops GTY ((tag ("GSS_WITH_MEM_OPS"))) gsmem; struct gimple_statement_call GTY ((tag ("GSS_CALL"))) gimple_call; struct gimple_statement_omp GTY ((tag ("GSS_OMP"))) omp; struct gimple_statement_bind GTY ((tag ("GSS_BIND"))) gimple_bind; struct gimple_statement_catch GTY ((tag ("GSS_CATCH"))) gimple_catch; struct gimple_statement_eh_filter GTY ((tag ("GSS_EH_FILTER"))) gimple_eh_filter; struct gimple_statement_eh_mnt GTY ((tag ("GSS_EH_MNT"))) gimple_eh_mnt; struct gimple_statement_eh_else GTY ((tag ("GSS_EH_ELSE"))) gimple_eh_else; struct gimple_statement_phi GTY ((tag ("GSS_PHI"))) gimple_phi; struct gimple_statement_eh_ctrl GTY ((tag ("GSS_EH_CTRL"))) gimple_eh_ctrl; struct gimple_statement_try GTY ((tag ("GSS_TRY"))) gimple_try; struct gimple_statement_wce GTY ((tag ("GSS_WCE"))) gimple_wce; struct gimple_statement_asm GTY ((tag 
("GSS_ASM"))) gimple_asm; struct gimple_statement_omp_critical GTY ((tag ("GSS_OMP_CRITICAL"))) gimple_omp_critical; struct gimple_statement_omp_for GTY ((tag ("GSS_OMP_FOR"))) gimple_omp_for; struct gimple_statement_omp_parallel GTY ((tag ("GSS_OMP_PARALLEL"))) gimple_omp_parallel; struct gimple_statement_omp_task GTY ((tag ("GSS_OMP_TASK"))) gimple_omp_task; struct gimple_statement_omp_sections GTY ((tag ("GSS_OMP_SECTIONS"))) gimple_omp_sections; struct gimple_statement_omp_single GTY ((tag ("GSS_OMP_SINGLE"))) gimple_omp_single; struct gimple_statement_omp_continue GTY ((tag ("GSS_OMP_CONTINUE"))) gimple_omp_continue; struct gimple_statement_omp_atomic_load GTY ((tag ("GSS_OMP_ATOMIC_LOAD"))) gimple_omp_atomic_load; struct gimple_statement_omp_atomic_store GTY ((tag ("GSS_OMP_ATOMIC_STORE"))) gimple_omp_atomic_store; struct gimple_statement_transaction GTY((tag ("GSS_TRANSACTION"))) gimple_transaction; }; /* In gimple.c. */ /* Offset in bytes to the location of the operand vector. Zero if there is no operand vector for this tuple structure. */ extern size_t const gimple_ops_offset_[]; /* Map GIMPLE codes to GSS codes. */ extern enum gimple_statement_structure_enum const gss_for_code_[]; /* This variable holds the currently expanded gimple statement for purposes of comminucating the profile info to the builtin expanders. 
*/
extern gimple currently_expanding_gimple_stmt;

/* Statement constructors and sequence helpers, defined in gimple.c.  */

gimple gimple_build_return (tree);

gimple gimple_build_assign_stat (tree, tree MEM_STAT_DECL);
#define gimple_build_assign(l,r) gimple_build_assign_stat (l, r MEM_STAT_INFO)

void extract_ops_from_tree_1 (tree, enum tree_code *, tree *, tree *, tree *);

gimple gimple_build_assign_with_ops_stat (enum tree_code, tree, tree,
					  tree, tree MEM_STAT_DECL);
#define gimple_build_assign_with_ops(c,o1,o2,o3)			\
  gimple_build_assign_with_ops_stat (c, o1, o2, o3, NULL_TREE MEM_STAT_INFO)
#define gimple_build_assign_with_ops3(c,o1,o2,o3,o4)			\
  gimple_build_assign_with_ops_stat (c, o1, o2, o3, o4 MEM_STAT_INFO)

gimple gimple_build_debug_bind_stat (tree, tree, gimple MEM_STAT_DECL);
#define gimple_build_debug_bind(var,val,stmt)			\
  gimple_build_debug_bind_stat ((var), (val), (stmt) MEM_STAT_INFO)

gimple gimple_build_debug_source_bind_stat (tree, tree, gimple MEM_STAT_DECL);
#define gimple_build_debug_source_bind(var,val,stmt)			\
  gimple_build_debug_source_bind_stat ((var), (val), (stmt) MEM_STAT_INFO)

gimple gimple_build_call_vec (tree, VEC(tree, heap) *);
gimple gimple_build_call (tree, unsigned, ...);
gimple gimple_build_call_valist (tree, unsigned, va_list);
gimple gimple_build_call_internal (enum internal_fn, unsigned, ...);
gimple gimple_build_call_internal_vec (enum internal_fn, VEC(tree, heap) *);
gimple gimple_build_call_from_tree (tree);
gimple gimplify_assign (tree, tree, gimple_seq *);
gimple gimple_build_cond (enum tree_code, tree, tree, tree, tree);
gimple gimple_build_label (tree label);
gimple gimple_build_goto (tree dest);
gimple gimple_build_nop (void);
gimple gimple_build_bind (tree, gimple_seq, tree);
gimple gimple_build_asm_vec (const char *, VEC(tree,gc) *, VEC(tree,gc) *,
			     VEC(tree,gc) *, VEC(tree,gc) *);
gimple gimple_build_catch (tree, gimple_seq);
gimple gimple_build_eh_filter (tree, gimple_seq);
gimple gimple_build_eh_must_not_throw (tree);
gimple gimple_build_eh_else (gimple_seq, gimple_seq);
gimple gimple_build_try (gimple_seq, gimple_seq, enum gimple_try_flags);
gimple gimple_build_wce (gimple_seq);
gimple gimple_build_resx (int);
gimple gimple_build_eh_dispatch (int);
gimple gimple_build_switch_nlabels (unsigned, tree, tree);
gimple gimple_build_switch (unsigned, tree, tree, ...);
gimple gimple_build_switch_vec (tree, tree, VEC(tree,heap) *);
gimple gimple_build_omp_parallel (gimple_seq, tree, tree, tree);
gimple gimple_build_omp_task (gimple_seq, tree, tree, tree, tree, tree, tree);
gimple gimple_build_omp_for (gimple_seq, tree, size_t, gimple_seq);
gimple gimple_build_omp_critical (gimple_seq, tree);
gimple gimple_build_omp_section (gimple_seq);
gimple gimple_build_omp_continue (tree, tree);
gimple gimple_build_omp_master (gimple_seq);
gimple gimple_build_omp_return (bool);
gimple gimple_build_omp_ordered (gimple_seq);
gimple gimple_build_omp_sections (gimple_seq, tree);
gimple gimple_build_omp_sections_switch (void);
gimple gimple_build_omp_single (gimple_seq, tree);
gimple gimple_build_cdt (tree, tree);
gimple gimple_build_omp_atomic_load (tree, tree);
gimple gimple_build_omp_atomic_store (tree);
gimple gimple_build_transaction (gimple_seq, tree);
gimple gimple_build_predict (enum br_predictor, enum prediction);
enum gimple_statement_structure_enum gss_for_assign (enum tree_code);
void sort_case_labels (VEC(tree,heap) *);
void gimple_set_body (tree, gimple_seq);
gimple_seq gimple_body (tree);
bool gimple_has_body_p (tree);
gimple_seq gimple_seq_alloc (void);
void gimple_seq_free (gimple_seq);
void gimple_seq_add_seq (gimple_seq *, gimple_seq);
gimple_seq gimple_seq_copy (gimple_seq);
bool gimple_call_same_target_p (const_gimple, const_gimple);
int gimple_call_flags (const_gimple);
int gimple_call_return_flags (const_gimple);
int gimple_call_arg_flags (const_gimple, unsigned);
void gimple_call_reset_alias_info (gimple);
bool gimple_assign_copy_p (gimple);
bool gimple_assign_ssa_name_copy_p (gimple);
bool gimple_assign_unary_nop_p (gimple);
void
gimple_set_bb (gimple, struct basic_block_def *);
void gimple_assign_set_rhs_from_tree (gimple_stmt_iterator *, tree);
void gimple_assign_set_rhs_with_ops_1 (gimple_stmt_iterator *, enum tree_code,
				       tree, tree, tree);
tree gimple_get_lhs (const_gimple);
void gimple_set_lhs (gimple, tree);
void gimple_replace_lhs (gimple, tree);
gimple gimple_copy (gimple);
void gimple_set_modified (gimple, bool);
void gimple_cond_get_ops_from_tree (tree, enum tree_code *, tree *, tree *);
gimple gimple_build_cond_from_tree (tree, tree, tree);
void gimple_cond_set_condition_from_tree (gimple, tree);
bool gimple_has_side_effects (const_gimple);
bool gimple_could_trap_p (gimple);
bool gimple_could_trap_p_1 (gimple, bool, bool);
bool gimple_assign_rhs_could_trap_p (gimple);
void gimple_regimplify_operands (gimple, gimple_stmt_iterator *);
bool empty_body_p (gimple_seq);
unsigned get_gimple_rhs_num_ops (enum tree_code);
#define gimple_alloc(c, n) gimple_alloc_stat (c, n MEM_STAT_INFO)
gimple gimple_alloc_stat (enum gimple_code, unsigned MEM_STAT_DECL);
const char *gimple_decl_printable_name (tree, int);
tree gimple_get_virt_method_for_binfo (HOST_WIDE_INT, tree);
void gimple_adjust_this_by_delta (gimple_stmt_iterator *, tree);
tree gimple_extract_devirt_binfo_from_cst (tree);

/* Returns true iff T is a valid GIMPLE statement.  */
extern bool is_gimple_stmt (tree);

/* Returns true iff T is a scalar register variable.  */
extern bool is_gimple_reg (tree);

/* Returns true iff T is any sort of variable.  */
extern bool is_gimple_variable (tree);

/* Returns true iff T is any sort of symbol.  */
extern bool is_gimple_id (tree);

/* Returns true iff T is a variable or an INDIRECT_REF (of a variable).  */
extern bool is_gimple_min_lval (tree);

/* Returns true iff T is something whose address can be taken.  */
extern bool is_gimple_addressable (tree);

/* Returns true iff T is any valid GIMPLE lvalue.  */
extern bool is_gimple_lvalue (tree);

/* Returns true iff T is a GIMPLE address.  */
bool is_gimple_address (const_tree);

/* Returns true iff T is a GIMPLE invariant address.  */
bool is_gimple_invariant_address (const_tree);

/* Returns true iff T is a GIMPLE invariant address at interprocedural
   level.  */
bool is_gimple_ip_invariant_address (const_tree);

/* Returns true iff T is a valid GIMPLE constant.  */
bool is_gimple_constant (const_tree);

/* Returns true iff T is a GIMPLE restricted function invariant.  */
extern bool is_gimple_min_invariant (const_tree);

/* Returns true iff T is a GIMPLE restricted interprocedural invariant.  */
extern bool is_gimple_ip_invariant (const_tree);

/* Returns true iff T is a GIMPLE rvalue.  */
extern bool is_gimple_val (tree);

/* Returns true iff T is a GIMPLE asm statement input.  */
extern bool is_gimple_asm_val (tree);

/* Returns true iff T is a valid address operand of a MEM_REF.  */
bool is_gimple_mem_ref_addr (tree);

/* Returns true iff T is a valid rhs for a MODIFY_EXPR where the LHS is a
   GIMPLE temporary, a renamed user variable, or something else,
   respectively.  */
extern bool is_gimple_reg_rhs (tree);
extern bool is_gimple_mem_rhs (tree);

/* Returns true iff T is a valid if-statement condition.  */
extern bool is_gimple_condexpr (tree);

/* Returns true iff T is a valid call address expression.
*/
extern bool is_gimple_call_addr (tree);

extern void recalculate_side_effects (tree);
extern bool gimple_compare_field_offset (tree, tree);
extern tree gimple_register_type (tree);
extern tree gimple_register_canonical_type (tree);
extern void print_gimple_types_stats (void);
extern void free_gimple_type_tables (void);
extern tree gimple_unsigned_type (tree);
extern tree gimple_signed_type (tree);
extern alias_set_type gimple_get_alias_set (tree);
extern void count_uses_and_derefs (tree, gimple, unsigned *, unsigned *,
				   unsigned *);
extern bool walk_stmt_load_store_addr_ops (gimple, void *,
					   bool (*)(gimple, tree, void *),
					   bool (*)(gimple, tree, void *),
					   bool (*)(gimple, tree, void *));
extern bool walk_stmt_load_store_ops (gimple, void *,
				      bool (*)(gimple, tree, void *),
				      bool (*)(gimple, tree, void *));
extern bool gimple_ior_addresses_taken (bitmap, gimple);
extern bool gimple_call_builtin_class_p (gimple, enum built_in_class);
extern bool gimple_call_builtin_p (gimple, enum built_in_function);
extern bool gimple_asm_clobbers_memory_p (const_gimple);

/* In gimplify.c  */
extern tree create_tmp_var_raw (tree, const char *);
extern tree create_tmp_var_name (const char *);
extern tree create_tmp_var (tree, const char *);
extern tree create_tmp_reg (tree, const char *);
extern tree get_initialized_tmp_var (tree, gimple_seq *, gimple_seq *);
extern tree get_formal_tmp_var (tree, gimple_seq *);
extern void declare_vars (tree, gimple, bool);
extern void annotate_all_with_location (gimple_seq, location_t);

/* Validation of GIMPLE expressions.  Note that these predicates only check
   the basic form of the expression, they don't recurse to make sure that
   underlying nodes are also of the right form.  */
typedef bool (*gimple_predicate)(tree);


/* FIXME we should deduce this from the predicate.  */
enum fallback {
  fb_none = 0,		/* Do not generate a temporary.  */

  fb_rvalue = 1,	/* Generate an rvalue to hold the result of a
			   gimplified expression.  */

  fb_lvalue = 2,	/* Generate an lvalue to hold the result of a
			   gimplified expression.  */

  fb_mayfail = 4,	/* Gimplification may fail.  Error issued
			   afterwards.  */
  fb_either = fb_rvalue | fb_lvalue
};

typedef int fallback_t;

enum gimplify_status {
  GS_ERROR	= -2,	/* Something Bad Seen.  */
  GS_UNHANDLED	= -1,	/* A langhook result for "I dunno".  */
  GS_OK		= 0,	/* We did something, maybe more to do.  */
  GS_ALL_DONE	= 1	/* The expression is fully gimplified.  */
};

/* Gimplification context, pushed and popped around gimplification.  */
struct gimplify_ctx
{
  struct gimplify_ctx *prev_context;

  VEC(gimple,heap) *bind_expr_stack;
  tree temps;
  gimple_seq conditional_cleanups;
  tree exit_label;
  tree return_temp;

  VEC(tree,heap) *case_labels;
  /* The formal temporary table.  Should this be persistent?  */
  htab_t temp_htab;

  int conditions;
  bool save_stack;
  bool into_ssa;
  bool allow_rhs_cond_expr;
  bool in_cleanup_point_expr;
};

/* Return true if gimplify_one_sizepos doesn't need to gimplify
   expr (when in TYPE_SIZE{,_UNIT} and similar type/decl size/bitsize
   fields).  */
static inline bool
is_gimple_sizepos (tree expr)
{
  /* gimplify_one_sizepos doesn't need to do anything if the value isn't there,
     is constant, or contains A PLACEHOLDER_EXPR.  We also don't want to do
     anything if it's already a VAR_DECL.  If it's a VAR_DECL from another
     function, the gimplifier will want to replace it with a new variable,
     but that will cause problems if this type is from outside the function.
     It's OK to have that here.  */
  return (expr == NULL_TREE
	  || TREE_CONSTANT (expr)
	  || TREE_CODE (expr) == VAR_DECL
	  || CONTAINS_PLACEHOLDER_P (expr));
}

extern enum gimplify_status gimplify_expr (tree *, gimple_seq *, gimple_seq *,
					   bool (*) (tree), fallback_t);
extern void gimplify_type_sizes (tree, gimple_seq *);
extern void gimplify_one_sizepos (tree *, gimple_seq *);
extern bool gimplify_stmt (tree *, gimple_seq *);
extern gimple gimplify_body (tree, bool);
extern void push_gimplify_context (struct gimplify_ctx *);
extern void pop_gimplify_context (gimple);
extern void gimplify_and_add (tree, gimple_seq *);

/* Miscellaneous helpers.  */
extern void gimple_add_tmp_var (tree);
extern gimple gimple_current_bind_expr (void);
extern VEC(gimple, heap) *gimple_bind_expr_stack (void);
extern tree voidify_wrapper_expr (tree, tree);
extern tree build_and_jump (tree *);
extern tree force_labels_r (tree *, int *, void *);
extern enum gimplify_status gimplify_va_arg_expr (tree *, gimple_seq *,
						  gimple_seq *);
struct gimplify_omp_ctx;
extern void omp_firstprivatize_variable (struct gimplify_omp_ctx *, tree);
extern tree gimple_boolify (tree);
extern gimple_predicate rhs_predicate_for (tree);
extern tree canonicalize_cond_expr_cond (tree);

/* In omp-low.c.  */
extern tree omp_reduction_init (tree, tree);

/* In trans-mem.c.  */
extern void diagnose_tm_safe_errors (tree);
extern void compute_transaction_bits (void);

/* In tree-nested.c.  */
extern void lower_nested_functions (tree);
extern void insert_field_into_struct (tree, tree);

/* In gimplify.c.  */
extern void gimplify_function_tree (tree);

/* In cfgexpand.c.  */
extern tree gimple_assign_rhs_to_tree (gimple);

/* In builtins.c  */
extern bool validate_gimple_arglist (const_gimple, ...);

/* In tree-ssa.c  */
extern bool tree_ssa_useless_type_conversion (tree);
extern tree tree_ssa_strip_useless_type_conversions (tree);
extern bool useless_type_conversion_p (tree, tree);
extern bool types_compatible_p (tree, tree);

/* Return the code for GIMPLE statement G.
*/
static inline enum gimple_code
gimple_code (const_gimple g)
{
  return g->gsbase.code;
}

/* Return the GSS code used by a GIMPLE code.  */
static inline enum gimple_statement_structure_enum
gss_for_code (enum gimple_code code)
{
  gcc_gimple_checking_assert ((unsigned int)code < LAST_AND_UNUSED_GIMPLE_CODE);
  return gss_for_code_[code];
}

/* Return which GSS code is used by GS.  */
static inline enum gimple_statement_structure_enum
gimple_statement_structure (gimple gs)
{
  return gss_for_code (gimple_code (gs));
}

/* Return true if statement G has sub-statements.  This is only true for
   High GIMPLE statements.  */
static inline bool
gimple_has_substatements (gimple g)
{
  switch (gimple_code (g))
    {
    case GIMPLE_BIND:
    case GIMPLE_CATCH:
    case GIMPLE_EH_FILTER:
    case GIMPLE_EH_ELSE:
    case GIMPLE_TRY:
    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_CRITICAL:
    case GIMPLE_WITH_CLEANUP_EXPR:
    case GIMPLE_TRANSACTION:
      return true;

    default:
      return false;
    }
}

/* Return the basic block holding statement G.  */
static inline struct basic_block_def *
gimple_bb (const_gimple g)
{
  return g->gsbase.bb;
}

/* Return the lexical scope block holding statement G.  */
static inline tree
gimple_block (const_gimple g)
{
  return g->gsbase.block;
}

/* Set BLOCK to be the lexical scope block holding statement G.  */
static inline void
gimple_set_block (gimple g, tree block)
{
  g->gsbase.block = block;
}

/* Return location information for statement G.  */
static inline location_t
gimple_location (const_gimple g)
{
  return g->gsbase.location;
}

/* Return pointer to location information for statement G.  */
static inline const location_t *
gimple_location_ptr (const_gimple g)
{
  return &g->gsbase.location;
}

/* Set location information for statement G.  */
static inline void
gimple_set_location (gimple g, location_t location)
{
  g->gsbase.location = location;
}

/* Return true if G contains location information.  */
static inline bool
gimple_has_location (const_gimple g)
{
  return gimple_location (g) != UNKNOWN_LOCATION;
}

/* Return the file name of the location of STMT.  */
static inline const char *
gimple_filename (const_gimple stmt)
{
  return LOCATION_FILE (gimple_location (stmt));
}

/* Return the line number of the location of STMT.  */
static inline int
gimple_lineno (const_gimple stmt)
{
  return LOCATION_LINE (gimple_location (stmt));
}

/* Determine whether SEQ is a singleton.  */
static inline bool
gimple_seq_singleton_p (gimple_seq seq)
{
  return ((gimple_seq_first (seq) != NULL)
	  && (gimple_seq_first (seq) == gimple_seq_last (seq)));
}

/* Return true if no warnings should be emitted for statement STMT.  */
static inline bool
gimple_no_warning_p (const_gimple stmt)
{
  return stmt->gsbase.no_warning;
}

/* Set the no_warning flag of STMT to NO_WARNING.  */
static inline void
gimple_set_no_warning (gimple stmt, bool no_warning)
{
  stmt->gsbase.no_warning = (unsigned) no_warning;
}

/* Set the visited status on statement STMT to VISITED_P.  */
static inline void
gimple_set_visited (gimple stmt, bool visited_p)
{
  stmt->gsbase.visited = (unsigned) visited_p;
}

/* Return the visited status for statement STMT.  */
static inline bool
gimple_visited_p (gimple stmt)
{
  return stmt->gsbase.visited;
}

/* Set pass local flag PLF on statement STMT to VAL_P.  */
static inline void
gimple_set_plf (gimple stmt, enum plf_mask plf, bool val_p)
{
  if (val_p)
    stmt->gsbase.plf |= (unsigned int) plf;
  else
    stmt->gsbase.plf &= ~((unsigned int) plf);
}

/* Return the value of pass local flag PLF on statement STMT.  */
static inline unsigned int
gimple_plf (gimple stmt, enum plf_mask plf)
{
  return stmt->gsbase.plf & ((unsigned int) plf);
}

/* Set the UID of statement.
*/
static inline void
gimple_set_uid (gimple g, unsigned uid)
{
  g->gsbase.uid = uid;
}

/* Return the UID of statement.  */
static inline unsigned
gimple_uid (const_gimple g)
{
  return g->gsbase.uid;
}

/* Return true if GIMPLE statement G has register or memory operands.  */
static inline bool
gimple_has_ops (const_gimple g)
{
  return gimple_code (g) >= GIMPLE_COND && gimple_code (g) <= GIMPLE_RETURN;
}

/* Return true if GIMPLE statement G has memory operands.  */
static inline bool
gimple_has_mem_ops (const_gimple g)
{
  return gimple_code (g) >= GIMPLE_ASSIGN && gimple_code (g) <= GIMPLE_RETURN;
}

/* Return the set of DEF operands for statement G.  */
static inline struct def_optype_d *
gimple_def_ops (const_gimple g)
{
  if (!gimple_has_ops (g))
    return NULL;
  return g->gsops.opbase.def_ops;
}

/* Set DEF to be the set of DEF operands for statement G.  */
static inline void
gimple_set_def_ops (gimple g, struct def_optype_d *def)
{
  gcc_gimple_checking_assert (gimple_has_ops (g));
  g->gsops.opbase.def_ops = def;
}

/* Return the set of USE operands for statement G.  */
static inline struct use_optype_d *
gimple_use_ops (const_gimple g)
{
  if (!gimple_has_ops (g))
    return NULL;
  return g->gsops.opbase.use_ops;
}

/* Set USE to be the set of USE operands for statement G.  */
static inline void
gimple_set_use_ops (gimple g, struct use_optype_d *use)
{
  gcc_gimple_checking_assert (gimple_has_ops (g));
  g->gsops.opbase.use_ops = use;
}

/* Return the set of VUSE operand for statement G.  */
static inline use_operand_p
gimple_vuse_op (const_gimple g)
{
  struct use_optype_d *ops;
  if (!gimple_has_mem_ops (g))
    return NULL_USE_OPERAND_P;
  ops = g->gsops.opbase.use_ops;
  if (ops
      && USE_OP_PTR (ops)->use == &g->gsmembase.vuse)
    return USE_OP_PTR (ops);
  return NULL_USE_OPERAND_P;
}

/* Return the set of VDEF operand for statement G.  */
static inline def_operand_p
gimple_vdef_op (const_gimple g)
{
  struct def_optype_d *ops;
  if (!gimple_has_mem_ops (g))
    return NULL_DEF_OPERAND_P;
  ops = g->gsops.opbase.def_ops;
  if (ops
      && DEF_OP_PTR (ops) == &g->gsmembase.vdef)
    return DEF_OP_PTR (ops);
  return NULL_DEF_OPERAND_P;
}

/* Return the single VUSE operand of the statement G.  */
static inline tree
gimple_vuse (const_gimple g)
{
  if (!gimple_has_mem_ops (g))
    return NULL_TREE;
  return g->gsmembase.vuse;
}

/* Return the single VDEF operand of the statement G.  */
static inline tree
gimple_vdef (const_gimple g)
{
  if (!gimple_has_mem_ops (g))
    return NULL_TREE;
  return g->gsmembase.vdef;
}

/* Return a pointer to the single VUSE operand of the statement G.  */
static inline tree *
gimple_vuse_ptr (gimple g)
{
  if (!gimple_has_mem_ops (g))
    return NULL;
  return &g->gsmembase.vuse;
}

/* Return a pointer to the single VDEF operand of the statement G.  */
static inline tree *
gimple_vdef_ptr (gimple g)
{
  if (!gimple_has_mem_ops (g))
    return NULL;
  return &g->gsmembase.vdef;
}

/* Set the single VUSE operand of the statement G.  */
static inline void
gimple_set_vuse (gimple g, tree vuse)
{
  gcc_gimple_checking_assert (gimple_has_mem_ops (g));
  g->gsmembase.vuse = vuse;
}

/* Set the single VDEF operand of the statement G.  */
static inline void
gimple_set_vdef (gimple g, tree vdef)
{
  gcc_gimple_checking_assert (gimple_has_mem_ops (g));
  g->gsmembase.vdef = vdef;
}

/* Return true if statement G has operands and the modified field has
   been set.  */
static inline bool
gimple_modified_p (const_gimple g)
{
  return (gimple_has_ops (g)) ? (bool) g->gsbase.modified : false;
}

/* Return the tree code for the expression computed by STMT.  This is
   only valid for GIMPLE_COND, GIMPLE_CALL and GIMPLE_ASSIGN.  For
   GIMPLE_CALL, return CALL_EXPR as the expression code for
   consistency.  This is useful when the caller needs to deal with the
   three kinds of computation that GIMPLE supports.
*/
static inline enum tree_code
gimple_expr_code (const_gimple stmt)
{
  enum gimple_code code = gimple_code (stmt);
  if (code == GIMPLE_ASSIGN || code == GIMPLE_COND)
    return (enum tree_code) stmt->gsbase.subcode;
  else
    {
      gcc_gimple_checking_assert (code == GIMPLE_CALL);
      return CALL_EXPR;
    }
}

/* Mark statement S as modified, and update it.  */
static inline void
update_stmt (gimple s)
{
  if (gimple_has_ops (s))
    {
      gimple_set_modified (s, true);
      update_stmt_operands (s);
    }
}

/* Update statement S if it has been optimized.  */
static inline void
update_stmt_if_modified (gimple s)
{
  if (gimple_modified_p (s))
    update_stmt_operands (s);
}

/* Return true if statement STMT contains volatile operands.  */
static inline bool
gimple_has_volatile_ops (const_gimple stmt)
{
  if (gimple_has_mem_ops (stmt))
    return stmt->gsbase.has_volatile_ops;
  else
    return false;
}

/* Set the HAS_VOLATILE_OPS flag to VOLATILEP.  */
static inline void
gimple_set_has_volatile_ops (gimple stmt, bool volatilep)
{
  if (gimple_has_mem_ops (stmt))
    stmt->gsbase.has_volatile_ops = (unsigned) volatilep;
}

/* Return true if BB is in a transaction.  */
static inline bool
block_in_transaction (basic_block bb)
{
  return flag_tm && bb->flags & BB_IN_TRANSACTION;
}

/* Return true if STMT is in a transaction.  */
static inline bool
gimple_in_transaction (gimple stmt)
{
  return block_in_transaction (gimple_bb (stmt));
}

/* Return true if statement STMT may access memory.  */
static inline bool
gimple_references_memory_p (gimple stmt)
{
  return gimple_has_mem_ops (stmt) && gimple_vuse (stmt);
}

/* Return the subcode for OMP statement S.  */
static inline unsigned
gimple_omp_subcode (const_gimple s)
{
  gcc_gimple_checking_assert (gimple_code (s) >= GIMPLE_OMP_ATOMIC_LOAD
			      && gimple_code (s) <= GIMPLE_OMP_SINGLE);
  return s->gsbase.subcode;
}

/* Set the subcode for OMP statement S to SUBCODE.  */
static inline void
gimple_omp_set_subcode (gimple s, unsigned int subcode)
{
  /* We only have 16 bits for the subcode.  Assert that we are not
     overflowing it.  */
  gcc_gimple_checking_assert (subcode < (1 << 16));
  s->gsbase.subcode = subcode;
}

/* Set the nowait flag on OMP_RETURN statement S.  */
static inline void
gimple_omp_return_set_nowait (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_OMP_RETURN);
  s->gsbase.subcode |= GF_OMP_RETURN_NOWAIT;
}

/* Return true if OMP return statement G has the GF_OMP_RETURN_NOWAIT
   flag set.  */
static inline bool
gimple_omp_return_nowait_p (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_RETURN);
  return (gimple_omp_subcode (g) & GF_OMP_RETURN_NOWAIT) != 0;
}

/* Return true if OMP section statement G has the GF_OMP_SECTION_LAST
   flag set.  */
static inline bool
gimple_omp_section_last_p (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_SECTION);
  return (gimple_omp_subcode (g) & GF_OMP_SECTION_LAST) != 0;
}

/* Set the GF_OMP_SECTION_LAST flag on G.  */
static inline void
gimple_omp_section_set_last (gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_SECTION);
  g->gsbase.subcode |= GF_OMP_SECTION_LAST;
}

/* Return true if OMP parallel statement G has the
   GF_OMP_PARALLEL_COMBINED flag set.  */
static inline bool
gimple_omp_parallel_combined_p (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL);
  return (gimple_omp_subcode (g) & GF_OMP_PARALLEL_COMBINED) != 0;
}

/* Set the GF_OMP_PARALLEL_COMBINED field in G depending on the boolean
   value of COMBINED_P.  */
static inline void
gimple_omp_parallel_set_combined_p (gimple g, bool combined_p)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL);
  if (combined_p)
    g->gsbase.subcode |= GF_OMP_PARALLEL_COMBINED;
  else
    g->gsbase.subcode &= ~GF_OMP_PARALLEL_COMBINED;
}

/* Return true if OMP atomic load/store statement G has the
   GF_OMP_ATOMIC_NEED_VALUE flag set.  */
static inline bool
gimple_omp_atomic_need_value_p (const_gimple g)
{
  if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
    GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
  return (gimple_omp_subcode (g) & GF_OMP_ATOMIC_NEED_VALUE) != 0;
}

/* Set the GF_OMP_ATOMIC_NEED_VALUE flag on G.
*/
static inline void
gimple_omp_atomic_set_need_value (gimple g)
{
  if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
    GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
  g->gsbase.subcode |= GF_OMP_ATOMIC_NEED_VALUE;
}

/* Return the number of operands for statement GS.  */
static inline unsigned
gimple_num_ops (const_gimple gs)
{
  return gs->gsbase.num_ops;
}

/* Set the number of operands for statement GS.  */
static inline void
gimple_set_num_ops (gimple gs, unsigned num_ops)
{
  gs->gsbase.num_ops = num_ops;
}

/* Return the array of operands for statement GS.  */
static inline tree *
gimple_ops (gimple gs)
{
  size_t off;

  /* All the tuples have their operand vector at the very bottom
     of the structure.  Note that those structures that do not
     have an operand vector have a zero offset.  */
  off = gimple_ops_offset_[gimple_statement_structure (gs)];
  gcc_gimple_checking_assert (off != 0);

  return (tree *) ((char *) gs + off);
}

/* Return operand I for statement GS.  */
static inline tree
gimple_op (const_gimple gs, unsigned i)
{
  if (gimple_has_ops (gs))
    {
      gcc_gimple_checking_assert (i < gimple_num_ops (gs));
      return gimple_ops (CONST_CAST_GIMPLE (gs))[i];
    }
  else
    return NULL_TREE;
}

/* Return a pointer to operand I for statement GS.  */
static inline tree *
gimple_op_ptr (const_gimple gs, unsigned i)
{
  if (gimple_has_ops (gs))
    {
      gcc_gimple_checking_assert (i < gimple_num_ops (gs));
      return gimple_ops (CONST_CAST_GIMPLE (gs)) + i;
    }
  else
    return NULL;
}

/* Set operand I of statement GS to OP.  */
static inline void
gimple_set_op (gimple gs, unsigned i, tree op)
{
  gcc_gimple_checking_assert (gimple_has_ops (gs) && i < gimple_num_ops (gs));

  /* Note.  It may be tempting to assert that OP matches
     is_gimple_operand, but that would be wrong.  Different tuples
     accept slightly different sets of tree operands.  Each caller
     should perform its own validation.  */
  gimple_ops (gs)[i] = op;
}

/* Return true if GS is a GIMPLE_ASSIGN.  */
static inline bool
is_gimple_assign (const_gimple gs)
{
  return gimple_code (gs) == GIMPLE_ASSIGN;
}

/* Determine if expression CODE is one of the valid expressions that can
   be used on the RHS of GIMPLE assignments.  */
static inline enum gimple_rhs_class
get_gimple_rhs_class (enum tree_code code)
{
  return (enum gimple_rhs_class) gimple_rhs_class_table[(int) code];
}

/* Return the LHS of assignment statement GS.  */
static inline tree
gimple_assign_lhs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op (gs, 0);
}

/* Return a pointer to the LHS of assignment statement GS.  */
static inline tree *
gimple_assign_lhs_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op_ptr (gs, 0);
}

/* Set LHS to be the LHS operand of assignment statement GS.  */
static inline void
gimple_assign_set_lhs (gimple gs, tree lhs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gimple_set_op (gs, 0, lhs);

  if (lhs && TREE_CODE (lhs) == SSA_NAME)
    SSA_NAME_DEF_STMT (lhs) = gs;
}

/* Return the first operand on the RHS of assignment statement GS.  */
static inline tree
gimple_assign_rhs1 (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op (gs, 1);
}

/* Return a pointer to the first operand on the RHS of assignment
   statement GS.  */
static inline tree *
gimple_assign_rhs1_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op_ptr (gs, 1);
}

/* Set RHS to be the first operand on the RHS of assignment statement GS.  */
static inline void
gimple_assign_set_rhs1 (gimple gs, tree rhs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gimple_set_op (gs, 1, rhs);
}

/* Return the second operand on the RHS of assignment statement GS.
   If GS does not have two operands, NULL is returned instead.  */
static inline tree
gimple_assign_rhs2 (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);

  if (gimple_num_ops (gs) >= 3)
    return gimple_op (gs, 2);
  else
    return NULL_TREE;
}

/* Return a pointer to the second operand on the RHS of assignment
   statement GS.
*/
static inline tree *
gimple_assign_rhs2_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op_ptr (gs, 2);
}

/* Set RHS to be the second operand on the RHS of assignment statement GS.  */
static inline void
gimple_assign_set_rhs2 (gimple gs, tree rhs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gimple_set_op (gs, 2, rhs);
}

/* Return the third operand on the RHS of assignment statement GS.
   If GS does not have two operands, NULL is returned instead.  */
static inline tree
gimple_assign_rhs3 (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);

  if (gimple_num_ops (gs) >= 4)
    return gimple_op (gs, 3);
  else
    return NULL_TREE;
}

/* Return a pointer to the third operand on the RHS of assignment
   statement GS.  */
static inline tree *
gimple_assign_rhs3_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op_ptr (gs, 3);
}

/* Set RHS to be the third operand on the RHS of assignment statement GS.  */
static inline void
gimple_assign_set_rhs3 (gimple gs, tree rhs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gimple_set_op (gs, 3, rhs);
}

/* A wrapper around gimple_assign_set_rhs_with_ops_1, for callers which expect
   to see only a maximum of two operands.  */
static inline void
gimple_assign_set_rhs_with_ops (gimple_stmt_iterator *gsi, enum tree_code code,
				tree op1, tree op2)
{
  gimple_assign_set_rhs_with_ops_1 (gsi, code, op1, op2, NULL);
}

/* A wrapper around extract_ops_from_tree_1, for callers which expect
   to see only a maximum of two operands.  */
static inline void
extract_ops_from_tree (tree expr, enum tree_code *code, tree *op0,
		       tree *op1)
{
  tree op2;
  extract_ops_from_tree_1 (expr, code, op0, op1, &op2);
  gcc_assert (op2 == NULL_TREE);
}

/* Returns true if GS is a nontemporal move.  */
static inline bool
gimple_assign_nontemporal_move_p (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gs->gsbase.nontemporal_move;
}

/* Sets nontemporal move flag of GS to NONTEMPORAL.  */
static inline void
gimple_assign_set_nontemporal_move (gimple gs, bool nontemporal)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gs->gsbase.nontemporal_move = nontemporal;
}

/* Return the code of the expression computed on the rhs of assignment
   statement GS.  In case that the RHS is a single object, returns the
   tree code of the object.  */
static inline enum tree_code
gimple_assign_rhs_code (const_gimple gs)
{
  enum tree_code code;
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);

  code = (enum tree_code) gs->gsbase.subcode;
  /* While we initially set subcode to the TREE_CODE of the rhs for
     GIMPLE_SINGLE_RHS assigns we do not update that subcode to stay
     in sync when we rewrite stmts into SSA form or do SSA propagations.  */
  if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
    code = TREE_CODE (gimple_assign_rhs1 (gs));

  return code;
}

/* Set CODE to be the code for the expression computed on the RHS of
   assignment S.  */
static inline void
gimple_assign_set_rhs_code (gimple s, enum tree_code code)
{
  GIMPLE_CHECK (s, GIMPLE_ASSIGN);
  s->gsbase.subcode = code;
}

/* Return the gimple rhs class of the code of the expression computed on
   the rhs of assignment statement GS.
   This will never return GIMPLE_INVALID_RHS.  */
static inline enum gimple_rhs_class
gimple_assign_rhs_class (const_gimple gs)
{
  return get_gimple_rhs_class (gimple_assign_rhs_code (gs));
}

/* Return true if GS is an assignment with a singleton RHS, i.e.,
   there is no operator associated with the assignment itself.
   Unlike gimple_assign_copy_p, this predicate returns true for
   any RHS operand, including those that perform an operation
   and do not have the semantics of a copy, such as COND_EXPR.  */
static inline bool
gimple_assign_single_p (gimple gs)
{
  return (is_gimple_assign (gs)
          && gimple_assign_rhs_class (gs) == GIMPLE_SINGLE_RHS);
}

/* Return true if S is a type-cast assignment.
*/ static inline bool gimple_assign_cast_p (gimple s) { if (is_gimple_assign (s)) { enum tree_code sc = gimple_assign_rhs_code (s); return CONVERT_EXPR_CODE_P (sc) || sc == VIEW_CONVERT_EXPR || sc == FIX_TRUNC_EXPR; } return false; } /* Return true if S is a clobber statement. */ static inline bool gimple_clobber_p (gimple s) { return gimple_assign_single_p (s) && TREE_CLOBBER_P (gimple_assign_rhs1 (s)); } /* Return true if GS is a GIMPLE_CALL. */ static inline bool is_gimple_call (const_gimple gs) { return gimple_code (gs) == GIMPLE_CALL; } /* Return the LHS of call statement GS. */ static inline tree gimple_call_lhs (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CALL); return gimple_op (gs, 0); } /* Return a pointer to the LHS of call statement GS. */ static inline tree * gimple_call_lhs_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CALL); return gimple_op_ptr (gs, 0); } /* Set LHS to be the LHS operand of call statement GS. */ static inline void gimple_call_set_lhs (gimple gs, tree lhs) { GIMPLE_CHECK (gs, GIMPLE_CALL); gimple_set_op (gs, 0, lhs); if (lhs && TREE_CODE (lhs) == SSA_NAME) SSA_NAME_DEF_STMT (lhs) = gs; } /* Return true if call GS calls an internal-only function, as enumerated by internal_fn. */ static inline bool gimple_call_internal_p (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CALL); return (gs->gsbase.subcode & GF_CALL_INTERNAL) != 0; } /* Return the target of internal call GS. */ static inline enum internal_fn gimple_call_internal_fn (const_gimple gs) { gcc_gimple_checking_assert (gimple_call_internal_p (gs)); return gs->gimple_call.u.internal_fn; } /* Return the function type of the function called by GS. */ static inline tree gimple_call_fntype (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CALL); if (gimple_call_internal_p (gs)) return NULL_TREE; return gs->gimple_call.u.fntype; } /* Set the type of the function called by GS to FNTYPE. 
*/

static inline void
gimple_call_set_fntype (gimple gs, tree fntype)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  /* Internal calls store the internal_fn in the same union slot.  */
  gcc_gimple_checking_assert (!gimple_call_internal_p (gs));
  gs->gimple_call.u.fntype = fntype;
}


/* Return the tree node representing the function called by call
   statement GS.  */

static inline tree
gimple_call_fn (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op (gs, 1);
}

/* Return a pointer to the tree node representing the function called by call
   statement GS.  */

static inline tree *
gimple_call_fn_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op_ptr (gs, 1);
}


/* Set FN to be the function called by call statement GS.  */

static inline void
gimple_call_set_fn (gimple gs, tree fn)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gcc_gimple_checking_assert (!gimple_call_internal_p (gs));
  gimple_set_op (gs, 1, fn);
}


/* Set FNDECL to be the function called by call statement GS.  */

static inline void
gimple_call_set_fndecl (gimple gs, tree decl)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gcc_gimple_checking_assert (!gimple_call_internal_p (gs));
  /* The callee operand is the address of the decl, not the decl itself.  */
  gimple_set_op (gs, 1, build_fold_addr_expr_loc (gimple_location (gs), decl));
}


/* Set internal function FN to be the function called by call statement GS.  */

static inline void
gimple_call_set_internal_fn (gimple gs, enum internal_fn fn)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gcc_gimple_checking_assert (gimple_call_internal_p (gs));
  gs->gimple_call.u.internal_fn = fn;
}


/* Given a valid GIMPLE_CALL function address return the FUNCTION_DECL
   associated with the callee if known.  Otherwise return NULL_TREE.
*/

static inline tree
gimple_call_addr_fndecl (const_tree fn)
{
  if (fn && TREE_CODE (fn) == ADDR_EXPR)
    {
      tree fndecl = TREE_OPERAND (fn, 0);
      /* Look through a zero-offset MEM_REF wrapper around the decl
	 address.  */
      if (TREE_CODE (fndecl) == MEM_REF
	  && TREE_CODE (TREE_OPERAND (fndecl, 0)) == ADDR_EXPR
	  && integer_zerop (TREE_OPERAND (fndecl, 1)))
	fndecl = TREE_OPERAND (TREE_OPERAND (fndecl, 0), 0);
      if (TREE_CODE (fndecl) == FUNCTION_DECL)
	return fndecl;
    }
  return NULL_TREE;
}

/* If a given GIMPLE_CALL's callee is a FUNCTION_DECL, return it.
   Otherwise return NULL.  This function is analogous to
   get_callee_fndecl in tree land.  */

static inline tree
gimple_call_fndecl (const_gimple gs)
{
  return gimple_call_addr_fndecl (gimple_call_fn (gs));
}


/* Return the type returned by call statement GS.  */

static inline tree
gimple_call_return_type (const_gimple gs)
{
  tree type = gimple_call_fntype (gs);

  /* Internal calls have no fntype; fall back to the LHS type.  */
  if (type == NULL_TREE)
    return TREE_TYPE (gimple_call_lhs (gs));

  /* The type returned by a function is the type of its
     function type.  */
  return TREE_TYPE (type);
}


/* Return the static chain for call statement GS.  */

static inline tree
gimple_call_chain (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op (gs, 2);
}


/* Return a pointer to the static chain for call statement GS.  */

static inline tree *
gimple_call_chain_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op_ptr (gs, 2);
}

/* Set CHAIN to be the static chain for call statement GS.  */

static inline void
gimple_call_set_chain (gimple gs, tree chain)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gimple_set_op (gs, 2, chain);
}


/* Return the number of arguments used by call statement GS.  */

static inline unsigned
gimple_call_num_args (const_gimple gs)
{
  unsigned num_ops;
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  num_ops = gimple_num_ops (gs);
  /* Operands 0, 1 and 2 are the LHS, callee and static chain.  */
  return num_ops - 3;
}


/* Return the argument at position INDEX for call statement GS.
*/

static inline tree
gimple_call_arg (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  /* Arguments start at operand 3 (after LHS, callee and chain).  */
  return gimple_op (gs, index + 3);
}


/* Return a pointer to the argument at position INDEX for call
   statement GS.  */

static inline tree *
gimple_call_arg_ptr (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op_ptr (gs, index + 3);
}


/* Set ARG to be the argument at position INDEX for call statement GS.  */

static inline void
gimple_call_set_arg (gimple gs, unsigned index, tree arg)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gimple_set_op (gs, index + 3, arg);
}


/* If TAIL_P is true, mark call statement S as being a tail call
   (i.e., a call just before the exit of a function).  These calls are
   candidate for tail call optimization.  */

static inline void
gimple_call_set_tail (gimple s, bool tail_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (tail_p)
    s->gsbase.subcode |= GF_CALL_TAILCALL;
  else
    s->gsbase.subcode &= ~GF_CALL_TAILCALL;
}


/* Return true if GIMPLE_CALL S is marked as a tail call.  */

static inline bool
gimple_call_tail_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_TAILCALL) != 0;
}


/* If RETURN_SLOT_OPT_P is true mark GIMPLE_CALL S as valid for return
   slot optimization.  This transformation uses the target of the call
   expansion as the return slot for calls that return in memory.  */

static inline void
gimple_call_set_return_slot_opt (gimple s, bool return_slot_opt_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (return_slot_opt_p)
    s->gsbase.subcode |= GF_CALL_RETURN_SLOT_OPT;
  else
    s->gsbase.subcode &= ~GF_CALL_RETURN_SLOT_OPT;
}


/* Return true if S is marked for return slot optimization.  */

static inline bool
gimple_call_return_slot_opt_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_RETURN_SLOT_OPT) != 0;
}


/* If FROM_THUNK_P is true, mark GIMPLE_CALL S as being the jump from a
   thunk to the thunked-to function.
*/

static inline void
gimple_call_set_from_thunk (gimple s, bool from_thunk_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (from_thunk_p)
    s->gsbase.subcode |= GF_CALL_FROM_THUNK;
  else
    s->gsbase.subcode &= ~GF_CALL_FROM_THUNK;
}


/* Return true if GIMPLE_CALL S is a jump from a thunk.  */

static inline bool
gimple_call_from_thunk_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_FROM_THUNK) != 0;
}


/* If PASS_ARG_PACK_P is true, GIMPLE_CALL S is a stdarg call that needs the
   argument pack in its argument list.  */

static inline void
gimple_call_set_va_arg_pack (gimple s, bool pass_arg_pack_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (pass_arg_pack_p)
    s->gsbase.subcode |= GF_CALL_VA_ARG_PACK;
  else
    s->gsbase.subcode &= ~GF_CALL_VA_ARG_PACK;
}


/* Return true if GIMPLE_CALL S is a stdarg call that needs the
   argument pack in its argument list.  */

static inline bool
gimple_call_va_arg_pack_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_VA_ARG_PACK) != 0;
}


/* Return true if S is a noreturn call.  */

static inline bool
gimple_call_noreturn_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (gimple_call_flags (s) & ECF_NORETURN) != 0;
}


/* If NOTHROW_P is true, GIMPLE_CALL S is a call that is known to not throw
   even if the called function can throw in other cases.  */

static inline void
gimple_call_set_nothrow (gimple s, bool nothrow_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (nothrow_p)
    s->gsbase.subcode |= GF_CALL_NOTHROW;
  else
    s->gsbase.subcode &= ~GF_CALL_NOTHROW;
}

/* Return true if S is a nothrow call.  */

static inline bool
gimple_call_nothrow_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (gimple_call_flags (s) & ECF_NOTHROW) != 0;
}

/* If FOR_VAR is true, GIMPLE_CALL S is a call to builtin_alloca that
   is known to be emitted for VLA objects.  Those are wrapped by
   stack_save/stack_restore calls and hence can't lead to unbounded
   stack growth even when they occur in loops.
*/

static inline void
gimple_call_set_alloca_for_var (gimple s, bool for_var)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (for_var)
    s->gsbase.subcode |= GF_CALL_ALLOCA_FOR_VAR;
  else
    s->gsbase.subcode &= ~GF_CALL_ALLOCA_FOR_VAR;
}

/* Return true if S is a call to builtin_alloca emitted for VLA objects.  */

static inline bool
gimple_call_alloca_for_var_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_ALLOCA_FOR_VAR) != 0;
}

/* Copy all the GF_CALL_* flags from ORIG_CALL to DEST_CALL.  */

static inline void
gimple_call_copy_flags (gimple dest_call, gimple orig_call)
{
  GIMPLE_CHECK (dest_call, GIMPLE_CALL);
  GIMPLE_CHECK (orig_call, GIMPLE_CALL);
  dest_call->gsbase.subcode = orig_call->gsbase.subcode;
}


/* Return a pointer to the points-to solution for the set of call-used
   variables of the call CALL.  */

static inline struct pt_solution *
gimple_call_use_set (gimple call)
{
  GIMPLE_CHECK (call, GIMPLE_CALL);
  return &call->gimple_call.call_used;
}


/* Return a pointer to the points-to solution for the set of
   call-clobbered variables of the call CALL.  */

static inline struct pt_solution *
gimple_call_clobber_set (gimple call)
{
  GIMPLE_CHECK (call, GIMPLE_CALL);
  return &call->gimple_call.call_clobbered;
}


/* Returns true if this is a GIMPLE_ASSIGN or a GIMPLE_CALL with a
   non-NULL lhs.  */

static inline bool
gimple_has_lhs (gimple stmt)
{
  return (is_gimple_assign (stmt)
	  || (is_gimple_call (stmt)
	      && gimple_call_lhs (stmt) != NULL_TREE));
}


/* Return the code of the predicate computed by conditional statement GS.  */

static inline enum tree_code
gimple_cond_code (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return (enum tree_code) gs->gsbase.subcode;
}


/* Set CODE to be the predicate code for the conditional statement GS.  */

static inline void
gimple_cond_set_code (gimple gs, enum tree_code code)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gs->gsbase.subcode = code;
}


/* Return the LHS of the predicate computed by conditional statement GS.
*/

static inline tree
gimple_cond_lhs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 0);
}

/* Return the pointer to the LHS of the predicate computed by conditional
   statement GS.  */

static inline tree *
gimple_cond_lhs_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op_ptr (gs, 0);
}

/* Set LHS to be the LHS operand of the predicate computed by
   conditional statement GS.  */

static inline void
gimple_cond_set_lhs (gimple gs, tree lhs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 0, lhs);
}


/* Return the RHS operand of the predicate computed by conditional GS.  */

static inline tree
gimple_cond_rhs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 1);
}

/* Return the pointer to the RHS operand of the predicate computed by
   conditional GS.  */

static inline tree *
gimple_cond_rhs_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op_ptr (gs, 1);
}


/* Set RHS to be the RHS operand of the predicate computed by
   conditional statement GS.  */

static inline void
gimple_cond_set_rhs (gimple gs, tree rhs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 1, rhs);
}


/* Return the label used by conditional statement GS when its
   predicate evaluates to true.  */

static inline tree
gimple_cond_true_label (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 2);
}


/* Set LABEL to be the label used by conditional statement GS when its
   predicate evaluates to true.  */

static inline void
gimple_cond_set_true_label (gimple gs, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 2, label);
}


/* Set LABEL to be the label used by conditional statement GS when its
   predicate evaluates to false.  */

static inline void
gimple_cond_set_false_label (gimple gs, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 3, label);
}


/* Return the label used by conditional statement GS when its
   predicate evaluates to false.
*/

static inline tree
gimple_cond_false_label (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 3);
}


/* Set the conditional COND_STMT to be of the form 'if (1 == 0)'.  */

static inline void
gimple_cond_make_false (gimple gs)
{
  /* Encoded as 'true == false', which is always false.  */
  gimple_cond_set_lhs (gs, boolean_true_node);
  gimple_cond_set_rhs (gs, boolean_false_node);
  gs->gsbase.subcode = EQ_EXPR;
}


/* Set the conditional COND_STMT to be of the form 'if (1 == 1)'.  */

static inline void
gimple_cond_make_true (gimple gs)
{
  gimple_cond_set_lhs (gs, boolean_true_node);
  gimple_cond_set_rhs (gs, boolean_true_node);
  gs->gsbase.subcode = EQ_EXPR;
}

/* Check if conditional statement GS is of the form 'if (1 == 1)',
  'if (0 == 0)', 'if (1 != 0)' or 'if (0 != 1)' */

static inline bool
gimple_cond_true_p (const_gimple gs)
{
  tree lhs = gimple_cond_lhs (gs);
  tree rhs = gimple_cond_rhs (gs);
  enum tree_code code = gimple_cond_code (gs);

  /* Only constant boolean operands can be decided statically here.  */
  if (lhs != boolean_true_node && lhs != boolean_false_node)
    return false;

  if (rhs != boolean_true_node && rhs != boolean_false_node)
    return false;

  if (code == NE_EXPR && lhs != rhs)
    return true;

  if (code == EQ_EXPR && lhs == rhs)
      return true;

  return false;
}

/* Check if conditional statement GS is of the form 'if (1 != 1)',
   'if (0 != 0)', 'if (1 == 0)' or 'if (0 == 1)' */

static inline bool
gimple_cond_false_p (const_gimple gs)
{
  tree lhs = gimple_cond_lhs (gs);
  tree rhs = gimple_cond_rhs (gs);
  enum tree_code code = gimple_cond_code (gs);

  if (lhs != boolean_true_node && lhs != boolean_false_node)
    return false;

  if (rhs != boolean_true_node && rhs != boolean_false_node)
    return false;

  if (code == NE_EXPR && lhs == rhs)
    return true;

  if (code == EQ_EXPR && lhs != rhs)
      return true;

  return false;
}

/* Check if conditional statement GS is of the form 'if (var != 0)' or
   'if (var == 1)' */

static inline bool
gimple_cond_single_var_p (gimple gs)
{
  if (gimple_cond_code (gs) == NE_EXPR
      && gimple_cond_rhs (gs) == boolean_false_node)
    return true;

  if (gimple_cond_code (gs) == EQ_EXPR
      && gimple_cond_rhs (gs) == boolean_true_node)
    return true;

  return false;
}

/* Set the code, LHS and RHS of GIMPLE_COND STMT from CODE, LHS and RHS.  */

static inline void
gimple_cond_set_condition (gimple stmt, enum tree_code code, tree lhs, tree rhs)
{
  gimple_cond_set_code (stmt, code);
  gimple_cond_set_lhs (stmt, lhs);
  gimple_cond_set_rhs (stmt, rhs);
}

/* Return the LABEL_DECL node used by GIMPLE_LABEL statement GS.  */

static inline tree
gimple_label_label (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_LABEL);
  return gimple_op (gs, 0);
}


/* Set LABEL to be the LABEL_DECL node used by GIMPLE_LABEL statement
   GS.  */

static inline void
gimple_label_set_label (gimple gs, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_LABEL);
  gimple_set_op (gs, 0, label);
}


/* Return the destination of the unconditional jump GS.  */

static inline tree
gimple_goto_dest (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_GOTO);
  return gimple_op (gs, 0);
}


/* Set DEST to be the destination of the unconditional jump GS.  */

static inline void
gimple_goto_set_dest (gimple gs, tree dest)
{
  GIMPLE_CHECK (gs, GIMPLE_GOTO);
  gimple_set_op (gs, 0, dest);
}


/* Return the variables declared in the GIMPLE_BIND statement GS.  */

static inline tree
gimple_bind_vars (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  return gs->gimple_bind.vars;
}


/* Set VARS to be the set of variables declared in the GIMPLE_BIND
   statement GS.  */

static inline void
gimple_bind_set_vars (gimple gs, tree vars)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  gs->gimple_bind.vars = vars;
}


/* Append VARS to the set of variables declared in the GIMPLE_BIND
   statement GS.  */

static inline void
gimple_bind_append_vars (gimple gs, tree vars)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  gs->gimple_bind.vars = chainon (gs->gimple_bind.vars, vars);
}


/* Return the GIMPLE sequence contained in the GIMPLE_BIND statement GS.
*/

static inline gimple_seq
gimple_bind_body (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  return gs->gimple_bind.body;
}


/* Set SEQ to be the GIMPLE sequence contained in the GIMPLE_BIND
   statement GS.  */

static inline void
gimple_bind_set_body (gimple gs, gimple_seq seq)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  gs->gimple_bind.body = seq;
}


/* Append a statement to the end of a GIMPLE_BIND's body.  */

static inline void
gimple_bind_add_stmt (gimple gs, gimple stmt)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  gimple_seq_add_stmt (&gs->gimple_bind.body, stmt);
}


/* Append a sequence of statements to the end of a GIMPLE_BIND's body.  */

static inline void
gimple_bind_add_seq (gimple gs, gimple_seq seq)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  gimple_seq_add_seq (&gs->gimple_bind.body, seq);
}


/* Return the TREE_BLOCK node associated with GIMPLE_BIND statement
   GS.  This is analogous to the BIND_EXPR_BLOCK field in trees.  */

static inline tree
gimple_bind_block (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  return gs->gimple_bind.block;
}


/* Set BLOCK to be the TREE_BLOCK node associated with GIMPLE_BIND
   statement GS.  */

static inline void
gimple_bind_set_block (gimple gs, tree block)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  gcc_gimple_checking_assert (block == NULL_TREE
			      || TREE_CODE (block) == BLOCK);
  gs->gimple_bind.block = block;
}


/* Return the number of input operands for GIMPLE_ASM GS.  */

static inline unsigned
gimple_asm_ninputs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  return gs->gimple_asm.ni;
}


/* Return the number of output operands for GIMPLE_ASM GS.  */

static inline unsigned
gimple_asm_noutputs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  return gs->gimple_asm.no;
}


/* Return the number of clobber operands for GIMPLE_ASM GS.  */

static inline unsigned
gimple_asm_nclobbers (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  return gs->gimple_asm.nc;
}

/* Return the number of label operands for GIMPLE_ASM GS.
*/ static inline unsigned gimple_asm_nlabels (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASM); return gs->gimple_asm.nl; } /* Return input operand INDEX of GIMPLE_ASM GS. */ static inline tree gimple_asm_input_op (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.ni); return gimple_op (gs, index); } /* Return a pointer to input operand INDEX of GIMPLE_ASM GS. */ static inline tree * gimple_asm_input_op_ptr (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.ni); return gimple_op_ptr (gs, index); } /* Set IN_OP to be input operand INDEX in GIMPLE_ASM GS. */ static inline void gimple_asm_set_input_op (gimple gs, unsigned index, tree in_op) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.ni && TREE_CODE (in_op) == TREE_LIST); gimple_set_op (gs, index, in_op); } /* Return output operand INDEX of GIMPLE_ASM GS. */ static inline tree gimple_asm_output_op (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.no); return gimple_op (gs, index + gs->gimple_asm.ni); } /* Return a pointer to output operand INDEX of GIMPLE_ASM GS. */ static inline tree * gimple_asm_output_op_ptr (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.no); return gimple_op_ptr (gs, index + gs->gimple_asm.ni); } /* Set OUT_OP to be output operand INDEX in GIMPLE_ASM GS. */ static inline void gimple_asm_set_output_op (gimple gs, unsigned index, tree out_op) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.no && TREE_CODE (out_op) == TREE_LIST); gimple_set_op (gs, index + gs->gimple_asm.ni, out_op); } /* Return clobber operand INDEX of GIMPLE_ASM GS. 
*/ static inline tree gimple_asm_clobber_op (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.nc); return gimple_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.no); } /* Set CLOBBER_OP to be clobber operand INDEX in GIMPLE_ASM GS. */ static inline void gimple_asm_set_clobber_op (gimple gs, unsigned index, tree clobber_op) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.nc && TREE_CODE (clobber_op) == TREE_LIST); gimple_set_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.no, clobber_op); } /* Return label operand INDEX of GIMPLE_ASM GS. */ static inline tree gimple_asm_label_op (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.nl); return gimple_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.nc); } /* Set LABEL_OP to be label operand INDEX in GIMPLE_ASM GS. */ static inline void gimple_asm_set_label_op (gimple gs, unsigned index, tree label_op) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.nl && TREE_CODE (label_op) == TREE_LIST); gimple_set_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.nc, label_op); } /* Return the string representing the assembly instruction in GIMPLE_ASM GS. */ static inline const char * gimple_asm_string (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASM); return gs->gimple_asm.string; } /* Return true if GS is an asm statement marked volatile. */ static inline bool gimple_asm_volatile_p (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASM); return (gs->gsbase.subcode & GF_ASM_VOLATILE) != 0; } /* If VOLATLE_P is true, mark asm statement GS as volatile. */ static inline void gimple_asm_set_volatile (gimple gs, bool volatile_p) { GIMPLE_CHECK (gs, GIMPLE_ASM); if (volatile_p) gs->gsbase.subcode |= GF_ASM_VOLATILE; else gs->gsbase.subcode &= ~GF_ASM_VOLATILE; } /* If INPUT_P is true, mark asm GS as an ASM_INPUT. 
*/

static inline void
gimple_asm_set_input (gimple gs, bool input_p)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  if (input_p)
    gs->gsbase.subcode |= GF_ASM_INPUT;
  else
    gs->gsbase.subcode &= ~GF_ASM_INPUT;
}


/* Return true if asm GS is an ASM_INPUT.  */

static inline bool
gimple_asm_input_p (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  return (gs->gsbase.subcode & GF_ASM_INPUT) != 0;
}


/* Return the types handled by GIMPLE_CATCH statement GS.  */

static inline tree
gimple_catch_types (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CATCH);
  return gs->gimple_catch.types;
}


/* Return a pointer to the types handled by GIMPLE_CATCH statement GS.  */

static inline tree *
gimple_catch_types_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CATCH);
  return &gs->gimple_catch.types;
}


/* Return the GIMPLE sequence representing the body of the handler of
   GIMPLE_CATCH statement GS.  */

static inline gimple_seq
gimple_catch_handler (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CATCH);
  return gs->gimple_catch.handler;
}


/* Return a pointer to the GIMPLE sequence representing the body of
   the handler of GIMPLE_CATCH statement GS.  */

static inline gimple_seq *
gimple_catch_handler_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CATCH);
  return &gs->gimple_catch.handler;
}


/* Set T to be the set of types handled by GIMPLE_CATCH GS.  */

static inline void
gimple_catch_set_types (gimple gs, tree t)
{
  GIMPLE_CHECK (gs, GIMPLE_CATCH);
  gs->gimple_catch.types = t;
}


/* Set HANDLER to be the body of GIMPLE_CATCH GS.  */

static inline void
gimple_catch_set_handler (gimple gs, gimple_seq handler)
{
  GIMPLE_CHECK (gs, GIMPLE_CATCH);
  gs->gimple_catch.handler = handler;
}


/* Return the types handled by GIMPLE_EH_FILTER statement GS.  */

static inline tree
gimple_eh_filter_types (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
  return gs->gimple_eh_filter.types;
}


/* Return a pointer to the types handled by GIMPLE_EH_FILTER statement
   GS.
*/

static inline tree *
gimple_eh_filter_types_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
  return &gs->gimple_eh_filter.types;
}


/* Return the sequence of statement to execute when GIMPLE_EH_FILTER
   statement fails.  */

static inline gimple_seq
gimple_eh_filter_failure (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
  return gs->gimple_eh_filter.failure;
}


/* Set TYPES to be the set of types handled by GIMPLE_EH_FILTER GS.  */

static inline void
gimple_eh_filter_set_types (gimple gs, tree types)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
  gs->gimple_eh_filter.types = types;
}


/* Set FAILURE to be the sequence of statements to execute on failure
   for GIMPLE_EH_FILTER GS.  */

static inline void
gimple_eh_filter_set_failure (gimple gs, gimple_seq failure)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
  gs->gimple_eh_filter.failure = failure;
}

/* Get the function decl to be called by the MUST_NOT_THROW region.  */

static inline tree
gimple_eh_must_not_throw_fndecl (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_MUST_NOT_THROW);
  return gs->gimple_eh_mnt.fndecl;
}

/* Set the function decl to be called by GS to DECL.  */

static inline void
gimple_eh_must_not_throw_set_fndecl (gimple gs, tree decl)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_MUST_NOT_THROW);
  gs->gimple_eh_mnt.fndecl = decl;
}

/* GIMPLE_EH_ELSE accessors.  */

/* Return the normal-path body of GIMPLE_EH_ELSE GS.  */

static inline gimple_seq
gimple_eh_else_n_body (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
  return gs->gimple_eh_else.n_body;
}

/* Return the exception-path body of GIMPLE_EH_ELSE GS.  */

static inline gimple_seq
gimple_eh_else_e_body (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
  return gs->gimple_eh_else.e_body;
}

/* Set SEQ as the normal-path body of GIMPLE_EH_ELSE GS.  */

static inline void
gimple_eh_else_set_n_body (gimple gs, gimple_seq seq)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
  gs->gimple_eh_else.n_body = seq;
}

/* Set SEQ as the exception-path body of GIMPLE_EH_ELSE GS.  */

static inline void
gimple_eh_else_set_e_body (gimple gs, gimple_seq seq)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
  gs->gimple_eh_else.e_body = seq;
}

/* GIMPLE_TRY accessors. */

/* Return the kind of try block represented by GIMPLE_TRY GS.
This is either GIMPLE_TRY_CATCH or
   GIMPLE_TRY_FINALLY.  */

static inline enum gimple_try_flags
gimple_try_kind (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_TRY);
  return (enum gimple_try_flags) (gs->gsbase.subcode & GIMPLE_TRY_KIND);
}


/* Set the kind of try block represented by GIMPLE_TRY GS.  */

static inline void
gimple_try_set_kind (gimple gs, enum gimple_try_flags kind)
{
  GIMPLE_CHECK (gs, GIMPLE_TRY);
  gcc_gimple_checking_assert (kind == GIMPLE_TRY_CATCH
			      || kind == GIMPLE_TRY_FINALLY);
  if (gimple_try_kind (gs) != kind)
    gs->gsbase.subcode = (unsigned int) kind;
}


/* Return the GIMPLE_TRY_CATCH_IS_CLEANUP flag.  */

static inline bool
gimple_try_catch_is_cleanup (const_gimple gs)
{
  gcc_gimple_checking_assert (gimple_try_kind (gs) == GIMPLE_TRY_CATCH);
  return (gs->gsbase.subcode & GIMPLE_TRY_CATCH_IS_CLEANUP) != 0;
}


/* Return the sequence of statements used as the body for GIMPLE_TRY GS.  */

static inline gimple_seq
gimple_try_eval (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_TRY);
  return gs->gimple_try.eval;
}


/* Return the sequence of statements used as the cleanup body for
   GIMPLE_TRY GS.  */

static inline gimple_seq
gimple_try_cleanup (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_TRY);
  return gs->gimple_try.cleanup;
}


/* Set the GIMPLE_TRY_CATCH_IS_CLEANUP flag.  */

static inline void
gimple_try_set_catch_is_cleanup (gimple g, bool catch_is_cleanup)
{
  gcc_gimple_checking_assert (gimple_try_kind (g) == GIMPLE_TRY_CATCH);
  if (catch_is_cleanup)
    g->gsbase.subcode |= GIMPLE_TRY_CATCH_IS_CLEANUP;
  else
    g->gsbase.subcode &= ~GIMPLE_TRY_CATCH_IS_CLEANUP;
}


/* Set EVAL to be the sequence of statements to use as the body for
   GIMPLE_TRY GS.  */

static inline void
gimple_try_set_eval (gimple gs, gimple_seq eval)
{
  GIMPLE_CHECK (gs, GIMPLE_TRY);
  gs->gimple_try.eval = eval;
}


/* Set CLEANUP to be the sequence of statements to use as the cleanup
   body for GIMPLE_TRY GS.
*/

static inline void
gimple_try_set_cleanup (gimple gs, gimple_seq cleanup)
{
  GIMPLE_CHECK (gs, GIMPLE_TRY);
  gs->gimple_try.cleanup = cleanup;
}


/* Return the cleanup sequence for cleanup statement GS.  */

static inline gimple_seq
gimple_wce_cleanup (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
  return gs->gimple_wce.cleanup;
}


/* Set CLEANUP to be the cleanup sequence for GS.  */

static inline void
gimple_wce_set_cleanup (gimple gs, gimple_seq cleanup)
{
  GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
  gs->gimple_wce.cleanup = cleanup;
}


/* Return the CLEANUP_EH_ONLY flag for a WCE tuple.  */

static inline bool
gimple_wce_cleanup_eh_only (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
  /* The flag is stored directly in the subcode.  */
  return gs->gsbase.subcode != 0;
}


/* Set the CLEANUP_EH_ONLY flag for a WCE tuple.  */

static inline void
gimple_wce_set_cleanup_eh_only (gimple gs, bool eh_only_p)
{
  GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
  gs->gsbase.subcode = (unsigned int) eh_only_p;
}


/* Return the maximum number of arguments supported by GIMPLE_PHI GS.  */

static inline unsigned
gimple_phi_capacity (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_PHI);
  return gs->gimple_phi.capacity;
}


/* Return the number of arguments in GIMPLE_PHI GS.  This must always
   be exactly the number of incoming edges for the basic block holding
   GS.  */

static inline unsigned
gimple_phi_num_args (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_PHI);
  return gs->gimple_phi.nargs;
}


/* Return the SSA name created by GIMPLE_PHI GS.  */

static inline tree
gimple_phi_result (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_PHI);
  return gs->gimple_phi.result;
}

/* Return a pointer to the SSA name created by GIMPLE_PHI GS.  */

static inline tree *
gimple_phi_result_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_PHI);
  return &gs->gimple_phi.result;
}

/* Set RESULT to be the SSA name created by GIMPLE_PHI GS.
*/ static inline void gimple_phi_set_result (gimple gs, tree result) { GIMPLE_CHECK (gs, GIMPLE_PHI); gs->gimple_phi.result = result; } /* Return the PHI argument corresponding to incoming edge INDEX for GIMPLE_PHI GS. */ static inline struct phi_arg_d * gimple_phi_arg (gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_PHI); gcc_gimple_checking_assert (index <= gs->gimple_phi.capacity); return &(gs->gimple_phi.args[index]); } /* Set PHIARG to be the argument corresponding to incoming edge INDEX for GIMPLE_PHI GS. */ static inline void gimple_phi_set_arg (gimple gs, unsigned index, struct phi_arg_d * phiarg) { GIMPLE_CHECK (gs, GIMPLE_PHI); gcc_gimple_checking_assert (index <= gs->gimple_phi.nargs); gs->gimple_phi.args[index] = *phiarg; } /* Return the region number for GIMPLE_RESX GS. */ static inline int gimple_resx_region (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_RESX); return gs->gimple_eh_ctrl.region; } /* Set REGION to be the region number for GIMPLE_RESX GS. */ static inline void gimple_resx_set_region (gimple gs, int region) { GIMPLE_CHECK (gs, GIMPLE_RESX); gs->gimple_eh_ctrl.region = region; } /* Return the region number for GIMPLE_EH_DISPATCH GS. */ static inline int gimple_eh_dispatch_region (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_EH_DISPATCH); return gs->gimple_eh_ctrl.region; } /* Set REGION to be the region number for GIMPLE_EH_DISPATCH GS. */ static inline void gimple_eh_dispatch_set_region (gimple gs, int region) { GIMPLE_CHECK (gs, GIMPLE_EH_DISPATCH); gs->gimple_eh_ctrl.region = region; } /* Return the number of labels associated with the switch statement GS. */ static inline unsigned gimple_switch_num_labels (const_gimple gs) { unsigned num_ops; GIMPLE_CHECK (gs, GIMPLE_SWITCH); num_ops = gimple_num_ops (gs); gcc_gimple_checking_assert (num_ops > 1); return num_ops - 1; } /* Set NLABELS to be the number of labels for the switch statement GS. 
*/

static inline void
gimple_switch_set_num_labels (gimple g, unsigned nlabels)
{
  GIMPLE_CHECK (g, GIMPLE_SWITCH);
  /* Operand 0 is the index expression, hence the + 1.  */
  gimple_set_num_ops (g, nlabels + 1);
}


/* Return the index variable used by the switch statement GS.  */

static inline tree
gimple_switch_index (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  return gimple_op (gs, 0);
}


/* Return a pointer to the index variable for the switch statement GS.  */

static inline tree *
gimple_switch_index_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  return gimple_op_ptr (gs, 0);
}


/* Set INDEX to be the index variable for switch statement GS.  */

static inline void
gimple_switch_set_index (gimple gs, tree index)
{
  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  gcc_gimple_checking_assert (SSA_VAR_P (index) || CONSTANT_CLASS_P (index));
  gimple_set_op (gs, 0, index);
}


/* Return the label numbered INDEX.  The default label is 0, followed by any
   labels in a switch statement.  */

static inline tree
gimple_switch_label (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  gcc_gimple_checking_assert (gimple_num_ops (gs) > index + 1);
  return gimple_op (gs, index + 1);
}

/* Set the label number INDEX to LABEL.  0 is always the default label.  */

static inline void
gimple_switch_set_label (gimple gs, unsigned index, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  gcc_gimple_checking_assert (gimple_num_ops (gs) > index + 1
			      && (label == NULL_TREE
			          || TREE_CODE (label) == CASE_LABEL_EXPR));
  gimple_set_op (gs, index + 1, label);
}

/* Return the default label for a switch statement.  */

static inline tree
gimple_switch_default_label (const_gimple gs)
{
  return gimple_switch_label (gs, 0);
}

/* Set the default label for a switch statement.  */

static inline void
gimple_switch_set_default_label (gimple gs, tree label)
{
  gimple_switch_set_label (gs, 0, label);
}

/* Return true if GS is a GIMPLE_DEBUG statement.
*/

static inline bool
is_gimple_debug (const_gimple gs)
{
  return gimple_code (gs) == GIMPLE_DEBUG;
}

/* Return true if S is a GIMPLE_DEBUG BIND statement.  */

static inline bool
gimple_debug_bind_p (const_gimple s)
{
  /* The bind kind is recorded in the statement subcode.  */
  if (is_gimple_debug (s))
    return s->gsbase.subcode == GIMPLE_DEBUG_BIND;
  return false;
}

/* Return the variable bound in a GIMPLE_DEBUG bind statement.  */

static inline tree
gimple_debug_bind_get_var (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  return gimple_op (dbg, 0);
}

/* Return the value bound to the variable in a GIMPLE_DEBUG bind
   statement.  */

static inline tree
gimple_debug_bind_get_value (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  return gimple_op (dbg, 1);
}

/* Return a pointer to the value bound to the variable in a GIMPLE_DEBUG
   bind statement.  */

static inline tree *
gimple_debug_bind_get_value_ptr (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  return gimple_op_ptr (dbg, 1);
}

/* Set the variable bound in a GIMPLE_DEBUG bind statement.  */

static inline void
gimple_debug_bind_set_var (gimple dbg, tree var)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  gimple_set_op (dbg, 0, var);
}

/* Set the value bound to the variable in a GIMPLE_DEBUG bind
   statement.  */

static inline void
gimple_debug_bind_set_value (gimple dbg, tree value)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  gimple_set_op (dbg, 1, value);
}

/* The second operand of a GIMPLE_DEBUG_BIND, when the value was optimized
   away.  */
#define GIMPLE_DEBUG_BIND_NOVALUE NULL_TREE /* error_mark_node */

/* Remove the value bound to the variable in a GIMPLE_DEBUG bind statement.
*/

static inline void
gimple_debug_bind_reset_value (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  gimple_set_op (dbg, 1, GIMPLE_DEBUG_BIND_NOVALUE);
}

/* Return true if the GIMPLE_DEBUG bind statement is bound to a value.  */

static inline bool
gimple_debug_bind_has_value_p (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  return gimple_op (dbg, 1) != GIMPLE_DEBUG_BIND_NOVALUE;
}

/* The marker is only needed by the two functions above.  */
#undef GIMPLE_DEBUG_BIND_NOVALUE

/* Return true if S is a GIMPLE_DEBUG SOURCE BIND statement.  */

static inline bool
gimple_debug_source_bind_p (const_gimple s)
{
  if (is_gimple_debug (s))
    return s->gsbase.subcode == GIMPLE_DEBUG_SOURCE_BIND;
  return false;
}

/* Return the variable bound in a GIMPLE_DEBUG source bind statement.  */

static inline tree
gimple_debug_source_bind_get_var (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
  return gimple_op (dbg, 0);
}

/* Return the value bound to the variable in a GIMPLE_DEBUG source bind
   statement.  */

static inline tree
gimple_debug_source_bind_get_value (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
  return gimple_op (dbg, 1);
}

/* Return a pointer to the value bound to the variable in a GIMPLE_DEBUG
   source bind statement.  */

static inline tree *
gimple_debug_source_bind_get_value_ptr (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
  return gimple_op_ptr (dbg, 1);
}

/* Set the variable bound in a GIMPLE_DEBUG source bind statement.  */

static inline void
gimple_debug_source_bind_set_var (gimple dbg, tree var)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
  gimple_set_op (dbg, 0, var);
}

/* Set the value bound to the variable in a GIMPLE_DEBUG source bind
   statement.
*/

static inline void
gimple_debug_source_bind_set_value (gimple dbg, tree value)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
  gimple_set_op (dbg, 1, value);
}

/* Return the body for the OMP statement GS.  */

static inline gimple_seq
gimple_omp_body (gimple gs)
{
  return gs->omp.body;
}

/* Set BODY to be the body for the OMP statement GS.  */

static inline void
gimple_omp_set_body (gimple gs, gimple_seq body)
{
  gs->omp.body = body;
}

/* Return the name associated with OMP_CRITICAL statement GS.  */

static inline tree
gimple_omp_critical_name (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL);
  return gs->gimple_omp_critical.name;
}

/* Return a pointer to the name associated with OMP critical statement
   GS.  */

static inline tree *
gimple_omp_critical_name_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL);
  return &gs->gimple_omp_critical.name;
}

/* Set NAME to be the name associated with OMP critical statement GS.  */

static inline void
gimple_omp_critical_set_name (gimple gs, tree name)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL);
  gs->gimple_omp_critical.name = name;
}

/* Return the clauses associated with OMP_FOR GS.  */

static inline tree
gimple_omp_for_clauses (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  return gs->gimple_omp_for.clauses;
}

/* Return a pointer to the clauses associated with OMP_FOR GS.  */

static inline tree *
gimple_omp_for_clauses_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  return &gs->gimple_omp_for.clauses;
}

/* Set CLAUSES to be the list of clauses associated with OMP_FOR GS.  */

static inline void
gimple_omp_for_set_clauses (gimple gs, tree clauses)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gs->gimple_omp_for.clauses = clauses;
}

/* Get the collapse count (number of iteration dimensions) of OMP_FOR GS.  */

static inline size_t
gimple_omp_for_collapse (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  return gs->gimple_omp_for.collapse;
}

/* Return the index variable for OMP_FOR GS.
*/

static inline tree
gimple_omp_for_index (const_gimple gs, size_t i)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  /* I selects one of the COLLAPSE iteration dimensions.  */
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  return gs->gimple_omp_for.iter[i].index;
}

/* Return a pointer to the index variable for OMP_FOR GS, dimension I.  */

static inline tree *
gimple_omp_for_index_ptr (gimple gs, size_t i)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  return &gs->gimple_omp_for.iter[i].index;
}

/* Set INDEX to be the index variable for OMP_FOR GS, dimension I.  */

static inline void
gimple_omp_for_set_index (gimple gs, size_t i, tree index)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  gs->gimple_omp_for.iter[i].index = index;
}

/* Return the initial value for OMP_FOR GS, dimension I.  */

static inline tree
gimple_omp_for_initial (const_gimple gs, size_t i)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  return gs->gimple_omp_for.iter[i].initial;
}

/* Return a pointer to the initial value for OMP_FOR GS, dimension I.  */

static inline tree *
gimple_omp_for_initial_ptr (gimple gs, size_t i)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  return &gs->gimple_omp_for.iter[i].initial;
}

/* Set INITIAL to be the initial value for OMP_FOR GS, dimension I.  */

static inline void
gimple_omp_for_set_initial (gimple gs, size_t i, tree initial)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  gs->gimple_omp_for.iter[i].initial = initial;
}

/* Return the final value for OMP_FOR GS, dimension I.  */

static inline tree
gimple_omp_for_final (const_gimple gs, size_t i)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  return gs->gimple_omp_for.iter[i].final;
}

/* Return a pointer to the final value for OMP_FOR GS.
*/

static inline tree *
gimple_omp_for_final_ptr (gimple gs, size_t i)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  /* I selects one of the COLLAPSE iteration dimensions.  */
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  return &gs->gimple_omp_for.iter[i].final;
}

/* Set FINAL to be the final value for OMP_FOR GS, dimension I.  */

static inline void
gimple_omp_for_set_final (gimple gs, size_t i, tree final)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  gs->gimple_omp_for.iter[i].final = final;
}

/* Return the increment value for OMP_FOR GS, dimension I.  */

static inline tree
gimple_omp_for_incr (const_gimple gs, size_t i)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  return gs->gimple_omp_for.iter[i].incr;
}

/* Return a pointer to the increment value for OMP_FOR GS, dimension I.  */

static inline tree *
gimple_omp_for_incr_ptr (gimple gs, size_t i)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  return &gs->gimple_omp_for.iter[i].incr;
}

/* Set INCR to be the increment value for OMP_FOR GS, dimension I.  */

static inline void
gimple_omp_for_set_incr (gimple gs, size_t i, tree incr)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  gs->gimple_omp_for.iter[i].incr = incr;
}

/* Return the sequence of statements to execute before the OMP_FOR
   statement GS starts.  */

static inline gimple_seq
gimple_omp_for_pre_body (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  return gs->gimple_omp_for.pre_body;
}

/* Set PRE_BODY to be the sequence of statements to execute before the
   OMP_FOR statement GS starts.  */

static inline void
gimple_omp_for_set_pre_body (gimple gs, gimple_seq pre_body)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gs->gimple_omp_for.pre_body = pre_body;
}

/* Return the clauses associated with OMP_PARALLEL GS.
*/

static inline tree
gimple_omp_parallel_clauses (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
  return gs->gimple_omp_parallel.clauses;
}

/* Return a pointer to the clauses associated with OMP_PARALLEL GS.  */

static inline tree *
gimple_omp_parallel_clauses_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
  return &gs->gimple_omp_parallel.clauses;
}

/* Set CLAUSES to be the list of clauses associated with OMP_PARALLEL GS.  */

static inline void
gimple_omp_parallel_set_clauses (gimple gs, tree clauses)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
  gs->gimple_omp_parallel.clauses = clauses;
}

/* Return the child function used to hold the body of OMP_PARALLEL GS.  */

static inline tree
gimple_omp_parallel_child_fn (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
  return gs->gimple_omp_parallel.child_fn;
}

/* Return a pointer to the child function used to hold the body of
   OMP_PARALLEL GS.  */

static inline tree *
gimple_omp_parallel_child_fn_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
  return &gs->gimple_omp_parallel.child_fn;
}

/* Set CHILD_FN to be the child function for OMP_PARALLEL GS.  */

static inline void
gimple_omp_parallel_set_child_fn (gimple gs, tree child_fn)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
  gs->gimple_omp_parallel.child_fn = child_fn;
}

/* Return the artificial argument used to send variables and values
   from the parent to the children threads in OMP_PARALLEL GS.  */

static inline tree
gimple_omp_parallel_data_arg (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
  return gs->gimple_omp_parallel.data_arg;
}

/* Return a pointer to the data argument for OMP_PARALLEL GS.  */

static inline tree *
gimple_omp_parallel_data_arg_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
  return &gs->gimple_omp_parallel.data_arg;
}

/* Set DATA_ARG to be the data argument for OMP_PARALLEL GS.
*/

static inline void
gimple_omp_parallel_set_data_arg (gimple gs, tree data_arg)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
  gs->gimple_omp_parallel.data_arg = data_arg;
}

/* Return the clauses associated with OMP_TASK GS.  Note that OMP_TASK
   statements reuse the gimple_omp_parallel field layout, hence the
   member accesses below.  */

static inline tree
gimple_omp_task_clauses (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return gs->gimple_omp_parallel.clauses;
}

/* Return a pointer to the clauses associated with OMP_TASK GS.  */

static inline tree *
gimple_omp_task_clauses_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return &gs->gimple_omp_parallel.clauses;
}

/* Set CLAUSES to be the list of clauses associated with OMP_TASK GS.  */

static inline void
gimple_omp_task_set_clauses (gimple gs, tree clauses)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  gs->gimple_omp_parallel.clauses = clauses;
}

/* Return the child function used to hold the body of OMP_TASK GS.  */

static inline tree
gimple_omp_task_child_fn (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return gs->gimple_omp_parallel.child_fn;
}

/* Return a pointer to the child function used to hold the body of
   OMP_TASK GS.  */

static inline tree *
gimple_omp_task_child_fn_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return &gs->gimple_omp_parallel.child_fn;
}

/* Set CHILD_FN to be the child function for OMP_TASK GS.  */

static inline void
gimple_omp_task_set_child_fn (gimple gs, tree child_fn)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  gs->gimple_omp_parallel.child_fn = child_fn;
}

/* Return the artificial argument used to send variables and values
   from the parent to the children threads in OMP_TASK GS.  */

static inline tree
gimple_omp_task_data_arg (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return gs->gimple_omp_parallel.data_arg;
}

/* Return a pointer to the data argument for OMP_TASK GS.  */

static inline tree *
gimple_omp_task_data_arg_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return &gs->gimple_omp_parallel.data_arg;
}

/* Set DATA_ARG to be the data argument for OMP_TASK GS.
*/

static inline void
gimple_omp_task_set_data_arg (gimple gs, tree data_arg)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  gs->gimple_omp_parallel.data_arg = data_arg;
}

/* Return the clauses associated with GS, which must be either an
   OMP_PARALLEL or an OMP_TASK (both share the gimple_omp_parallel
   field layout).  */

static inline tree
gimple_omp_taskreg_clauses (const_gimple gs)
{
  /* Accept GIMPLE_OMP_PARALLEL outright; otherwise require
     GIMPLE_OMP_TASK.  */
  if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
    GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return gs->gimple_omp_parallel.clauses;
}

/* Return a pointer to the clauses associated with OMP_PARALLEL/OMP_TASK
   GS.  */

static inline tree *
gimple_omp_taskreg_clauses_ptr (gimple gs)
{
  if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
    GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return &gs->gimple_omp_parallel.clauses;
}

/* Set CLAUSES to be the list of clauses associated with
   OMP_PARALLEL/OMP_TASK GS.  */

static inline void
gimple_omp_taskreg_set_clauses (gimple gs, tree clauses)
{
  if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
    GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  gs->gimple_omp_parallel.clauses = clauses;
}

/* Return the child function used to hold the body of
   OMP_PARALLEL/OMP_TASK GS.  */

static inline tree
gimple_omp_taskreg_child_fn (const_gimple gs)
{
  if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
    GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return gs->gimple_omp_parallel.child_fn;
}

/* Return a pointer to the child function used to hold the body of
   OMP_PARALLEL/OMP_TASK GS.  */

static inline tree *
gimple_omp_taskreg_child_fn_ptr (gimple gs)
{
  if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
    GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return &gs->gimple_omp_parallel.child_fn;
}

/* Set CHILD_FN to be the child function for OMP_PARALLEL/OMP_TASK GS.  */

static inline void
gimple_omp_taskreg_set_child_fn (gimple gs, tree child_fn)
{
  if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
    GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  gs->gimple_omp_parallel.child_fn = child_fn;
}

/* Return the artificial argument used to send variables and values
   from the parent to the children threads in OMP_PARALLEL/OMP_TASK GS.
*/

static inline tree
gimple_omp_taskreg_data_arg (const_gimple gs)
{
  /* Accept GIMPLE_OMP_PARALLEL outright; otherwise require
     GIMPLE_OMP_TASK.  */
  if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
    GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return gs->gimple_omp_parallel.data_arg;
}

/* Return a pointer to the data argument for OMP_PARALLEL/OMP_TASK GS.  */

static inline tree *
gimple_omp_taskreg_data_arg_ptr (gimple gs)
{
  if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
    GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return &gs->gimple_omp_parallel.data_arg;
}

/* Set DATA_ARG to be the data argument for OMP_PARALLEL/OMP_TASK GS.  */

static inline void
gimple_omp_taskreg_set_data_arg (gimple gs, tree data_arg)
{
  if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
    GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  gs->gimple_omp_parallel.data_arg = data_arg;
}

/* Return the copy function used to hold the body of OMP_TASK GS.  */

static inline tree
gimple_omp_task_copy_fn (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return gs->gimple_omp_task.copy_fn;
}

/* Return a pointer to the copy function used to hold the body of
   OMP_TASK GS.  */

static inline tree *
gimple_omp_task_copy_fn_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return &gs->gimple_omp_task.copy_fn;
}

/* Set COPY_FN to be the copy function for OMP_TASK GS.  */

static inline void
gimple_omp_task_set_copy_fn (gimple gs, tree copy_fn)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  gs->gimple_omp_task.copy_fn = copy_fn;
}

/* Return size of the data block in bytes in OMP_TASK GS.  */

static inline tree
gimple_omp_task_arg_size (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return gs->gimple_omp_task.arg_size;
}

/* Return a pointer to the data block size for OMP_TASK GS.  */

static inline tree *
gimple_omp_task_arg_size_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return &gs->gimple_omp_task.arg_size;
}

/* Set ARG_SIZE to be the data block size for OMP_TASK GS.
*/

static inline void
gimple_omp_task_set_arg_size (gimple gs, tree arg_size)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  gs->gimple_omp_task.arg_size = arg_size;
}

/* Return align of the data block in bytes in OMP_TASK GS.  */

static inline tree
gimple_omp_task_arg_align (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return gs->gimple_omp_task.arg_align;
}

/* Return a pointer to the data block align for OMP_TASK GS.  */

static inline tree *
gimple_omp_task_arg_align_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  return &gs->gimple_omp_task.arg_align;
}

/* Set ARG_ALIGN to be the data block align for OMP_TASK GS.  */

static inline void
gimple_omp_task_set_arg_align (gimple gs, tree arg_align)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
  gs->gimple_omp_task.arg_align = arg_align;
}

/* Return the clauses associated with OMP_SINGLE GS.  */

static inline tree
gimple_omp_single_clauses (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE);
  return gs->gimple_omp_single.clauses;
}

/* Return a pointer to the clauses associated with OMP_SINGLE GS.  */

static inline tree *
gimple_omp_single_clauses_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE);
  return &gs->gimple_omp_single.clauses;
}

/* Set CLAUSES to be the clauses associated with OMP_SINGLE GS.  */

static inline void
gimple_omp_single_set_clauses (gimple gs, tree clauses)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE);
  gs->gimple_omp_single.clauses = clauses;
}

/* Return the clauses associated with OMP_SECTIONS GS.  */

static inline tree
gimple_omp_sections_clauses (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
  return gs->gimple_omp_sections.clauses;
}

/* Return a pointer to the clauses associated with OMP_SECTIONS GS.  */

static inline tree *
gimple_omp_sections_clauses_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
  return &gs->gimple_omp_sections.clauses;
}

/* Set CLAUSES to be the set of clauses associated with OMP_SECTIONS GS.
*/

static inline void
gimple_omp_sections_set_clauses (gimple gs, tree clauses)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
  gs->gimple_omp_sections.clauses = clauses;
}

/* Return the control variable associated with the GIMPLE_OMP_SECTIONS
   in GS.  */

static inline tree
gimple_omp_sections_control (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
  return gs->gimple_omp_sections.control;
}

/* Return a pointer to the control variable associated with the
   GIMPLE_OMP_SECTIONS GS.  */

static inline tree *
gimple_omp_sections_control_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
  return &gs->gimple_omp_sections.control;
}

/* Set CONTROL to be the control variable associated with the
   GIMPLE_OMP_SECTIONS in GS.  */

static inline void
gimple_omp_sections_set_control (gimple gs, tree control)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
  gs->gimple_omp_sections.control = control;
}

/* Set COND to be the condition code for OMP_FOR GS, dimension I.  COND
   must be a comparison code.  */

static inline void
gimple_omp_for_set_cond (gimple gs, size_t i, enum tree_code cond)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (TREE_CODE_CLASS (cond) == tcc_comparison
                              && i < gs->gimple_omp_for.collapse);
  gs->gimple_omp_for.iter[i].cond = cond;
}

/* Return the condition code associated with OMP_FOR GS, dimension I.  */

static inline enum tree_code
gimple_omp_for_cond (const_gimple gs, size_t i)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  return gs->gimple_omp_for.iter[i].cond;
}

/* Set the value being stored in an atomic store.  */

static inline void
gimple_omp_atomic_store_set_val (gimple g, tree val)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
  g->gimple_omp_atomic_store.val = val;
}

/* Return the value being stored in an atomic store.  */

static inline tree
gimple_omp_atomic_store_val (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
  return g->gimple_omp_atomic_store.val;
}

/* Return a pointer to the value being stored in an atomic store.
*/

static inline tree *
gimple_omp_atomic_store_val_ptr (gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
  return &g->gimple_omp_atomic_store.val;
}

/* Set the LHS of an atomic load.  */

static inline void
gimple_omp_atomic_load_set_lhs (gimple g, tree lhs)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
  g->gimple_omp_atomic_load.lhs = lhs;
}

/* Get the LHS of an atomic load.  */

static inline tree
gimple_omp_atomic_load_lhs (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
  return g->gimple_omp_atomic_load.lhs;
}

/* Return a pointer to the LHS of an atomic load.  */

static inline tree *
gimple_omp_atomic_load_lhs_ptr (gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
  return &g->gimple_omp_atomic_load.lhs;
}

/* Set the RHS of an atomic load.  */

static inline void
gimple_omp_atomic_load_set_rhs (gimple g, tree rhs)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
  g->gimple_omp_atomic_load.rhs = rhs;
}

/* Get the RHS of an atomic load.  */

static inline tree
gimple_omp_atomic_load_rhs (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
  return g->gimple_omp_atomic_load.rhs;
}

/* Return a pointer to the RHS of an atomic load.  */

static inline tree *
gimple_omp_atomic_load_rhs_ptr (gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
  return &g->gimple_omp_atomic_load.rhs;
}

/* Get the definition of the control variable in a GIMPLE_OMP_CONTINUE.  */

static inline tree
gimple_omp_continue_control_def (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
  return g->gimple_omp_continue.control_def;
}

/* The same as above, but return the address.  */

static inline tree *
gimple_omp_continue_control_def_ptr (gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
  return &g->gimple_omp_continue.control_def;
}

/* Set the definition of the control variable in a GIMPLE_OMP_CONTINUE.
*/

static inline void
gimple_omp_continue_set_control_def (gimple g, tree def)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
  g->gimple_omp_continue.control_def = def;
}

/* Get the use of the control variable in a GIMPLE_OMP_CONTINUE.  */

static inline tree
gimple_omp_continue_control_use (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
  return g->gimple_omp_continue.control_use;
}

/* The same as above, but return the address.  */

static inline tree *
gimple_omp_continue_control_use_ptr (gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
  return &g->gimple_omp_continue.control_use;
}

/* Set the use of the control variable in a GIMPLE_OMP_CONTINUE.  */

static inline void
gimple_omp_continue_set_control_use (gimple g, tree use)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
  g->gimple_omp_continue.control_use = use;
}

/* Return the body for the GIMPLE_TRANSACTION statement GS.  */

static inline gimple_seq
gimple_transaction_body (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
  return gs->gimple_transaction.body;
}

/* Return the label associated with a GIMPLE_TRANSACTION.  */

static inline tree
gimple_transaction_label (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
  return gs->gimple_transaction.label;
}

/* Return a pointer to the label associated with a GIMPLE_TRANSACTION.  */

static inline tree *
gimple_transaction_label_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
  return &gs->gimple_transaction.label;
}

/* Return the subcode associated with a GIMPLE_TRANSACTION.  */

static inline unsigned int
gimple_transaction_subcode (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
  return gs->gsbase.subcode;
}

/* Set BODY to be the body for the GIMPLE_TRANSACTION statement GS.  */

static inline void
gimple_transaction_set_body (gimple gs, gimple_seq body)
{
  GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
  gs->gimple_transaction.body = body;
}

/* Set the label associated with a GIMPLE_TRANSACTION.
*/

static inline void
gimple_transaction_set_label (gimple gs, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
  gs->gimple_transaction.label = label;
}

/* Set the subcode associated with a GIMPLE_TRANSACTION.  */

static inline void
gimple_transaction_set_subcode (gimple gs, unsigned int subcode)
{
  GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
  gs->gsbase.subcode = subcode;
}

/* Return a pointer to the return value for GIMPLE_RETURN GS.  */

static inline tree *
gimple_return_retval_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_RETURN);
  return gimple_op_ptr (gs, 0);
}

/* Return the return value for GIMPLE_RETURN GS.  */

static inline tree
gimple_return_retval (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_RETURN);
  return gimple_op (gs, 0);
}

/* Set RETVAL to be the return value for GIMPLE_RETURN GS.  */

static inline void
gimple_return_set_retval (gimple gs, tree retval)
{
  GIMPLE_CHECK (gs, GIMPLE_RETURN);
  gimple_set_op (gs, 0, retval);
}

/* Returns true when the gimple statement STMT is any of the OpenMP types.  */

#define CASE_GIMPLE_OMP				\
    case GIMPLE_OMP_PARALLEL:			\
    case GIMPLE_OMP_TASK:			\
    case GIMPLE_OMP_FOR:			\
    case GIMPLE_OMP_SECTIONS:			\
    case GIMPLE_OMP_SECTIONS_SWITCH:		\
    case GIMPLE_OMP_SINGLE:			\
    case GIMPLE_OMP_SECTION:			\
    case GIMPLE_OMP_MASTER:			\
    case GIMPLE_OMP_ORDERED:			\
    case GIMPLE_OMP_CRITICAL:			\
    case GIMPLE_OMP_RETURN:			\
    case GIMPLE_OMP_ATOMIC_LOAD:		\
    case GIMPLE_OMP_ATOMIC_STORE:		\
    case GIMPLE_OMP_CONTINUE

static inline bool
is_gimple_omp (const_gimple stmt)
{
  switch (gimple_code (stmt))
    {
    CASE_GIMPLE_OMP:
      return true;
    default:
      return false;
    }
}

/* Returns TRUE if statement G is a GIMPLE_NOP.  */

static inline bool
gimple_nop_p (const_gimple g)
{
  return gimple_code (g) == GIMPLE_NOP;
}

/* Return true if GS is a GIMPLE_RESX.  */

static inline bool
is_gimple_resx (const_gimple gs)
{
  return gimple_code (gs) == GIMPLE_RESX;
}

/* Return the predictor of GIMPLE_PREDICT statement GS.
*/

static inline enum br_predictor
gimple_predict_predictor (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_PREDICT);
  /* The subcode packs the predictor with the GF_PREDICT_TAKEN flag;
     mask the flag out to recover the predictor.  */
  return (enum br_predictor) (gs->gsbase.subcode & ~GF_PREDICT_TAKEN);
}

/* Set the predictor of GIMPLE_PREDICT statement GS to PREDICT.  */

static inline void
gimple_predict_set_predictor (gimple gs, enum br_predictor predictor)
{
  GIMPLE_CHECK (gs, GIMPLE_PREDICT);
  /* Preserve the GF_PREDICT_TAKEN bit, replace the predictor bits.  */
  gs->gsbase.subcode = (gs->gsbase.subcode & GF_PREDICT_TAKEN)
		       | (unsigned) predictor;
}

/* Return the outcome of GIMPLE_PREDICT statement GS.  */

static inline enum prediction
gimple_predict_outcome (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_PREDICT);
  return (gs->gsbase.subcode & GF_PREDICT_TAKEN) ? TAKEN : NOT_TAKEN;
}

/* Set the outcome of GIMPLE_PREDICT statement GS to OUTCOME.  */

static inline void
gimple_predict_set_outcome (gimple gs, enum prediction outcome)
{
  GIMPLE_CHECK (gs, GIMPLE_PREDICT);
  if (outcome == TAKEN)
    gs->gsbase.subcode |= GF_PREDICT_TAKEN;
  else
    gs->gsbase.subcode &= ~GF_PREDICT_TAKEN;
}

/* Return the type of the main expression computed by STMT.  Return
   void_type_node if the statement computes nothing.  */

static inline tree
gimple_expr_type (const_gimple stmt)
{
  enum gimple_code code = gimple_code (stmt);

  if (code == GIMPLE_ASSIGN || code == GIMPLE_CALL)
    {
      tree type;
      /* In general we want to pass out a type that can be substituted
         for both the RHS and the LHS types if there is a possibly
	 useless conversion involved.  That means returning the
	 original RHS type as far as we can reconstruct it.  */
      if (code == GIMPLE_CALL)
	type = gimple_call_return_type (stmt);
      else
	switch (gimple_assign_rhs_code (stmt))
	  {
	  case POINTER_PLUS_EXPR:
	    type = TREE_TYPE (gimple_assign_rhs1 (stmt));
	    break;

	  default:
	    /* As fallback use the type of the LHS.  */
	    type = TREE_TYPE (gimple_get_lhs (stmt));
	    break;
	  }
      return type;
    }
  else if (code == GIMPLE_COND)
    return boolean_type_node;
  else
    return void_type_node;
}

/* Return true if TYPE is a suitable type for a scalar register variable.
*/

static inline bool
is_gimple_reg_type (tree type)
{
  return !AGGREGATE_TYPE_P (type);
}

/* Return a new iterator pointing to GIMPLE_SEQ's first statement.  */

static inline gimple_stmt_iterator
gsi_start (gimple_seq seq)
{
  gimple_stmt_iterator i;

  i.ptr = gimple_seq_first (seq);
  i.seq = seq;
  /* Derive the basic block from the first statement, if any.  */
  i.bb = (i.ptr && i.ptr->stmt) ? gimple_bb (i.ptr->stmt) : NULL;

  return i;
}

/* Return a new iterator pointing to the first statement in basic
   block BB.  */

static inline gimple_stmt_iterator
gsi_start_bb (basic_block bb)
{
  gimple_stmt_iterator i;
  gimple_seq seq;

  seq = bb_seq (bb);
  i.ptr = gimple_seq_first (seq);
  i.seq = seq;
  i.bb = bb;

  return i;
}

/* Return a new iterator initially pointing to GIMPLE_SEQ's last
   statement.  */

static inline gimple_stmt_iterator
gsi_last (gimple_seq seq)
{
  gimple_stmt_iterator i;

  i.ptr = gimple_seq_last (seq);
  i.seq = seq;
  i.bb = (i.ptr && i.ptr->stmt) ? gimple_bb (i.ptr->stmt) : NULL;

  return i;
}

/* Return a new iterator pointing to the last statement in basic
   block BB.  */

static inline gimple_stmt_iterator
gsi_last_bb (basic_block bb)
{
  gimple_stmt_iterator i;
  gimple_seq seq;

  seq = bb_seq (bb);
  i.ptr = gimple_seq_last (seq);
  i.seq = seq;
  i.bb = bb;

  return i;
}

/* Return true if I is at the end of its sequence.  */

static inline bool
gsi_end_p (gimple_stmt_iterator i)
{
  return i.ptr == NULL;
}

/* Return true if I is one statement before the end of its sequence.  */

static inline bool
gsi_one_before_end_p (gimple_stmt_iterator i)
{
  return i.ptr != NULL && i.ptr->next == NULL;
}

/* Advance the iterator to the next gimple statement.  */

static inline void
gsi_next (gimple_stmt_iterator *i)
{
  i->ptr = i->ptr->next;
}

/* Advance the iterator to the previous gimple statement.  */

static inline void
gsi_prev (gimple_stmt_iterator *i)
{
  i->ptr = i->ptr->prev;
}

/* Return the current stmt.
*/

static inline gimple
gsi_stmt (gimple_stmt_iterator i)
{
  return i.ptr->stmt;
}

/* Return a block statement iterator that points to the first non-label
   statement in block BB.  */

static inline gimple_stmt_iterator
gsi_after_labels (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_start_bb (bb);

  while (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL)
    gsi_next (&gsi);

  return gsi;
}

/* Advance the iterator to the next non-debug gimple statement.  */

static inline void
gsi_next_nondebug (gimple_stmt_iterator *i)
{
  do
    {
      gsi_next (i);
    }
  while (!gsi_end_p (*i) && is_gimple_debug (gsi_stmt (*i)));
}

/* Advance the iterator to the previous non-debug gimple statement.  */

static inline void
gsi_prev_nondebug (gimple_stmt_iterator *i)
{
  do
    {
      gsi_prev (i);
    }
  while (!gsi_end_p (*i) && is_gimple_debug (gsi_stmt (*i)));
}

/* Return a new iterator pointing to the first non-debug statement in
   basic block BB.  */

static inline gimple_stmt_iterator
gsi_start_nondebug_bb (basic_block bb)
{
  gimple_stmt_iterator i = gsi_start_bb (bb);

  if (!gsi_end_p (i) && is_gimple_debug (gsi_stmt (i)))
    gsi_next_nondebug (&i);

  return i;
}

/* Return a new iterator pointing to the last non-debug statement in
   basic block BB.  */

static inline gimple_stmt_iterator
gsi_last_nondebug_bb (basic_block bb)
{
  gimple_stmt_iterator i = gsi_last_bb (bb);

  if (!gsi_end_p (i) && is_gimple_debug (gsi_stmt (i)))
    gsi_prev_nondebug (&i);

  return i;
}

/* Return a pointer to the current stmt.

  NOTE: You may want to use gsi_replace on the iterator itself,
  as this performs additional bookkeeping that will not be done
  if you simply assign through a pointer returned by gsi_stmt_ptr.  */

static inline gimple *
gsi_stmt_ptr (gimple_stmt_iterator *i)
{
  return &i->ptr->stmt;
}

/* Return the basic block associated with this iterator.  */

static inline basic_block
gsi_bb (gimple_stmt_iterator i)
{
  return i.bb;
}

/* Return the sequence associated with this iterator.
*/ static inline gimple_seq gsi_seq (gimple_stmt_iterator i) { return i.seq; } enum gsi_iterator_update { GSI_NEW_STMT, /* Only valid when single statement is added, move iterator to it. */ GSI_SAME_STMT, /* Leave the iterator at the same statement. */ GSI_CONTINUE_LINKING /* Move iterator to whatever position is suitable for linking other statements in the same direction. */ }; /* In gimple-iterator.c */ gimple_stmt_iterator gsi_start_phis (basic_block); gimple_seq gsi_split_seq_after (gimple_stmt_iterator); gimple_seq gsi_split_seq_before (gimple_stmt_iterator *); void gsi_replace (gimple_stmt_iterator *, gimple, bool); void gsi_insert_before (gimple_stmt_iterator *, gimple, enum gsi_iterator_update); void gsi_insert_before_without_update (gimple_stmt_iterator *, gimple, enum gsi_iterator_update); void gsi_insert_seq_before (gimple_stmt_iterator *, gimple_seq, enum gsi_iterator_update); void gsi_insert_seq_before_without_update (gimple_stmt_iterator *, gimple_seq, enum gsi_iterator_update); void gsi_insert_after (gimple_stmt_iterator *, gimple, enum gsi_iterator_update); void gsi_insert_after_without_update (gimple_stmt_iterator *, gimple, enum gsi_iterator_update); void gsi_insert_seq_after (gimple_stmt_iterator *, gimple_seq, enum gsi_iterator_update); void gsi_insert_seq_after_without_update (gimple_stmt_iterator *, gimple_seq, enum gsi_iterator_update); void gsi_remove (gimple_stmt_iterator *, bool); gimple_stmt_iterator gsi_for_stmt (gimple); void gsi_move_after (gimple_stmt_iterator *, gimple_stmt_iterator *); void gsi_move_before (gimple_stmt_iterator *, gimple_stmt_iterator *); void gsi_move_to_bb_end (gimple_stmt_iterator *, struct basic_block_def *); void gsi_insert_on_edge (edge, gimple); void gsi_insert_seq_on_edge (edge, gimple_seq); basic_block gsi_insert_on_edge_immediate (edge, gimple); basic_block gsi_insert_seq_on_edge_immediate (edge, gimple_seq); void gsi_commit_one_edge_insert (edge, basic_block *); void gsi_commit_edge_inserts (void); gimple 
gimple_call_copy_skip_args (gimple, bitmap); /* Convenience routines to walk all statements of a gimple function. Note that this is useful exclusively before the code is converted into SSA form. Once the program is in SSA form, the standard operand interface should be used to analyze/modify statements. */ struct walk_stmt_info { /* Points to the current statement being walked. */ gimple_stmt_iterator gsi; /* Additional data that the callback functions may want to carry through the recursion. */ void *info; /* Pointer map used to mark visited tree nodes when calling walk_tree on each operand. If set to NULL, duplicate tree nodes will be visited more than once. */ struct pointer_set_t *pset; /* Operand returned by the callbacks. This is set when calling walk_gimple_seq. If the walk_stmt_fn or walk_tree_fn callback returns non-NULL, this field will contain the tree returned by the last callback. */ tree callback_result; /* Indicates whether the operand being examined may be replaced with something that matches is_gimple_val (if true) or something slightly more complicated (if false). "Something" technically means the common subset of is_gimple_lvalue and is_gimple_rhs, but we never try to form anything more complicated than that, so we don't bother checking. Also note that CALLBACK should update this flag while walking the sub-expressions of a statement. For instance, when walking the statement 'foo (&var)', the flag VAL_ONLY will initially be set to true, however, when walking &var, the operand of that ADDR_EXPR does not need to be a GIMPLE value. */ BOOL_BITFIELD val_only : 1; /* True if we are currently walking the LHS of an assignment. */ BOOL_BITFIELD is_lhs : 1; /* Optional. Set to true by the callback functions if they made any changes. */ BOOL_BITFIELD changed : 1; /* True if we're interested in location information. */ BOOL_BITFIELD want_locations : 1; /* True if we've removed the statement that was processed. 
*/ BOOL_BITFIELD removed_stmt : 1; }; /* Callback for walk_gimple_stmt. Called for every statement found during traversal. The first argument points to the statement to walk. The second argument is a flag that the callback sets to 'true' if it the callback handled all the operands and sub-statements of the statement (the default value of this flag is 'false'). The third argument is an anonymous pointer to data to be used by the callback. */ typedef tree (*walk_stmt_fn) (gimple_stmt_iterator *, bool *, struct walk_stmt_info *); gimple walk_gimple_seq (gimple_seq, walk_stmt_fn, walk_tree_fn, struct walk_stmt_info *); tree walk_gimple_stmt (gimple_stmt_iterator *, walk_stmt_fn, walk_tree_fn, struct walk_stmt_info *); tree walk_gimple_op (gimple, walk_tree_fn, struct walk_stmt_info *); #ifdef GATHER_STATISTICS /* Enum and arrays used for allocation stats. Keep in sync with gimple.c:gimple_alloc_kind_names. */ enum gimple_alloc_kind { gimple_alloc_kind_assign, /* Assignments. */ gimple_alloc_kind_phi, /* PHI nodes. */ gimple_alloc_kind_cond, /* Conditionals. */ gimple_alloc_kind_seq, /* Sequences. */ gimple_alloc_kind_rest, /* Everything else. */ gimple_alloc_kind_all }; extern int gimple_alloc_counts[]; extern int gimple_alloc_sizes[]; /* Return the allocation kind for a given stmt CODE. */ static inline enum gimple_alloc_kind gimple_alloc_kind (enum gimple_code code) { switch (code) { case GIMPLE_ASSIGN: return gimple_alloc_kind_assign; case GIMPLE_PHI: return gimple_alloc_kind_phi; case GIMPLE_COND: return gimple_alloc_kind_cond; default: return gimple_alloc_kind_rest; } } #endif /* GATHER_STATISTICS */ extern void dump_gimple_statistics (void); /* In gimple-fold.c. 
*/ void gimplify_and_update_call_from_tree (gimple_stmt_iterator *, tree); tree gimple_fold_builtin (gimple); bool fold_stmt (gimple_stmt_iterator *); bool fold_stmt_inplace (gimple_stmt_iterator *); tree get_symbol_constant_value (tree); tree canonicalize_constructor_val (tree); extern tree maybe_fold_and_comparisons (enum tree_code, tree, tree, enum tree_code, tree, tree); extern tree maybe_fold_or_comparisons (enum tree_code, tree, tree, enum tree_code, tree, tree); bool gimple_val_nonnegative_real_p (tree); #endif /* GCC_GIMPLE_H */
flux.c
/* Author: Mohammed Al Farhan
   Email:  mohammed.farhan@kaust.edu.sa */

#include <string.h>
#include <stdint.h>
#include <omp.h>
#include <math.h>
#include "inc/ktime.h"
#include "inc/geometry.h"
#include "inc/ker/phy.h"

/* MAG0/MAG1: +/- one sixth; applied to the surface-triangle cross product
   below so the resulting normal carries 1/3 of the triangle area (the
   cross product itself has magnitude twice the area).  The sign split
   between MAG0 and MAG1 orients the normal away from the grid interior
   (see the comment at the point of use).  */
#define MAG0 (0.5 / 3)
#define MAG1 (-MAG0)

/* Calculates the residual.

   Accumulates the flux residual r[node*bsz + 0..3] (pressure + three
   momentum components per node) from three contributions:
     1. interior edge fluxes (upwinded via an eigen-decomposition of the
        flux Jacobian; BETA comes from inc/ker/phy.h -- presumably an
        artificial-compressibility constant, TODO confirm),
     2. solid-surface face pressure integrals,
     3. free-boundary (far-field) face fluxes.
   Timing is recorded into flux->t via setktime/compute_time.  */
void
compute_flux(struct flux *restrict flux)
{
  struct ktime ktime;
  setktime(&ktime);

  /* Unpack the flux structure into restrict-qualified locals: promises
     the compiler there is no aliasing between the arrays.  */
  const size_t bsz = flux->bsz;           /* block size: values per node (4 used here) */
  const size_t nfnodes = flux->nfnodes;   /* number of free-boundary nodes */
  const size_t dofs = flux->dofs;         /* total length of r */
  const uint32_t snfc = flux->snfc;       /* number of solid-face groups */
  /* Free-stream reference state.  */
  const double pressure = flux->pressure;
  const double velocity_u = flux->velocity_u;
  const double velocity_v = flux->velocity_v;
  const double velocity_w = flux->velocity_w;
  const double *restrict f_xyz0 = flux->f_xyz0;  /* free-boundary face normals (scaled by area) */
  const double *restrict f_xyz1 = flux->f_xyz1;
  const double *restrict f_xyz2 = flux->f_xyz2;
  const double *restrict xyz0 = flux->xyz0;      /* node coordinates */
  const double *restrict xyz1 = flux->xyz1;
  const double *restrict xyz2 = flux->xyz2;
  const uint32_t *restrict ie = flux->ie;        /* per-thread edge ranges: thread t owns [ie[t], ie[t+1]) */
  const uint32_t *restrict part = flux->part;    /* node -> owning thread id */
  const uint32_t *restrict snfic = flux->snfic;  /* solid-face group offsets */
  const uint32_t *restrict n0 = flux->n0;        /* edge endpoint nodes */
  const uint32_t *restrict n1 = flux->n1;
  const uint32_t *restrict nfptr = flux->nfptr;  /* free-boundary node ids */
  const uint32_t *restrict sn0 = flux->sn0;      /* solid-face triangle nodes */
  const uint32_t *restrict sn1 = flux->sn1;
  const uint32_t *restrict sn2 = flux->sn2;
  const double *restrict x0 = flux->x0;          /* edge unit normal (x0,x1,x2) and length x3 */
  const double *restrict x1 = flux->x1;
  const double *restrict x2 = flux->x2;
  const double *restrict x3 = flux->x3;
  const double *restrict q = flux->q;            /* state vector per node */
  const double *restrict gradx0 = flux->gradx0;  /* nodal gradients of q */
  const double *restrict gradx1 = flux->gradx1;
  const double *restrict gradx2 = flux->gradx2;
  double *restrict r = flux->r;                  /* output residual */

  memset(r, 0, dofs * sizeof(double));
  __assume_aligned(r, 64);  /* Intel-compiler intrinsic: r is 64-byte aligned */

  /* Calculates the fluxes on the face and performs the flux balance.
     Edges are statically partitioned by ie[]; each thread walks its own
     edge range, and the (part[node] == t) guards below ensure that only
     the thread owning a node updates its residual, avoiding races.  */
#pragma omp parallel
  {
    uint32_t t = omp_get_thread_num();
    uint32_t ie0 = ie[t];
    uint32_t ie1 = ie[t+1];
    uint32_t i;
    for(i = ie0; i < ie1; i++)
    {
      uint32_t node0 = n0[i];
      uint32_t node1 = n1[i];
      /* Edge unit normal (xn,yn,zn) and edge weight ln.  */
      double xn = x0[i];
      double yn = x1[i];
      double zn = x2[i];
      double ln = x3[i];
      /* Edge midpoint.  */
      double xmean = 0.5f * (xyz0[node0] + xyz0[node1]);
      double ymean = 0.5f * (xyz1[node0] + xyz1[node1]);
      double zmean = 0.5f * (xyz2[node0] + xyz2[node1]);
      /* Now lets get our other 2 vectors.  For first vector, use {1,0,0}
         and subtract off the component in the direction of the face
         normal.  If the inner product of {1,0,0} is close to unity,
         use {0,1,0} instead.  */
      double X1, Y1, Z1;
      double dot = xn;
      if(fabs(dot) < 0.95f)
      {
        X1 = 1.f - dot * xn; Y1 = -dot * yn; Z1 = -dot * zn;
      }
      else
      {
        dot = yn;
        X1 = -dot * xn; Y1 = 1.f - dot * yn; Z1 = -dot * zn;
      }
      /* Normalize the first vector.  */
      double size = X1 * X1; size += Y1 * Y1; size += Z1 * Z1;
      size = sqrt(size);
      X1 /= size; Y1 /= size; Z1 /= size;
      /* Take cross-product of normal and V1 to get V2, completing an
         orthonormal frame (n, V1, V2).  */
      double X2 = yn * Z1; X2 -= zn * Y1;
      double Y2 = zn * X1; Y2 -= xn * Z1;
      double Z2 = xn * Y1; Z2 -= yn * X1;
      /* Get variables on "left" and "right" side of face: reconstruct
         the state at the midpoint from each endpoint's value plus its
         gradient times the offset vector.  */
      double rx = xmean - xyz0[node0];
      double ry = ymean - xyz1[node0];
      double rz = zmean - xyz2[node0];
      uint32_t idx0 = bsz * node0;
      uint32_t idx1 = bsz * node1;
      // Pressure
      double pL = q[idx0 + 0] + gradx0[idx0 + 0] * rx;
      pL += gradx1[idx0 + 0] * ry; pL += gradx2[idx0 + 0] * rz;
      // Velocity u
      double uL = q[idx0 + 1] + gradx0[idx0 + 1] * rx;
      uL += gradx1[idx0 + 1] * ry; uL += gradx2[idx0 + 1] * rz;
      // Velocity v
      double vL = q[idx0 + 2] + gradx0[idx0 + 2] * rx;
      vL += gradx1[idx0 + 2] * ry; vL += gradx2[idx0 + 2] * rz;
      // Velocity w
      double wL = q[idx0 + 3] + gradx0[idx0 + 3] * rx;
      wL += gradx1[idx0 + 3] * ry; wL += gradx2[idx0 + 3] * rz;
      /* Left normal velocity.  */
      double ubarL = xn * uL; ubarL += yn * vL; ubarL += zn * wL;
      rx = xmean - xyz0[node1];
      ry = ymean - xyz1[node1];
      rz = zmean - xyz2[node1];
      // Pressure
      double pR = q[idx1 + 0] + gradx0[idx1 + 0] * rx;
      pR += gradx1[idx1 + 0] * ry; pR += gradx2[idx1 + 0] * rz;
      // Velocity u
      double uR = q[idx1 + 1] + gradx0[idx1 + 1] * rx;
      uR += gradx1[idx1 + 1] * ry; uR += gradx2[idx1 + 1] * rz;
      // Velocity v
      double vR = q[idx1 + 2] + gradx0[idx1 + 2] * rx;
      vR += gradx1[idx1 + 2] * ry; vR += gradx2[idx1 + 2] * rz;
      // Velocity w
      double wR = q[idx1 + 3] + gradx0[idx1 + 3] * rx;
      wR += gradx1[idx1 + 3] * ry; wR += gradx2[idx1 + 3] * rz;
      /* Right normal velocity.  */
      double ubarR = xn * uR; ubarR += yn * vR; ubarR += zn * wR;
      /* Compute averages */
      //double p = 0.5f * (pL + pR);
      double u = 0.5f * (uL + uR);
      double v = 0.5f * (vL + vR);
      double w = 0.5f * (wL + wR);
      double ubar = xn * u; ubar += yn * v; ubar += zn * w;
      /* phi1..phi9: intermediate products used by both the eigenvector
         rows and the characteristic-jump projections below.  */
      double phi1 = xn * BETA; phi1 += u * ubar;
      double phi2 = yn * BETA; phi2 += v * ubar;
      double phi3 = zn * BETA; phi3 += w * ubar;
      double phi4 = Y2 * phi3; phi4 -= Z2 * phi2;
      double phi5 = Z2 * phi1; phi5 -= X2 * phi3;
      double phi6 = X2 * phi2; phi6 -= Y2 * phi1;
      double phi7 = Z1 * phi2; phi7 -= Y1 * phi3;
      double phi8 = X1 * phi3; phi8 -= Z1 * phi1;
      double phi9 = Y1 * phi1; phi9 -= X1 * phi2;
      double c2 = ubar * ubar + BETA;  /* (pseudo) sound speed squared */
      double c = sqrt(c2);
      /* Now compute eigenvalues, eigenvectors, and strengths */
      double eig1 = fabs(ubar);
      double eig2 = fabs(ubar);
      double eig3 = fabs(ubar + c);
      double eig4 = fabs(ubar - c);
      /* Jumps across the face.  */
      double dp = pR - pL;
      double du = uR - uL;
      double dv = vR - vL;
      double dw = wR - wL;
      /* Components of T(inverse) */
      double ti11 = u * phi4; ti11 += v * phi5; ti11 += w * phi6;
      ti11 = -ti11 / BETA;
      double ti21 = u * phi7; ti21 += v * phi8; ti21 += w * phi9;
      ti21 = -ti21 / BETA;
      double ti31 = 0.5f * (c - ubar); ti31 /= BETA;
      double ti41 = -0.5f * (c + ubar); ti41 /= BETA;
      /* jumps (T(inverse) * dq) */
      double dv1 = ti11 * dp; dv1 += phi4 * du; dv1 += phi5 * dv;
      dv1 += phi6 * dw; dv1 /= c2;
      double dv2 = ti21 * dp; dv2 += phi7 * du; dv2 += phi8 * dv;
      dv2 += phi9 * dw; dv2 /= c2;
      double dv3 = 2.f * ti31 * dp; dv3 += xn * du; dv3 += yn * dv;
      dv3 += zn * dw; dv3 *= 0.5f / c2;
      double dv4 = 2.f * ti41 * dp; dv4 += xn * du; dv4 += yn * dv;
      dv4 += zn * dw; dv4 *= 0.5f / c2;
      /* Now get elements of T */
      double r13 = c * BETA;
      double r23 = u * (ubar + c); r23 += xn * BETA;
      double r33 = v * (ubar + c); r33 += yn * BETA;
      double r43 = w * (ubar + c); r43 += zn * BETA;
      double r14 = -c * BETA;
      double r24 = u * (ubar - c); r24 += xn * BETA;
      double r34 = v * (ubar - c); r34 += yn * BETA;
      double r44 = w * (ubar - c); r44 += zn * BETA;
      /* Calculate T * |lambda| * T(inverse) applied to the jump: the
         upwind dissipation term.  */
      double t1 = eig3 * r13 * dv3 + eig4 * r14 * dv4;
      double t2 = eig1 * X1 * dv1 + eig2 * X2 * dv2;
      t2 += eig3 * r23 * dv3 + eig4 * r24 * dv4;
      double t3 = eig1 * Y1 * dv1 + eig2 * Y2 * dv2;
      t3 += eig3 * r33 * dv3 + eig4 * r34 * dv4;
      double t4 = eig1 * Z1 * dv1 + eig2 * Z2 * dv2;
      t4 += eig3 * r43 * dv3 + eig4 * r44 * dv4;
      /* Modify to calculate .5(fl + fr) from nodes instead of
         extrapolated ones */
      double fluxp1 = ln * BETA * ubarL;
      double fluxp2 = ln * (uL * ubarL + xn * pL);
      double fluxp3 = ln * (vL * ubarL + yn * pL);
      double fluxp4 = ln * (wL * ubarL + zn * pL);
      /* Now the right side */
      double fluxm1 = ln * BETA * ubarR;
      double fluxm2 = ln * (uR * ubarR + xn * pR);
      double fluxm3 = ln * (vR * ubarR + yn * pR);
      double fluxm4 = ln * (wR * ubarR + zn * pR);
      /* Central flux average minus upwind dissipation.  */
      double res1 = 0.5f * (fluxp1 + fluxm1 - ln * t1);
      double res2 = 0.5f * (fluxp2 + fluxm2 - ln * t2);
      double res3 = 0.5f * (fluxp3 + fluxm3 - ln * t3);
      double res4 = 0.5f * (fluxp4 + fluxm4 - ln * t4);
      /* Scatter to the two endpoints with opposite signs; the branchless
         ternary keeps the update conditional on node ownership so that
         no two threads write the same node.  */
      r[idx0 + 0] = (part[node0] == t) ? (r[idx0 + 0] + res1) : r[idx0 + 0];
      r[idx0 + 1] = (part[node0] == t) ? (r[idx0 + 1] + res2) : r[idx0 + 1];
      r[idx0 + 2] = (part[node0] == t) ? (r[idx0 + 2] + res3) : r[idx0 + 2];
      r[idx0 + 3] = (part[node0] == t) ? (r[idx0 + 3] + res4) : r[idx0 + 3];
      r[idx1 + 0] = (part[node1] == t) ? (r[idx1 + 0] - res1) : r[idx1 + 0];
      r[idx1 + 1] = (part[node1] == t) ? (r[idx1 + 1] - res2) : r[idx1 + 1];
      r[idx1 + 2] = (part[node1] == t) ? (r[idx1 + 2] - res3) : r[idx1 + 2];
      r[idx1 + 3] = (part[node1] == t) ? (r[idx1 + 3] - res4) : r[idx1 + 3];
    }
  }

  /* Solid-surface faces, processed group by group; faces within one
     group are done in parallel (presumably the snfic grouping ensures
     faces in a group do not share nodes -- TODO confirm, otherwise the
     r[] updates below would race).  */
  uint32_t i;
  for(i = 0; i < snfc; i++)
  {
    uint32_t if0 = snfic[i];
    uint32_t if1 = snfic[i+1];
    uint32_t j;
#pragma omp parallel for
    for(j = if0; j < if1; j++)
    {
      uint32_t node0 = sn0[j];
      uint32_t node1 = sn1[j];
      uint32_t node2 = sn2[j];
      double p1 = q[bsz * node0];
      double p2 = q[bsz * node1];
      double p3 = q[bsz * node2];
      /* Triangle edge vectors.  */
      double ax = xyz0[node1] - xyz0[node0];
      double ay = xyz1[node1] - xyz1[node0];
      double az = xyz2[node1] - xyz2[node0];
      double bx = xyz0[node2] - xyz0[node0];
      double by = xyz1[node2] - xyz1[node0];
      double bz = xyz2[node2] - xyz2[node0];
      /* Normal points away from grid interior.  Magnitude is 1/3 area
         of surface triangle.  */
      double xn = ay * bz; xn -= az * by; xn *= MAG1;
      double yn = ax * bz; yn -= az * bx; yn *= MAG0;
      double zn = ax * by; zn -= ay * bx; zn *= MAG1;
      /* Pressure weighting per vertex: 3/4 own value, 1/8 each of the
         other two.  */
      double pa = 0.125f * (p2 + p3); pa += 0.75f * p1;
      double pb = 0.125f * (p3 + p1); pb += 0.75f * p2;
      double pc = 0.125f * (p1 + p2); pc += 0.75f * p3;
      uint32_t idx;
      idx = bsz * node0;
      r[idx + 1] += xn * pa; r[idx + 2] += yn * pa; r[idx + 3] += zn * pa;
      idx = bsz * node1;
      r[idx + 1] += xn * pb; r[idx + 2] += yn * pb; r[idx + 3] += zn * pb;
      idx = bsz * node2;
      r[idx + 1] += xn * pc; r[idx + 2] += yn * pc; r[idx + 3] += zn * pc;
    }
  }

  /* Do the free boundaries */
#pragma omp parallel for
  for(i = 0; i < nfnodes; i++)
  {
    uint32_t n = nfptr[i];
    /* Get normal and "other" 2 vectors.  Remember that fxn, fyn and fzn
       has the magnitude of the face contained in it.  */
    double xn = f_xyz0[i];
    double yn = f_xyz1[i];
    double zn = f_xyz2[i];
    double area = xn * xn; area += yn * yn; area += zn * zn;
    area = sqrt(area);
    xn /= area; yn /= area; zn /= area;
    /* Now lets get our other 2 vectors.  For first vector, use {1,0,0}
       and subtract off the component in the direction of the face
       normal.  If the inner product of {1,0,0} is close to unity,
       use {0,1,0} instead.  */
    double X1, Y1, Z1;
    double dot = xn;
    if(fabs(dot) < 0.95f)
    {
      X1 = 1.f - dot * xn; Y1 = -dot * yn; Z1 = -dot * zn;
    }
    else
    {
      dot = yn;
      X1 = -dot * xn; Y1 = 1.f - dot * yn; Z1 = -dot * zn;
    }
    /* Normalize the first vector (V1) */
    double size = X1 * X1; size += Y1 * Y1; size += Z1 * Z1;
    size = sqrt(size);
    X1 /= size; Y1 /= size; Z1 /= size;
    /* Take cross-product of normal with V1 to get V2 */
    double X2 = yn * Z1; X2 -= zn * Y1;
    double Y2 = zn * X1; Y2 -= xn * Z1;
    double Z2 = xn * Y1; Z2 -= yn * X1;
    /* Calculate elements of T and T(inverse) evaluated at free-stream */
    double ubar0 = xn * velocity_u;
    ubar0 += yn * velocity_v;
    ubar0 += zn * velocity_w;
    double c20 = ubar0 * ubar0 + BETA;
    double c0 = sqrt(c20);
    double phi1 = xn * BETA; phi1 += velocity_u * ubar0;
    double phi2 = yn * BETA; phi2 += velocity_v * ubar0;
    double phi3 = zn * BETA; phi3 += velocity_w * ubar0;
    double phi4 = Y2 * phi3; phi4 -= Z2 * phi2;
    double phi5 = Z2 * phi1; phi5 -= X2 * phi3;
    double phi6 = X2 * phi2; phi6 -= Y2 * phi1;
    double phi7 = Z1 * phi2; phi7 -= Y1 * phi3;
    double phi8 = X1 * phi3; phi8 -= Z1 * phi1;
    double phi9 = Y1 * phi1; phi9 -= X1 * phi2;
    double t13 = c0 * BETA;
    double t23 = velocity_u * (ubar0 + c0); t23 += xn * BETA;
    double t33 = velocity_v * (ubar0 + c0); t33 += yn * BETA;
    double t43 = velocity_w * (ubar0 + c0); t43 += zn * BETA;
    double t14 = -c0 * BETA;
    double t24 = velocity_u * (ubar0 - c0); t24 += xn * BETA;
    double t34 = velocity_v * (ubar0 - c0); t34 += yn * BETA;
    double t44 = velocity_w * (ubar0 - c0); t44 += zn * BETA;
    double ti11 = velocity_u * phi4; ti11 += velocity_v * phi5;
    ti11 += velocity_w * phi6; ti11 = -ti11/BETA;
    double ti21 = velocity_u * phi7; ti21 += velocity_v * phi8;
    ti21 += velocity_w * phi9; ti21 = -ti21/BETA;
    double ti31 = 0.5f * (c0 - ubar0); ti31 /= BETA;
    double ti41 = -0.5f * (c0 + ubar0); ti41 /= BETA;
    /* Now, get the variables on the "inside" */
    double pi = q[bsz * n + 0];
    double ui = q[bsz * n + 1];
    double vi = q[bsz * n + 2];
    double wi = q[bsz * n + 3];
    double un = xn * ui; un += yn * vi; un += zn * wi;
    /* If ubar is negative (inflow), take the reference condition from
       outside; otherwise (outflow) use the interior state.  */
    double pr, ur, vr, wr;
    if(un > 0.f)
    {
      pr = pi; ur = ui; vr = vi; wr = wi;
    }
    else
    {
      pr = pressure; ur = velocity_u; vr = velocity_v; wr = velocity_w;
    }
    /* Set rhs */
    double rhs1 = ti11 * pr; rhs1 += phi4 * ur; rhs1 += phi5 * vr;
    rhs1 += phi6 * wr; rhs1 /= c20;
    double rhs2 = ti21 * pr; rhs2 += phi7 * ur; rhs2 += phi8 * vr;
    rhs2 += phi9 * wr; rhs2 /= c20;
    double rhs3 = 2.f * ti31 * pi; rhs3 += xn * ui; rhs3 += yn * vi;
    rhs3 += zn * wi; rhs3 = 0.5f * rhs3 / c20;
    double rhs4 = 2.f * ti41 * pressure; rhs4 += xn * velocity_u;
    rhs4 += yn * velocity_v; rhs4 += zn * velocity_w;
    rhs4 = 0.5f * rhs4 / c20;
    /* Now do matrix multiplication to get values on boundary */
    double pb = t13 * rhs3; pb += t14 * rhs4;
    double ub = X1 * rhs1; ub += X2 * rhs2; ub += t23 * rhs3; ub += t24 * rhs4;
    double vb = Y1 * rhs1; vb += Y2 * rhs2; vb += t33 * rhs3; vb += t34 * rhs4;
    double wb = Z1 * rhs1; wb += Z2 * rhs2; wb += t43 * rhs3; wb += t44 * rhs4;
    double ubar = xn * ub; ubar += yn * vb; ubar += zn * wb;
    /* Accumulate the boundary flux (scaled back by the face area).  */
    uint32_t idx = bsz * n;
    r[idx + 0] += area * BETA * ubar;
    r[idx + 1] += area * (ub * ubar + xn * pb);
    r[idx + 2] += area * (vb * ubar + yn * pb);
    r[idx + 3] += area * (wb * ubar + zn * pb);
  }

  compute_time(&ktime, flux->t);
}
panama_fmt_plug.c
/* Panama cracker patch for JtR. Hacked together during May of 2013 by Dhiru
 * Kholia <dhiru at openwall.com>.
 *
 * This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and
 * it is hereby released to the general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted. */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_panama_;
#elif FMT_REGISTERS_H
john_register_one(&fmt_panama_);
#else

#include <string.h>

#include "arch.h"
#include "sph_panama.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"

#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
// OMP_SCALE tuned on core i7 quad core HT
//   1 - 217k
//  64 - 1930k
// 128 - 2099k
// 256 - 2204k *** set to this level
// 512 - 2203k
//  1k - 2124k
#ifndef OMP_SCALE
#ifdef __MIC__
#define OMP_SCALE 8
#else
#define OMP_SCALE 256
#endif // __MIC__
#endif // OMP_SCALE
#endif // _OPENMP

#include "memdbg.h"

#define FORMAT_LABEL            "Panama"
#define FORMAT_NAME             ""
#define FORMAT_TAG              "$panama$"
#define TAG_LENGTH              (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME          "Panama 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        -1
#define PLAINTEXT_LENGTH        125
#define BINARY_SIZE             32
#define SALT_SIZE               0
#define BINARY_ALIGN            4
#define SALT_ALIGN              1
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1

/* Self-test vectors: each hash appears once bare and once with the
   "$panama$" tag, to exercise both accepted input forms.  */
static struct fmt_tests panama__tests[] = {
	{"049d698307d8541f22870dfa0a551099d3d02bc6d57c610a06a4585ed8d35ff8", "T"},
	{"$panama$049d698307d8541f22870dfa0a551099d3d02bc6d57c610a06a4585ed8d35ff8", "T"},
	{"a2a70386b81fb918be17f00ff3e3b376a0462c4dc2eec7f2c63202c8874c037d", "abc"},
	{"$panama$a2a70386b81fb918be17f00ff3e3b376a0462c4dc2eec7f2c63202c8874c037d", "abc"},
	{"017686a23c4af3b9c074888ec76f893945d541cd17ee8011b2bd0ee2d581db34", "john"},
	{"$panama$017686a23c4af3b9c074888ec76f893945d541cd17ee8011b2bd0ee2d581db34", "john"},
	{"3919248ab4c8dea4843663c532db9823169a71d03b0f918082c9f53748dea1e8", "passweird"},
	{"$panama$3919248ab4c8dea4843663c532db9823169a71d03b0f918082c9f53748dea1e8", "passweird"},
	{NULL}
};

/* Per-candidate plaintext and digest buffers, sized in init().  */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];

/* Allocate the key/digest arrays; with OpenMP, scale max_keys_per_crypt
   by threads * OMP_SCALE so each crypt_all() call has enough work.  */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(sizeof(*saved_key),
	                       self->params.max_keys_per_crypt);
	crypt_out = mem_calloc(sizeof(*crypt_out),
	                       self->params.max_keys_per_crypt);
}

static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}

/* Accept a hex digest of exactly BINARY_SIZE*2 chars, with or without
   the "$panama$" prefix.  */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p;
	int extra;

	p = ciphertext;
	if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
		p += TAG_LENGTH;
	if (hexlenl(p, &extra) != BINARY_SIZE*2 || extra)
		return 0;

	return 1;
}

/* Canonicalize: always emit the tagged form.  */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[TAG_LENGTH + BINARY_SIZE * 2 + 1];

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		ciphertext += TAG_LENGTH;

	memcpy(out, FORMAT_TAG, TAG_LENGTH);
	strnzcpy(out + TAG_LENGTH, ciphertext, BINARY_SIZE * 2 + 1);
	return out;
}

/* Decode the hex digest into raw bytes (static buffer, as the formats
   interface expects).  */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		p = strrchr(ciphertext, '$') + 1;
	else
		p = ciphertext;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) |
		         atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}

/* Partial-hash accessors over the first 32 bits of the digest.  */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }

/* Hash all queued keys.  NOTE: without _OPENMP the for loop is compiled
   out and only index 0 is processed -- that is safe because in
   non-OpenMP builds max_keys_per_crypt stays at MAX_KEYS_PER_CRYPT (1);
   init() only scales it when _OPENMP is defined.  */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		sph_panama_context ctx;

		sph_panama_init(&ctx);
		sph_panama(&ctx, saved_key[index], strlen(saved_key[index]));
		sph_panama_close(&ctx, (unsigned char*)crypt_out[index]);
	}
	return count;
}

/* Quick scan: compares only the first ARCH_SIZE bytes of each digest as
   a filter; cmp_one() below does the full-width comparison.  The same
   count==1 argument as in crypt_all() applies to the #ifdef'd loop.  */
static int cmp_all(void *binary, int count)
{
	int index = 0;
#ifdef _OPENMP
	for (; index < count; index++)
#endif
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

/* Full digest already compared in cmp_one(); nothing further to check.  */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Store a candidate key, truncated to PLAINTEXT_LENGTH.  */
static void panama_set_key(char *key, int index)
{
	int saved_len = strlen(key);
	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

struct fmt_main fmt_panama_ = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ FORMAT_TAG },
		panama__tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		fmt_default_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		panama_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
PIOpenMPFalseSharing.c
#include <stdlib.h> #include <stdio.h> #include <omp.h> unsigned long num_steps = 1000000000ul; // 1E9 double step; #define NUM_THREADS 2 int main() { int nthreads = 0; long double pi, sum[NUM_THREADS]; step = 1.0 / (double) num_steps; omp_set_num_threads(NUM_THREADS); #pragma omp parallel { int i, id, nthrds; double x; id = omp_get_thread_num(); nthrds = omp_get_num_threads(); if (id == 0) nthreads = nthrds; for (i = id, sum[id] = 0.0; i < num_steps; i = i + nthrds) { x = (i + 0.5) * step; sum[id] += 4.0 / (1.0 + x * x); } } int i; for (i = 0, pi = 0.0; i < nthreads; i++) pi += sum[i] * step; printf("Iterations\t%ld\n", num_steps); printf("PI\t\t%.100Lf\n", pi); return EXIT_SUCCESS; }
fci_contract.c
/* Copyright 2014-2020 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Author: Qiming Sun <osirpt.sun@gmail.com> */ #include <stdlib.h> #include <stdint.h> #include <string.h> #include <math.h> #include <assert.h> //#include <omp.h> #include "config.h" #include "vhf/fblas.h" #include "np_helper/np_helper.h" #include "fci.h" // for (16e,16o) ~ 11 MB buffer = 120 * 12870 * 8 #define STRB_BLKSIZE 112 /* * CPU timing of single thread can be estimated: * na*nb*nnorb*8(bytes)*5 / (mem_freq*64 (*2 if dual-channel mem)) * + na*nb*nnorb**2 (*2 for spin1, *1 for spin0) * / (CPU_freq (*4 for SSE3 blas, or *6-8 for AVX blas)) * where the 5 times memory accesses are 3 in prog_a_t1, prog0_b_t1, * spread_b_t1 and 2 in spread_a_t1 * * multi threads * na*nb*nnorb*8(bytes)*2 / (mem_freq*64 (*2 if dual-channel mem)) due to single thread * + na*nb*nnorb*8(bytes)*3 / max_mem_bandwidth due to N-thread * + na*nb*nnorb**2 (*2 for spin1, *1 for spin0) * / (CPU_freq (*4 for SSE3 blas, or *6-8 for AVX blas)) / num_threads */ /* *********************************************************** * * Need the permutation symmetry * h2e[i,j,k,l] = h2e[j,i,k,l] = h2e[i,j,l,k] = h2e[j,i,l,k] * *********************************************************** */ /* * optimize for OpenMP, to reduce memory/CPU data transfer * add software prefetch, it's especially important for OpenMP */ /* * For given stra_id, spread alpah-strings (which can propagate to stra_id) * into t1[:nstrb,nnorb] * str1-of-alpha -> 
create/annihilate -> str0-of-alpha * ci0[:nstra,:nstrb] is contiguous in beta-strings * bcount control the number of beta strings to be calculated. * for spin=0 system, only lower triangle of the intermediate ci vector * needs to be calculated */ void FCIprog_a_t1(double *ci0, double *t1, int bcount, int stra_id, int strb_id, int norb, int nstrb, int nlinka, _LinkTrilT *clink_indexa) { ci0 += strb_id; int j, k, ia, sign; size_t str1; const _LinkTrilT *tab = clink_indexa + stra_id * nlinka; double *pt1, *pci; for (j = 0; j < nlinka; j++) { ia = EXTRACT_IA (tab[j]); str1 = EXTRACT_ADDR(tab[j]); sign = EXTRACT_SIGN(tab[j]); pt1 = t1 + ia*bcount; pci = ci0 + str1*nstrb; if (sign == 0) { break; } else if (sign > 0) { for (k = 0; k < bcount; k++) { pt1[k] += pci[k]; } } else if (sign < 0) { for (k = 0; k < bcount; k++) { pt1[k] -= pci[k]; } } } } /* * For given stra_id, spread all beta-strings into t1[:nstrb,nnorb] * all str0-of-beta -> create/annihilate -> str1-of-beta * ci0[:nstra,:nstrb] is contiguous in beta-strings * bcount control the number of beta strings to be calculated. 
 * for spin=0 system, only lower triangle of the intermediate ci vector
 * needs to be calculated
 */

/*
 * Gather beta-string excitations into the intermediate t1:
 * for each beta string str0 in the block [strb_id, strb_id+bcount),
 * t1[ia,str0] += sign * ci0[stra_id,str1] over the compressed link table.
 * A stored sign of 0 marks the end of the (possibly shortened) link row.
 */
void FCIprog_b_t1(double *ci0, double *t1, int bcount, int stra_id, int strb_id,
                  int norb, int nstrb, int nlinkb, _LinkTrilT *clink_indexb)
{
        int j, ia, str0, str1, sign;
        const _LinkTrilT *tab = clink_indexb + strb_id * nlinkb;
        /* row of ci0 for the fixed alpha string stra_id */
        double *pci = ci0 + stra_id*(size_t)nstrb;
        for (str0 = 0; str0 < bcount; str0++) {
                for (j = 0; j < nlinkb; j++) {
                        ia   = EXTRACT_IA  (tab[j]);
                        str1 = EXTRACT_ADDR(tab[j]);
                        sign = EXTRACT_SIGN(tab[j]);
                        if (sign == 0) {
                                break;  /* end of this link row */
                        } else {
                                t1[ia*bcount+str0] += sign * pci[str1];
                        }
                }
                tab += nlinkb;
        }
}

/*
 * spread t1 into ci1
 */
/* Scatter t1 back through the alpha link table: ci1[str1, strb_id:strb_id+bcount]
 * accumulates +/- t1[ia, :] depending on the excitation sign. */
void FCIspread_a_t1(double *ci1, double *t1, int bcount, int stra_id, int strb_id,
                    int norb, int nstrb, int nlinka, _LinkTrilT *clink_indexa)
{
        ci1 += strb_id;
        int j, k, ia, sign;
        size_t str1;
        const _LinkTrilT *tab = clink_indexa + stra_id * nlinka;
        double *cp0, *cp1;
        for (j = 0; j < nlinka; j++) {
                ia   = EXTRACT_IA  (tab[j]);
                str1 = EXTRACT_ADDR(tab[j]);
                sign = EXTRACT_SIGN(tab[j]);
                cp0 = t1 + ia*bcount;
                cp1 = ci1 + str1*nstrb;
                if (sign == 0) {
                        break;  /* end of this link row */
                } else if (sign > 0) {
                        for (k = 0; k < bcount; k++) {
                                cp1[k] += cp0[k];
                        }
                } else {
                        for (k = 0; k < bcount; k++) {
                                cp1[k] -= cp0[k];
                        }
                }
        }
}

/* Scatter t1 back through the beta link table into the row of ci1 that
 * belongs to alpha string stra_id (inverse of FCIprog_b_t1). */
void FCIspread_b_t1(double *ci1, double *t1, int bcount, int stra_id, int strb_id,
                    int norb, int nstrb, int nlinkb, _LinkTrilT *clink_indexb)
{
        int j, ia, str0, str1, sign;
        const _LinkTrilT *tab = clink_indexb + strb_id * nlinkb;
        double *pci = ci1 + stra_id * (size_t)nstrb;
        for (str0 = 0; str0 < bcount; str0++) {
                for (j = 0; j < nlinkb; j++) {
                        ia   = EXTRACT_IA  (tab[j]);
                        str1 = EXTRACT_ADDR(tab[j]);
                        sign = EXTRACT_SIGN(tab[j]);
                        if (sign == 0) {
                                break;
                        } else {
                                pci[str1] += sign * t1[ia*bcount+str0];
                        }
                }
                tab += nlinkb;
        }
}

/*
 * f1e_tril is the 1e hamiltonian for spin alpha
 */
/* ci1 += h1e(alpha) applied to ci0: for each alpha excitation str0->str1 the
 * whole beta row is axpy'ed with weight sign * f1e_tril[ia].
 * Note: entries with sign == 0 contribute 0 and are simply skipped over. */
void FCIcontract_a_1e(double *f1e_tril, double *ci0, double *ci1,
                      int norb, int nstra, int nstrb, int nlinka, int nlinkb,
                      int *link_indexa, int *link_indexb)
{
        int j, k, ia, sign;
        size_t str0, str1;
        double *pci0, *pci1;
        double tmp;
        _LinkTrilT *tab;
        _LinkTrilT *clink = malloc(sizeof(_LinkTrilT) * nlinka * nstra);
        FCIcompress_link_tril(clink, link_indexa, nstra, nlinka);
        for (str0 = 0; str0 < nstra; str0++) {
                tab = clink + str0 * nlinka;
                for (j = 0; j < nlinka; j++) {
                        ia   = EXTRACT_IA  (tab[j]);
                        str1 = EXTRACT_ADDR(tab[j]);
                        sign = EXTRACT_SIGN(tab[j]);
                        pci0 = ci0 + str0 * nstrb;
                        pci1 = ci1 + str1 * nstrb;
                        tmp = sign * f1e_tril[ia];
                        for (k = 0; k < nstrb; k++) {
                                pci1[k] += tmp * pci0[k];
                        }
                }
        }
        free(clink);
}

/*
 * f1e_tril is the 1e hamiltonian for spin beta
 */
/* ci1 += h1e(beta) applied to ci0; beta excitations act within each alpha row. */
void FCIcontract_b_1e(double *f1e_tril, double *ci0, double *ci1,
                      int norb, int nstra, int nstrb, int nlinka, int nlinkb,
                      int *link_indexa, int *link_indexb)
{
        int j, k, ia, sign;
        size_t str0, str1;
        double *pci1;
        double tmp;
        _LinkTrilT *tab;
        _LinkTrilT *clink = malloc(sizeof(_LinkTrilT) * nlinkb * nstrb);
        FCIcompress_link_tril(clink, link_indexb, nstrb, nlinkb);
        for (str0 = 0; str0 < nstra; str0++) {
                pci1 = ci1 + str0 * nstrb;
                for (k = 0; k < nstrb; k++) {
                        tab = clink + k * nlinkb;
                        tmp = ci0[str0*nstrb+k];
                        for (j = 0; j < nlinkb; j++) {
                                ia   = EXTRACT_IA  (tab[j]);
                                str1 = EXTRACT_ADDR(tab[j]);
                                sign = EXTRACT_SIGN(tab[j]);
                                pci1[str1] += sign * tmp * f1e_tril[ia];
                        }
                }
        }
        free(clink);
}

/* spin0 (na == nb) 1e contraction: only the alpha half is computed here;
 * the caller symmetrizes (see FCIcontract_2e_spin0's comment below). */
void FCIcontract_1e_spin0(double *f1e_tril, double *ci0, double *ci1,
                          int norb, int na, int nlink, int *link_index)
{
        NPdset0(ci1, ((size_t)na) * na);
        FCIcontract_a_1e(f1e_tril, ci0, ci1, norb, na, na, nlink, nlink,
                         link_index, link_index);
}

/*
 * spread t1 into ci1buf
 */
/* Like FCIspread_a_t1 but t1 rows have stride nrow_t1, which may exceed the
 * bcount columns actually written (strb_id and norb are unused here). */
static void spread_bufa_t1(double *ci1, double *t1, int nrow_t1,
                           int bcount, int stra_id, int strb_id,
                           int norb, int nstrb, int nlinka, _LinkTrilT *clink_indexa)
{
        int j, k, ia, sign;
        size_t str1;
        const _LinkTrilT *tab = clink_indexa + stra_id * nlinka;
        double *cp0, *cp1;
        for (j = 0; j < nlinka; j++) {
                ia   = EXTRACT_IA  (tab[j]);
                str1 = EXTRACT_ADDR(tab[j]);
                sign = EXTRACT_SIGN(tab[j]);
                cp0 = t1 + ia*nrow_t1;
                cp1 = ci1 + str1*nstrb;
                if (sign == 0) {
                        break;
                } else if (sign > 0) {
                        for (k = 0; k < bcount; k++) {
                                cp1[k] += cp0[k];
                        }
                } else {
                        for (k = 0; k < bcount; k++) {
                                cp1[k] -= cp0[k];
                        }
                }
        }
}

/*
 * bcount_for_spread_a is different for spin1 and spin0
 */
/* Per-(alpha-string, beta-block) kernel: gather -> dgemm with eri -> scatter.
 * t1buf must hold 2 * nnorb * bcount doubles (t1 followed by vt1). */
static void ctr_rhf2e_kern(double *eri, double *ci0, double *ci1,
                           double *ci1buf, double *t1buf,
                           int bcount_for_spread_a, int ncol_ci1buf,
                           int bcount, int stra_id, int strb_id,
                           int norb, int na, int nb, int nlinka, int nlinkb,
                           _LinkTrilT *clink_indexa, _LinkTrilT *clink_indexb)
{
        const char TRANS_N = 'N';
        const double D0 = 0;
        const double D1 = 1;
        const int nnorb = norb * (norb+1)/2;  /* tril-packed pair index */
        double *t1 = t1buf;
        double *vt1 = t1buf + nnorb*bcount;
        NPdset0(t1, nnorb*bcount);
        FCIprog_a_t1(ci0, t1, bcount, stra_id, strb_id, norb, nb, nlinka, clink_indexa);
        FCIprog_b_t1(ci0, t1, bcount, stra_id, strb_id, norb, nb, nlinkb, clink_indexb);
        /* vt1 = t1 . eri  (bcount x nnorb times nnorb x nnorb) */
        dgemm_(&TRANS_N, &TRANS_N, &bcount, &nnorb, &nnorb,
               &D1, t1, &bcount, eri, &nnorb, &D0, vt1, &bcount);
        FCIspread_b_t1(ci1, vt1, bcount, stra_id, strb_id, norb, nb, nlinkb, clink_indexb);
        //FCIspread_a_t1(ci1buf, vt1, bcount_for_spread_a, stra_id, 0,
        //               norb, ncol_ci1buf, nlinka, clink_indexa);
        spread_bufa_t1(ci1buf, vt1, bcount, bcount_for_spread_a, stra_id, 0,
                       norb, ncol_ci1buf, nlinka, clink_indexa);
}

/* out[i, 0:ni] += in[i, 0:ni] for i in [0, count); out has row stride no. */
void FCIaxpy2d(double *out, double *in, size_t count, size_t no, size_t ni)
{
        int i, j;
        for (i = 0; i < count; i++) {
        for (j = 0; j < ni; j++) {
                out[i*no+j] += in[i*ni+j];
        } }
}

/* Reduce the per-thread buffers in[] into out. Must be called from inside an
 * OpenMP parallel region: each thread sums its own slice [start, end) of rows
 * over all thread buffers, so no two threads touch the same output row. */
static void _reduce(double *out, double **in, size_t count, size_t no, size_t ni)
{
        unsigned int nthreads = omp_get_num_threads();
        unsigned int thread_id = omp_get_thread_num();
        size_t blksize = (count + nthreads - 1) / nthreads;
        size_t start = thread_id * blksize;
        size_t end = MIN(start + blksize, count);
        double *src;
        size_t it, i, j;
        for (it = 0; it < nthreads; it++) {
                src = in[it];
                for (i = start; i < end; i++) {
                for (j = 0; j < ni; j++) {
                        out[i*no+j] += src[i*ni+j];
                } }
        }
}

/*
 * nlink = nocc*nvir, num.
 * all possible strings that a string can link to
 * link_index[str0] == linking map between str0 and other strings
 * link_index[str0][ith-linking-string] ==
 *     [tril(creation_op,annihilation_op),0,linking-string-id,sign]
 * FCIcontract_2e_spin0 only compute half of the contraction, due to the
 * symmetry between alpha and beta spin.  The right contracted ci vector
 * is (ci1+ci1.T)
 */
void FCIcontract_2e_spin0(double *eri, double *ci0, double *ci1,
                          int norb, int na, int nlink, int *link_index)
{
        _LinkTrilT *clink = malloc(sizeof(_LinkTrilT) * nlink * na);
        FCIcompress_link_tril(clink, link_index, na, nlink);

        NPdset0(ci1, ((size_t)na) * na);
        double *ci1bufs[MAX_THREADS];
#pragma omp parallel
{
        int strk, ib;
        size_t blen;
        /* scratch: t1 + vt1 for ctr_rhf2e_kern, plus per-thread ci1 buffer */
        double *t1buf = malloc(sizeof(double) * (STRB_BLKSIZE*norb*(norb+1)+2));
        double *ci1buf = malloc(sizeof(double) * (na*STRB_BLKSIZE+2));
        ci1bufs[omp_get_thread_num()] = ci1buf;
        for (ib = 0; ib < na; ib += STRB_BLKSIZE) {
                blen = MIN(STRB_BLKSIZE, na-ib);
                NPdset0(ci1buf, ((size_t)na) * blen);
#pragma omp for schedule(static, 112)
/* strk starts from MAX(strk0, ib), because [0:ib,0:ib] have been evaluated */
                for (strk = ib; strk < na; strk++) {
                        ctr_rhf2e_kern(eri, ci0, ci1, ci1buf, t1buf,
                                       MIN(STRB_BLKSIZE, strk-ib), blen,
                                       MIN(STRB_BLKSIZE, strk+1-ib),
                                       strk, ib, norb, na, na, nlink, nlink,
                                       clink, clink);
                }
// NPomp_dsum_reduce_inplace(ci1bufs, blen*na);
//#pragma omp master
// FCIaxpy2d(ci1+ib, ci1buf, na, na, blen);
#pragma omp barrier
                _reduce(ci1+ib, ci1bufs, na, na, blen);
// An explicit barrier to ensure ci1 is updated. Without barrier, there may
// occur race condition between FCIaxpy2d and ctr_rhf2e_kern
#pragma omp barrier
        }
        free(ci1buf);
        free(t1buf);
}
        free(clink);
}

/* General (na != nb) 2e contraction: ci1 = (pq|rs) E_pq E_rs ci0, blocked
 * over beta strings with per-thread buffers reduced by _reduce. */
void FCIcontract_2e_spin1(double *eri, double *ci0, double *ci1,
                          int norb, int na, int nb, int nlinka, int nlinkb,
                          int *link_indexa, int *link_indexb)
{
        _LinkTrilT *clinka = malloc(sizeof(_LinkTrilT) * nlinka * na);
        _LinkTrilT *clinkb = malloc(sizeof(_LinkTrilT) * nlinkb * nb);
        FCIcompress_link_tril(clinka, link_indexa, na, nlinka);
        FCIcompress_link_tril(clinkb, link_indexb, nb, nlinkb);

        NPdset0(ci1, ((size_t)na) * nb);
        double *ci1bufs[MAX_THREADS];
#pragma omp parallel
{
        int strk, ib;
        size_t blen;
        double *t1buf = malloc(sizeof(double) * (STRB_BLKSIZE*norb*(norb+1)+2));
        double *ci1buf = malloc(sizeof(double) * (na*STRB_BLKSIZE+2));
        ci1bufs[omp_get_thread_num()] = ci1buf;
        for (ib = 0; ib < nb; ib += STRB_BLKSIZE) {
                blen = MIN(STRB_BLKSIZE, nb-ib);
                NPdset0(ci1buf, ((size_t)na) * blen);
#pragma omp for schedule(static)
                for (strk = 0; strk < na; strk++) {
                        ctr_rhf2e_kern(eri, ci0, ci1, ci1buf, t1buf,
                                       blen, blen, blen, strk, ib,
                                       norb, na, nb, nlinka, nlinkb,
                                       clinka, clinkb);
                }
// NPomp_dsum_reduce_inplace(ci1bufs, blen*na);
//#pragma omp master
// FCIaxpy2d(ci1+ib, ci1buf, na, nb, blen);
#pragma omp barrier
                _reduce(ci1+ib, ci1bufs, na, nb, blen);
// An explicit barrier to ensure ci1 is updated. Without barrier, there may
// occur race condition between FCIaxpy2d and ctr_rhf2e_kern
#pragma omp barrier
        }
        free(ci1buf);
        free(t1buf);
}
        free(clinka);
        free(clinkb);
}

/*
 * eri_ab is mixed integrals (alpha,alpha|beta,beta), |beta,beta) in small strides
 */
/* UHF kernel: separate alpha (t1a) and beta (t1b) intermediates, combined
 * through the three integral blocks aa/ab/bb. t1buf must hold
 * 3 * nnorb * bcount doubles (t1a, t1b, vt1). */
static void ctr_uhf2e_kern(double *eri_aa, double *eri_ab, double *eri_bb,
                           double *ci0, double *ci1, double *ci1buf, double *t1buf,
                           int bcount, int stra_id, int strb_id,
                           int norb, int na, int nb, int nlinka, int nlinkb,
                           _LinkTrilT *clink_indexa, _LinkTrilT *clink_indexb)
{
        const char TRANS_T = 'T';
        const char TRANS_N = 'N';
        const double D0 = 0;
        const double D1 = 1;
        const int nnorb = norb * (norb+1)/2;
        double *t1a = t1buf;
        double *t1b = t1a + nnorb*bcount;
        double *vt1 = t1b + nnorb*bcount;
        int i;
        for (i = 0; i < nnorb*bcount; i++) {
                t1a[i] = 0;
                t1b[i] = 0;
        }
        FCIprog_a_t1(ci0, t1a, bcount, stra_id, strb_id, norb, nb, nlinka, clink_indexa);
        FCIprog_b_t1(ci0, t1b, bcount, stra_id, strb_id, norb, nb, nlinkb, clink_indexb);
        /* beta result: vt1 = t1a . eri_ab^T + t1b . eri_bb */
        dgemm_(&TRANS_N, &TRANS_T, &bcount, &nnorb, &nnorb,
               &D1, t1a, &bcount, eri_ab, &nnorb, &D0, vt1, &bcount);
        dgemm_(&TRANS_N, &TRANS_N, &bcount, &nnorb, &nnorb,
               &D1, t1b, &bcount, eri_bb, &nnorb, &D1, vt1, &bcount);
        FCIspread_b_t1(ci1, vt1, bcount, stra_id, strb_id, norb, nb, nlinkb, clink_indexb);
        /* alpha result: vt1 = t1a . eri_aa + t1b . eri_ab */
        dgemm_(&TRANS_N, &TRANS_N, &bcount, &nnorb, &nnorb,
               &D1, t1a, &bcount, eri_aa, &nnorb, &D0, vt1, &bcount);
        dgemm_(&TRANS_N, &TRANS_N, &bcount, &nnorb, &nnorb,
               &D1, t1b, &bcount, eri_ab, &nnorb, &D1, vt1, &bcount);
        FCIspread_a_t1(ci1buf, vt1, bcount, stra_id, 0,
                       norb, bcount, nlinka, clink_indexa);
}

/* UHF 2e contraction driver; same blocking/reduction scheme as spin1. */
void FCIcontract_uhf2e(double *eri_aa, double *eri_ab, double *eri_bb,
                       double *ci0, double *ci1,
                       int norb, int na, int nb, int nlinka, int nlinkb,
                       int *link_indexa, int *link_indexb)
{
        _LinkTrilT *clinka = malloc(sizeof(_LinkTrilT) * nlinka * na);
        _LinkTrilT *clinkb = malloc(sizeof(_LinkTrilT) * nlinkb * nb);
        FCIcompress_link_tril(clinka, link_indexa, na, nlinka);
        FCIcompress_link_tril(clinkb, link_indexb, nb,
                              nlinkb);

        NPdset0(ci1, ((size_t)na) * nb);
        double *ci1bufs[MAX_THREADS];
#pragma omp parallel
{
        int strk, ib;
        size_t blen;
        /* factor 2 vs the RHF kernels: t1a, t1b and vt1 scratch */
        double *t1buf = malloc(sizeof(double) * (STRB_BLKSIZE*norb*(norb+1)*2+2));
        double *ci1buf = malloc(sizeof(double) * (na*STRB_BLKSIZE+2));
        ci1bufs[omp_get_thread_num()] = ci1buf;
        for (ib = 0; ib < nb; ib += STRB_BLKSIZE) {
                blen = MIN(STRB_BLKSIZE, nb-ib);
                NPdset0(ci1buf, ((size_t)na) * blen);
#pragma omp for schedule(static)
                for (strk = 0; strk < na; strk++) {
                        ctr_uhf2e_kern(eri_aa, eri_ab, eri_bb, ci0, ci1,
                                       ci1buf, t1buf, blen, strk, ib,
                                       norb, na, nb, nlinka, nlinkb,
                                       clinka, clinkb);
                }
// NPomp_dsum_reduce_inplace(ci1bufs, blen*na);
//#pragma omp master
// FCIaxpy2d(ci1+ib, ci1buf, na, nb, blen);
#pragma omp barrier
                _reduce(ci1+ib, ci1bufs, na, nb, blen);
// An explicit barrier to ensure ci1 is updated. Without barrier, there may
// occur race condition between FCIaxpy2d and ctr_uhf2e_kern
#pragma omp barrier
        }
        free(t1buf);
        free(ci1buf);
}
        free(clinka);
        free(clinkb);
}

/*************************************************
 * hdiag
 *************************************************/
/* Diagonal of the UHF Hamiltonian over occupation lists: for each
 * (alpha,beta) string pair, e1 collects one-electron diagonal terms and
 * e2 the Coulomb-minus-exchange sums; hdiag = e1 + e2/2. */
void FCImake_hdiag_uhf(double *hdiag, double *h1e_a, double *h1e_b,
                       double *jdiag_aa, double *jdiag_ab, double *jdiag_bb,
                       double *kdiag_aa, double *kdiag_bb,
                       int norb, int nstra, int nstrb, int nocca, int noccb,
                       int *occslista, int *occslistb)
{
#pragma omp parallel
{
        int j, j0, k0, jk, jk0;
        size_t ia, ib;
        double e1, e2;
        int *paocc, *pbocc;
#pragma omp for schedule(static)
        for (ia = 0; ia < nstra; ia++) {
                paocc = occslista + ia * nocca;
                for (ib = 0; ib < nstrb; ib++) {
                        e1 = 0;
                        e2 = 0;
                        pbocc = occslistb + ib * noccb;
                        for (j0 = 0; j0 < nocca; j0++) {
                                j = paocc[j0];
                                jk0 = j * norb;
                                e1 += h1e_a[j*norb+j];
                                for (k0 = 0; k0 < nocca; k0++) { // (alpha|alpha)
                                        jk = jk0 + paocc[k0];
                                        e2 += jdiag_aa[jk] - kdiag_aa[jk];
                                }
                                for (k0 = 0; k0 < noccb; k0++) { // (alpha|beta)
                                        jk = jk0 + pbocc[k0];
                                        /* factor 2: counts both ab and ba */
                                        e2 += jdiag_ab[jk] * 2;
                                }
                        }
                        for (j0 = 0; j0 < noccb; j0++) {
                                j = pbocc[j0];
                                jk0 = j * norb;
                                e1 += h1e_b[j*norb+j];
                                for (k0 = 0; k0 < noccb; k0++) { // (beta|beta)
                                        jk = jk0 + pbocc[k0];
                                        e2 += jdiag_bb[jk] - kdiag_bb[jk];
                                }
                        }
                        hdiag[ia*nstrb+ib] = e1 + e2 * .5;
                }
        }
}
}

/* Restricted wrapper: identical alpha and beta integrals/occupations. */
void FCImake_hdiag(double *hdiag, double *h1e, double *jdiag, double *kdiag,
                   int norb, int na, int nocc, int *occslst)
{
        FCImake_hdiag_uhf(hdiag, h1e, h1e, jdiag, jdiag, jdiag, kdiag, kdiag,
                          norb, na, na, nocc, nocc, occslst, occslst);
}

/* Index of the lowest set bit of r (r must be nonzero). */
static int first1(uint64_t r)
{
#ifdef HAVE_FFS
        return ffsll(r) - 1;
#else
        /* binary search over the bit positions */
        int n = 0;
        if (r >> (n + 32)) n += 32;
        if (r >> (n + 16)) n += 16;
        if (r >> (n + 8)) n += 8;
        if (r >> (n + 4)) n += 4;
        if (r >> (n + 2)) n += 2;
        if (r >> (n + 1)) n += 1;
        return n;
#endif
}

/*************************************************
 * pspace Hamiltonian, ref CPL, 169, 463
 *************************************************/
/*
 * sub-space Hamiltonian (tril part) of the determinants (stra,strb)
 */
/* Only determinant pairs differing by at most a double excitation couple;
 * the number of differing bits (n1da/n1db) selects the Slater-Condon case. */
void FCIpspace_h0tril_uhf(double *h0, double *h1e_a, double *h1e_b,
                          double *g2e_aa, double *g2e_ab, double *g2e_bb,
                          uint64_t *stra, uint64_t *strb, int norb, int np)
{
        const int d2 = norb * norb;
        const int d3 = norb * norb * norb;
#pragma omp parallel
{
        int i, j, k, pi, pj, pk, pl;
        int n1da, n1db;
        uint64_t da, db, str1;
        double tmp;
#pragma omp for schedule(dynamic)
        for (i = 0; i < np; i++) {
        for (j = 0; j < i; j++) {
                da = stra[i] ^ stra[j];
                db = strb[i] ^ strb[j];
                n1da = FCIpopcount_1(da);
                n1db = FCIpopcount_1(db);
                switch (n1da) {
                case 0: switch (n1db) {
                        /* single beta excitation */
                        case 2: pi = first1(db & strb[i]);
                                pj = first1(db & strb[j]);
                                tmp = h1e_b[pi*norb+pj];
                                for (k = 0; k < norb; k++) {
                                        if (stra[i] & (1ULL<<k)) {
                                                tmp += g2e_ab[pi*norb+pj+k*d3+k*d2];
                                        }
                                        if (strb[i] & (1ULL<<k)) {
                                                tmp += g2e_bb[pi*d3+pj*d2+k*norb+k]
                                                     - g2e_bb[pi*d3+k*d2+k*norb+pj];
                                        }
                                }
                                if (FCIcre_des_sign(pi, pj, strb[j]) > 0) {
                                        h0[i*np+j] = tmp;
                                } else {
                                        h0[i*np+j] = -tmp;
                                }
                                break;
                        /* double beta excitation */
                        case 4: pi = first1(db & strb[i]);
                                pj = first1(db & strb[j]);
                                pk = first1((db & strb[i]) ^ (1ULL<<pi));
                                pl = first1((db & strb[j]) ^ (1ULL<<pj));
                                str1 = strb[j] ^ (1ULL<<pi) ^ (1ULL<<pj);
                                if (FCIcre_des_sign(pi, pj, strb[j])
                                   *FCIcre_des_sign(pk, pl, str1) > 0) {
                                        h0[i*np+j] = g2e_bb[pi*d3+pj*d2+pk*norb+pl]
                                                   - g2e_bb[pi*d3+pl*d2+pk*norb+pj];
                                } else {
                                        h0[i*np+j] =-g2e_bb[pi*d3+pj*d2+pk*norb+pl]
                                                   + g2e_bb[pi*d3+pl*d2+pk*norb+pj];
                                }
                        } break;
                case 2: switch (n1db) {
                        /* single alpha excitation */
                        case 0: pi = first1(da & stra[i]);
                                pj = first1(da & stra[j]);
                                tmp = h1e_a[pi*norb+pj];
                                for (k = 0; k < norb; k++) {
                                        if (strb[i] & (1ULL<<k)) {
                                                tmp += g2e_ab[pi*d3+pj*d2+k*norb+k];
                                        }
                                        if (stra[i] & (1ULL<<k)) {
                                                tmp += g2e_aa[pi*d3+pj*d2+k*norb+k]
                                                     - g2e_aa[pi*d3+k*d2+k*norb+pj];
                                        }
                                }
                                if (FCIcre_des_sign(pi, pj, stra[j]) > 0) {
                                        h0[i*np+j] = tmp;
                                } else {
                                        h0[i*np+j] = -tmp;
                                }
                                break;
                        /* one alpha + one beta excitation */
                        case 2: pi = first1(da & stra[i]);
                                pj = first1(da & stra[j]);
                                pk = first1(db & strb[i]);
                                pl = first1(db & strb[j]);
                                if (FCIcre_des_sign(pi, pj, stra[j])
                                   *FCIcre_des_sign(pk, pl, strb[j]) > 0) {
                                        h0[i*np+j] = g2e_ab[pi*d3+pj*d2+pk*norb+pl];
                                } else {
                                        h0[i*np+j] =-g2e_ab[pi*d3+pj*d2+pk*norb+pl];
                                }
                        } break;
                case 4: switch (n1db) {
                        /* double alpha excitation */
                        case 0: pi = first1(da & stra[i]);
                                pj = first1(da & stra[j]);
                                pk = first1((da & stra[i]) ^ (1ULL<<pi));
                                pl = first1((da & stra[j]) ^ (1ULL<<pj));
                                str1 = stra[j] ^ (1ULL<<pi) ^ (1ULL<<pj);
                                if (FCIcre_des_sign(pi, pj, stra[j])
                                   *FCIcre_des_sign(pk, pl, str1) > 0) {
                                        h0[i*np+j] = g2e_aa[pi*d3+pj*d2+pk*norb+pl]
                                                   - g2e_aa[pi*d3+pl*d2+pk*norb+pj];
                                } else {
                                        h0[i*np+j] =-g2e_aa[pi*d3+pj*d2+pk*norb+pl]
                                                   + g2e_aa[pi*d3+pl*d2+pk*norb+pj];
                                }
                        } break;
                }
        } }
}
}

/* Restricted wrapper over FCIpspace_h0tril_uhf. */
void FCIpspace_h0tril(double *h0, double *h1e, double *g2e,
                      uint64_t *stra, uint64_t *strb, int norb, int np)
{
        FCIpspace_h0tril_uhf(h0, h1e, h1e, g2e, g2e, g2e, stra, strb, norb, np);
}

/***********************************************************************
 *
 * With symmetry
 *
 * Note the ordering in eri and the index in link_index
 * eri is a tril matrix, it should be reordered wrt the irrep of the
 * direct product E_i^j.  The 2D array eri(ij,kl) is a diagonal block
 * matrix.  Each block is associated with an irrep.
 * link_index[str_id,pair_id,0] which is the index of pair_id, should be
 * reordered wrt the irreps accordingly
 *
 * dimirrep stores the number of occurence for each irrep
 *
 ***********************************************************************/
/* Compress link_index, keeping only entries whose pair irrep matches
 * eri_irrep; a sign of 0 after the kept entries terminates each row. */
static void pick_link_by_irrep(_LinkTrilT *clink, int *link_index,
                               int nstr, int nlink, int eri_irrep)
{
        int i, j, k;
        for (i = 0; i < nstr; i++) {
                for (k = 0, j = 0; k < nlink; k++) {
                        if (link_index[k*4+1] == eri_irrep) {
                                clink[j].ia   = link_index[k*4+0];
                                clink[j].addr = link_index[k*4+2];
                                clink[j].sign = link_index[k*4+3];
                                j++;
                        }
                }
                if (j < nlink) {
                        clink[j].sign = 0;  /* sentinel */
                }
                clink += nlink;
                link_index += nlink * 4;
        }
}

/* Symmetry-adapted variant of ctr_rhf2e_kern: only the alpha gather is
 * needed; nnorb is the dimension of this irrep's eri block. */
static void ctr_rhf2esym_kern1(double *eri, double *ci0, double *ci1ab,
                               double *ci1buf, double *t1buf,
                               int ncol_ci1buf, int bcount,
                               int stra_id, int strb_id,
                               int nnorb, int nb_intermediate,
                               int na, int nb, int nlinka, int nlinkb,
                               _LinkTrilT *clink_indexa, _LinkTrilT *clink_indexb)
{
        const char TRANS_N = 'N';
        const double D0 = 0;
        const double D1 = 1;
        double *t1 = t1buf;
        double *vt1 = t1buf + nnorb*bcount;
        NPdset0(t1, nnorb*bcount);
        FCIprog_a_t1(ci0, t1, bcount, stra_id, strb_id, 0, nb, nlinka, clink_indexa);
        dgemm_(&TRANS_N, &TRANS_N, &bcount, &nnorb, &nnorb,
               &D1, t1, &bcount, eri, &nnorb, &D0, vt1, &bcount);
        FCIspread_b_t1(ci1ab, vt1, bcount, stra_id, strb_id, 0,
                       nb_intermediate, nlinkb, clink_indexb);
        spread_bufa_t1(ci1buf, vt1, bcount, bcount, stra_id, 0,
                       0, ncol_ci1buf, nlinka, clink_indexa);
}

/* Blocked OpenMP loop over intermediate alpha strings for one irrep block. */
static void loop_c2e_symm1(double *eri, double *ci0, double *ci1aa, double *ci1ab,
                           int nnorb, int na_intermediate, int nb_intermediate,
                           int na, int nb, int nlinka, int nlinkb,
                           _LinkTrilT *clinka, _LinkTrilT *clinkb)
{
        double *ci1bufs[MAX_THREADS];
#pragma omp parallel
{
        int strk, ib;
        size_t blen;
        double *t1buf = malloc(sizeof(double) * (STRB_BLKSIZE*nnorb*2+2));
        double *ci1buf = malloc(sizeof(double) * (na*STRB_BLKSIZE+2));
        ci1bufs[omp_get_thread_num()] = ci1buf;
        for (ib = 0; ib < nb; ib += STRB_BLKSIZE) {
                blen = MIN(STRB_BLKSIZE, nb-ib);
                NPdset0(ci1buf, ((size_t)na) * blen);
#pragma omp for schedule(static)
                for (strk = 0; strk < na_intermediate; strk++) {
                        ctr_rhf2esym_kern1(eri, ci0, ci1ab, ci1buf, t1buf,
                                           blen, blen, strk, ib,
                                           nnorb, nb_intermediate,
                                           na, nb, nlinka, nlinkb,
                                           clinka, clinkb);
                }
// NPomp_dsum_reduce_inplace(ci1bufs, blen*na);
//#pragma omp master
// FCIaxpy2d(ci1aa+ib, ci1buf, na, nb, blen);
#pragma omp barrier
                _reduce(ci1aa+ib, ci1bufs, na, nb, blen);
// An explicit barrier to ensure ci1 is updated. Without barrier, there may
// occur race condition between FCIaxpy2d and ctr_rhf2esym_kern1
#pragma omp barrier
        }
        free(ci1buf);
        free(t1buf);
}
}

#define TOTIRREPS 8
/* Symmetry-driven 2e contraction: loop over alpha-string irrep and pair
 * irrep; strb_ir is fixed by wfnsym, intermediates by XOR of irreps. */
void FCIcontract_2e_symm1(double **eris, double **ci0, double **ci1,
                          int norb, int *nas, int *nbs, int nlinka, int nlinkb,
                          int **linka, int **linkb, int *dimirrep, int wfnsym)
{
        int i;
        int na = 0;
        int nb = 0;
        /* buffers sized by the largest irrep */
        for (i = 0; i < TOTIRREPS; i++) {
                na = MAX(nas[i], na);
                nb = MAX(nbs[i], nb);
        }
        _LinkTrilT *clinka = malloc(sizeof(_LinkTrilT) * nlinka * na);
        _LinkTrilT *clinkb = malloc(sizeof(_LinkTrilT) * nlinkb * nb);
        int ai_ir, stra_ir, strb_ir, intera_ir, interb_ir, ma, mb;
        for (stra_ir = 0; stra_ir < TOTIRREPS; stra_ir++) {
        for (ai_ir = 0; ai_ir < TOTIRREPS; ai_ir++) {
                strb_ir = wfnsym^stra_ir;
                ma = nas[stra_ir];
                mb = nbs[strb_ir];
                if (ma > 0 && mb > 0 && dimirrep[ai_ir] > 0) {
                        intera_ir = ai_ir^stra_ir;
                        interb_ir = ai_ir^strb_ir;
                        // clinka for inter_ir*ai_ir -> stra_ir
                        pick_link_by_irrep(clinka, linka[intera_ir],
                                           nas[intera_ir], nlinka, ai_ir);
                        // clinka for strb_ir*ai_ir -> inter_ir
                        pick_link_by_irrep(clinkb, linkb[strb_ir],
                                           nbs[strb_ir], nlinkb, ai_ir);
                        loop_c2e_symm1(eris[ai_ir], ci0[stra_ir],
                                       ci1[stra_ir], ci1[intera_ir],
                                       dimirrep[ai_ir], nas[intera_ir],
                                       nbs[interb_ir], ma, mb,
                                       nlinka, nlinkb, clinka, clinkb);
                }
        } }
        free(clinka);
        free(clinkb);
}
kmeans_h2o4gpu.h
/*! * Copyright 2017-2018 H2O.ai, Inc. * License Apache License Version 2.0 (see LICENSE for details) */ #pragma once #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/functional.h> #include "kmeans_labels.h" #include "kmeans_centroids.h" template<typename T> struct count_functor { T* pairwise_distances_ptr; int* counts_ptr; int k; int rows_per_run; count_functor(T* _pairwise_distances_ptr, int* _counts_ptr, int _k, int _rows_per_run) { pairwise_distances_ptr = _pairwise_distances_ptr; counts_ptr = _counts_ptr; k = _k; rows_per_run = _rows_per_run; } __device__ void operator()(int idx) const { int closest_centroid_idx = 0; T best_distance = pairwise_distances_ptr[idx]; // FIXME potentially slow due to striding for (int i = 1; i < k; i++) { T distance = pairwise_distances_ptr[idx + i * rows_per_run]; if (distance < best_distance) { best_distance = distance; closest_centroid_idx = i; } } atomicAdd(&counts_ptr[closest_centroid_idx], 1); } }; /** * Calculates closest centroid for each record and counts how many points are assigned to each centroid. 
* @tparam T * @param verbose * @param num_gpu * @param rows_per_gpu * @param cols * @param data * @param data_dots * @param centroids * @param weights * @param pairwise_distances * @param labels */ template<typename T> void count_pts_per_centroid( int verbose, int num_gpu, int rows_per_gpu, int cols, thrust::device_vector<T> **data, thrust::device_vector<T> **data_dots, thrust::host_vector<T> centroids, thrust::host_vector<T> &weights ) { int k = centroids.size() / cols; #pragma omp parallel for for (int i = 0; i < num_gpu; i++) { thrust::host_vector<int> weights_tmp(weights.size()); CUDACHECK(cudaSetDevice(i)); thrust::device_vector<T> centroid_dots(k); thrust::device_vector<T> d_centroids = centroids; thrust::device_vector<int> counts(k); kmeans::detail::batch_calculate_distances(verbose, 0, rows_per_gpu, cols, k, *data[i], d_centroids, *data_dots[i], centroid_dots, [&](int rows_per_run, size_t offset, thrust::device_vector<T> &pairwise_distances) { auto counting = thrust::make_counting_iterator(0); auto counts_ptr = thrust::raw_pointer_cast(counts.data()); auto pairwise_distances_ptr = thrust::raw_pointer_cast(pairwise_distances.data()); thrust::for_each(counting, counting + rows_per_run, count_functor<T>(pairwise_distances_ptr, counts_ptr, k, rows_per_run) ); } ); kmeans::detail::memcpy(weights_tmp, counts); kmeans::detail::streamsync(i); for (int p = 0; p < k; p++) { weights[p] += weights_tmp[p]; } } }
sufsort.c
#include <stdio.h>
#include <math.h>
#ifdef __OPENMP__
#include <omp.h>
#endif
#ifdef __TIMING__
#include <time.h>
#include <sys/time.h>
#endif
#include <unistd.h>
#include <math.h>
#include <nmmintrin.h>
#include "sufsort.h"
#include "common.h"

/* -------------------------------------------------------------------------- */
/* global tables for triplet computation */
char triplet_table[85] = {
        0,
        1, 22, 43, 64,
        2, 7, 12, 17, 23, 28, 33, 38, 44, 49, 54, 59, 65, 70, 75, 80,
        3, 4, 5, 6, 8, 9, 10, 11, 13, 14, 15, 16, 18, 19, 20, 21,
        24, 25, 26, 27, 29, 30, 31, 32, 34, 35, 36, 37, 39, 40, 41, 42,
        45, 46, 47, 48, 50, 51, 52, 53, 55, 56, 57, 58, 60, 61, 62, 63,
        66, 67, 68, 69, 71, 72, 73, 74, 76, 77, 78, 79, 81, 82, 83, 84
};

/* per-slot lists of triplet codes handled by each top-level pass */
char cool_table[SLOT_DEPTH][SLOT_NUM] = {
        {3, 4, 5, 6, 8, 9, 10, 11, 13, 14, 15, 16, 0, 0, 0, 0, 0},
        {18, 19, 20, 21, 24, 25, 26, 27, 29, 30, 31, 32, 0, 0, 0, 0, 0},
        {34, 35, 36, 37, 39, 40, 41, 42, 45, 46, 47, 48, 50, 0, 0, 0, 0},
        {51, 52, 53, 55, 56, 57, 58, 60, 61, 62, 63, 66, 67, 0, 0, 0, 0},
        {68, 69, 71, 72, 73, 74, 76, 77, 78, 79, 81, 82, 83, 84, 0, 0, 0}
};
/* number of valid entries per cool_table row */
int cool[SLOT_DEPTH] = { 12, 12, 13, 13, 14 };

// SSE4 strcmp
/* Compare up to min(n1,n2) bytes 16 at a time with PCMPISTRI; falls back to
 * length difference when one string is a prefix of the other.
 * NOTE(review): the unaligned 16-byte loads may read past the end of either
 * buffer -- presumably the buffers are padded by the caller; verify. */
inline int STNI_strcmp (char *p1, int n1, char *p2, int n2)
{
#define MODE (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_LEAST_SIGNIFICANT | _SIDD_NEGATIVE_POLARITY)
  __m128i smm1 = _mm_loadu_si128 ((__m128i *) p1);
  __m128i smm2 = _mm_loadu_si128 ((__m128i *) p2);
  int ResultIndex = 0;
  int remain_size = n1 < n2 ? n1 : n2;
  while (1)
    {
      ResultIndex = _mm_cmpistri (smm1, smm2, MODE);
      /* != 16: a mismatch was found inside this 16-byte chunk */
      if (ResultIndex != 16 || ResultIndex > remain_size - 1)
        {
          break;
        }
      remain_size = remain_size - 16;
      p1 += 16;
      p2 += 16;
      smm1 = _mm_loadu_si128 ((__m128i *) p1);
      smm2 = _mm_loadu_si128 ((__m128i *) p2);
    }
  if (ResultIndex > remain_size - 1)
    {
      /* common prefix exhausted: shorter string sorts first */
      return n1 - n2;
    }
  p1 = (char *) &smm1;
  p2 = (char *) &smm2;
  return p1[ResultIndex] - p2[ResultIndex];
#undef MODE
}

/* Re-encode every read (scanned right-to-left) into overlapping 3-symbol
 * codes via triplet_table; the last two positions use shortened codes. */
void transform_to_triplet_reverse (char *reads, int read_len, INDEX_TYPE_T num_reads)
{
  INDEX_TYPE_T i;
#ifdef __OPENMP__
#pragma omp parallel default(none) private(i) shared(triplet_table, read_len, reads, num_reads)
  {
#endif
    char *new_read = (char *) malloc (sizeof (char) * read_len);
#ifdef __OPENMP__
#pragma omp for
#endif
    for (i = 0; i < num_reads; i++)
      {
        INDEX_TYPE_T start = read_len * (i + 1) - 1;  /* last char of read i */
        int j;
        char t0 = reads[start] - 1;
        char t1 = reads[start - 1] - 1;
        char t2 = reads[start - 2] - 1;
        char s = t0 * 16 + t1 * 4 + t2;  /* rolling 3-symbol code, base 4 */
        new_read[0] = reads[start] = triplet_table[s + 21];
        for (j = 1; j < read_len - 2; j++)
          {
            s = s & (char) 15;
            s = s * 4 + reads[start - j - 2] - 1;
            new_read[j] = reads[start - j] = triplet_table[s + 21];
          }
        /* tail positions: only 2 resp. 1 symbols remain */
        s = s & (char) 15;
        new_read[read_len - 2] = reads[start - (read_len - 2)] = triplet_table[5 + s];
        s = s & (char) 3;
        new_read[read_len - 1] = reads[start - (read_len - 1)] = triplet_table[1 + s];
        memcpy (reads + i * read_len, new_read, sizeof (char) * read_len);
      }
    free (new_read);
#ifdef __OPENMP__
  }
#endif
}

/* Forward (left-to-right) variant of the triplet transform, done in place. */
void transform_to_triplet (char *reads, int read_len, INDEX_TYPE_T num_reads)
{
  INDEX_TYPE_T i;
#ifdef __OPENMP__
#pragma omp parallel for
#endif
  for (i = 0; i < num_reads; i++)
    {
      INDEX_TYPE_T start = read_len * i;
      int j;
      char t0 = reads[start] - 1;
      char t1 = reads[start + 1] - 1;
      char t2 = reads[start + 2] - 1;
      char s = t0 * 16 + t1 * 4 + t2;
      reads[start] = triplet_table[s + 21];
      for (j = 1; j < read_len - 2; j++)
        {
          s = s & (char) 15;
          s = s * 4 + reads[start + j + 2] - 1;
          reads[start + j] = triplet_table[s + 21];
        }
      s = s & (char) 15;
      reads[start + read_len - 2] = triplet_table[5 + s];
      s = s & (char) 3;
      reads[start + read_len - 1] = triplet_table[1 + s];
    }
}

// when the bucket size is smaller than some threshold, use insertion sort
/* Insertion-sort the num_elems suffixes in sa[start..] by their tails from
 * depth pos on; records read-end intervals for overlap queries under
 * per-read locks as suffixes that terminate at this depth are seen. */
static void insert_sort (INDEX_TYPE_T * sa, char *reads, INDEX_TYPE_T start, int pos,
                         INDEX_TYPE_T num_elems, int read_len,
                         INDEX_TYPE_T * interval_start, INDEX_TYPE_T * interval_end,
                         u16 * interval_depth, omp_lock_t * read_lock,
                         INDEX_TYPE_T bkt_offset, int min_overlap)
{
  int i;
  int j;
  INDEX_TYPE_T tmp;
  INDEX_TYPE_T *indices = &sa[start];
  INDEX_TYPE_T *new_index;
  INDEX_TYPE_T tmp_index;
  char *read_i;
  char *read_j;
  int len_i;
  int len_j;
  INDEX_TYPE_T read_num;
  new_index = (INDEX_TYPE_T *) malloc (sizeof (INDEX_TYPE_T) * num_elems);
  assert (new_index);
  tmp = indices[0];
  len_i = read_len - tmp % read_len - pos;  /* chars left in this suffix */
  if (len_i == 0 && pos <= min_overlap)
    {
      /* suffix ends exactly here: remember this bucket as its interval */
      read_num = tmp / read_len;
      omp_set_lock (&read_lock[read_num]);
      if (interval_depth[read_num] < pos)
        {
          interval_start[read_num] = start + bkt_offset;
          interval_depth[read_num] = pos;
          interval_end[read_num] = num_elems;
        }
      omp_unset_lock (&read_lock[read_num]);
    }
  for (i = 1; i < num_elems; i++)
    {
      tmp = indices[i];
      tmp_index = i;
      read_i = &reads[tmp + pos];
      len_i = read_len - tmp % read_len - pos;
      if (len_i == 0 && pos <= min_overlap)
        {
          read_num = tmp / read_len;
          omp_set_lock (&read_lock[read_num]);
          if (interval_depth[read_num] < pos)
            {
              interval_start[read_num] = start + bkt_offset;
              interval_depth[read_num] = pos;
              interval_end[read_num] = num_elems;
            }
          omp_unset_lock (&read_lock[read_num]);
        }
      /* shift larger elements right until the slot for tmp is found */
      for (j = i - 1; j >= 0; j--)
        {
          len_j = read_len - indices[j] % read_len - pos;
          read_j = &reads[indices[j] + pos];
#ifdef __WITH_STNI__
          if (STNI_strcmp (read_i, len_i, read_j, len_j) >= 0)
            {
              break;
            }
#else
          if (strncmp (read_i, read_j, min(len_i, len_j)) > 0)
            {
              break;
            }
          else if (strncmp (read_i, read_j, min(len_i, len_j)) == 0 && len_i >= len_j)
            {
              break;
            }
#endif
          indices[j + 1] = indices[j];
          new_index[j + 1] = new_index[j];
        }
      indices[j + 1] = tmp;
      new_index[j + 1] = tmp_index;
    }
  free (new_index);
}

// non-recursive sort
/* One counting-sort pass at depth pos over the bucket sa[start..start+num_elems):
 * keys are the triplet codes at offset pos (0 when the suffix has ended).
 * Resulting sub-bucket offsets/frequencies are returned to the caller. */
static void process_slot_0 (INDEX_TYPE_T * X, char *reads, INDEX_TYPE_T start,
                            INDEX_TYPE_T num_elems, int pos, int read_len,
                            INDEX_TYPE_T * offsets, INDEX_TYPE_T * frequencies)
{
  INDEX_TYPE_T new_pos[NUM_SLOTS];
  int id;
  int read_of;
  INDEX_TYPE_T i;
  int bucket;
  INDEX_TYPE_T *indices = &X[start];
  INDEX_TYPE_T *new_indices = (INDEX_TYPE_T *) malloc (num_elems * sizeof (INDEX_TYPE_T));
  assert (new_indices != NULL);
  char *new_reads = (char *) malloc (num_elems * sizeof (char));
  assert (new_reads != NULL);
  memset (frequencies, 0, NUM_SLOTS * sizeof (INDEX_TYPE_T));
  /* count pass: cache each element's key in new_reads */
  for (i = 0; i < num_elems; ++i)
    {
      read_of = (indices[i] % read_len) + pos;
      id = (read_of >= read_len) ? (0) : (reads[indices[i] + pos]);
      new_reads[i] = id;
      frequencies[id]++;
    }
  /* prefix sums -> sub-bucket start offsets */
  new_pos[0] = offsets[0] = start;
  for (bucket = 1; bucket < NUM_SLOTS; ++bucket)
    {
      new_pos[bucket] = offsets[bucket] = offsets[bucket - 1] + frequencies[bucket - 1];
    }
  /* placement pass */
  for (i = 0; i < num_elems; ++i)
    {
      id = new_reads[i];
      new_indices[new_pos[id] - start] = indices[i];
      new_pos[id]++;
    }
  memcpy (indices, new_indices, num_elems * sizeof (INDEX_TYPE_T));
  free (new_indices);
  free (new_reads);
}

// recursive sort
/* Recursive radix step: counting-sort at depth pos (also recording read-end
 * intervals), then recurse into each sub-bucket -- switching to insert_sort
 * when the remaining work drops below INSERT_SORT_SIZE. Scratch buffers are
 * allocated at the top call and reused down the recursion. */
static void process_slot_1 (INDEX_TYPE_T * X, INDEX_TYPE_T * new_indices, char *new_reads,
                            char *reads, INDEX_TYPE_T start, INDEX_TYPE_T num_elems,
                            int pos, int read_len,
                            INDEX_TYPE_T * interval_start, INDEX_TYPE_T * interval_end,
                            u16 * interval_depth, omp_lock_t * read_lock,
                            INDEX_TYPE_T bkt_offset, int min_overlap)
{
  INDEX_TYPE_T offsets[NUM_SLOTS];
  INDEX_TYPE_T frequencies[NUM_SLOTS];
  int id;
  int read_of;
  INDEX_TYPE_T i;
  int bucket;
  INDEX_TYPE_T *indices = &X[start];
  int free_new_indices = 0;
  INDEX_TYPE_T read_num;
  if (new_indices == NULL)
    {
      /* top of the recursion: allocate scratch once */
      new_indices = (INDEX_TYPE_T *) malloc (num_elems * sizeof (INDEX_TYPE_T));
      assert (new_indices != NULL);
      new_reads = (char *) malloc (num_elems * sizeof (char));
      assert (new_reads != NULL);
      free_new_indices = 1;
    }
  memset (frequencies, 0, NUM_SLOTS * sizeof (INDEX_TYPE_T));
  for (i = 0; i < num_elems; ++i)
    {
      read_of = (indices[i] % read_len) + pos;
      id = (read_of >= read_len) ? (0) : (reads[indices[i] + pos]);
      if (pos == read_len - (indices[i] % read_len) && pos <= min_overlap)
        {
          read_num = indices[i] / read_len;
          // precompute the interval for fast overlap finding
          omp_set_lock (&read_lock[read_num]);
          if (interval_depth[read_num] < pos)
            {
              interval_start[read_num] = start + bkt_offset;
              interval_depth[read_num] = pos;
              interval_end[read_num] = num_elems;
            }
          omp_unset_lock (&read_lock[read_num]);
        }
      new_reads[i] = id;
      frequencies[id]++;
    }
  offsets[0] = start;
  for (bucket = 1; bucket < NUM_SLOTS; ++bucket)
    {
      offsets[bucket] = offsets[bucket - 1] + frequencies[bucket - 1];
    }
  for (i = 0; i < num_elems; ++i)
    {
      id = new_reads[i];
      new_indices[offsets[id] - start] = indices[i];
      offsets[id]++;
    }
  memcpy (indices, new_indices, num_elems * sizeof (INDEX_TYPE_T));
  if (pos < read_len)
    {
      for (i = 21; i <= 84; i++)
        {
          int k = triplet_table[i];
          /* after the placement pass, offsets[k - 1] equals the start of
           * sub-bucket k (each offsets[id] advanced past its own bucket) */
          if (frequencies[k] > 1 && frequencies[k] * (read_len - pos) < INSERT_SORT_SIZE)
            {
              insert_sort (X, reads, offsets[k - 1], pos + 3, frequencies[k], read_len,
                           interval_start, interval_end, interval_depth, read_lock,
                           bkt_offset, min_overlap);
            }
          else if (frequencies[k] > 1)
            {
              process_slot_1 (X, new_indices, new_reads, reads, offsets[k - 1],
                              frequencies[k], pos + 3, read_len,
                              interval_start, interval_end, interval_depth, read_lock,
                              bkt_offset, min_overlap);
            }
        }
    }
  if (free_new_indices == 1)
    {
      free (new_indices);
      free (new_reads);
    }
}

// NOTE: we do not malloc real suffix array in this step, only get the bucket size.
void initial_sort (char *reads, INDEX_TYPE_T num_elems, bucket_t * bkt, INDEX_TYPE_T * bsize, INDEX_TYPE_T * boffset, INDEX_TYPE_T * max_size) { INDEX_TYPE_T i; int j; int k; INDEX_TYPE_T *offsets = bkt->offs; INDEX_TYPE_T *frequencies = bkt->freqs; INDEX_TYPE_T msize; memset (frequencies, 0, NUM_SLOTS * sizeof (INDEX_TYPE_T)); #ifdef __OPENMP__ omp_lock_t lock; omp_init_lock (&lock); #pragma omp parallel default(none) private(i, j, k) shared(lock, num_elems, reads, frequencies) { #endif INDEX_TYPE_T freq[NUM_SLOTS]; memset (freq, 0, NUM_SLOTS * sizeof (INDEX_TYPE_T)); #ifdef __OPENMP__ #pragma omp for nowait #endif for (i = 0; i < num_elems; i++) { freq[(int) reads[i]]++; } #ifdef __OPENMP__ omp_set_lock (&lock); #endif for (j = 0; j < NUM_SLOTS; j++) { frequencies[j] += freq[j]; } #ifdef __OPENMP__ omp_unset_lock (&lock); omp_destroy_lock (&lock); } #endif msize = 0; for (k = 0; k < SLOT_DEPTH; k++) { offsets[k * SLOT_NUM] = 0; bsize[k] = frequencies[k * SLOT_NUM]; for (j = 1; j < SLOT_NUM; j++) { bsize[k] += frequencies[k * SLOT_NUM + j]; offsets[k * SLOT_NUM + j] = offsets[k * SLOT_NUM + j - 1] + frequencies[k * SLOT_NUM + j - 1]; } if (bsize[k] > msize) msize = bsize[k]; } boffset[0] = 0; for (k = 1; k < SLOT_DEPTH; k++) { boffset[k] = boffset[k - 1] + bsize[k - 1]; } *max_size = msize; } void suf_sort (INDEX_TYPE_T * sa, char *reads, int slot, INDEX_TYPE_T * new_idx, bucket_t * bkt0, INDEX_TYPE_T num_elems, int read_len, INDEX_TYPE_T * interval_start, INDEX_TYPE_T * interval_end, u16 * interval_depth, omp_lock_t * read_lock, INDEX_TYPE_T bkt_offset, int min_overlap) { int size; int index; int new_index; bucket_t *bkt_queue = NULL; INDEX_TYPE_T i; int pos; int inter; int j; size = 0; for (j = 0; j < SEQ_DEPTH / 3; j++) { size += pow (64, j); } bkt_queue = (bucket_t *) calloc (size, sizeof (bucket_t)); assert (bkt_queue != NULL); // STEP 1: postprocessing of intial sort #ifdef __OPENMP__ omp_lock_t *vlock = (omp_lock_t *) malloc (sizeof (omp_lock_t) * 
SLOT_NUM); assert (vlock != NULL); #pragma omp parallel default(none) private(i, j) shared(sa, slot, vlock, num_elems, reads, new_idx) { #pragma omp for for (j = 0; j < SLOT_NUM; j++) { omp_init_lock (&vlock[j]); } #endif int jj; int k; INDEX_TYPE_T sa_buf[SLOT_NUM][128]; INDEX_TYPE_T count[SLOT_NUM]; memset (count, 0, SLOT_NUM * sizeof (INDEX_TYPE_T)); #ifdef __OPENMP__ #pragma omp for #endif for (i = 0; i < num_elems; i++) { INDEX_TYPE_T m; int n; int p; n = reads[i]; if (n >= slot * SLOT_NUM && n < (slot + 1) * SLOT_NUM) { p = n - SLOT_NUM * slot; sa_buf[p][count[p]] = i; count[p]++; if (count[p] == 128) { #ifdef __OPENMP__ omp_set_lock (&vlock[p]); #endif m = new_idx[n]; new_idx[n] = new_idx[n] + 128; #ifdef __OPENMP__ omp_unset_lock (&vlock[p]); #endif memcpy (&sa[m], sa_buf[p], 128 * sizeof (INDEX_TYPE_T)); count[p] = 0; } } } #ifdef __OPENMP__ omp_set_lock (&vlock[0]); #endif for (jj = 0; jj < SLOT_NUM; jj++) { for (k = 0; k < count[jj]; k++) { int p = slot * SLOT_NUM + jj; sa[new_idx[p]] = sa_buf[jj][k]; new_idx[p] = new_idx[p] + 1; } } #ifdef __OPENMP__ omp_unset_lock (&vlock[0]); } #pragma omp parallel for for (j = 0; j < SLOT_NUM; j++) { omp_destroy_lock (&vlock[j]); } free (vlock); #endif // STEP 2: parallel LS sort // TODO: Merge the code inter = cool[slot]; #ifdef __OPENMP__ #pragma omp parallel for schedule(dynamic, 1) #endif for (j = 0; j < inter; j++) { int k = cool_table[slot][j]; bucket_t *curr = bkt0; if (curr->freqs[k] > 1) { bucket_t bkt; process_slot_0 (sa, reads, curr->offs[k], curr->freqs[k], 3, read_len, bkt.offs, bkt.freqs); bkt_queue[j] = bkt; } } index = 0; new_index = inter; pos = 6; while (pos < SEQ_DEPTH) { inter = 64 * inter; #ifdef __OPENMP__ #pragma omp parallel for schedule(dynamic, 1) #endif for (j = 0; j < inter; j++) { int m = j / 64; int n = j % 64; bucket_t *curr = &(bkt_queue[index + m]); int k = triplet_table[n + 21]; if (curr->freqs[k] > 1) { bucket_t *bkt = &(bkt_queue[new_index + j]); process_slot_0 (sa, reads, 
curr->offs[k], curr->freqs[k], pos, read_len, bkt->offs, bkt->freqs); } } index = new_index; new_index += inter; pos += 3; } // STEP 3: recursive sort #ifdef __OPENMP__ #pragma omp parallel for schedule(dynamic, 1) #endif for (j = index * 64; j < new_index * 64; j++) { int m = j / 64; int n = j % 64 + 21; bucket_t *curr = &(bkt_queue[m]); int k = triplet_table[n]; if (curr->freqs[k] > 1) { process_slot_1 (sa, NULL, NULL, reads, curr->offs[k], curr->freqs[k], pos, read_len, interval_start, interval_end, interval_depth, read_lock, bkt_offset, min_overlap); } } free (bkt_queue); }
IJMatrix_parcsr.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * IJMatrix_ParCSR interface
 *
 *****************************************************************************/

#include "_hypre_IJ_mv.h"
#include "_hypre_parcsr_mv.h"
#include "../HYPRE.h"

/******************************************************************************
 *
 * hypre_IJMatrixCreateParCSR
 *
 * Creates the underlying ParCSRMatrix object for an IJMatrix and stores it in
 * hypre_IJMatrixObject(matrix). Row/col start arrays are copied from the IJ
 * partitionings, shifted so the matrix is zero-based; when row and column
 * partitionings are the same pointer, row_starts is shared as col_starts
 * (ParCSRMatrixCreate is handed ownership of the allocated arrays).
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixCreateParCSR(hypre_IJMatrix *matrix)
{
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   hypre_ParCSRMatrix *par_matrix;
   HYPRE_BigInt *row_starts;
   HYPRE_BigInt *col_starts;
   HYPRE_Int num_procs;
   HYPRE_Int i;

   hypre_MPI_Comm_size(comm,&num_procs);

#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* Assumed-partition mode: only this rank's [start, end) pair is stored. */
   row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
   /* Shift so the global first row becomes 0. */
   if (hypre_IJMatrixGlobalFirstRow(matrix))
   {
      for (i = 0; i < 2; i++)
      {
         row_starts[i] = row_partitioning[i] - hypre_IJMatrixGlobalFirstRow(matrix);
      }
   }
   else
   {
      for (i = 0; i < 2; i++)
      {
         row_starts[i] = row_partitioning[i];
      }
   }
   if (row_partitioning != col_partitioning)
   {
      col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
      if (hypre_IJMatrixGlobalFirstCol(matrix))
      {
         for (i = 0; i < 2; i++)
         {
            col_starts[i] = col_partitioning[i]-hypre_IJMatrixGlobalFirstCol(matrix);
         }
      }
      else
      {
         for (i = 0; i < 2; i++)
         {
            col_starts[i] = col_partitioning[i];
         }
      }
   }
   else
   {
      /* Square matrix with identical partitionings: share the array. */
      col_starts = row_starts;
   }
   par_matrix = hypre_ParCSRMatrixCreate(comm, hypre_IJMatrixGlobalNumRows(matrix),
                                         hypre_IJMatrixGlobalNumCols(matrix),
                                         row_starts, col_starts, 0, 0, 0);
#else
   /* Global-partition mode: full num_procs+1 start array on every rank. */
   row_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
   if (row_partitioning[0])
   {
      for (i = 0; i < num_procs+1; i++)
      {
         row_starts[i] = row_partitioning[i]-row_partitioning[0];
      }
   }
   else
   {
      for (i = 0; i < num_procs+1; i++)
      {
         row_starts[i] = row_partitioning[i];
      }
   }
   if (row_partitioning != col_partitioning)
   {
      col_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
      if (col_partitioning[0])
      {
         for (i = 0; i < num_procs+1; i++)
         {
            col_starts[i] = col_partitioning[i]-col_partitioning[0];
         }
      }
      else
      {
         for (i = 0; i < num_procs+1; i++)
         {
            col_starts[i] = col_partitioning[i];
         }
      }
   }
   else
   {
      col_starts = row_starts;
   }
   /* Global dims are the last entries of the shifted start arrays. */
   par_matrix = hypre_ParCSRMatrixCreate(comm, row_starts[num_procs],
                                         col_starts[num_procs],
                                         row_starts, col_starts, 0, 0, 0);
#endif

   hypre_IJMatrixObject(matrix) = par_matrix;

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixSetRowSizesParCSR
 *
 * Records the user-supplied per-row size estimates (sizes[local_num_rows]) in
 * the auxiliary matrix's RowSpace array, creating the aux matrix if needed.
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixSetRowSizesParCSR(hypre_IJMatrix *matrix,
                                const HYPRE_Int *sizes)
{
   HYPRE_Int local_num_rows, local_num_cols, i, *row_space = NULL;
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
#ifdef HYPRE_NO_GLOBAL_PARTITION
   local_num_rows = (HYPRE_Int)(row_partitioning[1]-row_partitioning[0]);
   local_num_cols = (HYPRE_Int)(col_partitioning[1]-col_partitioning[0]);
#else
   HYPRE_Int my_id;
   hypre_MPI_Comm_rank(hypre_IJMatrixComm(matrix), &my_id);
   local_num_rows = (HYPRE_Int)(row_partitioning[my_id+1]-row_partitioning[my_id]);
   local_num_cols = (HYPRE_Int)(col_partitioning[my_id+1]-col_partitioning[my_id]);
#endif

   hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
   if (aux_matrix)
   {
      /* Reuse an existing RowSpace array if one is already attached. */
      row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
   }
   if (!row_space)
   {
      row_space = hypre_CTAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
   }
   for (i = 0; i < local_num_rows; i++)
   {
      row_space[i] = sizes[i];
   }
   if (!aux_matrix)
   {
      hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, local_num_cols, row_space);
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }
   hypre_AuxParCSRMatrixRowSpace(aux_matrix) = row_space;
#if defined(HYPRE_USING_CUDA)
   /* On the device path, the total of the user sizes is tracked as the
    * expected number of on-processor elements. */
   hypre_AuxParCSRMatrixUsrOnProcElmts(aux_matrix) = 0;
   for (i = 0; i < local_num_rows; i++)
   {
      hypre_AuxParCSRMatrixUsrOnProcElmts(aux_matrix) += sizes[i];
   }
#endif

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixSetDiagOffdSizesParCSR
 * sets diag_i inside the diag part of the ParCSRMatrix
 * and offd_i inside the offd part,
 * requires exact row sizes for diag and offd
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixSetDiagOffdSizesParCSR(hypre_IJMatrix *matrix,
                                     const HYPRE_Int *diag_sizes,
                                     const HYPRE_Int *offd_sizes)
{
   HYPRE_Int local_num_rows, local_num_cols;
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
#ifdef HYPRE_NO_GLOBAL_PARTITION
   local_num_rows = (HYPRE_Int)(row_partitioning[1]-row_partitioning[0]);
   local_num_cols = (HYPRE_Int)(col_partitioning[1]-col_partitioning[0]);
#else
   HYPRE_Int my_id;
   hypre_MPI_Comm_rank(hypre_IJMatrixComm(matrix), &my_id);
   local_num_rows = (HYPRE_Int)(row_partitioning[my_id+1]-row_partitioning[my_id]);
   local_num_cols = (HYPRE_Int)(col_partitioning[my_id+1]-col_partitioning[my_id]);
#endif
   hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix *)hypre_IJMatrixTranslator(matrix);
   if (!aux_matrix)
   {
      hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, local_num_cols, NULL);
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }
   if ( hypre_AuxParCSRMatrixDiagSizes(aux_matrix) == NULL)
   {
      hypre_AuxParCSRMatrixDiagSizes(aux_matrix) = hypre_TAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
   }
   if ( hypre_AuxParCSRMatrixOffdSizes(aux_matrix) == NULL)
   {
      hypre_AuxParCSRMatrixOffdSizes(aux_matrix) = hypre_TAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
   }
   /* Copy the exact per-row counts into the aux matrix. */
   hypre_TMemcpy(hypre_AuxParCSRMatrixDiagSizes(aux_matrix), diag_sizes, HYPRE_Int, local_num_rows,
                 HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
   hypre_TMemcpy(hypre_AuxParCSRMatrixOffdSizes(aux_matrix), offd_sizes, HYPRE_Int, local_num_rows,
                 HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
   /* Exact sizes known: the aux row buffers are not needed during Set/Add. */
   hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixSetMaxOnProcElmtsParCSR
 *
 * Records the user hint for the number of on-processor elements (device/CUDA
 * builds only; a no-op otherwise).
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixSetMaxOnProcElmtsParCSR(hypre_IJMatrix *matrix,
                                      HYPRE_Int max_on_proc_elmts)
{
#if defined(HYPRE_USING_CUDA)
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_Int local_num_rows, local_num_cols, my_id;
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   MPI_Comm comm = hypre_IJMatrixComm(matrix);

   hypre_MPI_Comm_rank(comm,&my_id);
   aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
   if (!aux_matrix)
   {
#ifdef HYPRE_NO_GLOBAL_PARTITION
      local_num_rows = (HYPRE_Int)(row_partitioning[1]-row_partitioning[0]);
      local_num_cols = (HYPRE_Int)(col_partitioning[1]-col_partitioning[0]);
#else
      local_num_rows = (HYPRE_Int)(row_partitioning[my_id+1]-row_partitioning[my_id]);
      local_num_cols = (HYPRE_Int)(col_partitioning[my_id+1]-col_partitioning[my_id]);
#endif
      hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, local_num_cols, NULL);
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }
   hypre_AuxParCSRMatrixUsrOnProcElmts(aux_matrix) = max_on_proc_elmts;
#endif

   return hypre_error_flag;
}
/****************************************************************************** * * hypre_IJMatrixSetMaxOffProcElmtsParCSR * *****************************************************************************/ HYPRE_Int hypre_IJMatrixSetMaxOffProcElmtsParCSR(hypre_IJMatrix *matrix, HYPRE_Int max_off_proc_elmts) { hypre_AuxParCSRMatrix *aux_matrix; HYPRE_Int local_num_rows, local_num_cols, my_id; HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix); HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix); MPI_Comm comm = hypre_IJMatrixComm(matrix); hypre_MPI_Comm_rank(comm,&my_id); aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix); if (!aux_matrix) { #ifdef HYPRE_NO_GLOBAL_PARTITION local_num_rows = (HYPRE_Int)(row_partitioning[1]-row_partitioning[0]); local_num_cols = (HYPRE_Int)(col_partitioning[1]-col_partitioning[0]); #else local_num_rows = (HYPRE_Int)(row_partitioning[my_id+1]-row_partitioning[my_id]); local_num_cols = (HYPRE_Int)(col_partitioning[my_id+1]-col_partitioning[my_id]); #endif hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, local_num_cols, NULL); hypre_IJMatrixTranslator(matrix) = aux_matrix; } hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts; #if defined(HYPRE_USING_CUDA) hypre_AuxParCSRMatrixUsrOffProcElmts(aux_matrix) = max_off_proc_elmts; #endif return hypre_error_flag; } /****************************************************************************** * * hypre_IJMatrixInitializeParCSR * * initializes AuxParCSRMatrix and ParCSRMatrix as necessary * *****************************************************************************/ HYPRE_Int hypre_IJMatrixInitializeParCSR(hypre_IJMatrix *matrix) { return hypre_IJMatrixInitializeParCSR_v2(matrix, hypre_HandleMemoryLocation(hypre_handle())); } HYPRE_Int hypre_IJMatrixInitializeParCSR_v2(hypre_IJMatrix *matrix, HYPRE_MemoryLocation memory_location) { hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) 
hypre_IJMatrixObject(matrix); hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix); HYPRE_Int local_num_rows; HYPRE_MemoryLocation memory_location_aux = hypre_GetExecPolicy1(memory_location) == HYPRE_EXEC_HOST ? HYPRE_MEMORY_HOST : HYPRE_MEMORY_DEVICE; if (hypre_IJMatrixAssembleFlag(matrix) == 0) { if (!par_matrix) { hypre_IJMatrixCreateParCSR(matrix); par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix); } local_num_rows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(par_matrix)); if (!aux_matrix) { hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(par_matrix)), NULL); hypre_IJMatrixTranslator(matrix) = aux_matrix; } hypre_ParCSRMatrixInitialize_v2(par_matrix, memory_location); hypre_AuxParCSRMatrixInitialize_v2(aux_matrix, memory_location_aux); if (!hypre_AuxParCSRMatrixNeedAux(aux_matrix)) { HYPRE_Int i, *indx_diag, *indx_offd, *diag_i, *offd_i, *diag_sizes, *offd_sizes; hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix); diag_i = hypre_CSRMatrixI(diag); offd_i = hypre_CSRMatrixI(offd); diag_sizes = hypre_AuxParCSRMatrixDiagSizes(aux_matrix); offd_sizes = hypre_AuxParCSRMatrixOffdSizes(aux_matrix); indx_diag = hypre_AuxParCSRMatrixIndxDiag(aux_matrix); indx_offd = hypre_AuxParCSRMatrixIndxOffd(aux_matrix); for (i = 0; i < local_num_rows; i++) { diag_i[i+1] = diag_i[i] + diag_sizes[i]; } hypre_CSRMatrixNumNonzeros(diag) = diag_i[local_num_rows]; hypre_CSRMatrixInitialize(diag); for (i = 0; i < local_num_rows; i++) { offd_i[i+1] = offd_i[i] + offd_sizes[i]; } hypre_CSRMatrixNumNonzeros(offd) = offd_i[local_num_rows]; hypre_CSRMatrixInitialize(offd); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < local_num_rows; i++) { indx_diag[i] = diag_i[i]; indx_offd[i] = offd_i[i]; } } } else if ( memory_location_aux == HYPRE_MEMORY_HOST ) { /* AB 
4/06 - the assemble routine destroys the aux matrix - so we need to recreate if initialize is called again */ if (!aux_matrix) { local_num_rows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(par_matrix)); hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(par_matrix)), NULL); hypre_AuxParCSRMatrixMemoryLocation(aux_matrix) = HYPRE_MEMORY_HOST; hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0; hypre_IJMatrixTranslator(matrix) = aux_matrix; } } return hypre_error_flag; } /****************************************************************************** * * hypre_IJMatrixGetRowCountsParCSR * * gets the number of columns for rows specified by the user * *****************************************************************************/ HYPRE_Int hypre_IJMatrixGetRowCountsParCSR( hypre_IJMatrix *matrix, HYPRE_Int nrows, HYPRE_BigInt *rows, HYPRE_Int *ncols) { HYPRE_BigInt row_index; MPI_Comm comm = hypre_IJMatrixComm(matrix); hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix); HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix); HYPRE_Int *diag_i = hypre_CSRMatrixI(diag); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix); HYPRE_Int *offd_i = hypre_CSRMatrixI(offd); HYPRE_Int i, my_id, pstart, index; HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix); hypre_MPI_Comm_rank(comm,&my_id); #ifdef HYPRE_NO_GLOBAL_PARTITION pstart = 0; #else pstart = my_id; #endif #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i, row_index) HYPRE_SMP_SCHEDULE #endif for (i=0; i < nrows; i++) { row_index = rows[i]; if (row_index >= row_partitioning[pstart] && row_index < row_partitioning[pstart+1]) { /* compute local row number */ index = (HYPRE_Int)(row_index - row_partitioning[pstart]); ncols[i] = diag_i[index+1]-diag_i[index]+offd_i[index+1]-offd_i[index]; } else { ncols[i] = 0; if (print_level) { hypre_printf 
("Warning! Row %b is not on Proc. %d!\n", row_index, my_id); } } } return hypre_error_flag; } /****************************************************************************** * * hypre_IJMatrixGetValuesParCSR * * gets values of an IJMatrix * *****************************************************************************/ HYPRE_Int hypre_IJMatrixGetValuesParCSR( hypre_IJMatrix *matrix, HYPRE_Int nrows, HYPRE_Int *ncols, HYPRE_BigInt *rows, HYPRE_BigInt *cols, HYPRE_Complex *values) { MPI_Comm comm = hypre_IJMatrixComm(matrix); hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix); HYPRE_Int assemble_flag = hypre_IJMatrixAssembleFlag(matrix); hypre_CSRMatrix *diag; HYPRE_Int *diag_i; HYPRE_Int *diag_j; HYPRE_Complex *diag_data; hypre_CSRMatrix *offd; HYPRE_Int *offd_i; HYPRE_Int *offd_j; HYPRE_Complex *offd_data; HYPRE_BigInt *col_map_offd; HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(par_matrix); HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix); #ifndef HYPRE_NO_GLOBAL_PARTITION HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix); #endif HYPRE_Int i, j, n, ii, indx, pstart; HYPRE_Int num_procs, my_id; HYPRE_BigInt col_0, col_n, row, col_indx, first; HYPRE_Int row_local, row_size; HYPRE_Int warning = 0; HYPRE_Int *counter; HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix); hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); if (assemble_flag == 0) { hypre_error_in_arg(1); if (print_level) { hypre_printf("Error! Matrix not assembled yet! 
HYPRE_IJMatrixGetValues\n"); } } #ifdef HYPRE_NO_GLOBAL_PARTITION col_0 = col_starts[0]; col_n = col_starts[1]-1; first = hypre_IJMatrixGlobalFirstCol(matrix); pstart = 0; #else col_0 = col_starts[my_id]; col_n = col_starts[my_id+1]-1; first = col_partitioning[0]; pstart = my_id; #endif diag = hypre_ParCSRMatrixDiag(par_matrix); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); diag_data = hypre_CSRMatrixData(diag); offd = hypre_ParCSRMatrixOffd(par_matrix); offd_i = hypre_CSRMatrixI(offd); if (num_procs > 1) { offd_j = hypre_CSRMatrixJ(offd); offd_data = hypre_CSRMatrixData(offd); col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix); } if (nrows < 0) { nrows = -nrows; counter = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); counter[0] = 0; for (i=0; i < nrows; i++) { counter[i+1] = counter[i]+ncols[i]; } indx = 0; for (i=0; i < nrows; i++) { row = rows[i]; if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1]) { row_local = (HYPRE_Int)(row - row_partitioning[pstart]); row_size = diag_i[row_local+1] - diag_i[row_local] + offd_i[row_local+1] - offd_i[row_local]; if (counter[i]+row_size > counter[nrows]) { hypre_error_in_arg(1); if (print_level) { hypre_printf ("Error! Not enough memory! HYPRE_IJMatrixGetValues\n"); } } if (ncols[i] < row_size) { warning = 1; } for (j = diag_i[row_local]; j < diag_i[row_local+1]; j++) { cols[indx] = (HYPRE_BigInt)diag_j[j] + col_0; values[indx++] = diag_data[j]; } for (j = offd_i[row_local]; j < offd_i[row_local+1]; j++) { cols[indx] = col_map_offd[offd_j[j]]; values[indx++] = offd_data[j]; } counter[i+1] = indx; } else { if (print_level) { hypre_printf ("Warning! Row %b is not on Proc. %d!\n", row, my_id); } } } if (warning) { for (i=0; i < nrows; i++) { ncols[i] = counter[i+1] - counter[i]; } if (print_level) { hypre_printf ("Warning! 
ncols has been changed!\n"); } } hypre_TFree(counter, HYPRE_MEMORY_HOST); } else { indx = 0; for (ii=0; ii < nrows; ii++) { row = rows[ii]; n = ncols[ii]; if (n == 0) /* empty row */ { continue; } if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1]) { row_local = (HYPRE_Int)(row - row_partitioning[pstart]); /* compute local row number */ for (i=0; i < n; i++) { col_indx = cols[indx] - first; values[indx] = 0.0; if (col_indx < col_0 || col_indx > col_n) /* search in offd */ { for (j=offd_i[row_local]; j < offd_i[row_local+1]; j++) { if (col_map_offd[offd_j[j]] == col_indx) { values[indx] = offd_data[j]; break; } } } else /* search in diag */ { col_indx = col_indx - col_0; for (j=diag_i[row_local]; j < diag_i[row_local+1]; j++) { if (diag_j[j] == (HYPRE_Int)col_indx) { values[indx] = diag_data[j]; break; } } } indx++; } } else { if (print_level) { hypre_printf ("Warning! Row %b is not on Proc. %d!\n", row, my_id); } } } } return hypre_error_flag; } /****************************************************************************** * * hypre_IJMatrixSetValuesParCSR * * sets values in an IJMatrix before assembly, * *****************************************************************************/ HYPRE_Int hypre_IJMatrixSetValuesParCSR( hypre_IJMatrix *matrix, HYPRE_Int nrows, HYPRE_Int *ncols, const HYPRE_BigInt *rows, const HYPRE_Int *row_indexes, const HYPRE_BigInt *cols, const HYPRE_Complex *values ) { hypre_ParCSRMatrix *par_matrix; hypre_CSRMatrix *diag, *offd; hypre_AuxParCSRMatrix *aux_matrix; HYPRE_BigInt *row_partitioning; HYPRE_BigInt *col_partitioning; MPI_Comm comm = hypre_IJMatrixComm(matrix); HYPRE_Int num_procs, my_id; HYPRE_Int row_local; //HYPRE_Int row_len; HYPRE_BigInt col_0, col_n, row; HYPRE_Int i, ii, j, n, not_found; //HYPRE_Int col_indx, cnt1; HYPRE_BigInt **aux_j; HYPRE_BigInt *local_j; HYPRE_BigInt *tmp_j; HYPRE_Complex **aux_data; HYPRE_Complex *local_data; HYPRE_Complex *tmp_data; HYPRE_Int diag_space, offd_space; HYPRE_Int 
*row_length, *row_space; HYPRE_Int need_aux; HYPRE_Int tmp_indx, indx; HYPRE_Int space, size, old_size; HYPRE_Int cnt, cnt_diag, cnt_offd; HYPRE_Int pos_diag, pos_offd; HYPRE_Int len_diag, len_offd; HYPRE_Int offd_indx, diag_indx; HYPRE_Int *diag_i; HYPRE_Int *diag_j; HYPRE_Complex *diag_data; HYPRE_Int *offd_i; HYPRE_Int *offd_j; HYPRE_Complex *offd_data; HYPRE_BigInt first; HYPRE_Int pstart; /*HYPRE_Int current_num_elmts;*/ /*HYPRE_Int max_off_proc_elmts;*/ //HYPRE_Int off_proc_i_indx; //HYPRE_BigInt *off_proc_i; //HYPRE_BigInt *off_proc_j; HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix); /*HYPRE_Complex *off_proc_data;*/ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix ); row_partitioning = hypre_IJMatrixRowPartitioning(matrix); col_partitioning = hypre_IJMatrixColPartitioning(matrix); #ifdef HYPRE_NO_GLOBAL_PARTITION col_0 = col_partitioning[0]; col_n = col_partitioning[1]-1; first = hypre_IJMatrixGlobalFirstCol(matrix); pstart = 0; #else col_0 = col_partitioning[my_id]; col_n = col_partitioning[my_id+1]-1; first = col_partitioning[0]; pstart = my_id; #endif if (nrows < 0) { hypre_error_in_arg(2); if (print_level) { hypre_printf("Error! nrows negative! HYPRE_IJMatrixSetValues\n"); } } if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled*/ { HYPRE_BigInt *col_map_offd; HYPRE_Int num_cols_offd; HYPRE_Int j_offd; for (ii=0; ii < nrows; ii++) { row = rows[ii]; n = ncols ? 
ncols[ii] : 1; if (n == 0) /* empty row */ { continue; } indx = row_indexes[ii]; /* processor owns the row */ if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1]) { row_local = (HYPRE_Int)(row - row_partitioning[pstart]); /* compute local row number */ diag = hypre_ParCSRMatrixDiag(par_matrix); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); diag_data = hypre_CSRMatrixData(diag); offd = hypre_ParCSRMatrixOffd(par_matrix); offd_i = hypre_CSRMatrixI(offd); num_cols_offd = hypre_CSRMatrixNumCols(offd); if (num_cols_offd) { col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix); offd_j = hypre_CSRMatrixJ(offd); offd_data = hypre_CSRMatrixData(offd); } size = diag_i[row_local+1] - diag_i[row_local] + offd_i[row_local+1] - offd_i[row_local]; if (n > size) /* Should we change this and allow this? This could be same column index, i.e. only last value is set, previous ones overwritten. */ { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf (" row %b too long! 
\n", row); } return hypre_error_flag; } pos_diag = diag_i[row_local]; pos_offd = offd_i[row_local]; len_diag = diag_i[row_local+1]; len_offd = offd_i[row_local+1]; not_found = 1; for (i=0; i < n; i++) { if (cols[indx] < col_0 || cols[indx] > col_n) /* insert into offd */ { j_offd = hypre_BigBinarySearch(col_map_offd,cols[indx]-first, num_cols_offd); if (j_offd == -1) { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } return hypre_error_flag; } for (j=pos_offd; j < len_offd; j++) { if (offd_j[j] == j_offd) { offd_data[j] = values[indx]; not_found = 0; break; } } if (not_found) { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } return hypre_error_flag; } not_found = 1; } /* diagonal element */ else if (cols[indx] == row) { if (diag_j[pos_diag] != row_local) { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } /* return -1;*/ return hypre_error_flag; } diag_data[pos_diag] = values[indx]; } else /* insert into diag */ { for (j=pos_diag; j < len_diag; j++) { if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0)) { diag_data[j] = values[indx]; not_found = 0; break; } } if (not_found) { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } /* return -1; */ return hypre_error_flag; } } indx++; } } } } else { aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix); row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix); row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix); need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix); for (ii=0; ii < nrows; ii++) { row = rows[ii]; n = ncols ? 
ncols[ii] : 1; if (n == 0) /* empty row */ { continue; } indx = row_indexes[ii]; /* processor owns the row */ if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1]) { row_local = (HYPRE_Int)(row - row_partitioning[pstart]); /* compute local row number */ if (need_aux) { aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix); aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix); local_j = aux_j[row_local]; local_data = aux_data[row_local]; space = row_space[row_local]; old_size = row_length[row_local]; size = space - old_size; if (size < n) { size = n - size; tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST); tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST); } else { tmp_j = NULL; } tmp_indx = 0; not_found = 1; size = old_size; for (i=0; i < n; i++) { for (j=0; j < old_size; j++) { if (local_j[j] == cols[indx]) { local_data[j] = values[indx]; not_found = 0; break; } } if (not_found) { if (size < space) { local_j[size] = cols[indx]; local_data[size++] = values[indx]; } else { tmp_j[tmp_indx] = cols[indx]; tmp_data[tmp_indx++] = values[indx]; } } not_found = 1; indx++; } row_length[row_local] = size+tmp_indx; if (tmp_indx) { aux_j[row_local] = hypre_TReAlloc(aux_j[row_local], HYPRE_BigInt, size+tmp_indx, HYPRE_MEMORY_HOST); aux_data[row_local] = hypre_TReAlloc(aux_data[row_local], HYPRE_Complex, size+tmp_indx, HYPRE_MEMORY_HOST); row_space[row_local] = size+tmp_indx; local_j = aux_j[row_local]; local_data = aux_data[row_local]; } cnt = size; for (i=0; i < tmp_indx; i++) { local_j[cnt] = tmp_j[i]; local_data[cnt++] = tmp_data[i]; } if (tmp_j) { hypre_TFree(tmp_j, HYPRE_MEMORY_HOST); hypre_TFree(tmp_data, HYPRE_MEMORY_HOST); } } else /* insert immediately into data in ParCSRMatrix structure */ { HYPRE_BigInt *big_offd_j; HYPRE_Int col_j; offd_indx =hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local]; diag_indx =hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local]; diag = hypre_ParCSRMatrixDiag(par_matrix); diag_i = 
hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); diag_data = hypre_CSRMatrixData(diag); offd = hypre_ParCSRMatrixOffd(par_matrix); offd_i = hypre_CSRMatrixI(offd); if (num_procs > 1) { big_offd_j = hypre_CSRMatrixBigJ(offd); offd_data = hypre_CSRMatrixData(offd); if (!big_offd_j) { big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)], hypre_CSRMatrixMemoryLocation(offd)); hypre_CSRMatrixBigJ(offd) = big_offd_j; } } cnt_diag = diag_indx; cnt_offd = offd_indx; diag_space = diag_i[row_local+1]; offd_space = offd_i[row_local+1]; not_found = 1; for (i=0; i < n; i++) { if (cols[indx] < col_0 || cols[indx] > col_n) /* insert into offd */ { for (j=offd_i[row_local]; j < offd_indx; j++) { if (big_offd_j[j] == cols[indx]) { offd_data[j] = values[indx]; not_found = 0; break; } } if (not_found) { if (cnt_offd < offd_space) { big_offd_j[cnt_offd] = cols[indx]; offd_data[cnt_offd++] = values[indx]; } else { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf("Error in row %b ! Too many elements!\n", row); } /* return 1; */ return hypre_error_flag; } } not_found = 1; } else /* insert into diag */ { col_j = (HYPRE_Int)(cols[indx]-col_0); for (j=diag_i[row_local]; j < diag_indx; j++) { if (diag_j[j] == col_j) { diag_data[j] = values[indx]; not_found = 0; break; } } if (not_found) { if (cnt_diag < diag_space) { diag_j[cnt_diag] = col_j; diag_data[cnt_diag++] = values[indx]; } else { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf("Error in row %b ! Too many elements !\n", row); } /* return 1; */ return hypre_error_flag; } } not_found = 1; } indx++; } hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag; hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd; } } } } return hypre_error_flag; } /****************************************************************************** * * hypre_IJMatrixSetConstantValuesParCSR * * sets all values in an already assembled IJMatrix to a constant value. 
 *****************************************************************************/

/* Sets every stored entry of an already-assembled IJMatrix (both the diag and
   offd CSR parts) to `value`.  The sparsity pattern is unchanged; if the
   matrix has not been assembled yet there is no pattern to fill, so the call
   records HYPRE_ERROR_GENERIC instead. */
HYPRE_Int
hypre_IJMatrixSetConstantValuesParCSR( hypre_IJMatrix *matrix,
                                       HYPRE_Complex value )
{
   if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled*/
   {
      hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
      hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
      hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
      HYPRE_Complex *diag_data = hypre_CSRMatrixData(diag);
      HYPRE_Complex *offd_data = hypre_CSRMatrixData(offd);
      HYPRE_Int nnz_diag = hypre_CSRMatrixNumNonzeros(diag);
      HYPRE_Int nnz_offd = hypre_CSRMatrixNumNonzeros(offd);
#if defined(HYPRE_USING_CUDA)
      /* Device-resident data: fill with thrust instead of touching host loops. */
      if (hypre_GetExecPolicy1(hypre_IJMatrixMemoryLocation(matrix)) == HYPRE_EXEC_DEVICE)
      {
         HYPRE_THRUST_CALL( fill_n, diag_data, nnz_diag, value );
         HYPRE_THRUST_CALL( fill_n, offd_data, nnz_offd, value );
      }
      else
#endif
      {
         HYPRE_Int ii;
         /* Host path: two independent fills, each trivially parallel. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii) HYPRE_SMP_SCHEDULE
#endif
         for (ii = 0; ii < nnz_diag; ii++)
         {
            diag_data[ii] = value;
         }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii) HYPRE_SMP_SCHEDULE
#endif
         for (ii = 0; ii < nnz_offd; ii++)
         {
            offd_data[ii] = value;
         }
      }
   }
   else
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Matrix not assembled! Required to set constant values!");
   }

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixAddToValuesParCSR
 *
 * adds row values to an IJMatrix
 *
 * Mirrors hypre_IJMatrixSetValuesParCSR but uses `+=` instead of `=` when a
 * matching entry is found.  Rows owned by this process are updated in place
 * (either in the aux matrix or directly in the ParCSR structure, depending on
 * assembly state / need_aux); rows owned by other processes are queued in the
 * aux matrix off-proc buffers and communicated later during assembly.
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixAddToValuesParCSR( hypre_IJMatrix       *matrix,
                                 HYPRE_Int             nrows,
                                 HYPRE_Int            *ncols,
                                 const HYPRE_BigInt   *rows,
                                 const HYPRE_Int      *row_indexes,
                                 const HYPRE_BigInt   *cols,
                                 const HYPRE_Complex  *values )
{
   hypre_ParCSRMatrix *par_matrix;
   hypre_CSRMatrix *diag, *offd;
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_BigInt *row_partitioning;
   HYPRE_BigInt *col_partitioning;
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int num_procs, my_id;
   HYPRE_Int row_local;
   HYPRE_BigInt row;
   HYPRE_BigInt col_0, col_n;       /* global column range owned locally (diag part) */
   HYPRE_Int i, ii, j, n, not_found;
   HYPRE_BigInt **aux_j;
   HYPRE_BigInt *local_j;
   HYPRE_BigInt *tmp_j;
   HYPRE_Complex **aux_data;
   HYPRE_Complex *local_data;
   HYPRE_Complex *tmp_data;
   HYPRE_Int diag_space, offd_space;
   HYPRE_Int *row_length, *row_space;
   HYPRE_Int need_aux;
   HYPRE_Int tmp_indx, indx;
   HYPRE_Int space, size, old_size;
   HYPRE_Int cnt, cnt_diag, cnt_offd;
   HYPRE_Int pos_diag, pos_offd;
   HYPRE_Int len_diag, len_offd;
   HYPRE_Int offd_indx, diag_indx;
   HYPRE_BigInt first;
   HYPRE_Int pstart;
   HYPRE_Int *diag_i;
   HYPRE_Int *diag_j;
   HYPRE_Complex *diag_data;
   HYPRE_Int *offd_i;
   HYPRE_Int *offd_j;
   HYPRE_Complex *offd_data;
   HYPRE_Int current_num_elmts;
   HYPRE_Int max_off_proc_elmts;
   HYPRE_Int off_proc_i_indx;
   HYPRE_BigInt *off_proc_i;
   HYPRE_BigInt *off_proc_j;
   HYPRE_Complex *off_proc_data;
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
   row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   col_partitioning = hypre_IJMatrixColPartitioning(matrix);
#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* partitionings hold only the local [begin,end) pair */
   col_0 = col_partitioning[0];
   col_n = col_partitioning[1]-1;
   first = hypre_IJMatrixGlobalFirstCol(matrix);
   pstart = 0;
#else
   /* partitionings hold one entry per process */
   col_0 = col_partitioning[my_id];
   col_n = col_partitioning[my_id+1]-1;
   first = col_partitioning[0];
   pstart = my_id;
#endif

   if (hypre_IJMatrixAssembleFlag(matrix))
   {
      /* Matrix is assembled: entries may only be added to existing positions
         of the fixed sparsity pattern. */
      HYPRE_Int num_cols_offd;
      HYPRE_BigInt *col_map_offd;
      HYPRE_Int j_offd;

      /* AB - 4/06 - need to get this object*/
      aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);

      for (ii=0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols ? ncols[ii] : 1;
         if (n == 0) /* empty row */
         {
            continue;
         }
         indx = row_indexes[ii];
         /* processor owns the row */
         if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
            /* compute local row number */
            diag = hypre_ParCSRMatrixDiag(par_matrix);
            diag_i = hypre_CSRMatrixI(diag);
            diag_j = hypre_CSRMatrixJ(diag);
            diag_data = hypre_CSRMatrixData(diag);
            offd = hypre_ParCSRMatrixOffd(par_matrix);
            offd_i = hypre_CSRMatrixI(offd);
            num_cols_offd = hypre_CSRMatrixNumCols(offd);
            if (num_cols_offd)
            {
               col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
               offd_j = hypre_CSRMatrixJ(offd);
               offd_data = hypre_CSRMatrixData(offd);
            }
            size = diag_i[row_local+1] - diag_i[row_local]
                   + offd_i[row_local+1] - offd_i[row_local];
            if (n > size)
               /* Should we change this and allow this?
                  This could be same column index, i.e. only last value is set,
                  previous ones overwritten. */
            {
               hypre_error(HYPRE_ERROR_GENERIC);
               if (print_level)
               {
                  hypre_printf (" row %b too long! \n", row);
               }
               return hypre_error_flag;
            }

            pos_diag = diag_i[row_local];
            pos_offd = offd_i[row_local];
            len_diag = diag_i[row_local+1];
            len_offd = offd_i[row_local+1];
            not_found = 1;

            for (i=0; i < n; i++)
            {
               if (cols[indx] < col_0 || cols[indx] > col_n)
                  /* insert into offd */
               {
                  /* map global column to local offd index */
                  j_offd = hypre_BigBinarySearch(col_map_offd,cols[indx]-first,
                                                 num_cols_offd);
                  if (j_offd == -1)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                     /* return -1; */
                  }
                  for (j=pos_offd; j < len_offd; j++)
                  {
                     if (offd_j[j] == j_offd)
                     {
                        offd_data[j] += values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
                  not_found = 1;
               }
               /* diagonal element */
               else if (cols[indx] == row)
               {
                  /* the diagonal entry is expected at the first position of
                     the diag row */
                  if (diag_j[pos_diag] != row_local)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
                  diag_data[pos_diag] += values[indx];
               }
               else  /* insert into diag */
               {
                  for (j=pos_diag; j < len_diag; j++)
                  {
                     if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
                     {
                        diag_data[j] += values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
               }
               indx++;
            }
         }
         /* not my row */
         else
         {
            /* Queue the (row, n, cols, values) chunk for communication at
               assembly time. */
            if (!aux_matrix)
            {
               size = (HYPRE_Int)(row_partitioning[pstart+1]-row_partitioning[pstart]);
               hypre_AuxParCSRMatrixCreate(&aux_matrix, size, size, NULL);
               hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
               hypre_IJMatrixTranslator(matrix) = aux_matrix;
            }
            current_num_elmts = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
            max_off_proc_elmts = hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
            off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
            off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
            off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
            off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            if (!max_off_proc_elmts)
            {
               /* first off-proc element: allocate the buffers */
               max_off_proc_elmts = hypre_max(n,1000);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix)
                  = hypre_CTAlloc(HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix)
                  = hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcData(aux_matrix)
                  = hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
               off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
               off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            }
            else if (current_num_elmts + n > max_off_proc_elmts)
            {
               /* grow the buffers */
               max_off_proc_elmts += 3*n;
               off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt,
                                           2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt,
                                           max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_data = hypre_TReAlloc(off_proc_data,HYPRE_Complex,
                                              max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
               hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
            }
            /* AB - 4/6 - the row should be negative to indicate an add */
            /* UMY - 12/28/09 - now positive since we eliminated the feature
               of setting on other processors */
            /* off_proc_i[off_proc_i_indx++] = row; */
            /* off_proc_i stores (row, count) pairs; off_proc_j/off_proc_data
               hold the flattened column indices and values */
            off_proc_i[off_proc_i_indx++] = row;
            off_proc_i[off_proc_i_indx++] = n;
            for (i=0; i < n; i++)
            {
               off_proc_j[current_num_elmts] = cols[indx];
               off_proc_data[current_num_elmts++] = values[indx++];
            }
            hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
            hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix) = current_num_elmts;
         }
      }
   }
   /* not assembled */
   else
   {
      aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
      row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
      row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
      need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
      for (ii=0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols ? ncols[ii] : 1;
         if (n == 0) /* empty row */
         {
            continue;
         }
         indx = row_indexes[ii];
         if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
            /* compute local row number */
            if (need_aux)
            {
               /* Entries accumulate in per-row aux arrays; overflow beyond the
                  reserved row space is staged in tmp_j/tmp_data and the row is
                  reallocated afterwards. */
               aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
               aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
               local_j = aux_j[row_local];
               local_data = aux_data[row_local];
               space = row_space[row_local];
               old_size = row_length[row_local];
               size = space - old_size;
               if (size < n)
               {
                  size = n - size;
                  tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
                  tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
               }
               else
               {
                  tmp_j = NULL;
               }
               tmp_indx = 0;
               not_found = 1;
               size = old_size;
               for (i=0; i < n; i++)
               {
                  /* add to an existing entry if the column is already present */
                  for (j=0; j < old_size; j++)
                  {
                     if (local_j[j] == cols[indx])
                     {
                        local_data[j] += values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     if (size < space)
                     {
                        local_j[size] = cols[indx];
                        local_data[size++] = values[indx];
                     }
                     else
                     {
                        tmp_j[tmp_indx] = cols[indx];
                        tmp_data[tmp_indx++] = values[indx];
                     }
                  }
                  not_found = 1;
                  indx++;
               }
               row_length[row_local] = size+tmp_indx;

               if (tmp_indx)
               {
                  /* grow the row and append the staged overflow entries */
                  aux_j[row_local] = hypre_TReAlloc(aux_j[row_local], HYPRE_BigInt,
                                                    size+tmp_indx, HYPRE_MEMORY_HOST);
                  aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
                                                       HYPRE_Complex, size+tmp_indx,
                                                       HYPRE_MEMORY_HOST);
                  row_space[row_local] = size+tmp_indx;
                  local_j = aux_j[row_local];
                  local_data = aux_data[row_local];
               }

               cnt = size;

               for (i=0; i < tmp_indx; i++)
               {
                  local_j[cnt] = tmp_j[i];
                  local_data[cnt++] = tmp_data[i];
               }

               if (tmp_j)
               {
                  hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
                  hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
               }
            }
            else /* insert immediately into data in ParCSRMatrix structure */
            {
               HYPRE_BigInt *big_offd_j;
               offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
               diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
               diag = hypre_ParCSRMatrixDiag(par_matrix);
               diag_i = hypre_CSRMatrixI(diag);
               diag_j = hypre_CSRMatrixJ(diag);
               diag_data = hypre_CSRMatrixData(diag);
               offd = hypre_ParCSRMatrixOffd(par_matrix);
               offd_i = hypre_CSRMatrixI(offd);
               if (num_procs > 1)
               {
                  /* offd columns are kept as global indices (BigJ) until
                     assembly */
                  big_offd_j = hypre_CSRMatrixBigJ(offd);
                  offd_data = hypre_CSRMatrixData(offd);
                  if (!big_offd_j)
                  {
                     big_offd_j = hypre_CTAlloc(HYPRE_BigInt,
                                                offd_i[hypre_CSRMatrixNumRows(offd)],
                                                hypre_CSRMatrixMemoryLocation(offd));
                     hypre_CSRMatrixBigJ(offd) = big_offd_j;
                  }
               }
               cnt_diag = diag_indx;
               cnt_offd = offd_indx;
               diag_space = diag_i[row_local+1];
               offd_space = offd_i[row_local+1];
               not_found = 1;
               for (i=0; i < n; i++)
               {
                  if (cols[indx] < col_0 || cols[indx] > col_n)
                     /* insert into offd */
                  {
                     for (j=offd_i[row_local]; j < offd_indx; j++)
                     {
                        if (big_offd_j[j] == cols[indx])
                        {
                           offd_data[j] += values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (cnt_offd < offd_space)
                        {
                           big_offd_j[cnt_offd] = cols[indx];
                           offd_data[cnt_offd++] = values[indx];
                        }
                        else
                        {
                           hypre_error(HYPRE_ERROR_GENERIC);
                           if (print_level)
                           {
                              hypre_printf("Error in row %b ! Too many elements!\n",
                                           row);
                           }
                           /* return 1;*/
                           return hypre_error_flag;
                        }
                     }
                     not_found = 1;
                  }
                  else  /* insert into diag */
                  {
                     HYPRE_Int col_j = (HYPRE_Int)( cols[indx] - col_0);
                     for (j=diag_i[row_local]; j < diag_indx; j++)
                     {
                        if (diag_j[j] == col_j)
                        {
                           diag_data[j] += values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (cnt_diag < diag_space)
                        {
                           diag_j[cnt_diag] = col_j;
                           diag_data[cnt_diag++] = values[indx];
                        }
                        else
                        {
                           hypre_error(HYPRE_ERROR_GENERIC);
                           if (print_level)
                           {
                              hypre_printf("Error in row %b ! Too many elements !\n",
                                           row);
                           }
                           /* return 1; */
                           return hypre_error_flag;
                        }
                     }
                     not_found = 1;
                  }
                  indx++;
               }

               hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
               hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
            }
         }
         /* not my row */
         else
         {
            /* queue for communication at assembly, same scheme as above:
               off_proc_i holds (row, count) pairs */
            current_num_elmts = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
            max_off_proc_elmts = hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
            off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
            off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
            off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
            off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            if (!max_off_proc_elmts)
            {
               max_off_proc_elmts = hypre_max(n,1000);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix)
                  = hypre_CTAlloc(HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix)
                  = hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcData(aux_matrix)
                  = hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
               off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
               off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            }
            else if (current_num_elmts + n > max_off_proc_elmts)
            {
               max_off_proc_elmts += 3*n;
               off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt,
                                           2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt,
                                           max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_data = hypre_TReAlloc(off_proc_data,HYPRE_Complex,
                                              max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
               hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
            }
            off_proc_i[off_proc_i_indx++] = row;
            off_proc_i[off_proc_i_indx++] = n;
            for (i=0; i < n; i++)
            {
               off_proc_j[current_num_elmts] = cols[indx];
               off_proc_data[current_num_elmts++] = values[indx++];
            }
            hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
            hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix) = current_num_elmts;
         }
      }
   }

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixDestroyParCSR
 *
 * frees an IJMatrix
 *
 * Destroys both the ParCSR object and the auxiliary translator matrix.
 * NOTE(review): presumably the hypre destroy routines tolerate NULL
 * arguments when either object was never created - confirm against their
 * implementations.
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixDestroyParCSR(hypre_IJMatrix *matrix)
{
   hypre_ParCSRMatrixDestroy((hypre_ParCSRMatrix *)hypre_IJMatrixObject(matrix));
   hypre_AuxParCSRMatrixDestroy((hypre_AuxParCSRMatrix*)hypre_IJMatrixTranslator(matrix));
   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixAssembleOffProcValsParCSR
 *
 * This is for handling set and get values calls to off-proc. entries -
 * it is called from matrix assemble. There is an alternate version for
 * when the assumed partition is being used.
 *****************************************************************************/

#ifndef HYPRE_NO_GLOBAL_PARTITION

/* Global-partition version.  Phases:
 *   1) for each queued (row, n) chunk, find the owning process from the
 *      global row partitioning and tally per-process counts;
 *   2) allgather the (proc, #chunks, #elmts) triples so every process learns
 *      who will send to it;
 *   3) exchange index data (row, count, columns) and value data with
 *      nonblocking sends/receives;
 *   4) apply each received chunk locally via hypre_IJMatrixAddToValuesParCSR.
 */
HYPRE_Int
hypre_IJMatrixAssembleOffProcValsParCSR( hypre_IJMatrix *matrix,
                                         HYPRE_Int off_proc_i_indx,
                                         HYPRE_Int max_off_proc_elmts,
                                         HYPRE_Int current_num_elmts,
                                         HYPRE_MemoryLocation memory_location,
                                         HYPRE_BigInt *off_proc_i,
                                         HYPRE_BigInt *off_proc_j,
                                         HYPRE_Complex *off_proc_data )
{
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   hypre_MPI_Request *requests = NULL;
   hypre_MPI_Status *status = NULL;
   HYPRE_Int i, ii, j, j2, jj, n, row_index = 0;
   HYPRE_BigInt row;
   HYPRE_Int iii, iid, indx, ip;
   HYPRE_Int proc_id, num_procs, my_id;
   HYPRE_Int num_sends, num_sends3;
   HYPRE_Int num_recvs;
   HYPRE_Int num_requests;
   HYPRE_Int vec_start, vec_len;
   HYPRE_Int *send_procs;
   HYPRE_Int *chunks;
   HYPRE_BigInt *send_i;
   HYPRE_Int *send_map_starts;
   HYPRE_Int *dbl_send_map_starts;
   HYPRE_Int *recv_procs;
   HYPRE_Int *recv_chunks;
   HYPRE_BigInt *recv_i;
   HYPRE_Int *recv_vec_starts;
   HYPRE_Int *dbl_recv_vec_starts;
   HYPRE_Int *info;
   HYPRE_Int *int_buffer;
   HYPRE_Int *proc_id_mem;
   HYPRE_BigInt *partitioning;
   HYPRE_Int *displs;
   HYPRE_Int *recv_buf;
   HYPRE_Complex *send_data;
   HYPRE_Complex *recv_data;

   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   partitioning = hypre_IJMatrixRowPartitioning(matrix);

   info = hypre_CTAlloc(HYPRE_Int, num_procs, HYPRE_MEMORY_HOST);
   chunks = hypre_CTAlloc(HYPRE_Int, num_procs, HYPRE_MEMORY_HOST);
   proc_id_mem = hypre_CTAlloc(HYPRE_Int, off_proc_i_indx/2, HYPRE_MEMORY_HOST);
   /* off_proc_i is a flat list of (row, count) pairs; remember each chunk's
      owner so the packing loop below does not have to search again */
   j=0;
   for (i=0; i < off_proc_i_indx; i++)
   {
      row = off_proc_i[i++];
      //if (row < 0) row = -row-1;
      n = (HYPRE_Int)off_proc_i[i];
      proc_id = hypre_FindProc(partitioning,row,num_procs);
      proc_id_mem[j++] = proc_id;
      info[proc_id] += n;          /* total elements destined for proc_id */
      chunks[proc_id]++;           /* number of (row,count) chunks for it */
   }

   /* determine send_procs and amount of data to be sent */
   num_sends = 0;
   for (i=0; i < num_procs; i++)
   {
      if (info[i])
      {
         num_sends++;
      }
   }
   send_procs = hypre_CTAlloc(HYPRE_Int, num_sends, HYPRE_MEMORY_HOST);
   send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);
   dbl_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);
   num_sends3 = 3*num_sends;
   int_buffer = hypre_CTAlloc(HYPRE_Int, 3*num_sends, HYPRE_MEMORY_HOST);
   j = 0;
   j2 = 0;
   send_map_starts[0] = 0;
   dbl_send_map_starts[0] = 0;
   for (i=0; i < num_procs; i++)
   {
      if (info[i])
      {
         send_procs[j++] = i;
         /* each chunk contributes 2 header entries (row, count) plus its data */
         send_map_starts[j] = send_map_starts[j-1]+2*chunks[i]+info[i];
         dbl_send_map_starts[j] = dbl_send_map_starts[j-1]+info[i];
         int_buffer[j2++] = i;
         int_buffer[j2++] = chunks[i];
         int_buffer[j2++] = info[i];
      }
   }
   hypre_TFree(chunks, HYPRE_MEMORY_HOST);

   /* every process publishes its (dest, #chunks, #elmts) triples so the
      receivers can size their buffers */
   hypre_MPI_Allgather(&num_sends3,1,HYPRE_MPI_INT,info,1,HYPRE_MPI_INT,comm);

   displs = hypre_CTAlloc(HYPRE_Int, num_procs+1, HYPRE_MEMORY_HOST);
   displs[0] = 0;
   for (i=1; i < num_procs+1; i++)
   {
      displs[i] = displs[i-1]+info[i-1];
   }
   recv_buf = hypre_CTAlloc(HYPRE_Int, displs[num_procs], HYPRE_MEMORY_HOST);

   hypre_MPI_Allgatherv(int_buffer,num_sends3,HYPRE_MPI_INT,recv_buf,info,displs,
                        HYPRE_MPI_INT,comm);

   hypre_TFree(int_buffer, HYPRE_MEMORY_HOST);
   hypre_TFree(info, HYPRE_MEMORY_HOST);

   /* determine recv procs and amount of data to be received */
   num_recvs = 0;
   for (j=0; j < displs[num_procs]; j+=3)
   {
      if (recv_buf[j] == my_id)
      {
         num_recvs++;
      }
   }

   recv_procs = hypre_CTAlloc(HYPRE_Int, num_recvs, HYPRE_MEMORY_HOST);
   recv_chunks = hypre_CTAlloc(HYPRE_Int, num_recvs, HYPRE_MEMORY_HOST);
   recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST);
   dbl_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST);

   j2 = 0;
   recv_vec_starts[0] = 0;
   dbl_recv_vec_starts[0] = 0;
   for (i=0; i < num_procs; i++)
   {
      for (j=displs[i]; j < displs[i+1]; j+=3)
      {
         if (recv_buf[j] == my_id)
         {
            recv_procs[j2] = i;
            recv_chunks[j2++] = recv_buf[j+1];
            recv_vec_starts[j2] = recv_vec_starts[j2-1]+2*recv_buf[j+1]
                                  +recv_buf[j+2];
            dbl_recv_vec_starts[j2] = dbl_recv_vec_starts[j2-1]+recv_buf[j+2];
         }
         if (j2 == num_recvs)
         {
            break;
         }
      }
   }
   hypre_TFree(recv_buf, HYPRE_MEMORY_HOST);
   hypre_TFree(displs, HYPRE_MEMORY_HOST);

   /* set up data to be sent to send procs */
   /* send_i contains for each send proc : row no., no. of elmts and column
      indices, send_data contains corresponding values */
   send_i = hypre_CTAlloc(HYPRE_BigInt, send_map_starts[num_sends], HYPRE_MEMORY_HOST);
   send_data = hypre_CTAlloc(HYPRE_Complex, dbl_send_map_starts[num_sends], HYPRE_MEMORY_HOST);
   recv_i = hypre_CTAlloc(HYPRE_BigInt, recv_vec_starts[num_recvs], HYPRE_MEMORY_HOST);
   recv_data = hypre_CTAlloc(HYPRE_Complex, dbl_recv_vec_starts[num_recvs], HYPRE_MEMORY_HOST);

   j=0;
   jj=0;
   for (i=0; i < off_proc_i_indx; i++)
   {
      row = off_proc_i[i++];
      n = (HYPRE_Int)off_proc_i[i];
      proc_id = proc_id_mem[i/2];
      indx = hypre_BinarySearch(send_procs,proc_id,num_sends);
      iii = send_map_starts[indx];
      iid = dbl_send_map_starts[indx];
      send_i[iii++] = row;
      send_i[iii++] = (HYPRE_BigInt) n;
      for (ii = 0; ii < n; ii++)
      {
         send_i[iii++] = off_proc_j[jj];
         send_data[iid++] = off_proc_data[jj++];
      }
      /* starts are advanced as write cursors and restored below */
      send_map_starts[indx] = iii;
      dbl_send_map_starts[indx] = iid;
   }

   hypre_TFree(proc_id_mem, HYPRE_MEMORY_HOST);

   /* shift the (now advanced) cursors back into proper start offsets */
   for (i=num_sends; i > 0; i--)
   {
      send_map_starts[i] = send_map_starts[i-1];
      dbl_send_map_starts[i] = dbl_send_map_starts[i-1];
   }
   send_map_starts[0] = 0;
   dbl_send_map_starts[0] = 0;

   num_requests = num_recvs+num_sends;
   if (num_requests)
   {
      requests = hypre_CTAlloc(hypre_MPI_Request, num_requests, HYPRE_MEMORY_HOST);
      status = hypre_CTAlloc(hypre_MPI_Status, num_requests, HYPRE_MEMORY_HOST);
   }

   /* first exchange: index data (rows, counts, column indices) */
   j=0;
   for (i=0; i < num_recvs; i++)
   {
      vec_start = recv_vec_starts[i];
      vec_len = recv_vec_starts[i+1] - vec_start;
      ip = recv_procs[i];
      hypre_MPI_Irecv(&recv_i[vec_start], vec_len, HYPRE_MPI_BIG_INT,
                      ip, 0, comm, &requests[j++]);
   }

   for (i=0; i < num_sends; i++)
   {
      vec_start = send_map_starts[i];
      vec_len = send_map_starts[i+1] - vec_start;
      ip = send_procs[i];
      hypre_MPI_Isend(&send_i[vec_start], vec_len, HYPRE_MPI_BIG_INT,
                      ip, 0, comm, &requests[j++]);
   }

   if (num_requests)
   {
      hypre_MPI_Waitall(num_requests, requests, status);
   }

   /* second exchange: the matching values */
   j=0;
   for (i=0; i < num_recvs; i++)
   {
      vec_start = dbl_recv_vec_starts[i];
      vec_len = dbl_recv_vec_starts[i+1] - vec_start;
      ip = recv_procs[i];
      hypre_MPI_Irecv(&recv_data[vec_start], vec_len, HYPRE_MPI_COMPLEX,
                      ip, 0, comm, &requests[j++]);
   }

   for (i=0; i < num_sends; i++)
   {
      vec_start = dbl_send_map_starts[i];
      vec_len = dbl_send_map_starts[i+1] - vec_start;
      ip = send_procs[i];
      hypre_MPI_Isend(&send_data[vec_start], vec_len, HYPRE_MPI_COMPLEX,
                      ip, 0, comm, &requests[j++]);
   }

   if (num_requests)
   {
      hypre_MPI_Waitall(num_requests, requests, status);
      hypre_TFree(requests, HYPRE_MEMORY_HOST);
      hypre_TFree(status, HYPRE_MEMORY_HOST);
   }

   hypre_TFree(send_i, HYPRE_MEMORY_HOST);
   hypre_TFree(send_data, HYPRE_MEMORY_HOST);
   hypre_TFree(send_procs, HYPRE_MEMORY_HOST);
   hypre_TFree(send_map_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(dbl_send_map_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(recv_procs, HYPRE_MEMORY_HOST);
   hypre_TFree(recv_vec_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(dbl_recv_vec_starts, HYPRE_MEMORY_HOST);

   /* apply each received (row, count, cols, values) chunk to the local part */
   j = 0;
   j2 = 0;
   for (i=0; i < num_recvs; i++)
   {
      for (ii=0; ii < recv_chunks[i]; ii++)
      {
         row = recv_i[j];
         HYPRE_Int rcvi = (HYPRE_Int) recv_i[j+1];
         hypre_IJMatrixAddToValuesParCSR(matrix,1,&rcvi,&row,&row_index,
                                         &recv_i[j+2],&recv_data[j2]);
         j2 += recv_i[j+1];
         j += recv_i[j+1]+2;
      }
   }
   hypre_TFree(recv_chunks, HYPRE_MEMORY_HOST);
   hypre_TFree(recv_i, HYPRE_MEMORY_HOST);
   hypre_TFree(recv_data, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

#else

/* assumed partition version */

HYPRE_Int
hypre_IJMatrixAssembleOffProcValsParCSR( hypre_IJMatrix *matrix,
                                         HYPRE_Int off_proc_i_indx,
                                         HYPRE_Int max_off_proc_elmts,
                                         HYPRE_Int current_num_elmts,
                                         HYPRE_MemoryLocation memory_location,
                                         HYPRE_BigInt *off_proc_i,
                                         HYPRE_BigInt *off_proc_j,
                                         HYPRE_Complex *off_proc_data )
{
   MPI_Comm comm = hypre_IJMatrixComm(matrix);

   HYPRE_Int i, j, k, in_i;
   HYPRE_Int myid;

   HYPRE_Int proc_id, last_proc, prev_id, tmp_id;
   HYPRE_Int max_response_size;
   HYPRE_BigInt global_num_cols;
   HYPRE_BigInt global_first_col;
   HYPRE_BigInt
global_first_row; HYPRE_Int ex_num_contacts = 0, num_rows = 0; HYPRE_BigInt range_start, range_end; HYPRE_Int num_elements; HYPRE_Int storage; HYPRE_Int indx; HYPRE_BigInt row; HYPRE_Int num_ranges, row_index = 0; HYPRE_Int num_recvs; HYPRE_BigInt upper_bound; HYPRE_Int counter; HYPRE_Int num_real_procs; HYPRE_Int /*current_proc,*/ original_proc_indx; HYPRE_BigInt *row_list=NULL; HYPRE_Int *row_list_num_elements=NULL; HYPRE_Int *a_proc_id=NULL, *orig_order=NULL; HYPRE_Int *real_proc_id = NULL, *us_real_proc_id = NULL; HYPRE_Int *ex_contact_procs = NULL, *ex_contact_vec_starts = NULL; HYPRE_BigInt *ex_contact_buf = NULL; HYPRE_Int *recv_starts=NULL; HYPRE_BigInt *response_buf = NULL; HYPRE_Int *response_buf_starts=NULL; HYPRE_Int *num_rows_per_proc = NULL, *num_elements_total = NULL; HYPRE_Int *argsort_contact_procs = NULL; HYPRE_Int obj_size_bytes, complex_size; HYPRE_BigInt big_int_size; HYPRE_Int tmp_int; HYPRE_BigInt tmp_big_int; HYPRE_BigInt *col_ptr; HYPRE_BigInt *big_int_data = NULL; HYPRE_Int big_int_data_size = 0, complex_data_size = 0; void *void_contact_buf = NULL; void *index_ptr; void *recv_data_ptr; HYPRE_Complex tmp_complex; HYPRE_Complex *col_data_ptr; HYPRE_Complex *complex_data = NULL; hypre_DataExchangeResponse response_obj1, response_obj2; hypre_ProcListElements send_proc_obj; hypre_IJAssumedPart *apart; hypre_MPI_Comm_rank(comm, &myid); global_num_cols = hypre_IJMatrixGlobalNumCols(matrix); global_first_col = hypre_IJMatrixGlobalFirstCol(matrix); global_first_row = hypre_IJMatrixGlobalFirstRow(matrix); if (memory_location == HYPRE_MEMORY_DEVICE) { HYPRE_BigInt *tmp = hypre_TAlloc(HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST); HYPRE_BigInt *off_proc_i_h = hypre_TAlloc(HYPRE_BigInt, 2*current_num_elmts, HYPRE_MEMORY_HOST); HYPRE_BigInt *off_proc_j_h = hypre_TAlloc(HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST); HYPRE_Complex *off_proc_data_h = hypre_TAlloc(HYPRE_Complex, current_num_elmts, HYPRE_MEMORY_HOST); hypre_TMemcpy(tmp, 
off_proc_i, HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(off_proc_j_h, off_proc_j, HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(off_proc_data_h, off_proc_data, HYPRE_Complex, current_num_elmts, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); for (i = 0; i < current_num_elmts; i++) { off_proc_i_h[2*i] = tmp[i]; off_proc_i_h[2*i+1] = 1; } off_proc_i_indx = current_num_elmts * 2; off_proc_i = off_proc_i_h; off_proc_j = off_proc_j_h; off_proc_data = off_proc_data_h; hypre_TFree(tmp, HYPRE_MEMORY_HOST); } /* call hypre_IJMatrixAddToValuesParCSR directly inside this function * with one chunk of data */ HYPRE_Int off_proc_nelm_recv_cur = 0; HYPRE_Int off_proc_nelm_recv_max = 0; HYPRE_BigInt *off_proc_i_recv = NULL; HYPRE_BigInt *off_proc_j_recv = NULL; HYPRE_Complex *off_proc_data_recv = NULL; HYPRE_BigInt *off_proc_i_recv_d = NULL; HYPRE_BigInt *off_proc_j_recv_d = NULL; HYPRE_Complex *off_proc_data_recv_d = NULL; num_rows = off_proc_i_indx/2; /* verify that we have created the assumed partition */ if (hypre_IJMatrixAssumedPart(matrix) == NULL) { hypre_IJMatrixCreateAssumedPartition(matrix); } apart = (hypre_IJAssumedPart*) hypre_IJMatrixAssumedPart(matrix); /*if (hypre_ParCSRMatrixAssumedPartition(par_matrix) == NULL) { hypre_ParCSRMatrixCreateAssumedPartition(par_matrix); } apart = hypre_ParCSRMatrixAssumedPartition(par_matrix);*/ row_list = hypre_CTAlloc(HYPRE_BigInt, num_rows, HYPRE_MEMORY_HOST); row_list_num_elements = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST); a_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST); orig_order = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST); real_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST); /* get the assumed processor id for each row */ if (num_rows > 0 ) { for (i=0; i < num_rows; i++) { row = off_proc_i[i*2]; //if (row < 0) row = -row - 1; row_list[i] = row; row_list_num_elements[i] = off_proc_i[i*2+1]; 
hypre_GetAssumedPartitionProcFromRow(comm, row, global_first_row, global_num_cols, &proc_id); a_proc_id[i] = proc_id; orig_order[i] = i; } /* now we need to find the actual order of each row - sort on row - this will result in proc ids sorted also...*/ hypre_BigQsortb2i(row_list, a_proc_id, orig_order, 0, num_rows -1); /* calculate the number of contacts */ ex_num_contacts = 1; last_proc = a_proc_id[0]; for (i=1; i < num_rows; i++) { if (a_proc_id[i] > last_proc) { ex_num_contacts++; last_proc = a_proc_id[i]; } } } /* now we will go through a create a contact list - need to contact assumed processors and find out who the actual row owner is - we will contact with a range (2 numbers) */ ex_contact_procs = hypre_CTAlloc(HYPRE_Int, ex_num_contacts, HYPRE_MEMORY_HOST); ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, ex_num_contacts+1, HYPRE_MEMORY_HOST); ex_contact_buf = hypre_CTAlloc(HYPRE_BigInt, ex_num_contacts*2, HYPRE_MEMORY_HOST); counter = 0; range_end = -1; for (i=0; i< num_rows; i++) { if (row_list[i] > range_end) { /* assumed proc */ proc_id = a_proc_id[i]; /* end of prev. 
range */ if (counter > 0) { ex_contact_buf[counter*2 - 1] = row_list[i-1]; } /*start new range*/ ex_contact_procs[counter] = proc_id; ex_contact_vec_starts[counter] = counter*2; ex_contact_buf[counter*2] = row_list[i]; counter++; hypre_GetAssumedPartitionRowRange(comm, proc_id, global_first_col, global_num_cols, &range_start, &range_end); } } /* finish the starts */ ex_contact_vec_starts[counter] = counter*2; /* finish the last range */ if (counter > 0) { ex_contact_buf[counter*2 - 1] = row_list[num_rows - 1]; } /* don't allocate space for responses */ /* create response object - can use same fill response as used in the commpkg routine */ response_obj1.fill_response = hypre_RangeFillResponseIJDetermineRecvProcs; response_obj1.data1 = apart; /* this is necessary so we can fill responses*/ response_obj1.data2 = NULL; max_response_size = 6; /* 6 means we can fit 3 ranges*/ hypre_DataExchangeList(ex_num_contacts, ex_contact_procs, ex_contact_buf, ex_contact_vec_starts, sizeof(HYPRE_BigInt), sizeof(HYPRE_BigInt), &response_obj1, max_response_size, 1, comm, (void**) &response_buf, &response_buf_starts); /* now response_buf contains a proc_id followed by a range upper bound */ hypre_TFree(ex_contact_procs, HYPRE_MEMORY_HOST); hypre_TFree(ex_contact_buf, HYPRE_MEMORY_HOST); hypre_TFree(ex_contact_vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(a_proc_id, HYPRE_MEMORY_HOST); /*how many ranges were returned?*/ num_ranges = response_buf_starts[ex_num_contacts]; num_ranges = num_ranges/2; prev_id = -1; j = 0; counter = 0; num_real_procs = 0; /* loop through ranges - create a list of actual processor ids*/ for (i=0; i<num_ranges; i++) { upper_bound = response_buf[i*2+1]; counter = 0; tmp_id = response_buf[i*2]; /* loop through row_list entries - counting how many are in the range */ while (j < num_rows && row_list[j] <= upper_bound) { real_proc_id[j] = tmp_id; j++; counter++; } if (counter > 0 && tmp_id != prev_id) { num_real_procs++; } prev_id = tmp_id; } /* now we have the list 
of real processor ids (real_proc_id) - and the number of distinct ones - so now we can set up data to be sent - we have HYPRE_Int data and HYPRE_Complex data. that we will need to pack together */ /* first find out how many rows and elements we need to send per proc - so we can do storage */ ex_contact_procs = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST); num_rows_per_proc = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST); num_elements_total = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST); counter = 0; if (num_real_procs > 0 ) { ex_contact_procs[0] = real_proc_id[0]; num_rows_per_proc[0] = 1; num_elements_total[0] = row_list_num_elements[orig_order[0]]; /* loop through real procs - these are sorted (row_list is sorted also)*/ for (i=1; i < num_rows; i++) { if (real_proc_id[i] == ex_contact_procs[counter]) /* same processor */ { num_rows_per_proc[counter] += 1; /*another row */ num_elements_total[counter] += row_list_num_elements[orig_order[i]]; } else /* new processor */ { counter++; ex_contact_procs[counter] = real_proc_id[i]; num_rows_per_proc[counter] = 1; num_elements_total[counter] = row_list_num_elements[orig_order[i]]; } } } /* to pack together, we need to use the largest obj. size of (HYPRE_Int) and (HYPRE_Complex) - if these are much different, then we are wasting some storage, but I do not think that it will be a large amount since this function should not be used on really large amounts of data anyway*/ big_int_size = sizeof(HYPRE_BigInt); complex_size = sizeof(HYPRE_Complex); obj_size_bytes = hypre_max(big_int_size, complex_size); /* set up data to be sent to send procs */ /* for each proc, ex_contact_buf contains #rows, row #, no. elements, col indicies, col data, row #, no. elements, col indicies, col data, etc. 
*/ /* first calculate total storage and make vec_starts arrays */ storage = 0; ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, num_real_procs + 1, HYPRE_MEMORY_HOST); ex_contact_vec_starts[0] = -1; for (i=0; i < num_real_procs; i++) { storage += 1 + 2 * num_rows_per_proc[i] + 2* num_elements_total[i]; ex_contact_vec_starts[i+1] = -storage-1; /* need negative for next loop */ } hypre_TFree(num_elements_total, HYPRE_MEMORY_HOST); /*void_contact_buf = hypre_MAlloc(storage*obj_size_bytes);*/ void_contact_buf = hypre_CTAlloc(char, storage*obj_size_bytes, HYPRE_MEMORY_HOST); index_ptr = void_contact_buf; /* step through with this index */ /* for each proc: #rows, row #, no. elements, col indicies, col data, row #, no. elements, col indicies, col data, etc. */ /* un-sort real_proc_id - we want to access data arrays in order, so cheaper to do this*/ us_real_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST); for (i=0; i < num_rows; i++) { us_real_proc_id[orig_order[i]] = real_proc_id[i]; } hypre_TFree(real_proc_id, HYPRE_MEMORY_HOST); counter = 0; /* index into data arrays */ prev_id = -1; for (i=0; i < num_rows; i++) { proc_id = us_real_proc_id[i]; /* can't use row list[i] - you loose the negative signs that differentiate add/set values */ row = off_proc_i[i*2]; num_elements = row_list_num_elements[i]; /* find position of this processor */ indx = hypre_BinarySearch(ex_contact_procs, proc_id, num_real_procs); in_i = ex_contact_vec_starts[indx]; index_ptr = (void *) ((char *) void_contact_buf + in_i*obj_size_bytes); /* first time for this processor - add the number of rows to the buffer */ if (in_i < 0) { in_i = -in_i - 1; /* re-calc. 
index_ptr since in_i was negative */ index_ptr = (void *) ((char *) void_contact_buf + in_i*obj_size_bytes); tmp_int = num_rows_per_proc[indx]; hypre_TMemcpy( index_ptr, &tmp_int, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); index_ptr = (void *) ((char *) index_ptr + obj_size_bytes); in_i++; } /* add row # */ hypre_TMemcpy( index_ptr, &row, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); index_ptr = (void *) ((char *) index_ptr + obj_size_bytes); in_i++; /* add number of elements */ hypre_TMemcpy( index_ptr, &num_elements, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); index_ptr = (void *) ((char *) index_ptr + obj_size_bytes); in_i++; /* now add col indices */ for (j=0; j< num_elements; j++) { tmp_big_int = off_proc_j[counter+j]; /* col number */ hypre_TMemcpy( index_ptr, &tmp_big_int, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); index_ptr = (void *) ((char *) index_ptr + obj_size_bytes); in_i ++; } /* now add data */ for (j=0; j< num_elements; j++) { tmp_complex = off_proc_data[counter++]; /* value */ hypre_TMemcpy( index_ptr, &tmp_complex, HYPRE_Complex, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); index_ptr = (void *) ((char *) index_ptr + obj_size_bytes); in_i++; } /* increment the indexes to keep track of where we are - we * adjust below to be actual starts*/ ex_contact_vec_starts[indx] = in_i; } /* some clean up */ hypre_TFree(response_buf, HYPRE_MEMORY_HOST); hypre_TFree(response_buf_starts, HYPRE_MEMORY_HOST); hypre_TFree(us_real_proc_id, HYPRE_MEMORY_HOST); hypre_TFree(orig_order, HYPRE_MEMORY_HOST); hypre_TFree(row_list, HYPRE_MEMORY_HOST); hypre_TFree(row_list_num_elements, HYPRE_MEMORY_HOST); hypre_TFree(num_rows_per_proc, HYPRE_MEMORY_HOST); for (i=num_real_procs; i > 0; i--) { ex_contact_vec_starts[i] = ex_contact_vec_starts[i-1]; } ex_contact_vec_starts[0] = 0; /* now send the data */ /***********************************/ /* first get the integer info in send_proc_obj */ /* the response we expect is just a 
confirmation*/ response_buf = NULL; response_buf_starts = NULL; /*build the response object*/ /* use the send_proc_obj for the info kept from contacts */ /*estimate inital storage allocation */ send_proc_obj.length = 0; send_proc_obj.storage_length = num_real_procs + 5; send_proc_obj.id = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST); send_proc_obj.vec_starts = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST); send_proc_obj.vec_starts[0] = 0; send_proc_obj.element_storage_length = storage + 20; send_proc_obj.v_elements = hypre_TAlloc(char, obj_size_bytes*send_proc_obj.element_storage_length, HYPRE_MEMORY_HOST); response_obj2.fill_response = hypre_FillResponseIJOffProcVals; response_obj2.data1 = NULL; response_obj2.data2 = &send_proc_obj; max_response_size = 0; hypre_DataExchangeList(num_real_procs, ex_contact_procs, void_contact_buf, ex_contact_vec_starts, obj_size_bytes, 0, &response_obj2, max_response_size, 2, comm, (void **) &response_buf, &response_buf_starts); hypre_TFree(response_buf, HYPRE_MEMORY_HOST); hypre_TFree(response_buf_starts, HYPRE_MEMORY_HOST); hypre_TFree(ex_contact_procs, HYPRE_MEMORY_HOST); hypre_TFree(void_contact_buf, HYPRE_MEMORY_HOST); hypre_TFree(ex_contact_vec_starts, HYPRE_MEMORY_HOST); /* Now we can unpack the send_proc_objects and call set and add to values functions. 
We unpack messages in a deterministic order, using processor rank */ num_recvs = send_proc_obj.length; argsort_contact_procs = hypre_CTAlloc(HYPRE_Int, num_recvs, HYPRE_MEMORY_HOST); for(i=0; i < num_recvs; i++) { argsort_contact_procs[i] = i; } /* This sort's the id array, but the original indices are stored in * argsort_contact_procs */ hypre_qsort2i( send_proc_obj.id, argsort_contact_procs, 0, num_recvs-1 ); /* alias */ recv_data_ptr = send_proc_obj.v_elements; recv_starts = send_proc_obj.vec_starts; for (i=0; i < num_recvs; i++) { /* Find the current processor in order, and reset recv_data_ptr to that processor's message */ original_proc_indx = argsort_contact_procs[i]; /*current_proc = send_proc_obj.id[i];*/ indx = recv_starts[original_proc_indx]; recv_data_ptr = (void *) ((char *) send_proc_obj.v_elements + indx*obj_size_bytes); /* get the number of rows for this recv */ hypre_TMemcpy( &num_rows, recv_data_ptr, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes); indx++; for (j=0; j < num_rows; j++) /* for each row: unpack info */ { /* row # */ hypre_TMemcpy( &row, recv_data_ptr, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes); indx++; /* num elements for this row */ hypre_TMemcpy( &num_elements, recv_data_ptr, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes); indx++; /* col indices */ /* Need to check this again !!!! 
*/ if (big_int_size == obj_size_bytes) { col_ptr = (HYPRE_BigInt *) recv_data_ptr; recv_data_ptr = (void *) ((char *)recv_data_ptr + num_elements*obj_size_bytes); } else /* copy data */ { if (big_int_data_size < num_elements) { big_int_data = hypre_TReAlloc(big_int_data, HYPRE_BigInt, num_elements + 10, HYPRE_MEMORY_HOST); } for (k=0; k< num_elements; k++) { hypre_TMemcpy( &big_int_data[k], recv_data_ptr, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes); } col_ptr = big_int_data; } /* col data */ if (complex_size == obj_size_bytes) { col_data_ptr = (HYPRE_Complex *) recv_data_ptr; recv_data_ptr = (void *) ((char *)recv_data_ptr + num_elements*obj_size_bytes); } else /* copy data */ { if (complex_data_size < num_elements) { complex_data = hypre_TReAlloc(complex_data, HYPRE_Complex, num_elements + 10, HYPRE_MEMORY_HOST); } for (k=0; k< num_elements; k++) { hypre_TMemcpy( &complex_data[k], recv_data_ptr, HYPRE_Complex, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes); } col_data_ptr = complex_data; } if (memory_location == HYPRE_MEMORY_HOST) { hypre_IJMatrixAddToValuesParCSR(matrix, 1, &num_elements, &row, &row_index, col_ptr, col_data_ptr); } else { HYPRE_Int nelm_new = off_proc_nelm_recv_cur + num_elements; if (nelm_new > off_proc_nelm_recv_max) { off_proc_nelm_recv_max = nelm_new * 2; off_proc_i_recv = hypre_TReAlloc(off_proc_i_recv, HYPRE_BigInt, off_proc_nelm_recv_max, HYPRE_MEMORY_HOST); off_proc_j_recv = hypre_TReAlloc(off_proc_j_recv, HYPRE_BigInt, off_proc_nelm_recv_max, HYPRE_MEMORY_HOST); off_proc_data_recv = hypre_TReAlloc(off_proc_data_recv, HYPRE_Complex, off_proc_nelm_recv_max, HYPRE_MEMORY_HOST); } HYPRE_Int i; for (i = 0; i < num_elements; i++) { off_proc_i_recv[off_proc_nelm_recv_cur + i] = row; } hypre_TMemcpy(off_proc_j_recv + off_proc_nelm_recv_cur, col_ptr, HYPRE_BigInt, num_elements, HYPRE_MEMORY_HOST, 
HYPRE_MEMORY_HOST); hypre_TMemcpy(off_proc_data_recv + off_proc_nelm_recv_cur, col_data_ptr, HYPRE_Complex, num_elements, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); off_proc_nelm_recv_cur = nelm_new; } indx += (num_elements*2); } } if (memory_location == HYPRE_MEMORY_DEVICE) { off_proc_i_recv_d = hypre_TAlloc(HYPRE_BigInt, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE); off_proc_j_recv_d = hypre_TAlloc(HYPRE_BigInt, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE); off_proc_data_recv_d = hypre_TAlloc(HYPRE_Complex, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(off_proc_i_recv_d, off_proc_i_recv, HYPRE_BigInt, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); hypre_TMemcpy(off_proc_j_recv_d, off_proc_j_recv, HYPRE_BigInt, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); hypre_TMemcpy(off_proc_data_recv_d, off_proc_data_recv, HYPRE_Complex, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); #if defined(HYPRE_USING_CUDA) hypre_IJMatrixSetAddValuesParCSRDevice(matrix, off_proc_nelm_recv_cur, NULL, off_proc_i_recv_d, NULL, off_proc_j_recv_d, off_proc_data_recv_d, "add"); #endif } hypre_TFree(send_proc_obj.v_elements, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST); hypre_TFree(argsort_contact_procs, HYPRE_MEMORY_HOST); if (big_int_data) { hypre_TFree(big_int_data, HYPRE_MEMORY_HOST); } if (complex_data) { hypre_TFree(complex_data, HYPRE_MEMORY_HOST); } if (memory_location == HYPRE_MEMORY_DEVICE) { hypre_TFree(off_proc_i, HYPRE_MEMORY_HOST); hypre_TFree(off_proc_j, HYPRE_MEMORY_HOST); hypre_TFree(off_proc_data, HYPRE_MEMORY_HOST); } hypre_TFree(off_proc_i_recv, HYPRE_MEMORY_HOST); hypre_TFree(off_proc_j_recv, HYPRE_MEMORY_HOST); hypre_TFree(off_proc_data_recv, HYPRE_MEMORY_HOST); hypre_TFree(off_proc_i_recv_d, HYPRE_MEMORY_DEVICE); hypre_TFree(off_proc_j_recv_d, HYPRE_MEMORY_DEVICE); hypre_TFree(off_proc_data_recv_d, HYPRE_MEMORY_DEVICE); return 
hypre_error_flag;
}
#endif

/*--------------------------------------------------------------------
 * hypre_FillResponseIJOffProcVals
 *
 * Fill response function for the previous function (2nd data exchange).
 * Appends the raw contact buffer received from contact_proc to the
 * send_proc_obj accumulator (ids, vec_starts, packed v_elements),
 * growing its storage as needed.  The exchange expects no reply, so
 * the response size is set to 0 (confirmation only).
 *--------------------------------------------------------------------*/

HYPRE_Int
hypre_FillResponseIJOffProcVals(void      *p_recv_contact_buf,
                                HYPRE_Int  contact_size,
                                HYPRE_Int  contact_proc,
                                void      *ro,
                                MPI_Comm   comm,
                                void     **p_send_response_buf,
                                HYPRE_Int *response_message_size )
{
   HYPRE_Int    myid;
   HYPRE_Int    index, count, elength;

   HYPRE_Int    object_size;
   void        *index_ptr;

   hypre_DataExchangeResponse  *response_obj = (hypre_DataExchangeResponse*) ro;

   hypre_ProcListElements      *send_proc_obj = (hypre_ProcListElements*) response_obj->data2;

   /* each packed slot is wide enough for either a HYPRE_BigInt or a
      HYPRE_Complex, matching the packing done by the sender */
   object_size = hypre_max(sizeof(HYPRE_BigInt), sizeof(HYPRE_Complex));

   hypre_MPI_Comm_rank(comm, &myid );

   /* check to see if we need to allocate more space in send_proc_obj for
      vec starts and id */
   if (send_proc_obj->length == send_proc_obj->storage_length)
   {
      send_proc_obj->storage_length += 20; /* add space for 20 more contacts */
      send_proc_obj->vec_starts =
         hypre_TReAlloc(send_proc_obj->vec_starts, HYPRE_Int,
                        send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST);
      if ( send_proc_obj->id != NULL)
      {
         send_proc_obj->id =
            hypre_TReAlloc(send_proc_obj->id, HYPRE_Int,
                           send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST);
      }
   }

   /* initialize */
   count = send_proc_obj->length;
   index = send_proc_obj->vec_starts[count]; /* current number of elements */
   if ( send_proc_obj->id != NULL)
   {
      send_proc_obj->id[count] = contact_proc;
   }

   /* do we need more storage for the elements? */
   if (send_proc_obj->element_storage_length < index + contact_size)
   {
      /* grow by at least 100 slots to amortize reallocations */
      elength = hypre_max(contact_size, 100);
      elength += index;
      send_proc_obj->v_elements =
         hypre_TReAlloc((char*)send_proc_obj->v_elements, char,
                        elength*object_size, HYPRE_MEMORY_HOST);
      send_proc_obj->element_storage_length = elength;
   }

   /* populate send_proc_obj: append the raw contact bytes */
   index_ptr = (void *) ((char *) send_proc_obj->v_elements + index*object_size);

   hypre_TMemcpy(index_ptr, p_recv_contact_buf , char,
                 object_size*contact_size, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);

   send_proc_obj->vec_starts[count+1] = index + contact_size;
   send_proc_obj->length++;

   /* output - no message to return (confirmation) */
   *response_message_size = 0;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------
 * hypre_FindProc
 *
 * Binary search over a partition-style array: returns the index i such
 * that list[i] <= value < list[i+1], or -1 if value lies outside
 * [list[0], list[list_length]).
 *
 * NOTE: list[list_length] is read here, so list must hold
 * list_length + 1 entries (as partition arrays do).
 *--------------------------------------------------------------------*/

HYPRE_Int hypre_FindProc(HYPRE_BigInt *list, HYPRE_BigInt value, HYPRE_Int list_length)
{
   HYPRE_Int low, high, m;

   low = 0;
   high = list_length;
   if (value >= list[high] || value < list[low])
   {
      return -1;
   }
   else
   {
      /* invariant: list[low] <= value < list[high] */
      while (low+1 < high)
      {
         m = (low + high) / 2;
         if (value < list[m])
         {
            high = m;
         }
         else if (value >= list[m])
         {
            low = m;
         }
      }
      return low;
   }
}

/******************************************************************************
 *
 * hypre_IJMatrixAssembleParCSR
 *
 * assembles IJMatrix from AuxParCSRMatrix auxiliary structure
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixAssembleParCSR(hypre_IJMatrix *matrix)
{
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix);
   hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
   HYPRE_Int *diag_i = hypre_CSRMatrixI(diag);
   HYPRE_Int *offd_i = hypre_CSRMatrixI(offd);
   HYPRE_Int *diag_j;
   HYPRE_Int *offd_j = NULL;
   HYPRE_Complex *diag_data;
   HYPRE_Complex *offd_data = NULL;
   HYPRE_Int i, j, j0;
   HYPRE_Int num_cols_offd;
   HYPRE_Int *diag_pos;
   HYPRE_BigInt *col_map_offd;
   HYPRE_Int *row_length;
   HYPRE_BigInt **aux_j;
   HYPRE_Complex **aux_data;
   HYPRE_Int my_id, num_procs;
   HYPRE_Int num_rows;
   HYPRE_Int i_diag, i_offd;
HYPRE_BigInt col_0, col_n;
   HYPRE_Int nnz_offd;
   HYPRE_BigInt *big_offd_j;
   HYPRE_BigInt *tmp_j;
   HYPRE_Complex temp;
#ifdef HYPRE_NO_GLOBAL_PARTITION
   HYPRE_BigInt base = hypre_IJMatrixGlobalFirstCol(matrix);
#else
   HYPRE_BigInt base = col_partitioning[0];
#endif
   HYPRE_Int off_proc_i_indx;
   HYPRE_Int max_off_proc_elmts;
   HYPRE_Int current_num_elmts;
   HYPRE_BigInt *off_proc_i;
   HYPRE_BigInt *off_proc_j;
   HYPRE_Complex *off_proc_data;
   HYPRE_Int offd_proc_elmts;
   //HYPRE_Int new_off_proc_i_indx;
   //HYPRE_Int cancel_indx;
   //HYPRE_Int col_indx;
   //HYPRE_Int current_indx;
   //HYPRE_Int current_i;
   //HYPRE_Int row_len;
   HYPRE_Int max_num_threads;
   HYPRE_Int aux_flag, aux_flag_global;

   max_num_threads = hypre_NumThreads();

   /* first find out if anyone has an aux_matrix, and create one if you
      don't have one, but other procs do (collective sum over all ranks) */
   aux_flag = 0;
   aux_flag_global = 0;
   if (aux_matrix)
   {
      aux_flag = 1;
   }
   hypre_MPI_Allreduce(&aux_flag, &aux_flag_global, 1, HYPRE_MPI_INT, hypre_MPI_SUM, comm);
   if (aux_flag_global && (!aux_flag))
   {
      hypre_MPI_Comm_rank(comm, &my_id);
      num_rows = (HYPRE_Int)(row_partitioning[my_id+1] - row_partitioning[my_id]);
      hypre_AuxParCSRMatrixCreate(&aux_matrix, num_rows, num_rows, NULL);
      hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }

   if (aux_matrix)
   {
      /* first delete all cancelled elements */
      /*cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix);
      if (cancel_indx)
      {
         current_num_elmts=hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
         off_proc_i=hypre_AuxParCSRMatrixOffProcI(aux_matrix);
         off_proc_j=hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
         off_proc_data=hypre_AuxParCSRMatrixOffProcData(aux_matrix);
         off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
         col_indx = 0;
         current_i = 0;
         current_indx = 0;
         new_off_proc_i_indx = off_proc_i_indx;
         for (i=0; i < off_proc_i_indx; i= i+2)
         {
            row_len = off_proc_i[i+1];
            for (j=0; j < off_proc_i[i+1]; j++)
            {
               if (off_proc_j[col_indx] == -1)
               {
                  col_indx++;
                  row_len--;
                  current_num_elmts--;
               }
               else
               {
                  off_proc_j[current_indx] = off_proc_j[col_indx];
                  off_proc_data[current_indx++] = off_proc_data[col_indx++];
               }
            }
            if (row_len)
            {
               off_proc_i[current_i] = off_proc_i[i];
               off_proc_i[current_i+1] = row_len;
               current_i += 2;
            }
            else
            {
               new_off_proc_i_indx -= 2;
            }
         }
         hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = new_off_proc_i_indx;
         hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix) = current_num_elmts;
      }*/
      off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
      /* collective: exchange off-processor values only if any rank has some */
      hypre_MPI_Allreduce(&off_proc_i_indx, &offd_proc_elmts, 1, HYPRE_MPI_INT,
                          hypre_MPI_SUM, comm);
      if (offd_proc_elmts)
      {
         max_off_proc_elmts=hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
         current_num_elmts=hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
         off_proc_i=hypre_AuxParCSRMatrixOffProcI(aux_matrix);
         off_proc_j=hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
         off_proc_data=hypre_AuxParCSRMatrixOffProcData(aux_matrix);
         hypre_IJMatrixAssembleOffProcValsParCSR( matrix,off_proc_i_indx,
                                                  max_off_proc_elmts, current_num_elmts,
                                                  HYPRE_MEMORY_HOST,
                                                  off_proc_i, off_proc_j, off_proc_data);
      }
   }

   if (hypre_IJMatrixAssembleFlag(matrix) == 0)
   {
      hypre_MPI_Comm_size(comm, &num_procs);
      hypre_MPI_Comm_rank(comm, &my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
      num_rows = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
      col_0 = col_partitioning[0];
      col_n = col_partitioning[1]-1;
#else
      num_rows = (HYPRE_Int)(row_partitioning[my_id+1] - row_partitioning[my_id]);
      col_0 = col_partitioning[my_id];
      col_n = col_partitioning[my_id+1]-1;
#endif
      /* move data into ParCSRMatrix if not there already */
      if (hypre_AuxParCSRMatrixNeedAux(aux_matrix))
      {
         HYPRE_Int *diag_array, *offd_array;
         diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
         offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
         aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
         aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
         row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
         /* diag_pos[i] records the position of row i's diagonal entry (or -1) */
         diag_pos = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
         i_diag = 0;
         i_offd = 0;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i, j, i_diag, i_offd)
#endif
         {
            HYPRE_BigInt *local_j;
            HYPRE_Complex *local_data;
            HYPRE_Int rest, size, ns, ne;
            HYPRE_Int num_threads, my_thread_num;
            num_threads = hypre_NumActiveThreads();
            my_thread_num = hypre_GetThreadNum();
            /* split the rows [ns, ne) evenly over threads; the first
               `rest` threads take one extra row */
            size = num_rows/num_threads;
            rest = num_rows - size*num_threads;
            if (my_thread_num < rest)
            {
               ns = my_thread_num*(size + 1);
               ne = (my_thread_num+1)*(size + 1);
            }
            else
            {
               ns = my_thread_num*size + rest;
               ne = (my_thread_num+1)*size + rest;
            }
            /* pass 1: count this thread's diag/offd entries and find the
               diagonal position in each row */
            i_diag = 0;
            i_offd = 0;
            for (i=ns; i < ne; i++)
            {
               local_j = aux_j[i];
               local_data = aux_data[i];
               diag_pos[i] = -1;
               for (j=0; j < row_length[i]; j++)
               {
                  if (local_j[j] < col_0 || local_j[j] > col_n)
                  {
                     i_offd++;
                  }
                  else
                  {
                     i_diag++;
                     if ((HYPRE_Int)(local_j[j]-col_0) == i)
                     {
                        diag_pos[i] = j;
                     }
                  }
               }
            }
            diag_array[my_thread_num] = i_diag;
            offd_array[my_thread_num] = i_offd;
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
            /* thread 0 turns the per-thread counts into prefix sums and
               allocates the CSR arrays; all threads wait at the barrier */
            if (my_thread_num == 0)
            {
               i_diag = 0;
               i_offd = 0;
               for (i = 0; i < num_threads; i++)
               {
                  i_diag += diag_array[i];
                  i_offd += offd_array[i];
                  diag_array[i] = i_diag;
                  offd_array[i] = i_offd;
               }
               diag_i[num_rows] = i_diag;
               offd_i[num_rows] = i_offd;
               hypre_TFree(hypre_CSRMatrixJ(diag), hypre_CSRMatrixMemoryLocation(diag));
               hypre_TFree(hypre_CSRMatrixData(diag), hypre_CSRMatrixMemoryLocation(diag));
               hypre_TFree(hypre_CSRMatrixJ(offd), hypre_CSRMatrixMemoryLocation(offd));
               hypre_TFree(hypre_CSRMatrixData(offd), hypre_CSRMatrixMemoryLocation(offd));
               hypre_TFree(hypre_CSRMatrixBigJ(offd), hypre_CSRMatrixMemoryLocation(offd));
               diag_j = hypre_CTAlloc(HYPRE_Int, i_diag, hypre_CSRMatrixMemoryLocation(diag));
               diag_data = hypre_CTAlloc(HYPRE_Complex, i_diag, hypre_CSRMatrixMemoryLocation(diag));
               offd_j = hypre_CTAlloc(HYPRE_Int, i_offd, hypre_CSRMatrixMemoryLocation(offd));
               offd_data = hypre_CTAlloc(HYPRE_Complex, i_offd, hypre_CSRMatrixMemoryLocation(offd));
               big_offd_j = hypre_CTAlloc(HYPRE_BigInt, i_offd, hypre_CSRMatrixMemoryLocation(offd));
            }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
            /* pass 2: each thread starts writing at its prefix offset */
            if (my_thread_num)
            {
               i_diag = diag_array[my_thread_num-1];
               i_offd = offd_array[my_thread_num-1];
            }
            else
            {
               i_diag = 0;
               i_offd = 0;
            }
            for (i=ns; i < ne; i++)
            {
               diag_i[i] = i_diag;
               offd_i[i] = i_offd;
               local_j = aux_j[i];
               local_data = aux_data[i];
               /* the diagonal entry (if any) is placed first in its row */
               if (diag_pos[i] > -1)
               {
                  diag_j[i_diag] = (HYPRE_Int)(local_j[diag_pos[i]] - col_0);
                  diag_data[i_diag++] = local_data[diag_pos[i]];
               }
               for (j=0; j < row_length[i]; j++)
               {
                  if (local_j[j] < col_0 || local_j[j] > col_n)
                  {
                     big_offd_j[i_offd] = local_j[j];
                     offd_data[i_offd++] = local_data[j];
                  }
                  else if (j != diag_pos[i])
                  {
                     diag_j[i_diag] = (HYPRE_Int)(local_j[j] - col_0);
                     diag_data[i_diag++] = local_data[j];
                  }
               }
            }
         } /* end parallel region */

         hypre_TFree(diag_array, HYPRE_MEMORY_HOST);
         hypre_TFree(offd_array, HYPRE_MEMORY_HOST);

         hypre_CSRMatrixJ(diag) = diag_j;
         hypre_CSRMatrixData(diag) = diag_data;
         hypre_CSRMatrixNumNonzeros(diag) = diag_i[num_rows];
         if (offd_i[num_rows] > 0)
         {
            hypre_CSRMatrixJ(offd) = offd_j;
            hypre_CSRMatrixBigJ(offd) = big_offd_j;
            hypre_CSRMatrixData(offd) = offd_data;
         }
         hypre_CSRMatrixNumNonzeros(offd) = offd_i[num_rows];
         hypre_TFree(diag_pos, HYPRE_MEMORY_HOST);
      }
      else
      {
         /* move diagonal element into first space */
         big_offd_j = hypre_CSRMatrixBigJ(offd);
         diag_j = hypre_CSRMatrixJ(diag);
         diag_data = hypre_CSRMatrixData(diag);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private (i,j,j0,temp)
#endif
         for (i = 0; i < num_rows; i++)
         {
            j0 = diag_i[i];
            for (j=j0; j < diag_i[i+1]; j++)
            {
               if (diag_j[j] == i)
               {
                  /* swap the diagonal entry into position j0 */
                  temp = diag_data[j0];
                  diag_data[j0] = diag_data[j];
                  diag_data[j] = temp;
                  diag_j[j] = diag_j[j0];
                  diag_j[j0] = i;
                  break;
               }
            }
         }

         offd_j = hypre_CSRMatrixJ(offd);
         if (!offd_j && offd_i[num_rows])
         {
            offd_j = hypre_CTAlloc(HYPRE_Int, offd_i[num_rows], hypre_CSRMatrixMemoryLocation(offd));
            hypre_CSRMatrixJ(offd) = offd_j;
         }
      }

      /* generate the nonzero rows inside offd and diag by calling */
      hypre_CSRMatrixSetRownnz(diag);
      hypre_CSRMatrixSetRownnz(offd);

      /* generate col_map_offd: sort the big (global) offd columns,
         de-duplicate, then replace big_offd_j with local indices */
      nnz_offd = offd_i[num_rows];
      if (nnz_offd)
      {
         tmp_j = hypre_CTAlloc(HYPRE_BigInt, nnz_offd, HYPRE_MEMORY_HOST);
         for (i=0; i < nnz_offd; i++)
         {
            tmp_j[i] = big_offd_j[i];
         }
         hypre_BigQsort0(tmp_j,0,nnz_offd-1);
         num_cols_offd = 1;
         for (i=0; i < nnz_offd-1; i++)
         {
            if (tmp_j[i+1] > tmp_j[i])
            {
               tmp_j[num_cols_offd++] = tmp_j[i+1];
            }
         }
         col_map_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
         for (i=0; i < num_cols_offd; i++)
         {
            col_map_offd[i] = tmp_j[i];
         }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i)
#endif
         for (i=0; i < nnz_offd; i++)
         {
            offd_j[i]=hypre_BigBinarySearch(col_map_offd,big_offd_j[i],num_cols_offd);
         }
         /* shift the map to zero-based global column numbers */
         if (base)
         {
            for (i=0; i < num_cols_offd; i++)
            {
               col_map_offd[i] -= base;
            }
         }
         hypre_ParCSRMatrixColMapOffd(par_matrix) = col_map_offd;
         hypre_CSRMatrixNumCols(offd) = num_cols_offd;
         hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
         hypre_TFree(big_offd_j, hypre_CSRMatrixMemoryLocation(offd));
         hypre_CSRMatrixBigJ(offd) = NULL;
      }
      hypre_IJMatrixAssembleFlag(matrix) = 1;
   }

   /* the auxiliary structure is no longer needed once assembled */
   hypre_AuxParCSRMatrixDestroy(aux_matrix);
   hypre_IJMatrixTranslator(matrix) = NULL;

   return hypre_error_flag;
}

/******************************************************************************
 *
 * IJMatrix_ParCSR interface
 *
 *****************************************************************************/

#include "_hypre_IJ_mv.h"
#include "../HYPRE.h"

/******************************************************************************
 *
 * hypre_IJMatrixSetValuesOMPParCSR
 *
 * sets values in an IJMatrix before assembly,
 * use of this routine requires that the values in rows are different from each
 * other, i.e. rows[i] != rows[j] for i != j
 * to ensure accurate threading
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixSetValuesOMPParCSR( hypre_IJMatrix       *matrix,
                                  HYPRE_Int             nrows,
                                  HYPRE_Int            *ncols,
                                  const HYPRE_BigInt   *rows,
                                  const HYPRE_Int
*row_indexes, const HYPRE_BigInt *cols, const HYPRE_Complex *values ) { hypre_ParCSRMatrix *par_matrix; hypre_CSRMatrix *diag, *offd; hypre_AuxParCSRMatrix *aux_matrix; HYPRE_BigInt *row_partitioning; HYPRE_BigInt *col_partitioning; MPI_Comm comm = hypre_IJMatrixComm(matrix); HYPRE_Int num_procs, my_id; HYPRE_BigInt col_0, col_n, first; //HYPRE_Int cancel_indx; HYPRE_BigInt **aux_j; HYPRE_Complex **aux_data; HYPRE_Int *row_length, *row_space; HYPRE_Int need_aux; HYPRE_Int *diag_i; HYPRE_Int *diag_j; HYPRE_Complex *diag_data; HYPRE_Int *offd_i; HYPRE_Int *offd_j; HYPRE_BigInt *big_offd_j; HYPRE_Complex *offd_data; HYPRE_Int pstart; /*HYPRE_Int current_num_elmts;*/ /*HYPRE_Int max_off_proc_elmts;*/ //HYPRE_Int off_proc_i_indx; //HYPRE_BigInt *off_proc_i; //HYPRE_BigInt *off_proc_j; //HYPRE_Int *offproc_cnt; HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix); //HYPRE_Int max_num_threads; HYPRE_Int error_flag = 0; /*HYPRE_Complex *off_proc_data;*/ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); //max_num_threads = hypre_NumThreads(); par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix ); row_partitioning = hypre_IJMatrixRowPartitioning(matrix); col_partitioning = hypre_IJMatrixColPartitioning(matrix); //offproc_cnt = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST); #ifdef HYPRE_NO_GLOBAL_PARTITION col_0 = col_partitioning[0]; col_n = col_partitioning[1]-1; first = hypre_IJMatrixGlobalFirstCol(matrix); pstart = 0; #else col_0 = col_partitioning[my_id]; col_n = col_partitioning[my_id+1]-1; first = col_partitioning[0]; pstart = my_id; #endif if (nrows < 0) { hypre_error_in_arg(2); if (print_level) { hypre_printf("Error! nrows negative! 
HYPRE_IJMatrixSetValues\n"); } return hypre_error_flag; } if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled*/ { HYPRE_BigInt *col_map_offd; HYPRE_Int num_cols_offd; diag = hypre_ParCSRMatrixDiag(par_matrix); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); diag_data = hypre_CSRMatrixData(diag); offd = hypre_ParCSRMatrixOffd(par_matrix); offd_i = hypre_CSRMatrixI(offd); num_cols_offd = hypre_CSRMatrixNumCols(offd); if (num_cols_offd) { col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix); offd_j = hypre_CSRMatrixJ(offd); offd_data = hypre_CSRMatrixData(offd); } aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix); /*if (aux_matrix) { current_num_elmts = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix); off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix); off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix); off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix); cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix); }*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int j_offd; HYPRE_Int num_threads, my_thread_num; HYPRE_Int len, rest, ns, ne; HYPRE_Int pos_diag, pos_offd; HYPRE_Int len_diag, len_offd; //HYPRE_Int row_len; HYPRE_Int row_local; HYPRE_Int i, j, ii, n; HYPRE_BigInt row; HYPRE_Int not_found, size, indx; num_threads = hypre_NumActiveThreads(); my_thread_num = hypre_GetThreadNum(); len = nrows/num_threads; rest = nrows - len*num_threads; if (my_thread_num < rest) { ns = my_thread_num*(len+1); ne = (my_thread_num+1)*(len+1); } else { ns = my_thread_num*len+rest; ne = (my_thread_num+1)*len+rest; } for (ii=ns; ii < ne; ii++) { row = rows[ii]; n = ncols ? 
ncols[ii] : 1; if (n == 0) /* empty row */ { continue; } indx = row_indexes[ii]; /* processor owns the row */ if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1]) { row_local = (HYPRE_Int)(row - row_partitioning[pstart]); /* compute local row number */ size = diag_i[row_local+1] - diag_i[row_local] + offd_i[row_local+1] - offd_i[row_local]; if (n > size) { hypre_error(HYPRE_ERROR_GENERIC); #ifdef HYPRE_USING_OPENMP #pragma omp atomic #endif error_flag++; if (print_level) { hypre_printf (" row %b too long! \n", row); } break; /*return hypre_error_flag; */ } pos_diag = diag_i[row_local]; pos_offd = offd_i[row_local]; len_diag = diag_i[row_local+1]; len_offd = offd_i[row_local+1]; not_found = 1; for (i=0; i < n; i++) { if (cols[indx] < col_0 || cols[indx] > col_n) /* insert into offd */ { j_offd = hypre_BigBinarySearch(col_map_offd,cols[indx]-first, num_cols_offd); if (j_offd == -1) { hypre_error(HYPRE_ERROR_GENERIC); #ifdef HYPRE_USING_OPENMP #pragma omp atomic #endif error_flag++; if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } break; /*return hypre_error_flag; */ } for (j=pos_offd; j < len_offd; j++) { if (offd_j[j] == j_offd) { offd_data[j] = values[indx]; not_found = 0; break; } } if (not_found) { hypre_error(HYPRE_ERROR_GENERIC); #ifdef HYPRE_USING_OPENMP #pragma omp atomic #endif error_flag++; if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } break; /*return hypre_error_flag;*/ } not_found = 1; } /* diagonal element */ else if (cols[indx] == row) { if (diag_j[pos_diag] != row_local) { hypre_error(HYPRE_ERROR_GENERIC); #ifdef HYPRE_USING_OPENMP #pragma omp atomic #endif error_flag++; if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } break; /*return hypre_error_flag; */ } diag_data[pos_diag] = values[indx]; } else /* insert into diag */ { for (j=pos_diag; j < len_diag; j++) { if (diag_j[j] == 
(HYPRE_Int)(cols[indx]-col_0)) { diag_data[j] = values[indx]; not_found = 0; break; } } if (not_found) { hypre_error(HYPRE_ERROR_GENERIC); #ifdef HYPRE_USING_OPENMP #pragma omp atomic #endif error_flag++; if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } break; /*return hypre_error_flag;*/ } } indx++; } } /* processor does not own the row */ //else /*search for previous occurrences and cancel them */ /*{ if (aux_matrix) { col_indx = 0; for (i=0; i < off_proc_i_indx; i=i+2) { row_len = off_proc_i[i+1]; if (off_proc_i[i] == row) { for (j=0; j < n; j++) { cnt1 = col_indx; for (k=0; k < row_len; k++) { if (off_proc_j[cnt1] == cols[j]) { off_proc_j[cnt1++] = -1; offproc_cnt[my_thread_num]++; */ /*cancel_indx++;*/ /* if no repetition allowed */ /* off_proc_j[col_indx] = -1; col_indx -= k; break; */ /*} else { cnt1++; } } } col_indx += row_len; } else { col_indx += row_len; } }*/ /*hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx;*/ //} //} } } /*end parallel region */ } else /* matrix not assembled */ { aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix); /*if (aux_matrix) { current_num_elmts = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix); off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix); off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix); off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix); cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix); }*/ row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix); row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix); need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix); if (need_aux) { aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix); aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix); } else { diag = hypre_ParCSRMatrixDiag(par_matrix); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); diag_data = hypre_CSRMatrixData(diag); offd = hypre_ParCSRMatrixOffd(par_matrix); offd_i = hypre_CSRMatrixI(offd); if 
(num_procs > 1) { offd_data = hypre_CSRMatrixData(offd); big_offd_j = hypre_CSRMatrixBigJ(offd); if (!big_offd_j) { big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)], hypre_CSRMatrixMemoryLocation(offd)); hypre_CSRMatrixBigJ(offd) = big_offd_j; } } } #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int num_threads, my_thread_num; HYPRE_Int len, rest, ns, ne; HYPRE_BigInt *tmp_j = NULL; HYPRE_BigInt *local_j = NULL; HYPRE_Complex *tmp_data = NULL; HYPRE_Complex *local_data = NULL; HYPRE_Int tmp_indx; //HYPRE_Int row_len; HYPRE_Int row_local; HYPRE_Int i, j, ii, n; HYPRE_BigInt row; HYPRE_Int not_found, size, indx; HYPRE_Int old_size, space, cnt; num_threads = hypre_NumActiveThreads(); my_thread_num = hypre_GetThreadNum(); len = nrows/num_threads; rest = nrows - len*num_threads; if (my_thread_num < rest) { ns = my_thread_num*(len+1); ne = (my_thread_num+1)*(len+1); } else { ns = my_thread_num*len+rest; ne = (my_thread_num+1)*len+rest; } for (ii=ns; ii < ne; ii++) { row = rows[ii]; n = ncols ? 
ncols[ii] : 1; if (n == 0) /* empty row */ { continue; } indx = row_indexes[ii]; /* processor owns the row */ if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1]) { row_local = (HYPRE_Int)(row - row_partitioning[pstart]); /* compute local row number */ if (need_aux) { local_j = aux_j[row_local]; local_data = aux_data[row_local]; space = row_space[row_local]; old_size = row_length[row_local]; size = space - old_size; if (size < n) { size = n - size; tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST); tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST); } tmp_indx = 0; not_found = 1; size = old_size; for (i=0; i < n; i++) { for (j=0; j < old_size; j++) { if (local_j[j] == cols[indx]) { local_data[j] = values[indx]; not_found = 0; break; } } if (not_found) { if (size < space) { local_j[size] = cols[indx]; local_data[size++] = values[indx]; } else { tmp_j[tmp_indx] = cols[indx]; tmp_data[tmp_indx++] = values[indx]; } } not_found = 1; indx++; } row_length[row_local] = size+tmp_indx; if (tmp_indx) { aux_j[row_local] = hypre_TReAlloc(aux_j[row_local],HYPRE_BigInt, size+tmp_indx, HYPRE_MEMORY_HOST); aux_data[row_local] = hypre_TReAlloc(aux_data[row_local], HYPRE_Complex, size+tmp_indx, HYPRE_MEMORY_HOST); row_space[row_local] = size+tmp_indx; local_j = aux_j[row_local]; local_data = aux_data[row_local]; } cnt = size; for (i=0; i < tmp_indx; i++) { local_j[cnt] = tmp_j[i]; local_data[cnt++] = tmp_data[i]; } if (tmp_j) { hypre_TFree(tmp_j, HYPRE_MEMORY_HOST); hypre_TFree(tmp_data, HYPRE_MEMORY_HOST); } } else /* insert immediately into data in ParCSRMatrix structure */ { HYPRE_Int offd_indx, diag_indx; HYPRE_Int offd_space, diag_space; HYPRE_Int cnt_diag, cnt_offd; offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local]; diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local]; cnt_diag = diag_indx; cnt_offd = offd_indx; diag_space = diag_i[row_local+1]; offd_space = offd_i[row_local+1]; not_found = 1; for (i=0; i < 
n; i++) { if (cols[indx] < col_0 || cols[indx] > col_n) /* insert into offd */ { for (j=offd_i[row_local]; j < offd_indx; j++) { if (big_offd_j[j] == cols[indx]) { offd_data[j] = values[indx]; not_found = 0; break; } } if (not_found) { if (cnt_offd < offd_space) { big_offd_j[cnt_offd] = cols[indx]; offd_data[cnt_offd++] = values[indx]; } else { hypre_error(HYPRE_ERROR_GENERIC); #ifdef HYPRE_USING_OPENMP #pragma omp atomic #endif error_flag++; if (print_level) { hypre_printf("Error in row %b ! Too many elements!\n", row); } break; /*return hypre_error_flag;*/ } } not_found = 1; } else /* insert into diag */ { for (j=diag_i[row_local]; j < diag_indx; j++) { if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0)) { diag_data[j] = values[indx]; not_found = 0; break; } } if (not_found) { if (cnt_diag < diag_space) { diag_j[cnt_diag] = (HYPRE_Int)(cols[indx]-col_0); diag_data[cnt_diag++] = values[indx]; } else { hypre_error(HYPRE_ERROR_GENERIC); #ifdef HYPRE_USING_OPENMP #pragma omp atomic #endif error_flag++; if (print_level) { hypre_printf("Error in row %b ! 
Too many elements !\n", row); } break; /*return hypre_error_flag;*/ } } not_found = 1; } indx++; } hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag; hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd; } } /* processor does not own the row */ /*else { if (aux_matrix) { col_indx = 0; for (i=0; i < off_proc_i_indx; i=i+2) { row_len = off_proc_i[i+1]; if (off_proc_i[i] == row) { for (j=0; j < n; j++) { cnt1 = col_indx; for (k=0; k < row_len; k++) { if (off_proc_j[cnt1] == cols[j]) { off_proc_j[cnt1++] = -1; */ /*cancel_indx++;*/ //offproc_cnt[my_thread_num]++; /* if no repetition allowed */ /* off_proc_j[col_indx] = -1; col_indx -= k; break; */ /* } else { cnt1++; } } } col_indx += row_len; } else { col_indx += row_len; } }*/ /*hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx;*/ /*} }*/ } } /* end parallel region */ } /*if (error_flag) { return hypre_error_flag; } if (aux_matrix) { for (i1=0; i1 < max_num_threads; i1++) { cancel_indx += offproc_cnt[i1]; } hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx; }*/ //hypre_TFree(offproc_cnt, HYPRE_MEMORY_HOST); return hypre_error_flag; } /****************************************************************************** * * hypre_IJMatrixAddToValuesOMPParCSR * * adds row values to an IJMatrix * *****************************************************************************/ HYPRE_Int hypre_IJMatrixAddToValuesOMPParCSR( hypre_IJMatrix *matrix, HYPRE_Int nrows, HYPRE_Int *ncols, const HYPRE_BigInt *rows, const HYPRE_Int *row_indexes, const HYPRE_BigInt *cols, const HYPRE_Complex *values ) { hypre_ParCSRMatrix *par_matrix; hypre_CSRMatrix *diag, *offd; hypre_AuxParCSRMatrix *aux_matrix; HYPRE_BigInt *row_partitioning; HYPRE_BigInt *col_partitioning; MPI_Comm comm = hypre_IJMatrixComm(matrix); HYPRE_Int num_procs, my_id; HYPRE_BigInt col_0, col_n, first; HYPRE_BigInt **aux_j; HYPRE_Complex **aux_data; HYPRE_Int *row_length, *row_space; HYPRE_Int need_aux; HYPRE_Int pstart; HYPRE_Int 
*diag_i; HYPRE_Int *diag_j; HYPRE_Complex *diag_data; HYPRE_Int *offd_i; HYPRE_Int *offd_j; HYPRE_BigInt *big_offd_j; HYPRE_Complex *offd_data; HYPRE_Int current_num_elmts; HYPRE_Int max_off_proc_elmts; HYPRE_Int off_proc_i_indx; HYPRE_BigInt *off_proc_i; HYPRE_BigInt *off_proc_j; HYPRE_Complex *off_proc_data; HYPRE_Int **offproc_cnt; HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix); HYPRE_Int max_num_threads; HYPRE_Int error_flag = 0; HYPRE_Int i1; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); max_num_threads = hypre_NumThreads(); par_matrix = (hypre_ParCSRMatrix*) hypre_IJMatrixObject( matrix ); row_partitioning = hypre_IJMatrixRowPartitioning(matrix); col_partitioning = hypre_IJMatrixColPartitioning(matrix); offproc_cnt = hypre_CTAlloc(HYPRE_Int *, max_num_threads, HYPRE_MEMORY_HOST); for (i1=0; i1 < max_num_threads; i1++) offproc_cnt[i1] = NULL; #ifdef HYPRE_NO_GLOBAL_PARTITION col_0 = col_partitioning[0]; col_n = col_partitioning[1]-1; first = hypre_IJMatrixGlobalFirstCol(matrix); pstart = 0; #else col_0 = col_partitioning[my_id]; col_n = col_partitioning[my_id+1]-1; first = col_partitioning[0]; pstart = my_id; #endif if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled */ { HYPRE_Int num_cols_offd; HYPRE_BigInt *col_map_offd; diag = hypre_ParCSRMatrixDiag(par_matrix); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); diag_data = hypre_CSRMatrixData(diag); offd = hypre_ParCSRMatrixOffd(par_matrix); offd_i = hypre_CSRMatrixI(offd); num_cols_offd = hypre_CSRMatrixNumCols(offd); if (num_cols_offd) { col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix); offd_j = hypre_CSRMatrixJ(offd); offd_data = hypre_CSRMatrixData(offd); } aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix); if (aux_matrix) { current_num_elmts = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix); off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix); off_proc_i = 
hypre_AuxParCSRMatrixOffProcI(aux_matrix); off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix); } #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int j_offd; HYPRE_Int num_threads, my_thread_num; HYPRE_Int len, rest, ns, ne; HYPRE_Int pos_diag, pos_offd; HYPRE_Int len_diag, len_offd; HYPRE_Int row_local; HYPRE_Int i, j, ii, n; HYPRE_BigInt row; HYPRE_Int not_found, size, indx; HYPRE_Int *my_offproc_cnt = NULL; num_threads = hypre_NumActiveThreads(); my_thread_num = hypre_GetThreadNum(); len = nrows/num_threads; rest = nrows - len*num_threads; if (my_thread_num < rest) { ns = my_thread_num*(len+1); ne = (my_thread_num+1)*(len+1); } else { ns = my_thread_num*len+rest; ne = (my_thread_num+1)*len+rest; } for (ii=ns; ii < ne; ii++) { row = rows[ii]; n = ncols ? ncols[ii] : 1; if (n == 0) /* empty row */ { continue; } indx = row_indexes[ii]; if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1]) { row_local = (HYPRE_Int)(row - row_partitioning[pstart]); /* compute local row number */ size = diag_i[row_local+1] - diag_i[row_local] + offd_i[row_local+1] - offd_i[row_local]; if (n > size) { hypre_error(HYPRE_ERROR_GENERIC); #ifdef HYPRE_USING_OPENMP #pragma omp atomic #endif error_flag++; if (print_level) { hypre_printf (" row %b too long! 
\n", row); } break; /*return hypre_error_flag; */ } pos_diag = diag_i[row_local]; pos_offd = offd_i[row_local]; len_diag = diag_i[row_local+1]; len_offd = offd_i[row_local+1]; not_found = 1; for (i=0; i < n; i++) { if (cols[indx] < col_0 || cols[indx] > col_n) /* insert into offd */ { j_offd = hypre_BigBinarySearch(col_map_offd,cols[indx]-first, num_cols_offd); if (j_offd == -1) { hypre_error(HYPRE_ERROR_GENERIC); #ifdef HYPRE_USING_OPENMP #pragma omp atomic #endif error_flag++; if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } break; /*return hypre_error_flag;*/ } for (j=pos_offd; j < len_offd; j++) { if (offd_j[j] == j_offd) { offd_data[j] += values[indx]; not_found = 0; break; } } if (not_found) { hypre_error(HYPRE_ERROR_GENERIC); #ifdef HYPRE_USING_OPENMP #pragma omp atomic #endif error_flag++; if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } break; /*return hypre_error_flag;*/ } not_found = 1; } /* diagonal element */ else if (cols[indx] == row) { if (diag_j[pos_diag] != row_local) { hypre_error(HYPRE_ERROR_GENERIC); #ifdef HYPRE_USING_OPENMP #pragma omp atomic #endif error_flag++; if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } break; /*return hypre_error_flag;*/ } diag_data[pos_diag] += values[indx]; } else /* insert into diag */ { for (j=pos_diag; j < len_diag; j++) { if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0)) { diag_data[j] += values[indx]; not_found = 0; break; } } if (not_found) { hypre_error(HYPRE_ERROR_GENERIC); #ifdef HYPRE_USING_OPENMP #pragma omp atomic #endif error_flag++; if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } break; /*return hypre_error_flag;*/ } } indx++; } } /* not my row */ /* need to find solution for threaded version!!!! */ /* could save row number and process later .... 
*/ else { if (!my_offproc_cnt) { my_offproc_cnt = hypre_CTAlloc(HYPRE_Int, 200, HYPRE_MEMORY_HOST); offproc_cnt[my_thread_num] = my_offproc_cnt; my_offproc_cnt[0] = 200; my_offproc_cnt[1] = 2; } i = my_offproc_cnt[1]; if (i+2 < my_offproc_cnt[0]) { my_offproc_cnt[i] = ii; my_offproc_cnt[i+1] = indx; my_offproc_cnt[1] += 2; } else { size = my_offproc_cnt[0]; my_offproc_cnt = hypre_TReAlloc(my_offproc_cnt, HYPRE_Int, size+200, HYPRE_MEMORY_HOST); my_offproc_cnt[0] += 200; my_offproc_cnt[i] = ii; my_offproc_cnt[i+1] = indx; my_offproc_cnt[1] += 2; } } } } /* end parallel region */ } /* not assembled */ else { aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix); if (aux_matrix) { current_num_elmts = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix); off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix); off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix); off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix); } row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix); row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix); need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix); if (need_aux) { aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix); aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix); } else { diag = hypre_ParCSRMatrixDiag(par_matrix); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); diag_data = hypre_CSRMatrixData(diag); offd = hypre_ParCSRMatrixOffd(par_matrix); offd_i = hypre_CSRMatrixI(offd); if (num_procs > 1) { big_offd_j = hypre_CSRMatrixBigJ(offd); offd_data = hypre_CSRMatrixData(offd); if (!big_offd_j) { big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)], hypre_CSRMatrixMemoryLocation(offd)); hypre_CSRMatrixBigJ(offd) = big_offd_j; } } } #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int num_threads, my_thread_num; HYPRE_Int len, rest, ns, ne; HYPRE_BigInt *tmp_j = NULL; HYPRE_BigInt *local_j = NULL; HYPRE_Complex *tmp_data = NULL; HYPRE_Complex *local_data = NULL; 
HYPRE_Int tmp_indx; HYPRE_Int row_local; HYPRE_BigInt row; HYPRE_Int i, j, ii, n; HYPRE_Int not_found, size, indx; HYPRE_Int old_size, space, cnt; HYPRE_Int *my_offproc_cnt = NULL; num_threads = hypre_NumActiveThreads(); my_thread_num = hypre_GetThreadNum(); len = nrows/num_threads; rest = nrows - len*num_threads; if (my_thread_num < rest) { ns = my_thread_num*(len+1); ne = (my_thread_num+1)*(len+1); } else { ns = my_thread_num*len+rest; ne = (my_thread_num+1)*len+rest; } for (ii=ns; ii < ne; ii++) { row = rows[ii]; n = ncols ? ncols[ii] : 1; if (n == 0) /* empty row */ { continue; } indx = row_indexes[ii]; if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1]) { row_local = (HYPRE_Int)(row - row_partitioning[pstart]); /* compute local row number */ if (need_aux) { local_j = aux_j[row_local]; local_data = aux_data[row_local]; space = row_space[row_local]; old_size = row_length[row_local]; size = space - old_size; if (size < n) { size = n - size; tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST); tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST); } tmp_indx = 0; not_found = 1; size = old_size; for (i=0; i < n; i++) { for (j=0; j < old_size; j++) { if (local_j[j] == cols[indx]) { local_data[j] += values[indx]; not_found = 0; break; } } if (not_found) { if (size < space) { local_j[size] = cols[indx]; local_data[size++] = values[indx]; } else { tmp_j[tmp_indx] = cols[indx]; tmp_data[tmp_indx++] = values[indx]; } } not_found = 1; indx++; } row_length[row_local] = size+tmp_indx; if (tmp_indx) { aux_j[row_local] = hypre_TReAlloc(aux_j[row_local],HYPRE_BigInt, size+tmp_indx, HYPRE_MEMORY_HOST); aux_data[row_local] = hypre_TReAlloc(aux_data[row_local], HYPRE_Complex, size+tmp_indx, HYPRE_MEMORY_HOST); row_space[row_local] = size+tmp_indx; local_j = aux_j[row_local]; local_data = aux_data[row_local]; } cnt = size; for (i=0; i < tmp_indx; i++) { local_j[cnt] = tmp_j[i]; local_data[cnt++] = tmp_data[i]; } if (tmp_j) { 
hypre_TFree(tmp_j, HYPRE_MEMORY_HOST); hypre_TFree(tmp_data, HYPRE_MEMORY_HOST); } } else /* insert immediately into data in ParCSRMatrix structure */ { HYPRE_Int offd_indx, diag_indx; HYPRE_Int offd_space, diag_space; HYPRE_Int cnt_diag, cnt_offd; offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local]; diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local]; cnt_diag = diag_indx; cnt_offd = offd_indx; diag_space = diag_i[row_local+1]; offd_space = offd_i[row_local+1]; not_found = 1; for (i=0; i < n; i++) { if (cols[indx] < col_0 || cols[indx] > col_n) /* insert into offd */ { for (j=offd_i[row_local]; j < offd_indx; j++) { if (big_offd_j[j] == cols[indx]) { offd_data[j] += values[indx]; not_found = 0; break; } } if (not_found) { if (cnt_offd < offd_space) { big_offd_j[cnt_offd] = cols[indx]; offd_data[cnt_offd++] = values[indx]; } else { hypre_error(HYPRE_ERROR_GENERIC); #ifdef HYPRE_USING_OPENMP #pragma omp atomic #endif error_flag++; if (print_level) { hypre_printf("Error in row %b ! Too many elements!\n", row); } break; /*return hypre_error_flag;*/ } } not_found = 1; } else /* insert into diag */ { for (j=diag_i[row_local]; j < diag_indx; j++) { if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0)) { diag_data[j] += values[indx]; not_found = 0; break; } } if (not_found) { if (cnt_diag < diag_space) { diag_j[cnt_diag] = (HYPRE_Int)(cols[indx]-col_0); diag_data[cnt_diag++] = values[indx]; } else { hypre_error(HYPRE_ERROR_GENERIC); #ifdef HYPRE_USING_OPENMP #pragma omp atomic #endif error_flag++; if (print_level) { hypre_printf("Error in row %b ! 
Too many elements !\n", row); } break; /*return hypre_error_flag;*/ } } not_found = 1; } indx++; } hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag; hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd; } } /* not my row */ else { if (!my_offproc_cnt) { my_offproc_cnt = hypre_CTAlloc(HYPRE_Int, 200, HYPRE_MEMORY_HOST); offproc_cnt[my_thread_num] = my_offproc_cnt; my_offproc_cnt[0] = 200; my_offproc_cnt[1] = 2; } i = my_offproc_cnt[1]; if (i+2 < my_offproc_cnt[0]) { my_offproc_cnt[i] = ii; my_offproc_cnt[i+1] = indx; my_offproc_cnt[1] += 2; } else { size = my_offproc_cnt[0]; my_offproc_cnt = hypre_TReAlloc(my_offproc_cnt, HYPRE_Int, size+200, HYPRE_MEMORY_HOST); my_offproc_cnt[0] += 200; my_offproc_cnt[i] = ii; my_offproc_cnt[i+1] = indx; my_offproc_cnt[1] += 2; } } } } /*end parallel region */ } if (error_flag) { return hypre_error_flag; } if (!aux_matrix) { HYPRE_Int size = (HYPRE_Int)(row_partitioning[pstart+1]-row_partitioning[pstart]); hypre_AuxParCSRMatrixCreate(&aux_matrix, size, size, NULL); hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0; hypre_IJMatrixTranslator(matrix) = aux_matrix; } for (i1 = 0; i1 < max_num_threads; i1++) { if (offproc_cnt[i1]) { HYPRE_Int *my_offproc_cnt = offproc_cnt[i1]; HYPRE_Int i, i2, ii, n, indx; HYPRE_BigInt row; for (i2 = 2; i2 < my_offproc_cnt[1]; i2+=2) { ii = my_offproc_cnt[i2]; row = rows[ii]; n = ncols ? 
ncols[ii] : 1; if (n == 0) /* empty row */ { continue; } indx = my_offproc_cnt[i2+1]; current_num_elmts = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix); max_off_proc_elmts = hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix); off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix); off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix); off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix); off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix); if (!max_off_proc_elmts) { max_off_proc_elmts = hypre_max(n,1000); hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts; hypre_AuxParCSRMatrixOffProcI(aux_matrix) = hypre_CTAlloc(HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST); hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST); hypre_AuxParCSRMatrixOffProcData(aux_matrix) = hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST); off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix); off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix); off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix); } else if (current_num_elmts + n > max_off_proc_elmts) { max_off_proc_elmts += 3*n; off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST); off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST); off_proc_data = hypre_TReAlloc(off_proc_data,HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST); hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts; hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i; hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j; hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data; } off_proc_i[off_proc_i_indx++] = row; off_proc_i[off_proc_i_indx++] = n; for (i=0; i < n; i++) { off_proc_j[current_num_elmts] = cols[indx]; off_proc_data[current_num_elmts++] = values[indx++]; } hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = 
off_proc_i_indx; hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix) = current_num_elmts; } hypre_TFree(offproc_cnt[i1], HYPRE_MEMORY_HOST); } } hypre_TFree(offproc_cnt, HYPRE_MEMORY_HOST); return hypre_error_flag; }
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 32; tile_size[3] = 32; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
DoAllWrap.h
/*
 * This file belongs to the Galois project, a C++ library for exploiting parallelism.
 * The code is being released under the terms of the 3-Clause BSD License (a
 * copy is located in LICENSE.txt at the top-level directory).
 *
 * Copyright (C) 2018, The University of Texas at Austin. All rights reserved.
 * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
 * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
 * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
 * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
 * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
 * shall University be liable for incidental, special, indirect, direct or
 * consequential damages or loss of profits, interruption of business, or
 * related expenses which may arise from use of Software or Documentation,
 * including but not limited to those resulting from defects in Software and/or
 * Documentation, or loss or inaccuracy of data of any kind.
 */

// Runtime-selectable do_all back-end: dispatches a parallel loop over a range
// to one of several Galois/Cilk/OpenMP executors, chosen either explicitly or
// via the --doAllKind command-line option.

#ifndef GALOIS_DOALL_WRAPPER_H
#define GALOIS_DOALL_WRAPPER_H

#include "galois/Galois.h"
#include "galois/GaloisForwardDecl.h"
#include "galois/OrderedTraits.h"
#include "galois/runtime/Executor_DoAll_Old.h"
#include "galois/runtime/Executor_DoAll.h"
#include "galois/substrate/EnvCheck.h"

#ifdef GALOIS_USE_TBB
#include "tbb/parallel_for_each.h"
#endif

#include "CilkInit.h"
#include <unistd.h>

#include "llvm/Support/CommandLine.h"

namespace galois {

// Identifiers for the available do_all implementations (see DoAllImpl below).
enum DoAllTypes {
  DO_ALL_OLD,
  DO_ALL_OLD_STEAL,
  DOALL_GALOIS_FOREACH,
  DO_ALL,
  DOALL_CILK,
  DOALL_OPENMP
};

namespace cll = llvm::cl;

// extern cll::opt<DoAllTypes> doAllKind;
// Command-line switch selecting the back-end used by the 3-argument
// do_all_choice overload.
// NOTE(review): `static` in a header gives every translation unit its own
// copy of this option object — confirm that registering the flag once per TU
// is intended here.
static cll::opt<DoAllTypes> doAllKind(
    "doAllKind", cll::desc("DoAll Implementation"),
    cll::values(clEnumVal(DO_ALL_OLD, "DO_ALL_OLD"),
                clEnumVal(DO_ALL_OLD_STEAL, "DO_ALL_OLD_STEAL"),
                clEnumVal(DOALL_GALOIS_FOREACH, "DOALL_GALOIS_FOREACH"),
                clEnumVal(DO_ALL, "DO_ALL"),
                clEnumVal(DOALL_CILK, "DOALL_CILK"),
                clEnumVal(DOALL_OPENMP, "DOALL_OPENMP"), clEnumValEnd),
    cll::init(DO_ALL_OLD)); // default is regular DOALL

void setDoAllImpl(const DoAllTypes& type);

DoAllTypes getDoAllImpl(void);

// Primary template: only the explicit specializations below are usable;
// instantiating the generic version aborts at runtime.
template <DoAllTypes TYPE>
struct DoAllImpl {
  template <typename R, typename F, typename ArgsTuple>
  static inline void go(const R& range, const F& func,
                        const ArgsTuple& argsTuple) {
    std::abort();
  }
};

// Old do_all executor.
// NOTE(review): this passes steal() exactly like DO_ALL_OLD_STEAL below — the
// two specializations are currently identical; confirm whether DO_ALL_OLD was
// meant to run without work stealing.
template <>
struct DoAllImpl<DO_ALL_OLD> {
  template <typename R, typename F, typename ArgsTuple>
  static inline void go(const R& range, const F& func,
                        const ArgsTuple& argsTuple) {
    galois::runtime::do_all_gen_old(
        range, func, std::tuple_cat(std::make_tuple(steal()), argsTuple));
  }
};

// Old do_all executor with work stealing enabled.
template <>
struct DoAllImpl<DO_ALL_OLD_STEAL> {
  template <typename R, typename F, typename ArgsTuple>
  static inline void go(const R& range, const F& func,
                        const ArgsTuple& argsTuple) {
    galois::runtime::do_all_gen_old(
        range, func, std::tuple_cat(std::make_tuple(steal()), argsTuple));
  }
};

// Runs the loop through the general for_each executor, with pushes and
// conflict detection disabled so it behaves like a do_all.
template <>
struct DoAllImpl<DOALL_GALOIS_FOREACH> {
  // Adapts a unary do_all functor to for_each's (item, context) signature;
  // the context argument is ignored.
  template <typename T, typename _F>
  struct FuncWrap {
    _F func;

    template <typename C>
    void operator()(T& x, C&) {
      func(x);
    }
  };

  template <typename R, typename F, typename ArgsTuple>
  static inline void go(const R& range, const F& func,
                        const ArgsTuple& argsTuple) {
    using T = typename R::value_type;

    // Fixed chunk size; the commented line below shows how it could be pulled
    // from the args tuple instead.
    const unsigned CHUNK_SIZE = 128;
    // const unsigned CHUNK_SIZE = get_type_by_supertype<chunk_size_tag,
    // ArgsTuple>::type::value;
    using WL_ty = galois::worklists::PerThreadChunkLIFO<CHUNK_SIZE, T>;

    galois::runtime::for_each_gen(
        range, FuncWrap<T, F>{func},
        std::tuple_cat(
            std::make_tuple(galois::wl<WL_ty>(), no_pushes(), no_conflicts()),
            argsTuple));
  }
};

// Current default Galois do_all executor.
template <>
struct DoAllImpl<DO_ALL> {
  template <typename R, typename F, typename ArgsTuple>
  static inline void go(const R& range, const F& func,
                        const ArgsTuple& argsTuple) {
    galois::runtime::do_all_gen(range, func, argsTuple);
  }
};

#ifdef HAVE_CILK
// Cilk back-end: initializes the Cilk runtime, then runs a cilk_for over the
// range. argsTuple is ignored by this back-end.
template <>
struct DoAllImpl<DOALL_CILK> {
  template <typename R, typename F, typename ArgsTuple>
  static inline void go(const R& range, const F& func,
                        const ArgsTuple& argsTuple) {
    CilkInit();
    cilk_for(auto it = range.begin(), end = range.end(); it != end; ++it) {
      func(*it);
    }
  }
};
#else
// Stub used when Cilk support was not compiled in: selecting DOALL_CILK dies.
template <>
struct DoAllImpl<DOALL_CILK> {
  template <typename R, typename F, typename ArgsTuple>
  static inline void go(const R& range, const F& func,
                        const ArgsTuple& argsTuple) {
    GALOIS_DIE("Cilk not found\n");
  }
};
#endif

// OpenMP back-end: guided-schedule parallel for. Requires a random-access
// range (uses operator< on iterators). argsTuple is ignored.
template <>
struct DoAllImpl<DOALL_OPENMP> {
  template <typename R, typename F, typename ArgsTuple>
  static inline void go(const R& range, const F& func,
                        const ArgsTuple& argsTuple) {
    const auto end = range.end();
#pragma omp parallel for schedule(guided)
    for (auto it = range.begin(); it < end; ++it) {
      func(*it);
    }
  }
};

// Dispatches to the back-end selected by `type`.
// NOTE(review): the DOALL_OPENMP case aborts instead of calling the (existing)
// OpenMP implementation — the call is commented out; confirm this is meant to
// stay disabled.
template <typename R, typename F, typename ArgsTuple>
void do_all_choice(const R& range, const F& func, const DoAllTypes& type,
                   const ArgsTuple& argsTuple) {
  switch (type) {
  case DO_ALL_OLD_STEAL:
    DoAllImpl<DO_ALL_OLD_STEAL>::go(range, func, argsTuple);
    break;
  case DOALL_GALOIS_FOREACH:
    DoAllImpl<DOALL_GALOIS_FOREACH>::go(range, func, argsTuple);
    break;
  case DO_ALL_OLD:
    DoAllImpl<DO_ALL_OLD>::go(range, func, argsTuple);
    break;
  case DO_ALL:
    DoAllImpl<DO_ALL>::go(range, func, argsTuple);
    break;
  case DOALL_CILK:
    DoAllImpl<DOALL_CILK>::go(range, func, argsTuple);
    break;
  case DOALL_OPENMP:
    // DoAllImpl<DOALL_OPENMP>::go(range, func, argsTuple);
    std::abort();
    break;
  default:
    abort();
    break;
  }
}

// Convenience overload: uses the back-end chosen on the command line.
template <typename R, typename F, typename ArgsTuple>
void do_all_choice(const R& range, const F& func, const ArgsTuple& argsTuple) {
  do_all_choice(range, func, doAllKind, argsTuple);
}

} // end namespace galois

#endif // GALOIS_DOALL_WRAPPER_H
pzlangb.c
/**
 *
 * @file
 *
 * PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> s d c
 *
 **/

#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
#include <plasma_core_blas.h>
#include "core_lapack.h"

/* Address of tile (m, n) of the band matrix A. */
#define A(m, n) (plasma_complex64_t*)plasma_tile_addr(A, m, n)

/***************************************************************************//**
 * Parallel tile calculation of max, one, infinity or Frobenius matrix norm
 * for a general band matrix.
 *
 * Strategy: each band tile's partial result is computed as an independent
 * OpenMP task into `work`, a taskwait synchronizes, then a reduction task
 * combines the partials into *value.
 *
 * @param norm      One of PlasmaMaxNorm, PlasmaOneNorm, PlasmaInfNorm,
 *                  PlasmaFrobeniusNorm.
 * @param A         Descriptor of the band matrix (bandwidths A.kl, A.ku).
 * @param work      Caller-provided workspace; its required size depends on
 *                  `norm` (see the per-case layouts below) — assumed to be
 *                  sized by the calling plasma_zlangb-level routine; confirm
 *                  against the caller.
 * @param value     Output: the computed norm.
 * @param sequence  Sequence the tasks belong to; routine is a no-op if the
 *                  sequence has already failed.
 * @param request   Request handle for error reporting.
 ******************************************************************************/
void plasma_pzlangb(plasma_enum_t norm, plasma_desc_t A,
                    double *work, double *value,
                    plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Return if failed sequence.
    if (sequence->status != PlasmaSuccess)
        return;

    double stub;        // placeholder for unused scale/result slots
    int wcnt = 0;       // number of per-tile partials written (max norm)
    int ldwork, klt, kut;
    double *workspace, *scale, *sumsq;

    switch (norm) {
    //================
    // PlasmaMaxNorm
    //================
    case PlasmaMaxNorm:
        // One scalar (the tile's max) per band tile, packed densely in work.
        wcnt = 0;
        for (int n = 0; n < A.nt; n++ ) {
            int nvan = plasma_tile_nview(A, n);
            // Only tiles inside the band contribute: rows from A.ku above the
            // diagonal block down to A.kl below it, clipped to the matrix.
            int m_start = (imax(0, n*A.nb-A.ku)) / A.nb;
            int m_end = (imin(A.m-1, (n+1)*A.nb+A.kl-1)) / A.nb;
            for (int m = m_start; m <= m_end; m++ ) {
                int ldam = plasma_tile_mmain_band(A, m, n);
                int mvam = plasma_tile_mview(A, m);
                plasma_core_omp_zlange(PlasmaMaxNorm,
                                       mvam, nvan,
                                       A(m, n), ldam,
                                       &stub, &work[wcnt],
                                       sequence, request);
                wcnt++;
            }
        }
        #pragma omp taskwait
        // Reduce the wcnt per-tile maxima into *value.
        plasma_core_omp_dlange(PlasmaMaxNorm,
                               1, wcnt,
                               work, 1,
                               &stub, value,
                               sequence, request);
        break;
    //================
    // PlasmaOneNorm
    //================
    case PlasmaOneNorm:
        // # of tiles in upper band (not including diagonal)
        kut = (A.ku+A.nb-1)/A.nb;
        // # of tiles in lower band (not including diagonal)
        klt = (A.kl+A.nb-1)/A.nb;
        // At most kut+klt+1 tiles per tile column intersect the band.
        ldwork = kut+klt+1;
        for (int n = 0; n < A.nt; n++ ) {
            int nvan = plasma_tile_nview(A, n);
            int m_start = (imax(0, n*A.nb-A.ku)) / A.nb;
            int m_end = (imin(A.m-1, (n+1)*A.nb+A.kl-1)) / A.nb;
            for (int m = m_start; m <= m_end; m++ ) {
                int ldam = plasma_tile_mmain_band(A, m, n);
                int mvam = plasma_tile_mview(A, m);
                // Per-column absolute sums of tile (m,n); row (m-m_start) of
                // an A.n-wide workspace, offset to this tile's columns.
                plasma_core_omp_zlange_aux(PlasmaOneNorm,
                                           mvam, nvan,
                                           A(m,n), ldam,
                                           &work[(m-m_start)*A.n+n*A.nb],
                                           sequence, request);
            }
        }
        #pragma omp taskwait
        workspace = &work[A.n*ldwork];
        // Max (inf norm) over the per-tile-row partial column sums gives the
        // one norm of A.
        plasma_core_omp_dlange(PlasmaInfNorm,
                               A.n, ldwork,
                               work, A.n,
                               workspace, value,
                               sequence, request);
        break;
    //================
    // PlasmaInfNorm
    //================
    case PlasmaInfNorm:
        // One column of partial row sums (length A.mb*A.mt) per tile column.
        ldwork = A.mb*A.mt;
        for (int n = 0; n < A.nt; n++ ) {
            int nvan = plasma_tile_nview(A, n);
            int m_start = (imax(0, n*A.nb-A.ku)) / A.nb;
            int m_end = (imin(A.m-1, (n+1)*A.nb+A.kl-1)) / A.nb;
            for (int m = m_start; m <= m_end; m++ ) {
                int ldam = plasma_tile_mmain_band(A, m, n);
                int mvam = plasma_tile_mview(A, m);
                // Per-row absolute sums of tile (m,n), placed at global row
                // offset m*A.mb in column n of the workspace.
                plasma_core_omp_zlange_aux(PlasmaInfNorm,
                                           mvam, nvan,
                                           A(m,n), ldam,
                                           &work[m*A.mb+n*ldwork],
                                           sequence, request);
            }
        }
        #pragma omp taskwait
        //nwork = A.nt;
        workspace = &work[ldwork*A.nt];
        // Max over the accumulated per-row sums (summed across tile columns).
        plasma_core_omp_dlange(PlasmaInfNorm,
                               ldwork, A.nt,
                               work, ldwork,
                               workspace, value,
                               sequence, request);
        break;
    //======================
    // PlasmaFrobeniusNorm
    //======================
    case PlasmaFrobeniusNorm:
        kut = (A.ku+A.nb-1)/A.nb;  // # of tiles in upper band (not including diagonal)
        klt = (A.kl+A.nb-1)/A.nb;  // # of tiles in lower band (not including diagonal)
        ldwork = kut+klt+1;
        // Parallel (scale, sumsq) pairs per band tile, LAPACK zlassq-style.
        scale = work;
        sumsq = &work[ldwork*A.nt];
        for (int n = 0; n < A.nt; n++ ) {
            int nvan = plasma_tile_nview(A, n);
            int m_start = (imax(0, n*A.nb-A.ku)) / A.nb;
            int m_end = (imin(A.m-1, (n+1)*A.nb+A.kl-1)) / A.nb;
            for (int m = m_start; m <= m_end; m++ ) {
                int ldam = plasma_tile_mmain_band(A, m, n);
                int mvam = plasma_tile_mview(A, m);
                plasma_core_omp_zgessq(mvam, nvan,
                                       A(m,n), ldam,
                                       &scale[n*ldwork+m-m_start],
                                       &sumsq[n*ldwork+m-m_start],
                                       sequence, request);
            }
        }
        #pragma omp taskwait
        // Combine all (scale, sumsq) pairs into the final Frobenius norm.
        plasma_core_omp_dgessq_aux(ldwork*A.nt, scale, sumsq,
                                   value,
                                   sequence, request);
        break;
    default:
        assert(0);
    }
}
builder.h
// Copyright (c) 2015, The Regents of the University of California (Regents) // See LICENSE.txt for license details #ifndef BUILDER_H_ #define BUILDER_H_ #include <algorithm> #include <cinttypes> #include <fstream> #include <functional> #include <type_traits> #include <utility> #include "command_line.h" #include "generator.h" #include "graph.h" #include "platform_atomics.h" #include "pvector.h" #include "reader.h" #include "timer.h" #include "util.h" /* GAP Benchmark Suite Class: BuilderBase Author: Scott Beamer Given arguements from the command line (cli), returns a built graph - MakeGraph() will parse cli and obtain edgelist and call MakeGraphFromEL(edgelist) to perform actual graph construction - edgelist can be from file (reader) or synthetically generated (generator) - Common case: BuilderBase typedef'd (w/ params) to be Builder (benchmark.h) */ template <typename NodeID_, typename DestID_ = NodeID_, typename WeightT_ = NodeID_, typename TimestampT_ = WeightT_, bool invert = true> class BuilderBase { typedef EdgePair<NodeID_, DestID_> Edge; typedef pvector<Edge> EdgeList; const CLBase &cli_; bool symmetrize_; bool needs_weights_; int64_t num_nodes_ = -1; int64_t num_edges_ = 0; int64_t base_graph_num_edges_ = 0; public: explicit BuilderBase(const CLBase &cli) : cli_(cli) { symmetrize_ = cli_.symmetrize(); needs_weights_ = !std::is_same<NodeID_, DestID_>::value; } DestID_ GetSource(EdgePair<NodeID_, NodeID_> e) { return e.u; } DestID_ GetSource(EdgePair<NodeID_, NodeWeight<NodeID_, WeightT_, TimestampT_>> e) { return NodeWeight<NodeID_, WeightT_, TimestampT_>(e.u, e.v.w, e.v.t); } NodeID_ FindMaxNodeID(const EdgeList &el) { NodeID_ max_seen = 0; #pragma omp parallel for reduction(max : max_seen) for (auto it = el.begin(); it < el.end(); it++) { Edge e = *it; max_seen = std::max(max_seen, e.u); max_seen = std::max(max_seen, (NodeID_) e.v); } return max_seen; } // pvector<NodeID_> CountDegrees(const EdgeList &el, bool transpose) { // pvector<NodeID_> 
degrees(num_nodes_, 0); // #pragma omp parallel for // for (auto it = el.begin(); it < el.end(); it++) { // Edge e = *it; // if (symmetrize_ || (!symmetrize_ && !transpose)) // fetch_and_add(degrees[e.u], 1); // if (symmetrize_ || (!symmetrize_ && transpose)) // fetch_and_add(degrees[(NodeID_) e.v], 1); // } // return degrees; // } // // static // pvector<SGOffset> PrefixSum(const pvector<NodeID_> &degrees) { // pvector<SGOffset> sums(degrees.size() + 1); // SGOffset total = 0; // for (size_t n=0; n < degrees.size(); n++) { // sums[n] = total; // total += degrees[n]; // } // sums[degrees.size()] = total; // return sums; // } // // static // pvector<SGOffset> ParallelPrefixSum(const pvector<NodeID_> &degrees) { // const size_t block_size = 1<<20; // const size_t num_blocks = (degrees.size() + block_size - 1) / block_size; // pvector<SGOffset> local_sums(num_blocks); // #pragma omp parallel for // for (size_t block=0; block < num_blocks; block++) { // SGOffset lsum = 0; // size_t block_end = std::min((block + 1) * block_size, degrees.size()); // for (size_t i=block * block_size; i < block_end; i++) // lsum += degrees[i]; // local_sums[block] = lsum; // } // pvector<SGOffset> bulk_prefix(num_blocks+1); // SGOffset total = 0; // for (size_t block=0; block < num_blocks; block++) { // bulk_prefix[block] = total; // total += local_sums[block]; // } // bulk_prefix[num_blocks] = total; // pvector<SGOffset> prefix(degrees.size() + 1); // #pragma omp parallel for // for (size_t block=0; block < num_blocks; block++) { // SGOffset local_total = bulk_prefix[block]; // size_t block_end = std::min((block + 1) * block_size, degrees.size()); // for (size_t i=block * block_size; i < block_end; i++) { // prefix[i] = local_total; // local_total += degrees[i]; // } // } // prefix[degrees.size()] = bulk_prefix[num_blocks]; // return prefix; // } // // // Removes self-loops and redundant edges // // Side effect: neighbor IDs will be sorted // void SquishCSR(const CSRGraph<NodeID_, 
DestID_, invert> &g, bool transpose, // DestID_*** sq_index, DestID_** sq_neighs) { // pvector<NodeID_> diffs(g.num_nodes()); // DestID_ *n_start, *n_end; // #pragma omp parallel for private(n_start, n_end) // for (NodeID_ n=0; n < g.num_nodes(); n++) { // if (transpose) { // n_start = g.in_neigh(n).begin(); // n_end = g.in_neigh(n).end(); // } else { // n_start = g.out_neigh(n).begin(); // n_end = g.out_neigh(n).end(); // } // std::sort(n_start, n_end); // DestID_ *new_end = std::unique(n_start, n_end); // new_end = std::remove(n_start, new_end, n); // diffs[n] = new_end - n_start; // } // pvector<SGOffset> sq_offsets = ParallelPrefixSum(diffs); // *sq_neighs = new DestID_[sq_offsets[g.num_nodes()]]; // *sq_index = CSRGraph<NodeID_, DestID_>::GenIndex(sq_offsets, *sq_neighs); // #pragma omp parallel for private(n_start) // for (NodeID_ n=0; n < g.num_nodes(); n++) { // if (transpose) // n_start = g.in_neigh(n).begin(); // else // n_start = g.out_neigh(n).begin(); // std::copy(n_start, n_start+diffs[n], (*sq_index)[n]); // } // } // // CSRGraph<NodeID_, DestID_, invert> SquishGraph( // const CSRGraph<NodeID_, DestID_, invert> &g) { // DestID_ **out_index, *out_neighs, **in_index, *in_neighs; // SquishCSR(g, false, &out_index, &out_neighs); // if (g.directed()) { // if (invert) // SquishCSR(g, true, &in_index, &in_neighs); // return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), out_index, // out_neighs, in_index, // in_neighs); // } else { // return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), out_index, // out_neighs); // } // } // // /* // Graph Bulding Steps (for CSR): // - Read edgelist once to determine vertex degrees (CountDegrees) // - Determine vertex offsets by a prefix sum (ParallelPrefixSum) // - Allocate storage and set points according to offsets (GenIndex) // - Copy edges into storage // */ // void MakeCSR(const EdgeList &el, bool transpose, DestID_*** index, // DestID_** neighs) { // pvector<NodeID_> degrees = CountDegrees(el, transpose); // 
pvector<SGOffset> offsets = ParallelPrefixSum(degrees); // *neighs = new DestID_[offsets[num_nodes_]]; // *index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, *neighs); // #pragma omp parallel for // for (auto it = el.begin(); it < el.end(); it++) { // Edge e = *it; // if (symmetrize_ || (!symmetrize_ && !transpose)) // (*neighs)[fetch_and_add(offsets[e.u], 1)] = e.v; // if (symmetrize_ || (!symmetrize_ && transpose)) // (*neighs)[fetch_and_add(offsets[static_cast<NodeID_>(e.v)], 1)] = // GetSource(e); // } // } // // CSRGraph<NodeID_, DestID_, invert> MakeGraphFromEL(EdgeList &el) { // DestID_ **index = nullptr, **inv_index = nullptr; // DestID_ *neighs = nullptr, *inv_neighs = nullptr; // Timer t; // t.Start(); // if (num_nodes_ == -1) // num_nodes_ = FindMaxNodeID(el)+1; // if (needs_weights_) // Generator<NodeID_, DestID_, WeightT_, TimestampT_>::InsertWeights(el); // MakeCSR(el, false, &index, &neighs); // if (!symmetrize_ && invert) // MakeCSR(el, true, &inv_index, &inv_neighs); // t.Stop(); // PrintTime("Build Time", t.Seconds()); // if (symmetrize_) // return CSRGraph<NodeID_, DestID_, invert>(num_nodes_, index, neighs); // else // return CSRGraph<NodeID_, DestID_, invert>(num_nodes_, index, neighs, // inv_index, inv_neighs); // } /* * Graph Building Steps: * - Read edgelist once to determine vertex degrees (CountDegrees) * - CountDegrees will count the degrees of the base-graph only * - Determine vertex offsets by a prefix sum (ParallelPrefixSum) * - Allocate storage and set points according to offsets * - Insert edges into storage * */ void MakePMemGraphFromEL(EdgeList &el, CSRGraph<NodeID_, DestID_, invert> &g) { Timer t; t.Start(); if (num_nodes_ == -1) num_nodes_ = FindMaxNodeID(el) + 1; random_shuffle(el.begin(), el.end()); // PrintEL(el); // pvector<NodeID_> out_degrees = CountDegrees(el, false); // AutoCorrectDegrees<NodeID_>(out_degrees); // adding gap to degrees // pvector<SGOffset> out_offsets(out_degrees.size() + 1); // 
ParallelPrefixSum<NodeID_, SGOffset>(out_degrees, out_offsets, onseg_num_vertex_, onseg_max_neighbor_); // NodeID_ out_index_length = out_offsets.size(); t.Stop(); PrintTime("Aux. Data Build Time", t.Seconds()); t.Start(); // base graph insertion auto end_it = el.begin() + base_graph_num_edges_; for (auto it = el.begin(); it < end_it; it++) { Edge e = *it; // adding edge u->v // g.insert_edge(e.u, e.v); // if (symmetrize_) { // // adding edge v->u // g.insert_edge(static_cast<NodeID_>(e.v), GetSource(e)); // } } t.Stop(); PrintTime("B-Graph Build Time", t.Seconds()); t.Start(); // dynamic insertion auto begin_it = el.begin() + base_graph_num_edges_; for (auto it = begin_it; it < el.end(); it++) { Edge e = *it; // adding edge u->v // g.insert_edge(e.u, e.v, true); // if (symmetrize_) { // // adding edge v->u // g.insert_edge(static_cast<NodeID_>(e.v), GetSource(e), true); // } } t.Stop(); PrintTime("D-Graph Build Time", t.Seconds()); cerr << t.Seconds() << endl; // fprintf(stderr, "%lf\n", t.Seconds()); } CSRGraph<NodeID_, DestID_, invert> MakeGraph() { EdgeList el; Timer t; if (cli_.base_filename() != "") { Reader<NodeID_, DestID_, WeightT_, TimestampT_, invert> r(cli_.base_filename()); el = r.ReadFile(needs_weights_); } else { printf("[%s]: graph input-file not exists, abort!!!\n", __FUNCTION__); exit(0); } base_graph_num_edges_ = el.size(); num_nodes_ = FindMaxNodeID(el) + 1; if(symmetrize_) { for(int i=0; i<base_graph_num_edges_; i+=1) { el.push_back(EdgePair<NodeID_, DestID_>(static_cast<NodeID_>(el[i].v), GetSource(el[i]))); } base_graph_num_edges_ *= 2; } // std::sort(el.begin(), el.end(), [](Edge &a, Edge &b) { // if(a.u != b.u) return a.u < b.u; // return (a.v < b.v); // }); if (needs_weights_) Generator<NodeID_, DestID_, WeightT_, TimestampT_>::InsertWeights(el); CSRGraph<NodeID_, DestID_, invert> g(el, !symmetrize_, base_graph_num_edges_, num_nodes_); el.clear(); if (cli_.dynamic_filename() != "") { Reader<NodeID_, DestID_, WeightT_, TimestampT_, invert> 
r(cli_.dynamic_filename()); el = r.ReadFile(needs_weights_); } else { printf("[%s]: graph input-file not exists, abort!!!\n", __FUNCTION__); exit(0); } if (needs_weights_) Generator<NodeID_, DestID_, WeightT_, TimestampT_>::InsertWeights(el); size_t dynamic_edges = el.size(); t.Start(); for(uint32_t i=0; i<dynamic_edges; i+=1) { g.add_edge(el[i].u, el[i].v.v, el[i].v.w); if(symmetrize_) { g.add_edge(el[i].v.v, el[i].u, el[i].v.w); } // if(i && i % 10000000 == 0) cout << "inserted " << (i/1000000) << "M dynamic edges" << endl; } t.Stop(); cout << "D-Graph Build Time: " << t.Seconds() << " seconds." << endl; return g; } // Relabels (and rebuilds) graph by order of decreasing degree static CSRGraph<NodeID_, DestID_, invert> RelabelByDegree( const CSRGraph<NodeID_, DestID_, invert> &g) { if (g.directed()) { std::cout << "Cannot relabel directed graph" << std::endl; std::exit(-11); } Timer t; t.Start(); typedef std::pair<int64_t, NodeID_> degree_node_p; pvector<degree_node_p> degree_id_pairs(g.num_nodes()); #pragma omp parallel for for (NodeID_ n=0; n < g.num_nodes(); n++) degree_id_pairs[n] = std::make_pair(g.out_degree(n), n); std::sort(degree_id_pairs.begin(), degree_id_pairs.end(), std::greater<degree_node_p>()); pvector<NodeID_> degrees(g.num_nodes()); pvector<NodeID_> new_ids(g.num_nodes()); #pragma omp parallel for for (NodeID_ n=0; n < g.num_nodes(); n++) { degrees[n] = degree_id_pairs[n].first; new_ids[degree_id_pairs[n].second] = n; } pvector<SGOffset> offsets = ParallelPrefixSum(degrees); DestID_* neighs = new DestID_[offsets[g.num_nodes()]]; DestID_** index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, neighs); #pragma omp parallel for for (NodeID_ u=0; u < g.num_nodes(); u++) { for (NodeID_ v : g.out_neigh(u)) neighs[offsets[new_ids[u]]++] = new_ids[v]; std::sort(index[new_ids[u]], index[new_ids[u]+1]); } t.Stop(); PrintTime("Relabel", t.Seconds()); return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), index, neighs); } }; #endif // BUILDER_H_
1.norace7.c
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s #include <omp.h> #define N 20 int main() { int A[N][N]; #pragma omp parallel for schedule(auto) for (int i = 1; i < N; i++) for (int j = 1; j < N; j++) A[i][j] = A[i][j - 1]; } // CHECK: Region is Data Race Free. // END
hmacSHA256_fmt_plug.c
/*
 * This software is Copyright (c) 2012 magnum, and it is hereby released to the
 * general public under the following terms: Redistribution and use in source
 * and binary forms, with or without modification, are permitted.
 *
 * Based on hmac-md5 by Bartavelle
 *
 * SIMD added Feb, 2015, JimF.
 */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_hmacSHA224;
extern struct fmt_main fmt_hmacSHA256;
#elif FMT_REGISTERS_H
john_register_one(&fmt_hmacSHA224);
john_register_one(&fmt_hmacSHA256);
#else

#include "sha2.h"
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "base64_convert.h"
#include "formats.h"
#include "johnswap.h"
#include "simd-intrinsics.h"
#ifdef _OPENMP
#include <omp.h>
#ifdef SIMD_COEF_32
#ifndef OMP_SCALE
#define OMP_SCALE 2048 // scaled on core i7-quad HT
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 512 // scaled K8-dual HT
#endif
#endif
#endif
#include "memdbg.h"

#define FORMAT_LABEL			"HMAC-SHA256"
#define FORMAT_LABEL_224		"HMAC-SHA224"
#define FORMAT_NAME			""

#define ALGORITHM_NAME			"password is key, SHA256 " SHA256_ALGORITHM_NAME
#define ALGORITHM_NAME_224		"password is key, SHA224 " SHA256_ALGORITHM_NAME

#define BENCHMARK_COMMENT		""
#define BENCHMARK_LENGTH		0

#define PLAINTEXT_LENGTH		125

// PAD_SIZE is the SHA-256 block size; ipad/opad are one block each.
#define PAD_SIZE			64
#define PAD_SIZE_W			(PAD_SIZE/4)
#define BINARY_SIZE			(256/8)
#define BINARY_SIZE_224			(224/8)
#define BINARY_ALIGN			4

#ifndef SIMD_COEF_32
#define SALT_LENGTH			1023
#define SALT_ALIGN			1
#else
#define SALT_LIMBS			3  /* 3 limbs, 183 bytes */
#define SALT_LENGTH			(SALT_LIMBS * PAD_SIZE - 9)
#define SALT_ALIGN			MEM_ALIGN_SIMD
#endif

#define CIPHERTEXT_LENGTH		(SALT_LENGTH + 1 + BINARY_SIZE * 2)
#define CIPHERTEXT_LENGTH_224		(SALT_LENGTH + 1 + BINARY_SIZE_224 * 2)

#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT	(SIMD_COEF_32*SIMD_PARA_SHA256)
#define MAX_KEYS_PER_CRYPT	(SIMD_COEF_32*SIMD_PARA_SHA256)
// Byte offset of message byte i for candidate `index` inside the interleaved
// SIMD input buffers (big-endian lane layout, PAD_SIZE bytes per candidate).
#define GETPOS(i, index)	((index & (SIMD_COEF_32 - 1)) * 4 + ((i&63) & (0xffffffff - 3)) * SIMD_COEF_32 + (3 - ((i&63) & 3)) + (unsigned int)index/SIMD_COEF_32 * PAD_SIZE * SIMD_COEF_32)
#else
#define MIN_KEYS_PER_CRYPT	1
#define MAX_KEYS_PER_CRYPT	1
#endif

// Self-test vectors: "salt#hexdigest" (or JWT form), plaintext password.
static struct fmt_tests tests[] = {
	{"The quick brown fox jumps over the lazy dog#f7bc83f430538424b13298e6aa6fb143ef4d59a14946175997479dbc2d1a3cd8", "key"},
	{"#b613679a0814d9ec772f95d778c35fc5ff1697c493715653c6c712144292c5ad", ""},
	{"Beppe#Grillo#14651BA87C7F7DA88BCE0DF1F89C223975AC0FDF9C35378CB0857A81DFD5C408", "Io credo nella reincarnazione e sono di Genova; per cui ho fatto testamento e mi sono lasciato tutto a me."},
	{"jquYnUyWT5NsbvjQDZXyCxMJB6PryALZdYOZ1bEuagcUmYcbqpx5vOvpxj7VEhqW7OIzHR2O9JLDKrhuDfZxQk9jOENQb4OzEkRZmN8czdGdo7nshdYU1zcdoDGVb3YTCbjeZvazi#c8b4b8a7888787eebca16099fd076092269919bb032bfec48eed7f41d42eba9a", "magnum"},
	// JWM hash.
	{"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOjEyMzQ1Njc4OTAsIm5hbWUiOiJKb2huIERvZSIsImFkbWluIjp0cnVlfQ.eoaDVGTClRdfxUZXiPs3f8FmJDkDE_VCQFXqKxpLsts", "secret" },
#ifndef SIMD_COEF_32
	{"12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012#ff504b06ee64f3ba7fe503496b451cf46ee34109a62d55cd4bf4f38077ee8145","1234567890" },
	{"012345678901234567890123456789012345678901234567890123456789#6ec69f97e81e58b4a28ee13537c84df316cf8a6250e932de1d375e72843b8f9c", "123456"},
	{"123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123#389c4d8db62dea4c108cf12662da3c9440149800cd1e74f3738ba804024343b7","1234567890" },
	{"0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789#090487f586965594ae55d366cc9bc96d9f0ce44e253e975a1ed004c8a5edcf24", "123456"},
#endif
	{NULL}
};

static struct fmt_tests tests_224[] = {
	{"what do ya want for nothing?#a30e01098bc6dbbf45690f3a7e9e6d0f8bbea2a39e6148008fd05e44", "Jefe"},
	{"Beppe#Grillo#926E4A97B401242EF674CEE4C60D9FC6FF73007F871008D4C11F5B95", "Io credo nella reincarnazione e sono di Genova; per cui ho fatto testamento e mi sono lasciato tutto a me."},
	{NULL}
};

#ifdef SIMD_COEF_32
// Interleaved SIMD buffers (laid out per GETPOS); prep_* hold the digests of
// key^ipad / key^opad so they are computed once per key, not per salt.
static unsigned char *crypt_key;
static unsigned char *ipad, *prep_ipad;
static unsigned char *opad, *prep_opad;
// Current salt, pre-expanded into SALT_LIMBS SHA-256 input blocks per lane.
typedef struct cur_salt_t {
	unsigned char salt[SALT_LIMBS][PAD_SIZE * MAX_KEYS_PER_CRYPT];
	int salt_len;
} cur_salt_t;
static cur_salt_t *cur_salt;
static int bufsize;
#define SALT_SIZE sizeof(cur_salt_t)
#else
static ARCH_WORD_32 (*crypt_key)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
static unsigned char (*opad)[PAD_SIZE];
static unsigned char (*ipad)[PAD_SIZE];
static unsigned char cur_salt[SALT_LENGTH+1];
static SHA256_CTX *ipad_ctx;
static SHA256_CTX *opad_ctx;
#define SALT_SIZE sizeof(cur_salt)
#endif

static char (*saved_plain)[PLAINTEXT_LENGTH + 1];
static int new_keys;  // set by set_key; tells crypt_all to recompute pads

#ifdef SIMD_COEF_32
// Reset all pads to the raw HMAC constants (0x36/0x5C); set_key XORs keys in.
static void clear_keys(void)
{
	memset(ipad, 0x36, bufsize);
	memset(opad, 0x5C, bufsize);
}
#endif

// Shared one-time setup; B_LEN selects the digest size (SHA-256 vs SHA-224).
static void init(struct fmt_main *self, const int B_LEN)
{
#ifdef SIMD_COEF_32
	int i;
#endif
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();

	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
#ifdef SIMD_COEF_32
	bufsize = sizeof(*opad) * self->params.max_keys_per_crypt * PAD_SIZE;
	crypt_key = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD);
	ipad = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD);
	opad = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD);
	prep_ipad = mem_calloc_align(self->params.max_keys_per_crypt,
	                             BINARY_SIZE, MEM_ALIGN_SIMD);
	prep_opad = mem_calloc_align(self->params.max_keys_per_crypt,
	                             BINARY_SIZE, MEM_ALIGN_SIMD);
	// Pre-set the fixed SHA padding (0x80 terminator and bit length) in the
	// crypt_key blocks, since the inner-digest length never changes.
	for (i = 0; i < self->params.max_keys_per_crypt; ++i) {
		crypt_key[GETPOS(B_LEN, i)] = 0x80;
		((unsigned int*)crypt_key)[15 * SIMD_COEF_32 + (i&(SIMD_COEF_32-1)) + (i/SIMD_COEF_32) * PAD_SIZE_W * SIMD_COEF_32] = (B_LEN + PAD_SIZE) << 3;
	}
	clear_keys();
#else
	crypt_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_key));
	ipad = mem_calloc(self->params.max_keys_per_crypt, sizeof(*ipad));
	opad = mem_calloc(self->params.max_keys_per_crypt, sizeof(*opad));
	ipad_ctx = mem_calloc(self->params.max_keys_per_crypt,
	                      sizeof(*ipad_ctx));
	opad_ctx = mem_calloc(self->params.max_keys_per_crypt,
	                      sizeof(*opad_ctx));
#endif
	saved_plain = mem_calloc(self->params.max_keys_per_crypt,
	                         sizeof(*saved_plain));
}

static void init_256(struct fmt_main *self)
{
	init(self, BINARY_SIZE);
}

static void init_224(struct fmt_main *self)
{
	init(self, BINARY_SIZE_224);
}

static void done(void)
{
	MEM_FREE(saved_plain);
#ifdef SIMD_COEF_32
	MEM_FREE(prep_opad);
	MEM_FREE(prep_ipad);
#else
	MEM_FREE(opad_ctx);
	MEM_FREE(ipad_ctx);
#endif
	MEM_FREE(opad);
	MEM_FREE(ipad);
	MEM_FREE(crypt_key);
}

// Canonicalize a ciphertext: converts two-dot JWT input into the normal
// "salt#hexdigest" form and lower-cases the digest part.
static char *split(char *ciphertext, int index, struct fmt_main *self, const int B_LEN, const int CT_LEN)
{
	static char out[CIPHERTEXT_LENGTH + 1];

	if (!strchr(ciphertext, '#') && strchr(ciphertext, '.') && strchr(ciphertext, '.') != strrchr(ciphertext, '.')) {
		// Treat this like a JWT hash. Convert into 'normal' hmac-sha256 format.
		char buf[BINARY_SIZE * 2 + 1], tmp[CIPHERTEXT_LENGTH + 1], *cpi;

		strnzcpy(tmp, ciphertext, sizeof(tmp));
		cpi = strchr(tmp, '.');
		cpi = strchr(&cpi[1], '.');
		if (cpi - tmp + B_LEN * 2 + 1 > CT_LEN)
			return ciphertext;
		*cpi++ = 0;
		memset(buf, 0, sizeof(buf));
		base64_convert(cpi, e_b64_mime, strlen(cpi), buf, e_b64_hex, sizeof(buf), flg_Base64_NO_FLAGS);
		if (strlen(buf) != B_LEN * 2)
			return ciphertext;
		sprintf(out, "%s#%s", tmp, buf);
	} else
		strnzcpy(out, ciphertext, sizeof(out));
	strlwr(strrchr(out, '#'));
	return out;
}

static char *split_256(char *ciphertext, int index, struct fmt_main *self)
{
	return split(ciphertext, index, self, BINARY_SIZE, CIPHERTEXT_LENGTH);
}

static char *split_224(char *ciphertext, int index, struct fmt_main *self)
{
	return split(ciphertext, index, self, BINARY_SIZE_224, CIPHERTEXT_LENGTH_224);
}

// Accept "salt#hexdigest" (salt may itself contain '#'; the LAST one splits)
// or a JWT-looking string that split() can canonicalize.
static int valid(char *ciphertext, struct fmt_main *self, const int B_LEN, const int CT_LEN)
{
	int pos, i;
	char *p;

	p = strrchr(ciphertext, '#'); // allow # in salt
	if (!p && strchr(ciphertext, '.') && strchr(ciphertext, '.') != strrchr(ciphertext, '.')) {
		if (strlen(ciphertext) > CT_LEN)
			return 0;
		ciphertext = split(ciphertext, 0, self, B_LEN, CT_LEN);
		p = strrchr(ciphertext, '#');
	}
	if (!p || p > &ciphertext[strlen(ciphertext)-1])
		return 0;
	i = (int)(p - ciphertext);
	if (i > SALT_LENGTH)
		return 0;
	pos = i + 1;
	if (strlen(ciphertext + pos) != B_LEN * 2)
		return 0;
	for (i = pos; i < B_LEN * 2 + pos; i++) {
		if (!( (('0' <= ciphertext[i])&&(ciphertext[i] <= '9')) ||
		       (('a' <= ciphertext[i])&&(ciphertext[i] <= 'f')) ||
		       (('A' <= ciphertext[i])&&(ciphertext[i] <= 'F'))))
			return 0;
	}
	return 1;
}

static int valid_256(char *ciphertext, struct fmt_main *self)
{
	return valid(ciphertext, self, BINARY_SIZE, CIPHERTEXT_LENGTH);
}

static int valid_224(char *ciphertext, struct fmt_main *self)
{
	return valid(ciphertext, self, BINARY_SIZE_224, CIPHERTEXT_LENGTH_224);
}

static void set_salt(void *salt)
{
#ifdef SIMD_COEF_32
	cur_salt = salt;
#else
	strcpy((char*)cur_salt, (char*)salt);
#endif
}

// XOR the key into the 0x36/0x5C pads for this candidate.  Keys longer than
// one block are first replaced by their SHA-256/224 digest, per HMAC.
static MAYBE_INLINE void set_key(char *key, int index, const int B_LEN)
{
	int len;
#ifdef SIMD_COEF_32
	ARCH_WORD_32 *ipadp = (ARCH_WORD_32*)&ipad[GETPOS(3, index)];
	ARCH_WORD_32 *opadp = (ARCH_WORD_32*)&opad[GETPOS(3, index)];
	const ARCH_WORD_32 *keyp = (ARCH_WORD_32*)key;
	unsigned int temp;

	len = strlen(key);
	memcpy(saved_plain[index], key, len);
	saved_plain[index][len] = 0;

	if (len > PAD_SIZE) {
		unsigned char k0[BINARY_SIZE];
		SHA256_CTX ctx;
		int i;

		if (B_LEN == BINARY_SIZE) {
			SHA256_Init(&ctx);
			SHA256_Update(&ctx, key, len);
			SHA256_Final(k0, &ctx);
		} else {
			SHA224_Init(&ctx);
			SHA224_Update(&ctx, key, len);
			SHA224_Final(k0, &ctx);
		}

		keyp = (unsigned int*)k0;
		for (i = 0; i < B_LEN / 4; i++, ipadp += SIMD_COEF_32, opadp += SIMD_COEF_32)
		{
			temp = JOHNSWAP(*keyp++);
			*ipadp ^= temp;
			*opadp ^= temp;
		}
	}
	else
	// Word-at-a-time XOR into the interleaved lanes; stops at the key's NUL
	// (detected per byte after the big-endian swap).
	while(((temp = JOHNSWAP(*keyp++)) & 0xff000000)) {
		if (!(temp & 0x00ff0000) || !(temp & 0x0000ff00))
		{
			((unsigned short*)ipadp)[1] ^= (unsigned short)(temp >> 16);
			((unsigned short*)opadp)[1] ^= (unsigned short)(temp >> 16);
			break;
		}
		*ipadp ^= temp;
		*opadp ^= temp;
		if (!(temp & 0x000000ff))
			break;
		ipadp += SIMD_COEF_32;
		opadp += SIMD_COEF_32;
	}
#else
	int i;

	len = strlen(key);
	memcpy(saved_plain[index], key, len);
	saved_plain[index][len] = 0;

	memset(ipad[index], 0x36, PAD_SIZE);
	memset(opad[index], 0x5C, PAD_SIZE);

	if (len > PAD_SIZE) {
		SHA256_CTX ctx;
		unsigned char k0[BINARY_SIZE];

		if (B_LEN == BINARY_SIZE) {
			SHA256_Init( &ctx );
			SHA256_Update( &ctx, key, len);
			SHA256_Final( k0, &ctx);
		} else {
			SHA224_Init( &ctx );
			SHA224_Update( &ctx, key, len);
			SHA224_Final( k0, &ctx);
		}

		len = B_LEN;

		for (i=0;i<len;i++)
		{
			ipad[index][i] ^= k0[i];
			opad[index][i] ^= k0[i];
		}
	}
	else
	for (i=0;i<len;i++)
	{
		ipad[index][i] ^= key[i];
		opad[index][i] ^= key[i];
	}
#endif
	new_keys = 1;
}

static void set_key_256(char *key, int index)
{
	set_key(key, index, BINARY_SIZE);
}

static void set_key_224(char *key, int index)
{
	set_key(key, index, BINARY_SIZE_224);
}

static char *get_key(int index)
{
	return saved_plain[index];
}

// Quick reject: compare only the first 32 bits of the binary digest.
static int cmp_all(void *binary, int count)
{
#ifdef SIMD_COEF_32
	unsigned int index;

	for (index = 0; index < count; index++) {
		// NOTE crypt_key is in input format (PAD_SIZE * SIMD_COEF_32)
		if (((ARCH_WORD_32*)binary)[0] == ((ARCH_WORD_32*)crypt_key)[(index&(SIMD_COEF_32-1))+index/SIMD_COEF_32*PAD_SIZE_W*SIMD_COEF_32])
			return 1;
	}
	return 0;
#else
	int index = 0;

#if defined(_OPENMP) || (MAX_KEYS_PER_CRYPT > 1)
	for (; index < count; index++)
#endif
		if (((ARCH_WORD_32*)binary)[0] == crypt_key[index][0])
			return 1;
	return 0;
#endif
}

// Full digest compare for one candidate.
static MAYBE_INLINE int cmp_one(void *binary, int index, const int B_LEN)
{
#ifdef SIMD_COEF_32
	int i;

	for (i = 0; i < (B_LEN/4); i++)
		// NOTE crypt_key is in input format (PAD_SIZE * SIMD_COEF_32)
		if (((ARCH_WORD_32*)binary)[i] != ((ARCH_WORD_32*)crypt_key)[i * SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32 * PAD_SIZE_W * SIMD_COEF_32])
			return 0;
	return 1;
#else
	return !memcmp(binary, crypt_key[index], B_LEN);
#endif
}

static int cmp_one_256(void *binary, int index)
{
	return cmp_one(binary, index, BINARY_SIZE);
}

static int cmp_one_224(void *binary, int index)
{
	return cmp_one(binary, index, BINARY_SIZE_224);
}

static int cmp_exact(char *source, int index)
{
	return (1);
}

// HMAC = H(opad || H(ipad || salt)).  The SIMD path chains full blocks through
// SIMDSHA256body; the scalar path clones the precomputed pad contexts.
static int crypt_all(int *pcount, struct db_salt *salt,
#ifdef SIMD_COEF_32
                     const unsigned EX_FLAGS  // 0 for SHA-256, SSEi_CRYPT_SHA224 for SHA-224
#else
                     const int B_LEN          // digest size selects SHA-256 vs SHA-224
#endif
	)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
#ifdef SIMD_COEF_32
		unsigned int i, *pclear;

		if (new_keys) {
			// Cache the one-block digests of key^ipad / key^opad.
			SIMDSHA256body(&ipad[index * PAD_SIZE],
			            (unsigned int*)&prep_ipad[index * BINARY_SIZE],
			            NULL, SSEi_MIXED_IN|EX_FLAGS);
			SIMDSHA256body(&opad[index * PAD_SIZE],
			            (unsigned int*)&prep_opad[index * BINARY_SIZE],
			            NULL, SSEi_MIXED_IN|EX_FLAGS);
		}
		// Inner hash: continue from prep_ipad over the salt limb(s).
		SIMDSHA256body(cur_salt->salt[0], (unsigned int*)&crypt_key[index * PAD_SIZE], (unsigned int*)&prep_ipad[index * BINARY_SIZE], SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT|EX_FLAGS);
		for (i = 1; i <= (cur_salt->salt_len + 8) / PAD_SIZE; i++)
			SIMDSHA256body(cur_salt->salt[i], (unsigned int*)&crypt_key[index * PAD_SIZE], (unsigned int*)&crypt_key[index * PAD_SIZE], SSEi_MIXED_IN|SSEi_RELOAD_INP_FMT|SSEi_OUTPUT_AS_INP_FMT|EX_FLAGS);
		if (EX_FLAGS) {
			// NOTE, SSESHA224 will output 32 bytes. We need the first 28 (plus the 0x80 padding).
			// so we are forced to 'clean' this crap up, before using the crypt as the input.
			pclear = (unsigned int*)&crypt_key[(unsigned int)index/SIMD_COEF_32*PAD_SIZE_W*SIMD_COEF_32*4];
			for (i = 0; i < MAX_KEYS_PER_CRYPT; i++)
				pclear[28/4*SIMD_COEF_32+(i&(SIMD_COEF_32-1))+i/SIMD_COEF_32*PAD_SIZE_W*SIMD_COEF_32] = 0x80000000;
		}
		// Outer hash: continue from prep_opad over the inner digest.
		SIMDSHA256body(&crypt_key[index * PAD_SIZE], (unsigned int*)&crypt_key[index * PAD_SIZE], (unsigned int*)&prep_opad[index * BINARY_SIZE], SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT|EX_FLAGS);
#else
		SHA256_CTX ctx;

		// Note, for oSSL, we really only need SHA256_Init and SHA224_Init.  From that point
		// on, SHA256_Update/SHA256_Final can be used.  Also, jtr internal sha2.c file works
		// like that.  BUT I am not sure every hash engine works that way, so we are keeping
		// the 'full' block.
		if (B_LEN == BINARY_SIZE) {
			if (new_keys) {
				SHA256_Init(&ipad_ctx[index]);
				SHA256_Update(&ipad_ctx[index], ipad[index], PAD_SIZE);
				SHA256_Init(&opad_ctx[index]);
				SHA256_Update(&opad_ctx[index], opad[index], PAD_SIZE);
			}
			memcpy(&ctx, &ipad_ctx[index], sizeof(ctx));
			SHA256_Update( &ctx, cur_salt, strlen( (char*) cur_salt) );
			SHA256_Final( (unsigned char*) crypt_key[index], &ctx);
			memcpy(&ctx, &opad_ctx[index], sizeof(ctx));
			SHA256_Update( &ctx, crypt_key[index], B_LEN);
			SHA256_Final( (unsigned char*) crypt_key[index], &ctx);
		} else {
			if (new_keys) {
				SHA224_Init(&ipad_ctx[index]);
				SHA224_Update(&ipad_ctx[index], ipad[index], PAD_SIZE);
				SHA224_Init(&opad_ctx[index]);
				SHA224_Update(&opad_ctx[index], opad[index], PAD_SIZE);
			}
			memcpy(&ctx, &ipad_ctx[index], sizeof(ctx));
			SHA224_Update( &ctx, cur_salt, strlen( (char*) cur_salt) );
			SHA224_Final( (unsigned char*) crypt_key[index], &ctx);
			memcpy(&ctx, &opad_ctx[index], sizeof(ctx));
			SHA224_Update( &ctx, crypt_key[index], B_LEN);
			SHA224_Final( (unsigned char*) crypt_key[index], &ctx);
		}
#endif
	}
	new_keys = 0;
	return count;
}

static int crypt_all_256(int *pcount, struct db_salt *salt)
{
#ifdef SIMD_COEF_32
	return crypt_all(pcount, salt, 0);
#else
	return crypt_all(pcount, salt, BINARY_SIZE);
#endif
}

static int crypt_all_224(int *pcount, struct db_salt *salt)
{
#ifdef SIMD_COEF_32
	return crypt_all(pcount, salt, SSEi_CRYPT_SHA224);
#else
	return crypt_all(pcount, salt, BINARY_SIZE_224);
#endif
}

// Decode the hex digest after the LAST '#' into raw (aligned) bytes.
static void *get_binary(char *ciphertext, const int B_LEN)
{
	static union toalign {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD_32 a[1];
	} a;
	unsigned char *realcipher = a.c;
	int i,pos;

	for (i=strlen(ciphertext);ciphertext[i]!='#';i--); // allow # in salt
	pos=i+1;
	for (i=0;i<B_LEN;i++)
		realcipher[i] = atoi16[ARCH_INDEX(ciphertext[i*2+pos])]*16 + atoi16[ARCH_INDEX(ciphertext[i*2+1+pos])];

#ifdef SIMD_COEF_32
	alter_endianity(realcipher, B_LEN);
#endif
	return (void*)realcipher;
}

static void *get_binary_256(char *ciphertext)
{
	return get_binary(ciphertext, BINARY_SIZE);
}

static void *get_binary_224(char *ciphertext)
{
	return get_binary(ciphertext, BINARY_SIZE_224);
}

// Extract the salt (everything before the last '#'); the SIMD variant also
// pre-expands it into padded, length-terminated SHA input blocks per lane.
static void *get_salt(char *ciphertext)
{
	static unsigned char salt[SALT_LENGTH+1];
	int len;
#ifdef SIMD_COEF_32
	unsigned int i = 0;
	static JTR_ALIGN(MEM_ALIGN_SIMD) cur_salt_t cur_salt;
	int salt_len = 0;
#endif
	// allow # in salt
	len = strrchr(ciphertext, '#') - ciphertext;
	memset(salt, 0, sizeof(salt));
	memcpy(salt, ciphertext, len);
#ifdef SIMD_COEF_32
	memset(&cur_salt, 0, sizeof(cur_salt));
	while(((unsigned char*)salt)[salt_len])
	{
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i)
			cur_salt.salt[salt_len / PAD_SIZE][GETPOS(salt_len, i)] = ((unsigned char*)salt)[salt_len];
		++salt_len;
	}
	cur_salt.salt_len = salt_len;
	for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
		cur_salt.salt[salt_len / PAD_SIZE][GETPOS(salt_len, i)] = 0x80;
		// Message bit length includes the PAD_SIZE-byte ipad block.
		((unsigned int*)cur_salt.salt[(salt_len + 8) / PAD_SIZE])[15 * SIMD_COEF_32 + (i&(SIMD_COEF_32-1)) + i/SIMD_COEF_32 * PAD_SIZE_W * SIMD_COEF_32] = (salt_len + PAD_SIZE) << 3;
	}
	return &cur_salt;
#else
	return salt;
#endif
}

#ifdef SIMD_COEF_32
// NOTE crypt_key is in input format (PAD_SIZE * SIMD_COEF_32)
#define HASH_OFFSET (index & (SIMD_COEF_32 - 1)) + ((unsigned int)index / SIMD_COEF_32) * SIMD_COEF_32 * PAD_SIZE_W
static int get_hash_0(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_OFFSET] & PH_MASK_0; }
static int get_hash_1(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_OFFSET] & PH_MASK_1; }
static int get_hash_2(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_OFFSET] & PH_MASK_2; }
static int get_hash_3(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_OFFSET] & PH_MASK_3; }
static int get_hash_4(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_OFFSET] & PH_MASK_4; }
static int get_hash_5(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_OFFSET] & PH_MASK_5; }
static int get_hash_6(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_OFFSET] & PH_MASK_6; }
#else
static int get_hash_0(int index) { return crypt_key[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_key[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_key[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_key[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_key[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_key[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_key[index][0] & PH_MASK_6; }
#endif

struct fmt_main fmt_hmacSHA256 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP,
		{ NULL },
		tests
	}, {
		init_256,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid_256,
		split_256,
		get_binary_256,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key_256,
		get_key,
#ifdef SIMD_COEF_32
		clear_keys,
#else
		fmt_default_clear_keys,
#endif
		crypt_all_256,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one_256,
		cmp_exact
	}
};

struct fmt_main fmt_hmacSHA224 = {
	{
		FORMAT_LABEL_224,
		FORMAT_NAME,
		ALGORITHM_NAME_224,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE_224,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP,
		{ NULL },
		tests_224
	}, {
		init_224,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid_224,
		split_224,
		get_binary_224,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key_224,
		get_key,
#ifdef SIMD_COEF_32
		clear_keys,
#else
		fmt_default_clear_keys,
#endif
		crypt_all_224,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one_224,
		cmp_exact
	}
};

#endif /* plugin stanza */
GB_unop__cosh_fp64_fp64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB (_unop_apply__cosh_fp64_fp64)
// op(A') function: GB (_unop_tran__cosh_fp64_fp64)

// C type:   double
// A type:   double
// cast:     double cij = aij
// unaryop:  cij = cosh (aij)

#define GB_ATYPE \
    double

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = cosh (x) ;

// casting
#define GB_CAST(z, aij) \
    double z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = aij ; \
    Cx [pC] = cosh (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_COSH || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cosh entrywise to the anz entries of Ax, writing into Cx.
// Ab (A->b) is NULL for the sparse/hyper/full case; when A is bitmap,
// positions with Ab [p] == 0 are skipped.  Returns GrB_NO_VALUE when the
// operator has been compiled out via GB_DISABLE.
GrB_Info GB (_unop_apply__cosh_fp64_fp64)
(
    double *Cx,                 // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every position holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = cosh (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = cosh (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose work is performed by the included template; this wrapper
// only supplies the cosh operator through the macros defined above.
GrB_Info GB (_unop_tran__cosh_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
top_k_v2_op.h
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

/*
 top_k v2 exists for backward compatibility: it redefines NaN as the maximum
 value during comparison.  Without it, inference results of models trained
 with older PaddlePaddle versions would change.
*/
#pragma once
#include <algorithm>
#include <iostream>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/top_k_op.h"
#include "paddle/fluid/operators/transpose_op.h"

namespace paddle {
namespace operators {

// Split `dim` around `axis`: *pre = product of dims before axis,
// *n = dim[axis], *post = product of dims after axis.
inline void GetDims(const framework::DDim& dim, int axis, int* pre, int* n,
                    int* post) {
  *pre = 1;
  *post = 1;
  *n = dim[axis];
  for (int i = 0; i < axis; ++i) {
    (*pre) *= dim[i];
  }
  for (int i = axis + 1; i < dim.size(); ++i) {
    (*post) *= dim[i];
  }
}

// Compute the top-k values and their indices for every row of `input`
// (the last dimension is the row).  NaN sorts as the largest value when
// largest==true, and as the smallest when largest==false.  t_out and
// t_indices each receive k entries per row.
template <typename T, typename Type>
static void FullTopK(Type input_height, Type input_width, int input_dim,
                     const framework::Tensor* input, T* t_out, Type* t_indices,
                     const int& k, const bool& largest, const bool& sorted) {
  // when k is small relative to the row width, partial sort is cheaper
  bool partial_sort_flag = (k * 64) < input_width;
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  // Eigen::DSizes<int, 2> flat2dims(input_height, input_width);
  for (Type i = 0; i < input_height; ++i) {
    std::vector<std::pair<T, Type>> col_vec;
    col_vec.reserve(input_width);
    if (input_dim == 1) {
      auto e_input = framework::EigenVector<T>::Flatten(*input);
      for (Type j = 0; j < input_width; ++j) {
        col_vec.emplace_back(std::pair<T, Type>(e_input(j), j));
      }
    } else {
      auto e_input = framework::EigenMatrix<T>::Reshape(*input, input_dim - 1);
      for (Type j = 0; j < input_width; ++j) {
        col_vec.emplace_back(std::pair<T, Type>(e_input(i, j), j));
      }
    }
    if (partial_sort_flag) {
      std::partial_sort(
          col_vec.begin(), col_vec.begin() + k, col_vec.end(),
          [&largest](const std::pair<T, Type>& l, const std::pair<T, Type>& r) {
            if (largest) {
              // NaN is treated as the maximum element
              return (std::isnan(static_cast<double>(l.first)) &&
                      !std::isnan(static_cast<double>(r.first))) ||
                     (l.first > r.first);
            } else {
              // NaN is treated as the maximum element, so non-NaN comes first
              return (!std::isnan(static_cast<double>(l.first)) &&
                      std::isnan(static_cast<double>(r.first))) ||
                     (l.first < r.first);
            }
          });
    } else {
      // use nth_element to place the k-th largest or k-th smallest element
      if (largest) {
        std::nth_element(
            col_vec.begin(), col_vec.begin() + k - 1, col_vec.end(),
            [](const std::pair<T, Type>& l, const std::pair<T, Type>& r) {
              return (std::isnan(static_cast<double>(l.first)) &&
                      !std::isnan(static_cast<double>(r.first))) ||
                     (l.first > r.first);
            });
        // nth_element leaves the leading elements unordered; sort if requested
        if (sorted) {
          std::sort(col_vec.begin(), col_vec.begin() + k - 1,
                    [&largest](const std::pair<T, Type>& l,
                               const std::pair<T, Type>& r) {
                      return (std::isnan(static_cast<double>(l.first)) &&
                              !std::isnan(static_cast<double>(r.first))) ||
                             (l.first > r.first);
                    });
        }
      } else {
        std::nth_element(
            col_vec.begin(), col_vec.begin() + k - 1, col_vec.end(),
            [](const std::pair<T, Type>& l, const std::pair<T, Type>& r) {
              return (!std::isnan(static_cast<double>(l.first)) &&
                      std::isnan(static_cast<double>(r.first))) ||
                     (l.first < r.first);
            });
        // nth_element leaves the leading elements unordered; sort if requested
        if (sorted) {
          std::sort(
              col_vec.begin(), col_vec.begin() + k - 1,
              [](const std::pair<T, Type>& l, const std::pair<T, Type>& r) {
                return (!std::isnan(static_cast<double>(l.first)) &&
                        std::isnan(static_cast<double>(r.first))) ||
                       (l.first < r.first);
              });
        }
      }
    }
    // emit the k selected (value, original-index) pairs for this row
    for (Type j = 0; j < k; ++j) {
      t_out[i * k + j] = col_vec[j].first;
      t_indices[i * k + j] = col_vec[j].second;
    }
  }
}

// Scatter pass used by the gradient: for each row, write e_input(i, j) into
// output_data at the column recorded in `indices` (the inverse of the top-k
// gather).  output_data is assumed to be pre-zeroed by the caller.
template <typename T, typename Type>
static void FullTopKAssign(const Type& input_height, const Type& input_width,
                           const int& input_dim,
                           const framework::Tensor* input,
                           const framework::Tensor* indices, T* output_data,
                           const int& k) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (Type i = 0; i < input_height; ++i) {
    if (input_dim == 1) {
      auto e_input = framework::EigenVector<T>::Flatten(*input);
      auto e_indices = framework::EigenVector<Type>::Flatten(*indices);
      for (Type j = 0; j < k; ++j) {
        output_data[i * input_width + e_indices(j)] = e_input(j);
      }
    } else {
      auto e_input = framework::EigenMatrix<T>::Reshape(*input, input_dim - 1);
      auto e_indices =
          framework::EigenMatrix<Type>::Reshape(*indices, input_dim - 1);
      for (Type j = 0; j < k; ++j) {
        output_data[i * input_width + e_indices(i, j)] = e_input(i, j);
      }
    }
  }
}

// Forward kernel: top-k along `axis`.  When axis is not the last dimension,
// the input is transposed so the target axis becomes last, top-k is run,
// and the results are transposed back.
template <typename DeviceContext, typename T>
class TopkV2Kernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    // Get the top k elements of each row of the input tensor
    auto* input = context.Input<Tensor>("X");
    auto* output = context.Output<Tensor>("Out");
    auto* indices = context.Output<Tensor>("Indices");
    const auto& in_dims = input->dims();
    int k = static_cast<int>(context.Attr<int>("k"));
    const auto& sorted = static_cast<bool>(context.Attr<bool>("sorted"));
    const auto& largest = static_cast<bool>(context.Attr<bool>("largest"));

    // axis < 0: calculate the real axis
    int axis = static_cast<int>(context.Attr<int>("axis"));
    if (axis < 0) axis += in_dims.size();

    // if the K tensor is not null, use the K tensor as k
    auto* k_t = context.Input<Tensor>("K");
    if (k_t) {
      k = k_t->data<int>()[0];
      framework::DDim output_dims = output->dims();
      // according to axis, set the K value in the dim
      output_dims[axis] = k;
      output->Resize(output_dims);
      indices->Resize(output_dims);
    }

    T* output_data = output->mutable_data<T>(context.GetPlace());
    int64_t* indices_data = indices->mutable_data<int64_t>(context.GetPlace());
    const auto& out_dims = output->dims();
    if (axis + 1 == in_dims.size()) {
      // fast path: top-k dim is already the last dim
      const int64_t& input_height =
          phi::product(phi::slice_ddim(in_dims, 0, in_dims.size() - 1));
      const int64_t& input_width = in_dims[in_dims.size() - 1];
      FullTopK<T, int64_t>(input_height, input_width, in_dims.size(), input,
                           output_data, indices_data, k, largest, sorted);
    } else {
      // if the top-k dim is not the last dim, transpose, then do top-k
      std::vector<int> trans;
      for (int i = 0; i < axis; i++) {
        trans.emplace_back(i);
      }
      trans.push_back(in_dims.size() - 1);
      for (int i = axis + 1; i < in_dims.size() - 1; i++) {
        trans.emplace_back(i);
      }
      trans.emplace_back(axis);

      // get the transposed input_dims and out_dims
      framework::DDim trans_dims(in_dims);
      framework::DDim trans_out_dims(output->dims());
      for (size_t i = 0; i < trans.size(); i++) {
        trans_dims[i] = in_dims[trans[i]];
      }
      for (size_t i = 0; i < trans.size(); i++) {
        trans_out_dims[i] = out_dims[trans[i]];
      }
      Tensor trans_inp;
      trans_inp.mutable_data<T>(trans_dims, context.GetPlace());
      int ndims = trans.size();
      auto& dev_context =
          context.template device_context<platform::CPUDeviceContext>();
      // transpose the input value
      TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, *input,
                                                  &trans_inp, trans);

      const int64_t input_height =
          phi::product(phi::slice_ddim(trans_dims, 0, trans_dims.size() - 1));
      const int64_t input_width = trans_dims[trans_dims.size() - 1];

      // Allocate temp tensors to save the top-k indices and values
      Tensor tmp_out;
      T* t_out = tmp_out.mutable_data<T>(trans_out_dims, context.GetPlace());
      Tensor tmp_indices;
      auto* t_ind = tmp_indices.mutable_data<int64_t>(trans_out_dims,
                                                      context.GetPlace());

      // get the top-k value
      FullTopK<T, int64_t>(input_height, input_width, in_dims.size(),
                           &trans_inp, t_out, t_ind, k, largest, sorted);
      // transpose back
      TransCompute<platform::CPUDeviceContext, int64_t>(
          ndims, dev_context, tmp_indices, indices, trans);
      TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, tmp_out,
                                                  output, trans);
    }
  }
};

// Backward kernel: scatter dOut back to dX at the positions selected in the
// forward pass; all other positions of dX receive zero.
template <typename DeviceContext, typename T>
class TopkV2GradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* x = context.Input<Tensor>("X");
    auto* out_grad = context.Input<Tensor>(framework::GradVarName("Out"));
    auto* indices = context.Input<Tensor>("Indices");
    auto* x_grad = context.Output<Tensor>(framework::GradVarName("X"));
    int axis = static_cast<int>(context.Attr<int>("axis"));

    const auto& in_dims = x->dims();
    const auto& out_dims = indices->dims();

    // axis < 0: get the real axis
    axis = (axis < 0) ? (in_dims.size() + axis) : axis;
    const size_t& k = out_dims[axis];

    T* x_grad_data = x_grad->mutable_data<T>(context.GetPlace());
    if (axis + 1 == in_dims.size()) {
      // allocate the memory for the input_grad and
      // assign the out_grad to input_grad directly
      const int64_t input_height =
          phi::product(phi::slice_ddim(in_dims, 0, in_dims.size() - 1));
      const int64_t input_width = in_dims[in_dims.size() - 1];

      // init the output grad with 0, because some input elements have no grad
      memset(x_grad_data, 0, x_grad->numel() * sizeof(T));
      // Assign the output_grad to input_grad
      FullTopKAssign(input_height, input_width, in_dims.size(), out_grad,
                     indices, x_grad_data, k);
    } else {
      // cannot assign grad to input_grad directly; must transpose first
      std::vector<int> trans;
      for (int i = 0; i < axis; i++) {
        trans.emplace_back(i);
      }
      trans.emplace_back(out_dims.size() - 1);
      for (int i = axis + 1; i < out_dims.size() - 1; i++) {
        trans.emplace_back(i);
      }
      trans.emplace_back(axis);

      framework::DDim trans_dims(out_dims);
      framework::DDim trans_in_dims(in_dims);
      for (size_t i = 0; i < trans.size(); i++) {
        trans_dims[i] = out_dims[trans[i]];
        trans_in_dims[i] = in_dims[trans[i]];
      }
      // transpose the out_grad and indices
      Tensor trans_dO;
      trans_dO.mutable_data<T>(trans_dims, context.GetPlace());
      Tensor trans_ind;
      trans_ind.mutable_data<int64_t>(trans_dims, context.GetPlace());
      int ndims = trans.size();
      auto& dev_context =
          context.template device_context<platform::CPUDeviceContext>();

      // Do transpose
      TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context,
                                                  *out_grad, &trans_dO, trans);
      TransCompute<platform::CPUDeviceContext, int64_t>(
          ndims, dev_context, *indices, &trans_ind, trans);
      const int64_t input_height = phi::product(
          phi::slice_ddim(trans_in_dims, 0, trans_in_dims.size() - 1));
      const int64_t input_width = trans_in_dims[trans_in_dims.size() - 1];

      // Assign the out_grad to the transposed input_grad
      Tensor tmp_out;
      T* t_out = tmp_out.mutable_data<T>(trans_in_dims, context.GetPlace());
      memset(t_out, 0, x_grad->numel() * sizeof(T));

      FullTopKAssign<T, int64_t>(input_height, input_width, in_dims.size(),
                                 &trans_dO, &trans_ind, t_out, k);

      // Transpose back
      TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, tmp_out,
                                                  x_grad, trans);
    }
  }
};

}  // namespace operators
}  // namespace paddle
GB_AxB_dot2.c
//------------------------------------------------------------------------------
// GB_AxB_dot2: compute C=A'*B or C<!M>=A'*B in parallel, in place
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// GB_AxB_dot2 does its computation in two phases. The first phase counts the
// number of entries in each column of C. The second phase can then construct
// the result C in place, and thus this method can be done in parallel, for the
// single matrix computation C=A'*B.

// Two variants are handled: C=A'*B and C<!M>=A'*B.
// The C<M>=A'*B computation is computed by GB_AxB_dot3.

#include "GB_mxm.h"
#include "GB_iterator.h"
#ifndef GBCOMPACT
#include "GB_AxB__include.h"
#endif

// Free all workspace owned by this routine: the B slice boundaries and the
// per-task entry counters.  Safe to invoke before C_counts is allocated.
#define GB_FREE_WORK                                                        \
{                                                                           \
    GB_FREE_MEMORY (B_slice, nbslice+1, sizeof (int64_t)) ;                 \
    if (C_counts != NULL)                                                   \
    {                                                                       \
        for (int taskid = 0 ; taskid < naslice ; taskid++)                  \
        {                                                                   \
            GB_FREE_MEMORY (C_counts [taskid], cnvec, sizeof (int64_t)) ;   \
        }                                                                   \
    }                                                                       \
    GB_FREE_MEMORY (C_counts, naslice, sizeof (int64_t *)) ;                \
}

GB_PUBLIC   // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_AxB_dot2                // C=A'*B or C<!M>=A'*B, dot product method
(
    GrB_Matrix *Chandle,            // output matrix
    const GrB_Matrix M,             // mask matrix for C<!M>=A'*B
                                    // if present, the mask is complemented
    const bool Mask_struct,         // if true, use the only structure of M
    const GrB_Matrix *Aslice,       // input matrices (already sliced)
    const GrB_Matrix B,             // input matrix
    const GrB_Semiring semiring,    // semiring that defines C=A*B
    const bool flipxy,              // if true, do z=fmult(b,a) vs fmult(a,b)
    bool *mask_applied,             // if true, mask was applied
    int nthreads,
    int naslice,
    int nbslice,
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    ASSERT (Aslice != NULL) ;
    GrB_Matrix A = Aslice [0] ;     // just for type and dimensions
    ASSERT (Chandle != NULL) ;
    ASSERT (*Chandle == NULL) ;
    ASSERT_MATRIX_OK_OR_NULL (M, "M for dot A'*B", GB0) ;
    ASSERT_MATRIX_OK (A, "A for dot A'*B", GB0) ;
    // all slices of A must agree with A itself in shape and type
    for (int taskid = 0 ; taskid < naslice ; taskid++)
    {
        ASSERT_MATRIX_OK (Aslice [taskid], "A slice for dot2 A'*B", GB0) ;
        ASSERT (!GB_PENDING (Aslice [taskid])) ;
        ASSERT (!GB_ZOMBIES (Aslice [taskid])) ;
        ASSERT ((Aslice [taskid])->vlen == B->vlen) ;
        ASSERT (A->vlen == (Aslice [taskid])->vlen) ;
        ASSERT (A->vdim == (Aslice [taskid])->vdim) ;
        ASSERT (A->type == (Aslice [taskid])->type) ;
    }
    ASSERT_MATRIX_OK (B, "B for dot A'*B", GB0) ;
    ASSERT (!GB_PENDING (M)) ; ASSERT (!GB_ZOMBIES (M)) ;
    ASSERT (!GB_PENDING (A)) ; ASSERT (!GB_ZOMBIES (A)) ;
    ASSERT (!GB_PENDING (B)) ; ASSERT (!GB_ZOMBIES (B)) ;
    ASSERT_SEMIRING_OK (semiring, "semiring for numeric A'*B", GB0) ;
    ASSERT (A->vlen == B->vlen) ;
    ASSERT (mask_applied != NULL) ;

    int64_t *GB_RESTRICT B_slice = NULL ;
    int64_t **C_counts = NULL ;
    int64_t cnvec = B->nvec ;

    //--------------------------------------------------------------------------
    // get the semiring operators
    //--------------------------------------------------------------------------

    GrB_BinaryOp mult = semiring->multiply ;
    GrB_Monoid add = semiring->add ;
    ASSERT (mult->ztype == add->op->ztype) ;

    bool op_is_first  = mult->opcode == GB_FIRST_opcode ;
    bool op_is_second = mult->opcode == GB_SECOND_opcode ;
    bool op_is_pair   = mult->opcode == GB_PAIR_opcode ;
    bool A_is_pattern = false ;
    bool B_is_pattern = false ;

    // FIRST/SECOND/PAIR ignore one (or both) operands, so that operand's
    // values need not be read at all ("pattern only")
    if (flipxy)
    {
        // z = fmult (b,a) will be computed
        A_is_pattern = op_is_first  || op_is_pair ;
        B_is_pattern = op_is_second || op_is_pair ;
        ASSERT (GB_IMPLIES (!A_is_pattern,
            GB_Type_compatible (A->type, mult->ytype))) ;
        ASSERT (GB_IMPLIES (!B_is_pattern,
            GB_Type_compatible (B->type, mult->xtype))) ;
    }
    else
    {
        // z = fmult (a,b) will be computed
        A_is_pattern = op_is_second || op_is_pair ;
        B_is_pattern = op_is_first  || op_is_pair ;
        ASSERT (GB_IMPLIES (!A_is_pattern,
            GB_Type_compatible (A->type, mult->xtype))) ;
        ASSERT (GB_IMPLIES (!B_is_pattern,
            GB_Type_compatible (B->type, mult->ytype))) ;
    }

    (*Chandle) = NULL ;

    //--------------------------------------------------------------------------
    // allocate workspace and slice B
    //--------------------------------------------------------------------------

    if (!GB_pslice (&B_slice, /* B */ B->p, B->nvec, nbslice))
    {
        // out of memory
        GB_FREE_WORK ;
        return (GrB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // compute # of entries in each vector of C
    //--------------------------------------------------------------------------

    GrB_Type ctype = add->op->ztype ;
    int64_t cvlen = A->vdim ;
    int64_t cvdim = B->vdim ;

    if (B->nvec_nonempty < 0)
    {
        B->nvec_nonempty = GB_nvec_nonempty (B, NULL) ;
    }

    // one counter array per A-slice task; summed into Cp below
    GB_CALLOC_MEMORY (C_counts, naslice, sizeof (int64_t *)) ;
    if (C_counts == NULL)
    {
        // out of memory
        GB_FREE_WORK ;
        return (GrB_OUT_OF_MEMORY) ;
    }

    for (int a_taskid = 0 ; a_taskid < naslice ; a_taskid++)
    {
        int64_t *GB_RESTRICT C_count = NULL ;
        GB_CALLOC_MEMORY (C_count, B->nvec, sizeof (int64_t)) ;
        if (C_count == NULL)
        {
            // out of memory
            GB_FREE_WORK ;
            return (GrB_OUT_OF_MEMORY) ;
        }
        C_counts [a_taskid] = C_count ;
    }

    for (int a_taskid = 0 ; a_taskid < naslice ; a_taskid++)
    {
        if ((Aslice [a_taskid])->nvec_nonempty < 0)
        {
            (Aslice [a_taskid])->nvec_nonempty =
                GB_nvec_nonempty (Aslice [a_taskid], NULL) ;
        }
    }

    // phase 1: symbolic count of entries per vector of C
    #define GB_PHASE_1_OF_2
    #include "GB_AxB_dot2_meta.c"
    #undef GB_PHASE_1_OF_2

    GB_NEW (Chandle, ctype, cvlen, cvdim, GB_Ap_malloc, true,
        GB_SAME_HYPER_AS (B->is_hyper), B->hyper_ratio, cnvec, Context) ;
    if (info != GrB_SUCCESS)
    {
        // out of memory
        GB_FREE_WORK ;
        return (info) ;
    }

    GrB_Matrix C = (*Chandle) ;
    int64_t *GB_RESTRICT Cp = C->p ;

    // cumulative sum of counts in each column: each task's counter becomes
    // its starting offset within the vector, and Cp [k] gets the total
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < cnvec ; k++)
    {
        int64_t s = 0 ;
        for (int taskid = 0 ; taskid < naslice ; taskid++)
        {
            int64_t *GB_RESTRICT C_count = C_counts [taskid] ;
            int64_t c = C_count [k] ;
            C_count [k] = s ;
            s += c ;
        }
        Cp [k] = s ;
    }
    Cp [cnvec] = 0 ;
    C->nvec = cnvec ;

    // Cp = cumulative sum of Cp
    GB_cumsum (Cp, cnvec, &(C->nvec_nonempty), nthreads) ;
    int64_t cnz = Cp [cnvec] ;

    // C->h = B->h
    if (B->is_hyper)
    {
        GB_memcpy (C->h, B->h, cnvec * sizeof (int64_t), nthreads) ;
    }

    // free C_count for the first thread; it is no longer needed
    GB_FREE_MEMORY (C_counts [0], cnvec, sizeof (int64_t)) ;
    C->magic = GB_MAGIC ;

    //--------------------------------------------------------------------------
    // allocate C->x and C->i
    //--------------------------------------------------------------------------

    info = GB_ix_alloc (C, cnz, true, Context) ;
    if (info != GrB_SUCCESS)
    {
        // out of memory
        GB_MATRIX_FREE (Chandle) ;
        GB_FREE_WORK ;
        return (info) ;
    }

    //--------------------------------------------------------------------------
    // C = A'*B, computing each entry with a dot product, via builtin semiring
    //--------------------------------------------------------------------------

    bool done = false ;

#ifndef GBCOMPACT

    //--------------------------------------------------------------------------
    // define the worker for the switch factory
    //--------------------------------------------------------------------------

    #define GB_Adot2B(add,mult,xyname) GB_Adot2B_ ## add ## mult ## xyname

    #define GB_AxB_WORKER(add,mult,xyname)                              \
    {                                                                   \
        info = GB_Adot2B (add,mult,xyname) (C, M, Mask_struct,          \
            Aslice, A_is_pattern, B, B_is_pattern, B_slice,             \
            C_counts, nthreads, naslice, nbslice) ;                     \
        done = (info != GrB_NO_VALUE) ;                                 \
    }                                                                   \
    break ;

    //--------------------------------------------------------------------------
    // launch the switch factory
    //--------------------------------------------------------------------------

    GB_Opcode mult_opcode, add_opcode ;
    GB_Type_code xycode, zcode ;

    if (GB_AxB_semiring_builtin (A, A_is_pattern, B, B_is_pattern, semiring,
        flipxy, &mult_opcode, &add_opcode, &xycode, &zcode))
    {
        #include "GB_AxB_factory.c"
    }
    ASSERT (info == GrB_SUCCESS || info == GrB_NO_VALUE) ;

#endif

    //--------------------------------------------------------------------------
    // C = A'*B, computing each entry with a dot product, with typecasting
    //--------------------------------------------------------------------------

    if (!done)
    {
        GB_BURBLE_MATRIX (C, "generic ") ;

        //----------------------------------------------------------------------
        // get operators, functions, workspace, contents of A, B, C, and M
        //----------------------------------------------------------------------

        GxB_binary_function fmult = mult->function ;
        GxB_binary_function fadd  = add->op->function ;

        size_t csize = C->type->size ;
        size_t asize = A_is_pattern ? 0 : A->type->size ;
        size_t bsize = B_is_pattern ? 0 : B->type->size ;

        size_t xsize = mult->xtype->size ;
        size_t ysize = mult->ytype->size ;

        // scalar workspace: because of typecasting, the x/y types need not
        // be the same as the size of the A and B types.
        // flipxy false: aki = (xtype) A(k,i) and bkj = (ytype) B(k,j)
        // flipxy true:  aki = (ytype) A(k,i) and bkj = (xtype) B(k,j)
        size_t aki_size = flipxy ? ysize : xsize ;
        size_t bkj_size = flipxy ? xsize : ysize ;

        GB_void *GB_RESTRICT terminal = add->terminal ;

        GB_cast_function cast_A, cast_B ;
        if (flipxy)
        {
            // A is typecasted to y, and B is typecasted to x
            cast_A = A_is_pattern ? NULL :
                     GB_cast_factory (mult->ytype->code, A->type->code) ;
            cast_B = B_is_pattern ? NULL :
                     GB_cast_factory (mult->xtype->code, B->type->code) ;
        }
        else
        {
            // A is typecasted to x, and B is typecasted to y
            cast_A = A_is_pattern ? NULL :
                     GB_cast_factory (mult->xtype->code, A->type->code) ;
            cast_B = B_is_pattern ? NULL :
                     GB_cast_factory (mult->ytype->code, B->type->code) ;
        }

        //----------------------------------------------------------------------
        // C = A'*B via dot products, function pointers, and typecasting
        //----------------------------------------------------------------------

        // aki = A(k,i), located in Ax [pA]
        #define GB_GETA(aki,Ax,pA)                                          \
            GB_void aki [GB_VLA(aki_size)] ;                                \
            if (!A_is_pattern) cast_A (aki, Ax +((pA)*asize), asize)

        // bkj = B(k,j), located in Bx [pB]
        #define GB_GETB(bkj,Bx,pB)                                          \
            GB_void bkj [GB_VLA(bkj_size)] ;                                \
            if (!B_is_pattern) cast_B (bkj, Bx +((pB)*bsize), bsize)

        // break if cij reaches the terminal value
        #define GB_DOT_TERMINAL(cij)                                        \
            if (terminal != NULL && memcmp (cij, terminal, csize) == 0)     \
            {                                                               \
                break ;                                                     \
            }

        // C(i,j) = A(i,k) * B(k,j)
        #define GB_MULT(cij, aki, bkj)                                      \
            GB_MULTIPLY (cij, aki, bkj)

        // C(i,j) += A(i,k) * B(k,j)
        #define GB_MULTADD(cij, aki, bkj)                                   \
            GB_void zwork [GB_VLA(csize)] ;                                 \
            GB_MULTIPLY (zwork, aki, bkj) ;                                 \
            fadd (cij, cij, zwork)

        // define cij for each task
        #define GB_CIJ_DECLARE(cij)                                         \
            GB_void cij [GB_VLA(csize)]

        // address of Cx [p]
        #define GB_CX(p) Cx +((p)*csize)

        // save the value of C(i,j)
        #define GB_CIJ_SAVE(cij,p)                                          \
            memcpy (GB_CX (p), cij, csize)

        #define GB_ATYPE GB_void
        #define GB_BTYPE GB_void
        #define GB_CTYPE GB_void

        #define GB_PHASE_2_OF_2

        // no vectorization
        #define GB_PRAGMA_VECTORIZE
        #define GB_PRAGMA_VECTORIZE_DOT

        if (flipxy)
        {
            #define GB_MULTIPLY(z,x,y) fmult (z,y,x)
            #include "GB_AxB_dot2_meta.c"
            #undef GB_MULTIPLY
        }
        else
        {
            #define GB_MULTIPLY(z,x,y) fmult (z,x,y)
            #include "GB_AxB_dot2_meta.c"
            #undef GB_MULTIPLY
        }
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORK ;
    ASSERT_MATRIX_OK (C, "dot: C = A'*B output", GB0) ;
    ASSERT (*Chandle == C) ;
    (*mask_applied) = (M != NULL) ;
    return (GrB_SUCCESS) ;
}
GB_binop__rminus_fc64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__rminus_fc64)
// A.*B function (eWiseMult):       GB (_AemultB)
// A.*B function (eWiseMult):       GB (_AemultB_02__rminus_fc64)
// A.*B function (eWiseMult):       GB (_AemultB_03__rminus_fc64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__rminus_fc64)
// A*D function (colscale):         GB (_AxD__rminus_fc64)
// D*A function (rowscale):         GB (_DxB__rminus_fc64)
// C+=B function (dense accum):     GB (_Cdense_accumB__rminus_fc64)
// C+=b function (dense accum):     GB (_Cdense_accumb__rminus_fc64)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__rminus_fc64)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__rminus_fc64)
// C=scalar+B                       GB (_bind1st__rminus_fc64)
// C=scalar+B'                      GB (_bind1st_tran__rminus_fc64)
// C=A+scalar                       GB (_bind2nd__rminus_fc64)
// C=A'+scalar                      GB (_bind2nd_tran__rminus_fc64)

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// B,b type: GxB_FC64_t
// BinaryOp: cij = GB_FC64_minus (bij, aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_BTYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    GxB_FC64_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    GxB_FC64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator: note rminus reverses the operands, z = y - x
#define GB_BINOP(z, x, y, i, j) \
    z = GB_FC64_minus (y, x) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_RMINUS || GxB_NO_FC64 || GxB_NO_RMINUS_FC64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum__rminus_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__rminus_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__rminus_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__rminus_fc64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type GxB_FC64_t
        GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__rminus_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__rminus_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__rminus_fc64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__rminus_fc64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const
GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rminus_fc64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__rminus_fc64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rminus_fc64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rminus_fc64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ; GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ; int64_t p ; #pragma omp parallel for 
num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; GxB_FC64_t bij = Bx [p] ; Cx [p] = GB_FC64_minus (bij, x) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rminus_fc64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ; GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC64_t aij = Ax [p] ; Cx [p] = GB_FC64_minus (y, aij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = Ax [pA] ; \ Cx [pC] = GB_FC64_minus (aij, x) ; \ } GrB_Info GB (_bind1st_tran__rminus_fc64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = Ax [pA] ; \ Cx [pC] = GB_FC64_minus (y, aij) ; \ } GrB_Info GB (_bind2nd_tran__rminus_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_unop__identity_fp64_uint16.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_fp64_uint16
// op(A') function:  GB_unop_tran__identity_fp64_uint16

// C type:   double
// A type:   uint16_t
// cast:     double cij = (double) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: output equals the typecast input)
#define GB_OP(z, x) \
    z = x ;

// casting (every uint16_t value is exactly representable as a double)
#define GB_CAST(z, aij) \
    double z = (double) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    uint16_t aij = Ax [pA] ;     \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = (double) aij ;    \
    Cx [pC] = z ;                \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Typecast each of the anz entries of Ax from uint16_t to double, in
// parallel.  Cx and Ax may be aliased (the cast widens in place safely
// only because each Cx[p] is written from its own Ax[p] read first).

GrB_Info GB_unop_apply__identity_fp64_uint16
(
    double *Cx,             // Cx and Ax may be aliased
    const uint16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint16_t aij = Ax [p] ;
        double z = (double) aij ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose work is done by the shared template, driven by the
// GB_* macros defined above; GB_PHASE_2_OF_2 selects the numeric phase.

GrB_Info GB_unop_tran__identity_fp64_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
8898.c
/*
 * 27-point stencil benchmark: runs a reference computation on the host,
 * repeats it under the OpenACC/OpenMP pragmas, and reports the timed
 * section if the two results agree to within TOLERANCE.
 *
 * Compile using the command:
 * `cc 27Stencil.c -o oa -fopenmp -lm`
 */
#include <math.h>
#include <omp.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENACC
#include <openacc.h>
#endif

#define DEFAULT_DATASIZE 1048576 /* Default datasize. */
#define DEFAULT_REPS 10          /* Default repetitions. */
#define CONF95 1.96
#define ITERATIONS 10
#define FAC (1./26)
#define TOLERANCE 1.0e-15

extern int reps;              /* Repetitions. */
extern double *times;         /* Array to store results in. */
extern int flag;              /* Flag to set CPU or GPU invocation. */
extern unsigned int datasize; /* Datasize passed to benchmark functions. */

unsigned int datasize = -1;   /* Datasize for tests in bytes ((unsigned)-1 = "unset"). */
int reps = -1;                /* Repetitions (-1 = "unset"). */
double *times;                /* Benchmark times in microseconds, one per rep. */
double testtime;              /* Average test time in microseconds for reps runs. */
double testsd;                /* Standard deviation of the test time for reps runs. */
int flag = 0;                 /* 0 indicates CPU. */

/*
 * Function prototypes for common functions.
 */
void init(int argc, char **argv);
void finalisetest(char *);
void finalise(void);
void benchmark(char *, double (*test)(void));
void print_results(char *, double, double);

/* Forward declarations of utility functions. */
double max_diff(double *, double *, int);
void wul();

/* Print command-line usage for this benchmark. */
void usage(char *argv[]) {
  printf("Usage: %s \n"
         "\t--reps <repetitions> (default %d)\n"
         "\t--datasize <datasize> (default %d bytes)\n",
         argv[0], DEFAULT_REPS, DEFAULT_DATASIZE);
}

/*
 * This function parses the parameters from the command line.
 * Exits with a usage message on any malformed or missing argument.
 */
void parse_args(int argc, char *argv[]) {
  int arg;
  for (arg = 1; arg < argc; arg++) {
    if (strcmp(argv[arg], "--reps") == 0) {
      /* Guard: "--reps" as the last argument would read argv[argc]
       * (NULL) and pass it to atoi, which is undefined behavior. */
      if (arg + 1 >= argc) {
        printf("Missing value for --reps\n");
        usage(argv);
        exit(EXIT_FAILURE);
      }
      reps = atoi(argv[++arg]);
      if (reps == 0) {
        printf("Invalid integer:--reps: %s\n", argv[arg]);
        usage(argv);
        exit(EXIT_FAILURE);
      }
    } else if (strcmp(argv[arg], "--datasize") == 0) {
      if (arg + 1 >= argc) {
        printf("Missing value for --datasize\n");
        usage(argv);
        exit(EXIT_FAILURE);
      }
      datasize = atoi(argv[++arg]);
      if (datasize == 0) {
        printf("Invalid integer:--datasize: %s\n", argv[arg]);
        usage(argv);
        exit(EXIT_FAILURE);
      }
    } else if (strcmp(argv[arg], "-h") == 0) {
      usage(argv);
      exit(EXIT_SUCCESS);
    } else {
      printf("Invalid parameters: %s\n", argv[arg]);
      usage(argv);
      exit(EXIT_FAILURE);
    }
  }
}

/*
 * Compute the mean (*mtp) and standard deviation (*sdp) of the entries of
 * the global `times` array, skipping entries equal to 0 (0 marks a failed
 * repetition, see benchmark()).
 */
void stats(double *mtp, double *sdp) {
  double meantime, totaltime, sumsq, mintime, maxtime, sd;
  int i, good_reps;

  mintime = 1.0e10;
  maxtime = 0.;
  totaltime = 0.;
  good_reps = 0;

  for (i = 0; i < reps; i++) {
    /* Skip entries where times is 0, this indicates an error occurred. */
    if (times[i] != 0) {
      /* min/max are tracked but currently not reported. */
      mintime = (mintime < times[i]) ? mintime : times[i];
      maxtime = (maxtime > times[i]) ? maxtime : times[i];
      totaltime += times[i];
      good_reps++;
    }
  }

  /* Guard: if every repetition failed, the original code divided by zero
   * (good_reps == 0).  Report zeros instead. */
  if (good_reps == 0) {
    *mtp = 0.;
    *sdp = 0.;
    return;
  }

  meantime = totaltime / good_reps;
  sumsq = 0;
  for (i = 0; i < reps; i++) {
    if (times[i] != 0) {
      sumsq += (times[i] - meantime) * (times[i] - meantime);
    }
  }
  sd = sqrt(sumsq / good_reps);

  *mtp = meantime;
  *sdp = sd;
}

/*
 * This function prints the results of the tests.
 * If you use a compiler which sets a different preprocessor flag
 * you may wish to add it here.
 */
void print_results(char *name, double testtime, double testsd) {
  char compiler[20];
  /* Set default compiler identifier. */
  sprintf(compiler, "COMPILER");
  /* Set compiler identifier based on known preprocessor flags. */
#ifdef __PGI
  sprintf(compiler, "PGI");
#endif
#ifdef __HMPP
  sprintf(compiler, "CAPS");
#endif
  /* Only the mean time (in microseconds) is printed; the full line with
   * compiler/name/datasize/confidence interval is kept here for reference:
   * printf("%s %s %d %f %f\n", compiler, name, datasize,
   *        testtime*1e6, CONF95*testsd*1e6); */
  printf("%f\n", testtime * 1e6);
}

/*
 * This function initialises the storage for the test results and sets the
 * defaults for reps/datasize when they were not given on the command line.
 */
void init(int argc, char **argv) {
  parse_args(argc, argv);

  if (reps == -1) {
    reps = DEFAULT_REPS;
  }
  if (datasize == (unsigned int)-1) {
    datasize = DEFAULT_DATASIZE;
  }

  times = (double *)malloc((reps) * sizeof(double));

  /*
  #ifdef __PGI
    acc_init(acc_device_nvidia);
  #endif
  #ifdef __HMPP
    int a[5] = {1,2,3,4,5};
  #pragma acc data copyin(a[0:5])
    {}
  #endif
  #ifdef _CRAYC
    int a[5] = {1,2,3,4,5};
  #pragma acc data copyin(a[0:5])
    {}
  #endif
  */
}

/* Release the results storage allocated by init(). */
void finalise(void) {
  free(times);
}

/*
 * This function runs the benchmark specified: `reps` calls of `test`,
 * storing each timing (0 marks a failed repetition), then prints stats.
 */
void benchmark(char *name, double (*test)(void)) {
  int i = 0;
  double tmp = 0;
  for (i = 0; i < reps; i++) {
    tmp = test();
    /* The sentinels are returned exactly, so == comparison is safe. */
    if (tmp == -10000) {
      printf("Memory allocation failure in %s\n", name);
      times[i] = 0;
    } else if (tmp == -11000) {
      printf("CPU/GPU mismatch in %s\n", name);
      times[i] = 0;
    } else {
      times[i] = tmp;
    }
  }
  stats(&testtime, &testsd);
  print_results(name, testtime, testsd);
}

/*
 * 27-point stencil: run ITERATIONS sweeps on the host as a reference, then
 * repeat the same sweeps inside the OpenACC data region (the compute loop
 * itself is under OpenMP pragmas) and time that section.
 *
 * Returns the elapsed time of the timed section in seconds, or the
 * sentinels -10000 (allocation failure) / -11000 (result mismatch).
 */
double stencil() {
  extern unsigned int datasize;
  int sz = cbrt((datasize / sizeof(double)) / 2); /* cube edge, incl. halo */
  int i, j, k, iter;
  int n = sz - 2;                                 /* interior edge length */
  double fac = FAC;
  double t1, t2;
  double md;

  /* Work buffers, with halos. */
  double *a0 = (double *)malloc(sizeof(double) * sz * sz * sz);
  double *device_result = (double *)malloc(sizeof(double) * sz * sz * sz);
  double *a1 = (double *)malloc(sizeof(double) * sz * sz * sz);
  double *host_result = (double *)malloc(sizeof(double) * sz * sz * sz);
  double *a0_init = (double *)malloc(sizeof(double) * sz * sz * sz);

  if (a0 == NULL || device_result == NULL || a1 == NULL ||
      host_result == NULL || a0_init == NULL) {
    /* Something went wrong in the memory allocation here, fail gracefully.
     * free(NULL) is a no-op, so releasing all five avoids leaking the
     * allocations that did succeed. */
    free(a0);
    free(device_result);
    free(a1);
    free(host_result);
    free(a0_init);
    return (-10000);
  }

  /* Initialize input array a0: zero all of array (including halos). */
  for (i = 0; i < sz; i++) {
    for (j = 0; j < sz; j++) {
      for (k = 0; k < sz; k++) {
        a0[i * sz * sz + j * sz + k] = 0.0;
      }
    }
  }

  /* Use random numbers to fill interior. */
  for (i = 1; i < n + 1; i++) {
    for (j = 1; j < n + 1; j++) {
      for (k = 1; k < n + 1; k++) {
        a0[i * sz * sz + j * sz + k] =
            (double)rand() / (double)(1.0 + RAND_MAX);
      }
    }
  }

  /* Save initial input array for later GPU run. */
  for (i = 0; i < sz; i++) {
    for (j = 0; j < sz; j++) {
      for (k = 0; k < sz; k++) {
        a0_init[i * sz * sz + j * sz + k] = a0[i * sz * sz + j * sz + k];
      }
    }
  }

  /* Run main computation on host (reference result). */
  for (iter = 0; iter < ITERATIONS; iter++) {
    for (i = 1; i < n + 1; i++) {
      for (j = 1; j < n + 1; j++) {
        for (k = 1; k < n + 1; k++) {
          /* Average of the 26 neighbours (27-point stencil minus centre). */
          a1[i*sz*sz+j*sz+k] = (
              a0[i*sz*sz+(j-1)*sz+k] + a0[i*sz*sz+(j+1)*sz+k] +
              a0[(i-1)*sz*sz+j*sz+k] + a0[(i+1)*sz*sz+j*sz+k] +
              a0[(i-1)*sz*sz+(j-1)*sz+k] + a0[(i-1)*sz*sz+(j+1)*sz+k] +
              a0[(i+1)*sz*sz+(j-1)*sz+k] + a0[(i+1)*sz*sz+(j+1)*sz+k] +
              a0[i*sz*sz+(j-1)*sz+(k-1)] + a0[i*sz*sz+(j+1)*sz+(k-1)] +
              a0[(i-1)*sz*sz+j*sz+(k-1)] + a0[(i+1)*sz*sz+j*sz+(k-1)] +
              a0[(i-1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k-1)] +
              a0[(i+1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k-1)] +
              a0[i*sz*sz+(j-1)*sz+(k+1)] + a0[i*sz*sz+(j+1)*sz+(k+1)] +
              a0[(i-1)*sz*sz+j*sz+(k+1)] + a0[(i+1)*sz*sz+j*sz+(k+1)] +
              a0[(i-1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k+1)] +
              a0[(i+1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k+1)] +
              a0[i*sz*sz+j*sz+(k-1)] + a0[i*sz*sz+j*sz+(k+1)]
          ) * fac;
        }
      }
    }
    /* Copy the new sweep back into a0 for the next iteration. */
    for (i = 1; i < n + 1; i++) {
      for (j = 1; j < n + 1; j++) {
        for (k = 1; k < n + 1; k++) {
          a0[i*sz*sz+j*sz+k] = a1[i*sz*sz+j*sz+k];
        }
      }
    }
  } /* end iteration loop */

  /* Save reference result. */
  for (i = 0; i < sz; i++) {
    for (j = 0; j < sz; j++) {
      for (k = 0; k < sz; k++) {
        host_result[i*sz*sz+j*sz+k] = a0[i*sz*sz+j*sz+k];
      }
    }
  }

  /* Copy initial array back to a0. */
  for (i = 0; i < sz; i++) {
    for (j = 0; j < sz; j++) {
      for (k = 0; k < sz; k++) {
        a0[i*sz*sz+j*sz+k] = a0_init[i*sz*sz+j*sz+k];
      }
    }
  }

  /* Timed section.  NOTE(review): the compute loop below runs under OpenMP
   * (host) pragmas while the surrounding data region and the copy-back loop
   * use OpenACC; the mixing is preserved as-is from the original benchmark
   * variant — confirm this is intentional before relying on the timings. */
  t1 = omp_get_wtime();

#pragma acc data copy(a0[0:sz*sz*sz]), create(a1[0:sz*sz*sz], i,j,k,iter), copyin(sz,fac,n)
  {
    for (iter = 0; iter < ITERATIONS; iter++) {

#pragma omp parallel for
      for (i = 1; i < n + 1; i++) {
#pragma omp parallel for num_threads(2)
        for (j = 1; j < n + 1; j++) {
          for (k = 1; k < n + 1; k++) {
            a1[i*sz*sz+j*sz+k] = (
                a0[i*sz*sz+(j-1)*sz+k] + a0[i*sz*sz+(j+1)*sz+k] +
                a0[(i-1)*sz*sz+j*sz+k] + a0[(i+1)*sz*sz+j*sz+k] +
                a0[(i-1)*sz*sz+(j-1)*sz+k] + a0[(i-1)*sz*sz+(j+1)*sz+k] +
                a0[(i+1)*sz*sz+(j-1)*sz+k] + a0[(i+1)*sz*sz+(j+1)*sz+k] +
                a0[i*sz*sz+(j-1)*sz+(k-1)] + a0[i*sz*sz+(j+1)*sz+(k-1)] +
                a0[(i-1)*sz*sz+j*sz+(k-1)] + a0[(i+1)*sz*sz+j*sz+(k-1)] +
                a0[(i-1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k-1)] +
                a0[(i+1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k-1)] +
                a0[i*sz*sz+(j-1)*sz+(k+1)] + a0[i*sz*sz+(j+1)*sz+(k+1)] +
                a0[(i-1)*sz*sz+j*sz+(k+1)] + a0[(i+1)*sz*sz+j*sz+(k+1)] +
                a0[(i-1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k+1)] +
                a0[(i+1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k+1)] +
                a0[i*sz*sz+j*sz+(k-1)] + a0[i*sz*sz+j*sz+(k+1)]
            ) * fac;
          }
        }
      }

#pragma acc parallel loop
      for (i = 1; i < n + 1; i++) {
#pragma acc loop
        for (j = 1; j < n + 1; j++) {
#pragma acc loop
          for (k = 1; k < n + 1; k++) {
            a0[i*sz*sz+j*sz+k] = a1[i*sz*sz+j*sz+k];
          }
        }
      }

    } /* end iteration loop */
  }   /* end data region */

#pragma acc wait
  t2 = omp_get_wtime();

  memcpy(&device_result[0], &a0[0], sizeof(double) * sz * sz * sz);
  md = max_diff(&host_result[0], &device_result[0], sz);

  /* Free malloc'd memory to prevent leaks. */
  free(a0);
  free(a0_init);
  free(a1);
  free(host_result);
  free(device_result);

  if (md < TOLERANCE) {
    return (t2 - t1);
  } else {
    return (-11000);
  }
}

/* Utility Functions */

/*
 * Maximum absolute difference between the interiors of two sz*sz*sz arrays
 * (the halo cells are excluded from the comparison).
 */
double max_diff(double *array1, double *array2, int sz) {
  double tmpdiff, diff;
  int i, j, k;
  int n = sz - 2;
  diff = 0.0;

  for (i = 1; i < n + 1; i++) {
    for (j = 1; j < n + 1; j++) {
      for (k = 1; k < n + 1; k++) {
        tmpdiff = fabs(array1[i*sz*sz+j*sz+k] - array2[i*sz*sz+j*sz+k]);
        if (tmpdiff > diff)
          diff = tmpdiff;
      }
    }
  }
  return diff;
}

/*
 * This function ensures the device is awake.
 * It is more portable than acc_init().
 */
void wul() {
  int data = 8192;
  double *arr_a = (double *)malloc(sizeof(double) * data);
  double *arr_b = (double *)malloc(sizeof(double) * data);
  int i = 0;

  if (arr_a == NULL || arr_b == NULL) {
    printf("Unable to allocate memory in wul.\n");
  }

  for (i = 0; i < data; i++) {
    arr_a[i] = (double)(rand() / (1.0 + RAND_MAX));
  }

#pragma acc data copy(arr_b[0:data]), copyin(arr_a[0:data])
  {
#pragma acc parallel loop
    for (i = 0; i < data; i++) {
      arr_b[i] = arr_a[i] * 2;
    }
  }

  if (arr_a[0] < 0) {
    printf("Error in WUL\n");
    /*
     * This should never be called as rands should be in the range (0,1].
     * This stops clever optimizers.
     */
  }

  free(arr_a);
  free(arr_b);
}

int main(int argc, char **argv) {
  char testName[32];

  /* Initialise storage for test results & parse input arguments. */
  init(argc, argv);

  /* Ensure device is awake. */
  wul();

  sprintf(testName, "27S");
  benchmark(testName, &stencil);

  /* Print results & free results storage. */
  finalise();

  return EXIT_SUCCESS;
}
omploopstatic.h
// -*- C++ -*- // Copyright (C) 2007-2016 Free Software Foundation, Inc. // // This file is part of the GNU ISO C++ Library. This library is free // software; you can redistribute it and/or modify it under the terms // of the GNU General Public License as published by the Free Software // Foundation; either version 3, or (at your option) any later // version. // This library is distributed in the hope that it will be useful, but // WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // General Public License for more details. // Under Section 7 of GPL version 3, you are granted additional // permissions described in the GCC Runtime Library Exception, version // 3.1, as published by the Free Software Foundation. // You should have received a copy of the GNU General Public License and // a copy of the GCC Runtime Library Exception along with this program; // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see // <http://www.gnu.org/licenses/>. /** @file parallel/omp_loop_static.h * @brief Parallelization of embarrassingly parallel execution by * means of an OpenMP for loop with static scheduling. * This file is a GNU parallel extension to the Standard C++ Library. */ // Written by Felix Putze. #ifndef _GLIBCXX_PARALLEL_OMP_LOOP_STATIC_H #define _GLIBCXX_PARALLEL_OMP_LOOP_STATIC_H 1 #include <omp.h> #include <parallel/settings.h> #include <parallel/basic_iterator.h> namespace __gnu_parallel { /** @brief Embarrassingly parallel algorithm for random access * iterators, using an OpenMP for loop with static scheduling. * * @param __begin Begin iterator of element sequence. * @param __end End iterator of element sequence. * @param __o User-supplied functor (comparator, predicate, adding * functor, ...). * @param __f Functor to @a process an element with __op (depends on * desired functionality, e. g. for std::for_each(), ...). 
   *  @param __r Functor to @a add a single __result to the already processed
   *  __elements (depends on functionality).
   *  @param __base Base value for reduction.
   *  @param __output Pointer to position where final result is written to
   *  @param __bound Maximum number of elements processed (e. g. for
   *  std::count_n()).
   *  @return User-supplied functor (that may contain a part of the result).
   */
  template<typename _RAIter,
           typename _Op,
           typename _Fu,
           typename _Red,
           typename _Result>
    _Op
    __for_each_template_random_access_omp_loop_static(_RAIter __begin,
                                                      _RAIter __end, _Op __o,
                                                      _Fu& __f, _Red __r,
                                                      _Result __base,
                                                      _Result& __output,
      typename std::iterator_traits<_RAIter>::difference_type __bound)
    {
      typedef typename std::iterator_traits<_RAIter>::difference_type
        _DifferenceType;

      // Total number of elements to process.
      _DifferenceType __length = __end - __begin;
      // Request no more threads than there are elements.
      _ThreadIndex __num_threads = std::min<_DifferenceType>
        (__get_max_threads(), __length);
      // One partial result per thread; allocated inside the parallel region
      // once the actual team size is known.
      _Result *__thread_results;

#     pragma omp parallel num_threads(__num_threads)
      {
        // A single thread queries the real team size, allocates the
        // per-thread slots and value-initializes them; the implicit
        // barrier at the end of "single" publishes both to all threads.
#       pragma omp single
        {
          __num_threads = omp_get_num_threads();
          __thread_results = new _Result[__num_threads];

          for (_ThreadIndex __i = 0; __i < __num_threads; ++__i)
            __thread_results[__i] = _Result();
        }

        _ThreadIndex __iam = omp_get_thread_num();

        // Statically scheduled chunks; each thread folds its elements into
        // its own slot, so no synchronization is needed in the loop body.
#       pragma omp for schedule(static, _Settings::get().workstealing_chunk_size)
        for (_DifferenceType __pos = 0; __pos < __length; ++__pos)
          __thread_results[__iam] = __r(__thread_results[__iam],
                                        __f(__o, __begin+__pos));
      } //parallel

      // Sequential reduction of the per-thread partial results.
      for (_ThreadIndex __i = 0; __i < __num_threads; ++__i)
        __output = __r(__output, __thread_results[__i]);

      delete [] __thread_results;

      // Points to last element processed (needed as return value for
      // some algorithms like transform).
      __f.finish_iterator = __begin + __length;

      return __o;
    }

} // end namespace

#endif /* _GLIBCXX_PARALLEL_OMP_LOOP_STATIC_H */
strmm.c
#include "blas.h"
#include "error.h"
#include <stdio.h>
#include "handle.h"
#include "config.h"

#include "strmm.fatbin.c"

/*
 * Single-precision triangular matrix multiply (STRMM) — CPU reference
 * implementations plus CUDA driver-API front ends.
 *
 * All matrices are column-major: element (i,j) of an array P with
 * leading dimension ldp is P[j * ldp + i].
 */

static inline size_t min(size_t a, size_t b) { return (a < b) ? a : b; }
static inline size_t max(size_t a, size_t b) { return (a > b) ? a : b; }

/* Asynchronous 2D host-to-device copy of an m x n sub-matrix:
 * A(ai:ai+m, aj:aj+n) <- B(bi:bi+m, bj:bj+n). */
static inline CUresult cuMemcpyHtoD2DAsync(CUdeviceptr A, size_t lda, size_t ai, size_t aj,
                                           const void * B, size_t ldb, size_t bi, size_t bj,
                                           size_t m, size_t n, size_t elemSize, CUstream stream) {
  CUDA_MEMCPY2D copy = {
    bi * elemSize, bj, CU_MEMORYTYPE_HOST, B, 0, 0, ldb * elemSize,
    ai * elemSize, aj, CU_MEMORYTYPE_DEVICE, NULL, A, 0, lda * elemSize,
    m * elemSize, n };
  return cuMemcpy2DAsync(&copy, stream);
}

/* Asynchronous 2D device-to-host copy of an m x n sub-matrix. */
static inline CUresult cuMemcpyDtoH2DAsync(void * A, size_t lda, size_t ai, size_t aj,
                                           CUdeviceptr B, size_t ldb, size_t bi, size_t bj,
                                           size_t m, size_t n, size_t elemSize, CUstream stream) {
  CUDA_MEMCPY2D copy = {
    bi * elemSize, bj, CU_MEMORYTYPE_DEVICE, NULL, B, 0, ldb * elemSize,
    ai * elemSize, aj, CU_MEMORYTYPE_HOST, A, 0, 0, lda * elemSize,
    m * elemSize, n };
  return cuMemcpy2DAsync(&copy, stream);
}

/* Asynchronous 2D device-to-device copy of an m x n sub-matrix. */
static inline CUresult cuMemcpyDtoD2DAsync(CUdeviceptr A, size_t lda, size_t ai, size_t aj,
                                           CUdeviceptr B, size_t ldb, size_t bi, size_t bj,
                                           size_t m, size_t n, size_t elemSize, CUstream stream) {
  CUDA_MEMCPY2D copy = {
    bi * elemSize, bj, CU_MEMORYTYPE_DEVICE, NULL, B, 0, ldb * elemSize,
    ai * elemSize, aj, CU_MEMORYTYPE_DEVICE, NULL, A, 0, lda * elemSize,
    m * elemSize, n };
  return cuMemcpy2DAsync(&copy, stream);
}

static const float zero = 0.0f;
static const float one = 1.0f;

/*
 * In-place CPU STRMM:  B := alpha * op(A) * B  (side == CBlasLeft)
 *                      B := alpha * B * op(A)  (side == CBlasRight)
 * where op(A) = A or A^T and A is upper/lower triangular, optionally
 * unit-diagonal.  Follows the reference BLAS update orders so that each
 * column/row is consumed before it is overwritten.  Left-side cases
 * are parallelized over columns j (columns are independent); right-side
 * cases carry dependencies between columns and stay sequential.
 */
void strmm(CBlasSide side, CBlasUplo uplo, CBlasTranspose trans, CBlasDiag diag,
           size_t m, size_t n,
           float alpha, const float * restrict A, size_t lda,
           float * restrict B, size_t ldb) {
  const size_t nRowA = (side == CBlasLeft) ? m : n;

  /* Argument numbers follow the BLAS convention for XERBLA. */
  int info = 0;
  if (lda < nRowA)
    info = 9;
  else if (ldb < m)
    info = 11;
  if (info != 0) {
    XERBLA(info);
    return;
  }

  if (m == 0 || n == 0)
    return;

  /* alpha == 0 short-circuits to B := 0 regardless of A's contents. */
  if (alpha == zero) {
#pragma omp parallel for
    for (size_t j = 0; j < n; j++) {
      for (size_t i = 0; i < m; i++)
        B[j * ldb + i] = zero;
    }
    return;
  }

  if (side == CBlasLeft) {
    if (trans == CBlasNoTrans) {
      if (uplo == CBlasUpper) {
        /* k ascending: rows above k receive contributions from the
         * original B(k,j) before B(k,j) itself is scaled. */
#pragma omp parallel for
        for (size_t j = 0; j < n; j++) {
          for (size_t k = 0; k < m; k++) {
            if (B[j * ldb + k] != zero) {
              register float temp = alpha * B[j * ldb + k];
              for (size_t i = 0; i < k; i++)
                B[j * ldb + i] += temp * A[k * lda + i];
              if (diag == CBlasNonUnit)
                temp *= A[k * lda + k];
              B[j * ldb + k] = temp;
            }
          }
        }
      }
      else {
        /* Lower triangular: walk k from the bottom row upwards. */
#pragma omp parallel for
        for (size_t j = 0; j < n; j++) {
          size_t k = m - 1;
          do {
            if (B[j * ldb + k] != zero) {
              register float temp = alpha * B[j * ldb + k];
              B[j * ldb + k] = temp;
              if (diag == CBlasNonUnit)
                B[j * ldb + k] *= A[k * lda + k];
              for (size_t i = k + 1; i < m; i++)
                B[j * ldb + i] += temp * A[k * lda + i];
            }
          } while (k-- > 0);
        }
      }
    }
    else {
      if (uplo == CBlasUpper) {
        /* A^T, upper: row i of the result only reads B rows <= i, so
         * process i from the bottom up to keep the update in place. */
#pragma omp parallel for
        for (size_t j = 0; j < n; j++) {
          size_t i = m - 1;
          do {
            register float temp = B[j * ldb + i];
            if (diag == CBlasNonUnit)
              temp *= A[i * lda + i];
            for (size_t k = 0; k < i; k++)
              temp += A[i * lda + k] * B[j * ldb + k];
            B[j * ldb + i] = alpha * temp;
          } while (i-- > 0);
        }
      }
      else {
        /* A^T, lower: row i only reads B rows >= i, so ascend. */
#pragma omp parallel for
        for (size_t j = 0; j < n; j++) {
          for (size_t i = 0; i < m; i++) {
            register float temp = B[j * ldb + i];
            if (diag == CBlasNonUnit)
              temp *= A[i * lda + i];
            for (size_t k = i + 1; k < m; k++)
              temp += A[i * lda + k] * B[j * ldb + k];
            B[j * ldb + i] = alpha * temp;
          }
        }
      }
    }
  }
  else {
    /* Right side: column j of the result mixes several columns of B,
     * so column order matters and the loops stay sequential. */
    if (trans == CBlasNoTrans) {
      if (uplo == CBlasUpper) {
        size_t j = n - 1;
        do {
          register float temp = alpha;
          if (diag == CBlasNonUnit)
            temp *= A[j * lda + j];
          for (size_t i = 0; i < m; i++)
            B[j * ldb + i] *= temp;
          for (size_t k = 0; k < j; k++) {
            if (A[j * lda + k] != zero) {
              register float temp = alpha * A[j * lda + k];
              for (size_t i = 0; i < m; i++)
                B[j * ldb + i] += temp * B[k * ldb + i];
            }
          }
        } while (j-- > 0);
      }
      else {
        for (size_t j = 0; j < n; j++) {
          register float temp = alpha;
          if (diag == CBlasNonUnit)
            temp *= A[j * lda + j];
          for (size_t i = 0; i < m; i++)
            B[j * ldb + i] *= temp;
          for (size_t k = j + 1; k < n; k++) {
            if (A[j * lda + k] != zero) {
              register float temp = alpha * A[j * lda + k];
              for (size_t i = 0; i < m; i++)
                B[j * ldb + i] += temp * B[k * ldb + i];
            }
          }
        }
      }
    }
    else {
      if (uplo == CBlasUpper) {
        for (size_t k = 0; k < n; k++) {
          for (size_t j = 0; j < k; j++) {
            if (A[k * lda + j] != zero) {
              register float temp = alpha * A[k * lda + j];
              for (size_t i = 0; i < m; i++)
                B[j * ldb + i] += temp * B[k * ldb + i];
            }
          }
          register float temp = alpha;
          if (diag == CBlasNonUnit)
            temp *= A[k * lda + k];
          /* In place, skipping the scale when temp == 1 is valid:
           * column k already holds its own contribution. */
          if (temp != one) {
            for (size_t i = 0; i < m; i++)
              B[k * ldb + i] = temp * B[k * ldb + i];
          }
        }
      }
      else {
        size_t k = n - 1;
        do {
          for (size_t j = k + 1; j < n; j++) {
            if (A[k * lda + j] != zero) {
              register float temp = alpha * A[k * lda + j];
              for (size_t i = 0; i < m; i++)
                B[j * ldb + i] += temp * B[k * ldb + i];
            }
          }
          register float temp = alpha;
          if (diag == CBlasNonUnit)
            temp *= A[k * lda + k];
          if (temp != one) {
            for (size_t i = 0; i < m; i++)
              B[k * ldb + i] = temp * B[k * ldb + i];
          }
        } while (k-- > 0);
      }
    }
  }
}

/*
 * Out-of-place CPU STRMM:  X := alpha * op(A) * B   (side == CBlasLeft)
 *                          X := alpha * B * op(A)   (side == CBlasRight)
 * B is read-only; X receives the full result (every element of the
 * m x n block of X is written).
 */
void strmm2(CBlasSide side, CBlasUplo uplo, CBlasTranspose trans, CBlasDiag diag,
            size_t m, size_t n,
            float alpha, const float * restrict A, size_t lda,
            const float * restrict B, size_t ldb,
            float * restrict X, size_t ldx) {
  const size_t nRowA = (side == CBlasLeft) ? m : n;

  int info = 0;
  if (lda < nRowA)
    info = 9;
  else if (ldb < m)
    info = 11;
  else if (ldx < m)
    info = 13;
  if (info != 0) {
    XERBLA(info);
    return;
  }

  if (m == 0 || n == 0)
    return;

  if (alpha == zero) {
#pragma omp parallel for
    for (size_t j = 0; j < n; j++) {
      for (size_t i = 0; i < m; i++)
        X[j * ldx + i] = zero;
    }
    return;
  }

  if (side == CBlasLeft) {
    if (trans == CBlasNoTrans) {
      if (uplo == CBlasUpper) {
#pragma omp parallel for
        for (size_t j = 0; j < n; j++) {
          for (size_t k = 0; k < m; k++) {
            register float temp = B[j * ldb + k];
            if (temp != zero) {
              temp *= alpha;
              for (size_t i = 0; i < k; i++)
                X[j * ldx + i] += temp * A[k * lda + i];
              if (diag == CBlasNonUnit)
                temp *= A[k * lda + k];
            }
            /* Always assign: X(k,j) must be written even when the
             * source element is zero. */
            X[j * ldx + k] = temp;
          }
        }
      }
      else {
#pragma omp parallel for
        for (size_t j = 0; j < n; j++) {
          size_t k = m - 1;
          do {
            if (B[j * ldb + k] != zero) {
              register float temp = alpha * B[j * ldb + k];
              X[j * ldx + k] = temp;
              if (diag == CBlasNonUnit)
                X[j * ldx + k] *= A[k * lda + k];
              for (size_t i = k + 1; i < m; i++)
                X[j * ldx + i] += temp * A[k * lda + i];
            }
            else
              X[j * ldx + k] = B[j * ldb + k];
          } while (k-- > 0);
        }
      }
    }
    else {
      if (uplo == CBlasUpper) {
#pragma omp parallel for
        for (size_t j = 0; j < n; j++) {
          size_t i = m - 1;
          do {
            register float temp = B[j * ldb + i];
            if (diag == CBlasNonUnit)
              temp *= A[i * lda + i];
            for (size_t k = 0; k < i; k++)
              temp += A[i * lda + k] * B[j * ldb + k];
            X[j * ldx + i] = alpha * temp;
          } while (i-- > 0);
        }
      }
      else {
#pragma omp parallel for
        for (size_t j = 0; j < n; j++) {
          for (size_t i = 0; i < m; i++) {
            register float temp = B[j * ldb + i];
            if (diag == CBlasNonUnit)
              temp *= A[i * lda + i];
            for (size_t k = i + 1; k < m; k++)
              temp += A[i * lda + k] * B[j * ldb + k];
            X[j * ldx + i] = alpha * temp;
          }
        }
      }
    }
  }
  else {
    if (trans == CBlasNoTrans) {
      if (uplo == CBlasUpper) {
        size_t j = n - 1;
        do {
          register float temp = alpha;
          if (diag == CBlasNonUnit)
            temp *= A[j * lda + j];
          for (size_t i = 0; i < m; i++)
            X[j * ldx + i] = temp * B[j * ldb + i];
          for (size_t k = 0; k < j; k++) {
            if (A[j * lda + k] != zero) {
              register float temp = alpha * A[j * lda + k];
              for (size_t i = 0; i < m; i++)
                X[j * ldx + i] += temp * B[k * ldb + i];
            }
          }
        } while (j-- > 0);
      }
      else {
        for (size_t j = 0; j < n; j++) {
          register float temp = alpha;
          if (diag == CBlasNonUnit)
            temp *= A[j * lda + j];
          for (size_t i = 0; i < m; i++)
            X[j * ldx + i] = temp * B[j * ldb + i];
          for (size_t k = j + 1; k < n; k++) {
            if (A[j * lda + k] != zero) {
              register float temp = alpha * A[j * lda + k];
              for (size_t i = 0; i < m; i++)
                X[j * ldx + i] += temp * B[k * ldb + i];
            }
          }
        }
      }
    }
    else {
      if (uplo == CBlasUpper) {
        for (size_t k = 0; k < n; k++) {
          for (size_t j = 0; j < k; j++) {
            if (A[k * lda + j] != zero) {
              register float temp = alpha * A[k * lda + j];
              for (size_t i = 0; i < m; i++)
                X[j * ldx + i] += temp * B[k * ldb + i];
            }
          }
          register float temp = alpha;
          if (diag == CBlasNonUnit)
            temp *= A[k * lda + k];
          /* BUG FIX: the original guarded this store with
           * "if (temp != one)", copied from the in-place strmm where
           * skipping a multiply-by-one is valid.  Out of place, the
           * guard left column k of X completely unwritten whenever
           * temp == 1 (e.g. alpha == 1 with a unit diagonal), so X
           * kept whatever garbage its allocation contained.  Column k
           * must always be initialized from B. */
          for (size_t i = 0; i < m; i++)
            X[k * ldx + i] = temp * B[k * ldb + i];
        }
      }
      else {
        size_t k = n - 1;
        do {
          for (size_t j = k + 1; j < n; j++) {
            if (A[k * lda + j] != zero) {
              register float temp = alpha * A[k * lda + j];
              for (size_t i = 0; i < m; i++)
                X[j * ldx + i] += temp * B[k * ldb + i];
            }
          }
          register float temp = alpha;
          if (diag == CBlasNonUnit)
            temp *= A[k * lda + k];
          /* BUG FIX: same as the Upper case above — the store must be
           * unconditional so column k of X is always initialized. */
          for (size_t i = 0; i < m; i++)
            X[k * ldx + i] = temp * B[k * ldb + i];
        } while (k-- > 0);
      }
    }
  }
}

/*
 * GPU out-of-place STRMM: X := alpha * op(A) * B (or B * op(A)).
 * Looks up the template-instantiated kernel by its mangled name and
 * launches it on the given stream.  Returns CUDA_ERROR_INVALID_VALUE
 * on bad leading dimensions (after calling XERBLA).
 */
CUresult cuStrmm2(CUBLAShandle handle,
                  CBlasSide side, CBlasUplo uplo, CBlasTranspose trans, CBlasDiag diag,
                  size_t m, size_t n,
                  float alpha, CUdeviceptr A, size_t lda, CUdeviceptr B, size_t ldb,
                  CUdeviceptr X, size_t ldx, CUstream stream) {
  const size_t nRowA = (side == CBlasLeft) ? m : n;

  int info = 0;
  if (lda < nRowA)
    info = 9;
  else if (ldb < m)
    info = 11;
  else if (ldx < m)
    info = 13;
  if (info != 0) {
    XERBLA(info);
    return CUDA_ERROR_INVALID_VALUE;
  }

  if (m == 0 || n == 0)
    return CUDA_SUCCESS;

  CU_ERROR_CHECK(cuCtxPushCurrent(handle->context));

  /* Lazily load the fatbin module on first use. */
  if (handle->strmm2 == NULL)
    CU_ERROR_CHECK(cuModuleLoadData(&handle->strmm2, imageBytes));

  /* Tile sizes: the left-transposed kernel uses a different blocking
   * than the other variants. */
  const unsigned int mb = (side == CBlasLeft && trans != CBlasNoTrans) ? 32 : 64;
  const unsigned int nb = (side == CBlasLeft && trans != CBlasNoTrans) ? 32 : 16;
  const unsigned int kb = (side == CBlasLeft && trans != CBlasNoTrans) ?  8 : 16;
  const unsigned int bx = (side == CBlasLeft && trans != CBlasNoTrans) ?  8 : 16;
  const unsigned int by = (side == CBlasLeft && trans != CBlasNoTrans) ?  8 :  4;

  /* Build the Itanium-mangled name of the kernel instantiation.
   * NOTE(review): side/uplo/trans are formatted with %c — this assumes
   * the CBlas enums are encoded as their ASCII character codes; confirm
   * against blas.h. */
  char name[69];
  snprintf(name, 69,
           "_Z9strmm2%c%c%cIL9CBlasDiag%dELj%uELj%uELj%uELj%uELj%uEEvPKfS2_Pffiiiii",
           side, uplo, trans, diag, mb, nb, kb, bx, by);

  CUfunction function;
  CU_ERROR_CHECK(cuModuleGetFunction(&function, handle->strmm2, name));

  void * params[] = { &A, &B, &X, &alpha, &lda, &ldb, &ldx, &m, &n };

  CU_ERROR_CHECK(cuLaunchKernel(function,
                                (unsigned int)(m + mb - 1) / mb,
                                (unsigned int)(n + nb - 1) / nb, 1,
                                bx, by, 1, 0, stream, params, NULL));

  CU_ERROR_CHECK(cuCtxPopCurrent(&handle->context));

  return CUDA_SUCCESS;
}

/*
 * GPU in-place STRMM: B := alpha * op(A) * B (or B * op(A)).
 * Implemented by allocating a temporary pitched matrix X, running the
 * out-of-place kernel, then copying X back over B.
 */
CUresult cuStrmm(CUBLAShandle handle,
                 CBlasSide side, CBlasUplo uplo, CBlasTranspose trans, CBlasDiag diag,
                 size_t m, size_t n,
                 float alpha, CUdeviceptr A, size_t lda, CUdeviceptr B, size_t ldb,
                 CUstream stream) {
  const size_t nRowA = (side == CBlasLeft) ? m : n;

  int info = 0;
  if (lda < nRowA)
    info = 9;
  else if (ldb < m)
    info = 11;
  if (info != 0) {
    XERBLA(info);
    return CUDA_ERROR_INVALID_VALUE;
  }

  if (m == 0 || n == 0)
    return CUDA_SUCCESS;

  CU_ERROR_CHECK(cuCtxPushCurrent(handle->context));

  CUdeviceptr X;
  size_t ldx;
  CU_ERROR_CHECK(cuMemAllocPitch(&X, &ldx, m * sizeof(float), n, sizeof(float)));
  ldx /= sizeof(float);

  CU_ERROR_CHECK(cuStrmm2(handle, side, uplo, trans, diag,
                          m, n, alpha, A, lda, B, ldb, X, ldx, stream));

  CU_ERROR_CHECK(cuMemcpyDtoD2DAsync(B, ldb, 0, 0, X, ldx, 0, 0,
                                     m, n, sizeof(float), stream));

  /* NOTE(review): X is freed while the kernel and copy may still be
   * queued on `stream`; this relies on cuMemFree's implicit
   * synchronization with pending work on the allocation — confirm
   * against the driver-API contract. */
  CU_ERROR_CHECK(cuMemFree(X));

  CU_ERROR_CHECK(cuCtxPopCurrent(&handle->context));

  return CUDA_SUCCESS;
}

/*
 * Blocked multi-GPU STRMM on host-resident data: panels are updated
 * with cuMultiGPUSgemm and finished with the CPU strmm on the diagonal
 * blocks.  Falls back to the plain CPU strmm for small problems.
 *
 * NOTE(review): the GEMM panel updates use a coefficient of -one and a
 * traversal order (e.g. descending i for Left/Upper/NoTrans) that match
 * a TRSM-style blocking rather than the reference TRMM blocking, and
 * the panels read B blocks that earlier iterations have already
 * overwritten.  Verify this path numerically against strmm() before
 * relying on it; it is left unchanged here because the intended
 * multi-GPU decomposition cannot be confirmed from this file alone.
 */
CUresult cuMultiGPUStrmm(CUmultiGPUBLAShandle handle,
                         CBlasSide side, CBlasUplo uplo, CBlasTranspose trans, CBlasDiag diag,
                         size_t m, size_t n,
                         float alpha, const float * restrict A, size_t lda,
                         float * restrict B, size_t ldb) {
  const size_t nRowA = (side == CBlasLeft) ? m : n;

  int info = 0;
  if (lda < nRowA)
    info = 9;
  else if (ldb < m)
    info = 11;
  if (info != 0) {
    XERBLA(info);
    return CUDA_ERROR_INVALID_VALUE;
  }

  if (m == 0 || n == 0)
    return CUDA_SUCCESS;

  /* alpha == 0: use a k == 0 SGEMM to zero B. */
  if (alpha == zero) {
    sgemm(CBlasNoTrans, CBlasNoTrans, m, n, 0, zero, A, lda, B, ldb, zero, B, ldb);
    return CUDA_SUCCESS;
  }

  const size_t mb = (trans == CBlasNoTrans) ? SGEMM_N_MB : SGEMM_T_MB;
  const size_t nb = SGEMM_N_NB;

  /* Single-block problems are cheaper on the CPU. */
  if (m <= mb || n <= nb) {
    strmm(side, uplo, trans, diag, m, n, alpha, A, lda, B, ldb);
    return CUDA_SUCCESS;
  }

  if (side == CBlasLeft) {
    if (trans == CBlasNoTrans) {
      if (uplo == CBlasUpper) {
        size_t i = (m + mb - 1) & ~(mb - 1);
        do {
          i -= mb;
          const size_t ib = min(mb, m - i);

          CU_ERROR_CHECK(cuMultiGPUSgemm(handle, CBlasNoTrans, CBlasNoTrans,
                                         ib, n, m - i - ib,
                                         -one, &A[(i + ib) * lda + i], lda, &B[i + ib], ldb,
                                         alpha, &B[i], ldb));
          CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
          strmm(CBlasLeft, CBlasUpper, CBlasNoTrans, diag,
                ib, n, one, &A[i * lda + i], lda, &B[i], ldb);
        } while (i > 0);
      }
      else {
        for (size_t i = 0; i < m; i += mb) {
          const size_t ib = min(mb, m - i);

          CU_ERROR_CHECK(cuMultiGPUSgemm(handle, CBlasNoTrans, CBlasNoTrans,
                                         ib, n, i,
                                         -one, &A[i], lda, B, ldb,
                                         alpha, &B[i], ldb));
          CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
          strmm(CBlasLeft, CBlasLower, CBlasNoTrans, diag,
                ib, n, one, &A[i * lda + i], lda, &B[i], ldb);
        }
      }
    }
    else {
      if (uplo == CBlasUpper) {
        for (size_t i = 0; i < m; i += mb) {
          const size_t ib = min(mb, m - i);

          CU_ERROR_CHECK(cuMultiGPUSgemm(handle, CBlasTrans, CBlasNoTrans,
                                         ib, n, i,
                                         -one, &A[i * lda], lda, B, ldb,
                                         alpha, &B[i], ldb));
          CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
          strmm(CBlasLeft, CBlasUpper, CBlasTrans, diag,
                ib, n, one, &A[i * lda + i], lda, &B[i], ldb);
        }
      }
      else {
        size_t i = (m + mb - 1) & ~(mb - 1);
        do {
          i -= mb;
          const size_t ib = min(mb, m - i);

          CU_ERROR_CHECK(cuMultiGPUSgemm(handle, CBlasTrans, CBlasNoTrans,
                                         ib, n, m - i - ib,
                                         -one, &A[i * lda + i + ib], lda, &B[i + ib], ldb,
                                         alpha, &B[i], ldb));
          CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
          strmm(CBlasLeft, CBlasLower, CBlasTrans, diag,
                ib, n, one, &A[i * lda + i], lda, &B[i], ldb);
        } while (i > 0);
      }
    }
  }
  else {
    if (trans == CBlasNoTrans) {
      if (uplo == CBlasUpper) {
        for (size_t j = 0; j < n; j += nb) {
          const size_t jb = min(nb, n - j);

          CU_ERROR_CHECK(cuMultiGPUSgemm(handle, CBlasNoTrans, CBlasNoTrans,
                                         m, jb, j,
                                         -one, B, ldb, &A[j * lda], lda,
                                         alpha, &B[j * ldb], ldb));
          CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
          strmm(CBlasRight, CBlasUpper, CBlasNoTrans, diag,
                m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb);
        }
      }
      else {
        size_t j = (n + nb - 1) & ~(nb - 1);
        do {
          j -= nb;
          const size_t jb = min(nb, n - j);

          CU_ERROR_CHECK(cuMultiGPUSgemm(handle, CBlasNoTrans, CBlasNoTrans,
                                         m, jb, n - j - jb,
                                         -one, &B[(j + jb) * ldb], ldb, &A[j * lda + j + jb], lda,
                                         alpha, &B[j * ldb], ldb));
          CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
          strmm(CBlasRight, CBlasLower, CBlasNoTrans, diag,
                m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb);
        } while (j > 0);
      }
    }
    else {
      if (uplo == CBlasUpper) {
        size_t j = (n + nb - 1) & ~(nb - 1);
        do {
          j -= nb;
          const size_t jb = min(nb, n - j);

          CU_ERROR_CHECK(cuMultiGPUSgemm(handle, CBlasNoTrans, CBlasTrans,
                                         m, jb, n - j - jb,
                                         -one, &B[(j + jb) * ldb], ldb, &A[(j + jb) * lda + j], lda,
                                         alpha, &B[j * ldb], ldb));
          CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
          strmm(CBlasRight, CBlasUpper, CBlasTrans, diag,
                m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb);
        } while (j > 0);
      }
      else {
        for (size_t j = 0; j < n; j += nb) {
          const size_t jb = min(nb, n - j);

          CU_ERROR_CHECK(cuMultiGPUSgemm(handle, CBlasNoTrans, CBlasTrans,
                                         m, jb, j,
                                         -one, B, ldb, &A[j], lda,
                                         alpha, &B[j * ldb], ldb));
          CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
          strmm(CBlasRight, CBlasLower, CBlasTrans, diag,
                m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb);
        }
      }
    }
  }

  return CUDA_SUCCESS;
}
matmul_c_core.c
/******************************************************************** * BenchIT - Performance Measurement for Scientific Applications * Contact: developer@benchit.org * * $Id: matmul_c_core.c 1 2009-09-11 12:26:19Z william $ * $URL: svn+ssh://william@rupert.zih.tu-dresden.de/svn-base/benchit-root/BenchITv6/kernel/numerical/matmul/C/OpenMP/0/double/matmul_c_core.c $ * For license details see COPYING in the package base directory *******************************************************************/ /* Kernel: Matrix Multiply (C) *******************************************************************/ #include "stdio.h" #include "stdlib.h" #include <string.h> #include "matmul.h" #include "interface.h" void multaijk_(double *a, double *b, double *c, int *size); void multaikj_(double *a, double *b, double *c, int *size); void multajik_(double *a, double *b, double *c, int *size); void multajki_(double *a, double *b, double *c, int *size); void multakji_(double *a, double *b, double *c, int *size); void multakij_(double *a, double *b, double *c, int *size); double getlanguage_(void); void multaijk_(double *a, double *b, double *c, int *size) { int i, j, k; int s = *size; #pragma omp parallel for private(i,j,k) for (i = 0; i < s; i++) for (j = 0; j < s; j++) for (k = 0; k < s; k++) { c[ i * s + j ] = c[ i * s + j ] + a[ i * s + k ] * b[ k * s + j ]; } } void multaikj_(double *a, double *b, double *c, int *size) { int i, j, k; int s = *size; #pragma omp parallel for private(i,j,k) for (i = 0; i < s; i++) for (k = 0; k < s; k++) for (j = 0; j < s; j++) { c[ i * s + j ] = c[ i * s + j ] + a[ i * s + k ] * b[ k * s + j ]; } } void multajik_(double *a, double *b, double *c, int *size) { int i, j, k; int s = *size; #pragma omp parallel for private(i,j,k) for (j = 0; j < s; j++) for (i = 0; i < s; i++) for (k = 0; k < s; k++) { c[ i * s + j ] = c[ i * s + j ] + a[ i * s + k ] * b[ k * s + j ]; } } void multajki_(double *a, double *b, double *c, int *size) { int i, j, k; int s = 
*size; #pragma omp parallel for private(i,j,k) for (j = 0; j < s; j++) for (k = 0; k < s; k++) for (i = 0; i < s; i++) { c[ i * s + j ] = c[ i * s + j ] + a[ i * s + k ] * b[ k * s + j ]; } } void multakij_(double *a, double *b, double *c, int *size) { int i, j, k; int s = *size; #pragma omp parallel for private(i,j,k) for (k = 0; k < s; k++) for (i = 0; i < s; i++) for (j = 0; j < s; j++) { c[ i * s + j ] = c[ i * s + j ] + a[ i * s + k ] * b[ k * s + j ]; } } void multakji_(double *a, double *b, double *c, int *size) { int i, j, k; int s = *size; #pragma omp parallel for private(i,j,k) for (k = 0; k < s; k++) for (j = 0; j < s; j++) for (i = 0; i < s; i++) { c[ i * s + j ] = c[ i * s + j ] + a[ i * s + k ] * b[ k * s + j ]; } } double getlanguage_() { return 1.0; }
kvstore_dist_server.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2015 by Contributors * \file mxnet_node.h * \brief implement mxnet nodes */ #ifndef MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_ #define MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_ #include <mxnet/c_api.h> #include <mxnet/kvstore.h> #include <ps/ps.h> #include <queue> #include <string> #include <mutex> #include <condition_variable> #include <memory> #include <functional> #include <future> #include <vector> #include "../profiler/profiler.h" #include "../operator/tensor/elemwise_binary_op-inl.h" #include "../operator/tensor/init_op.h" namespace mxnet { namespace kvstore { // maintain same order in frontend. enum class CommandType { kController, kSetMultiPrecision, kStopServer, kSyncMode, kSetGradientCompression, kSetProfilerParams }; enum class RequestType { kDefaultPushPull, kRowSparsePushPull, kCompressedPushPull }; struct DataHandleType { RequestType requestType; int dtype; }; /*! * Uses Cantor pairing function to generate a unique number given two numbers. * This number can also be inverted to find the unique pair whose Cantor value is this number. 
* Ref: https://en.wikipedia.org/wiki/Pairing_function#Cantor_pairing_function * \param requestType RequestType * \param dtype integer * \return Cantor value of arguments */ static int GetCommandType(RequestType requestType, int d) { int m = static_cast<int>(requestType); return (((m + d) * (m + d + 1)) / 2) + d; } /*! * Unpairs Cantor value and finds the two integers used to pair. * Then returns DataHandleType object with those numbers. * \param cmd DataHandleCommand generated by GetCommandType function * \return DataHandleType */ static DataHandleType DepairDataHandleType(int cmd) { int w = std::floor((std::sqrt(8 * cmd + 1) - 1)/2); int t = ((w * w) + w) / 2; int y = cmd - t; int x = w - y; CHECK_GE(x, 0); CHECK_GE(y, 0); DataHandleType type; type.requestType = static_cast<RequestType>(x); type.dtype = y; return type; } /** * \brief executor runs a function using the thread called \ref Start */ class Executor { public: /** * \brief start the executor */ void Start() { std::unique_lock<std::mutex> lk(mu_); while (true) { cond_.wait(lk, [this]{return !queue_.empty();}); Block blk = std::move(queue_.front()); queue_.pop(); lk.unlock(); if (blk.f) { blk.f(); blk.p->set_value(); } else { blk.p->set_value(); break; } lk.lock(); } } /** * \brief function */ typedef std::function<void()> Func; /** * \brief let the thread called \ref Start to exec a function. 
threadsafe */ void Exec(const Func& func) { Block blk(func); auto fut = blk.p->get_future(); { std::lock_guard<std::mutex> lk(mu_); queue_.push(std::move(blk)); cond_.notify_one(); } fut.wait(); } /** * \brief stop the thread, threadsafe */ void Stop() { Exec(Func()); } private: struct Block { explicit Block(const Func& func) : f(func), p(std::make_shared<std::promise<void>>()) { } Func f; std::shared_ptr<std::promise<void>> p; }; std::queue<Block> queue_; std::mutex mu_; std::condition_variable cond_; }; class KVStoreDistServer { public: KVStoreDistServer() { using namespace std::placeholders; ps_server_ = new ps::KVServer<char>(0); static_cast<ps::SimpleApp*>(ps_server_)->set_request_handle( std::bind(&KVStoreDistServer::CommandHandle, this, _1, _2)); ps_server_->set_request_handle( std::bind(&KVStoreDistServer::DataHandleEx, this, _1, _2, _3)); sync_mode_ = false; gradient_compression_ = std::make_shared<GradientCompression>(); log_verbose_ = dmlc::GetEnv("MXNET_KVSTORE_DIST_ROW_SPARSE_VERBOSE", false); } ~KVStoreDistServer() { profiler::Profiler::Get()->SetState(profiler::Profiler::ProfilerState(0)); delete ps_server_; } void set_controller(const KVStore::Controller& controller) { CHECK(controller); controller_ = controller; } void set_updater(const KVStore::Updater& updater) { CHECK(updater); updater_ = updater; } /** * \brief blocked until received the command \a kSyncMode */ void Run() { exec_.Start(); } private: struct UpdateBuf { std::vector<ps::KVMeta> request; NDArray merged; // temp_array is used to cast received values as float32 for computation if required NDArray temp_array; }; void CommandHandle(const ps::SimpleData& recved, ps::SimpleApp* app) { CommandType recved_type = static_cast<CommandType>(recved.head); switch (recved_type) { case CommandType::kStopServer: exec_.Stop(); break; case CommandType::kSyncMode: sync_mode_ = true; break; case CommandType::kSetGradientCompression: gradient_compression_->DecodeParams(recved.body); break; case 
CommandType::kSetProfilerParams: // last char is the type of profiler command ProcessServerProfilerCommands(static_cast<KVStoreServerProfilerCommand> (recved.body.back() - '0'), recved.body); break; case CommandType::kSetMultiPrecision: // uses value 1 for message id from frontend if (!multi_precision_) { multi_precision_ = true; CreateMultiPrecisionCopies(); } break; case CommandType::kController: // this uses value 0 for message id from frontend // let the main thread to execute ctrl, which is necessary for python exec_.Exec([this, recved]() { CHECK(controller_); controller_(recved.head, recved.body); }); break; } app->Response(recved); } /* * For keys already initialized, if necessary create stored_realt. * This will only be used if by some wrong usage of kvstore, * some keys are initialized before optimizer is set. */ void CreateMultiPrecisionCopies() { for (auto const &stored_entry : store_) { const int key = stored_entry.first; const NDArray &stored = stored_entry.second; if (stored.dtype() != mshadow::kFloat32) { auto &stored_realt = store_realt_[key]; if (stored.storage_type() == kRowSparseStorage) { stored_realt = NDArray(kRowSparseStorage, stored.shape(), stored.ctx(), true, mshadow::kFloat32); } else { stored_realt = NDArray(stored.shape(), stored.ctx(), false, mshadow::kFloat32); } auto &update = update_buf_[key]; if (!update.merged.is_none()) { if (update.merged.storage_type() == kRowSparseStorage) { update.merged = NDArray(kRowSparseStorage, update.merged.shape(), update.merged.ctx(), true, mshadow::kFloat32); } else { update.merged = NDArray(update.merged.shape(), update.merged.ctx(), false, mshadow::kFloat32); } } CHECK(update.request.size() == 0) << ps::MyRank() << "Multiprecision mode can not be set while pushes are underway." << "Please set optimizer before pushing keys." 
<< key << " " << update.request.size(); CopyFromTo(stored, stored_realt); } } for (auto const &stored_realt_entry : store_realt_) { stored_realt_entry.second.WaitToRead(); } } void ProcessServerProfilerCommands(KVStoreServerProfilerCommand type, const std::string& body) { switch (type) { case KVStoreServerProfilerCommand::kSetConfig: SetProfilerConfig(body.substr(0, body.size() - 1)); break; case KVStoreServerProfilerCommand::kState: MXSetProfilerState(static_cast<int>(body.front() - '0')); break; case KVStoreServerProfilerCommand::kPause: MXProfilePause(static_cast<int>(body.front() - '0')); break; case KVStoreServerProfilerCommand::kDump: MXDumpProfile(static_cast<int>(body.front() - '0')); break; } } void SetProfilerConfig(std::string params_str) { std::vector<std::string> elems; mxnet::kvstore::split(params_str, ',', std::back_inserter(elems)); std::vector<const char*> ckeys; std::vector<const char*> cvals; ckeys.reserve(elems.size()); cvals.reserve(elems.size()); for (size_t i=0; i < elems.size(); i++) { std::vector<std::string> parts; mxnet::kvstore::split(elems[i], ':', std::back_inserter(parts)); CHECK_EQ(parts.size(), 2) << "Improper profiler config passed from worker"; CHECK(!parts[0].empty()) << "ProfilerConfig parameter is empty"; CHECK(!parts[1].empty()) << "ProfilerConfig value is empty for parameter "<< parts[0]; if (parts[0] == "filename") { parts[1] = "rank" + std::to_string(ps::MyRank()) + "_" + parts[1]; } char* ckey = new char[parts[0].length() + 1]; std::snprintf(ckey, parts[0].length() + 1, "%s", parts[0].c_str()); ckeys.push_back(ckey); char* cval = new char[parts[1].length() + 1]; std::snprintf(cval, parts[1].length() + 1, "%s", parts[1].c_str()); cvals.push_back(cval); } MXSetProfilerConfig(elems.size(), &ckeys[0], &cvals[0]); for (size_t i=0; i < ckeys.size(); i++) { delete[] ckeys[i]; delete[] cvals[i]; } } void DataHandleEx(const ps::KVMeta& req_meta, const ps::KVPairs<char>& req_data, ps::KVServer<char>* server) { DataHandleType type = 
DepairDataHandleType(req_meta.cmd); switch (type.requestType) { case RequestType::kRowSparsePushPull: DataHandleRowSparse(type, req_meta, req_data, server); break; case RequestType::kCompressedPushPull: DataHandleCompressed(type, req_meta, req_data, server); break; case RequestType::kDefaultPushPull: DataHandleDefault(type, req_meta, req_data, server); break; } } inline bool has_multi_precision_copy(const DataHandleType type) { return multi_precision_ && type.dtype != mshadow::kFloat32; } inline void ApplyUpdates(const DataHandleType type, const int key, const ps::KVPairs<char>& req_data, UpdateBuf *update_buf, ps::KVServer<char>* server) { if (!sync_mode_ || update_buf->request.size() == (size_t) ps::NumWorkers()) { // let the main thread to execute updater_, which is necessary for python auto& stored = has_multi_precision_copy(type) ? store_realt_[key] : store_[key]; auto& update = sync_mode_ ? update_buf->merged : update_buf->temp_array; if (updater_) { exec_.Exec([this, key, &update, &stored](){ CHECK(updater_); updater_(key, update, &stored); }); } else { CHECK(sync_mode_) << "Updater needs to be set for async mode"; // if no updater, just copy CopyFromTo(update_buf->merged, &stored); } if (log_verbose_) { LOG(INFO) << "sent response to " << update_buf->request.size() << " workers"; } /** * Request can be for either push, pull or pushpull * If pull flag is set, respond immediately with the updated values * Otherwise, only send the notification */ bool has_pull = false; for (const auto& req : update_buf->request) { has_pull = has_pull || req.pull; } if (has_pull) { // if there is a pull request, perform WaitToRead() once before DefaultStorageResponse if (has_multi_precision_copy(type)) CopyFromTo(stored, store_[key]); stored.WaitToRead(); for (const auto& req : update_buf->request) { if (req.pull) { DefaultStorageResponse(type, key, req, req_data, server); } } update_buf->request.clear(); } else { // otherwise, send response directly for (const auto& req : 
update_buf->request) { server->Response(req); } update_buf->request.clear(); if (has_multi_precision_copy(type)) CopyFromTo(stored, store_[key]); stored.WaitToRead(); } } else { update_buf->merged.WaitToRead(); } } void DecodeRowIds(const ps::SArray<ps::Key> &keys, int64_t *indices, const int64_t master_key, const int64_t num_rows) { indices[0] = 0; for (int64_t i = 1; i <= num_rows; i++) { int key = DecodeKey(keys[i]); auto row_id = key - master_key; indices[i - 1] = row_id; } } void AccumulateRowSparseGrads(const DataHandleType type, const NDArray& recved, UpdateBuf* updateBuf) { NDArray out(kRowSparseStorage, updateBuf->merged.shape(), Context(), true, has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype); if (has_multi_precision_copy(type)) CopyFromTo(recved, updateBuf->temp_array); const NDArray& to_merge = has_multi_precision_copy(type) ? updateBuf->temp_array : recved; // accumulate row_sparse gradients using namespace mshadow; Engine::Get()->PushAsync( [to_merge, updateBuf, out](RunContext ctx, Engine::CallbackOnStart on_start, Engine::CallbackOnComplete on_complete) { on_start(); op::ElemwiseBinaryOp::ComputeEx<cpu, op::mshadow_op::plus>( {}, {}, {to_merge, updateBuf->merged}, {kWriteTo}, {out}); on_complete(); }, to_merge.ctx(), {to_merge.var(), updateBuf->merged.var()}, {out.var()}, FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME); CopyFromTo(out, &(updateBuf->merged), 0); updateBuf->merged.WaitToRead(); } void RowSparsePullResponse(const DataHandleType type, const int master_key, const size_t num_rows, const ps::KVMeta& req_meta, const ps::KVPairs<char>& req_data, ps::KVServer<char>* server) { if (log_verbose_) LOG(INFO) << "pull: " << master_key; ps::KVPairs<char> response; if (num_rows == 0) { std::vector<int> lens(req_data.keys.size(), 0); response.keys = req_data.keys; response.lens.CopyFrom(lens.begin(), lens.end()); server->Response(req_meta, response); return; } const NDArray& stored = store_[master_key]; if 
(has_multi_precision_copy(type)) stored.WaitToRead(); CHECK(!stored.is_none()) << "init " << master_key << " first"; auto shape = stored.shape(); auto unit_len = shape.ProdShape(1, shape.ndim()); const int num_bytes = mshadow::mshadow_sizeof(type.dtype); const int unit_size = unit_len * num_bytes; const char* data = static_cast<char *> (stored.data().dptr_); auto len = num_rows * unit_size; // concat values response.vals.resize(len); #pragma omp parallel for for (size_t i = 1; i <= num_rows; i++) { int key = DecodeKey(req_data.keys[i]); int64_t row_id = key - master_key; const auto src = data + row_id * unit_size; auto begin = (i - 1) * unit_size; auto end = i * unit_size; response.vals.segment(begin, end).CopyFrom(src, unit_size); } // setup response response.keys = req_data.keys; std::vector<int> lens(req_data.keys.size(), unit_len); lens[0] = 0; response.lens.CopyFrom(lens.begin(), lens.end()); server->Response(req_meta, response); } void InitRowSparseStored(const DataHandleType type, const int master_key, const size_t num_rows, const ps::KVMeta& req_meta, const ps::KVPairs<char>& req_data, ps::KVServer<char>* server) { auto& stored = has_multi_precision_copy(type) ? store_realt_[master_key] : store_[master_key]; int dtype = type.dtype; int num_bytes = mshadow::mshadow_sizeof(dtype); auto unit_len = req_data.lens[1] / num_bytes; CHECK_GT(unit_len, 0); size_t ds[] = {num_rows, (size_t) unit_len}; mxnet::TShape dshape(ds, ds + 2); CHECK_EQ(req_data.vals.size(), num_rows * unit_len * num_bytes); TBlob recv_blob; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape, cpu::kDevMask); }) NDArray recved = NDArray(recv_blob, 0); stored = NDArray(kRowSparseStorage, dshape, Context(), true, has_multi_precision_copy(type) ? 
mshadow::kFloat32 : type.dtype);
if (has_multi_precision_copy(type)) {
  // dtype-typed mirror of the fp32 master copy
  store_[master_key] = NDArray(kRowSparseStorage, dshape, Context(), true, type.dtype);
}
Engine::Get()->PushAsync(
  [this, recved, stored, type](RunContext ctx, Engine::CallbackOnStart on_start,
                               Engine::CallbackOnComplete on_complete) {
    on_start();
    NDArray rsp = stored;
    stored.CheckAndAlloc({mshadow::Shape1(recved.shape()[0])});
    mshadow::Stream<cpu> *s = ctx.get_stream<cpu>();
    using namespace mxnet::op;
    nnvm::dim_t nnr = rsp.shape()[0];
    // populate the full index range 0..nnr-1
    MSHADOW_IDX_TYPE_SWITCH(rsp.aux_type(rowsparse::kIdx), IType, {
      IType* idx = rsp.aux_data(rowsparse::kIdx).dptr<IType>();
      mxnet_op::Kernel<PopulateFullIdxRspKernel, cpu>::Launch(s, nnr, idx);
    });
    TBlob rsp_data = rsp.data();
    // copies or casts as appropriate
    ndarray::Copy<cpu, cpu>(recved.data(), &rsp_data, Context(), Context(), RunContext());
    on_complete();
  }, recved.ctx(), {recved.var()}, {stored.var()},
  FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME);
if (has_multi_precision_copy(type)) {
  CopyFromTo(stored, store_[master_key]);
  store_[master_key].WaitToRead();
}
stored.WaitToRead();
server->Response(req_meta);
}

/**
 * \brief Dispatch a row_sparse push or pull.
 * keys[0] encodes the master key; keys[1..] encode the pushed/pulled rows.
 * First push initializes storage; later pushes are merged (sync mode) or
 * handed to the updater; pulls are answered by RowSparsePullResponse.
 */
void DataHandleRowSparse(const DataHandleType type, const ps::KVMeta& req_meta,
                         const ps::KVPairs<char>& req_data,
                         ps::KVServer<char>* server) {
  int master_key = DecodeKey(req_data.keys[0]);
  auto num_rows = req_data.keys.size() - 1;
  auto& stored = store_[master_key];
  if (req_meta.push) {
    CHECK_GT(req_data.lens.size(), 0) << "req_data.lens cannot be empty";
    CHECK_EQ(req_data.lens[0], 0);  // master key carries no payload
    if (stored.is_none()) {
      if (log_verbose_) LOG(INFO) << "initial push: " << master_key;
      // initialization
      CHECK_GT(num_rows, 0) << "init with empty data is not supported";
      InitRowSparseStored(type, master_key, num_rows, req_meta, req_data, server);
      return;
    } else {
      if (log_verbose_) LOG(INFO) << "push: " << master_key << " " << req_data.keys;
      auto& updates = update_buf_[master_key];
      if (sync_mode_ && updates.merged.is_none()) {
        updates.merged =
NDArray(kRowSparseStorage, stored.shape(), Context(), true,
        has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
      }
      if (has_multi_precision_copy(type) && updates.temp_array.is_none()) {
        // fp32 staging buffer for multi-precision accumulation
        updates.temp_array = NDArray(kRowSparseStorage, stored.shape(), Context(), false, mshadow::kFloat32);
      }
      if (num_rows == 0) {
        // empty push: this worker contributes no rows
        if (sync_mode_) {
          if (updates.request.empty()) {
            // reset to zeros
            int merged_dtype = has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype;
            updates.merged = NDArray(kRowSparseStorage, stored.shape(), Context(), true, merged_dtype);
          }  // else nothing to aggregate
          updates.request.push_back(req_meta);
          ApplyUpdates(type, master_key, req_data, &updates, server);
        } else {
          server->Response(req_meta);
        }
      } else {
        auto unit_len = req_data.lens[1] / mshadow::mshadow_sizeof(type.dtype);
        CHECK_GT(unit_len, 0);
        // indices
        std::vector<int64_t> indices(num_rows);
        DecodeRowIds(req_data.keys, indices.data(), master_key, num_rows);
        // data
        TBlob idx_blob(indices.data(), mshadow::Shape1(num_rows), cpu::kDevMask);
        size_t ds[] = {(size_t) num_rows, (size_t) unit_len};
        mxnet::TShape dshape(ds, ds + 2);
        TBlob recv_blob;
        MSHADOW_REAL_TYPE_SWITCH(type.dtype, DType, {
          recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape, cpu::kDevMask);
        })
        // row_sparse NDArray
        NDArray recved(kRowSparseStorage, stored.shape(), recv_blob, {idx_blob}, 0);
        if (updates.request.empty()) {
          // first push of this round
          if (sync_mode_) {
            CopyFromTo(recved, updates.merged);
          } else {
            if (has_multi_precision_copy(type)) {
              CopyFromTo(recved, updates.temp_array);
            } else {
              updates.temp_array = recved;
            }
          }
        } else {
          // later pushes of the same round accumulate
          CHECK(sync_mode_);
          AccumulateRowSparseGrads(type, recved, &updates);
        }
        updates.request.push_back(req_meta);
        ApplyUpdates(type, master_key, req_data, &updates, server);
      }
    }
  } else {
    // pull
    RowSparsePullResponse(type, master_key, num_rows, req_meta, req_data, server);
  }
}

/**
 * \brief Answer a pull of a dense (default-storage) key by copying the whole
 * stored array into the response.
 */
void DefaultStorageResponse(const DataHandleType type, const int key,
                            const ps::KVMeta& req_meta,
                            const ps::KVPairs<char>
&req_data, ps::KVServer<char>* server) {
  ps::KVPairs<char> response;
  const NDArray& stored = store_[key];
  CHECK(!stored.is_none()) << "init " << key << " first";
  // as server returns when store_realt is ready in this case
  if (has_multi_precision_copy(type)) stored.WaitToRead();
  auto len = stored.shape().Size() * mshadow::mshadow_sizeof(stored.dtype());
  response.keys = req_data.keys;
  response.lens = {len};
  // TODO(mli) try to remove this CopyFrom
  response.vals.CopyFrom(static_cast<const char*>(stored.data().dptr_), len);
  server->Response(req_meta, response);
}

/**
 * \brief Handle a push/pull of a gradient-compressed key. Pushed values are
 * dequantized before being stored/merged; pulls fall through to the default
 * dense response. Only fp32 payloads are supported.
 */
void DataHandleCompressed(const DataHandleType type,
                          const ps::KVMeta& req_meta,
                          const ps::KVPairs<char> &req_data,
                          ps::KVServer<char>* server) {
  CHECK_EQ(type.dtype, mshadow::kFloat32)
    << "Gradient compression is currently supported for fp32 only";
  if (req_meta.push) {
    // there used several WaitToRead, this is because \a recved's memory
    // could be deallocated when this function returns. so we need to make sure
    // the operators with \a NDArray are actually finished
    // first for dummy key which represents original size of array, whose len is 0
    CHECK_EQ(req_data.keys.size(), (size_t)2);
    CHECK_EQ(req_data.lens.size(), (size_t)2);
    CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[1]);
    int original_size = DecodeKey(req_data.keys[0]);  // uncompressed element count
    int key = DecodeKey(req_data.keys[1]);
    auto& stored = store_[key];
    size_t ds[] = {(size_t)req_data.lens[1] / mshadow::mshadow_sizeof(type.dtype)};
    mxnet::TShape dshape(ds, ds + 1);
    TBlob recv_blob(reinterpret_cast<real_t*>(req_data.vals.data()), dshape, cpu::kDevMask);
    NDArray recved = NDArray(recv_blob, 0);
    NDArray decomp_buf = decomp_buf_[key];
    dshape = mxnet::TShape{(int64_t) original_size};
    if (decomp_buf.is_none()) {
      decomp_buf = NDArray(dshape, Context());
    }
    if (stored.is_none()) {
      // first push: dequantize straight into storage
      stored = NDArray(dshape, Context());
      gradient_compression_->Dequantize(recved, &stored, 0);
      server->Response(req_meta);
      stored.WaitToRead();
    } else if (sync_mode_) {
      // synced push
      auto& merged =
update_buf_[key];
      if (merged.merged.is_none()) {
        merged.merged = NDArray(dshape, Context());
      }
      if (merged.request.size() == 0) {
        // first push of the round: dequantize directly into the merge buffer
        gradient_compression_->Dequantize(recved, &merged.merged, 0);
      } else {
        gradient_compression_->Dequantize(recved, &decomp_buf, 0);
        merged.merged += decomp_buf;
      }
      merged.request.push_back(req_meta);
      ApplyUpdates(type, key, req_data, &merged, server);
    } else {
      // async push
      gradient_compression_->Dequantize(recved, &decomp_buf, 0);
      exec_.Exec([this, key, &decomp_buf, &stored]() {
        CHECK(updater_);
        updater_(key, decomp_buf, &stored);
      });
      server->Response(req_meta);
      stored.WaitToRead();
    }
  } else {
    // pull
    CHECK_EQ(req_data.keys.size(), (size_t)1);
    CHECK_EQ(req_data.lens.size(), (size_t)0);
    int key = DecodeKey(req_data.keys[0]);
    DefaultStorageResponse(type, key, req_meta, req_data, server);
  }
}

/**
 * \brief Handle a push/pull of an uncompressed dense key. First push
 * initializes storage; later pushes are merged (sync) or staged for the
 * updater (async); pulls are answered by DefaultStorageResponse.
 */
void DataHandleDefault(const DataHandleType type, const ps::KVMeta& req_meta,
                       const ps::KVPairs<char> &req_data,
                       ps::KVServer<char>* server) {
  // do some check
  CHECK_EQ(req_data.keys.size(), (size_t)1);
  if (req_meta.push) {
    CHECK_EQ(req_data.lens.size(), (size_t)1);
    CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[0]);
  }
  int key = DecodeKey(req_data.keys[0]);
  auto& stored = has_multi_precision_copy(type) ? store_realt_[key] : store_[key];
  // there used several WaitToRead, this is because \a recved's memory
  // could be deallocated when this function returns. so we need to make sure
  // the operators with \a NDArray are actually finished
  if (req_meta.push) {
    size_t ds[] = {(size_t) req_data.lens[0] / mshadow::mshadow_sizeof(type.dtype)};
    mxnet::TShape dshape(ds, ds + 1);
    TBlob recv_blob;
    MSHADOW_REAL_TYPE_SWITCH(type.dtype, DType, {
      recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape, cpu::kDevMask);
    })
    NDArray recved = NDArray(recv_blob, 0);
    if (stored.is_none()) {
      // initialization
      stored = NDArray(dshape, Context(), false,
                       has_multi_precision_copy(type) ?
mshadow::kFloat32 : type.dtype);
      CopyFromTo(recved, &stored, 0);
      server->Response(req_meta);
      if (has_multi_precision_copy(type)) {
        // create and fill the dtype-typed mirror of the fp32 master copy
        auto& stored_dtype = store_[key];
        stored_dtype = NDArray(dshape, Context(), false, type.dtype);
        CopyFromTo(stored, stored_dtype);
        stored_dtype.WaitToRead();
      }
      stored.WaitToRead();
    } else {
      auto &updates = update_buf_[key];
      if (sync_mode_ && updates.merged.is_none()) {
        updates.merged = NDArray(dshape, Context(), false,
                                 has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
      }
      if (has_multi_precision_copy(type) && updates.temp_array.is_none()) {
        // fp32 staging buffer for multi-precision accumulation
        updates.temp_array = NDArray(dshape, Context(), false, mshadow::kFloat32);
      }
      if (updates.request.empty()) {
        // first push of this round
        if (sync_mode_) {
          CopyFromTo(recved, updates.merged);
        } else {
          if (has_multi_precision_copy(type)) {
            CopyFromTo(recved, updates.temp_array);
          } else {
            updates.temp_array = recved;
          }
        }
      } else {
        // later pushes of the same round accumulate into merged
        CHECK(sync_mode_);
        if (has_multi_precision_copy(type)) {
          CopyFromTo(recved, updates.temp_array);
          updates.merged += updates.temp_array;
        } else {
          updates.merged += recved;
        }
      }
      updates.request.push_back(req_meta);
      ApplyUpdates(type, key, req_data, &updates, server);
    }
  } else {
    DefaultStorageResponse(type, key, req_meta, req_data, server);
  }
}

/// Convert a global ps::Key into this server's local integer key space.
int DecodeKey(ps::Key key) {
  auto kr = ps::Postoffice::Get()->GetServerKeyRanges()[ps::MyRank()];
  return key - kr.begin();
}

/**
 * \brief user defined mode for push
 */
bool sync_mode_;
KVStore::Controller controller_;
KVStore::Updater updater_;

/**
 * \brief store_ contains the value at kvstore for each key
 */
std::unordered_map<int, NDArray> store_;
// fp32 master copies used in multi-precision mode
std::unordered_map<int, NDArray> store_realt_;

/**
 * \brief merge_buf_ is a buffer used if sync_mode is true. It represents
 * values from different workers being merged. The store will be updated
 * to this value when values from all workers are pushed into this buffer.
 */
std::unordered_map<int, UpdateBuf> update_buf_;

/**
 * \brief decomp_buf_ is a buffer into which compressed values are
 * decompressed before merging to the store. used when compress_!='none'
 */
std::unordered_map<int, NDArray> decomp_buf_;

Executor exec_;
ps::KVServer<char>* ps_server_;

// whether to LOG verbose information
bool log_verbose_;

/*
 * \brief whether to use multi precision mode.
 * in multi precision mode, all weights are stored as float32.
 * any gradient received will be cast to float32 before accumulation and updating of weights.
 */
bool multi_precision_;

/**
 * \brief gradient compression object.
 * starts with none, used after SetGradientCompression sets the type
 * currently there is no support for unsetting gradient compression
 */
std::shared_ptr<kvstore::GradientCompression> gradient_compression_;
};

}  // namespace kvstore
}  // namespace mxnet

#endif  // MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
core_math.h
// == mojo ==================================================================== // // Copyright (c) gnawice@gnawice.com. All rights reserved. // See LICENSE in root folder // // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files(the "Software"), // to deal in the Software without restriction, including without // limitation the rights to use, copy, modify, merge, publish, distribute, // sublicense, and/or sell copies of the Software, and to permit persons to // whom the Software is furnished to do so, subject to the following // conditions : // // The above copyright notice and this permission notice shall be included // in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. // IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY // CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT // OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR // THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
// // ============================================================================ // core_math.h: defines matrix class and math functions // ==================================================================== mojo == #pragma once #include <math.h> #include <string.h> #include <string> #include <cstdlib> #include <random> #include <algorithm> #include <immintrin.h> #include <vector> # include "../ziggurat_rand_gen/ziggurat.h" void printf(const char *fmt, ...); namespace mojo { enum pad_type { zero = 0, edge = 1, median_edge = 2 }; inline float dot(const float *x1, const float *x2, const int size) { switch (size) { case 1: return x1[0] * x2[0]; case 2: return x1[0] * x2[0] + x1[1] * x2[1]; case 3: return x1[0] * x2[0] + x1[1] * x2[1] + x1[2] * x2[2]; case 4: return x1[0] * x2[0] + x1[1] * x2[1] + x1[2] * x2[2] + x1[3] * x2[3]; case 5: return x1[0] * x2[0] + x1[1] * x2[1] + x1[2] * x2[2] + x1[3] * x2[3] + x1[4] * x2[4]; default: float v = 0; for (int i = 0; i<size; i++) v += x1[i] * x2[i]; return v; }; } inline float unwrap_2d_dot(const float *x1, const float *x2, const int size, int stride1, int stride2) { float v=0; for(int j=0; j<size; j++) { // printf(">>>>>>> %f %f\n", x1[stride1*j], x2[stride2*j]); v+= dot(&x1[stride1*j],&x2[stride2*j],size); } return v; } // second item is rotated 180 (this is a convolution) inline float dot_rot180(const float *x1, const float *x2, const int size) { switch(size) { case 1: return x1[0]*x2[0]; case 2: return x1[0]*x2[1]+x1[1]*x2[0]; case 3: return x1[0]*x2[2]+x1[1]*x2[1]+x1[2]*x2[0]; case 4: return x1[0]*x2[3]+x1[1]*x2[2]+x1[2]*x2[1]+x1[3]*x2[0]; case 5: return x1[0]*x2[4]+x1[1]*x2[3]+x1[2]*x2[2]+x1[3]*x2[1]+x1[4]*x2[0]; default: float v=0; for(int i=0; i<size; i++) v+=x1[i]*x2[size-i-1]; return v; }; } inline float unwrap_2d_dot_rot180(const float *x1, const float *x2, const int size, int stride1, int stride2) { float v=0; for(int j=0; j<size; j++) { v+= dot_rot180(&x1[stride1*j],&x2[stride2*(size-j-1)],size); } return v; } 
inline void unwrap_aligned_NxN(const int N, float *aligned_out, const float *in, const int in_size, const int stride = 1) { const int node_size = (in_size - N)/stride + 1; int c1 = 0; int off = 0; const int inc_off = N*N*8; for (int j = 0; j < node_size; j += 1) // intput h { for (int i = 0; i < node_size; i += 1) // intput w { const float *tn = in + j*in_size + i; if(N==5) { for (int k = 0; k < 5; k++) { aligned_out[c1 + 0 + k * 40 + off] = tn[0 + 0 + in_size*k]; aligned_out[c1 + 8 + k * 40 + off] = tn[0 + 1 + in_size*k]; aligned_out[c1 + 16 + k * 40 + off] = tn[0 + 2 + in_size*k]; aligned_out[c1 + 24 + k * 40 + off] = tn[0 + 3 + in_size*k]; aligned_out[c1 + 32 + k * 40 + off] = tn[0 + 4 + in_size*k]; } } else if(N==3) { aligned_out[c1 + off] = tn[0]; aligned_out[c1 + 8 + off] = tn[0 + 1]; aligned_out[c1 + 16 + off] = tn[0 + 2]; aligned_out[c1 + 24 + off] = tn[0 + in_size]; aligned_out[c1 + 32 + off] = tn[0 + 1 + in_size]; aligned_out[c1 + 40 + off] = tn[0 + 2 + in_size]; aligned_out[c1 + 48 + off] = tn[0 + 2 * in_size]; aligned_out[c1 + 56 + off] = tn[0 + 1 + 2 * in_size]; aligned_out[c1 + 64 + off] = tn[0 + 2 + 2 * in_size]; } else { int cnt=0; for (int k = 0; k < N; k++) { for (int m = 0; m < N; m++) { aligned_out[c1 + cnt*8 + off] = tn[0 + m + in_size*k]; cnt++; } } } off++; if (off > 7) { off = 0; c1 += inc_off; } } } } inline void dotsum_unwrapped_NxN(const int N, const float *im, const float *filter_ptr, float *out, const int outsize) { const int NN=N*N; for (int j = 0; j < outsize; j += 8) { float *c = out+j; for(int i=0; i<NN; i++) { const float f = filter_ptr[i]; c[0]+=im[0]*f; c[1]+=im[1]*f; c[2]+=im[2]*f; c[3]+=im[3]*f; c[4]+=im[4]*f; c[5]+=im[5]*f; c[6]+=im[6]*f; c[7]+=im[7]*f; im+=8; } } } #ifdef MOJO_AVX inline void dotsum_unwrapped_2x2(const float *_img, const float *filter_ptr, float *out, const int outsize) { _mm256_zeroupper(); const __m256 f0 = _mm256_broadcast_ss(&filter_ptr[0]); const __m256 f1 = _mm256_broadcast_ss(&filter_ptr[1]); const 
__m256 f2 = _mm256_broadcast_ss(&filter_ptr[2]);
	const __m256 f3 = _mm256_broadcast_ss(&filter_ptr[3]);
	for (int j = 0; j < outsize; j += 8)
	{
		__m256 a, c0, c1;
		// multiply filter
		a = _mm256_load_ps(_img); c0 = _mm256_mul_ps(a, f0);
		a = _mm256_load_ps(_img + 8); c1 = _mm256_mul_ps(a, f1); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 16); c1 = _mm256_mul_ps(a, f2); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 24); c1 = _mm256_mul_ps(a, f3); c0 = _mm256_add_ps(c0, c1);
		// add result to output
		a = _mm256_load_ps(out + j);
		c0 = _mm256_add_ps(c0, a);
		_mm256_stream_ps(out + j, c0);
		_img += 32;
	}
	_mm256_zeroupper();
}

// AVX 3x3 variant: 9 broadcast filter taps, 8 output pixels per iteration.
inline void dotsum_unwrapped_3x3(const float *_img, const float *filter_ptr, float *out, const int outsize)
{
	_mm256_zeroupper();
	const __m256 f0 = _mm256_broadcast_ss(&filter_ptr[0]);
	const __m256 f1 = _mm256_broadcast_ss(&filter_ptr[1]);
	const __m256 f2 = _mm256_broadcast_ss(&filter_ptr[2]);
	const __m256 f3 = _mm256_broadcast_ss(&filter_ptr[3]);
	const __m256 f4 = _mm256_broadcast_ss(&filter_ptr[4]);
	const __m256 f5 = _mm256_broadcast_ss(&filter_ptr[5]);
	const __m256 f6 = _mm256_broadcast_ss(&filter_ptr[6]);
	const __m256 f7 = _mm256_broadcast_ss(&filter_ptr[7]);
	const __m256 f8 = _mm256_broadcast_ss(&filter_ptr[8]);
	for (int j = 0; j < outsize; j += 8)//stride) // input w
	{
		__m256 a, c0, c1;
		// multiply filter
		a = _mm256_load_ps(_img); c0 = _mm256_mul_ps(a, f0);
		a = _mm256_load_ps(_img + 8); c1 = _mm256_mul_ps(a, f1); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 16); c1 = _mm256_mul_ps(a, f2); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 24); c1 = _mm256_mul_ps(a, f3); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 32); c1 = _mm256_mul_ps(a, f4); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 40); c1 = _mm256_mul_ps(a, f5); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 48); c1 = _mm256_mul_ps(a, f6); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 56); c1 = _mm256_mul_ps(a, f7); c0 =
_mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 64); c1 = _mm256_mul_ps(a, f8); c0 = _mm256_add_ps(c0, c1);
		// add result to output
		a = _mm256_load_ps(out + j);
		c0 = _mm256_add_ps(c0, a);
		_mm256_stream_ps(out + j, c0);
		_img += 72;
	}
	_mm256_zeroupper();
}

// AVX 4x4 variant: 16 broadcast filter taps, 8 output pixels per iteration.
inline void dotsum_unwrapped_4x4(const float *_img, const float *filter_ptr, float *out, const int outsize)
{
	_mm256_zeroupper();
	const __m256 f0 = _mm256_broadcast_ss(&filter_ptr[0]);
	const __m256 f1 = _mm256_broadcast_ss(&filter_ptr[1]);
	const __m256 f2 = _mm256_broadcast_ss(&filter_ptr[2]);
	const __m256 f3 = _mm256_broadcast_ss(&filter_ptr[3]);
	const __m256 f4 = _mm256_broadcast_ss(&filter_ptr[4]);
	const __m256 f5 = _mm256_broadcast_ss(&filter_ptr[5]);
	const __m256 f6 = _mm256_broadcast_ss(&filter_ptr[6]);
	const __m256 f7 = _mm256_broadcast_ss(&filter_ptr[7]);
	const __m256 f8 = _mm256_broadcast_ss(&filter_ptr[8]);
	const __m256 f9 = _mm256_broadcast_ss(&filter_ptr[9]);
	const __m256 f10 = _mm256_broadcast_ss(&filter_ptr[10]);
	const __m256 f11 = _mm256_broadcast_ss(&filter_ptr[11]);
	const __m256 f12 = _mm256_broadcast_ss(&filter_ptr[12]);
	const __m256 f13 = _mm256_broadcast_ss(&filter_ptr[13]);
	const __m256 f14 = _mm256_broadcast_ss(&filter_ptr[14]);
	const __m256 f15 = _mm256_broadcast_ss(&filter_ptr[15]);
	for (int j = 0; j < outsize; j += 8)//stride) // input w
	{
		__m256 a, c0, c1;
		// multiply filter
		a = _mm256_load_ps(_img); c0 = _mm256_mul_ps(a, f0);
		a = _mm256_load_ps(_img + 8); c1 = _mm256_mul_ps(a, f1); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 16); c1 = _mm256_mul_ps(a, f2); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 24); c1 = _mm256_mul_ps(a, f3); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 32); c1 = _mm256_mul_ps(a, f4); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 40); c1 = _mm256_mul_ps(a, f5); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 48); c1 = _mm256_mul_ps(a, f6); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 56); c1 = _mm256_mul_ps(a,
f7); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 64); c1 = _mm256_mul_ps(a, f8); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 72); c1 = _mm256_mul_ps(a, f9); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 80); c1 = _mm256_mul_ps(a, f10); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 88); c1 = _mm256_mul_ps(a, f11); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 96); c1 = _mm256_mul_ps(a, f12); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 104); c1 = _mm256_mul_ps(a, f13); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 112); c1 = _mm256_mul_ps(a, f14); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 120); c1 = _mm256_mul_ps(a, f15); c0 = _mm256_add_ps(c0, c1);
		// add result to output
		a = _mm256_load_ps(out + j);
		c0 = _mm256_add_ps(c0, a);
		_mm256_stream_ps(out + j, c0);
		_img += 128;
	}
	_mm256_zeroupper();
}

// AVX 5x5 variant: 25 broadcast filter taps, 8 output pixels per iteration.
inline void dotsum_unwrapped_5x5(const float *_img, const float *filter_ptr, float *out, const int outsize)
{
	_mm256_zeroupper();
	const __m256 f0 = _mm256_broadcast_ss(&filter_ptr[0]);
	const __m256 f1 = _mm256_broadcast_ss(&filter_ptr[1]);
	const __m256 f2 = _mm256_broadcast_ss(&filter_ptr[2]);
	const __m256 f3 = _mm256_broadcast_ss(&filter_ptr[3]);
	const __m256 f4 = _mm256_broadcast_ss(&filter_ptr[4]);
	const __m256 f5 = _mm256_broadcast_ss(&filter_ptr[5]);
	const __m256 f6 = _mm256_broadcast_ss(&filter_ptr[6]);
	const __m256 f7 = _mm256_broadcast_ss(&filter_ptr[7]);
	const __m256 f8 = _mm256_broadcast_ss(&filter_ptr[8]);
	const __m256 f9 = _mm256_broadcast_ss(&filter_ptr[9]);
	const __m256 f10 = _mm256_broadcast_ss(&filter_ptr[10]);
	const __m256 f11 = _mm256_broadcast_ss(&filter_ptr[11]);
	const __m256 f12 = _mm256_broadcast_ss(&filter_ptr[12]);
	const __m256 f13 = _mm256_broadcast_ss(&filter_ptr[13]);
	const __m256 f14 = _mm256_broadcast_ss(&filter_ptr[14]);
	const __m256 f15 = _mm256_broadcast_ss(&filter_ptr[15]);
	const __m256 f16 = _mm256_broadcast_ss(&filter_ptr[16]);
	const __m256 f17 =
_mm256_broadcast_ss(&filter_ptr[17]);
	const __m256 f18 = _mm256_broadcast_ss(&filter_ptr[18]);
	const __m256 f19 = _mm256_broadcast_ss(&filter_ptr[19]);
	const __m256 f20 = _mm256_broadcast_ss(&filter_ptr[20]);
	const __m256 f21 = _mm256_broadcast_ss(&filter_ptr[21]);
	const __m256 f22 = _mm256_broadcast_ss(&filter_ptr[22]);
	const __m256 f23 = _mm256_broadcast_ss(&filter_ptr[23]);
	const __m256 f24 = _mm256_broadcast_ss(&filter_ptr[24]);
	for (int j = 0; j < outsize; j += 8)
	{
		__m256 a, c0, c1;
		a = _mm256_load_ps(_img); c0 = _mm256_mul_ps(a, f0);
		a = _mm256_load_ps(_img + 8); c1 = _mm256_mul_ps(a, f1); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 16); c1 = _mm256_mul_ps(a, f2); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 24); c1 = _mm256_mul_ps(a, f3); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 32); c1 = _mm256_mul_ps(a, f4); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 40); c1 = _mm256_mul_ps(a, f5); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 48); c1 = _mm256_mul_ps(a, f6); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 56); c1 = _mm256_mul_ps(a, f7); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 64); c1 = _mm256_mul_ps(a, f8); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 72); c1 = _mm256_mul_ps(a, f9); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 80); c1 = _mm256_mul_ps(a, f10); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 88); c1 = _mm256_mul_ps(a, f11); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 96); c1 = _mm256_mul_ps(a, f12); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 104); c1 = _mm256_mul_ps(a, f13); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 112); c1 = _mm256_mul_ps(a, f14); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 120); c1 = _mm256_mul_ps(a, f15); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 128); c1 = _mm256_mul_ps(a, f16); c0 = _mm256_add_ps(c0, c1);
		a = _mm256_load_ps(_img + 136); c1 = _mm256_mul_ps(a, f17); c0 =
_mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 144); c1 = _mm256_mul_ps(a, f18); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 152); c1 = _mm256_mul_ps(a, f19); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 160); c1 = _mm256_mul_ps(a, f20); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 168); c1 = _mm256_mul_ps(a, f21); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 176); c1 = _mm256_mul_ps(a, f22); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 184); c1 = _mm256_mul_ps(a, f23); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 192); c1 = _mm256_mul_ps(a, f24); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(out + j); c0 = _mm256_add_ps(c0, a); _mm256_stream_ps(out + j, c0); _img += 200; } _mm256_zeroupper(); } inline void dotsum_unwrapped_7x7(const float *_img, const float *filter_ptr, float *out, const int outsize) { _mm256_zeroupper(); __m256 f[49];//=new __m256(s); for(int i=0; i<49; i++) f[i]= _mm256_broadcast_ss(&filter_ptr[i]); for (int j = 0; j < outsize; j += 8) { __m256 a, c0, c1; a = _mm256_load_ps(_img); c0 = _mm256_mul_ps(a, f[0]); for(int i=1; i<49;i++) { a = _mm256_load_ps(_img + 8*i); c1 = _mm256_mul_ps(a, f[i]); c0 = _mm256_add_ps(c0, c1); } a = _mm256_load_ps(out + j); c0 = _mm256_add_ps(c0, a); _mm256_stream_ps(out + j, c0); _img += 49*8; } _mm256_zeroupper(); //delete [] f; } #else // no AVX inline void dotsum_unwrapped_2x2(const float *_img, const float *filter_ptr, float *out, const int outsize) { dotsum_unwrapped_NxN(2, _img, filter_ptr, out, outsize); } inline void dotsum_unwrapped_3x3(const float *_img, const float *filter_ptr, float *out, const int outsize) { dotsum_unwrapped_NxN(3, _img, filter_ptr, out, outsize); } inline void dotsum_unwrapped_4x4(const float *_img, const float *filter_ptr, float *out, const int outsize) { dotsum_unwrapped_NxN(4, _img, filter_ptr, out, outsize); } inline void dotsum_unwrapped_5x5(const float *_img, const float *filter_ptr, float *out, const int outsize) { 
dotsum_unwrapped_NxN(5, _img, filter_ptr, out, outsize); } inline void dotsum_unwrapped_7x7(const float *_img, const float *filter_ptr, float *out, const int outsize) { dotsum_unwrapped_NxN(7, _img, filter_ptr, out, outsize); } #endif // matrix class --------------------------------------------------- // should use opencv if available // class matrix { int _capacity; float *_x_mem; void delete_x() { if(_x_mem != NULL) delete[] _x_mem; x = NULL; _x_mem = NULL; } // 4 extra for alignment and 4 for 3 padding for SSE //float *new_x(const int size) { _x_mem = new float[size + 4+3]; x = (float *)(((uintptr_t)_x_mem + 16) & ~(uintptr_t)0x0F); return x; } // avx mem aligment float *new_x(const int size) { _x_mem = new float[size + 8 + 7]; x = (float *)(((uintptr_t)_x_mem + 32) & ~(uintptr_t)0x1F); return x; } public: std::string _name; int cols, rows, chans; int _size; int chan_stride; int chan_aligned; float *x; // size must be divisible by 8 for AVX virtual int calc_chan_stride(int w, int h) { if (chan_aligned) { int s = w*h; const int remainder = s % 8; if (remainder > 0) s += 8 - remainder; return s; } else return w*h; } matrix( ): cols(0), rows(0), chans(0), _size(0), _capacity(0), chan_stride(0), x(NULL), chan_aligned(0)/*, empty_chan(NULL)*/{} matrix( int _w, int _h, int _c=1, const float *data=NULL, int align_chan=0): cols(_w), rows(_h), chans(_c) { chan_aligned = align_chan; chan_stride = calc_chan_stride(cols, rows); _size= chan_stride*chans; _capacity=_size; x = new_x(_size); if(data!=NULL) memcpy(x,data,_size*sizeof(float)); } // copy constructor - deep copy matrix( const matrix &m) : cols(m.cols), rows(m.rows), chan_aligned(m.chan_aligned), chans(m.chans), chan_stride(m.chan_stride), _size(m._size), _capacity(m._size) {x = new_x(_size); memcpy(x,m.x,sizeof(float)*_size); /*empty_chan = new unsigned char[chans]; memcpy(empty_chan, m.empty_chan, chans);*/} // { v=m.v; x=(float*)v.data();} // copy and pad constructor matrix( const matrix &m, int pad_cols, int 
pad_rows, mojo::pad_type padding= mojo::zero, int threads=1) : cols(m.cols), rows(m.rows), chans(m.chans), chan_aligned(m.chan_aligned), chan_stride(m.chan_stride), _size(m._size), _capacity(m._size)
	{
		x = new_x(_size);
		memcpy(x, m.x, sizeof(float)*_size);
		// deep-copy then replace self with the padded version
		*this = pad(pad_cols, pad_rows, padding, threads);
	}

	~matrix() { if (x) delete_x(); }

	// shallow-slice `num_chans` channels starting at `start_channel` (deep copy)
	matrix get_chans(int start_channel, int num_chans=1) const
	{
		return matrix(cols,rows,num_chans,&x[start_channel*chan_stride]);
	}

	// if edge_pad==0, then the padded area is just 0.
	// if edge_pad==1 it fills with edge pixel colors
	// if edge_pad==2 it fills with median edge pixel color
	matrix pad(int dx, int dy, mojo::pad_type edge_pad = mojo::zero, int threads=1) const
	{
		return pad(dx, dy, dx, dy, edge_pad, threads);
	}
	matrix pad(int dx, int dy, int dx_right, int dy_bottom, mojo::pad_type edge_pad = mojo::zero, int threads=1) const
	{
		matrix v(cols+dx+dx_right,rows+dy+dy_bottom,chans);//,NULL,this->chan_aligned);
		v.fill(0);
		//float *new_x = new float[chans*w*h];
#pragma omp parallel for num_threads(threads)
		for(int k=0; k<chans; k++)
		{
			const int v_chan_offset=k*v.chan_stride;
			const int chan_offset=k*chan_stride;
			// find median color of perimeter
			float median = 0.f;
			if (edge_pad == mojo::median_edge)
			{
				int perimeter = 2 * (cols + rows - 2);
				std::vector<float> d(perimeter);
				for (int i = 0; i < cols; i++)
				{
					d[i] = x[i+ chan_offset];
					d[i + cols] = x[i + cols*(rows - 1)+ chan_offset];
				}
				for (int i = 1; i < (rows - 1); i++)
				{
					d[i + cols * 2] = x[cols*i+ chan_offset];
					// fill from back so i dont need to calc index
					d[perimeter - i] = x[cols - 1 + cols*i+ chan_offset];
				}
				std::nth_element(d.begin(), d.begin() + perimeter / 2, d.end());
				median = d[perimeter / 2];
				//for (int i = 0; i < v.rows*v.cols; i++) v.x[v_chan_offset + i] = solid_fill;
			}

			for(int j=0; j<rows; j++)
			{
				memcpy(&v.x[dx+(j+dy)*v.cols+v_chan_offset], &x[j*cols+chan_offset], sizeof(float)*cols);
				if(edge_pad== mojo::edge)
				{
					// do left/right side
					for(int i=0; i<dx; i++)
v.x[i+(j+dy)*v.cols+v_chan_offset]=x[0+j*cols+chan_offset];
					for (int i = 0; i<dx_right; i++) v.x[i + dx + cols + (j + dy)*v.cols + v_chan_offset] = x[(cols - 1) + j*cols + chan_offset];
				}
				else if (edge_pad == mojo::median_edge)
				{
					for (int i = 0; i < dx; i++) v.x[i + (j + dy)*v.cols + v_chan_offset] = median;
					for (int i = 0; i < dx_right; i++) v.x[i + dx + cols + (j + dy)*v.cols + v_chan_offset] = median;
				}
			}
			// top bottom pad
			if(edge_pad== mojo::edge)
			{
				for(int j=0; j<dy; j++) memcpy(&v.x[(j)*v.cols+v_chan_offset],&v.x[(dy)*v.cols+v_chan_offset], sizeof(float)*v.cols);
				for (int j = 0; j<dy_bottom; j++) memcpy(&v.x[(j + dy + rows)*v.cols + v_chan_offset], &v.x[(rows - 1 + dy)*v.cols + v_chan_offset], sizeof(float)*v.cols);
			}
			if (edge_pad == mojo::median_edge)
			{
				for (int j = 0; j<dy; j++) for (int i = 0; i<v.cols; i++) v.x[i + j*v.cols + v_chan_offset] = median;
				for (int j = 0; j<dy_bottom; j++) for (int i = 0; i<v.cols; i++) v.x[i + (j + dy + rows)*v.cols + v_chan_offset] = median;
			}
		}
		return v;
	}

	// extract a w x h window whose top-left corner is (dx, dy)
	matrix crop(int dx, int dy, int w, int h, int threads=1) const
	{
		matrix v(w,h,chans);
#pragma omp parallel for num_threads(threads)
		for(int k=0; k<chans; k++)
		{
			for(int j=0; j<h; j++)
			{
				memcpy(&v.x[j*w+k*v.chan_stride], &x[dx+(j+dy)*cols+k*chan_stride], sizeof(float)*w);
			}
		}
		return v;
	}

	// translate by (dx, dy), filling exposed pixels per edge_pad (pad then crop)
	mojo::matrix shift(int dx, int dy, mojo::pad_type edge_pad=mojo::zero)
	{
		int orig_cols=cols;
		int orig_rows=rows;
		int off_x=abs(dx);
		int off_y=abs(dy);
		mojo::matrix shifted= pad(off_x, off_y, edge_pad);
		return shifted.crop(off_x-dx, off_y-dy,orig_cols,orig_rows);
	}

	// horizontal mirror
	mojo::matrix flip_cols()
	{
		mojo::matrix v(cols,rows,chans);
		for(int k=0; k<chans; k++)
			for(int j=0; j<rows; j++)
				for(int i=0; i<cols; i++)
					v.x[i+j*cols+k*chan_stride]=x[(cols-i-1)+j*cols+k*chan_stride];
		return v;
	}
	// vertical mirror
	mojo::matrix flip_rows()
	{
		mojo::matrix v(cols, rows, chans);
		for (int k = 0; k<chans; k++)
			for (int j = 0; j<rows; j++)
				memcpy(&v.x[(rows-1-j)*cols + k*chan_stride],&x[j*cols + k*chan_stride], cols*sizeof(float));
return v; } void clip(float min, float max) { int s = chan_stride*chans; for (int i = 0; i < s; i++) { if (x[i] < min) x[i] = min; if (x[i] > max) x[i]=max; } } void min_max(float *min, float *max, int *min_i=NULL, int *max_i=NULL) { int s = rows*cols; int mini = 0; int maxi = 0; for (int c = 0; c < chans; c++) { const int t = chan_stride*c; for (int i = t; i < t+s; i++) { if (x[i] < x[mini]) mini = i; if (x[i] > x[maxi]) maxi = i; } } *min = x[mini]; *max = x[maxi]; if (min_i) *min_i = mini; if (max_i) *max_i = maxi; } float mean() { const int s = rows*cols; int cnt = 0;// channel*s; float average = 0; for (int c = 0; c < chans; c++) { const int t = chan_stride*c; for (int i = 0; i < s; i++) average += x[i + t]; } average = average / (float)(s*chans); return average; } float remove_mean(int channel) { int s = rows*cols; int offset = channel*chan_stride; float average=0; for(int i=0; i<s; i++) average+=x[i+offset]; average= average/(float)s; for(int i=0; i<s; i++) x[i+offset]-=average; return average; } float remove_mean() { float m=mean(); int s = chan_stride*chans; //int offset = channel*s; for(int i=0; i<s; i++) x[i]-=m; return m; } void fill(float val) { for(int i=0; i<_size; i++) x[i]=val; } void fill_random_uniform(float range) { ocall_fill_uniform(uint64_t (this), range); /* int i; int j; uint32_t seed; float value; sgx_read_rand((unsigned char *)&seed, sizeof(uint32_t)); printf("read rand in fill_random_uniform finished, seed: %d.\n", seed); for (i = 0; i < _size; i++) { x[i] = range*r4_uni ( &seed ); // printf("filled in x[%d]\n", i); }*/ /*unsigned char randnum[_size]; sgx_read_rand(randnum, _size); // sgx_read_rand((unsigned char *)x, sizeof(float)*_size); for (int i = 0; i<_size; i++) { x[i] = (randnum[i] + 0.0)*range/(256.0); // printf("range: %f, x[i]: %f\n", range, x[i]); }*/ } void fill_random_normal(float std) { ocall_fill_normal(uint64_t (this), std); /* float fn[128]; uint32_t kn[128]; int sample; uint32_t seed; float value; float wn[129]; 
    r4_nor_setup ( kn, fn, wn );
    seed = 0;
    sgx_read_rand((unsigned char *)&seed, sizeof(uint32_t));
    printf("read rand in fill_random_normal finished, seed: %d.\n", seed);
    for (int i = 0; i < _size; i++) { x[i] = std*r4_nor(&seed, kn, fn, wn); }
    */
    // std::mt19937 gen(0);
    // std::normal_distribution<float> dst(0, std);
    // for (int i = 0; i<_size; i++) x[i] = dst(gen);
}

// deep copy: resizes this matrix to m's shape/alignment and copies all data
inline matrix& operator =(const matrix &m)
{
    resize(m.cols, m.rows, m.chans, m.chan_aligned);
    memcpy(x,m.x,sizeof(float)*_size);
    // memcpy(empty_chan, m.empty_chan, chans);
    return *this;
}

// total number of floats stored (chan_stride*chans, includes padding)
int size() const {return _size;}

// Reshape to _w x _h x _c; grows the backing buffer only when needed.
// NOTE(review): growing discards the old contents (delete then fresh
// allocation, no copy) — callers needing the data must save it first.
void resize(int _w, int _h, int _c, int align_chans=0) {
    chan_aligned = align_chans;
    int new_stride = calc_chan_stride(_w,_h);
    int s = new_stride*_c;
    if(s>_capacity)
    {
        if(_capacity>0) delete_x();
        _size = s;
        _capacity=_size;
        x = new_x(_size);
    }
    cols = _w; rows = _h; chans = _c;
    _size = s;
    chan_stride = new_stride;
}

// dot vector to 2d mat: treats *this as a flat vector and returns the vector
// of dot products with each row of m_2d.
// NOTE(review): reads _size elements per row — assumes m_2d.cols >= _size.
inline matrix dot_1dx2d(const matrix &m_2d) const
{
    mojo::matrix v(m_2d.rows, 1, 1);
    for(int j=0; j<m_2d.rows; j++) v.x[j]=dot(x,&m_2d.x[j*m_2d.cols],_size);
    return v;
}

// += elementwise (no shape check; m2 must have the same _size)
inline matrix& operator+=(const matrix &m2){
    for(int i = 0; i < _size; i++) x[i] += m2.x[i];
    return *this;
}
// -= elementwise (no shape check)
inline matrix& operator-=(const matrix &m2)
{
    for (int i = 0; i < _size; i++) x[i] -= m2.x[i];
    return *this;
}

#ifndef MOJO_AVX
// *= float (scalar scale, in place)
// NOTE(review): returns matrix by value instead of matrix& — each use of the
// return value deep-copies *this.
inline matrix operator *=(const float v) {
    for (int i = 0; i < _size; i++) x[i] = x[i] * v;
    return *this;
}
#else
// SSE path: 4 floats per iteration. Presumably new_x returns 16-byte-aligned
// storage and _size is a multiple of 4 (aligned _mm_store_ps) — TODO confirm.
inline matrix operator *=(const float v)
{
    __m128 b;
    b = _mm_set_ps(v, v, v, v);
    for (int j = 0; j < _size; j += 4)
        _mm_store_ps(x + j, _mm_mul_ps(_mm_load_ps(x + j), b));
    return *this;
}
#endif
// *= matrix (elementwise/Hadamard product, in place)
inline matrix operator *=(const matrix &v)
{
    for (int i = 0; i < _size; i++) x[i] = x[i] * v.x[i];
    return *this;
}
// elementwise product returning a new matrix
inline matrix operator *(const matrix &v)
{
    matrix T(cols, rows, chans);
    for (int i = 0; i < _size; i++) T.x[i] = x[i] * v.x[i];
    return T;
}
// * float
inline matrix operator *(const float v) { matrix T(cols, rows, chans); for (int i = 0; i < _size; i++) T.x[i] = x[i] * v; return T; } // + float inline matrix operator +(const float v) { matrix T(cols, rows, chans); for (int i = 0; i < _size; i++) T.x[i] = x[i] + v; return T; } // + inline matrix operator +(matrix m2) { matrix T(cols,rows,chans); for(int i = 0; i < _size; i++) T.x[i] = x[i] + m2.x[i]; return T; } }; }// namespace
ConvolutionUnfold.h
#pragma once #include <string.h> #include <math.h> #include <algorithm> #include "General.h" #include "TensorRef.h" #include "Vector-inl.h" OPS_API int TS_Unfolded_Copy( TensorRef* finput, TensorRef* input, int kW, int kH, int dW, int dH, int padW, int padH, int nInputPlane, int inputWidth, int inputHeight, int outputWidth, int outputHeight); OPS_API int TS_Unfolded_Acc( TensorRef *finput, TensorRef *input, int kW, int kH, int dW, int dH, int padW, int padH, int nInputPlane, int inputWidth, int inputHeight, int outputWidth, int outputHeight); //OPS_API int TS_SoftmaxGrad( // TensorRef* grad_, // TensorRef* adj_, // TensorRef* val_, // int rows, // int cols, // bool addGrad); //OPS_API int TS_IndexSelect( // TensorRef* result_, // TensorRef* src_, // TensorRef* indice_, // int rows, // int cols); //OPS_API int TS_IndexSelectGrad( // TensorRef* grad_, // TensorRef* adj_, // TensorRef* indice_, // int rows, // int cols); //template<typename T> //void SoftmaxGrad(TensorRef* grad_, TensorRef* adj_, TensorRef* val_, int rows, int cols, bool addGrad) { // // T * grad = (T*)grad_->buffer; // T * adj = (T*)adj_->buffer; // T * val = (T*)val_->buffer; // // for (int j = 0; j < rows; ++j) { // T * gradRow = grad + j * cols; // T * adjRow = adj + j * cols; // T * valRow = val + j * cols; // // T sum = 0.f; // for (int i = 0; i < cols; ++i) { // sum += valRow[i] * adjRow[i]; // } // // for (int i = 0; i < cols; ++i) { // if (addGrad) // { // gradRow[i] += valRow[i] * (adjRow[i] - sum); // } // else // { // gradRow[i] = valRow[i] * (adjRow[i] - sum); // } // } // } //} //template<typename T> //void IndexSelect(TensorRef* result_, TensorRef* src_, TensorRef* indice_, int rows, int cols) //{ // T* result = (T*)result_->buffer; // T* src = (T*)src_->buffer; // T* indice = (T*)indice_->buffer; // // for (int j = 0; j < rows; j++) { // // int srcIdx = indice[j]; // T* resultRow = result + j * cols; // T* srcRow = src + srcIdx * cols; // // for (int i = 0; i < cols; ++i) { // 
resultRow[i] = srcRow[i]; // } // } //} //template<typename T> //void IndexSelectGrad(TensorRef* grad_, TensorRef* adj_, TensorRef* indice_, int rows, int cols) //{ // T* grad = (T*)grad_->buffer; // T* adj = (T*)adj_->buffer; // T* indice = (T*)indice_->buffer; // // for (int j = 0; j < rows; j++) { // // int gradIdx = indice[j]; // T* adjRow = adj + j * cols; // T* gradRow = grad + gradIdx * cols; // // for (int i = 0; i < cols; ++i) { // gradRow[i] += adjRow[i]; // } // } //} // note: due to write issues, this one cannot be parallelized as well as unfolded_copy template<typename T> void unfolded_acc( TensorRef *finput, TensorRef *input, int kW, int kH, int dW, int dH, int padW, int padH, int nInputPlane, int inputWidth, int inputHeight, int outputWidth, int outputHeight) { size_t nip; T *input_data = (T*)input->buffer; T *finput_data = (T*)finput->buffer; #pragma omp parallel for private(nip) for (nip = 0; nip < nInputPlane; nip++) { size_t kw, kh, y, x; __int64 ix = 0, iy = 0; for (kh = 0; kh < kH; kh++) { for (kw = 0; kw < kW; kw++) { T *src = finput_data + nip*(kH*kW*outputHeight*outputWidth) + kh*(kW*outputHeight*outputWidth) + kw*(outputHeight*outputWidth); T *dst = input_data + nip*(inputHeight*inputWidth); if (padW > 0 || padH > 0) { size_t lpad, rpad; for (y = 0; y < outputHeight; y++) { iy = (__int64)(y*dH - padH + kh); if (iy < 0 || iy >= inputHeight) { } else { if (dW == 1) { ix = (__int64)(0 - padW + kw); lpad = std::max(size_t(0), (padW - kw)); rpad = std::max(size_t(0), (padW - (kW - kw - 1))); Vector_add<T>(dst + (size_t)(iy*inputWidth + ix + lpad), src + (size_t)(y*outputWidth + lpad), 1, outputWidth - lpad - rpad); } else { for (x = 0; x<outputWidth; x++) { ix = (__int64)(x*dW - padW + kw); if (ix < 0 || ix >= inputWidth) { } else Vector_add<T>(dst + (size_t)(iy*inputWidth + ix), src + (size_t)(y*outputWidth + x), 1, 1); } } } } } else { for (y = 0; y < outputHeight; y++) { iy = (__int64)(y*dH + kh); ix = (__int64)(0 + kw); if (dW == 1) 
Vector_add<T>(dst + (size_t)(iy*inputWidth + ix), src + (size_t)(y*outputWidth), 1, outputWidth); else { for (x = 0; x < outputWidth; x++) Vector_add<T>(dst + (size_t)(iy*inputWidth + ix + x*dW), src + (size_t)(y*outputWidth + x), 1, 1); } } } } } } } template<typename T> void unfolded_copy(TensorRef *finput, TensorRef *input, int kW, int kH, int dW, int dH, int padW, int padH, int nInputPlane, int inputWidth, int inputHeight, int outputWidth, int outputHeight) { long k; T *input_data = (T*)input->buffer; T *finput_data = (T*)finput->buffer; #pragma omp parallel for private(k) for (k = 0; k < nInputPlane*kH*kW; k++) { size_t nip = k / (kH*kW); size_t rest = k % (kH*kW); size_t kh = rest / kW; size_t kw = rest % kW; size_t x, y; __int64 ix, iy; T *dst = finput_data + nip*(kH*kW*outputHeight*outputWidth) + kh*(kW*outputHeight*outputWidth) + kw*(outputHeight*outputWidth); T *src = input_data + nip*(inputHeight*inputWidth); if (padW > 0 || padH > 0) { size_t lpad, rpad; for (y = 0; y < outputHeight; y++) { iy = (__int64)(y*dH - padH + kh); if (iy < 0 || iy >= inputHeight) { memset(dst + y*outputWidth, 0, sizeof(T)*outputWidth); } else { if (dW == 1) { ix = (__int64)(0 - padW + kw); lpad = std::max(size_t(0), (padW - kw)); rpad = std::max(size_t(0), (padW - (kW - kw - 1))); if (outputWidth - rpad - lpad <= 0) { memset(dst + (size_t)(y*outputWidth), 0, sizeof(T)*outputWidth); } else { if (lpad > 0) memset(dst + y*outputWidth, 0, sizeof(T)*lpad); memcpy(dst + (size_t)(y*outputWidth + lpad), src + (size_t)(iy*inputWidth + ix + lpad), sizeof(T)*(outputWidth - rpad - lpad)); if (rpad > 0) memset(dst + y*outputWidth + outputWidth - rpad, 0, sizeof(T)*rpad); } } else { for (x = 0; x<outputWidth; x++) { ix = (__int64)(x*dW - padW + kw); if (ix < 0 || ix >= inputWidth) memset(dst + (size_t)(y*outputWidth + x), 0, sizeof(T) * 1); else memcpy(dst + (size_t)(y*outputWidth + x), src + (size_t)(iy*inputWidth + ix), sizeof(T)*(1)); } } } } } else { for (y = 0; y < outputHeight; y++) { 
iy = (__int64)(y*dH + kh); ix = (__int64)(0 + kw); if (dW == 1) memcpy(dst + (size_t)(y*outputWidth), src + (size_t)(iy*inputWidth + ix), sizeof(T)*outputWidth); else { for (x = 0; x<outputWidth; x++) memcpy(dst + (size_t)(y*outputWidth + x), src + (size_t)(iy*inputWidth + ix + x*dW), sizeof(T)*(1)); } } } } }
vla_crash.c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _ // RUN: %clang_cc1 -verify -triple powerpc64le-unknown-linux-gnu -fopenmp -x c -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1 // RUN: %clang_cc1 -verify -triple powerpc64le-unknown-linux-gnu -fopenmp-simd -x c -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}" int a; void foo() { int(*b)[a]; int *(**c)[a]; #pragma omp parallel if (0) b[0][0] = c[0][a][0][a]; } void bar(int n, int *a) { // expected-warning@+1 {{incompatible pointer types initializing 'int (*)[n]' with an expression of type 'int **'}} int(*p)[n] = &a; #pragma omp parallel if(0) // expected-warning@+1 {{comparison of distinct pointer types ('int (*)[n]' and 'int **')}} if (p == &a) { } } // CHECK1-LABEL: define {{[^@]+}}@foo // CHECK1-SAME: () #[[ATTR0:[0-9]+]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[B:%.*]] = alloca i32*, align 8 // CHECK1-NEXT: [[C:%.*]] = alloca i32***, align 8 // CHECK1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]) // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* @a, align 4 // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* @a, align 4 // CHECK1-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64 // CHECK1-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]]) // CHECK1-NEXT: store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4 // CHECK1-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4 // CHECK1-NEXT: call void @.omp_outlined.(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP2]], i32** [[B]], i64 [[TMP4]], 
i32**** [[C]]) #[[ATTR2:[0-9]+]] // CHECK1-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]]) // CHECK1-NEXT: ret void // // // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined. // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[VLA:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], i64 noundef [[VLA1:%.*]], i32**** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1:[0-9]+]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca i32**, align 8 // CHECK1-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[C_ADDR:%.*]] = alloca i32****, align 8 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 // CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 // CHECK1-NEXT: store i32** [[B]], i32*** [[B_ADDR]], align 8 // CHECK1-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 // CHECK1-NEXT: store i32**** [[C]], i32***** [[C_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 // CHECK1-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[B_ADDR]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load i32****, i32***** [[C_ADDR]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = load i32***, i32**** [[TMP3]], align 8 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32**, i32*** [[TMP4]], i64 0 // CHECK1-NEXT: [[TMP5:%.*]] = load i32**, i32*** [[ARRAYIDX]], align 8 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* @a, align 4 // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP6]] to i64 // CHECK1-NEXT: [[TMP7:%.*]] = mul nsw i64 [[IDXPROM]], 
[[TMP2]] // CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32*, i32** [[TMP5]], i64 [[TMP7]] // CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32*, i32** [[ARRAYIDX3]], i64 0 // CHECK1-NEXT: [[TMP8:%.*]] = load i32*, i32** [[ARRAYIDX4]], align 8 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* @a, align 4 // CHECK1-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP9]] to i64 // CHECK1-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP8]], i64 [[IDXPROM5]] // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4 // CHECK1-NEXT: [[TMP11:%.*]] = load i32*, i32** [[TMP1]], align 8 // CHECK1-NEXT: [[TMP12:%.*]] = mul nsw i64 0, [[TMP0]] // CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[TMP11]], i64 [[TMP12]] // CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[ARRAYIDX7]], i64 0 // CHECK1-NEXT: store i32 [[TMP10]], i32* [[ARRAYIDX8]], align 4 // CHECK1-NEXT: ret void // // // CHECK1-LABEL: define {{[^@]+}}@bar // CHECK1-SAME: (i32 noundef signext [[N:%.*]], i32* noundef [[A:%.*]]) #[[ATTR0]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 // CHECK1-NEXT: [[P:%.*]] = alloca i32*, align 8 // CHECK1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) // CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK1-NEXT: [[TMP3:%.*]] = bitcast i32** [[A_ADDR]] to i32* // CHECK1-NEXT: store i32* [[TMP3]], i32** [[P]], align 8 // CHECK1-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]]) // CHECK1-NEXT: store i32 [[TMP0]], i32* 
[[DOTTHREADID_TEMP_]], align 4 // CHECK1-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4 // CHECK1-NEXT: call void @.omp_outlined..1(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP2]], i32** [[P]], i32** [[A_ADDR]]) #[[ATTR2]] // CHECK1-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]]) // CHECK1-NEXT: ret void // // // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[VLA:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[P:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]]) #[[ATTR1]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[P_ADDR:%.*]] = alloca i32**, align 8 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 8 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 // CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 // CHECK1-NEXT: store i32** [[P]], i32*** [[P_ADDR]], align 8 // CHECK1-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 // CHECK1-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[P_ADDR]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[TMP1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = bitcast i32** [[TMP2]] to i32* // CHECK1-NEXT: [[CMP:%.*]] = icmp eq i32* [[TMP3]], [[TMP4]] // CHECK1-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]] // CHECK1: if.then: // CHECK1-NEXT: br label [[IF_END]] // CHECK1: if.end: // CHECK1-NEXT: ret void //
spgsolver.h
/* Algorithm for Steiner Problem in Graphs Copyright (c) Microsoft Corporation All rights reserved. MIT License Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/

#pragma once

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <iomanip>
#include <fstream>
#include <vector>
#include <algorithm>
#include "binheap.h"
#include "graph.h"
#include "solution.h"
#include "rfw_timer.h"
#include "uf.h"
#include "uset.h"
#include "voronoi.h"
#include "rfw_random.h"
#include "rfw_stack.h"
#include "drawer.h"
#include "dual.h"
#include "stedgelinear.h"
#include "pairheap.h"
#include "buckets.h"
#include <cstring>
#include <cmath>
#include "elite.h"
#include "spgconfig.h"
#include <omp.h>
#include "constructive.h"
#include "execution_log.h"
#include "LSVertexInsertion.h"
#include "LSVertexElimination.h"
#include "LSBasics.h"
#include "LSKeyPath.h"
#include "BranchBound.h"
#include "perturbation.h"
#include "preprocess.h"

using namespace std;

// Static driver for the Steiner Problem in Graphs solver: command-line
// entry point (Solve), multistart heuristic orchestration, bound lookup,
// and result logging.
class SPGSolver {
private:
	// Print an error message and terminate the process.
	static void fatal (const string &msg) {
		fprintf (stdout, "ERROR: %s.\n", msg.c_str());
		fflush(stdout);
		exit(-1);
	}

	// Print a warning but keep running.
	static void warning (const string &msg) {
		fprintf (stdout, "%s", msg.c_str());
		fflush(stdout);
	}

	/*
	static void ShowUsage() {
		fprintf (stdout, "Usage: steiner <filename> <upper bound> [-prep 1] [-seed <s>]\n");
	}*/

	// Copy into `name` the basename of `filename` with directory and
	// extension stripped (e.g. "dir/foo.stp" -> "foo"). `name` must be at
	// least as large as `filename`.
	static void ExtractFilename (char *filename, char *name) {
		strcpy (name, filename);
		//fprintf (stdout, "<%s> ", name);
		//exit(-1);
		char *lastsep = NULL;
		char *lastdot = NULL;
		char *end = &name[strlen(name)];
		char *p = name;
		// find the last path separator (either style) and the last dot
		for (p=name; p<=end; p++) {
			if (*p=='/' || *p=='\\') lastsep = p;
			if (*p=='.') lastdot = p;
		}
		if (lastsep == NULL) { lastsep = name; }
		else { lastsep ++; }
		if (lastdot == NULL) { lastdot = end; }
		// avoid weird cases (a dot inside a directory name)
		if (lastdot <= lastsep) {
			lastdot = end;
			lastsep = name;
		}
		*lastdot = 0;  // cut the extension
		// shift the basename to the front of the buffer
		int i = 0;
		while (1) {
			name[i] = *lastsep;
			if (name[i] == 0) break;
			i++;
			lastsep ++;
		}
		//fprintf (file, "graph %s\n", lastsep);
		//fprintf (file, "graph %s\n", name);
		//delete [] name;
	}

	// Write basic instance statistics to `file`.
	static void OutputGraphStats (FILE *file, Graph &g, char *filename) {
		fprintf (file, "file %s\n", filename);
		fprintf (file, "nvertices %d\n", g.VertexCount());
		fprintf (file, "nedges %d\n", g.EdgeCount());
		fprintf (file, "nterminals %d\n", g.TerminalCount());
	}

public:
	// Look up `graphname` in the local "bounds.txt" catalog. Returns the best
	// known bound (-1 if the file or entry is missing) and echoes the matching
	// instance metadata (#series / #class headers) to `logfile`.
	static EdgeCost ReadBound(FILE *logfile, char *graphname) {
		EdgeCost answer = -1;
		FILE *file = fopen ("bounds.txt", "r");
		if (!file) {
			fprintf (stdout, "Could not find bounds file.\n");
			fflush (stdout);
			return answer;
		}
		fprintf (stdout, "Reading bounds file... ");
		fflush (stdout);
		double solution;
		const int BUFSIZE = 65534; //1048574;
		char buffer [BUFSIZE+2];
		char bufseries [BUFSIZE+2];
		char bufclass [BUFSIZE+2];
		char bufinstance [BUFSIZE+2];
		sprintf (bufseries, "undefined");
		sprintf (bufclass, "undefined");
		while (fgets(buffer, BUFSIZE, file)!=0) {
			//fprintf (stdout, "<%s> ", buffer);
			// "#series"/"#class" headers apply to all following instances
			if (sscanf (buffer, "#series %s", bufseries) > 0) { continue; }
			if (sscanf (buffer, "#class %s", bufclass) > 0) { continue; }
			if (sscanf (buffer, "%s %lg", bufinstance, &solution)>0) {
				if (strcmp (bufinstance, graphname) == 0) {
					answer = (EdgeCost)solution;
					fprintf (logfile, "instance %s\n", graphname);
					fprintf (logfile, "series %s\n", bufseries);
					fprintf (logfile, "class %s\n", bufclass);
					fprintf (logfile, "bestknown %.20f\n", (double)solution);
				}
			}
		}
		fclose(file);
		//fprintf (stdout, "done (solution is %.0f).\n", (double)answer);
		//fflush (stdout);
		return answer;
	}

	// Multistart strategies selectable via -mstype.
	typedef enum {
		MS_PLAIN,
		MS_COMBINATION,
		MS_BINARY,
		MS_MULTILEVEL,
		MS_TIMEBOUNDEDCOMBINATION,
		MS_TIMEBOUNDEDMULTILEVEL,
		MS_TIMEBOUNDEDADAPTIVE,
		MS_NUMBER
	} MSType;

	// Print command-line usage and exit.
	static void ShowUsage () {
		fprintf (stdout, "Usage: steiner <stp_file> [-bb] [-ub] [-prep] [-msit] [-seed] [-mstype]\n");
		fprintf (stdout, "Valid types: plain(%d) combination(%d) binary(%d) multilevel(%d)\n", MS_PLAIN, MS_COMBINATION, MS_BINARY, MS_MULTILEVEL);
		exit (-1);
	}

	// Run the multistart strategy `mstype` for (up to) `msit` iterations on g.
	// Updates gbestfound with the best cost seen; copies the best solution
	// into outSolution when non-null; reports progress/results to stdout.
	static void RunMultistart(Graph &g, int mstype, int msit, EdgeCost &gbestfound, EdgeCost &bestknown, SteinerConfig &config, char *name, GlobalInfo *ginfo, ExecutionLog *executionLogPtr, SteinerSolution *outSolution) {
		double mstime = 0;
		EdgeCost
mscost = g.GetFixedCost();
		int elite = -1;  // use the strategy's default elite-pool size
		EdgeCost bestfound = gbestfound;
		// only run heuristics on non-trivial instances (preprocessing may have
		// reduced the graph so far that the fixed cost is the whole answer)
		if (g.EdgeCount()>0 && g.TerminalCount()>1) {
			fprintf(stdout, "MULTISTARTING WITH %d\n", mstype);
			mscost = INFINITE_COST;
			// If the multistart type is the time bounded combination multistart
			// type, do not do the incremental number of iterations.
			if (mstype == MS_TIMEBOUNDEDCOMBINATION || mstype == MS_TIMEBOUNDEDMULTILEVEL || mstype == MS_TIMEBOUNDEDADAPTIVE) {
				SteinerSolution solution(&g);
				RFWTimer mstimer(true);
				TimeBoundedMultistart(solution, mstype, msit, config.OUTPUT_INCUMBENT ? name : NULL, -1, &config, ginfo, executionLogPtr);
				mstime = mstimer.getTime();
				if (outSolution != nullptr) {
					outSolution->CopyFrom(&solution);
					mscost = solution.GetCost();
				}
			} else {
				// doubling schedule: run batches of 2,4,8,... iterations until
				// msit iterations are done (msit>0) or the time limit is hit
				int doneit = 0;
				for (int maxit = (config.TIME_LIMIT <= 0 ? (msit <= 0 ? 2 : msit) : 2);
					(msit <= 0 || doneit < msit) && (config.TIME_LIMIT <= 0 || executionLogPtr->timerPtr->getTime() < config.TIME_LIMIT);
					maxit *= 2) {
					SteinerSolution solution(&g);
					int curit = maxit;
					if (msit > 0 && doneit + maxit > msit) curit = msit - doneit;  // last, partial batch
					fprintf(stdout, "RUNNING %d ITERATIONS (%d ALREADY DONE IN %.2f SEC).\n", curit, doneit, executionLogPtr->timerPtr->getTime());
					RFWTimer mstimer(true);
					switch (mstype) {
						case MS_PLAIN: PlainMultistart(solution, curit, -1, &config); break;
						case MS_COMBINATION: CombinationMultistart(solution, curit, elite, config.OUTPUT_INCUMBENT ? name : NULL, -1, &config, ginfo, executionLogPtr); break;
						case MS_BINARY: BinaryMultistart(solution, curit, name, &config); break;
						case MS_MULTILEVEL: MultilevelMultistart(solution, curit, curit, elite, config.OUTPUT_INCUMBENT ? name : NULL, -1, &config); break;
					};
					doneit += curit;
					mstime += mstimer.getTime();
					if (solution.GetCost() < mscost) {
						fprintf(stdout, "FOUND BETTER SOLUTION. IMPROVING COST FROM %.0f TO %.0f.\n", mscost, solution.GetCost());
						mscost = solution.GetCost();
						if (outSolution != nullptr && (outSolution->GetCost() > solution.GetCost() || outSolution->EdgeCount() == 0)) {
							outSolution->CopyFrom(&solution);
						}
					}
					//if (outSolution != nullptr && (outSolution->GetCost() > solution.GetCost() || outSolution->EdgeCount() == 0)) {
					//	outSolution->CopyFrom(&solution);
					//	fprintf(stdout, "FOUND BETTER SOLUTION. IMPROVING COST FROM %.0f TO %.0f.\n", mscost, solution.GetCost());
					//	mscost = solution.GetCost();
					//}
				}
			}
		}
		if (mscost < bestfound) bestfound = mscost;
		fprintf (stdout, "Solution is %.12f.\n", (double)mscost);
		/*
		double ratio = (double)mscost / (double)bestknown;
		double error = ratio - 1;
		fprintf (stdout, "ratio %.12f\n", ratio);
		fprintf (stdout, "error %.12f\n", error);
		fprintf (stdout, "pcterror %.12f\n", 100.0 * error);
		*/
		Basics::ReportResults(stdout, "ms", mstime, mscost, bestknown);
		fprintf (stdout, "msiterations %d\n", msit);
		fprintf(stdout, "mstype %d\n", mstype);
		//fprintf (stdout, "mssolution %.0f\n", mscost);
		//fprintf (stdout, "mstimeseconds %.12f\n", mstime);
		gbestfound = bestfound;
	}

	// Program entry point: parses the command line, optionally preprocesses
	// the instance, runs the selected solver combination (multistart /
	// branch-and-bound / local-search benchmark), and writes result logs.
	static void Solve (int argc, char **argv) {
		const uint64_t version = 201706010852;
		cout << "version " << version << endl << "precision " << fixed << setprecision(20) << EDGE_COST_PRECISION << endl;
		if (argc < 2) ShowUsage();
		Graph g;
		g.ReadSTP(argv[1]);
		char *filename = argv[1];
		char *name = new char[strlen(filename)+1];
		ExtractFilename(filename, name);
		OutputGraphStats (stdout, g, argv[1]);
		fprintf (stdout, "graph %s\n", name);
		EdgeCost bestknown = ReadBound(stdout, name);
		EdgeCost bestfound = INFINITE_COST; //ReadBound("bounds.txt", );
		EdgeCost solcost = 0;
		// feature toggles, mostly driven by the command-line flags below
		bool MSTPRUNE = false;
		bool PREPROCESS = false;
		bool SAFE_PREPROCESS = false;
		bool DUALASCENT = false;  // NOTE(review): not referenced again in Solve
		bool BRANCHBOUND = false;
		bool LOCALSEARCH = false;
		bool MULTISTART = false;
		bool BINARYMULTISTART = true;
		int mstype = MS_COMBINATION;
		int msit = 0;
		int seed = 17;
		EdgeCost primal =
INFINITE_COST; //fprintf (stdout, "ARGC is %d\n", argc); SteinerConfig config; for (int i=2; i<argc; i+=2) { if (i == argc-1) ShowUsage(); if (strcmp(argv[i], "-ub")==0) { primal = atoi(argv[i+1]); fprintf (stdout, "Setting upper bound to %.0f.\n", primal); continue; } if (strcmp(argv[i], "-bb")==0) { if (atoi(argv[i+1])==0) { BRANCHBOUND = false; } else { BRANCHBOUND = true; } continue; } if (strcmp(argv[i], "-prep")==0) { if (atoi(argv[i+1])!=0) { fprintf (stdout, "Will do preprocessing.\n"); PREPROCESS = true; if (atoi(argv[i + 1]) > 1) SAFE_PREPROCESS = true; } continue; } if (strcmp(argv[i], "-ls")==0) { if (atoi(argv[i+1])!=0) { fprintf (stdout, "Will do localsearch.\n"); LOCALSEARCH = true; } continue; } if (strcmp(argv[i], "-bms")==0) { if (atoi(argv[i+1])!=0) { fprintf (stdout, "Will do binary.\n"); BINARYMULTISTART = true; } continue; } if (strcmp(argv[i], "-msit")==0) { msit = (atoi(argv[i+1])); fprintf (stdout, "Will run %d multistart iterations.\n", msit); continue; } if (strcmp(argv[i], "-mstype")==0) { mstype = (atoi(argv[i+1])); fprintf (stdout, "Will run multistart type %d.\n", mstype); assert(mstype != 14); // This is corrupt (BB with TimeBoundedMultistart) continue; } if (strcmp(argv[i], "-seed")==0) { seed = atoi(argv[i+1]); fprintf (stdout, "Set seed to %d.\n", seed); continue; } //maybe config knows what to do with this parameter config.ReadParameter (argv[i], argv[i+1]); } fflush (stdout); if (config.EARLY_STOP_BOUND < 0) {config.EARLY_STOP_BOUND = bestknown;} bestfound = primal; config.Output(stdout); fprintf (stdout, "seed %d\n", seed); // if there is a best known solution, output it whenever we find it if (config.OUTPUT_THRESHOLD<0 && bestknown>=0) { config.OUTPUT_THRESHOLD = bestknown; } RFWTimer timer(true); // solution cost log. 
ExecutionLog executionLog(&g, &timer, config.TIME_LIMIT); MULTISTART = (msit != 0); RFWRandom::randomize(seed); bool APPLY_PERTURBATION = false; if (APPLY_PERTURBATION) { fprintf (stdout, "Applying perturbation... "); int m = g.EdgeCount(); vector<EdgeCost> pertcost (m+1,-1); RFWLocalRandom random(seed+17); PerturbationTools::ApplyPerturbation(g, pertcost, random, 1, 1.0001); g.ApplyCosts(pertcost); for (int i=1; i<std::min(10, m); i++) { fprintf (stdout, "%.5f ", (double)g.GetCost(i)); } fprintf (stdout, "done.\n"); } if (PREPROCESS) { fprintf (stdout, "Should be preprocessing.\n"); RFWTimer preptime(true); Preprocessing::RunPreprocessing(g, !SAFE_PREPROCESS); fprintf (stdout, "preptime %.6f\n", preptime.getTime()); fprintf (stdout, "prepfixed %.6f\n", g.GetFixedCost()); //PrepBottleneck(g); //exit(-1); } bool GUARDED_MULTISTART = false; if (mstype > MS_NUMBER) { GUARDED_MULTISTART = true; mstype = mstype % 10; fprintf (stdout, "Will run guarded mode %d.\n", mstype); } double second_time = 0; double first_time; SteinerSolution bestSolution(&g); if (MULTISTART && GUARDED_MULTISTART) { fprintf (stdout, "There are %d threads, %d processes.\n", omp_get_max_threads(), omp_get_num_procs()); GlobalInfo ginfo; ginfo.fixed = g.GetFixedCost(); config.DEPTH_LIMIT = 128; fprintf (stdout, "Setting depth limit to %d.\n", config.DEPTH_LIMIT); omp_set_num_threads(2); RFWTimer ftimer(true); #pragma omp parallel { Graph thread_g = g; EdgeCost thread_bestfound = bestfound; fprintf (stdout, "<%d> ", omp_get_thread_num()); if (omp_get_thread_num() == 0) { RunMultistart(thread_g, mstype, msit, thread_bestfound, bestknown, config, name, &ginfo, &executionLog, nullptr); fprintf (stdout, "DONE RUNNING MULTISTART AND FOUND %.3f\n", thread_bestfound); ginfo.MakeSolved(); } else if (omp_get_thread_num() == 1) { RFWTimer stimer(true); fprintf (stdout, "Should be running something smarter here.\n"); int bbseed = seed; if (bbseed > 0) bbseed = -bbseed; BranchBound::RunBranchAndBound(thread_g, 
bbseed, thread_bestfound, thread_bestfound, bestknown, config, &ginfo, &executionLog); second_time += stimer.getTime(); fprintf (stdout, "DONE RUNNING BRANCHING-AND-BOUND!\n"); if (!ginfo.bbpruned) ginfo.MakeSolved(); else fprintf (stdout, "Did not find the optimal solution, though.\n"); } #pragma omp critical { if (thread_bestfound < bestfound) { bestfound = thread_bestfound; fprintf (stdout, "THREAD %d UPDATED TO %.3f\n", omp_get_thread_num(), bestfound); } } } #pragma omp barrier { fprintf (stdout, "Done with this.\n"); } first_time = ftimer.getTime(); fprintf (stdout, "bbpruned %d\n", ginfo.bbpruned); MULTISTART = false; BRANCHBOUND = false; } if (MULTISTART) { RunMultistart(g, mstype, msit, bestfound, bestknown, config, name, NULL, &executionLog, &bestSolution); } if (LOCALSEARCH) { int maxit = 10; for (int m=0; m<5; m++) { double besttime = 99999999; fprintf (stdout, "Running %d... ", m); fflush(stdout); if (m==3) { fprintf (stdout, "WARNING! SKIPPING METHOD 3.\n"); continue; } //if (m != 2) continue; for (int i=0; i<maxit; i++) { RFWTimer timer(true); SteinerSolution solution(&g); switch (m) { case 0: MSTPrim (g, solution); break; case 1: MSTKruskal (g, solution); break; case 2: ConstructiveAlgorithms::SPH (g, solution, NULL, 1); break; case 3: FullBoruvka (g, solution); break; case 4: TestLocalSearch (g, solution); break; } if (m>=2 && MSTPRUNE) { MSTPrune(g,solution); } solcost = solution.GetCost(); double t = timer.getTime(); if (t < besttime) besttime = t; } fprintf (stdout, "Method %d found solution of cost %d in %.3f milliseconds (best of %d runs).\n", m, solcost, besttime * 1000.0, maxit); fflush(stdout); } } if (BRANCHBOUND) { BranchBound::RunBranchAndBound(g, seed, primal, bestfound, bestknown, config, NULL, &executionLog); } double walltime = timer.getTime(); fprintf (stdout, "totalwalltimeseconds %.12f\n", walltime); //fprintf (stdout, "totaltimeseconds %.12f\n", walltime + second_time); //fprintf (stdout, "bestsolution %.0f\n", bestfound); 
// (tail of a reporting routine whose definition starts earlier in the file: it prints
// timing summaries and dumps the official "SECTION ..." log plus the final solution)
Basics::ReportResults(stdout, "total", walltime + first_time, bestfound, bestknown);
Basics::ReportResults(stdout, "totalcpu", walltime + second_time, bestfound, bestknown);

// Dump official output logs.
if (!config.LOG_FILENAME.empty()) {
	ofstream logFile(config.LOG_FILENAME.c_str());
	if (logFile.is_open()) {
		logFile << "SECTION Comment" << endl
			<< "Name \"" << name << "\"" << endl
			<< "Problem \"SPG\"" << endl
			<< "Program \"puw\"" << endl
			<< "Version \"" << version << "\"" << endl
			<< "End" << endl << endl
			<< "SECTION Solutions" << endl;
		// one line per improving solution: value first, then the timestamp it was found at
		for (size_t i = 0; i < executionLog.solCost.size(); ++i) {
			logFile << "Solution " << fixed << executionLog.solCost[i].second << " " << fixed << executionLog.solCost[i].first << endl;
		}
		logFile << "End" << endl << endl
			<< "SECTION Run" << endl
			<< "Threads 1" << endl
			<< "Time " << fixed << walltime << endl
			<< "Dual 0" << endl;
		if (executionLog.solCost.empty())
			logFile << "Primal inf" << endl;
		else
			logFile << "Primal " << fixed << executionLog.bestSolution.GetCost() << endl;
		logFile << "End" << endl << endl;
		if (!executionLog.solCost.empty()) {
			// final solution: vertices are those touched by a tree edge plus all terminals
			logFile << "SECTION Finalsolution" << endl;
			size_t numVertices = 0;
			for (int v = 1; v <= g.VertexCount(); ++v) {
				if (executionLog.bestSolution.GetDegree(v) > 0 || g.IsTerminal(v)) ++numVertices;
			}
			logFile << "Vertices " << numVertices << endl;
			for (int v = 1; v <= g.VertexCount(); ++v) {
				if (executionLog.bestSolution.GetDegree(v) > 0 || g.IsTerminal(v)) logFile << "V " << v << endl;
			}
			logFile << "Edges " << executionLog.bestSolution.EdgeCount() << endl;
			for (size_t e = 1; e <= g.EdgeCount(); ++e) {
				if (executionLog.bestSolution.Contains(e)) logFile << "E " << g.GetFirstEndpoint(e) << " " << g.GetSecondEndpoint(e) << endl;
			}
			logFile << "End" << endl;
		}
		logFile.close();
	}
}
executionLog.bestSolution.Output("steinerSolu.txt");
delete [] name;
}

// Combine two parent solutions (sa, sb) into 'target' by path relinking via cost
// perturbation: edges in both parents keep their cost (x1), edges in exactly one
// parent get a random intermediate multiplier (x100..500), edges in neither get a
// large penalty (x1000). SPH is then run on the perturbed costs, followed by
// MSTPrune and local search. 'target' is overwritten; sa and sb are only read.
static void CombineSolutions(SteinerSolution &target, SteinerSolution &sa, SteinerSolution &sb, RFWLocalRandom &random, SteinerConfig *config) {
	Graph &g = *sa.g;
	int n = g.VertexCount(); // only used by the commented-out experiment below
	int m = g.EdgeCount();
	const bool verbose = false;
	/*
	UniverseSet baselist = new UniverseSet(n);
	VoronoiData voronoi = new VoronoiData(n);
	UnionFind uf = new UnionFind(n);
	SteinerSolution solution = new SteinerSolution(g);
	BinaryHeap<ArcCost> heap = new BinaryHeap<ArcCost>(n);
	ArcCost [] pertcost = new ArcCost [m+1];
	SteinerSolution target = new SteinerSolution(g);
	Console.Error.Write("{0} x {1}:", sa.GetCost(), sb.GetCost());
	*/
	vector<EdgeCost> pertcost(m+1,-1);
	//was: 1000, (100,500), 1
	for (int e = 1; e <= m; e++) {
		// tcount = number of parents containing edge e (0, 1 or 2)
		int tcount = 0;
		if (sa.Contains(e)) tcount++;
		if (sb.Contains(e)) tcount++;
		int mult = 1;
		if (tcount == 0) mult = 1000; //random.GetInteger(200,300); //edge in neither solution: very expensive
		else if (tcount == 1) mult = random.GetInteger(100, 500); //split edge: intermediate cost
		else { mult = 1; } //random.GetInteger(100,200); } //edge in both: keep it
		pertcost[e] = g.GetCost(e) * mult; // g.GetCost(a);
	}
	int root = Basics::PickRandomTerminal(g, random);
	ConstructiveAlgorithms::SPH (g, target, &pertcost[0], root);
	MSTPrune(g,target);
	RunLocalSearch(g, target, random, -1, config);
	// NOTE(review): elsewhere costs are printed as %.0f after casting to double; if
	// EdgeCost is a floating type these %d specifiers are mismatched — confirm EdgeCost.
	if (verbose) fprintf (stdout, "%d x %d -> %d\n", sa.GetCost(), sb.GetCost(), target.GetCost());
	/*
	baselist.Reset();
	for (int v = 1; v <= n; v++) {if (g.IsTerminal(v)) baselist.Insert(v);}
	ComputeVoronoi(voronoi, baselist, heap, pertcost);
	uf.Reset();
	target.Reset();
	Boruvka(target, voronoi, uf, pertcost);
	ArcCost borcost = target.GetCost();
	FullLocalSearch(target);
	ArcCost newcost = target.GetCost();
	Console.Error.WriteLine(" {0}", newcost);
	return target;
	}*/
}

// Multistart with binary-counter combination: levelsol[j] holds a solution built by
// combining 2^j elementary solutions. Each new randomized solution is "added" like a
// binary carry chain: it is combined with the occupant of level 0, the result with
// level 1, and so on until an empty level absorbs it. The best solution seen is
// returned in 'solution'.
static void BinaryMultistart (SteinerSolution &solution, int maxit, char *outprefix, SteinerConfig *config) {
	int nlevels = 32;
	vector<SteinerSolution*> levelsol (nlevels, NULL); //solution[i]: solution obtained by combining 2^i solutions
	Graph &g = *solution.g;
	int n = g.VertexCount();
	int m = g.EdgeCount();
	RFWTimer timer(true);
	RFWLocalRandom random (RFWRandom::getInteger(1,999999999));
	EdgeCost bestcost = 999999999;
	const bool verbose =false;
	bool USE_PERTURBATION = true;
	bool ADAPTIVE_PERTURBATION = false;
	fprintf (stdout, "Running multistart for %d iterations and %d levels (perturbation=%d).\n", maxit, nlevels, USE_PERTURBATION);
	fflush(stdout);
	vector<EdgeCost> pertcost (m+1,-1);
	//bool USE_PERTURBATION = true;
	//bool ADAPTIVE_PERTURBATION = false;
	SteinerSolution cursol(&g);
	SteinerSolution bestsol(&g);
	SteinerSolution combsol(&g);
	//EdgeCost bestcost = 999999999;
	for (int i=0 ; i<maxit; i++) {
		// build one randomized solution: perturb, construct (SPH), prune, local search
		int root = random.GetInteger(1,n); //PickRandomTerminal(g, random);
		if (USE_PERTURBATION) { PerturbationTools::InitPerturbation(g, pertcost, random, config); }
		ConstructiveAlgorithms::SPH (g, cursol, USE_PERTURBATION ? &pertcost[0] : NULL, root);
		MSTPrune(g,cursol);
		RunLocalSearch(g, cursol, random, -1, config);
		if (cursol.GetCost() < bestcost) {
			bestcost = cursol.GetCost();
			bestsol.CopyFrom(&cursol);
		}
		// carry-chain combination through the levels
		int j;
		for (j=0; j<nlevels; j++) {
			fprintf (stdout, "%10d : %2d : %.0f : ", (int)i, j, bestcost);
			if (levelsol[j] == NULL) {
				// empty level: park the current solution here and stop carrying
				fprintf (stdout, "+%.0f\n", cursol.GetCost());
				levelsol[j] = new SteinerSolution(&cursol);
				break;
			}
			// so there is a solution at level j
			//SteinerSolution *refsol = elite.GetReference(random.GetInteger(1, elite.Count()));
			// try up to maxtries combinations, keeping the first that beats both parents
			int maxtries = 5;
			int t;
			for (t=0; t<maxtries; t++) {
				CombineSolutions(combsol, cursol, *levelsol[j], random, config);
				//fprintf (stdout, "%.0f x %.0f -> %.0f\n", cursol.GetCost(), levelsol[j]->GetCost(), combsol.GetCost());
				if (combsol.GetCost() < cursol.GetCost() && combsol.GetCost() < levelsol[j]->GetCost()) {
					break; //cursol.CopyFrom(&combsol);
				}
			}
			fprintf (stdout, "<%d>", t);
			// fall back to the better parent if the combination is worse
			if (combsol.GetCost() > cursol.GetCost()) {
				combsol.CopyFrom(&cursol);
				fprintf (stdout, "a");
				//fprintf (stdout, "!");
			}
			if (combsol.GetCost() > levelsol[j]->GetCost()) {
				combsol.CopyFrom(levelsol[j]);
				fprintf (stdout, "b");
			}
			cursol.CopyFrom(&combsol);
			delete levelsol[j];
			levelsol[j] = NULL;
			//if (cursol.GetCost() < cursol.
			if (cursol.GetCost() < bestcost) {
				bestcost = cursol.GetCost();
				bestsol.CopyFrom(&cursol);
			}
		}
		if (i%10==0) fflush(stdout);
		if (j==nlevels) {
			fprintf (stdout, "Ran out of levels!\n");
			break;
		}
	}
	// release any solutions still parked at some level
	for (int i=0; i<nlevels; i++) {
		if (levelsol[i]) delete levelsol[i];
	}
	solution.CopyFrom(&bestsol);
}

// Endless multistart over an elite pool: each round runs a full CombinationMultistart,
// adds its result to the pool, then improves it further by cascaded combination.
// NOTE(review): the 'config' parameter is not forwarded to CombinationMultistart, whose
// config then defaults to NULL and is dereferenced there — confirm this call is intended.
static void EliteMultistart (SteinerSolution &solution, int maxit, int capacity, char *outprefix, SteinerConfig *config) {
	Graph &g = *solution.g;
	SteinerSolution bestsol(&g);
	SteinerSolution combsol(&g);
	EdgeCost bestcost = INFINITE_COST;
	RFWLocalRandom random (RFWRandom::getInteger(1,999999999));
	SolutionPool elite(maxit);
	for (int i=0; i<999999; i++) {
		CombinationMultistart(solution, maxit, capacity, NULL, 0);
		EdgeCost curcost = solution.GetCost();
		fprintf (stdout, "Should be adding solution %.0f to capacity %d.\n", (double)curcost, capacity);
		fflush(stdout);
		int pos1 = elite.Add(&solution);
		//int pos1 = -1;
		CascadedCombination(solution, combsol, elite, -1, random, config);
		curcost = solution.GetCost();
		int pos2 = elite.Add(&solution);
		fprintf (stdout, "[%d,%d:%.0f] ", pos1, pos2, (double)solution.GetCost());
		if (curcost < bestcost) {
			bestsol.CopyFrom(&solution);
			bestcost = curcost;
		}
		fprintf (stdout, "Iteration %d: %.0f\n", i, (double)bestsol.GetCost());
		elite.Output(stdout, 8);
	}
	solution.CopyFrom(&bestsol);
}

//--------------------------
// Repeatedly combine initial solution with others from the elite set, then add the result to elite set itself.
// Combinations continue until the algorithm fails to improve 'maxfail' times.
// Combine 'solution' with random elite references until 'maxfail' consecutive
// non-improving combinations occur; the (possibly improved) solution is added to
// the elite pool. maxfail < 0 means "use config->MAX_COMB_FAIL". 'combsol' is
// scratch space supplied by the caller.
static void CascadedCombination(SteinerSolution &solution, SteinerSolution &combsol, SolutionPool &elite, int maxfail, RFWLocalRandom &random, SteinerConfig *config) {
	if (maxfail < 0) maxfail = config->MAX_COMB_FAIL;
	int failures_to_go = maxfail;
	const bool verbose = false;
	if (verbose) fprintf (stdout, "%d->", solution.GetCost());
	while (failures_to_go > 0) {
		// pick a random elite solution as the second parent
		SteinerSolution *refsol = elite.GetReference(random.GetInteger(1, elite.Count()));
		CombineSolutions(combsol, solution, *refsol, random, config);
		if (!combsol.IsBetter(&solution)) {
			failures_to_go --; // counter is NOT reset on success: a budget of maxfail failures total
		} else {
			solution.CopyFrom(&combsol);
		}
	}
	if (verbose) fprintf (stdout, "%d\n", solution.GetCost());
	elite.Add(&solution);
}

// Deprecated: scale every edge cost by 10000 and add random noise in [0,10].
// Aborts immediately via fatal(); kept only for reference.
static void AddSmallPerturbation (Graph &g) {
	fatal ("deprecated function");
	int m = g.EdgeCount();
	vector<EdgeCost> pertcost(m+1);
	for (int e=1; e<=m; e++) {
		EdgeCost c = 10000 * g.GetCost(e) + RFWRandom::getInteger(0,10);
		pertcost[e] = c;
	}
	g.ApplyCosts(pertcost);
}

// (source vertex, distance) pair used by the k-closest machinery below.
struct DistancePair {
	int source;
	EdgeCost distance;
};

// Flat storage for "k closest sources" per vertex (work in progress: GetBounds
// indexes by maxid, and FindKClosest below is unfinished).
class ClosenessData {
private:
	int k;     // number of closest sources kept per vertex
	int maxid; // number of vertices (ids are 1..maxid)
	// half-open [first,last) slice of 'data' reserved for vertex v
	void GetBounds (int v, int &first, int &last) {
		first = v*maxid;
		last = (v+1)*maxid;
	}
public:
	vector<DistancePair> data;
	ClosenessData(int _k, int _maxid) {
		k = _k;
		maxid = _maxid;
		data.resize(k*maxid+1);
	}
};

// Unfinished stub: intended to compute, for every vertex, the k closest vertices
// among 'sources'. Currently a no-op loop.
static void FindKClosest (Graph &g, int k, vector<int> sources, ClosenessData &cdata) {
	int n = g.VertexCount();
	for (int v=1; v<=n; v++) {
		//cdata[
	}
}

// Local search move: for each non-terminal v (in random order), temporarily promote
// the current key vertices (non-terminals with solution degree >= 3) to terminals,
// rebuild a solution with SPH rooted at v, then restore the terminal set. Accepts
// improvements (and ties, when MOVE_SIDEWAYS) — note it RETURNS on the first strict
// improvement, so one call performs at most one improving move.
static void KeyVertexInsertion (SteinerSolution &solution, RFWLocalRandom &random) {
	//return;
	Graph &g = *solution.g;
	int n = g.VertexCount();
	SteinerSolution tempsol(&g);
	RFWStack<int,true> tempterm(n+1); // vertices temporarily turned into terminals
	//fprintf (stdout, "Should be finding improvements!\n");
	fprintf (stdout, "k");
	const bool MOVE_SIDEWAYS = true;
	// random permutation of the vertices (Fisher-Yates)
	vector<int> perm(n+1);
	for (int v=1; v<=n; v++) {perm[v] = v;}
	for (int i=1; i<n; i++) {
		int j = random.GetInteger(i,n);
		std::swap(perm[i], perm[j]);
	}
	int improvements = 1;
	while (improvements > 0) {
		improvements = 0;
		//for (int v=1; v<=n; v++) {
		for (int p=1; p<=n; p++) {
			int v = perm[p]; //RFWRandom::getInteger(1,n); //warning! Should have a real permutation.
			//fprintf (stdout, "%d ", v);
			if (g.IsTerminal(v)) continue;
			//if (!solution.Contains(v)) continue; //MUCH WEAKER VERSION OF THE SEARCH
			/*
			if (solution.GetDegree(v) <= 2) {
				//fprintf (stdout, ".");
				continue; //WARNING: THIS IS FOR TESTING ONLY
			}*/
			//fprintf (stdout, "v%d ", v);
			tempterm.reset();
			//fflush (stdout);
			// collect the current key vertices (non-terminals with solution degree >= 3);
			// they will be promoted to temporary terminals
			for (int w=1; w<=n; w++) {
				if (g.IsTerminal(w)) continue;
				if (solution.GetDegree(w) <= 2) continue;
				//fprintf (stdout, "t%d:%d ", w, solution.GetDegree(w));
				tempterm.push(w);
			}
			//fprintf (stdout, "Created %d new terminals.\n", tempterm.getNElements());
			// push v itself (if not already pushed)
			//if (solution.GetDegree(v) <= 2) tempterm.push(v);
			int oldt = g.TerminalCount();
			//fprintf (stdout, "oldt=%d ", g.TerminalCount());
			for (int i=1; i<=tempterm.getNElements(); i++) {
				int w = tempterm.peek(i);
				//fprintf (stdout, "x");
				if (g.IsTerminal(w)) fatal ("something wrong!\n");
				g.MakeTerminal(w);
				// sanity check (only effective for the first insertion, since oldt is not updated)
				if (g.TerminalCount() == oldt) fatal ("insertion did nothing");
			}
			//fprintf (stdout, "newt=%d ", g.TerminalCount());
			tempsol.Reset();
			//fprintf (stdout, "%.0f+%.0f = ", solution.GetCost(), tempsol.GetCost());
			ConstructiveAlgorithms::SPH(g, tempsol, NULL, v);
			//fprintf (stdout, "%.0f>>%.0f ", oldcost, newcost);
			// restore the original terminal set
			for (int i=1; i<=tempterm.getNElements(); i++) {
				int w = tempterm.peek(i);
				g.UnmakeTerminal(w);
			}
			MSTPrune(g, tempsol);
			EdgeCost oldcost = solution.GetCost();
			EdgeCost newcost = tempsol.GetCost();
			//if (newcost - oldcost > 0) fprintf (stdout, "%.0f ", newcost - oldcost);
			//fflush(stdout);
			double improvement = oldcost - newcost;
			// remember the new solution if there is an improvement or a tie (if MOVE_SIDEWAYS is true)
			if (improvement > EDGE_COST_PRECISION || (MOVE_SIDEWAYS && (improvement > -EDGE_COST_PRECISION))) {
				solution.CopyFrom(&tempsol);
				if (improvement > EDGE_COST_PRECISION) {
					fprintf (stdout, ".");
					improvements ++;
					fprintf (stdout, "i%.0f ", newcost);
					fflush(stdout);
					return; // first strict improvement ends the whole search
				}
			}
		}
	}
}

// Run local search rounds while geometrically decaying the current (perturbed) edge
// costs back toward 'original' (factor 'exponent' per round, at most 'decaysteps'
// decay rounds). Afterwards restore the original costs and run a final full local
// search. 'solution' is updated in place.
static void DecayLocalSearch (SteinerSolution &solution, vector<EdgeCost> &original, RFWLocalRandom &random, int decaysteps, double exponent, SteinerConfig *config, ExecutionLog *executionLogPtr = nullptr) {
	Graph &g = *solution.g;
	int m = g.EdgeCount();
	vector<EdgeCost> current(m+1);
	const bool verbose = true;
	double precision = EDGE_COST_PRECISION;
	// run a few iterations of the local search while decaying the perturbation (towards unperturbed solution)
	int decaytogo = decaysteps;
	for (;;) {
		EdgeCost prevcost = solution.GetCost();
		// run one round of local search
		MSTPrune(g,solution);
		RunLocalSearch(g, solution, random, 1, config, executionLogPtr);
		EdgeCost newcost = solution.GetCost();
		if (decaytogo == 0) break;
		if (prevcost - newcost <= precision) {
			// no improvement under the current costs: stop decaying early
			fprintf (stdout, "<%d> ", decaysteps - decaytogo);
			break;
		}
		decaytogo --;
		// shrink the perturbation: move each cost a fraction 'exponent' of the way back
		g.RetrieveCosts(current);
		for (int e=1; e<=m; e++) {
			//current[e] = original[e] + (current[e] - original[e]) * exponent; //NOT REALLY AN EXPONENT
			current[e] = original[e] + (current[e] - original[e]) * exponent; //NOT REALLY AN EXPONENT
		}
		g.ApplyCosts(current);
		solution.UpdateCost();
	}
	g.ApplyCosts(original); //restore original edge costs
	solution.UpdateCost(); //recost the existing solution
	EdgeCost before = solution.GetCost(); // this is the original cost
	//fprintf (stdout, "d%.0f->", solution.GetCost());
	//SPH (g, solution, NULL, root);
	//fprintf (stdout, "[%d->", solution.GetCost());
	// run local search on current solution using original costs
	MSTPrune(g,solution);
	RunLocalSearch(g, solution, random, -1, config);
	EdgeCost after = solution.GetCost();
	if (verbose) {
		fprintf (stdout, " %.0f", after);
		//fprintf (stdout, " %3.0f", before - after);
	}
	//fprintf (stdout, "%d]", solution.GetCost());
}

/**
 * Generate randomized solution from scratch by perturbing edge weights, running a constructive algorithm, then applying local search.
* The local search is applied to the perturbed graph initially, but the perturbation may be gradually dampened (removed).
 *
 */
// Build one randomized solution: apply 'pertcost' to the graph, construct with SPH
// from 'root', then either decay-local-search back to the original costs (when
// LS_PERT_EXPONENT < 1) or local-search on perturbed costs and re-optimize once the
// original costs are restored. The graph's costs are always back to 'original' on exit.
static void GenerateRandomizedSolution(SteinerSolution &solution, int root, vector<EdgeCost> &pertcost, RFWLocalRandom &random, SteinerConfig *config, ExecutionLog *executionLogPtr = nullptr) {
	Graph &g = *solution.g;
	int m = g.EdgeCount();
	const bool VERBOSE_STEP = false;
	vector<EdgeCost> original (m+1); //remember original costs
	g.RetrieveCosts(original);
	// find local optimum with perturbed costs
	g.ApplyCosts(pertcost);
	ConstructiveAlgorithms::SPH (g, solution, NULL, root);
	if (executionLogPtr != nullptr) {
		fprintf(stdout, "ADDING SPH SOLUTION WITH COST %f.\n", solution.GetCost());
		executionLogPtr->AddSolution(solution);
	}
	if (VERBOSE_STEP) {
		fprintf (stdout, "Done after SPH (%d).\n", solution.GetCost());
		fflush(stdout);
	}
	int LS_PERT_ROUNDS= std::max(999,g.VertexCount());
	double LS_PERT_EXPONENT = 1.0; //no decay
	if (config) {
		LS_PERT_ROUNDS = config->LS_PERT_ROUNDS;
		LS_PERT_EXPONENT = config->LS_PERT_EXPONENT;
	}
	if (LS_PERT_EXPONENT <= 0) fatal ("invalid perturbation exponent");
	bool DECAY_LOCAL_SEARCH = (LS_PERT_EXPONENT <= 0.999999);
	if (DECAY_LOCAL_SEARCH) {
		// DecayLocalSearch restores 'original' costs itself
		DecayLocalSearch(solution, original, random, LS_PERT_ROUNDS, LS_PERT_EXPONENT, config, executionLogPtr);
	} else {
		MSTPrune(g,solution);
		if (VERBOSE_STEP) {
			fprintf (stdout, "Done after MSTPrune (%d).\n", solution.GetCost());
			fflush(stdout);
		}
		RunLocalSearch(g, solution, random, LS_PERT_ROUNDS, config, executionLogPtr); //, 2);
		if (VERBOSE_STEP) {
			fprintf (stdout, "Done after LocalSearch (%d).\n", solution.GetCost());
			fflush(stdout);
		}
		// find real local optimum
		g.ApplyCosts(original);
		solution.UpdateCost();
		//SPH (g, solution, NULL, root);
		//fprintf (stdout, "[%d->", solution.GetCost());
		MSTPrune(g,solution);
		RunLocalSearch(g, solution, random, -1, config);
		//fprintf (stdout, "%d]", solution.GetCost());
	}
}

// Elite-pool capacity heuristic: ceil(sqrt(maxit / ELITE_DENOMINATOR)).
// A NULL config means denominator 1.0.
static int ComputeCapacity(int maxit, SteinerConfig *config) {
	double denominator = 1.0;
	if (config) denominator = config->ELITE_DENOMINATOR;
	int capacity = (int)ceil(sqrt((double)maxit / denominator));
	return capacity;
}

// Multistart followed by generational recombination: seed an elite pool with
// FlexibleMultistart, then repeatedly recombine one generation into the next,
// stopping after 'maxfail' consecutive generations without a new best.
static void PlainMultistart (SteinerSolution &solution, int maxit, int capacity, SteinerConfig *config) {
	if (capacity<0) capacity = ComputeCapacity(maxit, config);
	SolutionPool elite (capacity);
	RFWLocalRandom random (RFWRandom::getInteger(1,999999999));
	FlexibleMultistart (solution, maxit, elite, NULL, maxit, config);
	//CombinationMultistart (solution, maxit, capacity, NULL, maxit, config);
	SolutionPool children (capacity);
	SolutionPool *a, *b; // parent and child generation, swapped each round
	a = &elite;
	b = &children;
	fprintf (stdout, "There are %d elite solutions.\n", elite.GetCount());
	int maxfail = 100;
	int failures = maxfail;
	EdgeCost curbest = elite.FindBestCost();
	//fprintf (stdout, "Initial best is %.0f\n", curbest);
	//exit(-1);
	while (1) {
		fprintf (stdout, "HERE!\n");
		fflush(stdout);
		RecombineGeneration(*a, *b, random, config);
		//if (b->FindBestCost() >= a->FindBestCost()) break;
		//fprintf (stdout, "Doing stuff now.\n");
		//fflush (stdout);
		//EdgeCost newbest = curbest +1;
		EdgeCost newbest = b->FindBestCost();
		if (newbest >= curbest) {
			failures --;
			if (failures == 0) break;
		} else {
			failures = maxfail;
			curbest = newbest;
		}
		fprintf (stdout, "New best solution is %.0f\n", curbest);
		//break;
		//fprintf (stdout, "Swapping (%d,%d)...\n", a->GetCount(), b->GetCount());
		//fflush(stdout);
		std::swap(a,b);
		//fprintf (stdout, "Resetting...\n");
		//fflush(stdout);
		b->HardReset();
	}
	fprintf (stdout, "Ended with solution with cost %.0f\n", curbest);
}

// Convenience wrapper: multistart with an elite pool of the standard capacity.
// NOTE(review): 'config' defaults to NULL but is dereferenced unconditionally
// (config->AGGRESSIVE_COMBINATION) — callers must always pass a config; confirm.
static void CombinationMultistart(SteinerSolution &solution, int maxit, int capacity, char *outprefix, int COMBINATION_THRESHOLD = -1, SteinerConfig *config = NULL, GlobalInfo *ginfo = NULL, ExecutionLog *executionLogPtr = nullptr) {
	if (capacity<0) capacity = ComputeCapacity(maxit, config);
	SolutionPool elite (capacity);
	if (!config->AGGRESSIVE_COMBINATION) COMBINATION_THRESHOLD = capacity;
	fprintf (stdout, "<<<<< %p >>>>>>\n", ginfo);
	FlexibleMultistart (solution, maxit, elite, outprefix, COMBINATION_THRESHOLD, config, ginfo, executionLogPtr);
}

// Time-bounded multistart: run one tentative iteration to estimate per-iteration
// time, derive an iteration budget from config->TIME_LIMIT (with a safety blowup
// factor), then dispatch to the combination or multilevel variant. Keeps the
// tentative solution if it turns out to be the best.
static void TimeBoundedMultistart(SteinerSolution &solution, int mstype, int maxitupper, char *outprefix, int COMBINATION_THRESHOLD = -1, SteinerConfig *config = NULL, GlobalInfo *ginfo = NULL, ExecutionLog *executionLogPtr = nullptr) {
	SolutionPool tentativeElite(2);
	RFWTimer tentativeTimer(true);
	// single calibration iteration (no perturbation stats output)
	FlexibleMultistart(solution, 1, tentativeElite, outprefix, COMBINATION_THRESHOLD, config, ginfo, executionLogPtr, false, false);
	double tentativeTime = tentativeTimer.getTime();
	int maxit = maxitupper;
	const double blowupFactor = 2.5; // this fell from the sky after careful consideration
	// NOTE(review): config defaults to NULL yet is dereferenced here — confirm callers.
	if (tentativeTime > 0 && config->TIME_LIMIT > 0)
		maxit = static_cast<int>(ceil(config->TIME_LIMIT / tentativeTime / blowupFactor));
	if (maxit > maxitupper) maxit = maxitupper;
	fprintf(stdout, "TENTATIVE TIME: %.3f SEC; WILL COMPUTE %d ITERATIONS (UPPER BOUND = %d).\n", tentativeTime, maxit, maxitupper);
	int capacity = ComputeCapacity(maxit, config);
	SolutionPool elite(capacity);
	if (!config->AGGRESSIVE_COMBINATION) COMBINATION_THRESHOLD = capacity;
	// Determine which multistart to call depending on mstype.
	int localtype = mstype;
	if (mstype == MS_TIMEBOUNDEDADAPTIVE) {
		localtype = maxit > 2048 ? MS_TIMEBOUNDEDMULTILEVEL : MS_TIMEBOUNDEDCOMBINATION;
	}
	if (localtype == MS_TIMEBOUNDEDCOMBINATION) {
		FlexibleMultistart(solution, maxitupper, elite, outprefix, COMBINATION_THRESHOLD, config, ginfo, executionLogPtr);
	}
	else if (localtype == MS_TIMEBOUNDEDMULTILEVEL) {
		MultilevelMultistart(solution, maxit, maxitupper, capacity, outprefix, COMBINATION_THRESHOLD, config, ginfo, executionLogPtr);
	}
	else {
		fatal("Not supported.");
	}
	if (tentativeElite.FindBestCost() < solution.GetCost()) {
		solution.CopyFrom(tentativeElite.GetReference(tentativeElite.FindBestPosition()));
		fprintf(stdout, "USING SOLUTION FROM FIRST TENTATIVE ITERATION.\n");
	}
	fprintf(stdout, "actualmstype %d\n", localtype);
}

// In-place uniform random shuffle (Fisher-Yates) of 'array'.
template<class T> static void Permute(vector<T> &array, RFWLocalRandom &random) {
	int s = (int)array.size();
	for (int i=0; i<s-1; i++) {
		int j = random.GetInteger(i,s-1);
		std::swap(array[i], array[j]);
	}
}

// Two-phase multistart: phase one runs SUBCOUNT independent multistarts (each with
// maxit/(2*SUBCOUNT) iterations) and pools their elite solutions (in random order)
// into a fresh elite set; phase two runs one big multistart seeded with that pool
// using the remaining iteration budget.
static void MultilevelMultistart(SteinerSolution &solution, int maxit, int maxitupper, int capacity, char *outprefix, int COMBINATION_THRESHOLD = -1, SteinerConfig *config = NULL, GlobalInfo *ginfo = NULL, ExecutionLog *executionLogPtr = nullptr) {
	fprintf (stdout, "SHOULD BE RUNNING MMS FROM SOLUTION %.0f\n", solution.GetCost());
	fflush(stdout);
	if (capacity<0) capacity = ComputeCapacity(maxit, config);
	const int SUBCOUNT = 4;
	SolutionPool *subelite[SUBCOUNT];
	if (!config->AGGRESSIVE_COMBINATION) COMBINATION_THRESHOLD = capacity;
	int localit = maxit / (2*SUBCOUNT);      // iterations per subproblem (phase one)
	int subcapacity = ComputeCapacity(localit, config);
	int restit = maxit - SUBCOUNT*localit;   // iterations left for phase two
	int restcap = ComputeCapacity(restit, config);
	SolutionPool elite(restcap);
	// Only do phase one of the algorithm if there are enough iterations.
	if (localit > 0) {
		for (int i = 0; i < SUBCOUNT; i++) {
			subelite[i] = new SolutionPool(subcapacity);
		}
		// Runs subcount independent multistarts using half the total number of iterations.
		for (int i = 0; i < SUBCOUNT; i++) {
			fprintf(stdout, "SUBPROBLEM %d\n", i);
			FlexibleMultistart(solution, localit, *subelite[i], outprefix, COMBINATION_THRESHOLD, config, ginfo, executionLogPtr);
			fprintf(stdout, "\n\n");
		}
		// gather all sub-elite solutions, shuffle, and feed them to the main pool
		vector<SteinerSolution*> solpointers;
		for (int i = 0; i < SUBCOUNT; i++) {
			subelite[i]->Output(stdout, 8);
			fprintf(stdout, "\n");
			int count = subelite[i]->GetCount();
			for (int j = 1; j <= count; j++) solpointers.push_back(subelite[i]->GetReference(j));
		}
		RFWLocalRandom random(RFWRandom::getInteger(1, 1000000));
		Permute(solpointers, random);
		//std::sort(solpointers.begin(), solpointers.end(), [&](SteinerSolution *x, SteinerSolution *y) {return x->GetCost() >= y->GetCost();});
		//sort (&perm[0], &perm[perm.size()], [&](int x, int y) {return totalbound[x]/count[x] > totalbound[y]/count[y];});
		for (int i = 0; i < (int)solpointers.size(); i++) {
			SteinerSolution *sol = solpointers[i];
			if (sol->GetCost() < solution.GetCost()) {
				solution.CopyFrom(sol);
			}
			elite.Add(sol);
		}
		for (int i = 0; i<SUBCOUNT; i++) {
			delete subelite[i];
		}
		/*
		for (int i=0; i<SUBCOUNT; i++) {
			subelite[i]->Output(stdout, 8);
			fprintf (stdout, "\n");
			int count = subelite[i]->GetCount();
			for (int j=1; j<=count; j++) {
				SteinerSolution *sol = subelite[i]->GetReference(j);
				if (sol->GetCost() < solution.GetCost()) {solution.CopyFrom(sol);}
				elite.Add(sol);
			}
		}*/
		elite.Output(stdout, 8);
	}
	fprintf (stdout, "SUPERPROBLEM (from solution %.0f):\n", solution.GetCost());
	FlexibleMultistart(solution, maxitupper - SUBCOUNT*localit, elite, outprefix, COMBINATION_THRESHOLD, config, ginfo, executionLogPtr);
}

// Combine every pair of parent solutions and add each child to 'children';
// the overall best (including the best parent) is added as well, so the
// incumbent is never lost between generations.
static void RecombineGeneration (SolutionPool &parents, SolutionPool &children, RFWLocalRandom &random, SteinerConfig *config) {
	int pcount = parents.GetCount();
	if (pcount == 0) return;
	fprintf (stdout, "Should be combining.\n");
	Graph &g = *(parents.GetReference(1)->g);
	SteinerSolution newsol(&g);
	SteinerSolution bestsol(&g);
	bestsol.CopyFrom(parents.GetReference(parents.FindBestPosition()));
	//fprintf (stdout, "WARNING: THIS IS VERY WRONG.\n");
	for (int i=1; i<pcount; i++) {
		SteinerSolution *a = parents.GetReference(i);
		fprintf (stdout, " %.0f", a->GetCost());
		for (int j=i+1; j<=pcount; j++) {
			SteinerSolution *b = parents.GetReference(j);
			CombineSolutions(newsol, *a, *b, random, config);
			//fprintf (stdout, "%.0f x %.0f: %.0f\t", a->GetCost(), b->GetCost(), newsol.GetCost());
			if (newsol.GetCost() < bestsol.GetCost()) {
				bestsol.CopyFrom(&newsol);
			}
			children.Add(&newsol);
		}
	}
	// make sure the best solution (even if from a previous iteration) is preserved
	children.Add(&bestsol);
	fprintf (stdout, "Best solution is %.0f\n", bestsol.GetCost());
}

// Core multistart engine. Each iteration: (optionally) perturb edge costs, build a
// randomized solution (resilient perturbation keeps the perturbed costs during local
// search), add it to 'elite', and — once i >= COMBINATION_THRESHOLD — improve it by
// cascaded combination with the elite pool. Honors ginfo's solved flag, the config
// time limit, and EARLY_STOP_BOUND. Returns the best solution found in 'solution'.
static void FlexibleMultistart(SteinerSolution &solution, int maxit, SolutionPool &elite, char *outprefix, int COMBINATION_THRESHOLD = -1, SteinerConfig *config = NULL, GlobalInfo *ginfo = NULL, ExecutionLog *executionLogPtr = nullptr, bool USE_PERTURBATION = true, bool outputStats = true) {
	Graph &g = *solution.g;
	//AddSmallPerturbation(g);
	int itbits = 6;
	//int maxit = 1 << itbits;
	//int capacity = 1 << (itbits / 2);
	/*
	if (capacity < 0) {
		double denominator = 1.0;
		if (config) denominator = config->ELITE_DENOMINATOR;
		capacity = (int)ceil(sqrt((double)maxit / denominator));
	}*/
	int n = g.VertexCount();
	SteinerSolution bestsol(&g); //best solution found so far
	SteinerSolution combsol(&g); //combined solution
	bestsol.CopyFrom(&solution);
	//SolutionPool elite (capacity);
	int capacity = elite.GetCapacity();
	//Graph &g = *solution.g;
	int m = g.EdgeCount();
	RFWTimer timer(true);
	RFWLocalRandom random (RFWRandom::getInteger(1,999999999));
	EdgeCost bestcost = INFINITE_COST;
	// if the pool is pre-seeded (e.g. by MultilevelMultistart), start from its best
	if (elite.GetCount() > 0) {
		int p = elite.FindBestPosition();
		bestsol.CopyFrom(elite.GetReference(p));
		bestcost = bestsol.GetCost();
	}
	const bool verbose = false;
	//bool = perturbation;
	bool ADAPTIVE_PERTURBATION = false;
	bool RESILIENT_PERTURBATION = USE_PERTURBATION;
	if (config) {
		int confpert = config->RESILIENT_PERTURBATION;
		if (confpert == 0) {RESILIENT_PERTURBATION = false;}
		else if (confpert == 1) {RESILIENT_PERTURBATION = true;}
	}
	fprintf (stdout, "Using resilient perturbation? %d\n", RESILIENT_PERTURBATION);
	//int COMBINATION_THRESHOLD = -1; //capacity;
	fprintf (stdout, "Running multistart for %d iterations and %d elite solutions (perturbation=%d).\n", maxit, capacity, USE_PERTURBATION);
	//fflush (stdout);
	vector<EdgeCost> pertcost (m+1,-1);
	const bool VERBOSE_STEP = false;
	int i;
	for (i=0; i<maxit; i++) {
		//if (ginfo) fprintf (stdout, "THERE IS A GINFO.\n");
		if (ginfo && ginfo->IsSolved()) {
			fprintf (stdout, "Stopping at iteration %d.\n", i);
			break;
		}
		// NOTE(review): config and executionLogPtr default to NULL/nullptr but are
		// dereferenced unconditionally here (unlike the guarded uses below) — confirm
		// all callers pass both. Also '%2.f' prints zero decimals (field width 2);
		// '%.2f' was probably intended.
		if (config->TIME_LIMIT > 0 && executionLogPtr->timerPtr->getTime() >= config->TIME_LIMIT) {
			fprintf(stdout, "Stopping at iteration %d because of time limit of %2.f sec (%.2f sec passed).\n", i, config->TIME_LIMIT, executionLogPtr->timerPtr->getTime());
			break;
		}
		int root = random.GetInteger(1,n); //PickRandomTerminal(g, random);
		if (USE_PERTURBATION) {
			ADAPTIVE_PERTURBATION = false; //((i % 2) == 1); //random.GetInteger(0,1)==0;
			//ADAPTIVE_PERTURBATION = true;
			if (ADAPTIVE_PERTURBATION) {
				PerturbationTools::AdaptivePerturbation(g, pertcost, elite, random);
			} else {
				//fprintf (stdout, "Here!\n");
				PerturbationTools::InitPerturbation(g, pertcost, random, config);
				//fflush (stdout);
			}
		}
		if (RESILIENT_PERTURBATION) {
			// both constructive and the local search use perturbation
			GenerateRandomizedSolution (solution, root, pertcost, random, config);
		} else {
			// non-resilient perturbation:
			ConstructiveAlgorithms::SPH (g, solution, USE_PERTURBATION ? &pertcost[0] : NULL, root);
			if (executionLogPtr != nullptr) executionLogPtr->AddSolution(solution);
			MSTPrune(g,solution);
			RunLocalSearch(g, solution, random, -1, config, executionLogPtr);
		}
		if (executionLogPtr != nullptr) {
			executionLogPtr->AddSolution(solution);
		}
		//fprintf (stdout, "Adding original solution...\n");
		elite.Add(&solution); //add initial solution to the pool
		//global.UpdateSolution(solution.GetCost()); //make sure we remember it's the best
		if (i >= COMBINATION_THRESHOLD) {
			CascadedCombination(solution, combsol, elite, -1, random, config);
		} else {
			fprintf (stdout, "! ");
		}
		EdgeCost solcost = solution.GetCost();
		//fprintf (stdout, "Found solution costing %d.\n", solcost);
		if (solcost < bestcost) {
			if (executionLogPtr != nullptr) {
				executionLogPtr->AddSolution(solution);
			}
			bestcost = solcost;
			bestsol.CopyFrom(&solution);
			//fprintf (stdout, "HERE (%.2f %p %p %.2f).\n", bestcost, outprefix, config, config->OUTPUT_THRESHOLD);
			// optionally dump every new record below the output threshold
			if (outprefix) {
				if (config && bestcost < config->OUTPUT_THRESHOLD) {
					fprintf (stdout, "\n<outputting solution with value %0f>\n", bestcost);
					config->OUTPUT_THRESHOLD = bestcost;
					bestsol.Output(outprefix);
				}
			}
			if (ginfo) ginfo->UpdateBestFound(bestcost);
		}
		bool localverbose = ((i % 10) == 0);
		if (localverbose) {
			fprintf (stdout, "%6d : %6d : %10.0f : %10.5f\n", i, root, (double)solcost, (double)bestcost);
			fflush(stdout);
		}
		if (config && bestcost <= config->EARLY_STOP_BOUND) {
			fprintf (stdout, "Early stop!\n");
			break;
		}
		//elite.Output(stdout);
		//if (i % 10 == 0) fflush (stdout);
	}
	if (outputStats) {
		//fprintf (stdout, "msiterations %d\n", maxit);
		fprintf(stdout, "actualiterations %d\n", i);
		fprintf(stdout, "earlystop %d\n", maxit != i);
		fprintf(stdout, "mselite %d\n", capacity);
		//fprintf (stdout, "totaltimeseconds %.8f\n", timer.getTime());
		//fprintf (stdout, "mssolution %.0f\n", (double)bestcost);
	}
	fflush (stdout);
	solution.CopyFrom(&bestsol);
}

// Simple fixed-budget multistart (no elite pool, no combination): 9999 iterations of
// perturb + SPH + MSTPrune + local search, tracking only the best cost.
// NOTE(review): the %d specifiers below print EdgeCost values directly; elsewhere the
// code casts to double and uses %.0f — confirm EdgeCost's underlying type.
static void Multistart (SteinerSolution &solution, SteinerConfig *config) {
	int maxit = 9999;
	Graph &g = *solution.g;
	int m = g.EdgeCount();
	RFWTimer timer(true);
	RFWLocalRandom random (RFWRandom::getInteger(1,999999999));
	EdgeCost bestcost = INFINITE_COST;
	bool USE_PERTURBATION = true;
	fprintf (stdout, "Running multistart for %d iterations with perturbation=%d.\n", maxit, USE_PERTURBATION);
	vector<EdgeCost> pertcost (m+1,-1);
	for (int i=0; i<maxit; i++) {
		int root = Basics::PickRandomTerminal(g, random);
		if (USE_PERTURBATION) PerturbationTools::InitPerturbation(g, pertcost, random, NULL);
		ConstructiveAlgorithms::SPH (g, solution, USE_PERTURBATION ? &pertcost[0] : NULL, root);
		MSTPrune(g,solution);
		RunLocalSearch(g, solution, random, -1, config);
		EdgeCost solcost = solution.GetCost();
		if (solcost < bestcost) {
			bestcost = solcost;
		}
		if (i % 10 == 0) {
			fprintf (stdout, "%6d : %6d : %6d : %6d\n", i, root, solcost, bestcost);
			fflush(stdout);
		}
		//if (i % 10 == 0) fflush (stdout);
	}
	fflush (stdout);
	fprintf (stdout, "totaltimeseconds %.8f\n", timer.getTime());
	fprintf (stdout, "solution %d\n", bestcost);
	/*
	if (LOCALSEARCH) {
		int maxit = 10;
		for (int m=0; m<5; m++) {
			double besttime = 99999999;
			fprintf (stdout, "Running %d... ", m);
			fflush(stdout);
			if (m==3) {
				fprintf (stdout, "WARNING! SKIPPING METHOD 3.\n");
				continue;
			}
			//if (m != 2) continue;
			for (int i=0; i<maxit; i++) {
				RFWTimer timer(true);
				SteinerSolution solution(&g);
				switch (m) {
					case 0: MSTPrim (g, solution); break;
					case 1: MSTKruskal (g, solution); break;
					case 2: SPH (g, solution, NULL, 1); break;
					case 3: FullBoruvka (g, solution); break;
					case 4: TestLocalSearch (g, solution); break;
				}
				if (m>=2 && MSTPRUNE) {
					MSTPrune(g,solution);
				}
				solcost = solution.GetCost();
				double t = timer.getTime();
				if (t < besttime) besttime = t;
			}
			fprintf (stdout, "Method %d found solution of cost %d in %.3f milliseconds (best of %d runs).\n", m, solcost, besttime * 1000.0, maxit);
			fflush(stdout);
		}
	}*/
}

// Run the configured local-search moves (config->LSTYPE string: 'v' vertex insertion,
// 'q' key-path/key-vertex elimination, 'u' vertex elimination, 'k' key vertex
// insertion; a 'w' prefix before 'k' restricts it to rounds with no prior gain) for
// up to 'maxrounds' rounds or until no improvement. maxrounds < 0 means 999.
// NOTE(review): 'verbose' is used below but its local declaration is commented out —
// it presumably resolves to a file-scope flag; confirm.
static void RunLocalSearch (Graph &g, SteinerSolution &solution, RFWLocalRandom &random, int maxrounds, SteinerConfig *config, ExecutionLog *executionLogPtr = nullptr) {
	bool RUN_Q = false;
	bool RUN_V = false;
	bool RUN_U = false;
	bool RUN_K = false;
	bool RESTRICT_K = false;
	// bool verbose = false;
	static bool first = true; // print the maxrounds banner only once per program run
	char *lstype = config->LSTYPE;
	bool wait = false;
	// parse the LSTYPE string into move flags
	for (int i=0; ;i++) {
		char c = lstype[i];
		if (c==0) break;
		if (c=='w') {
			wait = true;
			continue;
		}
		switch (c) {
			case 'k': RUN_K = true; RESTRICT_K = wait; break;
			case 'v': RUN_V = true; break;
			case 'u': RUN_U = true; break;
			case 'q': RUN_Q = true; break;
			default: fprintf (stdout, "WARNING: invalid local search parameter (%c).\n", c);
		}
		wait = false;
	}
	//fprintf (stdout, "<%d%d>", RUN_K, RESTRICT_K);
	//return;
	//int maxrounds = 999;
	if (maxrounds < 0) maxrounds = 999; //999; //large number
	if (first) {
		fprintf (stdout, "Running with maxrounds %d.\n", maxrounds);
	}
	//SPH (g, solution, NULL, PickRandomTerminal(g,random));
	//if (verbose) fprintf (stdout, "SPH found solution %d.\n", solution.GetCost());
	int n = g.VertexCount();
	EdgeCost oldcost = solution.GetCost();
	RFWTimer timer(true);
	int rounds = 0;
	int i=0;
	for (i=0; i<maxrounds; i++) {
		rounds ++;
		if (RUN_V) {
			LSVertexInsertion::VertexInsertion(g, solution, n, random);
			MSTPrune(g, solution);
			if (verbose) fprintf (stdout, " v%d ", solution.GetCost());
		}
		//fprintf (stdout, "Starting key vertex elimination!\n");
		//fflush (stdout);
		if (RUN_Q) {
			LSKeyPath::KeyVertexElimination(g, solution, random);
			//fprintf (stdout, "Ending key vertex elimination!\n");
			if (verbose) fprintf (stdout, " q%d ", solution.GetCost());
		}
		if (RUN_U) {
			LSVertexElimination::VertexElimination(g, solution, random);
			if (verbose) fprintf (stdout, " u%d ", solution.GetCost());
		}
		if (RUN_K) {
			// with RESTRICT_K, only try key vertex insertion if the other moves stalled
			if (!RESTRICT_K || solution.GetCost() == oldcost) {
				KeyVertexInsertion(solution, random);
			}
		}
		EdgeCost newcost = solution.GetCost();
		if (newcost > oldcost + EDGE_COST_PRECISION) fatal ("invalid result");
		if (newcost > oldcost - EDGE_COST_PRECISION) break; //stop if result did not improve
		if (executionLogPtr != nullptr) executionLogPtr->AddSolution(solution);
		oldcost = newcost;
	}
	const bool VERBOSE_ROUNDS = false;
	if (VERBOSE_ROUNDS) fprintf (stdout, "%d ", i);
	//fprintf (stdout, "%d ", rounds);
	if (verbose) fprintf (stdout, "\n\nDone with local search: %d (%.2f ms, %.2f ms average)\n", solution.GetCost(), 1000 * timer.getTime(), 1000 * timer.getTime() / (double)rounds);
	//exit(-1);
	first = false;
}

// Debug/benchmark driver: build an SPH solution and run up to 10 rounds of the
// V (vertex insertion) and Q (key vertex elimination) moves with exact-cost
// comparisons (no precision tolerance, unlike RunLocalSearch).
static void TestLocalSearch (Graph &g, SteinerSolution &solution) {
	RFWLocalRandom random(RFWRandom::getInteger(1,999999999));
	bool RUN_Q = true;
	bool RUN_V = true;
	bool RUN_U = false;
	bool verbose = false;
	ConstructiveAlgorithms::SPH (g, solution, NULL, Basics::PickRandomTerminal(g,random));
	if (verbose) fprintf (stdout, "SPH found solution %d.\n", solution.GetCost());
	int n = g.VertexCount();
	EdgeCost oldcost = solution.GetCost();
	RFWTimer timer(true);
	int rounds = 0;
	for (int i=0; i<10; i++) {
		rounds ++;
		if (RUN_V) {
			LSVertexInsertion::VertexInsertion(g, solution, n, random);
			MSTPrune(g, solution);
			if (verbose) fprintf (stdout, " v%d ", solution.GetCost());
		}
		//fprintf (stdout, "Starting key vertex elimination!\n");
		//fflush (stdout);
		if (RUN_Q) {
			LSKeyPath::KeyVertexElimination(g, solution, random);
			//fprintf (stdout, "Ending key vertex elimination!\n");
			if (verbose) fprintf (stdout, " q%d ", solution.GetCost());
		}
		if (RUN_U) {
			LSVertexElimination::VertexElimination(g, solution, random);
			if (verbose) fprintf (stdout, " u%d ", solution.GetCost());
		}
		EdgeCost newcost = solution.GetCost();
		if (newcost > oldcost) fatal ("invalid result");
		if (newcost == oldcost) break;
		oldcost = newcost;
	}
	if (verbose) fprintf (stdout, "\n\nDone with local search: %d (%.2f ms, %.2f ms average)\n", solution.GetCost(), 1000 * timer.getTime(), 1000 * timer.getTime() / (double)rounds);
	//exit(-1);
}

// Comput minimum spanning tree of the graph g using Prim's algorithm (ignores terminals)
// 'solution' will contain the MST edges
static void MSTPrim (Graph &g, SteinerSolution &solution) {
	bool verbose = false;
	int n = g.VertexCount();
	BinaryHeap<EdgeCost> heap(n); // = new BinaryHeap<ArcCost>(n);
	vector<int> parc (n+1); //not need to initialize
	unsigned int r = Basics::PickRandomTerminal(g); // arbitrary start vertex
	parc[r] = 0;
	int nscanned = 0;
	solution.Reset();
	//int inscount = 0;
	heap.Insert(r, 0);
	while (!heap.IsEmpty()) {
		unsigned int v;
		EdgeCost acost;
		heap.RemoveFirst(v,acost); //v, out acost);
		if (v!=r) solution.Insert(parc[v]); //add parent edge to the solution
		//scan outgoing arcs
		nscanned ++;
		SPGArc *a, *end;
		for (g.GetBounds(v,a,end); a<end; a++) {
			int w = a->head; //neighbor
			if (solution.GetDegree(w) > 0) continue; //ignore if already in solution
			// Insert doubles as decrease-key: remember the parent edge when it succeeds
			if (heap.Insert(w, a->cost)) {
				parc[w] = a->label;
				//inscount ++;
			}
		}
	}
	//fprintf (stdout, "%.3f ", (double)inscount / (double)n);
	//if (nscanned != n) fprintf (stdout, "Warning: graph is not connected");
}

//--------------------------------------------------------------
// Kruskal's algorithm to compute the MST of the full graph.
// (Could be made faster for some instances with partial sort.)
//-------------------------------------------------------------- static void MSTKruskal (Graph &g, SteinerSolution &solution) { // create list of all edges sorted in increasing order of weight const bool verbose = false; int i, m = g.EdgeCount(); vector<int> elist(m+1); for (i=0; i<m; i++) {elist[i] = i+1;} sort(&elist[0], &elist[m], [&](int x, int y) {return g.GetCost(x)<g.GetCost(y);}); // create empty solution and union find with singletons solution.Reset(); int n = g.VertexCount(); UnionFind uf(n); int togo = n-1; //number of unions left // process all edges in order for (i=0; i<m; i++) { int e = elist[i]; int v, w; g.GetEndpoints(e,v,w); if (uf.Union(v,w)) { //if successfully joined... solution.Insert(e); //...we have a new edge in the tree if (--togo==0) break; } } if (verbose) fprintf (stdout, "%.3f ", (double)i/(double)m); } /// Compute the MST of the distance network of the subgraph induced /// by the vertices in bases. /// <param name="solution">Final solution (output)</param> /// <param name="bases">Set of bases (key vertices)</param> /* static void DNH(Graph &g, SteinerSolution &solution, UniverseSet &baselist) { int n = g.VertexCount(); int m = g.EdgeCount(); VoronoiData voronoi = new VoronoiData(n); UnionFind uf = new UnionFind(n); BinaryHeap<ArcCost> heap = new BinaryHeap<ArcCost>(n); ArcCost [] pertcost = null; ComputeVoronoi(voronoi, baselist, heap, pertcost); solution.Reset(); Boruvka(solution, voronoi, uf, pertcost); //Console.Error.WriteLine("DNH"); } */ /// Boruvka-based implementation of DNH (given a Voronoi diagram and the associated union-find data structure). /// Traverses a list of edge IDs in each pass, eliminating those that are no longer boundary. /// Seems to be worse than the version that actually traverses graphs. 
static void Boruvka(Graph &g, SteinerSolution &solution, VoronoiData &voronoi, UnionFind &uf, EdgeCost *pertcost) { int v, n = g.VertexCount(); int m = g.EdgeCount(); EdgeCost solvalue = 0; const bool verbose = false; //count boundary regions in the current diagram int nregions = 0; for (v=1; v<=n; v++) { // it's not clear find is needed, unless some merges happened before if (uf.Find(voronoi.GetBase(v))==v) nregions ++; } //Boruvka's algorithm int *minarc = new int[n+1]; //minimum outgoing edge from the region based in v EdgeCost *minvalue = new EdgeCost[n+1]; //value associated with the neighbor int *elist = new int [m]; //list of all potential boundary edges for (int i=0; i<m; i++) elist[i] = (i+1); int ecount = m; //number of edges in edge list int rounds = 0; bool changes = true; while (changes && nregions > 1) { rounds ++; //if (rounds > 3) break; changes = false; //initially, we don't know what are the arcs out of each component for (v=1; v<=n; v++) { minarc[v] = -1; minvalue[v] = 0; } int nextpos = 0; //fprintf (stdout, "%d ", ecount); for (int i=0; i<ecount; i++) { int e = elist[i]; //get edge in the current position int v, w; g.GetEndpoints(e,v,w); int bv = voronoi.GetBase(v); int bw = voronoi.GetBase(w); if (bv == bw) continue; //same base bv = uf.Find(bv); bw = uf.Find(bw); if (bv == bw) continue; //same component //found a boundary edge: move it forward in the list if (i!=nextpos) elist[nextpos++] = e; // get length of actual edge EdgeCost cost = (pertcost!=NULL) ? pertcost[e] : g.GetCost(e); cost += voronoi.GetDistance(v) + voronoi.GetDistance(w); //update bv and bw if better if (minarc[bv]==-1 || (cost<minvalue[bv])) {minarc[bv]=e; minvalue[bv]=cost;} if (minarc[bw]==-1 || (cost<minvalue[bw])) {minarc[bw]=e; minvalue[bw]=cost;} } ecount = nextpos; //fewer edges for next round //join each region to its best neighbor int bvisited = 0; int b; for (b=1; b<=n; b++) { if (minarc[b] >= 0) { //best neighbor defined... 
bvisited ++; int w = 0; g.GetEndpoints(minarc[b], v, w); int bv = voronoi.GetBase(v); int bw = voronoi.GetBase(w); if (uf.Union(bv,bw)) { // if they were not already joined... changes = true; nregions--; solution.Insert(minarc[b]); while (v != bv) { int f = voronoi.GetParentArc(v); if (!solution.Insert(f)) break; v = g.GetOther(f, v); } while (w != bw) { int f = voronoi.GetParentArc(w); if (!solution.Insert(f)) break; w = g.GetOther(f, w); } } } } } if (verbose) fprintf(stdout, "\n"); delete [] minvalue; delete [] minarc; delete [] elist; } static void DNHPrim (Graph &g, SteinerSolution &solution, VoronoiData &voronoi, UnionFind &uf) { int n = g.VertexCount(); int m = g.EdgeCount(); vector <int> new2old (m+1,-1); Graph dg; dg.SetVertices(n); dg.SetEdges(m); //maximum tentative number of edges // build appropriate subgraph of the distance network int ecount = 0; for (int v=1; v<=n; v++) { int bv = uf.Find(voronoi.GetBase(v)); EdgeCost vdist = -1; SPGArc *a, *end; for (g.GetBounds(v,a,end); a<end; a++) { int w = a->head; if (v>=w) continue; int bw = uf.Find(voronoi.GetBase(w)); if (bv == bw) continue; dg.MakeTerminal(bv); dg.MakeTerminal(bw); if (vdist<0) vdist = voronoi.GetDistance(v); EdgeCost cost = a->cost + vdist + voronoi.GetDistance(w); dg.AddEdge(bv,bw,cost); new2old[++ecount] = a->label; } } dg.Commit(); //create actual graph // find a solution in the new graph SteinerSolution ds(&dg); MSTPrim(dg,ds); // transform into solution in the new graph solution.Reset(); int newm = dg.EdgeCount(); for (int e=1; e<=newm; e++) { if (!ds.Contains(e)) continue; int orige = new2old[e]; //original boundary edge if (orige<1 || orige>m) {fatal ("Edge out of range.\n");} int v,w; g.GetEndpoints(orige,v,w); solution.Insert(orige); for (;;) { int f = voronoi.GetParentArc(v); if (f<1 || !solution.Insert(f)) break; v = g.GetOther(f,v); } for (;;) { int f = voronoi.GetParentArc(w); if (f<1 || !solution.Insert(f)) break; w = g.GetOther(f,w); } } } /// Run DNH (Boruvka 
implementation) to create a solution from scratch. /// If 'r' is null, uses the original edge weights; otherwise, perturbs /// edge weights before running the algorithm. /// <param name="solution">The output solution (will be deleted).</param> /// <param name="r">Random number generator.</param> static void FullBoruvka(Graph &g, SteinerSolution &solution) { //, OptRandom r) { int n = g.VertexCount(); int m = g.EdgeCount(); UniverseSet baselist(n); // = new UniverseSet(n); VoronoiData voronoi(n); // = new VoronoiData(n); UnionFind uf(n); // = new UnionFind(n); BinaryHeap<EdgeCost> heap(n); // = new BinaryHeap<ArcCost>(n); /* ArcCost [] pertcost = null; if (r != null) { pertcost = new ArcCost[m + 1]; InitPerturbation(pertcost, r); } */ baselist.Reset(); for (int v = 1; v <= n; v++) { if (g.IsTerminal(v)) baselist.Insert(v); } //fprintf (stdout, "Should be computing Voronoi.\n"); Basics::ComputeVoronoi(g, voronoi, baselist, heap, NULL); //pertcost); solution.Reset(); // return; //fprintf (stdout, "Missing boruvka!\n"); //Boruvka(g, solution, voronoi, uf, NULL); Preprocessing::BoruvkaGraph(g, solution, voronoi, uf, NULL); //DNHPrim(g,solution,voronoi,uf); //Console.Error.WriteLine("t3:{0} ", timer.GetTime()); } /// Modifies a solution by computing the MST of the subgraph induced by its vertices /// and removing all vertices of degree one. Changes the solution. 
/// <param name="svertices">Vertices in the solution (doesn't change).</param> /// <param name="solution">Edges in the solution (may change).</param> static bool MSTPrune(Graph &g, SteinerSolution &solution) { //return 0; EdgeCost original = solution.GetCost(); int n = g.VertexCount(); UniverseSet svertices(n); Basics::MarkSolutionNodes(g, solution, svertices); MST(g,solution,svertices); Basics::Prune(g,solution); //fprintf (stdout, "Solution costs %d, gain is %d.\n", solution.GetCost(), original - solution.GetCost()); //Prune(g,solution); //fprintf (stdout, "g%d ", original - solution.GetCost()); return (solution.GetCost() < original) ? 1 : 0; } static int VeryOldPickTerminal(Graph &g, RFWLocalRandom &random) { int t = 0; int count = 0; int n = g.VertexCount(); for (int v=1; v<=n; v++) { if (g.IsTerminal(v)) { if (t==0 || g.GetDegree(v) < g.GetDegree(t)) { t = v; } } } return t; } /// <summary> /// Compute the MST of the subgraph induced by svertices /// (assumed to contain all terminals---DO I NEED THIS?) 
/// </summary> /// <param name="svertices">list of vertices to be spanned</param> /// <param name="solution">solution in which the MST will be stored</param> /// <returns>Number of vertices scanned.</returns> static int MST (Graph &g, SteinerSolution &solution, UniverseSet &svertices) { bool verbose = false; int n = g.VertexCount(); BinaryHeap<EdgeCost> heap(n); vector<int> parc (n+1); int r = Basics::PickRandomTerminal(g); if (!svertices.Contains(r)) fatal ("Terminal does not appear to belong to the solution."); parc[r] = 0; int nscanned = 0; //run Prim's algorithm solution.Reset(); heap.Insert(r, 0); while (!heap.IsEmpty()) { unsigned int v; EdgeCost acost; heap.RemoveFirst(v,acost); if (v!=r) solution.Insert(parc[v]); //add edge (p(v),v) to solution //scan vertices nscanned ++; SPGArc *a, *end; for (g.GetBounds(v,a,end); a<end; a++) { int w = a->head; if (!svertices.Contains(w)) continue; //we only care about svertex if (solution.GetDegree(w) > 0) continue; //vertex already in the new tree if (heap.Insert(w, a->cost)) parc[w] = a->label; } } //if (solution.Count() < g.TerminalCount() - 1) {fatal ("solution does not have enough vertices");} return nscanned; } };
GB_binop__first_bool.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__first_bool) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__first_bool) // A.*B function (eWiseMult): GB (_AemultB_03__first_bool) // A.*B function (eWiseMult): GB (_AemultB_bitmap__first_bool) // A*D function (colscale): GB (_AxD__first_bool) // D*A function (rowscale): GB (_DxB__first_bool) // C+=B function (dense accum): GB (_Cdense_accumB__first_bool) // C+=b function (dense accum): GB (_Cdense_accumb__first_bool) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__first_bool) // C=scalar+B GB (_bind1st__first_bool) // C=scalar+B' GB (_bind1st_tran__first_bool) // C=A+scalar GB ((none)) // C=A'+scalar GB ((none)) // C type: bool // A type: bool // B,b type: bool // BinaryOp: cij = aij #define GB_ATYPE \ bool #define GB_BTYPE \ bool #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] // 
bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ ; // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = x ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_FIRST || GxB_NO_BOOL || GxB_NO_FIRST_BOOL) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__first_bool) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__first_bool) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__first_bool) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type bool bool bwork = (*((bool *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__first_bool) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, 
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__first_bool) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__first_bool) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__first_bool) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, 
const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__first_bool) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__first_bool) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__first_bool) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__first_bool) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; bool x = (*((bool *) x_input)) ; bool *Bx = (bool *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < 
anz ; p++) { if (!GBB (Bb, p)) continue ; ; ; Cx [p] = x ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; bool *Ax = (bool *) Ax_input ; bool y = (*((bool *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; bool aij = Ax [p] ; Cx [p] = aij ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = x ; \ } GrB_Info GB (_bind1st_tran__first_bool) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ bool #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool x = (*((const bool *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ bool } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ bool aij = Ax [pA] ; \ Cx [pC] = aij ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool y = (*((const bool *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
partition.h
//===------------------------------------------------------------*- C++ -*-===// // // Ripples: A C++ Library for Influence Maximization // Marco Minutoli <marco.minutoli@pnnl.gov> // Pacific Northwest National Laboratory // //===----------------------------------------------------------------------===// // // Copyright (c) 2019, Battelle Memorial Institute // // Battelle Memorial Institute (hereinafter Battelle) hereby grants permission // to any person or entity lawfully obtaining a copy of this software and // associated documentation files (hereinafter “the Software”) to redistribute // and use the Software in source and binary forms, with or without // modification. Such person or entity may use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and may permit // others to do so, subject to the following conditions: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimers. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // 3. Other than as used herein, neither the name Battelle Memorial Institute or // Battelle may be used in any form whatsoever without the express written // consent of Battelle. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. 
IN NO EVENT SHALL BATTELLE OR CONTRIBUTORS BE LIABLE FOR ANY // DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // //===----------------------------------------------------------------------===// #ifndef RIPPLES_PARTITION_H #define RIPPLES_PARTITION_H #include <algorithm> #include <iostream> #include "ripples/utility.h" namespace ripples { //! Sequential swap ranges. //! //! \tparam ItrTy1 The iterator type of the first sequence. //! \tparam ItrTy2 The iterator type of the second sequence. //! //! \param B The begin of the first sequence. //! \param E The end of the second sequence. //! \param O The begin of the second sequence. //! \return The iterator to the one-past last element swapped. template <typename ItrTy1, typename ItrTy2> ItrTy2 swap_ranges(ItrTy1 B, ItrTy1 E, ItrTy2 O, sequential_tag) { return std::swap_ranges(B, E, O); } template <typename ItrTy1, typename ItrTy2> ItrTy2 swap_ranges(ItrTy1 B, ItrTy1 E, ItrTy2 O, size_t num_threads) { size_t toBeSwaped = std::distance(B, E); #pragma omp parallel for num_threads(num_threads) for (size_t i = 0; i < toBeSwaped; ++i) { std::iter_swap(B + i, O + i); } return O + toBeSwaped; } //! Parallel swap ranges. //! //! \tparam ItrTy1 The iterator type of the first sequence. //! \tparam ItrTy2 The iterator type of the second sequence. //! //! \param B The begin of the first sequence. //! \param E The end of the second sequence. //! \param O The begin of the second sequence. //! \return The iterator to the one-past last element swapped. 
template <typename ItrTy1, typename ItrTy2> ItrTy2 swap_ranges(ItrTy1 B, ItrTy1 E, ItrTy2 O, omp_parallel_tag) { size_t num_threads(0); #pragma omp single { num_threads = omp_get_max_threads(); } return swap_ranges(B, E, O, num_threads); } namespace { template <typename ItrTy, typename ex_tag = omp_parallel_tag> struct PartitionIndices { ItrTy begin; ItrTy end; ItrTy pivot; PartitionIndices() : begin(), end(), pivot() {} PartitionIndices(PartitionIndices &&O) : begin{std::move(O.begin)}, end{std::move(O.end)}, pivot{std::move(O.pivot)} {} PartitionIndices &operator=(PartitionIndices &&O) { this->begin = std::move(O.begin); this->end = std::move(O.end); this->pivot = std::move(O.pivot); return *this; } PartitionIndices(const PartitionIndices &O) : begin{O.begin}, end{O.end}, pivot{O.pivot} {} PartitionIndices &operator=(const PartitionIndices &O) { this->begin = O.begin; this->end = O.end; this->pivot = O.pivot; return *this; } PartitionIndices(ItrTy B, ItrTy E, ItrTy P) : begin{B}, end{E}, pivot{P} {} PartitionIndices(ItrTy B, ItrTy E) : PartitionIndices(B, E, E) {} bool operator==(const PartitionIndices &O) const { return this->begin == O.begin && this->end == O.end && this->pivot == O.pivot; } PartitionIndices mergeBlocks(const PartitionIndices &O, size_t num_threads) { PartitionIndices result(*this); if (this->pivot == this->begin && O.pivot == O.begin) { result.end = O.end; return result; } else if (this->pivot == this->end) { result.end = O.end; result.pivot = O.pivot; return result; } if (std::distance(this->pivot, this->end) < std::distance(O.begin, O.pivot)) { size_t toBeMoved = std::distance(this->pivot, this->end); swap_ranges(this->pivot, this->end, std::prev(O.pivot, toBeMoved), num_threads); result.pivot = std::prev(O.pivot, toBeMoved); } else { result.pivot = swap_ranges(O.begin, O.pivot, this->pivot, num_threads); } result.end = O.end; return result; } // PartitionIndices operator+(const PartitionIndices &O) { // PartitionIndices result(*this); // if 
(this->pivot == this->begin && O.pivot == O.begin) { // result.end = O.end; // return result; // } else if (this->pivot == this->end) { // result.end = O.end; // result.pivot = O.pivot; // return result; // } // if (std::distance(this->pivot, this->end) < // std::distance(O.begin, O.pivot)) { // size_t toBeMoved = std::distance(this->pivot, this->end); // swap_ranges(this->pivot, this->end, std::prev(O.pivot, toBeMoved), // ex_tag{}); // result.pivot = std::prev(O.pivot, toBeMoved); // } else { // result.pivot = swap_ranges(O.begin, O.pivot, this->pivot, ex_tag{}); // } // result.end = O.end; // return result; // } }; } // namespace template <typename ItrTy, typename UnaryPredicate> ItrTy partition(ItrTy B, ItrTy E, UnaryPredicate P, size_t num_threads) { std::vector<PartitionIndices<ItrTy>> indices(num_threads, PartitionIndices<ItrTy>(B, E)); #pragma omp parallel num_threads(num_threads) { size_t num_elements = std::distance(B, E); size_t threadnum = omp_get_thread_num(), numthreads = omp_get_num_threads(); size_t low = num_elements * threadnum / numthreads, high = num_elements * (threadnum + 1) / numthreads; indices[threadnum].begin = B + low; indices[threadnum].end = std::min(E, B + high); indices[threadnum].pivot = std::partition(indices[threadnum].begin, indices[threadnum].end, P); } for (size_t j = 1; j < num_threads; j <<= 1) { #pragma omp parallel num_threads(num_threads >> j) { #pragma omp for schedule(dynamic) for (size_t i = 0; i < (num_threads - j); i += j * 2) { indices[i] = indices[i].mergeBlocks(indices[i + j], std::min(2 * j, num_threads)); } } } return indices[0].pivot; } //! Reorder a sequence in such a way that all the element for which a predicate //! is true preceed the one for which the predicate is false. //! \tparam ItrTy The type of the iterator of the input sequence. //! \tparam UnaryPredicate The type of a unary predicate object. //! //! \param B The start of the sequence to be partitioned. //! 
\param E The end of the sequence to be partitioned. //! \param P A C++ collable object implementing the predicate. //! \return An iterator to the first element for which the predicate is false. template <typename ItrTy, typename UnaryPredicate> ItrTy partition(ItrTy B, ItrTy E, UnaryPredicate P, omp_parallel_tag) { size_t num_threads(1); #pragma omp single { num_threads = omp_get_max_threads(); } return partition(B, E, P, num_threads); } //! Reorder a sequence in such a way that all the element for which a predicate //! is true preceed the one for which the predicate is false. //! //! \tparam ItrTy The type of the iterator of the input sequence. //! \tparam UnaryPredicate The type of a unary predicate object. //! //! \param B The start of the sequence to be partitioned. //! \param E The end of the sequence to be partitioned. //! \param P A C++ collable object implementing the predicate. //! \return An iterator to the first element for which the predicate is false. template <typename ItrTy, typename UnaryPredicate> ItrTy partition(ItrTy B, ItrTy E, UnaryPredicate P, sequential_tag) { return std::partition(B, E, P); } } // namespace ripples #endif /* RIPPLES_PARTITION_H */
CPULauncher.h
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2018 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. 
// ---------------------------------------------------------------------------- #pragma once #include <cassert> #include <vector> #include "Open3D/Core/AdvancedIndexing.h" #include "Open3D/Core/Indexer.h" #include "Open3D/Core/ParallelUtil.h" #include "Open3D/Core/Tensor.h" #include "Open3D/Utility/Console.h" namespace open3d { namespace kernel { class CPULauncher { public: template <typename func_t> static void LaunchUnaryEWKernel(const Indexer& indexer, func_t element_kernel) { #ifdef _OPENMP #pragma omp parallel for schedule(static) #endif for (int64_t workload_idx = 0; workload_idx < indexer.NumWorkloads(); ++workload_idx) { element_kernel(indexer.GetInputPtr(0, workload_idx), indexer.GetOutputPtr(workload_idx)); } } template <typename func_t> static void LaunchBinaryEWKernel(const Indexer& indexer, func_t element_kernel) { #ifdef _OPENMP #pragma omp parallel for schedule(static) #endif for (int64_t workload_idx = 0; workload_idx < indexer.NumWorkloads(); ++workload_idx) { element_kernel(indexer.GetInputPtr(0, workload_idx), indexer.GetInputPtr(1, workload_idx), indexer.GetOutputPtr(workload_idx)); } } template <typename func_t> static void LaunchAdvancedIndexerKernel(const AdvancedIndexer& indexer, func_t element_kernel) { #ifdef _OPENMP #pragma omp parallel for schedule(static) #endif for (int64_t workload_idx = 0; workload_idx < indexer.NumWorkloads(); ++workload_idx) { element_kernel(indexer.GetInputPtr(workload_idx), indexer.GetOutputPtr(workload_idx)); } } template <typename scalar_t, typename func_t> static void LaunchReductionKernelSerial(const Indexer& indexer, func_t element_kernel) { for (int64_t workload_idx = 0; workload_idx < indexer.NumWorkloads(); ++workload_idx) { element_kernel(indexer.GetInputPtr(0, workload_idx), indexer.GetOutputPtr(workload_idx)); } } /// Create num_threads workers to compute partial reductions and then reduce /// to the final results. This only applies to reduction op with one output. 
template <typename scalar_t, typename func_t> static void LaunchReductionKernelTwoPass(const Indexer& indexer, func_t element_kernel, scalar_t identity) { if (indexer.NumOutputElements() > 1) { utility::LogError( "Internal error: two-pass reduction only works for " "single-output reduction ops."); } int64_t num_workloads = indexer.NumWorkloads(); int64_t num_threads = parallel_util::GetMaxThreads(); int64_t workload_per_thread = (num_workloads + num_threads - 1) / num_threads; std::vector<scalar_t> thread_results(num_threads, identity); #ifdef _OPENMP #pragma omp parallel for schedule(static) #endif for (int64_t thread_idx = 0; thread_idx < num_threads; ++thread_idx) { int64_t start = thread_idx * workload_per_thread; int64_t end = std::min(start + workload_per_thread, num_workloads); for (int64_t workload_idx = start; workload_idx < end; ++workload_idx) { element_kernel(indexer.GetInputPtr(0, workload_idx), &thread_results[thread_idx]); } } void* output_ptr = indexer.GetOutputPtr(0); for (int64_t thread_idx = 0; thread_idx < num_threads; ++thread_idx) { element_kernel(&thread_results[thread_idx], output_ptr); } } template <typename scalar_t, typename func_t> static void LaunchReductionParallelDim(const Indexer& indexer, func_t element_kernel) { // Prefers outer dimension >= num_threads. const int64_t* indexer_shape = indexer.GetMasterShape(); const int64_t num_dims = indexer.NumDims(); int64_t num_threads = parallel_util::GetMaxThreads(); // Init best_dim as the outer-most non-reduction dim. 
int64_t best_dim = num_dims - 1; while (best_dim >= 0 && indexer.IsReductionDim(best_dim)) { best_dim--; } for (int64_t dim = best_dim; dim >= 0 && !indexer.IsReductionDim(dim); --dim) { if (indexer_shape[dim] >= num_threads) { best_dim = dim; break; } else if (indexer_shape[dim] > indexer_shape[best_dim]) { best_dim = dim; } } if (best_dim == -1) { utility::LogError( "Internal error: all dims are reduction dims, use " "LaunchReductionKernelTwoPass instead."); } #ifdef _OPENMP #pragma omp parallel for schedule(static) #endif for (int64_t i = 0; i < indexer_shape[best_dim]; ++i) { Indexer sub_indexer(indexer); sub_indexer.ShrinkDim(best_dim, i, 1); LaunchReductionKernelSerial<scalar_t>(sub_indexer, element_kernel); } } }; } // namespace kernel } // namespace open3d
fir_testing2.c
//
//  max_01.c
//
//
//  Build examples:
//  /usr/local/bin/gcc-8 -fopenmp -std=c11 -march=native -O3 -ffast-math -o fir_testing2.app fir_testing2.c
//  ./fir_testing2.app
//  gcc -std=c11 -mavx -mfma -O3 -ffast-math -o fir_testing2.app fir_testing2.c
//  gcc -std=c11 -march=native -O3 -ffast-math -o fir_testing2.app fir_testing2.c

/*
Reference MATLAB timing experiment this benchmark mirrors:

x = rand(1,1e7);
[b,a] = cheby1(7,3,2000/100000);
tic
for i = 1:10
y = filter(b,1,x);
end
toc/10

%How much slower when 'a' is used?
%about 2x as long
tic
for i = 1:10
y = filter(b,a,x);
end
toc/10

tic
for i = 1:10
y = sl.array.mex_filter(b,a,x);
end
toc/10
*/

#include <omp.h>
#include <immintrin.h>
#include <stdio.h>
#include <stdlib.h> /* calloc/free — was missing; implicit declaration of calloc is UB in C11 */
#include <time.h>
#include <stdint.h>

#define N_BYTES_SIMD 4

/*
 * Benchmark of an 8-tap FIR filter (y = conv(c, x), causal taps only) over
 * 1e8 doubles, comparing two hand-vectorized AVX2/FMA experiments (both
 * currently disabled with if(0)) against a plain auto-vectorized loop.
 * Prints elapsed time and the last output sample as a sanity value.
 */
int main()
{
    //data[i] < threshold && data[i+1] >= threshold
    //Double - about the same time for sparse data
    //Single - noticeably faster ...

    int n_samples = (int)1e8;

    clock_t clock_begin;
    clock_t clock_end;

    double *x = calloc(n_samples, sizeof(double));
    double *y = calloc(n_samples, sizeof(double));
    if (x == NULL || y == NULL) {
        // calloc was previously unchecked; a failed 800 MB allocation
        // would have crashed on first write.
        fprintf(stderr, "failed to allocate %d samples\n", n_samples);
        free(x);
        free(y);
        return 1;
    }

    int filter_length = 8;
    double c[8] = {0.123, 0.234, 0.345, 0.456, 0.567, 0.678, 0.789, 0.890};

    double time_spent_std = 0;

    // Deterministic ramp input so runs are comparable.
    for (int i = 0; i < n_samples; i++) {
        x[i] = i;
    }

    clock_begin = clock();

    // Experiment 1 (disabled): 8 outputs per iteration via 12 overlapping
    // unaligned loads; measured ~664 ms — no faster than the simple loop.
    if (0) {
        __m256d c0 = _mm256_set1_pd(c[0]);
        __m256d c1 = _mm256_set1_pd(c[1]);
        __m256d c2 = _mm256_set1_pd(c[2]);
        __m256d c3 = _mm256_set1_pd(c[3]);
        __m256d c4 = _mm256_set1_pd(c[4]);
        __m256d c5 = _mm256_set1_pd(c[5]);
        __m256d c6 = _mm256_set1_pd(c[6]);
        __m256d c7 = _mm256_set1_pd(c[7]);

        __m256d t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15;
        __m256d f0, f1, f2, f3;
        __m256d r0, r1;
        __m256d x03, x47, x12, x36, x21, x25, x30, x14, x41, x52, x63, x74;

        for (int j = filter_length; j < n_samples; j += 8) {
            // Each xAB register holds samples x[j+A-3 .. j+B-3] etc.; the tap
            // mapping (coefficient * shifted window) is:
            //   c0 * x74,x30   c1 * x63,x21   c2 * x52,x12   c3 * x41,x03
            //   c4 * x30,x14   c5 * x21,x25   c6 * x12,x36   c7 * x03,x47
            x74 = _mm256_loadu_pd(&x[j + 4]);
            x63 = _mm256_loadu_pd(&x[j + 3]);
            x52 = _mm256_loadu_pd(&x[j + 2]);
            x41 = _mm256_loadu_pd(&x[j + 1]);
            x30 = _mm256_loadu_pd(&x[j + 0]);
            x21 = _mm256_loadu_pd(&x[j - 1]);
            x12 = _mm256_loadu_pd(&x[j - 2]);
            x03 = _mm256_loadu_pd(&x[j - 3]);
            x14 = _mm256_loadu_pd(&x[j - 4]);
            x25 = _mm256_loadu_pd(&x[j - 5]);
            x36 = _mm256_loadu_pd(&x[j - 6]);
            x47 = _mm256_loadu_pd(&x[j - 7]);

            t0 = _mm256_mul_pd(c0, x74);
            t1 = _mm256_mul_pd(c0, x30);
            t2 = _mm256_fmadd_pd(c1, x63, t0);
            t3 = _mm256_fmadd_pd(c1, x21, t1);
            t4 = _mm256_mul_pd(c2, x52);
            t5 = _mm256_mul_pd(c2, x12);
            t6 = _mm256_fmadd_pd(c3, x41, t4);
            t7 = _mm256_fmadd_pd(c3, x03, t5);
            t8 = _mm256_mul_pd(c4, x30);
            t9 = _mm256_mul_pd(c4, x14);
            t10 = _mm256_fmadd_pd(c5, x21, t8);
            t11 = _mm256_fmadd_pd(c5, x25, t9);
            t12 = _mm256_mul_pd(c6, x12);
            t13 = _mm256_mul_pd(c6, x36);
            t14 = _mm256_fmadd_pd(c7, x03, t12);
            t15 = _mm256_fmadd_pd(c7, x47, t13);

            f0 = _mm256_add_pd(t3, t7);
            f1 = _mm256_add_pd(t11, t15);
            r0 = _mm256_add_pd(f0, f1);
            _mm256_storeu_pd(&y[j], r0);

            f2 = _mm256_add_pd(t2, t6);
            f3 = _mm256_add_pd(t10, t14);
            r1 = _mm256_add_pd(f2, f3);
            // NOTE(review): with j += 8 this second store looks like it should
            // land at &y[j+4], not &y[j+3] (it overlaps r0's last lane and
            // leaves y[j+7] unwritten) — confirm before enabling this branch.
            _mm256_storeu_pd(&y[j + 3], r1);
        }
    }

    // Experiment 2 (disabled): streaming accumulator pipeline — 4 outputs per
    // iteration, each output accumulates one tap per loop pass; ~573 ms.
    if (0) {
        __m256d x0, x1, x2, x3, x4, x5, x6, x7; // windows 0..7 taps back

        __m256d c0 = _mm256_set1_pd(c[0]);
        __m256d c1 = _mm256_set1_pd(c[1]);
        __m256d c2 = _mm256_set1_pd(c[2]);
        __m256d c3 = _mm256_set1_pd(c[3]);
        __m256d c4 = _mm256_set1_pd(c[4]);
        __m256d c5 = _mm256_set1_pd(c[5]);
        __m256d c6 = _mm256_set1_pd(c[6]);
        // BUG FIX: was _mm256_set1_pd(c[6]) — the last tap duplicated c[6]
        // instead of using c[7].
        __m256d c7 = _mm256_set1_pd(c[7]);

        __m256d y0, y1, y2, y3, y4, y5, y6, y7;

        // Prime the 7-stage accumulator pipeline with the first 7 windows.
        y1 = _mm256_loadu_pd(x);      // 0:3  => becomes y0 on first run
        y2 = _mm256_loadu_pd(x + 4);  // 4:7  => becomes y1 on first run
        y3 = _mm256_loadu_pd(x + 8);
        y4 = _mm256_loadu_pd(x + 12);
        y5 = _mm256_loadu_pd(x + 16);
        y6 = _mm256_loadu_pd(x + 20);
        y7 = _mm256_loadu_pd(x + 24);

        for (int j = 28; j < n_samples; j += 4) {
            //TODO: Can we avoid the awkward loads????
            //Perhaps store temporary variables ...
            //Can we do the shift in the fma stage????

            // Shift the pipeline: each accumulator advances one tap stage.
            y0 = y1;
            y1 = y2;
            y2 = y3;
            y3 = y4;
            y4 = y5;
            y5 = y6;
            y6 = y7;

            x0 = _mm256_loadu_pd(&x[j - 28 - 0]);
            x1 = _mm256_loadu_pd(&x[j - 24 - 1]);
            x2 = _mm256_loadu_pd(&x[j - 20 - 2]);
            x3 = _mm256_loadu_pd(&x[j - 16 - 3]);
            x4 = _mm256_loadu_pd(&x[j - 12 - 4]);
            x5 = _mm256_loadu_pd(&x[j - 8 - 5]);
            x6 = _mm256_loadu_pd(&x[j - 4 - 6]);
            x7 = _mm256_loadu_pd(&x[j - 0 - 7]);

            // BUG FIX: was _mm256_add_pd(c0, x0), which computed c0 + x0 and
            // discarded the accumulated partial sums; the final tap must be
            // fused-multiply-added into the pipeline like all the others.
            y0 = _mm256_fmadd_pd(c0, x0, y0);
            _mm256_storeu_pd(&y[j - 28], y0);

            y1 = _mm256_fmadd_pd(c1, x1, y1);
            y2 = _mm256_fmadd_pd(c2, x2, y2);
            y3 = _mm256_fmadd_pd(c3, x3, y3);
            y4 = _mm256_fmadd_pd(c4, x4, y4);
            y5 = _mm256_fmadd_pd(c5, x5, y5);
            y6 = _mm256_fmadd_pd(c6, x6, y6);
            y7 = _mm256_fmadd_pd(c7, x7, y7);
        }
        //TODO: We still have a little remaining at the end to handle ...
    }

    // Active path (~653 ms): plain loop, auto-vectorized via OpenMP SIMD.
    if (1) {
        //Standard loop approach ----------------------------------------------
#pragma omp parallel for simd
        for (int j = filter_length; j < n_samples; j++) {
            // (fixed a stray "+ +" — a harmless unary plus — in this sum)
            y[j] = c[0] * x[j]     + c[1] * x[j - 1] + c[2] * x[j - 2] + c[3] * x[j - 3]
                 + c[4] * x[j - 4] + c[5] * x[j - 5] + c[6] * x[j - 6] + c[7] * x[j - 7];
        }
    }

    clock_end = clock();
    // NOTE(review): clock() reports CPU time summed over all threads, so the
    // OpenMP path under-reports its wall-clock speedup — consider
    // omp_get_wtime() if per-thread scaling matters.
    time_spent_std += (double)(clock_end - clock_begin) / CLOCKS_PER_SEC;

    printf("time (ms): %g\n", 1000 * time_spent_std);
    printf("y_last: %g\n", y[n_samples - 1]);

    free(x);
    free(y);
    return 0;
}
magsac.h
#pragma once #include <limits> #include <chrono> #include <memory> #include "model.h" #include "model_score.h" #include "sampler.h" #include "uniform_sampler.h" #include <math.h> #include "gamma_values.cpp" #ifdef _WIN32 #include <ppl.h> #endif template <class DatumType, class ModelEstimator> class MAGSAC { public: enum Version { // The original version of MAGSAC. It works well, however, can be quite slow in many cases. MAGSAC_ORIGINAL, // The recently proposed MAGSAC++ algorithm which keeps the accuracy of the original MAGSAC but is often orders of magnitude faster. MAGSAC_PLUS_PLUS }; MAGSAC(const Version magsac_version_ = Version::MAGSAC_PLUS_PLUS) : time_limit(std::numeric_limits<double>::max()), // desired_fps(-1), iteration_limit(std::numeric_limits<size_t>::max()), maximum_threshold(10.0), apply_post_processing(true), mininum_iteration_number(50), partition_number(5), core_number(1), number_of_irwls_iters(1), interrupting_threshold(1.0), last_iteration_number(0), log_confidence(0), point_number(0), magsac_version(magsac_version_) { } ~MAGSAC() {} // A function to run MAGSAC. bool run( const cv::Mat &points_, // The input data points const double confidence_, // The required confidence in the results ModelEstimator& estimator_, // The model estimator gcransac::sampler::Sampler<cv::Mat, size_t> &sampler_, // The sampler used gcransac::Model &obtained_model_, // The estimated model parameters int &iteration_number_, // The number of iterations done ModelScore &model_score_); // The score of the estimated model // A function to set the maximum inlier-outlier threshold void setMaximumThreshold(const double maximum_threshold_) { maximum_threshold = maximum_threshold_; } void setVersion(const Version magsac_version_) { magsac_version = magsac_version_; } // A function to set the inlier-outlier threshold used for speeding up the procedure // and for determining the required number of iterations. 
void setReferenceThreshold(const double threshold_) { interrupting_threshold = threshold_; } double getReferenceThreshold() { return interrupting_threshold; } // Setting the flag determining if post-processing is needed void applyPostProcessing(bool value_) { apply_post_processing = value_; } // A function to set the maximum number of iterations void setIterationLimit(size_t iteration_limit_) { iteration_limit = iteration_limit_; } // A function to set the minimum number of iterations void setMinimumIterationNumber(size_t mininum_iteration_number_) { mininum_iteration_number = mininum_iteration_number_; } // A function to set the number of cores used in the original MAGSAC algorithm. // In MAGSAC++, it is not used. Note that when multiple MAGSACs run in parallel, // it is beneficial to keep the core number one for each independent MAGSAC. // Otherwise, the threads will act weirdly. void setCoreNumber(size_t core_number_) { if (magsac_version == Version::MAGSAC_PLUS_PLUS) fprintf(stderr, "Setting the core number for MAGSAC++ is deprecated."); core_number = core_number_; } // Setting the number of partitions used in the original MAGSAC algorithm // to speed up the procedure. In MAGSAC++, this parameter is not used. void setPartitionNumber(size_t partition_number_) { if (magsac_version == Version::MAGSAC_PLUS_PLUS) fprintf(stderr, "Setting the partition number for MAGSAC++ is deprecated."); partition_number = partition_number_; } // A function to set a desired minimum frames-per-second (FPS) value. void setFPS(int fps_) { desired_fps = fps_; // The required FPS. // The time limit which the FPS implies time_limit = fps_ <= 0 ? std::numeric_limits<double>::max() : 1.0 / fps_; } // The post-processing algorithm applying sigma-consensus to the input model once. 
bool postProcessing( const cv::Mat &points, // All data points const gcransac::Model &so_far_the_best_model, // The input model to be improved gcransac::Model &output_model, // The improved model parameters ModelScore &output_score, // The score of the improved model const ModelEstimator &estimator); // The model estimator // The function determining the quality/score of a model using the original MAGSAC // criterion. Note that this function is significantly slower than the quality // function of MAGSAC++. void getModelQuality( const cv::Mat& points_, // All data points const gcransac::Model& model_, // The input model const ModelEstimator& estimator_, // The model estimator double& marginalized_iteration_number_, // The required number of iterations marginalized over the noise scale double& score_); // The score/quality of the model // The function determining the quality/score of a // model using the MAGSAC++ criterion. void getModelQualityPlusPlus( const cv::Mat &points_, // All data points const gcransac::Model &model_, // The model parameter const ModelEstimator &estimator_, // The model estimator class double &score_, // The score to be calculated const double &previous_best_score_); // The score of the previous so-far-the-best model size_t number_of_irwls_iters; protected: Version magsac_version; // The version of MAGSAC used size_t iteration_limit; // Maximum number of iterations allowed size_t mininum_iteration_number; // Minimum number of iteration before terminating double maximum_threshold; // The maximum sigma value size_t core_number; // Number of core used in sigma-consensus double time_limit; // A time limit after the algorithm is interrupted int desired_fps; // The desired FPS (TODO: not tested with MAGSAC) bool apply_post_processing; // Decides if the post-processing step should be applied int point_number; // The current point number int last_iteration_number; // The iteration number implied by the last run of sigma-consensus double 
log_confidence; // The logarithm of the required confidence size_t partition_number; // Number of partitions used to speed up sigma-consensus double interrupting_threshold; // A threshold to speed up MAGSAC by interrupting the sigma-consensus procedure whenever there is no chance of being better than the previous so-far-the-best model bool sigmaConsensus( const cv::Mat& points_, const gcransac::Model& model_, gcransac::Model& refined_model_, ModelScore& score_, const ModelEstimator& estimator_, const ModelScore& best_score_); bool sigmaConsensusPlusPlus( const cv::Mat &points_, const gcransac::Model& model_, gcransac::Model& refined_model_, ModelScore &score_, const ModelEstimator &estimator_, const ModelScore &best_score_); }; template <class DatumType, class ModelEstimator> bool MAGSAC<DatumType, ModelEstimator>::run( const cv::Mat& points_, const double confidence_, ModelEstimator& estimator_, gcransac::sampler::Sampler<cv::Mat, size_t> &sampler_, gcransac::Model& obtained_model_, int& iteration_number_, ModelScore &model_score_) { // Initialize variables std::chrono::time_point<std::chrono::system_clock> start, end; // Variables for time measuring: start and end times std::chrono::duration<double> elapsed_seconds; // Variables for time measuring: elapsed time log_confidence = log(1.0 - confidence_); // The logarithm of 1 - confidence point_number = points_.rows; // Number of points const int sample_size = estimator_.sampleSize(); // The sample size required for the estimation size_t max_iteration = iteration_limit; // The maximum number of iterations initialized to the iteration limit int iteration = 0; // Current number of iterations gcransac::Model so_far_the_best_model; // Current best model ModelScore so_far_the_best_score; // The score of the current best model std::unique_ptr<size_t[]> minimal_sample(new size_t[sample_size]); // The sample used for the estimation std::vector<size_t> pool(points_.rows); for (size_t point_idx = 0; point_idx < point_number; 
++point_idx) pool[point_idx] = point_idx; if (points_.rows < sample_size) { fprintf(stderr, "There are not enough points for applying robust estimation. Minimum is %d; while %d are given.\n", sample_size, points_.rows); return false; } // Set the start time variable if there is some time limit set if (desired_fps > -1) start = std::chrono::system_clock::now(); constexpr size_t max_unsuccessful_model_generations = 50; // Main MAGSAC iteration while (mininum_iteration_number > iteration || iteration < max_iteration) { // Increase the current iteration number ++iteration; // Sample a minimal subset std::vector<gcransac::Model> models; // The set of estimated models size_t unsuccessful_model_generations = 0; // The number of unsuccessful model generations // Try to select a minimal sample and estimate the implied model parameters while (++unsuccessful_model_generations < max_unsuccessful_model_generations) { // Get a minimal sample randomly if (!sampler_.sample(pool, // The index pool from which the minimal sample can be selected minimal_sample.get(), // The minimal sample sample_size)) // The size of a minimal sample continue; // Check if the selected sample is valid before estimating the model // parameters which usually takes more time. if (!estimator_.isValidSample(points_, // All points minimal_sample.get())) // The current sample continue; // Estimate the model from the minimal sample if (estimator_.estimateModel(points_, // All data points minimal_sample.get(), // The selected minimal sample &models)) // The estimated models break; } // If the method was not able to generate any usable models, break the cycle. 
iteration += unsuccessful_model_generations - 1; // Select the so-far-the-best from the estimated models for (const auto &model : models) { ModelScore score; // The score of the current model gcransac::Model refined_model; // The refined model parameters // Apply sigma-consensus to refine the model parameters by marginalizing over the noise level sigma bool success; if (magsac_version == Version::MAGSAC_ORIGINAL) success = sigmaConsensus(points_, model, refined_model, score, estimator_, so_far_the_best_score); else success = sigmaConsensusPlusPlus(points_, model, refined_model, score, estimator_, so_far_the_best_score); // Continue if the model was rejected if (!success || score.score == -1) continue; // Save the iteration number when the current model is found score.iteration = iteration; // Update the best model parameters if needed if (so_far_the_best_score < score) { so_far_the_best_model = refined_model; // Update the best model parameters so_far_the_best_score = score; // Update the best model's score max_iteration = MIN(max_iteration, last_iteration_number); // Update the max iteration number, but do not allow to increase } } // Update the time parameters if a time limit is set if (desired_fps > -1) { end = std::chrono::system_clock::now(); elapsed_seconds = end - start; // Interrupt if the time limit is exceeded if (elapsed_seconds.count() > time_limit) break; } } // Apply sigma-consensus as a post processing step if needed and the estimated model is valid if (apply_post_processing) { // TODO } obtained_model_ = so_far_the_best_model; iteration_number_ = iteration; model_score_ = so_far_the_best_score; return so_far_the_best_score.score > 0; } template <class DatumType, class ModelEstimator> bool MAGSAC<DatumType, ModelEstimator>::postProcessing( const cv::Mat &points_, const gcransac::Model &model_, gcransac::Model &refined_model_, ModelScore &refined_score_, const ModelEstimator &estimator_) { fprintf(stderr, "Sigma-consensus++ is not implemented yet as 
post-processing.\n"); return false; } template <class DatumType, class ModelEstimator> bool MAGSAC<DatumType, ModelEstimator>::sigmaConsensus( const cv::Mat &points_, const gcransac::Model& model_, gcransac::Model& refined_model_, ModelScore &score_, const ModelEstimator &estimator_, const ModelScore &best_score_) { // Set up the parameters constexpr double L = 1.05; constexpr double k = ModelEstimator::getSigmaQuantile(); constexpr double threshold_to_sigma_multiplier = 1.0 / k; constexpr size_t sample_size = estimator_.sampleSize(); static auto comparator = [](std::pair<double, int> left, std::pair<double, int> right) { return left.first < right.first; }; const int point_number = points_.rows; double current_maximum_sigma = this->maximum_threshold; // Calculating the residuals std::vector< std::pair<double, size_t> > all_residuals; all_residuals.reserve(point_number); // If it is not the first run, consider the previous best and interrupt the validation when there is no chance of being better if (best_score_.inlier_number > 0) { // Number of inliers which should be exceeded int points_remaining = best_score_.inlier_number; // Collect the points which are closer than the threshold which the maximum sigma implies for (int point_idx = 0; point_idx < point_number; ++point_idx) { // Calculate the residual of the current point const double residual = estimator_.residual(points_.row(point_idx), model_); if (current_maximum_sigma > residual) { // Store the residual of the current point and its index all_residuals.emplace_back(std::make_pair(residual, point_idx)); // Count points which are closer than a reference threshold to speed up the procedure if (residual < interrupting_threshold) --points_remaining; } // Interrupt if there is no chance of being better // TODO: replace this part by SPRT test if (point_number - point_idx < points_remaining) return false; } // Store the number of really close inliers just to speed up the procedure // by interrupting the next 
verifications. score_.inlier_number = best_score_.inlier_number - points_remaining; } else { // The number of really close points size_t points_close = 0; // Collect the points which are closer than the threshold which the maximum sigma implies for (size_t point_idx = 0; point_idx < point_number; ++point_idx) { // Calculate the residual of the current point const double residual = estimator_.residual(points_.row(point_idx), model_); if (current_maximum_sigma > residual) { // Store the residual of the current point and its index all_residuals.emplace_back(std::make_pair(residual, point_idx)); // Count points which are closer than a reference threshold to speed up the procedure if (residual < interrupting_threshold) ++points_close; } } // Store the number of really close inliers just to speed up the procedure // by interrupting the next verifications. score_.inlier_number = points_close; } std::vector<gcransac::Model> sigma_models; std::vector<size_t> sigma_inliers; std::vector<double> final_weights; // The number of possible inliers const size_t possible_inlier_number = all_residuals.size(); // Sort the residuals in ascending order std::sort(all_residuals.begin(), all_residuals.end(), comparator); // The maximum threshold is set to be slightly bigger than the distance of the // farthest possible inlier. 
current_maximum_sigma = all_residuals.back().first + std::numeric_limits<double>::epsilon(); const double sigma_step = current_maximum_sigma / partition_number; last_iteration_number = 10000; score_.score = 0; // The weights calculated by each parallel process std::vector<std::vector<double>> point_weights_par(partition_number, std::vector<double>(possible_inlier_number, 0)); // If OpenMP is used, calculate things in parallel #ifdef USE_OPENMP #pragma omp parallel for num_threads(core_number) for (int partition_idx = 0; partition_idx < partition_number; ++partition_idx) { // The maximum sigma value in the current partition const double max_sigma = (partition_idx + 1) * sigma_step; // Find the last element which has smaller distance than 'max_threshold' // Since the vector is ordered binary search can be used to find that particular element. const auto &last_element = std::upper_bound(all_residuals.begin(), all_residuals.end(), std::make_pair(max_sigma, 0), comparator); const size_t sigma_inlier_number = last_element - all_residuals.begin(); // Put the indices into a vector std::vector<size_t> sigma_inliers; sigma_inliers.reserve(sigma_inlier_number); // Store the points which are closer than the current sigma limit for (size_t relative_point_idx = 0; relative_point_idx < sigma_inlier_number; ++relative_point_idx) sigma_inliers.emplace_back(all_residuals[relative_point_idx].second); // Check if there are enough inliers to fit a model if (sigma_inliers.size() > sample_size) { // Estimating the model which the current set of inliers imply std::vector<gcransac::Model> sigma_models; estimator_.estimateModelNonminimal(points_, &(sigma_inliers)[0], sigma_inlier_number, &sigma_models); // If the estimation was successful calculate the implied probabilities if (sigma_models.size() == 1) { const double max_sigma_squared_2 = 2 * max_sigma * max_sigma; double residual_i_2, // The residual of the i-th point probability_i; // The probability of the i-th point // Iterate through 
all points to estimate the related probabilities for (size_t relative_point_idx = 0; relative_point_idx < sigma_inliers.size(); ++relative_point_idx) { // TODO: Replace with Chi-square instead of normal distribution const size_t &point_idx = sigma_inliers[relative_point_idx]; // Calculate the residual of the current point residual_i_2 = estimator_.squaredResidual(points_.row(point_idx), sigma_models[0]); // Calculate the probability of the i-th point assuming Gaussian distribution // TODO: replace by Chi-square distribution probability_i = exp(-residual_i_2 / max_sigma_squared_2); // Store the probability of the i-th point coming from the current partition point_weights_par[partition_idx][relative_point_idx] += probability_i; } } } } #else fprintf(stderr, "Not implemented yet.\n"); #endif // The weights used for the final weighted least-squares fitting final_weights.reserve(possible_inlier_number); // Collect all points which has higher probability of being inlier than zero sigma_inliers.reserve(possible_inlier_number); for (size_t point_idx = 0; point_idx < possible_inlier_number; ++point_idx) { // Calculate the weight of the current point double weight = 0.0; for (size_t partition_idx = 0; partition_idx < partition_number; ++partition_idx) weight += point_weights_par[partition_idx][point_idx]; // If the weight is approx. zero, continue. 
if (weight < std::numeric_limits<double>::epsilon()) continue; // Store the index and weight of the current point sigma_inliers.emplace_back(all_residuals[point_idx].second); final_weights.emplace_back(weight); } // If there are fewer inliers than the size of the minimal sample interupt the procedure if (sigma_inliers.size() < sample_size) return false; // Estimate the model parameters using weighted least-squares fitting if (!estimator_.estimateModelNonminimal( points_, // All input points &(sigma_inliers)[0], // Points which have higher than 0 probability of being inlier static_cast<int>(sigma_inliers.size()), // Number of possible inliers &sigma_models, // Estimated models &(final_weights)[0])) // Weights of points return false; bool is_model_updated = false; if (sigma_models.size() == 1 && // If only a single model is estimated estimator_.isValidModel(sigma_models.back(), points_, sigma_inliers, &(sigma_inliers)[0], interrupting_threshold, is_model_updated)) // and it is valid { // Return the refined model refined_model_ = sigma_models.back(); // Calculate the score of the model and the implied iteration number double marginalized_iteration_number; getModelQuality(points_, // All the input points refined_model_, // The estimated model estimator_, // The estimator marginalized_iteration_number, // The marginalized inlier ratio score_.score); // The marginalized score if (marginalized_iteration_number < 0 || std::isnan(marginalized_iteration_number)) last_iteration_number = std::numeric_limits<int>::max(); else last_iteration_number = static_cast<int>(round(marginalized_iteration_number)); return true; } return false; } template <class DatumType, class ModelEstimator> bool MAGSAC<DatumType, ModelEstimator>::sigmaConsensusPlusPlus( const cv::Mat &points_, const gcransac::Model& model_, gcransac::Model& refined_model_, ModelScore &score_, const ModelEstimator &estimator_, const ModelScore &best_score_) { // The degrees of freedom of the data from which the model is 
estimated. // E.g., for models coming from point correspondences (x1,y1,x2,y2), it is 4. constexpr size_t degrees_of_freedom = ModelEstimator::getDegreesOfFreedom(); // A 0.99 quantile of the Chi^2-distribution to convert sigma values to residuals constexpr double k = ModelEstimator::getSigmaQuantile(); // A multiplier to convert residual values to sigmas constexpr double threshold_to_sigma_multiplier = 1.0 / k; // Calculating k^2 / 2 which will be used for the estimation and, // due to being constant, it is better to calculate it a priori. constexpr double squared_k_per_2 = k * k / 2.0; // Calculating (DoF - 1) / 2 which will be used for the estimation and, // due to being constant, it is better to calculate it a priori. constexpr double dof_minus_one_per_two = (degrees_of_freedom - 1.0) / 2.0; // TODO: check constexpr double C = ModelEstimator::getC(); // The size of a minimal sample used for the estimation constexpr size_t sample_size = estimator_.sampleSize(); // Calculating 2^(DoF - 1) which will be used for the estimation and, // due to being constant, it is better to calculate it a priori. static const double two_ad_dof = std::pow(2.0, dof_minus_one_per_two); // Calculating C * 2^(DoF - 1) which will be used for the estimation and, // due to being constant, it is better to calculate it a priori. static const double C_times_two_ad_dof = C * two_ad_dof; // Calculating the gamma value of (DoF - 1) / 2 which will be used for the estimation and, // due to being constant, it is better to calculate it a priori. static const double gamma_value = tgamma(dof_minus_one_per_two); // Calculating the upper incomplete gamma value of (DoF - 1) / 2 with k^2 / 2. constexpr double gamma_k = ModelEstimator::getUpperIncompleteGammaOfK(); // Calculating the lower incomplete gamma value of (DoF - 1) / 2 which will be used for the estimation and, // due to being constant, it is better to calculate it a priori. 
	static const double gamma_difference = gamma_value - gamma_k;
	// The number of points provided
	const int point_number = points_.rows;
	// The manually set maximum inlier-outlier threshold
	double current_maximum_sigma = this->maximum_threshold;
	// Calculating the pairs of (residual, point index).
	std::vector< std::pair<double, size_t> > residuals;
	// Occupy the maximum required memory to avoid doing it later.
	residuals.reserve(point_number);

	// If it is not the first run, consider the previous best and interrupt the validation when there is no chance of being better
	if (best_score_.inlier_number > 0)
	{
		// Number of points close to the previous so-far-the-best model.
		// This model should have more inliers.
		int points_remaining = best_score_.inlier_number;

		// Collect the points which are closer than the threshold which the maximum sigma implies
		for (int point_idx = 0; point_idx < point_number; ++point_idx)
		{
			// Calculate the residual of the current point
			const double residual = estimator_.residual(points_.row(point_idx), model_);
			if (current_maximum_sigma > residual)
			{
				// Store the residual of the current point and its index
				residuals.emplace_back(std::make_pair(residual, point_idx));
				// all_residuals.emplace_back(std::make_pair(residual * threshold_to_sigma_multiplier, point_idx));

				// Count points which are closer than a reference threshold to speed up the procedure
				if (residual < interrupting_threshold)
					--points_remaining;
			}

			// Interrupt if there is no chance of being better
			// TODO: replace this part by SPRT test
			if (point_number - point_idx < points_remaining)
				return false;
		}

		// Store the number of really close inliers just to speed up the procedure
		// by interrupting the next verifications.
		score_.inlier_number = best_score_.inlier_number - points_remaining;
	}
	else
	{
		// The number of really close points
		size_t points_close = 0;

		// Collect the points which are closer than the threshold which the maximum sigma implies
		// NOTE(review): size_t loop index compared with int point_number — mixes signedness; harmless since rows >= 0.
		for (size_t point_idx = 0; point_idx < point_number; ++point_idx)
		{
			// Calculate the residual of the current point
			const double residual = estimator_.residual(points_.row(point_idx), model_);
			if (current_maximum_sigma > residual)
			{
				// Store the residual of the current point and its index
				residuals.emplace_back(std::make_pair(residual, point_idx));

				// Count points which are closer than a reference threshold to speed up the procedure
				if (residual < interrupting_threshold)
					++points_close;
			}
		}

		// Store the number of really close inliers just to speed up the procedure
		// by interrupting the next verifications.
		score_.inlier_number = points_close;
	}

	// Models fit by weighted least-squares fitting
	std::vector<gcransac::Model> sigma_models;
	// Points used in the weighted least-squares fitting
	std::vector<size_t> sigma_inliers;
	// Weights used in the weighted least-squares fitting
	std::vector<double> sigma_weights;
	// Number of points considered in the fitting
	const size_t possible_inlier_number = residuals.size();
	// Occupy the memory to avoid doing it inside the calculation possibly multiple times
	sigma_inliers.reserve(possible_inlier_number);
	// Occupy the memory to avoid doing it inside the calculation possibly multiple times
	sigma_weights.reserve(possible_inlier_number);

	// Calculate 2 * \sigma_{max}^2 a priori
	const double squared_sigma_max_2 = current_maximum_sigma * current_maximum_sigma * 2.0;
	// Divide C * 2^(DoF - 1) by \sigma_{max} a priori
	const double one_over_sigma = C_times_two_ad_dof / current_maximum_sigma;
	// Calculate the weight of a point with 0 residual (i.e., fitting perfectly) a priori
	const double weight_zero = one_over_sigma * gamma_difference;

	// Initialize the polished model with the initial one
	gcransac::Model
	polished_model = model_;
	// A flag to determine if the initial model has been updated
	bool updated = false;

	// Do the iteratively re-weighted least squares fitting
	for (size_t iterations = 0; iterations < number_of_irwls_iters; ++iterations)
	{
		// If the current iteration is not the first, the set of possible inliers
		// (i.e., points closer than the maximum threshold) has to be recalculated.
		if (iterations > 0)
		{
			// The number of points close to the model
			size_t points_close = 0;
			// Remove everything from the residual vector
			residuals.clear();

			// Collect the points which are closer than the maximum threshold
			for (size_t point_idx = 0; point_idx < point_number; ++point_idx)
			{
				// Calculate the residual of the current point w.r.t. the most recent polished model
				const double residual = estimator_.residual(points_.row(point_idx), polished_model);
				if (current_maximum_sigma > residual)
				{
					// Store the residual of the current point and its index
					residuals.emplace_back(std::make_pair(residual, point_idx));

					// Count points which are closer than a reference threshold to speed up the procedure
					if (residual < interrupting_threshold)
						++points_close;
				}
			}

			// Store the number of really close inliers just to speed up the procedure
			// by interrupting the next verifications.
score_.inlier_number = points_close; // Number of points closer than the threshold const size_t possible_inlier_number = residuals.size(); // Clear the inliers and weights sigma_inliers.clear(); sigma_weights.clear(); // Occupy the memory for the inliers and weights sigma_inliers.reserve(possible_inlier_number); sigma_weights.reserve(possible_inlier_number); } // Calculate the weight of each point for (size_t res_idx = 0; res_idx < residuals.size(); ++res_idx) { const std::pair<double, size_t> &pair = residuals[res_idx]; const double &residual = pair.first; const size_t &idx = pair.second; // The weight double weight = 0.0; // If the residual is ~0, the point fits perfectly and it is handled differently if (residual < std::numeric_limits<double>::epsilon()) weight = weight_zero; else { // Calculate the squared residual const double squared_residual = residual * residual; // Get the position of the gamma value in the lookup table size_t x = round(precision_of_stored_gammas * squared_residual / squared_sigma_max_2); // Put the index of the point into the vector of points used for the least squares fitting sigma_inliers.emplace_back(idx); // If the sought gamma value is not stored in the lookup, return the closest element if (stored_gamma_number < x) x = stored_gamma_number; // Calculate the weight of the point weight = one_over_sigma * (stored_gamma_values[x] - gamma_k); } // Store the weight of the point sigma_weights.emplace_back(weight); } // If there are fewer than the minimum point close to the model, // terminate. 
		if (sigma_inliers.size() < sample_size)
			return false;

		// Estimate the model parameters using weighted least-squares fitting
		if (!estimator_.estimateModelNonminimal(
			points_, // All input points
			&(sigma_inliers)[0], // Points which have higher than 0 probability of being inlier
			static_cast<int>(sigma_inliers.size()), // Number of possible inliers
			&sigma_models, // Estimated models
			&(sigma_weights)[0])) // Weights of points
		{
			// If the estimation failed and the iteration was never successful,
			// terminate with failure.
			if (iterations == 0)
				return false;
			// Otherwise, if the iteration was successful at least once,
			// simply break it.
			break;
		}

		// Update the model parameters
		polished_model = sigma_models[0];
		// Clear the vector of models and keep only the best
		sigma_models.clear();
		// The model has been updated
		updated = true;
	}

	bool is_model_updated = false;

	if (updated && // If the model has been updated
		estimator_.isValidModel(polished_model,
			points_,
			sigma_inliers,
			&(sigma_inliers[0]),
			interrupting_threshold,
			is_model_updated)) // and it is valid
	{
		// Return the refined model
		refined_model_ = polished_model;

		// Calculate the score of the model and the implied iteration number
		// NOTE(review): 'marginalized_iteration_number' is declared but never written
		// in this function — confirm whether it was meant to receive a value.
		double marginalized_iteration_number;
		getModelQualityPlusPlus(points_, // All the input points
			refined_model_, // The estimated model
			estimator_, // The estimator
			score_.score, // The marginalized score
			best_score_.score); // The score of the previous so-far-the-best model

		// Update the iteration number from the standard RANSAC confidence formula
		last_iteration_number =
			log_confidence / log(1.0 - std::pow(static_cast<double>(score_.inlier_number) / point_number, sample_size));

		return true;
	}
	return false;
}

// MAGSAC++ model quality: marginalizes the model loss over the noise scale
// and converts the total loss to a score (higher is better).
template <class DatumType, class ModelEstimator>
void MAGSAC<DatumType, ModelEstimator>::getModelQualityPlusPlus(
	const cv::Mat &points_, // All data points
	const gcransac::Model &model_, // The model parameter
	const ModelEstimator &estimator_, // The model estimator class
	double &score_, // The score to be calculated
	const double
	&previous_best_score_) // The score of the previous so-far-the-best model
{
	// The degrees of freedom of the data from which the model is estimated.
	// E.g., for models coming from point correspondences (x1,y1,x2,y2), it is 4.
	constexpr size_t degrees_of_freedom = ModelEstimator::getDegreesOfFreedom();
	// A 0.99 quantile of the Chi^2-distribution to convert sigma values to residuals
	constexpr double k = ModelEstimator::getSigmaQuantile();
	// A multiplier to convert residual values to sigmas
	constexpr double threshold_to_sigma_multiplier = 1.0 / k;
	// Calculating k^2 / 2 which will be used for the estimation and,
	// due to being constant, it is better to calculate it a priori.
	// NOTE(review): appears unused in this function — confirm.
	constexpr double squared_k_per_2 = k * k / 2.0;
	// Calculating (DoF - 1) / 2 which will be used for the estimation and,
	// due to being constant, it is better to calculate it a priori.
	constexpr double dof_minus_one_per_two = (degrees_of_freedom - 1.0) / 2.0;
	// Calculating (DoF + 1) / 2 which will be used for the estimation and,
	// due to being constant, it is better to calculate it a priori.
	constexpr double dof_plus_one_per_two = (degrees_of_freedom + 1.0) / 2.0;
	// TODO: check
	constexpr double C = 0.25;
	// Calculating 2^((DoF - 1) / 2) which will be used for the estimation and,
	// due to being constant, it is better to calculate it a priori.
	static const double two_ad_dof_minus_one = std::pow(2.0, dof_minus_one_per_two);
	// Calculating 2^((DoF + 1) / 2) which will be used for the estimation and,
	// due to being constant, it is better to calculate it a priori.
	static const double two_ad_dof_plus_one = std::pow(2.0, dof_plus_one_per_two);
	// Calculate the upper incomplete gamma value of k
	constexpr double gamma_value_of_k = ModelEstimator::getUpperIncompleteGammaOfK();
	// Calculate the lower incomplete gamma value of k
	constexpr double lower_gamma_value_of_k = ModelEstimator::getLowerIncompleteGammaOfK();
	// The number of points provided
	const int point_number = points_.rows;
	// The previous best loss (the score is the reciprocal of the loss)
	const double previous_best_loss = 1.0 / previous_best_score_;
	// Convert the maximum threshold to a sigma value
	const double maximum_sigma = threshold_to_sigma_multiplier * maximum_threshold;
	// Calculate the squared maximum sigma
	const double maximum_sigma_2 = maximum_sigma * maximum_sigma;
	// Calculate \sigma_{max}^2 / 2
	const double maximum_sigma_2_per_2 = maximum_sigma_2 / 2.0;
	// Calculate 2 * \sigma_{max}^2
	const double maximum_sigma_2_times_2 = maximum_sigma_2 * 2.0;
	// Calculate the loss implied by an outlier
	const double outlier_loss = maximum_sigma * two_ad_dof_minus_one * lower_gamma_value_of_k;
	// Calculating 2^((DoF + 1) / 2) / \sigma_{max} which will be used for the estimation and,
	// due to being constant, it is better to calculate it a priori.
	const double two_ad_dof_plus_one_per_maximum_sigma = two_ad_dof_plus_one / maximum_sigma;

	// The loss which a point implies
	double loss = 0.0,
		// The total loss regarding the current model
		total_loss = 0.0;

	// Iterate through all points to calculate the implied loss
	for (size_t point_idx = 0; point_idx < point_number; ++point_idx)
	{
		// Calculate the residual of the current point
		const double residual =
			estimator_.residualForScoring(points_.row(point_idx), model_.descriptor);

		// If the residual exceeds the maximum threshold, consider the point an outlier
		// and add the loss implied to the total loss.
		if (maximum_threshold < residual)
			loss = outlier_loss;
		else // Otherwise, consider the point inlier, and calculate the implied loss
		{
			// Calculate the squared residual
			const double squared_residual = residual * residual;
			// Divide the squared residual by 2 * \sigma_{max}^2
			const double squared_residual_per_sigma = squared_residual / maximum_sigma_2_times_2;
			// Get the position of the gamma value in the lookup table
			size_t x = round(precision_of_stored_incomplete_gammas * squared_residual_per_sigma);
			// If the sought gamma value is not stored in the lookup, return the closest element
			if (stored_incomplete_gamma_number < x)
				x = stored_incomplete_gamma_number;

			// Calculate the loss implied by the current point
			loss = maximum_sigma_2_per_2 * stored_lower_incomplete_gamma_values[x] +
				squared_residual / 4.0 * (stored_complete_gamma_values[x] - gamma_value_of_k);
			loss = loss * two_ad_dof_plus_one_per_maximum_sigma;
		}

		// Update the total loss
		total_loss += loss;

		// Break the validation if there is no chance of being better than the previous
		// so-far-the-best model.
		if (previous_best_loss < total_loss)
			break;
	}

	// Calculate the score of the model from the total loss (higher score = lower loss)
	score_ = 1.0 / total_loss;
}

// Original MAGSAC model quality: partitions the threshold range and marginalizes
// both the score and the implied iteration number over the partitions.
template <class DatumType, class ModelEstimator>
void MAGSAC<DatumType, ModelEstimator>::getModelQuality(
	const cv::Mat &points_, // All data points
	const gcransac::Model &model_, // The model parameter
	const ModelEstimator &estimator_, // The model estimator class
	double &marginalized_iteration_number_, // The marginalized iteration number to be calculated
	double &score_) // The score to be calculated
{
	// Set up the parameters
	constexpr size_t sample_size = estimator_.sampleSize();
	const size_t point_number = points_.rows;

	// Getting the inliers
	std::vector<std::pair<double, size_t>> all_residuals;
	all_residuals.reserve(point_number);

	double max_distance = 0;
	for (size_t point_idx = 0; point_idx < point_number; ++point_idx)
	{
		// Calculate the residual of the current point
		const double residual =
			estimator_.residualForScoring(points_.row(point_idx), model_.descriptor);
		// If the residual is smaller than the maximum threshold, add it to the set of possible inliers
		if (maximum_threshold > residual)
		{
			max_distance = MAX(max_distance, residual);
			all_residuals.emplace_back(std::make_pair(residual, point_idx));
		}
	}

	// Set the maximum distance to be slightly bigger than that of the farthest possible inlier
	max_distance = max_distance + std::numeric_limits<double>::epsilon();

	// Number of possible inliers
	const size_t possible_inlier_number = all_residuals.size();

	// The extent of a partition
	const double threshold_step = max_distance / partition_number;

	// The maximum threshold considered in each partition
	std::vector<double> thresholds(partition_number);
	std::vector<double> thresholds_squared(partition_number);
	std::vector<double> thresholds_2_squared(partition_number);

	// Calculating the thresholds for each partition
	for (size_t i = 0; i < partition_number; ++i)
	{
		thresholds[i] = (i + 1) * threshold_step;
		thresholds_squared[i] = thresholds[i] * thresholds[i];
		thresholds_2_squared[i] = 2 * thresholds_squared[i];
	}

	double residual_i, // Residual of the i-th point
		residual_i_squared, // Squared residual of the i-th point
		probability_i; // Probability of the i-th point given the model

	std::vector<double> inliers(partition_number, 0), // RANSAC score for each partition
		probabilities(partition_number, 1); // Probabilities for each partition

	// Accumulate, for every partition, the soft inlier count and the probability mass
	for (size_t point_idx = 0; point_idx < possible_inlier_number; ++point_idx)
	{
		residual_i = all_residuals[point_idx].first;
		residual_i_squared = residual_i * residual_i;

		for (size_t i = 0; i < partition_number; ++i)
		{
			if (residual_i < thresholds[i])
			{
				probability_i = 1.0 - residual_i_squared / thresholds_squared[i];
				++inliers[i];
				probabilities[i] += probability_i;
			}
		}
	}

	score_ = 0;
	marginalized_iteration_number_ = 0.0;
	// Average the score and the implied iteration number over the partitions.
	// NOTE(review): when inliers[i] == 0, log(1 - 0) == 0 makes the division yield inf — confirm intended.
	for (auto i = 0; i < partition_number; ++i)
	{
		score_ += probabilities[i];
		marginalized_iteration_number_ +=
			log_confidence / log(1.0 - std::pow(inliers[i] / point_number, sample_size));
	}
	marginalized_iteration_number_ = marginalized_iteration_number_ / partition_number;
}
Instrument.h
/* +-----------------------------------+ | | |***FLOP counting instrumentation***| | | | Copyright (c) -tHE SWINe- 2016 | | | | Instrument.h | | | +-----------------------------------+ */ #pragma once #ifndef __FLOP_COUNTING_SCALAR_INCLUDED #define __FLOP_COUNTING_SCALAR_INCLUDED /** * @file include/sparse_flops/Instrument.h * @brief FLOP counting instrumentation of scalar types * @date 2016 * @author -tHE SWINe- */ #include "slam/Integer.h" #include <math.h> /** * @brief wrapper around numeric type which counts operations * * @tparam CBaseSclar is base scalar type * * @note This does not implement conversion operator to the base type, to avoid errors. * @note The in-place operations (e.g. a +=) with base type on the left are not implemented * for the same reason (would most likely lead to not counting some operations). * @note The counters are implemented using OpenMP atomics so this is thread-safe to a * certain degree. * @note The new math functions introduced in C++11 are not wrapped. 
*/ template <class CBaseSclar> class CFLOPCountingScalar { public: typedef CBaseSclar _TyBase; /**< @brief base type */ typedef void (CFLOPCountingScalar::*Boolean)() const; /**< @brief true value for the safe bool idiom */ typedef size_t _TyCount; /**< @brief instruction counter data type */ protected: _TyBase m_f_value; /**< @brief value */ static _TyCount m_n_add_num; /**< @brief counter for addition operations */ static _TyCount m_n_mul_num; /**< @brief counter for multiplication operations */ static _TyCount m_n_div_num; /**< @brief counter for division operations */ static _TyCount m_n_trcd_num; /**< @brief counter for transcendental operations */ static _TyCount m_n_cmp_num; /**< @brief counter for comparison operations */ public: /** * @brief resets the values of all the counters */ static void Reset_Counters() { #pragma omp atomic m_n_add_num ^= m_n_add_num; #pragma omp atomic m_n_mul_num ^= m_n_mul_num; #pragma omp atomic m_n_div_num ^= m_n_div_num; #pragma omp atomic m_n_trcd_num ^= m_n_trcd_num; #pragma omp atomic m_n_cmp_num ^= m_n_cmp_num; } /** * @brief gets the addition operation counter value * @return Returns the number of addition operations * since the last call to \ref Reset_Counters(). */ static inline _TyCount n_Add_Num() { return m_n_add_num; } /** * @brief gets the multiplication operation counter value * @return Returns the number of multiplication operations * since the last call to \ref Reset_Counters(). */ static inline _TyCount n_Multiply_Num() { return m_n_mul_num; } /** * @brief gets the division operation counter value * @return Returns the number of division operations * since the last call to \ref Reset_Counters(). */ static inline _TyCount n_Divide_Num() { return m_n_div_num; } /** * @brief gets the transcendental operation counter value * @return Returns the number of transcendental operations * since the last call to \ref Reset_Counters(). 
*/ static inline _TyCount n_Transcendental_Num() { return m_n_trcd_num; } /** * @brief gets the comparison operation counter value * @return Returns the number of comparison operations * since the last call to \ref Reset_Counters(). */ static inline _TyCount n_Comparison_Num() { return m_n_cmp_num; } /** * @brief gets the sum of all operation counter values * @return Returns the number of (all types of) operations * since the last call to \ref Reset_Counters(). * @note This sum is equally weighted. */ static inline _TyCount n_FLOP_Num() { return m_n_add_num + m_n_mul_num + m_n_div_num + m_n_trcd_num + m_n_cmp_num; } /** * @brief default constructor; has no effect */ CFLOPCountingScalar() {} /** * @brief constructor; initializes the value * @param[in] f_value is value to initialize to */ CFLOPCountingScalar(_TyBase f_value) :m_f_value(f_value) {} /** * @brief constructor; initializes to value obtained by a transcendental operation(s) * * @param[in] f_value is value to initialize to * @param[in] n_transcendent_operation_num is number of * transcendent operations it took to obtain the value */ CFLOPCountingScalar(_TyBase f_value, _TyCount n_transcendent_operation_num) :m_f_value(f_value) { #pragma omp atomic m_n_trcd_num += n_transcendent_operation_num; } /** * @brief gets the value * @return Returns the stored value. * @note Automatic conversion operator to \ref _TyBase is not * implemented as it would make the debugging much harder. */ _TyBase f_Value() const { return m_f_value; } /** * @brief gets the value * @return Returns a reference the stored value. * @note Automatic conversion operator to \ref _TyBase is not * implemented as it would make the debugging much harder. */ _TyBase &f_Value() { return m_f_value; } /** * @brief unary minus operator * @return Returns the negative value of this. * @note This increments the addition counter. 
*/ CFLOPCountingScalar operator -() const { #pragma omp atomic ++ m_n_add_num; return CFLOPCountingScalar(-m_f_value); } /** * @brief addition operator * @param[in] f_x is the value on the right side * @return Returns the sum of the two values. * @note This increments the addition counter. */ CFLOPCountingScalar operator +(_TyBase f_x) const { #pragma omp atomic ++ m_n_add_num; return CFLOPCountingScalar(m_f_value + f_x); } /** * @brief subtraction operator * @param[in] f_x is the value on the right side * @return Returns the difference of the two values. * @note This increments the addition counter. */ CFLOPCountingScalar operator -(_TyBase f_x) const { #pragma omp atomic ++ m_n_add_num; return CFLOPCountingScalar(m_f_value - f_x); } /** * @brief multiplication operator * @param[in] f_x is the value on the right side * @return Returns the product of the two values. * @note This increments the multiplication counter. */ CFLOPCountingScalar operator *(_TyBase f_x) const { #pragma omp atomic ++ m_n_mul_num; return CFLOPCountingScalar(m_f_value * f_x); } /** * @brief division operator * @param[in] f_x is the value on the right side * @return Returns the ratio of the two values. * @note This increments the division counter. */ CFLOPCountingScalar operator /(_TyBase f_x) const { #pragma omp atomic ++ m_n_div_num; return CFLOPCountingScalar(m_f_value / f_x); } /** * @brief inplace addition operator * @param[in] f_x is the value on the right side * @return Returns reference to this. * @note This increments the addition counter. */ CFLOPCountingScalar &operator +=(_TyBase f_x) { #pragma omp atomic ++ m_n_add_num; m_f_value += f_x; return *this; } /** * @brief inplace subtraction operator * @param[in] f_x is the value on the right side * @return Returns reference to this. * @note This increments the addition counter. 
*/ CFLOPCountingScalar &operator -=(_TyBase f_x) { #pragma omp atomic ++ m_n_add_num; m_f_value -= f_x; return *this; } /** * @brief inplace multiplication operator * @param[in] f_x is the value on the right side * @return Returns reference to this. * @note This increments the multiplication counter. */ CFLOPCountingScalar &operator *=(_TyBase f_x) { #pragma omp atomic ++ m_n_mul_num; m_f_value *= f_x; return *this; } /** * @brief inplace division operator * @param[in] f_x is the value on the right side * @return Returns reference to this. * @note This increments the division counter. */ CFLOPCountingScalar &operator /=(_TyBase f_x) { #pragma omp atomic ++ m_n_div_num; m_f_value /= f_x; return *this; } /** * @brief addition operator * @param[in] f_x is the value on the right side * @return Returns the sum of the two values. * @note This increments the addition counter. */ CFLOPCountingScalar operator +(CFLOPCountingScalar f_x) const { #pragma omp atomic ++ m_n_add_num; return CFLOPCountingScalar(m_f_value + f_x.m_f_value); } /** * @brief subtraction operator * @param[in] f_x is the value on the right side * @return Returns the difference of the two values. * @note This increments the addition counter. */ CFLOPCountingScalar operator -(CFLOPCountingScalar f_x) const { #pragma omp atomic ++ m_n_add_num; return CFLOPCountingScalar(m_f_value - f_x.m_f_value); } /** * @brief multiplication operator * @param[in] f_x is the value on the right side * @return Returns the product of the two values. * @note This increments the multiplication counter. */ CFLOPCountingScalar operator *(CFLOPCountingScalar f_x) const { #pragma omp atomic ++ m_n_mul_num; return CFLOPCountingScalar(m_f_value * f_x.m_f_value); } /** * @brief division operator * @param[in] f_x is the value on the right side * @return Returns the ratio of the two values. * @note This increments the division counter. 
*/ CFLOPCountingScalar operator /(CFLOPCountingScalar f_x) const { #pragma omp atomic ++ m_n_div_num; return CFLOPCountingScalar(m_f_value / f_x.m_f_value); } /** * @brief inplace addition operator * @param[in] f_x is the value on the right side * @return Returns reference to this. * @note This increments the addition counter. */ CFLOPCountingScalar &operator +=(CFLOPCountingScalar f_x) { #pragma omp atomic ++ m_n_add_num; m_f_value += f_x.m_f_value; return *this; } /** * @brief inplace subtraction operator * @param[in] f_x is the value on the right side * @return Returns reference to this. * @note This increments the addition counter. */ CFLOPCountingScalar &operator -=(CFLOPCountingScalar f_x) { #pragma omp atomic ++ m_n_add_num; m_f_value -= f_x.m_f_value; return *this; } /** * @brief inplace multiplication operator * @param[in] f_x is the value on the right side * @return Returns reference to this. * @note This increments the multiplication counter. */ CFLOPCountingScalar &operator *=(CFLOPCountingScalar f_x) { #pragma omp atomic ++ m_n_mul_num; m_f_value *= f_x.m_f_value; return *this; } /** * @brief inplace division operator * @param[in] f_x is the value on the right side * @return Returns reference to this. * @note This increments the division counter. */ CFLOPCountingScalar &operator /=(CFLOPCountingScalar f_x) { #pragma omp atomic ++ m_n_div_num; m_f_value /= f_x.m_f_value; return *this; } /** * @brief unary negation operator * @return Returns true if this equals zero, otherwise returns false. * @note This increments the comparison counter. */ bool operator !() const { #pragma omp atomic ++ m_n_cmp_num; return !m_f_value; } /** * @brief conversion to bool * * @return Returns nonzero (not 1) if this does not equal to zero, otherwise returns null. * * @note This uses the safe bool idiom to avoid mixing expansions in unsafe arithmetic expressions. * @note This increments the comparison counter. 
*/ operator Boolean() const { #pragma omp atomic ++ m_n_cmp_num; return (m_f_value)? &CFLOPCountingScalar::True_Value : 0; } /** * @brief less-than operator * @param[in] f_x is the value on the right side * @return Returns true if this equals zero, otherwise returns false. * @note This increments the comparison counter. */ bool operator <(CFLOPCountingScalar f_x) { #pragma omp atomic ++ m_n_cmp_num; return m_f_value < f_x.m_f_value; } /** * @brief greater-than operator * @param[in] f_x is the value on the right side * @return Returns true if this is greater than \ref f_x, otherwise returns false. * @note This increments the comparison counter. */ bool operator >(CFLOPCountingScalar f_x) { return CFLOPCountingScalar(f_x) < m_f_value; } /** * @brief equal-to operator * @param[in] f_x is the value on the right side * @return Returns true if this is equal to \ref f_x, otherwise returns false. * @note This increments the comparison counter. */ bool operator ==(CFLOPCountingScalar f_x) { #pragma omp atomic ++ m_n_cmp_num; return m_f_value == f_x.m_f_value; } /** * @brief not-equal-to operator * @param[in] f_x is the value on the right side * @return Returns true if this is not equal to \ref f_x, otherwise returns false. * @note This increments the comparison counter. */ bool operator !=(CFLOPCountingScalar f_x) { return !(*this == f_x); } /** * @brief less-than or equal operator * @param[in] f_x is the value on the right side * @return Returns true if this is less than or equal to \ref f_x, otherwise returns false. * @note This increments the comparison counter. */ bool operator <=(CFLOPCountingScalar f_x) { return !(*this > f_x); } /** * @brief greater-than or equal operator * @param[in] f_x is the value on the right side * @return Returns true if this is greater than or equal to \ref f_x, otherwise returns false. * @note This increments the comparison counter. 
*/ bool operator >=(CFLOPCountingScalar f_x) { return !(*this < f_x); } /** * @brief less-than operator * @param[in] f_x is the value on the right side * @return Returns true if this equals zero, otherwise returns false. * @note This increments the comparison counter. */ bool operator <(_TyBase f_x) { #pragma omp atomic ++ m_n_cmp_num; return m_f_value < f_x; } /** * @brief greater-than operator * @param[in] f_x is the value on the right side * @return Returns true if this is greater than \ref f_x, otherwise returns false. * @note This increments the comparison counter. */ bool operator >(_TyBase f_x) { return CFLOPCountingScalar(f_x) < m_f_value; } /** * @brief equal-to operator * @param[in] f_x is the value on the right side * @return Returns true if this is equal to \ref f_x, otherwise returns false. * @note This increments the comparison counter. */ bool operator ==(_TyBase f_x) { #pragma omp atomic ++ m_n_cmp_num; return m_f_value == f_x; } /** * @brief not-equal-to operator * @param[in] f_x is the value on the right side * @return Returns true if this is not equal to \ref f_x, otherwise returns false. * @note This increments the comparison counter. */ bool operator !=(_TyBase f_x) { return !(*this == f_x); } /** * @brief less-than or equal operator * @param[in] f_x is the value on the right side * @return Returns true if this is less than or equal to \ref f_x, otherwise returns false. * @note This increments the comparison counter. */ bool operator <=(_TyBase f_x) { return !(*this > f_x); } /** * @brief greater-than or equal operator * @param[in] f_x is the value on the right side * @return Returns true if this is greater than or equal to \ref f_x, otherwise returns false. * @note This increments the comparison counter. 
*/ bool operator >=(_TyBase f_x) { return !(*this < f_x); } protected: /** * @brief value of true for the safe bool idiom */ void True_Value() const {} }; /** * @brief addition operator * * @tparam CBaseSclar is a scalar type * * @param[in] f_x is the value on the left side * @param[in] f_y is the value on the right side * * @return Returns the sum of the two values. * * @note This increments the addition counter. */ template <class CBaseSclar> inline CFLOPCountingScalar<CBaseSclar> operator +(CBaseSclar f_x, CFLOPCountingScalar<CBaseSclar> f_y) { return CFLOPCountingScalar<CBaseSclar>(f_x) + f_y; } /** * @brief subtraction operator * * @tparam CBaseSclar is a scalar type * * @param[in] f_x is the value on the left side * @param[in] f_y is the value on the right side * * @return Returns the difference of the two values. * * @note This increments the addition counter. */ template <class CBaseSclar> inline CFLOPCountingScalar<CBaseSclar> operator -(CBaseSclar f_x, CFLOPCountingScalar<CBaseSclar> f_y) { return CFLOPCountingScalar<CBaseSclar>(f_x) - f_y; } /** * @brief multiplication operator * * @tparam CBaseSclar is a scalar type * * @param[in] f_x is the value on the left side * @param[in] f_y is the value on the right side * * @return Returns the product of the two values. * * @note This increments the multiplication counter. */ template <class CBaseSclar> inline CFLOPCountingScalar<CBaseSclar> operator *(CBaseSclar f_x, CFLOPCountingScalar<CBaseSclar> f_y) { return CFLOPCountingScalar<CBaseSclar>(f_x) * f_y; } /** * @brief division operator * * @tparam CBaseSclar is a scalar type * * @param[in] f_x is the value on the left side * @param[in] f_y is the value on the right side * * @return Returns the ratio of the two values. * * @note This increments the division counter. 
 */
template <class CBaseSclar>
inline CFLOPCountingScalar<CBaseSclar> operator /(CBaseSclar f_x, CFLOPCountingScalar<CBaseSclar> f_y)
{
	return CFLOPCountingScalar<CBaseSclar>(f_x) / f_y;
}

/**
 *	@brief greater-than operator
 *
 *	@tparam CBaseSclar is a scalar type
 *
 *	@param[in] f_x is the value on the left side
 *	@param[in] f_y is the value on the right side
 *
 *	@return Returns true if f_x is greater than f_y, otherwise returns false.
 *	@note This increments the comparison counter.
 */
template <class CBaseSclar>
inline bool operator >(CBaseSclar f_x, CFLOPCountingScalar<CBaseSclar> f_y)
{
	return f_y < f_x; // delegate to the member operator, which counts the comparison
}

/**
 *	@brief equal-to operator
 *
 *	@tparam CBaseSclar is a scalar type
 *
 *	@param[in] f_x is the value on the left side
 *	@param[in] f_y is the value on the right side
 *
 *	@return Returns true if f_x is equal to f_y, otherwise returns false.
 *	@note This increments the comparison counter.
 */
template <class CBaseSclar>
inline bool operator ==(CBaseSclar f_x, CFLOPCountingScalar<CBaseSclar> f_y)
{
	return f_y == f_x; // delegate to the member operator, which counts the comparison
}

/**
 *	@brief not-equal-to operator
 *
 *	@tparam CBaseSclar is a scalar type
 *
 *	@param[in] f_x is the value on the left side
 *	@param[in] f_y is the value on the right side
 *
 *	@return Returns true if f_x is not equal to f_y, otherwise returns false.
 *	@note This increments the comparison counter.
 */
template <class CBaseSclar>
inline bool operator !=(CBaseSclar f_x, CFLOPCountingScalar<CBaseSclar> f_y)
{
	return f_y != f_x; // delegate to the member operator, which counts the comparison
}

/**
 *	@brief less-than or equal operator
 *
 *	@tparam CBaseSclar is a scalar type
 *
 *	@param[in] f_x is the value on the left side
 *	@param[in] f_y is the value on the right side
 *
 *	@return Returns true if f_x is less than or equal to f_y, otherwise returns false.
 *	@note This increments the comparison counter.
*/ template <class CBaseSclar> inline bool operator <=(CBaseSclar f_x, CFLOPCountingScalar<CBaseSclar> f_y) { return f_y >= f_x; } /** * @brief greater-than or equal operator * * @tparam CBaseSclar is a scalar type * * @param[in] f_x is the value on the left side * @param[in] f_y is the value on the right side * * @return Returns true if this is greater than or equal to \ref f_x, otherwise returns false. * @note This increments the comparison counter. */ template <class CBaseSclar> inline bool operator >=(CBaseSclar f_x, CFLOPCountingScalar<CBaseSclar> f_y) { return f_y <= f_x; } template <class CBaseSclar> typename CFLOPCountingScalar<CBaseSclar>::_TyCount CFLOPCountingScalar<CBaseSclar>::m_n_add_num = 0; template <class CBaseSclar> typename CFLOPCountingScalar<CBaseSclar>::_TyCount CFLOPCountingScalar<CBaseSclar>::m_n_mul_num = 0; template <class CBaseSclar> typename CFLOPCountingScalar<CBaseSclar>::_TyCount CFLOPCountingScalar<CBaseSclar>::m_n_div_num = 0; template <class CBaseSclar> typename CFLOPCountingScalar<CBaseSclar>::_TyCount CFLOPCountingScalar<CBaseSclar>::m_n_trcd_num = 0; template <class CBaseSclar> typename CFLOPCountingScalar<CBaseSclar>::_TyCount CFLOPCountingScalar<CBaseSclar>::m_n_cmp_num = 0; // values of the counters /** * @brief (integer) absolute value function * @tparam CBaseSclar is a scalar type * @param[in] f_x is value to take absolute value of * @return Returns absolute value of \ref f_x. * @note This increments (perhaps not entirely correctly) the transcendental operation counter. */ template <class CBaseSclar> CFLOPCountingScalar<CBaseSclar> abs(CFLOPCountingScalar<CBaseSclar> f_x) { return CFLOPCountingScalar<CBaseSclar>((CBaseSclar)abs(int(f_x.f_Value())), 1); } /** * @brief (integer) absolute value function * @tparam CBaseSclar is a scalar type * @param[in] f_x is value to take absolute value of * @return Returns absolute value of \ref f_x. 
* @note This increments (perhaps not entirely correctly) the transcendental operation counter. */ template <class CBaseSclar> CFLOPCountingScalar<CBaseSclar> labs(CFLOPCountingScalar<CBaseSclar> f_x) { return CFLOPCountingScalar<CBaseSclar>((CBaseSclar)labs(long(f_x.f_Value())), 1); } /** * @def DECLARE_UNARY_TRANSCENDENTAL_OP * @brief declares unary transcendental operation * @tparam opname is operation name (e.g. sin) */ #define DECLARE_UNARY_TRANSCENDENTAL_OP(opname) \ template <class CBaseSclar> \ CFLOPCountingScalar<CBaseSclar> opname(CFLOPCountingScalar<CBaseSclar> f_x) \ { \ return CFLOPCountingScalar<CBaseSclar>((CBaseSclar)opname(double(f_x.f_Value())), 1); /* did one transcendental op */ \ } /** * @def DECLARE_BINARY_TRANSCENDENTAL_OP * @brief declares binary transcendental operation * @tparam opname is operation name (e.g. atan2) */ #define DECLARE_BINARY_TRANSCENDENTAL_OP(opname) \ template <class CBaseSclar> \ CFLOPCountingScalar<CBaseSclar> opname(CFLOPCountingScalar<CBaseSclar> f_x, CFLOPCountingScalar<CBaseSclar> f_y) \ { \ return CFLOPCountingScalar<CBaseSclar>((CBaseSclar)opname(double(f_x.f_Value()), double(f_y.f_Value())), 1); /* did one transcendental op */ \ } /** * @brief absolute value function * @tparam CBaseSclar is a scalar type * @param[in] f_x is value to take absolute value of * @return Returns absolute value of \ref f_x. * @note This increments (perhaps not entirely correctly) the transcendental operation counter. */ DECLARE_UNARY_TRANSCENDENTAL_OP(fabs) /** * @brief arc-sine function * @tparam CBaseSclar is a scalar type * @param[in] f_x is value of the input argument * @return Returns arc-sine of \ref f_x, expressed in radians. * @note This increments the transcendental operation counter. */ DECLARE_UNARY_TRANSCENDENTAL_OP(asin) /** * @brief arc-cosine function * @tparam CBaseSclar is a scalar type * @param[in] f_x is value of the input argument * @return Returns arc-cosine of \ref f_x, expressed in radians. 
* @note This increments the transcendental operation counter. */ DECLARE_UNARY_TRANSCENDENTAL_OP(acos) /** * @brief arc-tangent function * @tparam CBaseSclar is a scalar type * @param[in] f_x is value of the input argument * @return Returns arc-tangent of \ref f_x, expressed in radians. * @note This increments the transcendental operation counter. */ DECLARE_UNARY_TRANSCENDENTAL_OP(atan) /** * @brief binary arc-tangent function * * @tparam CBaseSclar is a scalar type * * @param[in] f_x is value of the first input argument * @param[in] f_y is value of the second input argument * * @return Returns arc-tangent of \ref f_x / \ref f_y, expressed in radians. * * @note This increments the transcendental operation counter. */ DECLARE_BINARY_TRANSCENDENTAL_OP(atan2) /** * @brief cosine function * @tparam CBaseSclar is a scalar type * @param[in] f_x is value of the input argument, expressed in radians * @return Returns cosine of \ref f_x. * @note This increments the transcendental operation counter. */ DECLARE_UNARY_TRANSCENDENTAL_OP(cos) /** * @brief hyperbolic cosine function * @tparam CBaseSclar is a scalar type * @param[in] f_x is value of the input argument, expressed in radians * @return Returns hyperbolic cosine of \ref f_x. * @note This increments the transcendental operation counter. */ DECLARE_UNARY_TRANSCENDENTAL_OP(cosh) /** * @brief base-e exponential function * @tparam CBaseSclar is a scalar type * @param[in] f_x is value of the input argument * @return Returns natural exponent of \ref f_x. * @note This increments the transcendental operation counter. */ DECLARE_UNARY_TRANSCENDENTAL_OP(exp) /** * @brief floating-point modulo function * * @tparam CBaseSclar is a scalar type * * @param[in] f_x is value of the numerator * @param[in] f_y is value of the denominator * * @return Returns modulo of \ref f_x / \ref f_y. * * @note This increments the transcendental operation counter. 
*/ DECLARE_BINARY_TRANSCENDENTAL_OP(fmod) /** * @brief base-e logarithm function * @tparam CBaseSclar is a scalar type * @param[in] f_x is value of the input argument * @return Returns natural logarithm of \ref f_x. * @note This increments the transcendental operation counter. */ DECLARE_UNARY_TRANSCENDENTAL_OP(log) /** * @brief base-10 logarithm function * @tparam CBaseSclar is a scalar type * @param[in] f_x is value of the input argument * @return Returns base-10 logarithm of \ref f_x. * @note This increments the transcendental operation counter. */ DECLARE_UNARY_TRANSCENDENTAL_OP(log10) /** * @brief raise-to-power function * * @tparam CBaseSclar is a scalar type * * @param[in] f_x is value of the base * @param[in] f_y is value of the exponent * * @return Returns \ref f_x to the power of \ref f_y. * * @note This increments the transcendental operation counter. */ DECLARE_BINARY_TRANSCENDENTAL_OP(pow) /** * @brief sine function * @tparam CBaseSclar is a scalar type * @param[in] f_x is value of the input argument, expressed in radians * @return Returns sine of \ref f_x. * @note This increments the transcendental operation counter. */ DECLARE_UNARY_TRANSCENDENTAL_OP(sin) /** * @brief hyperbolic sine function * @tparam CBaseSclar is a scalar type * @param[in] f_x is value of the input argument, expressed in radians * @return Returns hyperbolic sine of \ref f_x. * @note This increments the transcendental operation counter. */ DECLARE_UNARY_TRANSCENDENTAL_OP(sinh) /** * @brief tangent function * @tparam CBaseSclar is a scalar type * @param[in] f_x is value of the input argument, expressed in radians * @return Returns tangent of \ref f_x. * @note This increments the transcendental operation counter. */ DECLARE_UNARY_TRANSCENDENTAL_OP(tan) /** * @brief hyperbolic tangent function * @tparam CBaseSclar is a scalar type * @param[in] f_x is value of the input argument, expressed in radians * @return Returns hyperbolic tangent of \ref f_x. 
* @note This increments the transcendental operation counter. */ DECLARE_UNARY_TRANSCENDENTAL_OP(tanh) /** * @brief square root function * @tparam CBaseSclar is a scalar type * @param[in] f_x is value of the input argument * @return Returns square root of \ref f_x. * @note This increments the transcendental operation counter. */ DECLARE_UNARY_TRANSCENDENTAL_OP(sqrt) /** * @brief round up function * @tparam CBaseSclar is a scalar type * @param[in] f_x is value of the input argument * @return Returns the closest integer greater than or equal to \ref f_x. * @note This increments the transcendental operation counter. */ DECLARE_UNARY_TRANSCENDENTAL_OP(ceil) /** * @brief round down function * @tparam CBaseSclar is a scalar type * @param[in] f_x is value of the input argument * @return Returns the closest integer smaller than or equal to \ref f_x. * @note This increments the transcendental operation counter. */ DECLARE_UNARY_TRANSCENDENTAL_OP(floor) typedef CFLOPCountingScalar<float> CFLOPCountingFloat; /**< @brief FLOP-counting float */ typedef CFLOPCountingScalar<double> CFLOPCountingDouble; /**< @brief FLOP-counting double */ /** * @page countingflops Counting FLOPs in Sparse Operations * * This example shows how to make use of \ref CFLOPCountingDouble and \ref CTSparse * to count floating point operations (FLOPs) in arbitrary sparse operations. We begin * by including the two necessary files: * * @code * #include "sparse_flops/cts.hpp" * #include "sparse_flops/Instrument.h" * @endcode * * Now it is possible to typedef a flavor of templated CSparse which will count FLOPs: * * @code * typedef CTSparse<CFLOPCountingDouble> CFLOPCountingSparse; * @endcode * * A nice thing about this is that \ref CFLOPCountingDouble is a thin wrapper around * `double` and pointers to the two can be converted (so called type punning). 
This also * allows us to convert between ordinary sparse matrices \ref cs and `CFLOPCountingSparse`: * * @code * cs *A = cs_spalloc(...); * CFLOPCountingSparse *A_instrumented = CFLOPCountingSparse::p_FromSparse(A); // there * * CFLOPCountingSparse *B_instrumented = CFLOPCountingSparse::spalloc(...); * cs *B = CFLOPCountingSparse::p_ToSparse(B_instrumented); // and back * @endcode * * Here, the functions \ref CTSparse::p_FromSparse() and \ref CTSparse::p_ToSparse() * perform the necessary checks to make sure that the conversion is safe, otherwise * such code fails to compile. Note that the pointers `A` and `A_instrumented` * point to the same memory location (equally as `B` and `B_instrumented` do) and * no new memory is allocated. * * Now suppose we want to count the real number of FLOPs in a sparse Cholesky * factorization: * * @code * size_t n_Chol_FLOP_Num(const cs *A, int order = CFLOPCountingSparse::order_AMD_Chol) * { * CFLOPCountingSparse::css *S = CFLOPCountingSparse::schol(order, * CFLOPCountingSparse::p_FromSparse(A)); // calls AMD * * size_t n_before_chol = CFLOPCountingDouble::n_FLOP_Num(); * * CFLOPCountingSparse::csn *N = CFLOPCountingSparse::chol(CFLOPCountingSparse::p_FromSparse(A), S); * * size_t n_flops = CFLOPCountingDouble::n_FLOP_Num() - n_before_chol; * * CFLOPCountingSparse::sfree(S); * CFLOPCountingSparse::nfree(N); * * return n_flops; * } * @endcode * * The first line performs symbolic factorization using \ref CTSparse::schol(). Then, * the counters of floating point operations are sampled, using \ref CFLOPCountingScalar::n_FLOP_Num() * and the numeric Cholesky factorization is performed using \ref CTSparse::chol(). * After that, the difference in the number of FLOPs is taken. Alternatively, one can call * \ref CFLOPCountingScalar::Reset_Counters() before and then directly read out the number of operations * using \ref CFLOPCountingScalar::n_FLOP_Num(). 
* * This has the advantage that it actually calculates the factorization in the process, so it is * fairly easy to instrument existing code this way and it is possible to count FLOPs in iterative * code where the stopping condition depends on the computed values. * * The full code of the example follows: * * @code * #include "sparse_flops/cts.hpp" * #include "sparse_flops/Instrument.h" * * typedef CTSparse<CFLOPCountingDouble> CFLOPCountingSparse; * * cs *p_AllocFull(csi m, csi n, double f_value = 1.0) * { * if(n && m > LONG_MAX / n) * return 0; // would overflow below * cs *p_matrix = cs_spalloc(m, n, m * n, 1, 0); * csi n_off = 0; * for(csi i = 0; i < n; ++ i) { * p_matrix->p[i] = n_off; * for(csi j = 0; j < m; ++ j, ++ n_off) { * p_matrix->i[n_off] = j; * p_matrix->x[n_off] = f_value; * } * } * p_matrix->p[n] = n_off; * return p_matrix; * } * * cs *p_AllocLower(csi m, csi n, double f_value = 1.0) * { * if(n && m > LONG_MAX / n) * return 0; // would overflow below * size_t n_nnz = std::min(m, n) * (std::min(m, n) - 1) / 2 + std::min(m, n) + // the square triangular section * (m - std::min(m, n)) * n; // the bottom side if the matrix is narrow (completely filled) * cs *p_matrix = cs_spalloc(m, n, n_nnz, 1, 0); * csi n_off = 0; * for(csi i = 0; i < n; ++ i) { * p_matrix->p[i] = n_off; * for(csi j = i; j < m; ++ j, ++ n_off) { * p_matrix->i[n_off] = j; * p_matrix->x[n_off] = f_value; * } * } * p_matrix->p[n] = n_off; * _ASSERTE(n_off == n_nnz); * return p_matrix; * } * * size_t n_GEMM_FLOP_Num(const cs *A, const cs *B) * { * size_t n_before = CFLOPCountingDouble::n_FLOP_Num(); * * cs *p_result = CFLOPCountingSparse::p_ToSparse(CFLOPCountingSparse::multiply( * CFLOPCountingSparse::p_FromSparse(A), CFLOPCountingSparse::p_FromSparse(B))); * cs_spfree(p_result); * * return CFLOPCountingDouble::n_FLOP_Num() - n_before; * } * * size_t n_GAXPY_FLOP_Num(const cs *A, const double *x, double *y) * { * size_t n_before = CFLOPCountingDouble::n_FLOP_Num(); * * 
CFLOPCountingSparse::gaxpy(CFLOPCountingSparse::p_FromSparse(A), * (CFLOPCountingDouble*)x, (CFLOPCountingDouble*)y); * * return CFLOPCountingDouble::n_FLOP_Num() - n_before; * } * * size_t n_TRSV_FLOP_Num(const cs *L, double *x) * { * size_t n_before = CFLOPCountingDouble::n_FLOP_Num(); * * CFLOPCountingSparse::lsolve(CFLOPCountingSparse::p_FromSparse(L), (CFLOPCountingDouble*)x); * * return CFLOPCountingDouble::n_FLOP_Num() - n_before; * } * * size_t n_Chol_FLOP_Num(const cs *A, int order = CFLOPCountingSparse::order_AMD_Chol) * { * CFLOPCountingSparse::css *S = CFLOPCountingSparse::schol(order, * CFLOPCountingSparse::p_FromSparse(A)); // calls AMD * * size_t n_before_chol = CFLOPCountingDouble::n_FLOP_Num(); * * CFLOPCountingSparse::csn *N = CFLOPCountingSparse::chol(CFLOPCountingSparse::p_FromSparse(A), S); * * size_t n_flops = CFLOPCountingDouble::n_FLOP_Num() - n_before_chol; * * CFLOPCountingSparse::sfree(S); * CFLOPCountingSparse::nfree(N); * * return n_flops; * } * * size_t n_LU_FLOP_Num(const cs *A, int order = CFLOPCountingSparse::order_AMD_LU) * { * CFLOPCountingSparse::css *S = CFLOPCountingSparse::sqr(order, * CFLOPCountingSparse::p_FromSparse(A), 0); // calls AMD * * size_t n_before_chol = CFLOPCountingDouble::n_FLOP_Num(); * * CFLOPCountingSparse::csn *N = CFLOPCountingSparse::lu(CFLOPCountingSparse::p_FromSparse(A), S, 1e-3); * * size_t n_flops = CFLOPCountingDouble::n_FLOP_Num() - n_before_chol; * * CFLOPCountingSparse::sfree(S); * CFLOPCountingSparse::nfree(N); * * return n_flops; * } * * void Test_SparseOpsCost() * { * cs *A = p_AllocFull(100, 100); * * printf("counting FLOPs in GEMM of two 100 x 100 matrices\n"); * size_t n_GEMM_cost = n_GEMM_FLOP_Num(A, A); * size_t n_GEMM_cost_GT = 100 * 100 * (100 * 2 - 1); // the leading addition is saved * printf("\tground truth FLOPs: " PRIsize "\n", n_GEMM_cost_GT); * printf("\trecorded FLOPs: " PRIsize " (%s)\n", n_GEMM_cost, * (n_GEMM_cost == n_GEMM_cost_GT)? 
"pass" : "FAIL"); * * printf("\ncounting FLOPs in GAXPY of a 100 x 100 matrix and a 100 x 1 vector\n"); * double x[100] = {0}, y[100] = {0}; * size_t n_GAXPY_cost = n_GAXPY_FLOP_Num(A, x, y); * size_t n_GAXPY_cost_GT = 100 * 100 * 2; * printf("\tground truth FLOPs: " PRIsize "\n", n_GAXPY_cost_GT); * printf("\trecorded FLOPs: " PRIsize " (%s)\n", n_GAXPY_cost, * (n_GAXPY_cost == n_GAXPY_cost_GT)? "pass" : "FAIL"); * * for(int i = 0; i < 100; ++ i) * A->x[i * 100 + i] = 10.0; * // make the diagonal a bit larger in order for the matrix to be positive definite * * printf("\ncounting FLOPs in Cholesky of a 100 x 100 matrix\n"); * size_t n_Chol_cost = n_Chol_FLOP_Num(A, CFLOPCountingSparse::order_Natural); * size_t n_Chol_cost_GT = 100 * 100 * 100 / 3 + (100 * (100 - 1)) / 2 + 100; // O(n^3/3 + nnz) * printf("\tground truth FLOPs: " PRIsize "\n", n_Chol_cost_GT); * printf("\trecorded FLOPs: " PRIsize " (%s)\n", n_Chol_cost, * (n_Chol_cost == n_Chol_cost_GT)? "pass" : * (fabs(double(n_Chol_cost - n_Chol_cost_GT) / n_Chol_cost_GT) < 1e-3)? * "pass within 0.1 %" : "FAIL"); // up to 0.1% discrepancy allowed * * cs *L = p_AllocLower(100, 100); * // get a triangular matrix * * printf("\ncounting FLOPs in TRSV of a 100 x 100 lower-triangular matrix and a 100 x 1 vector\n"); * size_t n_TRSV_cost = n_TRSV_FLOP_Num(L, x); * size_t n_TRSV_cost_GT = 100 * 100 / 2 * 2; * printf("\tground truth FLOPs: " PRIsize "\n", n_TRSV_cost_GT); * printf("\trecorded FLOPs: " PRIsize " (%s)\n", n_TRSV_cost, * (n_TRSV_cost == n_TRSV_cost_GT)? "pass" : "FAIL"); * * cs_spfree(A); * cs_spfree(L); * } * @endcode * * Note that here, FLOPs is a plural of FLOP. It does not refer to floating point * operations per second (FLOPS with capital `S'). * */ #endif // !__FLOP_COUNTING_SCALAR_INCLUDED
// ==== File: Searching.202002141745.critical_omp_top_m.h ====
// // Created by Zhen Peng on 11/11/19. // #ifndef BATCH_SEARCHING_SEARCHING_H #define BATCH_SEARCHING_SEARCHING_H #include <vector> #include <boost/dynamic_bitset.hpp> #include <iostream> #include <fstream> #include <unordered_map> #include <immintrin.h> #include <cstring> #include <unordered_set> #include <set> //#include <omp.h> #include "../../include/definitions.h" //#include "../include/efanna2e/neighbor.h" #include "../../include/utils.h" #include "../../include/Candidate.h" #include "../../include/parallelization.h" namespace PANNS { class Searching { //private: public: idi num_v_ = 0; edgei num_e_ = 0; idi num_queries_ = 0; int dimension_ = 0; idi width_ = 0; // NSG largest degree idi ep_ = 0; // Start point // std::vector<dataf> data_load_; // std::vector<dataf> queries_load_; // std::vector< std::vector<dataf> > data_load_; // std::vector< std::vector<dataf> > queries_load_; // std::vector<distf> norms_; dataf *data_load_ = nullptr; dataf *queries_load_ = nullptr; // dataf *norms_; // std::vector< std::vector<idi> > nsg_graph_; // idi *nsg_graph_indices_; // idi *nsg_graph_out_edges_; // std::vector< std::vector<idi> > edge_list_; char *opt_nsg_graph_ = nullptr; uint64_t data_bytes_; uint64_t neighbor_bytes_; uint64_t vertex_bytes_; // For multithreads int num_threads_ = 1; dataf compute_norm( const dataf *data) const; // idi vertex_id); // const std::vector<PANNS::dataf> &data); // size_t loc_start, // idi dimension) dataf compute_distance_with_norm( const dataf *v_data, const dataf *q_data, // idi vertex_id, // idi query_id, // const std::vector<dataf> &d_data, // const std::vector<dataf> &q_data, // PANNS::idi d_start, // PANNS::idi q_start, dataf vertex_norm) const; // idi dimension) static idi insert_into_queue( std::vector<Candidate> &c_queue, idi c_queue_top, Candidate cand); // idi insert_into_queue_nsg( // std::vector< Candidate > &c_queue, // idi c_queue_top, // Candidate cand); public: // For Profiling // L3CacheMissRate cache_miss_kernel; 
uint64_t count_distance_computation_ = 0; ~Searching() { free(data_load_); data_load_ = nullptr; // free(queries_load_); // _mm_free(data_load_); free(queries_load_); queries_load_ = nullptr; // free(norms_); // free(nsg_graph_indices_); // free(nsg_graph_out_edges_); free(opt_nsg_graph_); opt_nsg_graph_ = nullptr; } void load_data_load(char *filename); void load_queries_load(char *filename); void load_nsg_graph(char *filename); // void build_opt_graph(); void prepare_init_ids( std::vector<unsigned> &init_ids, unsigned L) const; // void prepare_candidate_queue_list( // const float *query_load, // std::vector<std::vector<efanna2e::Neighbor> > &retset_list, // std::vector<boost::dynamic_bitset<> > &is_visited_list, // const std::vector<unsigned> &init_ids, // const boost::dynamic_bitset<> &flags, // unsigned batch_start, // unsigned batch_size, // unsigned L); // void search_in_batch( //// const float *query_load, // size_t K, // size_t L, // unsigned batch_start, // unsigned batch_size, // std::vector< std::vector<Candidate> > &set_L_list, // std::vector< boost::dynamic_bitset<> > &is_visited_list, // const std::vector<idi> &init_ids, // const boost::dynamic_bitset<> &is_visited, // std::vector<std::vector<idi> > &set_K_list); void search_in_sequential( idi query_id, idi K, idi L, std::vector<Candidate> &set_L, // boost::dynamic_bitset<> &is_visited, // boost::dynamic_bitset<> is_visited, // std::vector<idi> &init_ids, const std::vector<idi> &init_ids, std::vector<idi> &set_K) const; // idi get_out_degree(idi v_id) const // { // if (v_id < num_v_ - 1) { // return nsg_graph_indices_[v_id + 1] - nsg_graph_indices_[v_id]; // } else { // return num_e_ - nsg_graph_indices_[v_id]; // } // } void search_with_top_m( idi M, idi query_id, idi K, idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K); // std::vector< std::vector<idi> > &top_m_list); void search_with_top_m_in_batch( PANNS::idi M, PANNS::idi batch_start, PANNS::idi 
batch_size, PANNS::idi K, PANNS::idi L, std::vector< std::vector<Candidate> > &set_L_list, const std::vector<idi> &init_ids, std::vector< std::vector<idi> > &set_K_list); void para_search_with_top_m( idi M, idi query_id, idi K, idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K); // void para_prepare_init_ids( // std::vector<unsigned> &init_ids, // unsigned L) const; void load_true_NN( const char *filename, std::vector< std::vector<idi> > &true_nn_list); void get_recall_for_all_queries( const std::vector< std::vector<idi> > &true_nn_list, const std::vector<std::vector<unsigned>> &set_K_list, std::unordered_map<unsigned, double> &recalls) const; }; // Class Searching /** * Input the data from the file. * @param filename */ inline void Searching::load_data_load(char *filename) { auto old_d = dimension_; DiskIO::load_data( filename, data_load_, num_v_, dimension_); if (old_d) { if (old_d != dimension_) { std::cerr << "Error: data dimension " << dimension_ << " is not equal to query dimension " << old_d << "." << std::endl; exit(EXIT_FAILURE); } } } /** * Input queries from the file. * @param filename */ inline void Searching::load_queries_load(char *filename) { auto old_d = dimension_; DiskIO::load_data( filename, queries_load_, num_queries_, dimension_); if (old_d) { if (old_d != dimension_) { std::cerr << "Error: query dimension " << dimension_ << " is not equal to data dimension " << old_d << "." << std::endl; exit(EXIT_FAILURE); } } } /** * Input the NSG graph from the file. * Reference: https://github.com/ZJULearning/nsg/blob/master/src/index_nsg.cpp * @param filename */ inline void Searching::load_nsg_graph(char *filename) { std::ifstream fin(filename); if (!fin.is_open()) { std::cerr << "Error: cannot read file " << filename << " ." 
<< std::endl;
        exit(EXIT_FAILURE);
    }
    // NSG file header: the graph's maximum out-degree, then the entry point id.
    fin.read(reinterpret_cast<char *>(&width_), sizeof(unsigned));
    fin.read(reinterpret_cast<char *>(&ep_), sizeof(unsigned));

    // Per-vertex record layout inside opt_nsg_graph_ (one contiguous block per vertex):
    //   [norm : distf][data : dimension_ * dataf][degree : idi][neighbor ids : width_ slots of idi]
    data_bytes_ = (1 + dimension_) * sizeof(dataf);
    neighbor_bytes_ = (1 + width_) * sizeof(idi);
    vertex_bytes_ = data_bytes_ + neighbor_bytes_;
    opt_nsg_graph_ = (char *) malloc(num_v_ * vertex_bytes_);
    if (!opt_nsg_graph_) {
        std::cerr << "Error: no enough memory for opt_nsg_graph_."
                  << std::endl;
        exit(EXIT_FAILURE);
    }
    idi v_id = 0;
    num_e_ = 0;
    char *base_location = opt_nsg_graph_;
    // Stream the adjacency lists; each entry is a degree followed by that many neighbor ids.
    while (true) {
        idi degree;
        fin.read(reinterpret_cast<char *>(&degree), sizeof(unsigned));
        if (fin.eof()) {
            break;
        }
        num_e_ += degree;
//        std::vector<idi> tmp_ngbrs(degree);
//        fin.read(reinterpret_cast<char *>(tmp_ngbrs.data()), degree * sizeof(unsigned));

        // Norm and data
        // The squared norm is precomputed once per vertex so a query only needs a dot product later.
        distf norm = compute_norm(data_load_ + v_id * dimension_);
//        distf norm = compute_norm(v_id);
        std::memcpy(base_location, &norm, sizeof(distf)); // Norm
        memcpy(base_location + sizeof(distf), data_load_ + v_id * dimension_, dimension_ * sizeof(dataf)); // Data
        base_location += data_bytes_;

        // Neighbors
        memcpy(base_location, &degree, sizeof(idi)); // Number of neighbors
        fin.read(base_location + sizeof(idi), degree * sizeof(unsigned)); // Neighbors
//        memcpy(location + sizeof(idi), tmp_ngbrs.data(), degree * sizeof(unsigned));
        base_location += neighbor_bytes_;
        ++v_id;
    }
    // Sanity check: adjacency-list count must match the vector count read earlier.
    if (v_id != num_v_) {
        std::cerr << "Error: NSG data has " << v_id
                  << " vertices, but origin data has " << num_v_ << " vertices."
<< std::endl; exit(EXIT_FAILURE); } free(data_load_); data_load_ = nullptr; // //////////////////////// // idi v_id = 0; // num_e_ = 0; // while (true) { // idi degree; // fin.read(reinterpret_cast<char *>(&degree), sizeof(unsigned)); // if (fin.eof()) { // break; // } // num_e_ += degree; // // std::vector<idi> ngbrs(degree); // fin.read(reinterpret_cast<char *>(ngbrs.data()), degree * sizeof(unsigned)); //// nsg_graph_.push_back(ngbrs); //// tmp_edge_list.push_back(ngbrs); // edge_list_.push_back(ngbrs); // ++v_id; // } // if (v_id != num_v_) { // std::cerr << "Error: NSG data has " << v_id // << " vertices, but origin data has " << num_v_ << " vertices." << std::endl; // exit(EXIT_FAILURE); // } } /** * Load those true top-K neighbors (ground truth) of queries * @param filename * @param[out] true_nn_list */ inline void Searching::load_true_NN( const char *filename, std::vector< std::vector<idi> > &true_nn_list) // unsigned &t_K) { std::ifstream fin(filename); if (!fin.is_open()) { fprintf(stderr, "Error: cannot open file %s\n", filename); exit(EXIT_FAILURE); } idi t_query_num; idi t_K; // unsigned t_K; fin.read(reinterpret_cast<char *>(&t_query_num), sizeof(t_query_num)); fin.read(reinterpret_cast<char *>(&t_K), sizeof(t_K)); // if (t_query_num != query_num) { // fprintf(stderr, "Error: query_num %u is not equal to the record %u in true-NN file %s\n", // query_num, t_query_num, filename); // exit(EXIT_FAILURE); // } if (t_query_num < num_queries_) { fprintf(stderr, "Error: t_query_num %u is smaller than num_queries_ %u\n", t_query_num, num_queries_); exit(EXIT_FAILURE); } if (t_K < 100) { fprintf(stderr, "Error: t_K %u is smaller than 100.\n", t_K); exit(EXIT_FAILURE); } // data = new unsigned[(size_t) t_query_num * (size_t) t_K]; true_nn_list.resize(t_query_num); for (idi q_i = 0; q_i < t_query_num; ++q_i) { true_nn_list[q_i].resize(t_K); } for (unsigned q_i = 0; q_i < t_query_num; ++q_i) { // size_t offset = q_i * t_K; for (unsigned n_i = 0; n_i < t_K; ++n_i) 
{ unsigned id; float dist; fin.read(reinterpret_cast<char *>(&id), sizeof(id)); fin.read(reinterpret_cast<char *>(&dist), sizeof(dist)); // data[offset + n_i] = id; true_nn_list[q_i][n_i] = id; } } fin.close(); } inline void Searching::get_recall_for_all_queries( const std::vector< std::vector<idi> > &true_nn_list, const std::vector<std::vector<unsigned>> &set_K_list, std::unordered_map<unsigned, double> &recalls) const { // if (t_K < 100) { // fprintf(stderr, "Error: t_K %u is smaller than 100.\n", t_K); // exit(EXIT_FAILURE); // } if (true_nn_list[0].size() < 100) { fprintf(stderr, "Error: Number of true nearest neighbors of a query is smaller than 100.\n"); exit(EXIT_FAILURE); } recalls[5] = 0.0; recalls[10] = 0.0; recalls[20] = 0.0; recalls[50] = 0.0; recalls[100] = 0.0; for (unsigned q_i = 0; q_i < num_queries_; ++q_i) { // size_t offset = q_i * t_K; for (unsigned top_i = 0; top_i < 100; ++top_i) { unsigned true_id = true_nn_list[q_i][top_i]; for (unsigned n_i = 0; n_i < 100; ++n_i) { if (set_K_list[q_i][n_i] == true_id) { if (n_i < 5) recalls[5] += 1; if (n_i < 10) recalls[10] += 1; if (n_i < 20) recalls[20] += 1; if (n_i < 50) recalls[50] += 1; if (n_i < 100) recalls[100] += 1; } } } } recalls[5] /= 5.0 * num_queries_; recalls[10] /= 10.0 * num_queries_; recalls[20] /= 20.0 * num_queries_; recalls[50] /= 50.0 * num_queries_; recalls[100] /= 100.0 * num_queries_; } inline void Searching::search_in_sequential( const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, // boost::dynamic_bitset<> &is_visited, // boost::dynamic_bitset<> is_visited, // std::vector<idi> &init_ids, const std::vector<idi> &init_ids, std::vector<idi> &set_K) const { // std::vector<Candidate> set_L(L+1); // std::vector<idi> init_ids(L); boost::dynamic_bitset<> is_visited(num_v_); for (idi v_i = 0; v_i < L; ++v_i) { is_visited[init_ids[v_i]] = true; } // { // idi *out_edges = (idi *) (opt_nsg_graph_ + ep_ * vertex_bytes_ + data_bytes_); // unsigned out_degree = 
//    unsigned out_degree = *out_edges++;
//    idi tmp_l = 0;
//    for (; tmp_l < L && tmp_l < out_degree; tmp_l++) {
//        init_ids[tmp_l] = out_edges[tmp_l];
//    }
//
//    for (idi i = 0; i < tmp_l; ++i) {
//        is_visited[init_ids[i]] = true;
//    }
//
//    // If ep_'s neighbors are not enough, add other random vertices
//    idi tmp_id = ep_ + 1; // use tmp_id to replace rand().
//    while (tmp_l < L) {
//        tmp_id %= num_v_;
//        unsigned id = tmp_id++;
//        if (is_visited[id]) {
//            continue;
//        }
//        is_visited[id] = true;
//        init_ids[tmp_l] = id;
//        tmp_l++;
//    }
//    }
//    const std::vector<dataf> &query = queries_load_[query_id];
//    std::vector<char> is_checked(L + 1, 0);
//    boost::dynamic_bitset<> is_checked(num_v_);
//    cache_miss_kernel.measure_stop();
//    cache_miss_kernel.measure_start();

    // Query vector for this search.
    const dataf *query_data = queries_load_ + query_id * dimension_;
    // Warm the cache: prefetch every initial candidate's combined [norm|data|neighbors] record.
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
//        _mm_prefetch(reinterpret_cast<char *>(data_load_ + v_id * dimension_), _MM_HINT_T0);
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        // Record begins with the precomputed norm; the data vector follows immediately.
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    // Keep set_L ordered by distance; it acts as a bounded best-first queue of size L.
    std::sort(set_L.begin(), set_L.begin() + L);
//    cache_miss_kernel.measure_stop();
//    cache_miss_kernel.measure_start();
    idi k = 0; // Index of every queue's first unchecked candidate.
    while (k < L) {
        Candidate &top_cand = set_L[k];
        unsigned nk = L; // Lowest queue position touched by an insertion this round (L = none).
        if (!top_cand.is_checked_) {
            top_cand.is_checked_ = true;
            idi v_id = top_cand.id_; // Vertex ID.
            // Neighbor list lives right after the [norm|data] section of the record.
            _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            // Prefetch all neighbor records before computing their distances.
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            // Traverse v_id's all neighbors, pushing them into the queue
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = true;
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                // Compute the distance
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                // Worse than the current L-th best: cannot enter the queue, skip.
                if (dist > set_L[L-1].distance_) {
                    continue;
                }
//                if (dist >= set_L[L-1].distance_) {
//                    continue;
//                }
                Candidate cand(nb_id, dist, false);
                // Insert into the queue
                idi r = insert_into_queue(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
            }
        }
        // Resume from the earliest position that changed, otherwise advance.
        if (nk <= k) {
            k = nk;
        } else {
            ++k;
        }
    }
//    cache_miss_kernel.measure_stop();
    // Emit the ids of the K best candidates found.
    for (size_t k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
}

/**
 * Prepare init_ids and flags, as they are constant for all queries.
 * @param[out] init_ids  Receives exactly L distinct vertex IDs used to seed every search.
 * @param L              Queue length; number of seed IDs to produce.
 */
inline void Searching::prepare_init_ids(
        std::vector<unsigned int> &init_ids,
        unsigned L) const
{
//    idi num_ngbrs = get_out_degree(ep_);
//    edgei edge_start = nsg_graph_indices_[ep_];
//    // Store ep_'s neighbors as candidates
//    idi tmp_l = 0;
//    for (; tmp_l < L && tmp_l < num_ngbrs; tmp_l++) {
//        init_ids[tmp_l] = nsg_graph_out_edges_[edge_start + tmp_l];
//    }
//    std::unordered_set<idi> visited_ids;
    // Deduplication mask over all vertices.
    boost::dynamic_bitset<> is_selected(num_v_);
    // Per-vertex record in opt_nsg_graph_: data_bytes_ of vector payload (norm + data),
    // then out_degree followed by the out-edge IDs.
    // NOTE(review): layout inferred from the offsets used here and in the search
    // routines below -- confirm against the graph-loading code.
    idi *out_edges = (idi *) (opt_nsg_graph_ + ep_ * vertex_bytes_ + data_bytes_);
    idi out_degree = *out_edges++;
    idi init_ids_end = 0;
//    for (; tmp_l < L && tmp_l < out_degree; tmp_l++) {
    // First take the entry point ep_'s out-neighbors (deduplicated), up to L of them.
    for (idi e_i = 0; e_i < out_degree && init_ids_end < L; ++e_i) {
//        idi v_id = out_edges[tmp_l];
        idi v_id = out_edges[e_i];
        if(is_selected[v_id]) {
            continue;
        }
        is_selected[v_id] = true;
//        init_ids[tmp_l] = v_id;
        init_ids[init_ids_end++] = v_id;
//        init_ids[tmp_l] = out_edges[tmp_l];
//        visited_ids.insert(init_ids[tmp_l]);
    }
//    for (idi i = 0; i < tmp_l; ++i) {
//        is_visited[init_ids[i]] = true;
//    }
    // If ep_'s neighbors are not enough, add other random vertices
    idi tmp_id = ep_ + 1; // use tmp_id to replace rand(): deterministic sequential fill.
    // Top up with sequential IDs starting at ep_ + 1 (wrapping around num_v_),
    // skipping IDs already selected, until exactly L seeds are collected.
    while (init_ids_end < L) {
        tmp_id %= num_v_;
        idi v_id = tmp_id++;
        if (is_selected[v_id]) {
            continue;
        }
//        if (visited_ids.find(id) != visited_ids.end()) {
//            continue;
//        }
        is_selected[v_id] = true;
//        visited_ids.insert(id);
        init_ids[init_ids_end++] = v_id;
//        tmp_l++;
    }
}

// TODO: re-code in AVX-512
/**
 * Squared L2 norm of a vector, computed with AVX in 16-float strides.
 * The dimension is rounded up to a multiple of 8; when the rounded dimension is
 * an odd multiple of 8, the trailing 8-float group at e_l is folded in first.
 * NOTE(review): when dimension_ is not a multiple of 8 this reads past the last
 * element of `data` -- presumably vectors are stored padded; confirm.
 */
inline dataf Searching::compute_norm(
        const dataf *data) const
//        idi vertex_id)
//        const std::vector<PANNS::dataf> &data)
//        size_t loc_start,
//        idi dimension)
{
//    const dataf *a = data.data() + loc_start;
//    const dataf *a = data_load_ + vertex_id * dimension_;
//    idi size = dimension_;
    dataf result = 0;
//#define AVX_L2NORM(addr, dest, tmp) \
//    tmp = _mm256_load_ps(addr); \
//    tmp = _mm256_mul_ps(tmp, tmp); \
//    dest = _mm256_add_ps(dest, tmp);
    // dest += addr[0..7]^2 element-wise (unaligned 8-float load).
#define AVX_L2NORM(addr, dest, tmp) \
    tmp = _mm256_loadu_ps(addr); \
    tmp = _mm256_mul_ps(tmp, tmp); \
    dest = _mm256_add_ps(dest, tmp);

    __m256 sum;
    __m256 l0, l1;
    unsigned D = (dimension_ + 7) & ~7U; // dimension_ rounded up to a multiple of 8
    unsigned DR = D % 16;                // 0 or 8: size of the odd trailing group
    unsigned DD = D - DR;                // bulk portion, processed 16 floats per iteration
    const float *l = data;
    const float *e_l = l + DD;
    float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0};

    sum = _mm256_load_ps(unpack); // zero the accumulator (unpack is 32-byte aligned)
//    sum = _mm256_loadu_ps(unpack);

    if (DR) { AVX_L2NORM(e_l, sum, l0); }
    for (unsigned i = 0; i < DD; i += 16, l += 16) {
        AVX_L2NORM(l, sum, l0);
        AVX_L2NORM(l + 8, sum, l1);
    }
    _mm256_store_ps(unpack, sum);
//    _mm256_storeu_ps(unpack, sum);
    // Horizontal reduction of the 8 lanes.
    result = unpack[0] + unpack[1] + unpack[2] + unpack[3]
             + unpack[4] + unpack[5] + unpack[6] + unpack[7];

    return result;
}

/**
 * Distance surrogate between a base vector and the query:
 * returns vertex_norm - 2 * dot(v, q), i.e. the squared Euclidean distance
 * ||v - q||^2 minus the query-norm term ||q||^2, which is constant for a fixed
 * query and therefore does not change the ranking of candidates.
 * @param v_data       base vector (the stored norm word is already skipped by the caller)
 * @param q_data       query vector
 * @param vertex_norm  precomputed squared norm stored with the vertex
 * NOTE(review): same past-the-end read caveat as compute_norm() when dimension_
 * is not a multiple of 8 -- presumably data is padded; confirm.
 */
inline dataf Searching::compute_distance_with_norm(
        const dataf *v_data,
        const dataf *q_data,
//        idi vertex_id,
//        idi query_id,
//        const std::vector<PANNS::dataf> &d_data,
//        const std::vector<PANNS::dataf> &q_data,
//        PANNS::idi d_start,
//        PANNS::idi q_start,
        dataf vertex_norm) const
//        idi dimension)
{
//    idi size = dimension_;
    float result = 0;
//#define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \
//    tmp1 = _mm256_load_ps(addr1);\
//    tmp2 = _mm256_load_ps(addr2);\
//    tmp1 = _mm256_mul_ps(tmp1, tmp2); \
//    dest = _mm256_add_ps(dest, tmp1);
    // dest += addr1[0..7] * addr2[0..7] element-wise (unaligned 8-float loads).
#define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \
    tmp1 = _mm256_loadu_ps(addr1);\
    tmp2 = _mm256_loadu_ps(addr2);\
    tmp1 = _mm256_mul_ps(tmp1, tmp2); \
    dest = _mm256_add_ps(dest, tmp1);

    __m256 sum;
    __m256 l0, l1;
    __m256 r0, r1;
    unsigned D = (dimension_ + 7) & ~7U; // dimension_ rounded up to a multiple of 8
    unsigned DR = D % 16;                // 0 or 8: size of the odd trailing group
    unsigned DD = D - DR;                // bulk portion, processed 16 floats per iteration
    const float *l = v_data;
    const float *r = q_data;
//    const float *l = (float *) (opt_nsg_graph_ + vertex_id * vertex_bytes_ + sizeof(distf));
//    const float *r = queries_load_ + query_id * dimension_;
    const float *e_l = l + DD;
    const float *e_r = r + DD;
    float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0};

    sum = _mm256_load_ps(unpack); // zero the accumulator
//    sum = _mm256_loadu_ps(unpack);

    if (DR) { AVX_DOT(e_l, e_r, sum, l0, r0); }
    for (unsigned i = 0; i < DD; i += 16, l += 16, r += 16) {
        AVX_DOT(l, r, sum, l0, r0);
        AVX_DOT(l + 8, r + 8, sum, l1, r1);
    }
    _mm256_store_ps(unpack, sum);
//    _mm256_storeu_ps(unpack, sum);
    // Horizontal reduction, then fold in the stored vertex norm.
    result = unpack[0] + unpack[1] + unpack[2] + unpack[3]
             + unpack[4] + unpack[5] + unpack[6] + unpack[7];
    result = -2 * result + vertex_norm;

    return result;
}

/**
 * PANNS version of InsertIntoPool(): binary-search to find the insert place and then move.
 * @param[out] c_queue    Candidate queue kept sorted ascending by distance (ties broken by smaller id).
 * @param c_queue_top     Number of live entries in c_queue.
 * @param cand            Candidate to insert.
 * @return  The index where cand was placed, or c_queue_top if it was not inserted.
 *
 * NOTE(review): both memmove calls shift elements into slot [c_queue_top], so the
 * vector must be sized at least c_queue_top + 1 -- confirm callers allocate set_L
 * with one spare slot. The raw memmove also assumes Candidate is trivially
 * copyable -- confirm.
 */
inline idi Searching::insert_into_queue(
        std::vector<PANNS::Candidate> &c_queue,
        PANNS::idi c_queue_top,
        PANNS::Candidate cand)
{
    if (c_queue[0].distance_ > cand.distance_) {
        // If the first: cand beats the current best; shift everything right one slot
        // and place cand at the front.
        memmove(reinterpret_cast<char *>(c_queue.data() + 1),
                reinterpret_cast<char *>(c_queue.data()),
                c_queue_top * sizeof(Candidate));
        c_queue[0] = cand;
        return 0;
    } else if (c_queue[c_queue_top - 1].distance_ == cand.distance_) {
        // If the last entry has exactly cand's distance: at most replace it.
        if (c_queue[c_queue_top - 1].id_ > cand.id_) {
            // Use ID as the second metrics for ordering
            c_queue[c_queue_top - 1] = cand;
            return c_queue_top - 1;
        } else {
            return c_queue_top; // not inserted
        }
    }

    // Binary search for the first entry with distance_ > cand.distance_.
    idi left = 0;
    idi right = c_queue_top;
    while (left < right) {
        idi mid = (right - left) / 2 + left;
        if (c_queue[mid].distance_ > cand.distance_) {
            right = mid;
        } else {
            left = mid + 1;
        }
    }
    // If the distance is the same as neighbors to the left, back up so that
    // equal-distance entries stay ordered by ascending id.
    if (0 != left && c_queue[left - 1].distance_ != cand.distance_) {
        ;
    } else {
        while (0 != left
               && c_queue[left - 1].distance_ == cand.distance_
               && c_queue[left - 1].id_ > cand.id_) {
            // Use ID as the second metrics for ordering
            --left;
        }
    }
    // Insert to left: shift the tail right by one and store cand.
    memmove(reinterpret_cast<char *>(c_queue.data() + left + 1),
            reinterpret_cast<char *>(c_queue.data() + left),
            (c_queue_top - left) * sizeof(Candidate));
    c_queue[left] = cand;
    return left;
}

//inline void Searching::cand_pushes_ngbrs_into_queue(
//        idi cand_id,
//        const dataf *query_data,
//        idi L,
//        idi &new_k,
//        boost::dynamic_bitset<> &is_visited,
//        std::vector<Candidate> &set_L)
//{
//    _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
//    idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
//    idi out_degree = *out_edges++;
//    for (idi n_i = 0; n_i < out_degree; ++n_i) {
//        _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
//    }
//    for (idi e_i = 0; e_i < out_degree; ++e_i) {
//        idi nb_id = out_edges[e_i];
//        if (is_visited[nb_id]) {
//
continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist >= set_L[L-1].distance_) { // continue; // } // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } //} //inline void Searching::search_in_sequential( // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) const //{ // boost::dynamic_bitset<> is_visited(num_v_); // // for (idi v_i = 0; v_i < L; ++v_i) { // is_visited[init_ids[v_i]] = true; // } // const dataf *query_data = queries_load_ + query_id * dimension_; // // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // idi k = 0; // Index of every queue's first unchecked candidate. // while (k < L) { // Candidate &top_cand = set_L[k]; // unsigned nk = L; // if (!top_cand.is_checked_) { // top_cand.is_checked_ = true; // idi v_id = top_cand.id_; // Vertex ID. 
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // // Traverse v_id's all neighbors, pushing them into the queue // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // // Compute the distance // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } // Candidate cand(nb_id, dist, false); // // Insert into the queue // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // if (nk <= k) { // k = nk; // } else { // ++k; // } // } // // for (size_t k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //} // Deprecated: cannot use std::set, because its element is constant. //inline void Searching::search_in_sequential( // const idi query_id, // const idi K, // const idi L, //// std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) const //{ // std::set<Candidate> set_L; // boost::dynamic_bitset<> is_visited(num_v_); // // for (idi v_i = 0; v_i < L; ++v_i) { // is_visited[init_ids[v_i]] = true; // } // const dataf *query_data = queries_load_ + query_id * dimension_; // // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. 
// for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // distf dist = compute_distance_with_norm(v_data, query_data, norm); //// set_L[i] = Candidate(v_id, dist, false); // False means not checked. // set_L.emplace(v_id, dist, false); // } //// std::sort(set_L.begin(), set_L.begin() + L); // idi k = 0; // Index of every queue's first unchecked candidate. // while (k < L) { //// Candidate &top_cand = set_L[k]; // std::set<Candidate>::iterator top_cand = std::next(set_L.begin(), k); // unsigned nk = L; // if (!top_cand->is_checked_) { // top_cand->is_checked_ = true; // idi v_id = top_cand.id_; // Vertex ID. // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // // Traverse v_id's all neighbors, pushing them into the queue // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // // Compute the distance // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } // Candidate cand(nb_id, dist, false); // // Insert into the queue // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // if (nk <= k) { // k = nk; // } else { // ++k; // } // } // // for (size_t k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //} inline void Searching::search_with_top_m( const PANNS::idi M, const PANNS::idi query_id, const PANNS::idi K, const PANNS::idi L, 
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
//        std::vector< std::vector<idi> > &top_m_list)
{
    // Best-first greedy search that expands up to M unchecked candidates per
    // iteration instead of one. set_L is the working queue (length L, sorted
    // ascending by distance); the top K ids are copied into set_K at the end.
    boost::dynamic_bitset<> is_visited(num_v_);

    {
        // Seeds count as visited so they are never re-inserted.
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = true;
        }
    }
    const dataf *query_data = queries_load_ + query_id * dimension_;
    // Warm the cache with the seed vertices' records.
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++; // first word of the record is the precomputed norm
//        ++count_distance_computation;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);
    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    while (k < L) {
        ++tmp_count;
        unsigned nk = L; // highest (best) insert position produced this round
        // Select M candidates
        idi last_k = L;
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }
//        {//test
//            printf("tmp_count: %u\n", tmp_count);
//            for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
//                printf("top_m_candidates[%u]: %u\n",
//                        c_i,
//                        top_m_candidates[c_i]);
//            }
//            if (3 == tmp_count) {
//                exit(1);
//            }
//            if (3 == tmp_count) {
//                printf("top_m_candidates[76]: %u\n",
//                        top_m_candidates[76]);
//            }
//        }
//        if (top_m_candidates_end) {
//            std::vector<idi> tmp_top_m(top_m_candidates_end);
//            tmp_top_m.assign(top_m_candidates.begin(), top_m_candidates.begin() + top_m_candidates_end);
//            top_m_list.push_back(tmp_top_m);
//        } else {
//            break;
//        }
        // Push M candidates' neighbors into the queue.
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            idi cand_id = top_m_candidates[c_i];
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            // Prefetch all neighbor records before the distance loop touches them.
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
//                {//test
//                    if (793600 == cand_id) {
//                        printf("e_i: %u "
//                               "nb_id: %u\n",
//                                e_i,
//                                nb_id);
//                    }
//                }
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = true;
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
//                ++count_distance_computation;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                // Prune neighbors worse than the current queue tail.
                if (dist > set_L[L-1].distance_) {
                    continue;
                }
//                if (dist >= set_L[L-1].distance_) {
//                    continue;
//                }
                Candidate cand(nb_id, dist, false);
                idi r = insert_into_queue(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
            }
        }
        top_m_candidates_end = 0; // Clear top_m_candidates
        // Resume from the best newly-inserted position, or just past the last
        // candidate expanded this round if nothing improved above it.
        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
//        /////////////////////////////////////////
//        Candidate &top_cand = set_L[k];
//        if (!top_cand.is_checked_) {
//            top_cand.is_checked_ = true;
//            idi v_id = top_cand.id_; // Vertex ID.
//            _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
//            idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
//            idi out_degree = *out_edges++;
//            for (idi n_i = 0; n_i < out_degree; ++n_i) {
//                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
//            }
//            for (idi e_i = 0; e_i < out_degree; ++e_i) {
//                idi nb_id = out_edges[e_i];
//                if (is_visited[nb_id]) {
//                    continue;
//                }
//                is_visited[nb_id] = true;
//                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
//                dataf norm = *nb_data++;
//                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
//                if (dist >= set_L[L-1].distance_) {
//                    continue;
//                }
//                Candidate cand(nb_id, dist, false);
//                idi r = insert_into_queue(set_L, L, cand);
//                if (r < nk) {
//                    nk = r;
//                }
//            }
//        }
//        if (nk <= k) {
//            k = nk;
//        } else {
//            ++k;
//        }
    }

    // Export the K best ids.
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
//
//    {//test
//        for (idi k_i = 0; k_i < K; ++k_i) {
//            printf("%u: %u: %u %f\n",
//                    query_id,
//                    k_i, set_L[k_i].id_, set_L[k_i].distance_);
//        }
//        exit(1);
//    }
}

/**
 * Batched top-M search: runs up to batch_size queries at once so that a vertex
 * selected by several queries is expanded (its edge list walked) only once.
 */
inline void Searching::search_with_top_m_in_batch(
        const PANNS::idi M,
        const PANNS::idi batch_start,
        const PANNS::idi batch_size,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector< std::vector<Candidate> > &set_L_list,
        const std::vector<idi> &init_ids,
        std::vector< std::vector<idi> > &set_K_list)
{
    // One visited-bitmap per query in the batch.
    std::vector< boost::dynamic_bitset<> > is_visited_list(batch_size, boost::dynamic_bitset<> (num_v_));

    // Prepare the init_ids
    {
//#pragma omp parallel for
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            auto &is_visited = is_visited_list[q_i];
            for (idi c_i = 0; c_i < L; ++c_i) {
                is_visited[init_ids[c_i]] = true;
            }
        }
    }
    // Initialize set_L_list
    {
//#pragma omp parallel for
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            const dataf *query_data =
                    queries_load_ + (q_i + batch_start) * dimension_;
            // Seed each query's queue with the distances to the init vertices.
            for (idi i = 0; i < L; i++) {
                idi v_id = init_ids[i];
                auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
                dataf norm = *v_data++; // first word of the record is the precomputed norm
//                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(v_data, query_data, norm);
                set_L_list[q_i][i] = Candidate(v_id, dist, false); // False means not checked.
            }
            std::sort(set_L_list[q_i].begin(), set_L_list[q_i].begin() + L);
        }
    }

    {
        std::vector<idi> joint_queue(M * batch_size); // Joint queue for all shared top-M candidates
        idi joint_queue_end = 0;
        boost::dynamic_bitset<> is_in_joint_queue(num_v_); // membership flag for joint_queue
//        std::vector< std::vector<idi> > cands_query_ids(num_v_, std::vector<idi>(batch_size)); // If candidate cand_id is selected by query q_i, q_i should be in cands_query_ids[cand_id].
//        std::vector<idi> cands_query_ids_ends(num_v_, 0);
        // cand_id -> list of local query ids that selected cand_id this round.
        std::unordered_map< idi, std::vector<idi> > cands_query_ids(batch_size * M);
        std::vector<idi> ks(batch_size, 0); // Indices of every queue's first unchecked candidate.
        std::vector<idi> nks(batch_size, L); // Indices of highest candidate inserted
        std::vector<idi> last_ks(batch_size, L); // Indices of lowest candidate unchecked
        std::vector<idi> queries_not_finished(batch_size);
        idi queries_not_finished_end = batch_size;
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            queries_not_finished[q_i] = q_i;
        }
        bool is_finished = false;

        idi counter_for_debug = 0;
        while (!is_finished) {
            ++counter_for_debug;
            // Build the new joint queue
            // Traverse every query's queue
            for (idi q_i = 0; q_i < queries_not_finished_end; ++q_i) {
                idi q_local_id = queries_not_finished[q_i];
//                last_ks[q_local_id] = L;
                auto &set_L = set_L_list[q_local_id];
                idi top_m_count = 0;
                for (idi c_i = ks[q_local_id]; c_i < L && top_m_count < M; ++c_i) {
                    if (set_L[c_i].is_checked_) {
                        continue;
                    }
                    set_L[c_i].is_checked_ = true;
                    last_ks[q_local_id] = c_i;
                    ++top_m_count;
                    idi cand_id = set_L[c_i].id_;
                    // Record which query selected cand_id
                    auto tmp_c = cands_query_ids.find(cand_id);
                    if (tmp_c != cands_query_ids.end()) {
                        tmp_c->second.push_back(q_local_id);
                    } else {
                        cands_query_ids.emplace(cand_id, std::vector<idi>());
                        cands_query_ids[cand_id].reserve(batch_size);
                        cands_query_ids[cand_id].push_back(q_local_id);
                    }
//                    cands_query_ids[cand_id][cands_query_ids_ends[cand_id]++] = q_local_id;
                    // Add candidate cand_id into the joint queue (deduplicated).
                    if (is_in_joint_queue[cand_id]) {
                        continue;
                    }
                    is_in_joint_queue[cand_id] = true;
                    joint_queue[joint_queue_end++] = cand_id;
                }
            }
            queries_not_finished_end = 0; // Clear queries_not_finished

            // Traverse every shared candidate
            for (idi c_i = 0; c_i < joint_queue_end; ++c_i) {
                idi cand_id = joint_queue[c_i];
                is_in_joint_queue[cand_id] = false; // Reset is_in_joint_queue
                idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
                idi out_degree = *out_edges++; // edge list read once, shared by all selecting queries
                const auto &query_local_ids = cands_query_ids[cand_id];
                // Push neighbors to every queue of the queries that selected cand_id.
                // Traverse cand_id's neighbors
//                idi &q_i_bound = cands_query_ids_ends[cand_id];
//                for (idi q_i = 0; q_i < q_i_bound; ++q_i) {
//                    idi q_local_id = query_local_ids[q_i];
                for (idi q_local_id : query_local_ids) {
                    dataf *query_data = queries_load_ + (q_local_id + batch_start) * dimension_;
                    auto &is_visited = is_visited_list[q_local_id];
                    auto &set_L = set_L_list[q_local_id];
//                    // Traverse cand_id's neighbors
                    for (idi e_i = 0; e_i < out_degree; ++e_i) {
                        idi nb_id = out_edges[e_i];
                        if (is_visited[nb_id]) {
                            continue;
                        }
                        is_visited[nb_id] = true;
                        auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                        dataf norm = *nb_data++;
//                        ++count_distance_computation_;
                        distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                        // Prune neighbors worse than this query's queue tail.
                        if (dist > set_L[L-1].distance_) {
                            continue;
                        }
//                        if (dist >= set_L[L-1].distance_) {
//                            continue;
//                        }
                        Candidate new_cand(nb_id, dist, false);
                        idi insert_loc = insert_into_queue(set_L, L, new_cand);
                        if (insert_loc < nks[q_local_id]) {
                            nks[q_local_id] = insert_loc;
                        }
                    }
                }
                cands_query_ids.erase(cand_id);
//                q_i_bound = 0; // Clear cands_query_ids[cand_id]
            }
            joint_queue_end = 0; // Clear joint_queue

            // Advance each query's cursor; a query is finished once its cursor
            // reaches the end of its queue.
            for (idi q_local_id = 0; q_local_id < batch_size; ++q_local_id) {
                if (nks[q_local_id] <= last_ks[q_local_id]) {
                    ks[q_local_id] = nks[q_local_id];
                } else {
                    ks[q_local_id] = last_ks[q_local_id] + 1;
                }
                nks[q_local_id] = L;
                last_ks[q_local_id] = L;
                if (ks[q_local_id] < L) {
                    queries_not_finished[queries_not_finished_end++] = q_local_id;
                }
            }
            if (!queries_not_finished_end) {
                is_finished = true;
            }
        }
    }

    {
        // Export every query's K best ids.
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            for (idi c_i = 0; c_i < K && c_i < L; ++c_i) {
                set_K_list[q_i + batch_start][c_i] = set_L_list[q_i][c_i].id_;
            }
        }
    }
////
//    {//test
//        for (idi q_i = 0; q_i < batch_size; ++q_i) {
//            printf("query: %u\n", q_i + batch_start);
//            for (idi c_i = 0; c_i < K; ++c_i) {
//                printf("%u: %u %f\n", c_i, set_L_list[q_i][c_i].id_, set_L_list[q_i][c_i].distance_);
//            }
//        }
//    }
}

inline void
// Parallel-ready variant of search_with_top_m: visited flags are plain bytes
// claimed with a CAS so neighbor expansion can run under OpenMP. The pragmas
// are currently commented out, so as written it executes sequentially.
Searching::para_search_with_top_m(
        const PANNS::idi M,
        const PANNS::idi query_id,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
//        std::vector< std::vector<idi> > &top_m_list)
{
    // uint8_t per vertex (not a bitset) so each flag can be CAS'd independently.
    std::vector<uint8_t> is_visited(num_v_, 0);
//    boost::dynamic_bitset<> is_visited(num_v_);

    {
//#pragma omp parallel for
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = 1;
        }
    }
    const dataf *query_data = queries_load_ + query_id * dimension_;
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
//        ++count_distance_computation;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);
    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    while (k < L) {
        ++tmp_count;
        unsigned nk = L;
//        int nk = L;
        // Select M candidates
        idi last_k = L;
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }
        // Push M candidates' neighbors into the queue.
        // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT.
//#pragma omp parallel for
//#pragma omp parallel for reduction(min : nk)
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            idi cand_id = top_m_candidates[c_i];
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
//                if (is_visited[nb_id]) {
//                    continue;
//                }
//                is_visited[nb_id] = 1;
                // Atomically claim the neighbor; exactly one thread wins the 0 -> 1
                // transition, so each vertex is processed once even in parallel.
                if (!AtomicOps::CAS(is_visited.data() + nb_id,
                                    static_cast<uint8_t>(0),
                                    static_cast<uint8_t>(1))) {
                    continue;
                }
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
//                ++count_distance_computation;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                if (dist > set_L[L-1].distance_) {
                    continue;
                }
//                if (dist >= set_L[L-1].distance_) {
//                    continue;
//                }
                Candidate cand(nb_id, dist, false);
                idi r;
//#pragma omp critical
                // Queue insertion would need the critical section above when the
                // parallel pragmas are re-enabled.
                {
                    r = insert_into_queue(set_L, L, cand);
                    if (r < nk) {
                        nk = r;
                    }
                }
            }
        }
        top_m_candidates_end = 0; // Clear top_m_candidates
        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
    }

    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
//
//    {//test
//        for (idi k_i = 0; k_i < K; ++k_i) {
//            printf("%u: %u: %u %f\n",
//                    query_id,
//                    k_i, set_L[k_i].id_, set_L[k_i].distance_);
//        }
//        exit(1);
//    }
}

// DEPRECATED. Not enough workload for OpenMP, and hard to implement efficiently.
///**
// * Prepare init_ids and flags, as they are constant for all queries.
// * @param[out] init_ids // * @param L // */ //inline void Searching::para_prepare_init_ids( // std::vector<unsigned int> &init_ids, // unsigned L) const //{ //// idi num_ngbrs = get_out_degree(ep_); //// edgei edge_start = nsg_graph_indices_[ep_]; //// // Store ep_'s neighbors as candidates //// idi tmp_l = 0; //// for (; tmp_l < L && tmp_l < num_ngbrs; tmp_l++) { //// init_ids[tmp_l] = nsg_graph_out_edges_[edge_start + tmp_l]; //// } //// std::unordered_set<idi> visited_ids; // std::vector<uint8_t> is_selected(num_v_, 0); //// boost::dynamic_bitset<> is_selected(num_v_); // idi *out_edges = (idi *) (opt_nsg_graph_ + ep_ * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // idi init_ids_end = 0; //// idi e_i_bound = out_degree <= L ? out_degree : L; //#pragma omp parallel for // for (idi e_i = 0; e_i < out_degree && init_ids_end < L; ++e_i) { //// for (idi e_i = 0; e_i < e_i_bound; ++e_i) { // idi v_id = out_edges[e_i]; //// if(is_selected[v_id]) { //// continue; //// } //// is_selected[v_id] = 1; // // if (!AtomicOps::CAS(is_selected.data() + v_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } // //// init_ids[init_ids_end++] = v_id; // volatile idi old_v = init_ids_end; // volatile idi new_v = old_v + 1; // while (!AtomicOps::CAS(&init_ids_end, old_v, new_v)) { // old_v = init_ids_end; // new_v = old_v + 1; // } // init_ids[old_v] = v_id; // } // //// for (idi i = 0; i < tmp_l; ++i) { //// is_visited[init_ids[i]] = true; //// } // // // If ep_'s neighbors are not enough, add other random vertices // idi tmp_id = ep_ + 1; // use tmp_id to replace rand(). // while (init_ids_end < L) { // tmp_id %= num_v_; // idi v_id = tmp_id++; // if (is_selected[v_id]) { // continue; // } //// if (visited_ids.find(id) != visited_ids.end()) { //// continue; //// } // is_selected[v_id] = 1; //// visited_ids.insert(id); // init_ids[init_ids_end++] = v_id; //// tmp_l++; // } //} } // namespace PANNS #endif //BATCH_SEARCHING_SEARCHING_H
GB_unop__exp2_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// Fixes belong in the generator template, not here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__exp2_fp32_fp32
// op(A') function:  GB_unop_tran__exp2_fp32_fp32

// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = exp2f (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = exp2f (x) ;

// casting
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    float aij = Ax [pA] ;           \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ;                 \
    Cx [pC] = exp2f (z) ;           \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_EXP2 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__exp2_fp32_fp32
(
    float *Cx,               // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // p is declared outside the loops for the OpenMP parallel-for clauses.
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: all anz entries are present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
            GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (p = 0 ; p < anz ; p++)
            {
                float aij = Ax [p] ;
                float z = aij ;
                Cx [p] = exp2f (z) ;
            }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;   // skip entries not present in the bitmap
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = exp2f (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__exp2_fp32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the template consumes the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__land_uint8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// Fixes belong in the generator template, not here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__land_uint8
// A.*B function (eWiseMult):       GB_AemultB__land_uint8
// A*D function (colscale):         GB_AxD__land_uint8
// D*A function (rowscale):         GB_DxB__land_uint8
// C+=B function (dense accum):     GB_Cdense_accumB__land_uint8
// C+=b function (dense accum):     GB_Cdense_accumb__land_uint8
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__land_uint8
// C=scalar+B                       GB_bind1st__land_uint8
// C=scalar+B'                      GB_bind1st_tran__land_uint8
// C=A+scalar                       GB_bind2nd__land_uint8
// C=A'+scalar                      GB_bind2nd_tran__land_uint8

// C type:   uint8_t
// A type:   uint8_t
// B,b type: uint8_t
// BinaryOp: cij = ((aij != 0) && (bij != 0))

// The GB_* macros below are consumed by the #include'd template files.

#define GB_ATYPE \
    uint8_t

#define GB_BTYPE \
    uint8_t

#define GB_CTYPE \
    uint8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint8_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = ((x != 0) && (y != 0)) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LAND || GxB_NO_UINT8 || GxB_NO_LAND_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// disabled for LAND: this kernel is only generated for arithmetic ops
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__land_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__land_uint8
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__land_uint8
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable -- the block above always returns first; this is a
    // harmless artifact of the code generator.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__land_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    bool A_is_pattern,
    const GrB_Matrix D,
    bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__land_uint8
(
    GrB_Matrix C,
    const GrB_Matrix D,
    bool D_is_pattern,
    const GrB_Matrix B,
    bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// frees the ek_slice workspaces on every exit path of the template
#undef  GB_FREE_ALL
#define GB_FREE_ALL                                                     \
{                                                                       \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;  \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;  \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;  \
}

GrB_Info GB_AaddB__land_uint8
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL,
*klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__land_uint8 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__land_uint8 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = Bx [p] ; Cx [p] = ((x != 0) && (bij != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with 
scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__land_uint8 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = Ax [p] ; Cx [p] = ((aij != 0) && (y != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = ((x != 0) && (aij != 0)) ; \ } GrB_Info GB_bind1st_tran__land_uint8 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = ((aij != 0) && (y != 0)) ; \ } GrB_Info GB_bind2nd_tran__land_uint8 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_unaryop__abs_uint8_uint16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_uint8_uint16
// op(A') function:  GB_tran__abs_uint8_uint16

// C type:   uint8_t
// A type:   uint16_t
// cast:     uint8_t cij = (uint8_t) aij
// unaryop:  cij = aij
// (ABS of an unsigned value is the identity, so the operator reduces to a copy;
// the typecast uint16_t -> uint8_t still truncates to the low 8 bits)

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, aij) \
    uint8_t z = (uint8_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_UINT8 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__abs_uint8_uint16
(
    uint8_t *Cx,        // Cx and Ax may be aliased
    uint16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__abs_uint8_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
csrcsc_task.h
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT license. #pragma once #include <cstring> #include "pointers/pointer.h" #include "tasks/task.h" namespace flash { class BlockCsrCscTask : public BaseTask { FBLAS_UINT pdim; FBLAS_UINT nnzs; SparseBlock A_blk; SparseBlock A_tr_blk; public: BlockCsrCscTask(SparseBlock A_block, SparseBlock A_tr_block) : A_blk(A_block), A_tr_blk(A_tr_block) { this->pdim = std::max(A_blk.nrows, A_blk.ncols); nnzs = A_blk.offs[A_blk.blk_size] - A_blk.offs[0]; GLOG_INFO("will transpose nnzs=", nnzs, ", starting at row=", A_blk.start); StrideInfo sinfo; sinfo.n_strides = 1; sinfo.stride = 0; // reads & writes for `column indices` sinfo.len_per_stride = nnzs * sizeof(MKL_INT); this->add_read(A_blk.idxs_fptr, sinfo); this->add_write(A_tr_blk.idxs_fptr, sinfo); // reads & writes for `matrix values` sinfo.len_per_stride = nnzs * sizeof(FPTYPE); this->add_read(A_blk.vals_fptr, sinfo); this->add_write(A_tr_blk.vals_fptr, sinfo); } void execute() { // extract matrix dimensions FBLAS_UINT A_cols = A_blk.ncols; FBLAS_UINT A_rows = A_blk.nrows; MKL_INT *input_offs = new MKL_INT[pdim + 1]; MKL_INT *output_offs = new MKL_INT[pdim + 1]; for (FBLAS_UINT i = 0; i <= A_blk.blk_size; i++) { input_offs[i] = A_blk.offs[i] - A_blk.offs[0]; } // expand csr matrix with zero rows for (FBLAS_UINT i = A_blk.blk_size + 1; i <= pdim; i++) { input_offs[i] = input_offs[A_blk.blk_size]; } mkl_set_num_threads_local(CSRCSC_MKL_NTHREADS); SparseBlock A_pblk(A_blk), A_tr_pblk(A_tr_blk); A_pblk.offs = input_offs; A_tr_pblk.offs = output_offs; // fill in-memory pointers into blocks fill_sparse_block_ptrs(this->in_mem_ptrs, A_pblk); fill_sparse_block_ptrs(this->in_mem_ptrs, A_tr_pblk); // prepare MKL call MKL_INT job[6] = {0, 0, 0, -1, -1, 1}; MKL_INT dim = pdim; MKL_INT info = -1; // not used // make MKL call mkl_csrcsc(job, &dim, A_pblk.vals_ptr, A_pblk.idxs_ptr, A_pblk.offs, A_tr_pblk.vals_ptr, A_tr_pblk.idxs_ptr, A_tr_pblk.offs, &info); 
// add A_blk.start to `A_pblk.idxs_ptr` #pragma omp parallel for num_threads(CSRCSC_MKL_NTHREADS) for (FBLAS_UINT i = 0; i < nnzs; i++) { A_tr_pblk.idxs_ptr[i] += A_blk.start; } memcpy(A_tr_blk.offs, A_tr_pblk.offs, (A_cols + 1) * sizeof(MKL_INT)); delete[] A_pblk.offs; delete[] A_tr_pblk.offs; GLOG_ASSERT(A_tr_blk.offs[A_cols] == nnzs, "bad csrcsc params:input nnzs=", nnzs, ", output nnzs=", A_tr_blk.offs[A_cols]); GLOG_INFO("transposed:nnzs=", A_tr_blk.offs[A_cols]); } // DEPRECATED FBLAS_UINT size() { return (1 << 20); } }; // Horizontally merge [column join] CSR matrices into one CSR matrix class BlockMergeTask : public BaseTask { SparseBlock A_blk; std::vector<SparseBlock> A_blks; public: BlockMergeTask(SparseBlock A_block, std::vector<SparseBlock> A_blocks) : A_blk(A_block) { A_blks.reserve(A_blocks.size()); FBLAS_UINT total_nnzs = A_blk.offs[A_blk.blk_size] - A_blk.offs[0]; GLOG_INFO("merging nnzs=", total_nnzs); StrideInfo sinfo = {1, 1, 1}; sinfo.len_per_stride = total_nnzs * sizeof(MKL_INT); this->add_write(A_blk.idxs_fptr, sinfo); sinfo.len_per_stride = total_nnzs * sizeof(FPTYPE); this->add_write(A_blk.vals_fptr, sinfo); FBLAS_UINT got_nnzs = 0; for (auto blk : A_blocks) { FBLAS_UINT blk_nnzs = blk.offs[blk.blk_size] - blk.offs[0]; got_nnzs += blk_nnzs; if (blk_nnzs == 0) { GLOG_WARN("ignoring 0-block in merge"); continue; } this->A_blks.push_back(blk); sinfo.len_per_stride = blk_nnzs * sizeof(MKL_INT); this->add_read(blk.idxs_fptr, sinfo); sinfo.len_per_stride = blk_nnzs * sizeof(FPTYPE); this->add_read(blk.vals_fptr, sinfo); } GLOG_ASSERT(got_nnzs == total_nnzs, " expected nnzs=", total_nnzs, ", got nnzs=", got_nnzs); } void execute() { // fill in sparse blocks fill_sparse_block_ptrs(this->in_mem_ptrs, A_blk); for (auto &blk : A_blks) { fill_sparse_block_ptrs(this->in_mem_ptrs, blk); } #pragma omp parallel for schedule(dynamic, 1) num_threads(CSRCSC_MKL_NTHREADS) for (FBLAS_UINT row = 0; row < A_blk.blk_size; row++) { FBLAS_UINT fill_offset = 
(A_blk.offs[row] - A_blk.offs[0]); for (auto &blk : A_blks) { FBLAS_UINT read_offset = (blk.offs[row] - blk.offs[0]); FBLAS_UINT nnzs_in_blk = (blk.offs[row + 1] - blk.offs[row]); memcpy(A_blk.idxs_ptr + fill_offset, blk.idxs_ptr + read_offset, nnzs_in_blk * sizeof(MKL_INT)); memcpy(A_blk.vals_ptr + fill_offset, blk.vals_ptr + read_offset, nnzs_in_blk * sizeof(FPTYPE)); fill_offset += nnzs_in_blk; } FBLAS_UINT expected_nnzs_in_row = (A_blk.offs[row + 1] - A_blk.offs[row]); FBLAS_UINT got_nnzs_in_row = fill_offset - (A_blk.offs[row] - A_blk.offs[0]); GLOG_ASSERT(expected_nnzs_in_row == got_nnzs_in_row, ", expected to fill ", expected_nnzs_in_row, ", filled only ", got_nnzs_in_row); } } // DEPRECATED FBLAS_UINT size() { return (1 << 20); } }; } // namespace flash
base_mortar_criteria.h
// KRATOS ___| | | | // \___ \ __| __| | | __| __| | | __| _` | | // | | | | | ( | | | | ( | | // _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS // // License: BSD License // license: StructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_BASE_MORTAR_CRITERIA_H) #define KRATOS_BASE_MORTAR_CRITERIA_H /* System includes */ /* External includes */ /* Project includes */ #include "contact_structural_mechanics_application_variables.h" #include "custom_utilities/contact_utilities.h" #include "utilities/mortar_utilities.h" #include "utilities/variable_utils.h" #include "custom_processes/aalm_adapt_penalty_value_process.h" #include "custom_processes/compute_dynamic_factor_process.h" #include "solving_strategies/convergencecriterias/convergence_criteria.h" // DEBUG #include "includes/gid_io.h" namespace Kratos { ///@addtogroup ContactStructuralMechanicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class BaseMortarConvergenceCriteria * @ingroup ContactStructuralMechanicsApplication * @brief Custom convergence criteria for the mortar condition * @author Vicente Mataix Ferrandiz */ template<class TSparseSpace, class TDenseSpace> class BaseMortarConvergenceCriteria : public ConvergenceCriteria< TSparseSpace, TDenseSpace > { public: ///@name Type Definitions ///@{ /// Pointer definition of BaseMortarConvergenceCriteria KRATOS_CLASS_POINTER_DEFINITION( BaseMortarConvergenceCriteria ); /// Local Flags KRATOS_DEFINE_LOCAL_FLAG( COMPUTE_DYNAMIC_FACTOR ); KRATOS_DEFINE_LOCAL_FLAG( IO_DEBUG ); KRATOS_DEFINE_LOCAL_FLAG( PURE_SLIP ); /// The base class definition (and it subclasses) typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename 
BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; /// The sparse space used typedef TSparseSpace SparseSpaceType; /// The components containers typedef ModelPart::ConditionsContainerType ConditionsArrayType; typedef ModelPart::NodesContainerType NodesArrayType; typedef GidIO<> GidIOBaseType; ///@} ///@name Life Cycle ///@{ /// Default constructors explicit BaseMortarConvergenceCriteria( const bool ComputeDynamicFactor = false, const bool IODebug = false, const bool PureSlip = false ) : ConvergenceCriteria< TSparseSpace, TDenseSpace >(), mpIO(nullptr) { // Set local flags mOptions.Set(BaseMortarConvergenceCriteria::COMPUTE_DYNAMIC_FACTOR, ComputeDynamicFactor); mOptions.Set(BaseMortarConvergenceCriteria::IO_DEBUG, IODebug); mOptions.Set(BaseMortarConvergenceCriteria::PURE_SLIP, PureSlip); if (mOptions.Is(BaseMortarConvergenceCriteria::IO_DEBUG)) { mpIO = Kratos::make_shared<GidIOBaseType>("POST_LINEAR_ITER", GiD_PostBinary, SingleFile, WriteUndeformed, WriteElementsOnly); } } ///Copy constructor BaseMortarConvergenceCriteria( BaseMortarConvergenceCriteria const& rOther ) :BaseType(rOther), mOptions(rOther.mOptions), mpIO(rOther.mpIO) { } /// Destructor ~BaseMortarConvergenceCriteria() override = default; ///@} ///@name Operators ///@{ /** * @brief Criterias that need to be called before getting the solution * @param rModelPart Reference to the ModelPart containing the contact problem. 
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) * @return true if convergence is achieved, false otherwise */ bool PreCriteria( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { // The current process info ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); // The contact model part ModelPart& r_contact_model_part = rModelPart.GetSubModelPart("Contact"); // We update the normals if necessary const auto normal_variation = r_process_info.Has(CONSIDER_NORMAL_VARIATION) ? static_cast<NormalDerivativesComputation>(r_process_info.GetValue(CONSIDER_NORMAL_VARIATION)) : NO_DERIVATIVES_COMPUTATION; if (normal_variation != NO_DERIVATIVES_COMPUTATION) { ComputeNodesMeanNormalModelPartWithPairedNormal(rModelPart); // Update normal of the conditions } // Update tangent (must be updated even for constant normal) const bool frictional_problem = rModelPart.IsDefined(SLIP) ? rModelPart.Is(SLIP) : false; if (frictional_problem) { const bool has_lm = rModelPart.HasNodalSolutionStepVariable(VECTOR_LAGRANGE_MULTIPLIER); if (has_lm && mOptions.IsNot(BaseMortarConvergenceCriteria::PURE_SLIP)) { MortarUtilities::ComputeNodesTangentModelPart(r_contact_model_part); } else { MortarUtilities::ComputeNodesTangentModelPart(r_contact_model_part, &WEIGHTED_SLIP, 1.0, true); } } const bool adapt_penalty = r_process_info.Has(ADAPT_PENALTY) ? 
r_process_info.GetValue(ADAPT_PENALTY) : false; const bool dynamic_case = rModelPart.HasNodalSolutionStepVariable(VELOCITY); /* Compute weighthed gap */ if (adapt_penalty || dynamic_case) { // Set to zero the weighted gap ResetWeightedGap(rModelPart); // Compute the contribution ContactUtilities::ComputeExplicitContributionConditions(rModelPart.GetSubModelPart("ComputingContact")); } // In dynamic case if ( dynamic_case && mOptions.Is(BaseMortarConvergenceCriteria::COMPUTE_DYNAMIC_FACTOR)) { ComputeDynamicFactorProcess compute_dynamic_factor_process( r_contact_model_part ); compute_dynamic_factor_process.Execute(); } // We recalculate the penalty parameter if ( adapt_penalty ) { AALMAdaptPenaltyValueProcess aalm_adaptation_of_penalty( r_contact_model_part ); aalm_adaptation_of_penalty.Execute(); } return true; } /** * @brief Compute relative and absolute error. * @param rModelPart Reference to the ModelPart containing the contact problem. * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) * @return true if convergence is achieved, false otherwise */ bool PostCriteria( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { // We save the current WEIGHTED_GAP in the buffer NodesArrayType& r_nodes_array = rModelPart.GetSubModelPart("Contact").Nodes(); const auto it_node_begin = r_nodes_array.begin(); #pragma omp parallel for for(int i = 0; i < static_cast<int>(r_nodes_array.size()); ++i) { auto it_node = it_node_begin + i; it_node->FastGetSolutionStepValue(WEIGHTED_GAP, 1) = it_node->FastGetSolutionStepValue(WEIGHTED_GAP); } // Set to zero the weighted gap ResetWeightedGap(rModelPart); // Compute the contribution 
ContactUtilities::ComputeExplicitContributionConditions(rModelPart.GetSubModelPart("ComputingContact")); // GiD IO for debugging if (mOptions.Is(BaseMortarConvergenceCriteria::IO_DEBUG)) { const bool frictional_problem = rModelPart.IsDefined(SLIP) ? rModelPart.Is(SLIP) : false; const int nl_iter = rModelPart.GetProcessInfo()[NL_ITERATION_NUMBER]; const double label = static_cast<double>(nl_iter); if (nl_iter == 1) { mpIO->InitializeMesh(label); mpIO->WriteMesh(rModelPart.GetMesh()); mpIO->FinalizeMesh(); mpIO->InitializeResults(label, rModelPart.GetMesh()); } mpIO->WriteNodalFlags(INTERFACE, "INTERFACE", rModelPart.Nodes(), label); mpIO->WriteNodalFlags(ACTIVE, "ACTIVE", rModelPart.Nodes(), label); mpIO->WriteNodalFlags(SLAVE, "SLAVE", rModelPart.Nodes(), label); mpIO->WriteNodalFlags(ISOLATED, "ISOLATED", rModelPart.Nodes(), label); mpIO->WriteNodalResults(NORMAL, rModelPart.Nodes(), label, 0); mpIO->WriteNodalResultsNonHistorical(DYNAMIC_FACTOR, rModelPart.Nodes(), label); mpIO->WriteNodalResultsNonHistorical(AUGMENTED_NORMAL_CONTACT_PRESSURE, rModelPart.Nodes(), label); mpIO->WriteNodalResults(DISPLACEMENT, rModelPart.Nodes(), label, 0); if (rModelPart.Nodes().begin()->SolutionStepsDataHas(VELOCITY_X)) { mpIO->WriteNodalResults(VELOCITY, rModelPart.Nodes(), label, 0); mpIO->WriteNodalResults(ACCELERATION, rModelPart.Nodes(), label, 0); } if (r_nodes_array.begin()->SolutionStepsDataHas(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE)) mpIO->WriteNodalResults(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE, rModelPart.Nodes(), label, 0); else if (r_nodes_array.begin()->SolutionStepsDataHas(VECTOR_LAGRANGE_MULTIPLIER_X)) mpIO->WriteNodalResults(VECTOR_LAGRANGE_MULTIPLIER, rModelPart.Nodes(), label, 0); mpIO->WriteNodalResults(WEIGHTED_GAP, rModelPart.Nodes(), label, 0); if (frictional_problem) { mpIO->WriteNodalFlags(SLIP, "SLIP", rModelPart.Nodes(), label); mpIO->WriteNodalResults(WEIGHTED_SLIP, rModelPart.Nodes(), label, 0); 
mpIO->WriteNodalResultsNonHistorical(AUGMENTED_TANGENT_CONTACT_PRESSURE, rModelPart.Nodes(), label); } } return true; } /** * @brief This function initialize the convergence criteria * @param rModelPart The model part of interest */ void Initialize(ModelPart& rModelPart) override { // Calling base criteria BaseType::Initialize(rModelPart); // The current process info ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); r_process_info.SetValue(ACTIVE_SET_COMPUTED, false); } /** * @brief This function initializes the solution step * @param rModelPart Reference to the ModelPart containing the contact problem. * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) */ void InitializeSolutionStep( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { // Update normal of the conditions ModelPart& r_contact_model_part = rModelPart.GetSubModelPart("Contact"); MortarUtilities::ComputeNodesMeanNormalModelPart(r_contact_model_part); const bool frictional_problem = rModelPart.IsDefined(SLIP) ? 
rModelPart.Is(SLIP) : false; if (frictional_problem) { const bool has_lm = rModelPart.HasNodalSolutionStepVariable(VECTOR_LAGRANGE_MULTIPLIER); if (has_lm && mOptions.IsNot(BaseMortarConvergenceCriteria::PURE_SLIP)) { MortarUtilities::ComputeNodesTangentModelPart(r_contact_model_part); } else { MortarUtilities::ComputeNodesTangentModelPart(r_contact_model_part, &WEIGHTED_SLIP, 1.0, true); } } // IO for debugging if (mOptions.Is(BaseMortarConvergenceCriteria::IO_DEBUG)) { mpIO->CloseResultFile(); std::ostringstream new_name ; new_name << "POST_LINEAR_ITER_STEP=""POST_LINEAR_ITER_STEP=" << rModelPart.GetProcessInfo()[STEP]; mpIO->ChangeOutputName(new_name.str()); } } /** * @brief This function finalizes the solution step * @param rModelPart Reference to the ModelPart containing the contact problem. * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) */ void FinalizeSolutionStep( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { // IO for debugging if (mOptions.Is(BaseMortarConvergenceCriteria::IO_DEBUG)) { mpIO->FinalizeResults(); } } /** * @brief This function finalizes the non-linear iteration * @param rModelPart Reference to the ModelPart containing the problem. 
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual + reactions) */ void FinalizeNonLinearIteration( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { // Calling base criteria BaseType::FinalizeNonLinearIteration(rModelPart, rDofSet, rA, rDx, rb); // The current process info ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); r_process_info.SetValue(ACTIVE_SET_COMPUTED, false); } ///@} ///@name Operations ///@{ ///@} ///@name Acces ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Friends ///@{ protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ Flags mOptions; /// Local flags ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /** * @brief This method resets the weighted gap in the nodes of the problem * @param rModelPart Reference to the ModelPart containing the contact problem. 
*/ virtual void ResetWeightedGap(ModelPart& rModelPart) { NodesArrayType& r_nodes_array = rModelPart.GetSubModelPart("Contact").Nodes(); VariableUtils().SetVariable(WEIGHTED_GAP, 0.0, r_nodes_array); } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ GidIOBaseType::Pointer mpIO; /// The pointer to the debugging IO ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ /** * @brief It computes the mean of the normal in the condition in all the nodes * @param rModelPart The model part to compute */ inline void ComputeNodesMeanNormalModelPartWithPairedNormal(ModelPart& rModelPart) { // Compute normal and tangent ModelPart& r_contact_model_part = rModelPart.GetSubModelPart("Contact"); MortarUtilities::ComputeNodesMeanNormalModelPart(r_contact_model_part); // Iterate over the computing conditions ModelPart& r_computing_contact_model_part = rModelPart.GetSubModelPart("ComputingContact"); ConditionsArrayType& r_conditions_array = r_computing_contact_model_part.Conditions(); const auto it_cond_begin = r_conditions_array.begin(); #pragma omp parallel for for(int i = 0; i < static_cast<int>(r_conditions_array.size()); ++i) { auto it_cond = it_cond_begin + i; // Aux coordinates Point::CoordinatesArrayType aux_coords; // We update the paired normal GeometryType& r_parent_geometry = it_cond->GetGeometry().GetGeometryPart(0); aux_coords = r_parent_geometry.PointLocalCoordinates(aux_coords, r_parent_geometry.Center()); it_cond->SetValue(NORMAL, r_parent_geometry.UnitNormal(aux_coords)); } } ///@} ///@name Private Access ///@{ ///@} ///@name Serialization ///@{ ///@name Private Inquiry ///@{ ///@} ///@name Unaccessible methods ///@{ ///@} }; // Class BaseMortarConvergenceCriteria ///@name Local flags creation ///@{ /// Local Flags template<class TSparseSpace, class TDenseSpace> const Kratos::Flags 
BaseMortarConvergenceCriteria<TSparseSpace, TDenseSpace>::COMPUTE_DYNAMIC_FACTOR(Kratos::Flags::Create(0)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags BaseMortarConvergenceCriteria<TSparseSpace, TDenseSpace>::IO_DEBUG(Kratos::Flags::Create(1)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags BaseMortarConvergenceCriteria<TSparseSpace, TDenseSpace>::PURE_SLIP(Kratos::Flags::Create(2)); } // namespace Kratos #endif /* KRATOS_BASE_MORTAR_CRITERIA_H defined */
GB_unaryop__ainv_fp64_fp64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_fp64_fp64
// op(A') function:  GB_tran__ainv_fp64_fp64

// C type:   double
// A type:   double
// cast:     double cij = (double) aij
// unaryop:  cij = -aij   (AINV: additive inverse, i.e. negation)

#define GB_ATYPE \
    double

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = -x ;

// casting (identity cast here: both types are double)
#define GB_CASTING(z, aij) \
    double z = (double) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__ainv_fp64_fp64
(
    double *Cx,         // Cx and Ax may be aliased
    double *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__ainv_fp64_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
doitgen.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "doitgen.h" /* Array initialization. */ static void init_array(int nr, int nq, int np, DATA_TYPE POLYBENCH_3D(A,NR,NQ,NP,nr,nq,np), DATA_TYPE POLYBENCH_2D(C4,NP,NP,np,np)) { int i __attribute__((annotate("scalar(range(0, " PB_XSTR(NR) ") final)"))); int j __attribute__((annotate("scalar(range(0, " PB_XSTR(NQ) ") final)"))); int k __attribute__((annotate("scalar(range(0, " PB_XSTR(NP) ") final)"))); for (i = 0; i < nr; i++) for (j = 0; j < nq; j++) for (k = 0; k < np; k++) A[i][j][k] = ((DATA_TYPE) i*j + k) / np; for (i = 0; i < np; i++) for (j = 0; j < np; j++) C4[i][j] = ((DATA_TYPE) i*j) / np; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int nr, int nq, int np, DATA_TYPE POLYBENCH_3D(A,NR,NQ,NP,nr,nq,np)) { int i, j, k; for (i = 0; i < nr; i++) for (j = 0; j < nq; j++) for (k = 0; k < np; k++) { fprintf (stderr, DATA_PRINTF_MODIFIER, A[i][j][k]); if (i % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. 
*/ static void kernel_doitgen(int nr, int nq, int np, DATA_TYPE POLYBENCH_3D(A,NR,NQ,NP,nr,nq,np), DATA_TYPE POLYBENCH_2D(C4,NP,NP,np,np), DATA_TYPE POLYBENCH_3D(sum,NR,NQ,NP,nr,nq,np)) { int r, q, p, s; #pragma scop #pragma omp parallel { #pragma omp for private (q, p, s) for (r = 0; r < _PB_NR; r++) for (q = 0; q < _PB_NQ; q++) { for (p = 0; p < _PB_NP; p++) { sum[r][q][p] = 0; for (s = 0; s < _PB_NP; s++) sum[r][q][p] = sum[r][q][p] + A[r][q][s] * C4[s][p]; } for (p = 0; p < _PB_NR; p++) A[r][q][p] = sum[r][q][p]; } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int nr = NR; int nq = NQ; int np = NP; /* Variable declaration/allocation. */ POLYBENCH_3D_ARRAY_DECL(A,DATA_TYPE __attribute__((annotate("target('A') scalar(range(0, 1000000) final)"))),NR,NQ,NP,nr,nq,np); POLYBENCH_3D_ARRAY_DECL(sum,DATA_TYPE __attribute__((annotate("target('sum') scalar(range(0, 1000000) final)"))),NR,NQ,NP,nr,nq,np); POLYBENCH_2D_ARRAY_DECL(C4,DATA_TYPE __attribute__((annotate("target('C4') scalar()"))),NP,NP,np,np); /* Initialize array(s). */ init_array (nr, nq, np, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(C4)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_doitgen (nr, nq, np, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(C4), POLYBENCH_ARRAY(sum)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(nr, nq, np, POLYBENCH_ARRAY(A))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(sum); POLYBENCH_FREE_ARRAY(C4); return 0; }
ex02.c
/* Copyright (c) 2019 CSC Training */
// Copyright (c) 2021 ENCCS
#include <stdio.h>
#include <math.h>

#define NX 102400

int main(void)
{
  double vecA[NX], vecB[NX], vecC[NX];
  double r = 0.2;

  /* Initialization of vectors: vecA[i] = r^i (geometric series),
     vecB all ones, so sum(vecA*vecB) converges to 1/(1-r) = 1.25. */
  for (int i = 0; i < NX; i++) {
    vecA[i] = pow(r, i);
    vecB[i] = 1.0;
  }

  /* Element-wise product of two vectors, offloaded to the device.
     FIX: a bare "#pragma omp target" offloads the region but executes the
     loop on a single device thread; adding "teams distribute parallel for"
     actually parallelizes it.  Explicit map clauses also avoid copying
     vecC to the device and copying vecA/vecB back. */
#pragma omp target teams distribute parallel for map(to: vecA, vecB) map(from: vecC)
  for (int i = 0; i < NX; i++) {
    vecC[i] = vecA[i] * vecB[i];
  }

  /* Calculate the sum on the host. */
  double sum = 0.0;
  for (int i = 0; i < NX; i++) {
    sum += vecC[i];
  }
  printf("The sum is: %8.6f \n", sum);
  return 0;
}
core_zlantr.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> c d s
 *
 **/

#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"

#include <math.h>

/******************************************************************************/
// Compute the norm (max / one / inf / Frobenius, selected by `norm`) of an
// m-by-n triangular tile A, storing the scalar result in *value.
__attribute__((weak))
void plasma_core_zlantr(plasma_enum_t norm, plasma_enum_t uplo, plasma_enum_t diag,
                 int m, int n,
                 const plasma_complex64_t *A, int lda,
                 double *work, double *value)
{
    // Due to a bug in LAPACKE < 3.6.1, this function always returns zero.
    // *value = LAPACKE_zlantr_work(LAPACK_COL_MAJOR,
    //                              lapack_const(norm), lapack_const(uplo),
    //                              lapack_const(diag),
    //                              m, n, A, lda, work);
    // Calling LAPACK directly instead.
    char nrm = lapack_const(norm);
    char upl = lapack_const(uplo);
    char dia = lapack_const(diag);
    *value = LAPACK_zlantr(&nrm, &upl, &dia, &m, &n, A, &lda, work);
}

/******************************************************************************/
// OpenMP-task wrapper: runs plasma_core_zlantr as a task that reads tile A
// and writes the single scalar *value.
void plasma_core_omp_zlantr(plasma_enum_t norm, plasma_enum_t uplo, plasma_enum_t diag,
                     int m, int n,
                     const plasma_complex64_t *A, int lda,
                     double *work, double *value,
                     plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(in:A[0:lda*n]) \
                     depend(out:value[0:1])
    {
        // Skip the kernel if an earlier task in the sequence already failed.
        if (sequence->status == PlasmaSuccess)
            plasma_core_zlantr(norm, uplo, diag,
                        m, n, A, lda, work, value);
    }
}

/******************************************************************************/
// Auxiliary kernel for tile-wise norm reduction: instead of a single scalar,
// it produces per-column absolute sums (PlasmaOneNorm, value[0:n]) or per-row
// absolute sums (PlasmaInfNorm, value[0:m]) of the triangular tile, which the
// caller later reduces across tiles.  For PlasmaUnit the (implicit) unit
// diagonal contributes 1.0; columns/rows outside the imin(m,n) triangle are
// handled by the trailing loops.  Other norms are not handled here.
void plasma_core_omp_zlantr_aux(plasma_enum_t norm, plasma_enum_t uplo,
                         plasma_enum_t diag,
                         int m, int n,
                         const plasma_complex64_t *A, int lda,
                         double *value,
                         plasma_sequence_t *sequence, plasma_request_t *request)
{
    switch (norm) {
    case PlasmaOneNorm:
        #pragma omp task depend(in:A[0:lda*n]) \
                         depend(out:value[0:n])
        {
            if (sequence->status == PlasmaSuccess) {
                if (uplo == PlasmaUpper) {
                    if (diag == PlasmaNonUnit) {
                        // column j sums entries A(0:min(j,m-1), j)
                        for (int j = 0; j < n; j++) {
                            value[j] = cabs(A[lda*j]);
                            for (int i = 1; i < imin(j+1, m); i++) {
                                value[j] += cabs(A[lda*j+i]);
                            }
                        }
                    }
                    else { // PlasmaUnit
                        // diagonal entries are implicitly 1.0
                        int j;
                        for (j = 0; j < imin(n, m); j++) {
                            value[j] = 1.0;
                            for (int i = 0; i < j; i++) {
                                value[j] += cabs(A[lda*j+i]);
                            }
                        }
                        // columns past the diagonal block: full height m
                        for (; j < n; j++) {
                            value[j] = cabs(A[lda*j]);
                            for (int i = 1; i < m; i++) {
                                value[j] += cabs(A[lda*j+i]);
                            }
                        }
                    }
                }
                else { // PlasmaLower
                    if (diag == PlasmaNonUnit) {
                        // column j sums entries A(j:m-1, j); columns beyond
                        // the triangle are empty
                        int j;
                        for (j = 0; j < imin(n, m); j++) {
                            value[j] = cabs(A[lda*j+j]);
                            for (int i = j+1; i < m; i++) {
                                value[j] += cabs(A[lda*j+i]);
                            }
                        }
                        for (; j < n; j++)
                            value[j] = 0.0;
                    }
                    else { // PlasmaUnit
                        int j;
                        for (j = 0; j < imin(n, m); j++) {
                            value[j] = 1.0;
                            for (int i = j+1; i < m; i++) {
                                value[j] += cabs(A[lda*j+i]);
                            }
                        }
                        for (; j < n; j++)
                            value[j] = 0.0;
                    }
                }
            }
        }
        break;
    case PlasmaInfNorm:
        #pragma omp task depend(in:A[0:lda*n]) \
                         depend(out:value[0:m])
        {
            if (sequence->status == PlasmaSuccess) {
                if (uplo == PlasmaUpper) {
                    if (diag == PlasmaNonUnit) {
                        // accumulate row sums over the upper triangle
                        for (int i = 0; i < m; i++)
                            value[i] = 0.0;

                        for (int j = 0; j < n; j++) {
                            for (int i = 0; i < imin(j+1, m); i++) {
                                value[i] += cabs(A[lda*j+i]);
                            }
                        }
                    }
                    else { // PlasmaUnit
                        // rows on the diagonal start at 1.0 (unit diagonal)
                        int i;
                        for (i = 0; i < imin(m, n); i++)
                            value[i] = 1.0;

                        for (; i < m; i++)
                            value[i] = 0.0;

                        int j;
                        for (j = 0; j < imin(n, m); j++) {
                            for (i = 0; i < j; i++) {
                                value[i] += cabs(A[lda*j+i]);
                            }
                        }
                        // columns past the diagonal block: full height m
                        for (; j < n; j++) {
                            for (i = 0; i < m; i++) {
                                value[i] += cabs(A[lda*j+i]);
                            }
                        }
                    }
                }
                else { // PlasmaLower
                    if (diag == PlasmaNonUnit) {
                        for (int i = 0; i < m; i++)
                            value[i] = 0.0;

                        for (int j = 0; j < imin(n, m); j++) {
                            for (int i = j; i < m; i++) {
                                value[i] += cabs(A[lda*j+i]);
                            }
                        }
                    }
                    else { // PlasmaUnit
                        int i;
                        for (i = 0; i < imin(m, n); i++)
                            value[i] = 1.0;

                        for (; i < m; i++)
                            value[i] = 0.0;

                        for (int j = 0; j < imin(n, m); j++) {
                            for (i = j+1; i < m; i++) {
                                value[i] += cabs(A[lda*j+i]);
                            }
                        }
                    }
                }
            }
        }
        break;
    }
}
GB_binop__bshift_uint64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bshift_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_08__bshift_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_02__bshift_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_04__bshift_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bshift_uint64)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__bshift_uint64)
// C+=b function (dense accum):     GB (_Cdense_accumb__bshift_uint64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bshift_uint64)
// C=scalar+B                       GB (_bind1st__bshift_uint64)
// C=scalar+B'                      GB (_bind1st_tran__bshift_uint64)
// C=A+scalar                       GB (_bind2nd__bshift_uint64)
// C=A'+scalar                      GB (_bind2nd_tran__bshift_uint64)

// C type:   uint64_t
// A type:   uint64_t
// A pattern? 0
// B type:   int8_t
// B pattern? 0

// BinaryOp: cij = GB_bitshift_uint64 (aij, bij)
// (bit-shift of a uint64 value by a signed 8-bit shift count)

#define GB_ATYPE \
    uint64_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    uint64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    0

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint64_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_bitshift_uint64 (x, y) ;

// true if the binop must be flipped (bitshift is not commutative)
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BSHIFT || GxB_NO_UINT64 || GxB_NO_BSHIFT_UINT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (bitshift is none of these, so this variant is compiled out.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__bshift_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bshift_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bshift_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bshift_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // eWiseUnion supplies scalars used in place of missing entries
    uint64_t alpha_scalar ;
    int8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bshift_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bshift_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bshift_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bshift_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bshift_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t x = (*((uint64_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_bitshift_uint64 (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bshift_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        uint64_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_bitshift_uint64 (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    int8_t aij = GBX (Ax, pA, false) ;              \
    Cx [pC] = GB_bitshift_uint64 (x, aij) ;         \
}

GrB_Info GB (_bind1st_tran__bshift_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    // GB_ATYPE is therefore temporarily redefined to the B type (int8_t)
    // and restored to uint64_t after the template include below.
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    uint64_t aij = GBX (Ax, pA, false) ;            \
    Cx [pC] = GB_bitshift_uint64 (aij, y) ;         \
}

GrB_Info GB (_bind2nd_tran__bshift_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
core_zunmqr.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> c d s
 *
 **/

#include "core_blas.h"
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"

#include <omp.h>

/***************************************************************************//**
 *
 * @ingroup core_unmqr
 *
 *  Overwrites the general m-by-n tile C with
 *
 *                                    side = PlasmaLeft      side = PlasmaRight
 *    trans = PlasmaNoTrans               Q * C                  C * Q
 *    trans = Plasma_ConjTrans            Q^H * C                C * Q^H
 *
 *  where Q is a unitary matrix defined as the product of k
 *  elementary reflectors
 *  \f[
 *      Q = H(1) H(2) ... H(k)
 *  \f]
 *  as returned by core_zgeqrt. Q is of order m if side = PlasmaLeft
 *  and of order n if side = PlasmaRight.
 *
 *******************************************************************************
 *
 * @param[in] side
 *         - PlasmaLeft  : apply Q or Q^H from the Left;
 *         - PlasmaRight : apply Q or Q^H from the Right.
 *
 * @param[in] trans
 *         - PlasmaNoTrans    : No transpose, apply Q;
 *         - Plasma_ConjTrans : Transpose, apply Q^H.
 *
 * @param[in] m
 *         The number of rows of the tile C. m >= 0.
 *
 * @param[in] n
 *         The number of columns of the tile C. n >= 0.
 *
 * @param[in] k
 *         The number of elementary reflectors whose product defines
 *         the matrix Q.
 *         If side = PlasmaLeft,  m >= k >= 0;
 *         if side = PlasmaRight, n >= k >= 0.
 *
 * @param[in] ib
 *         The inner-blocking size. ib >= 0.
 *
 * @param[in] A
 *         Dimension: (lda,k)
 *         The i-th column must contain the vector which defines the
 *         elementary reflector H(i), for i = 1,2,...,k,
 *         as returned by core_zgeqrt in the first k columns of its
 *         array argument A.
 *
 * @param[in] lda
 *         The leading dimension of the array A.
 *         If side = PlasmaLeft,  lda >= max(1,m);
 *         if side = PlasmaRight, lda >= max(1,n).
 *
 * @param[in] T
 *         The ib-by-k triangular factor T of the block reflector.
 *         T is upper triangular by block (economic storage);
 *         The rest of the array is not referenced.
 *
 * @param[in] ldt
 *         The leading dimension of the array T. ldt >= ib.
 *
 * @param[in,out] C
 *         On entry, the m-by-n tile C.
 *         On exit, C is overwritten by Q*C or Q^T*C or C*Q^T or C*Q.
 *
 * @param[in] ldc
 *         The leading dimension of the array C. ldc >= max(1,m).
 *
 * @param work
 *         Auxiliary workspace array of length
 *         ldwork-by-n  if side == PlasmaLeft
 *         ldwork-by-ib if side == PlasmaRight
 *
 * @param[in] ldwork
 *         The leading dimension of the array work.
 *             ldwork >= max(1,ib) if side == PlasmaLeft
 *             ldwork >= max(1,m)  if side == PlasmaRight
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 *
 ******************************************************************************/
int core_zunmqr(plasma_enum_t side, plasma_enum_t trans,
                int m, int n, int k, int ib,
                const plasma_complex64_t *A,    int lda,
                const plasma_complex64_t *T,    int ldt,
                      plasma_complex64_t *C,    int ldc,
                      plasma_complex64_t *work, int ldwork)
{
    // Check input arguments.
    if (side != PlasmaLeft && side != PlasmaRight) {
        coreblas_error("illegal value of side");
        return -1;
    }

    int nq; // order of Q
    int nw; // dimension of work

    if (side == PlasmaLeft) {
        nq = m;
        nw = n;
    }
    else {
        nq = n;
        nw = m;
    }

    if (trans != PlasmaNoTrans && trans != Plasma_ConjTrans) {
        coreblas_error("illegal value of trans");
        return -2;
    }
    if (m < 0) {
        coreblas_error("illegal value of m");
        return -3;
    }
    if (n < 0) {
        coreblas_error("illegal value of n");
        return -4;
    }
    if (k < 0 || k > nq) {
        coreblas_error("illegal value of k");
        return -5;
    }
    if (ib < 0) {
        coreblas_error("illegal value of ib");
        return -6;
    }
    if (A == NULL) {
        coreblas_error("NULL A");
        return -7;
    }
    if (lda < imax(1, nq) && nq > 0) {
        coreblas_error("illegal value of lda");
        return -8;
    }
    if (T == NULL) {
        coreblas_error("NULL T");
        return -9;
    }
    if (ldt < imax(1, ib)) {
        coreblas_error("illegal value of ldt");
        return -10;
    }
    if (C == NULL) {
        coreblas_error("NULL C");
        return -11;
    }
    if (ldc < imax(1, m) && m > 0) {
        coreblas_error("illegal value of ldc");
        return -12;
    }
    if (work == NULL) {
        coreblas_error("NULL work");
        return -13;
    }
    if (ldwork < imax(1, nw) && nw > 0) {
        coreblas_error("illegal value of ldwork");
        return -14;
    }

    // quick return
    if (m == 0 || n == 0 || k == 0)
        return PlasmaSuccess;

    // Choose the sweep direction over the k reflectors, in blocks of ib:
    // forward (i1=0, step +ib) when applying Q^H from the left or Q from the
    // right; backward (start at the last block, step -ib) otherwise.  This
    // mirrors LAPACK's zunmqr blocking.
    int i1, i3;
    if ((side == PlasmaLeft  && trans != PlasmaNoTrans) ||
        (side == PlasmaRight && trans == PlasmaNoTrans)) {
        i1 = 0;
        i3 = ib;
    }
    else {
        i1 = ((k-1)/ib)*ib;
        i3 = -ib;
    }

    for (int i = i1; i > -1 && i < k; i += i3) {
        int kb = imin(ib, k-i);     // width of this reflector block
        int ic = 0;                 // row offset of the affected part of C
        int jc = 0;                 // column offset of the affected part of C
        int ni = n;
        int mi = m;

        if (side == PlasmaLeft) {
            // H or H^H is applied to C(i:m,1:n).
            mi = m - i;
            ic = i;
        }
        else {
            // H or H^H is applied to C(1:m,i:n).
            ni = n - i;
            jc = i;
        }

        // Apply H or H^H.
        LAPACKE_zlarfb_work(LAPACK_COL_MAJOR,
                            lapack_const(side),
                            lapack_const(trans),
                            lapack_const(PlasmaForward),
                            lapack_const(PlasmaColumnwise),
                            mi, ni, kb,
                            &A[lda*i+i], lda,
                            &T[ldt*i],   ldt,
                            &C[ldc*jc+ic], ldc,
                            work, ldwork);
    }

    return PlasmaSuccess;
}

/******************************************************************************/
// OpenMP-task wrapper: applies Q (or Q^H) to tile C as a task that reads A
// and T and updates C in place.  Per-thread workspace comes from the shared
// plasma_workspace_t, indexed by the executing thread's id.
void core_omp_zunmqr(plasma_enum_t side, plasma_enum_t trans,
                     int m, int n, int k, int ib,
                     const plasma_complex64_t *A, int lda,
                     const plasma_complex64_t *T, int ldt,
                           plasma_complex64_t *C, int ldc,
                     plasma_workspace_t work,
                     plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(in:A[0:lda*k]) \
                     depend(in:T[0:ib*k]) \
                     depend(inout:C[0:ldc*n])
    {
        // Skip the kernel if an earlier task in the sequence already failed.
        if (sequence->status == PlasmaSuccess) {
            // Prepare workspaces.
            int tid = omp_get_thread_num();
            plasma_complex64_t *W = (plasma_complex64_t*)work.spaces[tid];
            int ldwork = side == PlasmaLeft ? n : m; // TODO: double check

            // Call the kernel.
            int info = core_zunmqr(side, trans,
                                   m, n, k, ib,
                                   A, lda,
                                   T, ldt,
                                   C, ldc,
                                   W, ldwork);

            if (info != PlasmaSuccess) {
                plasma_error("core_zunmqr() failed");
                plasma_request_fail(sequence, request, PlasmaErrorInternal);
            }
        }
    }
}
PhysicalSystemFEM.h
//Design Notes: //Physical System: //MultiVector<ElementTypes> //MultiVector<DOFType> //Store DOFs in continguous memory //Each element // Quadrature // Energy(ShapeFunction, Energy, position) -> Energy(ShapeFunction(position)); // ShapeFunction + Kinematics (DOF + gradients I might need ....) // Gradient // Hessian (maybe, I'm not sure) // How to represent DOFS ? Pointers to list from Physical System ? Has to be, since elements like forces are connections // Energy // //Gradient // //Hessian //How should lists of DOFS work ? // Container<DOFs> #ifndef PHYSICALSYSTEMFEM_H #define PHYSICALSYSTEMFEM_H #include <vector> #include <DOFParticle.h> #include <DOFList.h> #include <UtilitiesEigen.h> namespace Gauss { namespace FEM { template<typename DataType, typename ElementType> class PhysicalSystemFEMImpl { public: //temporary global indices until I update the state to give these to me //automatically PhysicalSystemFEMImpl(const Eigen::Ref<Eigen::MatrixXd > &V, const Eigen::Ref<Eigen::MatrixXi> &F) : m_q(V.rows()), m_qDot(V.rows()) { m_V = V.template cast<DataType>(); m_F = F; m_numVerts = m_V.rows(); m_numElements = m_F.rows(); assert(m_V.cols() == 3); //3D only for now //initialize all the elements Eigen::MatrixXi Felement; std::array<DOFBase<DataType,0> *, ElementType::numDOFs()> qDOFArray; std::array<DOFBase<DataType,1> *, ElementType::numDOFs()> qDotDOFArray; for(unsigned int iel=0; iel < m_numElements; iel++) { for(unsigned int idof=0;idof < ElementType::numDOFs(); ++idof) { qDOFArray[idof] = &m_q[F(iel,idof)]; qDotDOFArray[idof] = &m_qDot[F(iel,idof)]; } Felement = m_F.row(iel); m_elements.push_back( new ElementType(m_V,Felement, qDOFArray, qDotDOFArray) ); } } ~PhysicalSystemFEMImpl() { } DataType getEnergy(const State<DataType> &state) const { double energy = 0.0; for(auto &element : m_elements) { energy += element->getEnergy(state); } return energy; } DataType getKineticEnergy(const State<DataType> &state) const { double energy = 0.0; for(auto &element : 
m_elements) { energy += element->getKineticEnergy(state); } return energy; } DataType getBodyForceEnergy(const State<DataType> &state) const { DataType energy = 0.0; #if defined(_WIN32) || defined(_WIN64) || defined (WIN32) for(auto &element : m_elements) { energy += element->getBodyForceWork(state); } #else #pragma omp parallel for reduction(+: energy) for(unsigned int ii=0; ii<m_elements.size(); ++ii) { energy = energy + m_elements[ii]->getBodyForceWork(state); } #endif return energy; } DataType getStrainEnergy(const State<DataType> &state) const { DataType energy = 0.0; #if defined(_WIN32) || defined(_WIN64) || defined (WIN32) for(auto &element : m_elements) { energy += element->getStrainEnergy(state); } #else #pragma omp parallel for reduction(+: energy) for(unsigned int ii=0; ii<m_elements.size(); ++ii) { energy = energy + m_elements[ii]->getStrainEnergy(state); } #endif return energy; } decltype(auto) getStrainEnergyPerElement(const State<DataType> &state) const { Eigen::VectorXx<DataType> energyPerElement(m_elements.size()); for(int i=0; i < m_elements.size(); i++) { energyPerElement[i] = m_elements[i]->getStrainEnergy(state); } return energyPerElement; } template<typename Assembler> inline void getMassMatrix(Assembler &assembler, const State<DataType> &state) const { //call the assembler on all elements forLoop<IsParallel<Assembler>::value>(m_elements, assembler, [&](auto &assemble, auto &element) { element->getMassMatrix(assemble,state); }); } template<typename Assembler> inline void getStiffnessMatrix(Assembler &assembler, const State<DataType> &state) const { forLoop<IsParallel<Assembler>::value>(m_elements, assembler, [&](auto &assemble, auto &element) { element->getStiffnessMatrix(assemble, state); }); } template<typename Assembler> inline void getForce(Assembler &assembler, const State<DataType> &state) const { forLoop<IsParallel<Assembler>::value>(m_elements, assembler, [&](auto &assemble, auto &element) { element->getForce(assemble, state); }); } 
template<typename Assembler> inline void getInternalForce(Assembler &assembler, const State<DataType> &state) const { forLoop<IsParallel<Assembler>::value>(m_elements, assembler, [&](auto &assemble, auto &element) { element->getInternalForce(assemble, state); }); } template<typename Assembler> inline void getBodyForce(Assembler &assembler, const State<DataType> &state) const { forLoop<IsParallel<Assembler>::value>(m_elements, assembler, [&](auto &assemble, auto &element) { element->getBodyForce(assemble, state); }); } inline unsigned int getNumElements() { return m_elements.size(); } inline ElementType * getElement(unsigned int i) { assert(i < m_elements.size()); return m_elements[i]; } inline std::vector<ElementType *> & getElements() { return m_elements; } inline const std::vector<ElementType *> & getElements() const { return m_elements; } inline const ElementType * getElement(unsigned int i) const { assert(i < m_elements.size()); return m_elements[i]; } inline auto & getQ() { return m_q; } inline const auto & getQ() const { return m_q; } inline auto & getQDot() { return m_qDot; } inline const auto & getQDot() const { return m_qDot; } //get function supporting a vertex (these return arrays in order to slot directly into assemblers) inline decltype(auto) getQ(unsigned int vertexId) const { std::array<const DOFBase<DataType,0> *,1> toReturn = {{&m_q[vertexId]}}; return toReturn; } inline decltype(auto) getQDot(unsigned int vertexId) const { std::array<const DOFBase<DataType,1> *,1> toReturn = {{&m_qDot[vertexId]}}; return toReturn; } template<typename Vector> inline decltype(auto) getQ(Vector &x, unsigned int elementId) const { std::cout<<"Error not implemented \n"; exit(0); std::array<const DOFBase<DataType,0> *, 1> toReturn = {{&m_q[elementId]}}; return toReturn; } template<typename Vector> inline decltype(auto) getQDot(Vector &x, unsigned int elementId) const { std::cout<<"Error not implemented \n"; exit(0); std::array<const DOFBase<DataType,1> *,1> toReturn = 
{{&m_qDot[elementId]}}; return toReturn; } inline auto & getV() { return m_V; } inline auto & getF() { return m_F; } inline const auto & getV() const { return m_V; } inline const auto & getF() const { return m_F; } //methods for getting current positions and position Jacobians for this system //Per-Vertex inline const auto getPosition(const State<DataType> &state, unsigned int vertexId) const { return getV().row(vertexId).transpose() + mapDOFEigen(m_q[vertexId], state); } inline const auto getVelocity(const State<DataType> &state, unsigned int vertexId) const { return mapDOFEigen(m_qDot[vertexId], state); } inline const auto getDPDQ(const State<DataType> &state, unsigned int vertexId) const { return Eigen::Matrix33x<DataType>::Identity(); } inline const auto getDPDQ(const State<DataType> &state, unsigned int elementId, const Eigen::Vector3x<DataType> &pos) const { exit(0); return Eigen::Matrix33x<DataType>::Identity(); } //want these for elements as well (i.e take an element indec and a point in space and return the right value) inline auto getGeometry() { return std::make_pair(std::ref(m_V), std::ref(m_F)); } inline const auto getGeometry() const { return std::make_pair(std::ref(m_V), std::ref(m_F)); } protected: //Mesh Eigen::MatrixXx<DataType> m_V; Eigen::MatrixXi m_F; long m_numVerts; long m_numElements; DOFList<DataType, DOFParticle, 0> m_q; DOFList<DataType, DOFParticle, 1> m_qDot; std::vector<ElementType *> m_elements; //DataType m_mass; //mass of particle //DOFParticle<DataType,0> m_x; //DOFParticle<DataType,1> m_xDot; private: }; template<typename DataType, template <typename A> class ElementType> using PhysicalSystemFEM = PhysicalSystem<DataType, PhysicalSystemFEMImpl<DataType, ElementType<DataType> > >; } } #endif
reto.c
/******************************************************************************
* FILE: mpi_mm.c
* DESCRIPTION:
*   MPI Matrix Multiply - C Version
*   In this code, the master task distributes a matrix multiply
*   operation to numtasks-1 worker tasks.
*   NOTE: C and Fortran versions of this code differ because of the way
*   arrays are stored/passed. C arrays are row-major order but Fortran
*   arrays are column-major order.
* AUTHOR: Blaise Barney. Adapted from Ros Leibensperger, Cornell Theory
*   Center. Converted to MPI: George L. Gusciora, MHPCC (1/95)
* LAST REVISED: 04/13/05
******************************************************************************/
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#define NRA 62                 /* number of rows in matrix A */
#define NCA 15                 /* number of columns in matrix A */
#define NCB 7                  /* number of columns in matrix B */
#define MASTER 0               /* taskid of first task */
#define FROM_MASTER 1          /* setting a message type */
#define FROM_WORKER 2          /* setting a message type */

int main (int argc, char *argv[])
{
int	numtasks,              /* number of tasks in partition */
	taskid,                /* a task identifier */
	numworkers,            /* number of worker tasks */
	source,                /* task id of message source */
	dest,                  /* task id of message destination */
	mtype,                 /* message type */
	rows,                  /* rows of matrix A sent to each worker */
	averow, extra, offset, /* used to determine rows sent to each worker */
	i, j, k;               /* misc */
double	a[NRA][NCA],           /* matrix A to be multiplied */
	b[NCA][NCB],           /* matrix B to be multiplied */
	c[NRA][NCB];           /* result matrix C */
double	start, end;            /* wall-clock timing */
MPI_Status status;

MPI_Init(&argc,&argv);
MPI_Comm_rank(MPI_COMM_WORLD,&taskid);
MPI_Comm_size(MPI_COMM_WORLD,&numtasks);
if (numtasks < 2 ) {
   printf("Need at least two MPI tasks. Quitting...\n");
   /* BUG FIX: the error code passed to MPI_Abort was an uninitialized
      variable (rc); pass an explicit nonzero code instead. */
   MPI_Abort(MPI_COMM_WORLD, 1);
   exit(1);
}
numworkers = numtasks-1;

start = MPI_Wtime();

/**************************** master task ************************************/
if (taskid == MASTER)
{
   printf("mpi_mm has started with %d tasks.\n",numtasks);
   printf("Initializing arrays...\n");
   for (i=0; i<NRA; i++)
      for (j=0; j<NCA; j++)
         a[i][j]= i+j;
   for (i=0; i<NCA; i++)
      for (j=0; j<NCB; j++)
         b[i][j]= i*j;

   /* Send matrix data to the worker tasks: each worker gets a contiguous
      band of rows of A plus the whole of B. */
   averow = NRA/numworkers;
   extra = NRA%numworkers;
   offset = 0;
   mtype = FROM_MASTER;
   for (dest=1; dest<=numworkers; dest++)
   {
      rows = (dest <= extra) ? averow+1 : averow;
      printf("Sending %d rows to task %d offset=%d\n",rows,dest,offset);
      MPI_Send(&offset, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
      MPI_Send(&rows, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
      MPI_Send(&a[offset][0], rows*NCA, MPI_DOUBLE, dest, mtype, MPI_COMM_WORLD);
      MPI_Send(&b, NCA*NCB, MPI_DOUBLE, dest, mtype, MPI_COMM_WORLD);
      offset = offset + rows;
   }

   /* Receive results from worker tasks */
   mtype = FROM_WORKER;
   for (i=1; i<=numworkers; i++)
   {
      source = i;
      MPI_Recv(&offset, 1, MPI_INT, source, mtype, MPI_COMM_WORLD, &status);
      MPI_Recv(&rows, 1, MPI_INT, source, mtype, MPI_COMM_WORLD, &status);
      MPI_Recv(&c[offset][0], rows*NCB, MPI_DOUBLE, source, mtype, MPI_COMM_WORLD, &status);
      printf("Received results from task %d\n",source);
   }

   /* Print results */
   printf("******************************************************\n");
   printf("Result Matrix:\n");
   for (i=0; i<NRA; i++)
   {
      printf("\n");
      for (j=0; j<NCB; j++)
         printf("%6.2f ", c[i][j]);
   }
   printf("\n******************************************************\n");
   printf ("Done.\n");
}

/**************************** worker task ************************************/
if (taskid > MASTER)
{
   mtype = FROM_MASTER;
   MPI_Recv(&offset, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
   MPI_Recv(&rows, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
   MPI_Recv(&a, rows*NCA, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD, &status);
   MPI_Recv(&b, NCA*NCB, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD, &status);

   /* If re-enabling OpenMP here, j must also be private (it was missing
      from the original pragma): shared(c,a,b) private(i,j,k) */
// #pragma omp parallel for shared(c,a,b) private(i,j,k)
// {
   for (k=0; k<NCB; k++)
      for (i=0; i<rows; i++)
      {
         c[i][k] = 0.0;
         for (j=0; j<NCA; j++)
            /* BUG FIX: original wrote c[i][k] = a[i][j] + b[j][k],
               overwriting the cell with a sum each iteration; a matrix
               product must accumulate the products. */
            c[i][k] += a[i][j] * b[j][k];
      }
// }
   mtype = FROM_WORKER;
   MPI_Send(&offset, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD);
   MPI_Send(&rows, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD);
   MPI_Send(&c, rows*NCB, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD);
}

end = MPI_Wtime();
MPI_Finalize();
/* NOTE: every rank prints its own elapsed time, after finalize. */
printf("diff time = %f\n",end - start);
return 0;
}
opencl_sxc_fmt_plug.c
/* * Modified by Dhiru Kholia <dhiru at openwall.com> for Keychain format. * * This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net> * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #ifdef HAVE_OPENCL #if FMT_EXTERNS_H extern struct fmt_main fmt_opencl_sxc; #elif FMT_REGISTERS_H john_register_one(&fmt_opencl_sxc); #else #include <string.h> #include "sha.h" #include <openssl/blowfish.h> #include "aes.h" #ifdef _OPENMP #include <omp.h> #endif #include "arch.h" #include "formats.h" #include "common.h" #include "stdint.h" #include "misc.h" #include "options.h" #include "common.h" #include "formats.h" #include "common-opencl.h" #define FORMAT_LABEL "sxc-opencl" #define FORMAT_NAME "StarOffice .sxc" #define ALGORITHM_NAME "PBKDF2-SHA1 OpenCL Blowfish" #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define BINARY_SIZE 20 #define PLAINTEXT_LENGTH 64 #define SALT_SIZE sizeof(sxc_cpu_salt) #define BINARY_ALIGN MEM_ALIGN_WORD #define SALT_ALIGN 4 typedef struct { uint32_t length; uint8_t v[20]; // hash of password } sxc_password; typedef struct { uint32_t v[16/4]; } sxc_hash; typedef struct { uint32_t iterations; uint32_t outlen; uint32_t skip_bytes; uint8_t length; uint8_t salt[64]; } sxc_salt; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_out)[32 / sizeof(ARCH_WORD_32)]; typedef struct { int cipher_type; int checksum_type; int iterations; int key_size; int iv_length; int salt_length; int original_length; int length; unsigned char iv[16]; unsigned char salt[32]; unsigned char content[1024]; } sxc_cpu_salt; static sxc_cpu_salt *cur_salt; static struct fmt_tests sxc_tests[] = { 
{"$sxc$*0*0*1024*16*4448359828281a1e6842c31453473abfeae584fb*8*dc0248bea0c7508c*16*1d53770002fe9d8016064e5ef9423174*860*864*f00399ab17b9899cd517758ecf918d4da78099ccd3557aef5e22e137fd5b81f732fc7c167c4de0cf263b4f82b50e3d6abc65da613a36b0025d89e1a09adeb4106da28040d1019bb4b36630fc8bc94fe5b515504bf8a92ea630bb95ace074868e7c10743ec970c89895f44b975a30b6ca032354f3e73ec86b2cc7a4f7a185884026d971b37b1e0e650376a2552e27ba955c700f8903a82a6df11f6cc2ecf63290f02ffdd278f890d1db75f9e8bd0f437c4ec613d3c6dcb421bbd1067be633593ba9bd58f77ef08e0cca64c732f892567d20de8d4c444fa9c1c1adc5e4657ef9740cb69ce55c8f9e6b1cfed0739ef002f1e1c1e54a5df50a759d92354f78eb90a9d9378f36df7d1edd8002ea0d637604fcd2408494c2d42b1771e2a2a20b55044836f76db4ed71e8a53f55a04f9437946603e7246c2d2d70caf6be0de82e8977fab4de84ca3783baedac041195d8b51166b502ff80c17db78f63d3632df1d5ef5b14d8d5553fc40b072030f9e3374c93e929a490c6cfb170f04433fc46f43b9c7d27f3f8c4ed759d4a20c2e53a0701b7c3d9201390a9b5597ce8ba35bd765b662e2242b9821bbb63b6be502d2150fff37e4b7f2a6b592fd0e319a7349df320e7fe7da600a2a05628dc00e04d480c085417f676bd0518bc39d9a9be34fc0cb192d5fa5e0c657cdf7c1ad265a2e81b90ac8b28d326f98b8f33c123df83edc964d2c17a904d0df8bd9ecbf629929d6e48cadc97f49a8941ada3d219e8c0f04f37cecc9a50cc5307fd2a488c34829b05cd1615ae0d1ef0ce450529aa755f9ae38332187ffe4144990de3265afaacb9f0f0fb9c67f6210369f7a0cc5bb346412db08e0f4732f91aa8d4b32fe6eece4fba118f118f6df2fb6c53fa9bc164c9ab7a9d414d33281eb0c3cd02abe0a4dd1c170e41c1c960a8f12a48a7b5e1f748c08e1b150a4e389c110ea3368bc6c6ef2bee98dc92c6825cbf6aee20e690e116c0e6cf48d49b38035f6a9b0cd6053b9f5b9f8360024c9c608cbba3fe5e7966b656fa08dec3e3ce3178a0c0007b7d177c7c44e6a68f4c7325cb98264b1e0f391c75a6a8fd3691581fb68ef459458830f2138d0fd743631efd92b742dfeb62c5ea8502515eb65af414bf805992f9272a7b1b745970fd54e128751f8f6c0a4d5bc7872bc09c04037e1e91dc7192d68f780cdb0f7ef6b282ea883be462ffeffb7b396e30303030", "openwall"}, 
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*259cafe530bd09f8*16*8f53ea878d0795cfe05dcc65fb272c20*1024*1024*ffb0f736b69d8433f958e8f475f609948ad7c9dd052f2b92c14cb1b395ffcac043a3def76d58442e131701b3b53d56ea570633bb20c83068542420160f5db3cee5eece05b67b54d0d4cdf3fbfd928d94852924e391aa3f70cad598b48b946399b0cd1e9e7e7d081a888933f8a1973e83166799396e8290771463c623620b51fb5310b9b0e0de3e5597b66a091301ada3ba6a5d7515d1fcf0eff65e543b50f8fe2222619600054eaf69c7aa680c96bc403f115cab32d6d8e8bc0db3904a71ce3eb1283ca73fd6f75845d9b7d0275e4727a0f56bfbf962a9079f51849de2c9dee7f1dadbbae944f442169281004773309b0f8d68f2cf83076fd8b19afbccf5ab7dc58fb9554fee82e2c491d6434a4cef6f3209775343c840bc4bdfab6705000e67412ac74d00f5b6aba1fd21bca5213234a5a1100a9d93daa141a4936723ea77d008a35c9078167a3529706432b36b9ec012c060d093535c85ca6feb75165d620d7d663c3e76b9bf3af614556ed8560b446a8a73649cb935383a30b4fd8fd75522203e4575cf4bc2b7f01a9294310fe021c71acbf68f6f1e95f48c30c14151c51d4fb878a16272ee73753bc750cbd48007c842412ca1dcb6214215b082c00d619a5318e2ebe9149410f501170093784afc2bd71dd9f5a87b349b96661747b1627e8cba8a5c98559fb146fa7e30db4c6f648ce3c2209f84551a7a1cd46d9172ae1354b6d093f89f6f5f58d29c1d7af8830df62e67753caa8166322caa0f8adf4b61d2013d35baa7c002e1d4c83b1cba8aaa57cf4946627fa63ba7a6a5a5c803e8d5a4794845ab670ef950b918a360cd9f12e8f3424ecab1f505cb494ad35f28d12ff183471d0f47bd67e6abd3b8c8e206d11149474a19b5c13d165d8f6dc39cf579fe1000295328aeeb82e0ae8020d2f61e4c3d6e68c25a655ab72aad5e9e74af4cf27c74158fdb1a29a3d76cd658976fa0a30743247408df00a23b593f68861348a6c46af05d21a4b81fedbf5715462ec8ffc5f001a85c43058ac1fab488236588ef0bf08dd8dd7c7fce630a0a996395b503647d9a2f0dd63dd2f939eca8e1849ee4ed41a6d5672d947177e8f890692de879a20dd9e366ec494d270faf0d24fc076172a25998aac218586404687e7c77b55e77e0eff9b1c65c3f8da99deaa86411ab6aca2531d84b364349591bc73e7504163afd23c5208e321883ee611ea7e4e5885086e4fa7196e16b948cb54808b64b94106c74900e3190fd5f6068b490fd0c9c64481771527a0e2d00899fd5b7a9e7f508cc6770018fadf09d965d7a12ad3624d2161d9546d4a793
7b5f961d7f7c4714786380c147e1ec6b0583503bd5a139b892831d1ea925993bb86f12e75d9010ceba230a1c286fa3d1d654a1672313cbf0763c05c622cee452f76957c42ba0e853ecda163d15e8600a702ccdc9e8f88a", "Ghe+t0Blaster"}, {"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*9bb755c8a4fe8c34*16*112b9d41098c8677615755361da473a6*1024*1024*b95f0f2e0e1c7b4ee61168b646804d4b70b615f3c978cec65c9a7ab515417c79625d104373fd5012c3da6b356f8408a3a75edcc8b2aad0aa38bb33edd8933bdadbffde35a350ade73ccb9df29c2996082f5e94e324496835f8dfebe15ca38950e0f435d711ef964aa09915d58287967b5e321ca195a7f90253157afe82329da9a496c97292419b9a94cdb92f919e6d54700466aff61c200c5a355905b5a37c12d77b0e4ffd23f0204cfa664f4c0545f233db8d35af5fe337b459135da398fd23101becb194db305496474ba4179a7355285a9ec935044e1831f290f5f87ed3e00925e7fb4fc6bc38d9f0cfe9abf72560400490d2fd398d2d49516b618f99168602f323dd1786bcca394830341dfbeb377f9b7ef161dc1470f5e92b6152fa7a4f428e8ae40100791491a9e1c9385298522320488f00535866ac6e08354a75b8b2fd293066da7eb6b4ad7f3e13c8dc98cd815b2393f147fdac6279f76fdac9abd0a94131fa84fe4e99634a362a56d60ce588f6e0b66d6f8b6d411511272ffe32181d20e7d2c3d4b680764607afb2c29dcb94a845b920e96f6c27575534f8b7f9ddd93bdcef0d717d0a899fa937e7d2eeeb6d5b0338757f6e69dac72524d4b6f74edce1f937008eb3653bcc31a88712af940cf47ec3f3efd83e4da89d1a6cb7da6cf8d7d41430bc81a4b5d7bb46cad687f2f505e3379143ae274eed6201c3b17c1e05e516a14cbf2351ccf9fdd46e1309afb170bd01eb8f6a1d8e12441525199455fb550e3fc689b1801332b2d985e336b158f846fcbca18fbe6ea21438cf1fb5fdbce8d6350e65d6468342880845675ec721af2fb9df917a3968b4a1a477fc4c74ee38a71a230d77c2a7cf66ae6b83804488cbd25213ebc470cd845a2691b16161a640ebb385aa2381dc91f692f6c4ca2709b5a7e94dfb4548000a29b56f1da08701945d6209fabbd1621b28849fc27810775f1a0e0204d3ae9040a8cfb1386499a39d87149cfc1579de7d059662ad25a67abd42b30bb3608f09142ca030351c3a1e921e4c7bbc11aab846ef42eb5d1418c15ada77539aca096e0678439cd1b60950d2aa0cc4d2004b1ac48dc6a454c5a8e9ea7e910047c7c83895fd614fd9dfd961631eb23757646143c2aeb03c1a6476e78fc4ccf0f02cc1f88ec1b0080a170ac6871dc183939f7
a4376965b0dfa7922012582eec4846ee621edc5547a2b9c4893e7f67f76541a4bd4a91827a57b3db5cdea29a2a3cc20238d89c8145c14b037360ad27f54f87317ef70472d6b1fd9f1168bcf8aba6071257b3adebab8d4e115188ed4af3fc3574fdccb4bc7eeb00a6a442f1b96a989b735f5e6059ec72c1677b77f437dcb93066f8591a11071799c3a0ec3b48f6160976aff1928c375358837e1ef02e20397b2e9d8d9c4bff23172c9b4c0b941cb1b49b5bc070f72a14cd384", "M1racl33"}, {"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*ceb1edb1e3cb72fd*16*f7104c9b2789540f5fd4beef009c0139*1024*1024*709130b940a9663d0a5687133c6f78535d05a72936faed8c2f3c1b4e29423baaabcee4f0d7d57e3ad8d8c090486f974c4d0ce4be5b29ef8e1b02c01b4af1959ed0b277146a45aec35a48997b584b82697803193644eefd88a7eefcae8819839e13702f887278a597dd954babd82bf71bf8ca8559af0e5537be0264e358d36b4f5067960edf608de731e04d117de953386aadee71849edbc494fac3e6b14567a9e9c545a06d402acd3158441829f25478ed0f9086dabd2d3913b123b43c27176f8f08f30312d84e82d47654097a2bce95554357db3ce3d45a7441472067f55e4ea6244a3dedc23db4bea8f549109ffac382cf5b652c5b1ee431bcab1051567c263a9d668c5d6a15a6f8da754914746c1d3c7eb6347bdd8d6a3ac82e4c742fcf8721913c111dfd5398f2698db00f7220d2a3562e02f7f7a6505af3ba1ee10b46f2ab5b5d2f52d288fd12814c6edbcb8d50b6e8716fba0d5962747b971689fe75e94fa36ec39598ea30e15ab2b9c9f22ca04b890a13b18fb3c7a962050426bb2da08c8b993608b9c1ffd0a21e0c74e993242ead8eb30f86d7d2dcdbd4774d85c2e06adbe4b40050ff0ac1a8afe8fbc2175ec4da4676a691b1fce38421175734c20f07a604fea5287e1c33b420aa9db4de9bd97382c161b4ec0818add675e52ebf036aad779f24b824be4b2b013c470ff66cbf44f5800e128a3b328e80a5fd6295b9b3a94e915f9add6710cb9444432751a7a31c3a3422f48a5eabc26d9a52571b8447bdd0a5977ff7153d95337cef7ff2ec29774332fbeed6ee5eed5e12288cc13e14ba9d5ff3dd052e28ba96715f5b95d7ea214ebcd9e60b26308eb11370b824b5cff2644dd2117985b3c25ba8076d4025cf3a3a62da62d5e11d44422a142048e8cd00c7de6a0a55fd5dc09a3ed01dfe35b88268f351b6ff289fee8e52ac29fe32d9990e0d6d87f39727b6a762bac9d509c6ea235fc8bedc3bec2143eae9fd2cb831b798ef8261d72785002638b940947de0aad64f791f9a27e5b091e55adf4aee0649f6785bdd37e02
48fedd1759d771aeacacb3ff6e7cf2d045f791428ab61710b54e869213393caf1b6bc99066678351deafc290cecc1f6b40b5532adbbab9a70408c61a437d4483b6a75cb61a55b20881efc0d849e0f60c1887f0fa091672179a145c4ab1b6487a0e939e0123d5aaffa3aec66ab593f9c25d27f22f4a73a999a4ab45e8bc7d71a85e2d40afadad1a1dc0b8389f96f91614293fa205583ef1c3440e3df50e8aa5f1a13e5929b72cd003461ff03d44d8c84bdada176b24459021d398b2b91b61a9c0b553a8714c703d32452c691a33f1581e98c2439514ca3e7deeef90850f8d6d89bf1d3a5762a56ef769ea588f5c1705bfb7b944cfbbb0632718ee3722f4e1929b35706d6413a315a11bc16349af109a7e675df2ab1eebe93", "excel123"}, {NULL} }; static cl_int cl_error; static sxc_password *inbuffer; static sxc_hash *outbuffer; static sxc_salt currentsalt; static cl_mem mem_in, mem_out, mem_setting; static struct fmt_main *self; static size_t insize, outsize, settingsize; #define STEP 0 #define SEED 256 // This file contains auto-tuning routine(s). Has to be included after formats definitions. #include "opencl-autotune.h" #include "memdbg.h" static const char * warn[] = { "xfer: ", ", crypt: ", ", xfer: " }; /* ------- Helper functions ------- */ static size_t get_task_max_work_group_size() { return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel); } static void create_clobj(size_t gws, struct fmt_main *self) { insize = sizeof(sxc_password) * gws; outsize = sizeof(sxc_hash) * gws; settingsize = sizeof(sxc_salt); inbuffer = mem_calloc(1, insize); outbuffer = mem_alloc(outsize); saved_key = mem_calloc(gws, sizeof(*saved_key)); crypt_out = mem_calloc(gws, sizeof(*crypt_out)); /// Allocate memory mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem in"); mem_setting = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem setting"); mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem out"); 
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting), &mem_setting), "Error while setting mem_salt kernel argument"); } static void release_clobj(void) { if (crypt_out) { HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in"); HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting"); HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out"); MEM_FREE(inbuffer); MEM_FREE(outbuffer); MEM_FREE(saved_key); MEM_FREE(crypt_out); } } static void done(void) { if (autotuned) { release_clobj(); HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel"); HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program"); autotuned--; } } static void init(struct fmt_main *_self) { self = _self; opencl_prepare_dev(gpu_id); } static void reset(struct db_main *db) { if (!autotuned) { char build_opts[64]; snprintf(build_opts, sizeof(build_opts), "-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d", (int)sizeof(inbuffer->v), (int)sizeof(currentsalt.salt), (int)sizeof(outbuffer->v)); opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl", gpu_id, build_opts); crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error); HANDLE_CLERROR(cl_error, "Error creating kernel"); // Initialize openCL tuning (library) for this format. opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self, create_clobj, release_clobj, sizeof(sxc_password), 0, db); // Auto tune execution from shared/included code. 
autotune_run(self, 1, 0, 1000); } } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy; char *keeptr; char *p; int res; if (strncmp(ciphertext, "$sxc$*", 6)) return 0; /* handle 'chopped' .pot lines */ if (ldr_isa_pot_source(ciphertext)) return 1; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += 6; if ((p = strtokm(ctcopy, "*")) == NULL) /* cipher type */ goto err; res = atoi(p); if (res != 0 && res != 1) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* checksum type */ goto err; res = atoi(p); if (res != 0 && res != 1) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* iterations */ goto err; res = atoi(p); if (res <= 0) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* key size */ goto err; res = atoi(p); if (res != 16 && res != 32) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* checksum field (skipped) */ goto err; if (hexlenl(p) != BINARY_SIZE * 2) goto err; if (!ishex(p)) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* iv length */ goto err; res = atoi(p); if (res <= 0 || res > 16) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* iv */ goto err; if (hexlenl(p) != res * 2) goto err; if (!ishex(p)) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* salt length */ goto err; res = atoi(p); if (res <= 0 || res > 32) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* salt */ goto err; if (hexlenl(p) != res * 2) goto err; if (!ishex(p)) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* original length */ goto err; res = atoi(p); if (res <= 0 || res > 1024) /* 1024 because of "unsigned char output[1024];" in crypt_all */ goto err; if ((p = strtokm(NULL, "*")) == NULL) /* length */ goto err; res = atoi(p); if (res <= 0 || res > 1024) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* content */ goto err; if (hexlenl(p) != res * 2) goto err; if (strtokm(NULL, "*") != NULL) /* the end */ goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { char *ctcopy = 
strdup(ciphertext); char *keeptr = ctcopy; int i; char *p; static sxc_cpu_salt cs; memset(&cs, 0, sizeof(cs)); ctcopy += 6; /* skip over "$sxc$*" */ p = strtokm(ctcopy, "*"); cs.cipher_type = atoi(p); p = strtokm(NULL, "*"); cs.checksum_type = atoi(p); p = strtokm(NULL, "*"); cs.iterations = atoi(p); p = strtokm(NULL, "*"); cs.key_size = atoi(p); strtokm(NULL, "*"); /* skip checksum field */ p = strtokm(NULL, "*"); cs.iv_length = atoi(p); p = strtokm(NULL, "*"); for (i = 0; i < cs.iv_length; i++) cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "*"); cs.salt_length = atoi(p); p = strtokm(NULL, "*"); for (i = 0; i < cs.salt_length; i++) cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "*"); cs.original_length = atoi(p); p = strtokm(NULL, "*"); cs.length = atoi(p); p = strtokm(NULL, "*"); for (i = 0; i < cs.length; i++) cs.content[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; MEM_FREE(keeptr); return (void *)&cs; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE+1]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; ctcopy += 6; /* skip over "$sxc$*" */ strtokm(ctcopy, "*"); strtokm(NULL, "*"); strtokm(NULL, "*"); strtokm(NULL, "*"); p = strtokm(NULL, "*"); for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } MEM_FREE(keeptr); return out; } static void set_salt(void *salt) { cur_salt = (sxc_cpu_salt*)salt; memcpy((char*)currentsalt.salt, cur_salt->salt, cur_salt->salt_length); currentsalt.length = cur_salt->salt_length; currentsalt.iterations = cur_salt->iterations; currentsalt.outlen = cur_salt->key_size; currentsalt.skip_bytes = 0; HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting, CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL), "Copy salt to 
gpu"); } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } #undef set_key static void set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; } static char *get_key(int index) { return saved_key[index]; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index; size_t *lws = local_work_size ? &local_work_size : NULL; global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size); #ifdef _OPENMP #pragma omp parallel for #endif for(index = 0; index < count; index++) { unsigned char hash[20]; SHA_CTX ctx; SHA1_Init(&ctx); SHA1_Update(&ctx, (unsigned char *)saved_key[index], strlen(saved_key[index])); SHA1_Final((unsigned char *)hash, &ctx); memcpy(inbuffer[index].v, hash, 20); inbuffer[index].length = 20; } /// Copy data to gpu BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0, insize, inbuffer, 0, NULL, multi_profilingEvent[0]), "Copy data to gpu"); /// Run kernel BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[1]), "Run kernel"); /// Read the result back BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0, outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back"); if (ocl_autotune_running) return count; #ifdef _OPENMP #pragma omp parallel for #endif for(index = 0; index < 
count; index++) { BF_KEY bf_key; SHA_CTX ctx; int bf_ivec_pos; unsigned char ivec[8]; unsigned char output[1024]; bf_ivec_pos = 0; memcpy(ivec, cur_salt->iv, 8); BF_set_key(&bf_key, cur_salt->key_size, (const unsigned char*)outbuffer[index].v); BF_cfb64_encrypt(cur_salt->content, output, cur_salt->length, &bf_key, ivec, &bf_ivec_pos, 0); SHA1_Init(&ctx); SHA1_Update(&ctx, output, cur_salt->original_length); SHA1_Final((unsigned char*)crypt_out[index], &ctx); } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static unsigned int iteration_count(void *salt) { sxc_salt *my_salt; my_salt = salt; return (unsigned int) my_salt->iterations; } struct fmt_main fmt_opencl_sxc = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { "iteration count", }, sxc_tests }, { init, done, reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { iteration_count, }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */ #endif /* HAVE_OPENCL */
nodal_two_step_v_p_strategy_for_FSI.h
// // Project Name: KratosPFEMFluidDynamicsApplication $ // Last modified by: $Author: AFranci $ // Date: $Date: June 2018 $ // Revision: $Revision: 0.0 $ // // #ifndef KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_FOR_FSI_H #define KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_FOR_FSI_H #include "includes/define.h" #include "includes/model_part.h" #include "includes/deprecated_variables.h" #include "includes/cfd_variables.h" #include "utilities/openmp_utils.h" #include "processes/process.h" #include "solving_strategies/schemes/scheme.h" #include "solving_strategies/strategies/solving_strategy.h" #include "custom_utilities/mesher_utilities.hpp" #include "custom_utilities/boundary_normals_calculation_utilities.hpp" #include "geometries/geometry.h" #include "utilities/geometry_utilities.h" #include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h" #include "custom_strategies/builders_and_solvers/nodal_residualbased_elimination_builder_and_solver_for_FSI.h" #include "custom_strategies/builders_and_solvers/nodal_residualbased_elimination_builder_and_solver_continuity_for_FSI.h" #include "custom_strategies/builders_and_solvers/nodal_residualbased_block_builder_and_solver.h" #include "custom_utilities/solver_settings.h" #include "custom_strategies/strategies/gauss_seidel_linear_strategy.h" #include "pfem_fluid_dynamics_application_variables.h" #include "nodal_two_step_v_p_strategy.h" #include "nodal_two_step_v_p_strategy_for_FSI.h" #include <stdio.h> #include <math.h> #include <iostream> #include <fstream> namespace Kratos { ///@addtogroup PFEMFluidDynamicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ template <class TSparseSpace, class TDenseSpace, class TLinearSolver> class NodalTwoStepVPStrategyForFSI : public NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> { public: ///@name Type Definitions ///@{ 
// Kratos smart-pointer typedefs for this strategy class.
KRATOS_CLASS_POINTER_DEFINITION(NodalTwoStepVPStrategyForFSI);

/// Counted pointer of NodalTwoStepVPStrategy
//typedef boost::shared_ptr< NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> > Pointer;

// Base class: the nodally-integrated two-step velocity-pressure strategy.
typedef NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;

typedef typename BaseType::TDataType TDataType;

/// Node type (default is: Node<3>)
typedef Node<3> NodeType;

/// Geometry type (using with given NodeType)
typedef Geometry<NodeType> GeometryType;

typedef std::size_t SizeType;

//typedef typename BaseType::DofSetType DofSetType;

typedef typename BaseType::DofsArrayType DofsArrayType;

typedef typename BaseType::TSystemMatrixType TSystemMatrixType;

typedef typename BaseType::TSystemVectorType TSystemVectorType;

typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;

typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;

typedef typename BaseType::ElementsArrayType ElementsArrayType;

typedef typename SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer StrategyPointerType;

typedef TwoStepVPSolverSettings<TSparseSpace, TDenseSpace, TLinearSolver> SolverSettingsType;

// Bring the protected configuration members of the base strategy into scope.
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mVelocityTolerance;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mPressureTolerance;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mMaxPressureIter;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mDomainSize;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mTimeOrder;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mReformDofSet;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mpMomentumStrategy;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mpPressureStrategy;

typedef GeometryType::ShapeFunctionsGradientsType ShapeFunctionDerivativesArrayType;

typedef GlobalPointersVector<Node<3>> NodeWeakPtrVectorType;

///@}
///@name Life Cycle
///@{

/// Constructor from a solver-settings object; delegates sub-strategy creation
/// to the base class' InitializeStrategy.
/// @param rModelPart    model part holding the coupled fluid/solid mesh
/// @param rSolverConfig settings consumed by the base InitializeStrategy
NodalTwoStepVPStrategyForFSI(ModelPart &rModelPart, SolverSettingsType &rSolverConfig) : BaseType(rModelPart)
{
    NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::InitializeStrategy(rSolverConfig);
}

/// Constructor that wires up the two inner Gauss-Seidel linear strategies:
/// one for the momentum (velocity) step and one for the continuity
/// (pressure) step, each with its own FSI builder-and-solver.
/// @param rModelPart            model part holding the coupled mesh
/// @param pVelocityLinearSolver linear solver for the momentum system
/// @param pPressureLinearSolver linear solver for the continuity system
/// @param ReformDofSet          rebuild the DofSet each step (handled by this strategy)
/// @param VelTol / PresTol      nonlinear convergence tolerances
/// @param MaxPressureIterations max outer V-P iterations (predictor-corrector only)
/// @param TimeOrder             time-integration order (BDF)
/// @param DomainSize            spatial dimension (2 or 3)
NodalTwoStepVPStrategyForFSI(ModelPart &rModelPart,
                             /*SolverConfiguration<TSparseSpace, TDenseSpace, TLinearSolver>& rSolverConfig,*/
                             typename TLinearSolver::Pointer pVelocityLinearSolver,
                             typename TLinearSolver::Pointer pPressureLinearSolver,
                             bool ReformDofSet = true,
                             double VelTol = 0.0001,
                             double PresTol = 0.0001,
                             int MaxPressureIterations = 1, // Only for predictor-corrector
                             unsigned int TimeOrder = 2,
                             unsigned int DomainSize = 2) : BaseType(rModelPart,
                                                                     pVelocityLinearSolver,
                                                                     pPressureLinearSolver,
                                                                     ReformDofSet,
                                                                     VelTol,
                                                                     PresTol,
                                                                     MaxPressureIterations,
                                                                     TimeOrder,
                                                                     DomainSize)
{
    KRATOS_TRY;

    BaseType::SetEchoLevel(1); // NOTE(review): echo level hard-coded here; overrides whatever the caller configured - confirm intended

    // Check that input parameters are reasonable and sufficient.
    this->Check();

    bool CalculateNormDxFlag = true;

    bool ReformDofAtEachIteration = false; // DofSet modification is managed by the fractional step strategy, auxiliary strategies should not modify the DofSet directly.

    // Additional Typedefs
    typedef typename BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer BuilderSolverTypePointer;
    // NOTE(review): this local typedef shadows the class-level BaseType
    // (NodalTwoStepVPStrategy) with SolvingStrategy for the rest of the body.
    typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;

    //initializing fractional velocity solution step
    typedef Scheme<TSparseSpace, TDenseSpace> SchemeType;
    typename SchemeType::Pointer pScheme;

    typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new ResidualBasedIncrementalUpdateStaticScheme<TSparseSpace, TDenseSpace>());
    pScheme.swap(Temp);

    //CONSTRUCTION OF VELOCITY
    BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new NodalResidualBasedEliminationBuilderAndSolverForFSI<TSparseSpace, TDenseSpace, TLinearSolver>(pVelocityLinearSolver));

    this->mpMomentumStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pVelocityLinearSolver, vel_build, ReformDofAtEachIteration, CalculateNormDxFlag));

    this->mpMomentumStrategy->SetEchoLevel(BaseType::GetEchoLevel());

    vel_build->SetCalculateReactionsFlag(false);

    // Construction of the continuity (pressure) sub-strategy.
    BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI<TSparseSpace, TDenseSpace, TLinearSolver>(pPressureLinearSolver));

    this->mpPressureStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pPressureLinearSolver, pressure_build, ReformDofAtEachIteration, CalculateNormDxFlag));

    this->mpPressureStrategy->SetEchoLevel(BaseType::GetEchoLevel());

    pressure_build->SetCalculateReactionsFlag(false);

    KRATOS_CATCH("");
}

/// Destructor.
virtual ~NodalTwoStepVPStrategyForFSI() {}

/// Performs one coupled velocity-pressure solution step: iterates the
/// momentum and continuity sub-strategies until both converge or the
/// iteration budget is exhausted.
/// @return true if both momentum and continuity converged within the budget.
bool SolveSolutionStep() override
{
    // Initialize BDF2 coefficients
    ModelPart &rModelPart = BaseType::GetModelPart();
    this->SetTimeCoefficients(rModelPart.GetProcessInfo());

    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    double currentTime = rCurrentProcessInfo[TIME];
    double timeInterval = rCurrentProcessInfo[DELTA_TIME];
    bool timeIntervalChanged = rCurrentProcessInfo[TIME_INTERVAL_CHANGED];

    bool converged = false;
    unsigned int maxNonLinearIterations = mMaxPressureIter;

    std::cout << "\n Solve with nodally_integrated_two_step_vp strategy at t=" << currentTime << "s" << std::endl;

    // Grant extra iterations right after a time-step-size change and during
    // the first time steps of the simulation, when convergence is harder.
    if (timeIntervalChanged == true && currentTime > 10 * timeInterval)
    {
        maxNonLinearIterations *= 2;
    }
    if (currentTime < 10 * timeInterval)
    {
        if (BaseType::GetEchoLevel() > 1)
            std::cout << "within the first 10 time steps, I consider the given iteration number x3" << std::endl;
        maxNonLinearIterations *= 3;
    }
    if (currentTime < 20 * timeInterval && currentTime >= 10 * timeInterval)
    {
        if (BaseType::GetEchoLevel() > 1)
            std::cout << "within the second 10 time steps, I consider the given iteration number x2" << std::endl;
        maxNonLinearIterations *= 2;
    }

    bool momentumConverged = true;
    bool continuityConverged = false;
    bool fixedTimeStep = false;
    double pressureNorm = 0;
    double velocityNorm = 0;
    // bool momentumAlreadyConverged=false;
    // bool continuityAlreadyConverged=false;
    /* boost::timer solve_step_time; */
    // std::cout<<" InitializeSolutionStep().... "<<std::endl;
    // this->UnactiveSliverElements(); //this is done in set_active_flag_mesher_process which is activated from fluid_pre_refining_mesher.py

    InitializeSolutionStep(); // it fills SOLID_NODAL_SFD_NEIGHBOURS_ORDER for solids and NODAL_SFD_NEIGHBOURS_ORDER for fluids and inner solids

    for (unsigned int it = 0; it < maxNonLinearIterations; ++it)
    {
        if (BaseType::GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0)
            std::cout << "----- > iteration: " << it << std::endl;

        if (it == 0)
        {
            ComputeNodalVolumeAndAssignFlagToElementType(); // it assigns NODAL_VOLUME to fluid and SOLID_NODAL_VOLUME to solid. Interface nodes have both
            this->InitializeNonLinearIterations();          // it fills SOLID_NODAL_SFD_NEIGHBOURS for solids and NODAL_SFD_NEIGHBOURS for fluids
        }
        // std::cout<<" CalcNodalStrainsAndStresses .... "<<std::endl;
        CalcNodalStrainsAndStresses(); // it computes stresses and strains for fluid and solid nodes
        // std::cout<<" CalcNodalStrainsAndStresses DONE "<<std::endl;

        momentumConverged = this->SolveMomentumIteration(it, maxNonLinearIterations, fixedTimeStep, velocityNorm);

        // Mesh/topology may have moved after the momentum solve; rebuild the
        // nodal quantities before the continuity solve.
        UpdateTopology(rModelPart, BaseType::GetEchoLevel());
        // std::cout<<" ComputeNodalVolume .... "<<std::endl;
        ComputeNodalVolume();
        // std::cout<<" ComputeNodalVolume DONE "<<std::endl;
        this->InitializeNonLinearIterations();
        // std::cout<<" InitializeNonLinearIterations DONE "<<std::endl;
        CalcNodalStrains();
        // std::cout<<" CalcNodalStrains DONE "<<std::endl;

        if (fixedTimeStep == false)
        {
            continuityConverged = this->SolveContinuityIteration(it, maxNonLinearIterations, pressureNorm);
        }

        // if((momentumConverged==true || it==maxNonLinearIterations-1) && momentumAlreadyConverged==false){
        //   std::ofstream myfile;
        //   myfile.open ("momentumConvergedIteration.txt",std::ios::app);
        //   myfile << currentTime << "\t" << it << "\n";
        //   myfile.close();
        //   momentumAlreadyConverged=true;
        // }
        // if((continuityConverged==true || it==maxNonLinearIterations-1) && continuityAlreadyConverged==false){
        //   std::ofstream myfile;
        //   myfile.open ("continuityConvergedIteration.txt",std::ios::app);
        //   myfile << currentTime << "\t" << it << "\n";
        //   myfile.close();
        //   continuityAlreadyConverged=true;
        // }

        if (it == maxNonLinearIterations - 1 || ((continuityConverged && momentumConverged) && it > 1))
        {
            //this->ComputeErrorL2NormCaseImposedG();
            //this->ComputeErrorL2NormCasePoiseuille();
            this->CalculateAccelerations();
            // std::ofstream myfile;
            // myfile.open ("maxConvergedIteration.txt",std::ios::app);
            // myfile << currentTime << "\t" << it << "\n";
            // myfile.close();
        }

        bool hybridMethod = false;
        if (hybridMethod == true)
        {
            if (it == maxNonLinearIterations - 1 || ((continuityConverged && momentumConverged) && it > 0))
            {
                this->UpdateElementalStressStrain();
            }
        }

        if ((continuityConverged && momentumConverged) && it > 1)
        {
            rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false);
            rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false);
            converged = true;
            std::cout << "nodal V-P strategy converged in " << it + 1 << " iterations."
<< std::endl;
            break;
        }
        if (fixedTimeStep == true)
        {
            break;
        }
    }

    // NOTE(review): this warning fires only when BOTH solves failed (&&);
    // confirm whether a single failing field should also be reported (||).
    if (!continuityConverged && !momentumConverged && BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
        std::cout << "Convergence tolerance not reached." << std::endl;

    if (mReformDofSet)
        this->Clear();

    /* std::cout << "solve_step_time : " << solve_step_time.elapsed() << std::endl; */

    return converged;
}

/// Re-runs InitializeSolutionStep on every element (OpenMP-partitioned) so
/// that elemental state is refreshed from the current nodal solution.
void UpdateElementalStressStrain()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();

#pragma omp parallel
    {
        ModelPart::ElementIterator ElemBegin;
        ModelPart::ElementIterator ElemEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
        for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
        {
            /* itElem-> InitializeElementStrainStressState(); */
            itElem->InitializeSolutionStep(rCurrentProcessInfo);
        }
    }
}

/// Zero-initializes every fluid (NODAL_*) and solid (SOLID_NODAL_*) nodal
/// container used by the nodally-integrated scheme, sizing vectors/matrices
/// from the spatial dimension and neighbour count, then assigns the material
/// coefficients node by node. Nodes missing a variable are reported to stdout.
void Initialize() override
{
    std::cout << " \n Initialize in nodal_two_step_v_p_strategy_FSI" << std::endl;

    ModelPart &rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    // Voigt size of strain/stress vectors: 3 components in 2D, 6 in 3D.
    unsigned int sizeStrains = 3 * (dimension - 1);

    // #pragma omp parallel
    // {
    ModelPart::NodeIterator NodesBegin;
    ModelPart::NodeIterator NodesEnd;
    OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);

    for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
    {
        NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
        unsigned int neighbourNodes = neighb_nodes.size();
        unsigned int sizeSDFNeigh = neighbourNodes * dimension;

        if (itNode->SolutionStepsDataHas(NODAL_CAUCHY_STRESS))
        {
            Vector &rNodalStress = itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS);
            if (rNodalStress.size() != sizeStrains)
            {
                rNodalStress.resize(sizeStrains, false);
            }
            noalias(rNodalStress) = ZeroVector(sizeStrains);
        }
        else
        {
            std::cout << "THIS node does not have NODAL_CAUCHY_STRESS... " << itNode->X() << " " << itNode->Y() << std::endl;
        }

        if (itNode->SolutionStepsDataHas(NODAL_DEVIATORIC_CAUCHY_STRESS))
        {
            Vector &rNodalStress = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS);
            if (rNodalStress.size() != sizeStrains)
            {
                rNodalStress.resize(sizeStrains, false);
            }
            noalias(rNodalStress) = ZeroVector(sizeStrains);
        }
        else
        {
            std::cout << "THIS node does not have NODAL_DEVIATORIC_CAUCHY_STRESS... " << itNode->X() << " " << itNode->Y() << std::endl;
        }

        if (itNode->SolutionStepsDataHas(NODAL_VOLUME))
        {
            itNode->FastGetSolutionStepValue(NODAL_VOLUME) = 0;
        }
        else
        {
            std::cout << "THIS node does not have NODAL_VOLUME... " << itNode->X() << " " << itNode->Y() << std::endl;
        }

        if (itNode->SolutionStepsDataHas(NODAL_MEAN_MESH_SIZE))
        {
            itNode->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0;
        }
        else
        {
            std::cout << "THIS node does not have NODAL_MEAN_MESH_SIZE... " << itNode->X() << " " << itNode->Y() << std::endl;
        }

        if (itNode->SolutionStepsDataHas(NODAL_FREESURFACE_AREA))
        {
            itNode->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0;
        }
        else
        {
            std::cout << "THIS node does not have NODAL_FREESURFACE_AREA... " << itNode->X() << " " << itNode->Y() << std::endl;
        }

        if (itNode->SolutionStepsDataHas(NODAL_SFD_NEIGHBOURS))
        {
            Vector &rNodalSFDneighbours = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
            if (rNodalSFDneighbours.size() != sizeSDFNeigh)
            {
                rNodalSFDneighbours.resize(sizeSDFNeigh, false);
            }
            noalias(rNodalSFDneighbours) = ZeroVector(sizeSDFNeigh);
        }
        else
        {
            std::cout << "THIS node does not have NODAL_SFD_NEIGHBOURS... " << itNode->X() << " " << itNode->Y() << std::endl;
        }

        if (itNode->SolutionStepsDataHas(NODAL_SPATIAL_DEF_RATE))
        {
            Vector &rSpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
            if (rSpatialDefRate.size() != sizeStrains)
            {
                rSpatialDefRate.resize(sizeStrains, false);
            }
            noalias(rSpatialDefRate) = ZeroVector(sizeStrains);
        }
        else
        {
            std::cout << "THIS node does not have NODAL_SPATIAL_DEF_RATE... " << itNode->X() << " " << itNode->Y() << std::endl;
        }

        if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD))
        {
            Matrix &rFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
            if (rFgrad.size1() != dimension)
            {
                rFgrad.resize(dimension, dimension, false);
            }
            noalias(rFgrad) = ZeroMatrix(dimension, dimension);
        }
        else
        {
            std::cout << "THIS node does not have NODAL_DEFORMATION_GRAD... " << itNode->X() << " " << itNode->Y() << std::endl;
        }

        if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD_VEL))
        {
            Matrix &rFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
            if (rFgradVel.size1() != dimension)
            {
                rFgradVel.resize(dimension, dimension, false);
            }
            noalias(rFgradVel) = ZeroMatrix(dimension, dimension);
        }
        else
        {
            std::cout << "THIS node does not have NODAL_DEFORMATION_GRAD_VEL... " << itNode->X() << " " << itNode->Y() << std::endl;
        }

        if (itNode->SolutionStepsDataHas(SOLID_NODAL_CAUCHY_STRESS))
        {
            Vector &rSolidNodalStress = itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS);
            if (rSolidNodalStress.size() != sizeStrains)
            {
                rSolidNodalStress.resize(sizeStrains, false);
            }
            noalias(rSolidNodalStress) = ZeroVector(sizeStrains);
        }
        else
        {
            std::cout << "THIS node does not have SOLID_NODAL_CAUCHY_STRESS... " << itNode->X() << " " << itNode->Y() << std::endl;
        }

        if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS))
        {
            Vector &rSolidNodalStress = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS);
            if (rSolidNodalStress.size() != sizeStrains)
            {
                rSolidNodalStress.resize(sizeStrains, false);
            }
            noalias(rSolidNodalStress) = ZeroVector(sizeStrains);
        }
        else
        {
            std::cout << "THIS node does not have SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS... " << itNode->X() << " " << itNode->Y() << std::endl;
        }

        if (itNode->SolutionStepsDataHas(SOLID_NODAL_VOLUME))
        {
            itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME) = 0;
        }
        else
        {
            std::cout << "THIS node does not have SOLID_NODAL_VOLUME... " << itNode->X() << " " << itNode->Y() << std::endl;
        }

        if (itNode->SolutionStepsDataHas(SOLID_NODAL_MEAN_MESH_SIZE))
        {
            itNode->FastGetSolutionStepValue(SOLID_NODAL_MEAN_MESH_SIZE) = 0;
        }
        else
        {
            std::cout << "THIS node does not have SOLID_NODAL_MEAN_MESH_SIZE... " << itNode->X() << " " << itNode->Y() << std::endl;
        }

        if (itNode->SolutionStepsDataHas(SOLID_NODAL_FREESURFACE_AREA))
        {
            itNode->FastGetSolutionStepValue(SOLID_NODAL_FREESURFACE_AREA) = 0;
        }
        else
        {
            std::cout << "THIS node does not have SOLID_NODAL_FREESURFACE_AREA... " << itNode->X() << " " << itNode->Y() << std::endl;
        }

        if (itNode->SolutionStepsDataHas(SOLID_NODAL_SFD_NEIGHBOURS))
        {
            Vector &rSolidNodalSFDneighbours = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
            if (rSolidNodalSFDneighbours.size() != sizeSDFNeigh)
            {
                rSolidNodalSFDneighbours.resize(sizeSDFNeigh, false);
            }
            noalias(rSolidNodalSFDneighbours) = ZeroVector(sizeSDFNeigh);
        }
        else
        {
            std::cout << "THIS node does not have SOLID_NODAL_SFD_NEIGHBOURS... " << itNode->X() << " " << itNode->Y() << std::endl;
        }

        if (itNode->SolutionStepsDataHas(SOLID_NODAL_SPATIAL_DEF_RATE))
        {
            Vector &rSolidSpatialDefRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
            if (rSolidSpatialDefRate.size() != sizeStrains)
            {
                rSolidSpatialDefRate.resize(sizeStrains, false);
            }
            noalias(rSolidSpatialDefRate) = ZeroVector(sizeStrains);
        }
        else
        {
            std::cout << "THIS node does not have SOLID_NODAL_SPATIAL_DEF_RATE... " << itNode->X() << " " << itNode->Y() << std::endl;
        }

        if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEFORMATION_GRAD))
        {
            Matrix &rSolidFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
            if (rSolidFgrad.size1() != dimension)
            {
                rSolidFgrad.resize(dimension, dimension, false);
            }
            noalias(rSolidFgrad) = ZeroMatrix(dimension, dimension);
        }
        else
        {
            std::cout << "THIS node does not have SOLID_NODAL_DEFORMATION_GRAD... " << itNode->X() << " " << itNode->Y() << std::endl;
        }

        if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEFORMATION_GRAD_VEL))
        {
            Matrix &rSolidFgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
            if (rSolidFgradVel.size1() != dimension)
            {
                rSolidFgradVel.resize(dimension, dimension, false);
            }
            noalias(rSolidFgradVel) = ZeroMatrix(dimension, dimension);
        }
        else
        {
            std::cout << "THIS node does not have SOLID_NODAL_DEFORMATION_GRAD_VEL... " << itNode->X() << " " << itNode->Y() << std::endl;
        }

        AssignMaterialToEachNode(itNode);
    }
    // }
}

/// Stores the nodal material coefficients used by the builders:
/// VOLUMETRIC_COEFFICIENT = deltaT*firstLame, DEVIATORIC_COEFFICIENT, and
/// the INTERFACE_NODE flag (true only for nodes flagged both SOLID and RIGID).
/// @param itNode iterator to the node being configured
void AssignMaterialToEachNode(ModelPart::NodeIterator itNode)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double timeInterval = rCurrentProcessInfo[DELTA_TIME];

    double deviatoricCoeff = 0;
    double volumetricCoeff = 0;

    if (itNode->Is(SOLID))
    {
        const double youngModulus = itNode->FastGetSolutionStepValue(YOUNG_MODULUS);
        const double poissonRatio = itNode->FastGetSolutionStepValue(POISSON_RATIO);
        //deviatoricCoeff=deltaT*secondLame
        deviatoricCoeff = timeInterval * youngModulus / (1.0 + poissonRatio) * 0.5;
        //volumetricCoeff=bulk*deltaT=deltaT*(firstLame+2*secondLame/3)
        volumetricCoeff = timeInterval * poissonRatio * youngModulus / ((1.0 + poissonRatio) * (1.0 - 2.0 * poissonRatio)) + 2.0 * deviatoricCoeff / 3.0;
    }
    else if (itNode->Is(FLUID) || itNode->Is(RIGID))
    {
        deviatoricCoeff = itNode->FastGetSolutionStepValue(DYNAMIC_VISCOSITY);
        volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS);
    }

    if ((itNode->Is(SOLID) && itNode->Is(RIGID)))
    {
        itNode->FastGetSolutionStepValue(INTERFACE_NODE) = true;
    }
    else
    {
        itNode->FastGetSolutionStepValue(INTERFACE_NODE) = false;
    }

    const double currFirstLame = volumetricCoeff - 2.0 * deviatoricCoeff / 3.0; //currFirstLame=deltaT*firstLame

    itNode->FastGetSolutionStepValue(VOLUMETRIC_COEFFICIENT) = currFirstLame;
    itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT) = deviatoricCoeff;
}

void
UnactiveSliverElements()
{
    // Deactivates "sliver" simplex elements whose measure falls below 0.1% of
    // the mean elemental volume; all other simplices are (re)activated.
    KRATOS_TRY;

    ModelPart &rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    MesherUtilities MesherUtils;
    const double ModelPartVolume = MesherUtils.ComputeModelPartVolume(rModelPart);
    const double CriticalVolume = 0.001 * ModelPartVolume / double(rModelPart.Elements().size());
    // NOTE(review): ElementalVolume is shared across the OpenMP threads below
    // and written inside the loop - data race; should be declared per-thread.
    double ElementalVolume = 0;

#pragma omp parallel
    {
        ModelPart::ElementIterator ElemBegin;
        ModelPart::ElementIterator ElemEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
        for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
        {
            unsigned int numNodes = itElem->GetGeometry().size();
            if (numNodes == (dimension + 1))
            {
                if (dimension == 2)
                {
                    ElementalVolume = (itElem)->GetGeometry().Area();
                }
                else if (dimension == 3)
                {
                    ElementalVolume = (itElem)->GetGeometry().Volume();
                }

                if (ElementalVolume < CriticalVolume)
                {
                    // std::cout << "sliver element: it has Volume: " << ElementalVolume << " vs CriticalVolume(meanVol/1000): " << CriticalVolume<< std::endl;
                    (itElem)->Set(ACTIVE, false);
                }
                else
                {
                    (itElem)->Set(ACTIVE, true);
                }
            }
        }
    }
    KRATOS_CATCH("");
}

/// Accumulates the lumped elemental volume (area/3 in 2D, volume/4 in 3D)
/// into NODAL_VOLUME, redirecting the contribution of SOLID elements to
/// SOLID_NODAL_VOLUME instead.
void ComputeNodalVolume()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ElementsArrayType &pElements = rModelPart.Elements();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();

#ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
#else
    int number_of_threads = 1;
#endif

    vector<unsigned int> element_partition;
    OpenMPUtils::CreatePartition(number_of_threads, pElements.size(), element_partition);

    // NOTE(review): with the parallel region commented out, ThisThread()
    // presumably returns 0, so only partition [0, element_partition[1])
    // is visited when number_of_threads > 1 - confirm intended.
    // #pragma omp parallel
    // {
    int k = OpenMPUtils::ThisThread();
    typename ElementsArrayType::iterator ElemBegin = pElements.begin() + element_partition[k];
    typename ElementsArrayType::iterator ElemEnd = pElements.begin() + element_partition[k + 1];

    for (typename ElementsArrayType::iterator itElem = ElemBegin; itElem != ElemEnd; itElem++) //MSI: To be parallelized
    {
        Element::GeometryType &geometry = itElem->GetGeometry();
        double elementalVolume = 0;

        if (dimension == 2)
        {
            elementalVolume = geometry.Area() / 3.0;
        }
        else if (dimension == 3)
        {
            elementalVolume = geometry.Volume() * 0.25;
        }
        // index = 0;
        unsigned int numNodes = geometry.size();
        for (unsigned int i = 0; i < numNodes; i++)
        {
            double &nodalVolume = geometry(i)->FastGetSolutionStepValue(NODAL_VOLUME);
            nodalVolume += elementalVolume;
            if (itElem->Is(SOLID))
            {
                double &solidVolume = geometry(i)->FastGetSolutionStepValue(SOLID_NODAL_VOLUME);
                solidVolume += elementalVolume;
                nodalVolume += -elementalVolume;
                // if(geometry(i)->FastGetSolutionStepValue(INTERFACE_NODE)==true){
                //   //I have the subtract the solid volume to the nodal volume of the interface fluid nodes because I added it before
                //   nodalVolume += -elementalVolume;
                // }
            }
        }
    }
    // }
}

/// Same volume accumulation as ComputeNodalVolume, but first classifies each
/// element as FLUID and/or SOLID from the flags of its nodes (elements made
/// entirely of interface nodes are tagged SOLID).
void ComputeNodalVolumeAndAssignFlagToElementType()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ElementsArrayType &pElements = rModelPart.Elements();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();

#ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
#else
    int number_of_threads = 1;
#endif

    vector<unsigned int> element_partition;
    OpenMPUtils::CreatePartition(number_of_threads, pElements.size(), element_partition);

    // #pragma omp parallel
    // {
    int k = OpenMPUtils::ThisThread();
    typename ElementsArrayType::iterator ElemBegin = pElements.begin() + element_partition[k];
    typename ElementsArrayType::iterator ElemEnd = pElements.begin() + element_partition[k + 1];

    for (typename ElementsArrayType::iterator itElem = ElemBegin; itElem != ElemEnd; itElem++) //MSI: To be parallelized
    {
        Element::GeometryType &geometry = itElem->GetGeometry();
        double elementalVolume = 0;

        if (dimension == 2)
        {
            elementalVolume = geometry.Area() / 3.0;
        }
        else if (dimension == 3)
        {
            elementalVolume = geometry.Volume() * 0.25;
        }
        // index = 0;
        unsigned int numNodes = geometry.size();
        unsigned
int fluidNodes = 0;
        unsigned int solidNodes = 0;
        unsigned int interfaceNodes = 0;
        // Count node categories; interface nodes count as fluid as well.
        for (unsigned int i = 0; i < numNodes; i++)
        {
            if ((geometry(i)->Is(FLUID) && geometry(i)->IsNot(SOLID)) || (geometry(i)->Is(FLUID) && geometry(i)->FastGetSolutionStepValue(INTERFACE_NODE) == true))
            {
                fluidNodes += 1;
            }
            if (geometry(i)->Is(SOLID))
            {
                solidNodes += 1;
            }
            if (geometry(i)->FastGetSolutionStepValue(INTERFACE_NODE) == true)
            {
                interfaceNodes += 1;
            }
        }
        if (solidNodes == numNodes)
        {
            itElem->Set(SOLID);
            // std::cout<<"THIS SOLID ELEMENT WAS "<<geometry(0)->Id()<<" "<<geometry(1)->Id()<<" "<<geometry(2)->Id()<<" "<<std::endl;
        }
        if (interfaceNodes == numNodes)
        {
            itElem->Set(SOLID);
            // std::cout<<"THIS INTERFACE ELEMENT WAS "<<geometry(0)->Id()<<" "<<geometry(1)->Id()<<" "<<geometry(2)->Id()<<" "<<std::endl;
        }
        if (fluidNodes == numNodes)
        {
            itElem->Set(FLUID);
            // std::cout<<"THIS FLUID ELEMENT WAS "<<geometry(0)->Id()<<" "<<geometry(1)->Id()<<" "<<geometry(2)->Id()<<" "<<std::endl;
        }
        if (solidNodes == numNodes && fluidNodes == numNodes)
        {
            itElem->Reset(FLUID);
            // std::cout<<"THIS ELEMENT WAS BOTH FLUID AND SOLID "<<geometry(0)->Id()<<" "<<geometry(1)->Id()<<" "<<geometry(2)->Id()<<" "<<std::endl;
        }
        // Accumulate lumped volumes, routing SOLID-element contributions to
        // SOLID_NODAL_VOLUME (and backing them out of NODAL_VOLUME).
        for (unsigned int i = 0; i < numNodes; i++)
        {
            double &nodalVolume = geometry(i)->FastGetSolutionStepValue(NODAL_VOLUME);
            nodalVolume += elementalVolume;
            if (itElem->Is(SOLID))
            {
                double &solidVolume = geometry(i)->FastGetSolutionStepValue(SOLID_NODAL_VOLUME);
                solidVolume += elementalVolume;
                nodalVolume += -elementalVolume;
                // if(geometry(i)->FastGetSolutionStepValue(INTERFACE_NODE)==true){
                //   //I have the subtract the solid volume to the nodal volume of the interface fluid nodes because I added it before
                //   nodalVolume += -elementalVolume;
                // }
                // if(interfaceNodes==numNodes && solidDensity==0){
                //   std::cout<<"This interface element has not a correct density....I am assigning it the fluid density----- TODO: IMPROVE IT, TAKE FROM NEIGHBOURS"<<std::endl;
                //   double density=geometry(i)->FastGetSolutionStepValue(DENSITY);
                //   geometry(i)->FastGetSolutionStepValue(SOLID_DENSITY)=density;
                // }
            }
        }
    }
    // }
}

/// Per-step initialization: rebuilds the nodal shape-function-derivative
/// neighbour ordering for fluid, solid and interface nodes.
void InitializeSolutionStep() override
{
    FillNodalSFDVector();
}

/// Resets remeshed-domain nodal variables and fills the neighbour-order
/// vectors: NODAL_SFD_NEIGHBOURS_ORDER for fluid/inner nodes,
/// SOLID_NODAL_SFD_NEIGHBOURS_ORDER for solid nodes, and both for
/// interface nodes.
void FillNodalSFDVector()
{
    // std::cout << "FillNodalSFDVector(); ... " << std::endl;
    ModelPart &rModelPart = BaseType::GetModelPart();

    // #pragma omp parallel
    // {
    //   ModelPart::NodeIterator NodesBegin;
    //   ModelPart::NodeIterator NodesEnd;
    //   OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd);
    //   for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
    //   {
    for (ModelPart::NodeIterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); itNode++)
    {
        this->InitializeNodalVariablesForRemeshedDomain(itNode);
        InitializeNodalVariablesForSolidRemeshedDomain(itNode);

        if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == false)
        {
            this->SetNeighboursOrderToNode(itNode); // it assigns neighbours to inner nodes, filling NODAL_SFD_NEIGHBOURS_ORDER
            if (itNode->Is(SOLID))
            {
                SetNeighboursOrderToSolidNode(itNode); // it assigns neighbours to solid inner nodes, filling SOLID_NODAL_SFD_NEIGHBOURS_ORDER
            }
        }
        else
        {
            SetNeighboursOrderToInterfaceNode(itNode); // it assigns neighbours to interface nodes, filling SOLID_NODAL_SFD_NEIGHBOURS_ORDER for solids and NODAL_SFD_NEIGHBOURS_ORDER for fluids
        }
    }
    // }
    // std::cout << "FillNodalSFDVector(); DONE " << std::endl;
}

/// Fills SOLID_NODAL_SFD_NEIGHBOURS_ORDER for a solid (non-interface) node:
/// entry 0 is the node's own Id, followed by the Ids of its neighbours.
void SetNeighboursOrderToSolidNode(ModelPart::NodeIterator itNode)
{
    NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
    unsigned int neighbourNodes = neighb_nodes.size() + 1; // +1 because also the node itself must be considered as neighbor node
    Vector &rNodeOrderedNeighbours = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);

    if (rNodeOrderedNeighbours.size() != neighbourNodes)
        rNodeOrderedNeighbours.resize(neighbourNodes, false);

    noalias(rNodeOrderedNeighbours) = ZeroVector(neighbourNodes);

    rNodeOrderedNeighbours[0] =
itNode->Id();

    if (neighbourNodes > 1)
    {
        for (unsigned int k = 0; k < neighbourNodes - 1; k++)
        {
            rNodeOrderedNeighbours[k + 1] = neighb_nodes[k].Id();
        }
    }
}

/// Fills both neighbour-order vectors of an interface node: fluid neighbours
/// (non-solid or interface) go to NODAL_SFD_NEIGHBOURS_ORDER and solid
/// neighbours to SOLID_NODAL_SFD_NEIGHBOURS_ORDER; entry 0 of each is the
/// node's own Id. A neighbour that is both solid and interface appears in
/// both lists.
void SetNeighboursOrderToInterfaceNode(ModelPart::NodeIterator itNode)
{
    NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
    unsigned int neighbourNodes = neighb_nodes.size() + 1;

    // First pass: count fluid-side and solid-side neighbours (the +1 slots
    // reserve index 0 for the node itself).
    unsigned int fluidCounter = 1;
    unsigned int solidCounter = 1;
    if (neighbourNodes > 1)
    {
        for (unsigned int k = 0; k < neighbourNodes - 1; k++)
        {
            if (neighb_nodes[k].IsNot(SOLID) || neighb_nodes[k].FastGetSolutionStepValue(INTERFACE_NODE) == true)
            {
                fluidCounter += 1;
            }
            if (neighb_nodes[k].Is(SOLID))
            {
                solidCounter += 1;
            }
        }
    }

    Vector &rFluidNodeOrderedNeighbours = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
    Vector &rSolidNodeOrderedNeighbours = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);

    if (rFluidNodeOrderedNeighbours.size() != fluidCounter)
        rFluidNodeOrderedNeighbours.resize(fluidCounter, false);
    if (rSolidNodeOrderedNeighbours.size() != solidCounter)
        rSolidNodeOrderedNeighbours.resize(solidCounter, false);

    noalias(rFluidNodeOrderedNeighbours) = ZeroVector(fluidCounter);
    noalias(rSolidNodeOrderedNeighbours) = ZeroVector(solidCounter);

    rFluidNodeOrderedNeighbours[0] = itNode->Id();
    rSolidNodeOrderedNeighbours[0] = itNode->Id();

    // Second pass: store the neighbour Ids after slot 0.
    fluidCounter = 0;
    solidCounter = 0;
    if (neighbourNodes > 1)
    {
        for (unsigned int k = 0; k < neighbourNodes - 1; k++)
        {
            if (neighb_nodes[k].IsNot(SOLID) || neighb_nodes[k].FastGetSolutionStepValue(INTERFACE_NODE) == true)
            {
                fluidCounter += 1;
                rFluidNodeOrderedNeighbours[fluidCounter] = neighb_nodes[k].Id();
            }
            if (neighb_nodes[k].Is(SOLID))
            {
                solidCounter += 1;
                rSolidNodeOrderedNeighbours[solidCounter] = neighb_nodes[k].Id();
            }
        }
    }
    // fluidCounter+=1;
    // solidCounter+=1;
    // ModelPart& rModelPart = BaseType::GetModelPart();
    // const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    // const unsigned int sizeFluidSDFNeigh=fluidCounter*dimension;
    // const unsigned int sizeSolidSDFNeigh=solidCounter*dimension;
    // Vector& rFluidNodalSFDneighbours=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
    // Vector& rSolidNodalSFDneighbours=itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
    // if(rFluidNodalSFDneighbours.size() != sizeFluidSDFNeigh)
    //   rFluidNodalSFDneighbours.resize(sizeFluidSDFNeigh,false);
    // if(rSolidNodalSFDneighbours.size() != sizeSolidSDFNeigh)
    //   rSolidNodalSFDneighbours.resize(sizeSolidSDFNeigh,false);
    // noalias(rFluidNodalSFDneighbours)=ZeroVector(sizeFluidSDFNeigh);
    // noalias(rSolidNodalSFDneighbours)=ZeroVector(sizeSolidSDFNeigh);
    // rFluidNodalSFDneighbours.resize(sizeFluidSDFNeigh,true);
    // rSolidNodalSFDneighbours.resize(sizeSolidSDFNeigh,true);
    // std::cout<<"rFluidNodeOrderedNeighbours "<<rFluidNodeOrderedNeighbours<<std::endl;
    // std::cout<<"rSolidNodeOrderedNeighbours "<<rSolidNodeOrderedNeighbours<<std::endl;
    // std::cout<<"rFluidNodalSFDneighbours "<<rFluidNodalSFDneighbours<<std::endl;
    // std::cout<<"rSolidNodalSFDneighbours "<<rSolidNodalSFDneighbours<<std::endl;
}

/// Zero-initializes the SOLID_NODAL_* containers of one node after
/// remeshing, sizing them from the spatial dimension and the neighbour
/// count (node itself included).
void InitializeNodalVariablesForSolidRemeshedDomain(ModelPart::NodeIterator itNode)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    unsigned int sizeStrains = 3 * (dimension - 1);
    NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
    unsigned int neighbourNodes = neighb_nodes.size() + 1;
    unsigned int sizeSDFNeigh = neighbourNodes * dimension;

    if (itNode->SolutionStepsDataHas(SOLID_NODAL_CAUCHY_STRESS))
    {
        Vector &rSolidNodalStress = itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS);
        if (rSolidNodalStress.size() != sizeStrains)
            rSolidNodalStress.resize(sizeStrains, false);
        noalias(rSolidNodalStress) = ZeroVector(sizeStrains);
    }

    if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS))
    {
        Vector &rSolidNodalDevStress = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS);
        if (rSolidNodalDevStress.size() != sizeStrains)
            rSolidNodalDevStress.resize(sizeStrains, false);
        noalias(rSolidNodalDevStress) = ZeroVector(sizeStrains);
    }

    if (itNode->SolutionStepsDataHas(SOLID_NODAL_SFD_NEIGHBOURS))
    {
        Vector &rSolidNodalSFDneighbours = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
        if (rSolidNodalSFDneighbours.size() != sizeSDFNeigh)
            rSolidNodalSFDneighbours.resize(sizeSDFNeigh, false);
        noalias(rSolidNodalSFDneighbours) = ZeroVector(sizeSDFNeigh);
    }

    if (itNode->SolutionStepsDataHas(SOLID_NODAL_SFD_NEIGHBOURS_ORDER))
    {
        Vector &rSolidNodalSFDneighboursOrder = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
        if (rSolidNodalSFDneighboursOrder.size() != neighbourNodes)
            rSolidNodalSFDneighboursOrder.resize(neighbourNodes, false);
        noalias(rSolidNodalSFDneighboursOrder) = ZeroVector(neighbourNodes);
    }

    if (itNode->SolutionStepsDataHas(SOLID_NODAL_SPATIAL_DEF_RATE))
    {
        Vector &rSolidSpatialDefRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
        if (rSolidSpatialDefRate.size() != sizeStrains)
            rSolidSpatialDefRate.resize(sizeStrains, false);
        noalias(rSolidSpatialDefRate) = ZeroVector(sizeStrains);
    }

    if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEFORMATION_GRAD))
    {
        Matrix &rSolidFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
        if (rSolidFgrad.size1() != dimension)
            rSolidFgrad.resize(dimension, dimension, false);
        noalias(rSolidFgrad) = ZeroMatrix(dimension, dimension);
    }

    if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEFORMATION_GRAD_VEL))
    {
        Matrix &rSolidFgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
        if (rSolidFgradVel.size1() != dimension)
            rSolidFgradVel.resize(dimension, dimension, false);
        noalias(rSolidFgradVel) = ZeroMatrix(dimension, dimension);
    }

    if (itNode->SolutionStepsDataHas(SOLID_NODAL_VOLUME))
    {
        itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME) = 0;
    }

    if
(itNode->SolutionStepsDataHas(SOLID_NODAL_MEAN_MESH_SIZE))
    {
        itNode->FastGetSolutionStepValue(SOLID_NODAL_MEAN_MESH_SIZE) = 0;
    }

    if (itNode->SolutionStepsDataHas(SOLID_NODAL_FREESURFACE_AREA))
    {
        itNode->FastGetSolutionStepValue(SOLID_NODAL_FREESURFACE_AREA) = 0;
    }

    if (itNode->SolutionStepsDataHas(SOLID_NODAL_VOLUMETRIC_DEF_RATE))
    {
        itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = 0;
    }

    if (itNode->SolutionStepsDataHas(SOLID_NODAL_EQUIVALENT_STRAIN_RATE))
    {
        itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = 0;
    }
}

/// Computes nodal strains and stresses for every node, dispatching on node
/// type: interface nodes get both a fluid-side (theta = 0.5) and a
/// solid-side (theta = 1.0) deformation-gradient computation; pure solid and
/// pure fluid nodes get their respective single computation; nodes with zero
/// fluid AND solid volume are re-initialized as freshly remeshed.
void CalcNodalStrainsAndStresses()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();

    // #pragma omp parallel
    // {
    ModelPart::NodeIterator NodesBegin;
    ModelPart::NodeIterator NodesEnd;
    OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);

    for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
    {
        const double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
        const double solidNodalVolume = itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME);
        // theta: time-integration weight - 0.5 for the fluid-side
        // computation, 1.0 for the solid-side computation (set below).
        double theta = 0.5;

        if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true)
        {
            if (nodalVolume > 0)
            {
                Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
                Vector rNodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
                Matrix &interfaceFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
                Matrix &interfaceFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);

                if (interfaceFgrad.size1() != dimension)
                    interfaceFgrad.resize(dimension, dimension, false);
                if (interfaceFgradVel.size1() != dimension)
                    interfaceFgradVel.resize(dimension, dimension, false);
                noalias(interfaceFgrad) = ZeroMatrix(dimension, dimension);
                noalias(interfaceFgradVel) = ZeroMatrix(dimension, dimension);

                //I have to compute the stresses and strains two times because one time is for the solid and the other for the fluid
                // Matrix interfaceFgrad=ZeroMatrix(dimension,dimension);
                // Matrix interfaceFgradVel=ZeroMatrix(dimension,dimension);
                //the following function is more expensive than the general one because there is one loop more over neighbour nodes. This is why I do it here also for fluid interface nodes.
                ComputeAndStoreNodalDeformationGradientForInterfaceNode(itNode, nodalSFDneighboursId, rNodalSFDneigh, theta, interfaceFgrad, interfaceFgradVel);
                // itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD)=interfaceFgrad;
                // itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL)=interfaceFgradVel;
                CalcNodalStrainsAndStressesForInterfaceFluidNode(itNode);
            }

            if (solidNodalVolume > 0)
            {
                Vector solidNodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
                Vector rSolidNodalSFDneigh = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
                Matrix &solidInterfaceFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
                Matrix &solidInterfaceFgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);

                if (solidInterfaceFgrad.size1() != dimension)
                    solidInterfaceFgrad.resize(dimension, dimension, false);
                if (solidInterfaceFgradVel.size1() != dimension)
                    solidInterfaceFgradVel.resize(dimension, dimension, false);
                noalias(solidInterfaceFgrad) = ZeroMatrix(dimension, dimension);
                noalias(solidInterfaceFgradVel) = ZeroMatrix(dimension, dimension);

                theta = 1.0;
                // Matrix solidInterfaceFgrad=ZeroMatrix(dimension,dimension);
                // Matrix solidInterfaceFgradVel=ZeroMatrix(dimension,dimension);
                ComputeAndStoreNodalDeformationGradientForInterfaceNode(itNode, solidNodalSFDneighboursId, rSolidNodalSFDneigh, theta, solidInterfaceFgrad, solidInterfaceFgradVel);
                // itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD)=solidInterfaceFgrad;
                // itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL)=solidInterfaceFgradVel;
CalcNodalStrainsAndStressesForInterfaceSolidNode(itNode); } } else { if (itNode->Is(SOLID) && solidNodalVolume > 0) { theta = 1.0; ComputeAndStoreNodalDeformationGradientForSolidNode(itNode, theta); CalcNodalStrainsAndStressesForSolidNode(itNode); } else if (nodalVolume > 0) { theta = 0.5; this->ComputeAndStoreNodalDeformationGradient(itNode, theta); this->CalcNodalStrainsAndStressesForNode(itNode); } } if (nodalVolume == 0 && solidNodalVolume == 0) { // if nodalVolume==0 theta = 0.5; this->InitializeNodalVariablesForRemeshedDomain(itNode); InitializeNodalVariablesForSolidRemeshedDomain(itNode); } // } // if(itNode->Is(SOLID) && itNode->FastGetSolutionStepValue(INTERFACE_NODE)==false){ // CopyValuesToSolidNonInterfaceNodes(itNode); // } } // } /* std::cout << "Calc Nodal Strains And Stresses DONE " << std::endl; */ } void CopyValuesToSolidNonInterfaceNodes(ModelPart::NodeIterator itNode) { Vector &solidNodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER); Vector &solidNodalSFDneigh = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS); Matrix &solidInterfaceFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD); Matrix &solidInterfaceFgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL); Vector &solidSpatialDefRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE); double &volumetricDefRate = itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE); Vector &solidCauchyStress = itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS); Vector &solidDeviatoricCauchyStress = itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS); Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER); unsigned int sizeNodalSFDneighboursId = nodalSFDneighboursId.size(); solidNodalSFDneighboursId.resize(sizeNodalSFDneighboursId, false); Vector nodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS); unsigned int 
sizeNodalSFDneigh = nodalSFDneigh.size(); solidNodalSFDneigh.resize(sizeNodalSFDneigh, false); solidNodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER); solidNodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS); solidInterfaceFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD); solidInterfaceFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL); solidSpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE); volumetricDefRate = itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE); solidCauchyStress = itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS); solidDeviatoricCauchyStress = itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS); } void CalcNodalStrainsAndStressesForInterfaceFluidNode(ModelPart::NodeIterator itNode) { ModelPart &rModelPart = BaseType::GetModelPart(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); const double timeInterval = rCurrentProcessInfo[DELTA_TIME]; double deviatoricCoeff = itNode->FastGetSolutionStepValue(DYNAMIC_VISCOSITY); const double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR); if (yieldShear > 0) { const double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT); const double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE); const double exponent = -adaptiveExponent * equivalentStrainRate; if (equivalentStrainRate != 0) { deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent)); } if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0) { // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield deviatoricCoeff = adaptiveExponent * yieldShear; } } const double currFirstLame = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS); Matrix Fgrad = 
itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD); // completes `Matrix Fgrad =` begun just above
    Matrix FgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
    double detFgrad = 1.0;
    Matrix InvFgrad = ZeroMatrix(dimension, dimension);
    Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);
    if (dimension == 2)
    {
        MathUtils<double>::InvertMatrix2(Fgrad, InvFgrad, detFgrad);
    }
    else if (dimension == 3)
    {
        MathUtils<double>::InvertMatrix3(Fgrad, InvFgrad, detFgrad);
    }
    //it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj
    SpatialVelocityGrad = prod(FgradVel, InvFgrad);
    if (dimension == 2)
    {
        // Symmetric part of L stored in Voigt order [xx, yy, xy] (variable
        // names below fix this ordering).
        itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
        itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
        itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
        const double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
        if (yieldShear > 0)
        {
            // Recompute the equivalent strain rate from the fresh strain
            // components, then redo the viscosity regularization with it.
            itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) =
                sqrt((2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] +
                      2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] +
                      4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2]));
            const double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
            const double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
            const double exponent = -adaptiveExponent * equivalentStrainRate;
            if (equivalentStrainRate != 0)
            {
                // NOTE(review): deviatoricCoeff already received this term in
                // the pre-pass above -- the yield contribution is added twice
                // (with old and new strain rate); confirm intended.
                deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
            }
            if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
            {
                // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
                deviatoricCoeff = adaptiveExponent * yieldShear;
            }
        }
        // Volumetric deformation rate = trace of the strain-rate tensor.
        const double DefVol = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1];
        itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
        // sigma = currFirstLame * tr(d) * I + 2 * mu_eff * d  (total), and the
        // deviatoric part subtracts tr(d)/3 from the normal components.
        double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0];
        double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1];
        double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];
        double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
        double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
        double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];
        // if(itNode->Is(SOLID))
        // {
        // nodalSigmaTot_xx+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[0];
        // nodalSigmaTot_yy+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[1];
        // nodalSigmaTot_xy+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[2];
        // nodalSigmaDev_xx+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[0];
        // nodalSigmaDev_yy+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[1];
        // nodalSigmaDev_xy+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[2];
        // }
        itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[0] = nodalSigmaTot_xx;
        itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[1] = nodalSigmaTot_yy;
        itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[2] = nodalSigmaTot_xy;
        itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[0] = nodalSigmaDev_xx;
        itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[1] = nodalSigmaDev_yy;
        itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[2] = nodalSigmaDev_xy;
    }
    else if (dimension == 3)
    {
        // Voigt order fixed by the names below: [xx, yy, zz, xy, xz, yz].
        itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
        itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
        itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] = SpatialVelocityGrad(2, 2);
        itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
        itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
        itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));
        const double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
        if (yieldShear > 0)
        {
            itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) =
                sqrt(2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] +
                     2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] +
                     2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] +
                     4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] +
                     4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] +
                     4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5]);
            const double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
            const double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
            const double exponent = -adaptiveExponent * equivalentStrainRate;
            if (equivalentStrainRate != 0)
            {
                deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
            }
            if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
            {
                // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
                deviatoricCoeff = adaptiveExponent * yieldShear;
            }
        }
        const double DefVol = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] +
                              itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] +
                              itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];
        itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
        double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0];
        double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1];
        double nodalSigmaTot_zz = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];
        double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3];
        double nodalSigmaTot_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4];
        double nodalSigmaTot_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5];
        double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
        double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
        double nodalSigmaDev_zz = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] - DefVol / 3.0);
        double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3];
        double nodalSigmaDev_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4];
        double nodalSigmaDev_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5];
        // if(itNode->Is(SOLID))
        // {
        // nodalSigmaTot_xx+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[0];
        // nodalSigmaTot_yy+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[1];
        // nodalSigmaTot_zz+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[2];
        // nodalSigmaTot_xy+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[3];
        // nodalSigmaTot_xz+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[4];
        // nodalSigmaTot_yz+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[5];
        // nodalSigmaDev_xx+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[0];
        // nodalSigmaDev_yy+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[1];
        // nodalSigmaDev_zz+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[2];
        // nodalSigmaDev_xy+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[3];
        // nodalSigmaDev_xz+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[4];
        // nodalSigmaDev_yz+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[5];
        // }
        itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[0] = nodalSigmaTot_xx;
        itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[1] = nodalSigmaTot_yy;
        itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[2] = nodalSigmaTot_zz;
        itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[3] = nodalSigmaTot_xy;
        itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[4] = nodalSigmaTot_xz;
        itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[5] = nodalSigmaTot_yz;
        itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[0] = nodalSigmaDev_xx;
        itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[1] = nodalSigmaDev_yy;
        itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[2] = nodalSigmaDev_zz;
        itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[3] = nodalSigmaDev_xy;
        itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[4] = nodalSigmaDev_xz;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[5] = nodalSigmaDev_yz; // last store of the fluid-interface 3D branch
    }
}

// Computes nodal strain rate and Cauchy stress on the SOLID side of an
// interface node from SOLID_NODAL_DEFORMATION_GRAD(_VEL). The material
// coefficients are time-scaled Lame parameters built from E and nu:
// currFirstLame = dt * lambda, deviatoricCoeff = dt * mu, so the computed
// increment is ADDED to the previous-step stress (the ",1" buffer reads in the
// Is(SOLID) blocks below) -- a hypoelastic incremental update.
void CalcNodalStrainsAndStressesForInterfaceSolidNode(ModelPart::NodeIterator itNode)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
    const double youngModulus = itNode->FastGetSolutionStepValue(YOUNG_MODULUS);
    const double poissonRatio = itNode->FastGetSolutionStepValue(POISSON_RATIO);
    const double currFirstLame = timeInterval * poissonRatio * youngModulus / ((1.0 + poissonRatio) * (1.0 - 2.0 * poissonRatio));
    double deviatoricCoeff = timeInterval * youngModulus / (1.0 + poissonRatio) * 0.5;
    Matrix Fgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
    Matrix FgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
    double detFgrad = 1.0;
    Matrix InvFgrad = ZeroMatrix(dimension, dimension);
    Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);
    if (dimension == 2)
    {
        MathUtils<double>::InvertMatrix2(Fgrad, InvFgrad, detFgrad);
    }
    else if (dimension == 3)
    {
        MathUtils<double>::InvertMatrix3(Fgrad, InvFgrad, detFgrad);
    }
    //it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj
    SpatialVelocityGrad = prod(FgradVel, InvFgrad);
    if (dimension == 2)
    {
        // NOTE(review): "stain" is a typo for "strain" (kept to stay
        // byte-identical to the code).
        auto &r_stain_tensor2D = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
        r_stain_tensor2D[0] = SpatialVelocityGrad(0, 0);
        r_stain_tensor2D[1] = SpatialVelocityGrad(1, 1);
        r_stain_tensor2D[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
        // NOTE(review): this Papanastasiou yield-shear regularization reads
        // fluid-style properties (YIELD_SHEAR, ADAPTIVE_EXPONENT) on a solid
        // node -- looks copy-pasted from the fluid routine; confirm intended.
        const double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
        if (yieldShear > 0)
        {
            itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) =
                sqrt((2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
                      2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
                      4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]));
            const double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
            const double equivalentStrainRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE);
            const double exponent = -adaptiveExponent * equivalentStrainRate;
            if (equivalentStrainRate != 0)
            {
                deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
            }
            if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
            {
                // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
                deviatoricCoeff = adaptiveExponent * yieldShear;
            }
        }
        const double DefVol = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
        itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
        // Stress increment: dt*lambda*tr(d)*I + 2*dt*mu*d (Voigt [xx, yy, xy]).
        double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
        double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
        double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
        double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
        double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
        double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
        if (itNode->Is(SOLID))
        {
            // Accumulate onto the previous time step's stress (buffer index 1).
            nodalSigmaTot_xx += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[0];
            nodalSigmaTot_yy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[1];
            nodalSigmaTot_xy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[2];
            nodalSigmaDev_xx += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[0];
            nodalSigmaDev_yy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[1];
            nodalSigmaDev_xy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[2];
        }
        auto &r_stress_tensor2D = itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0);
        r_stress_tensor2D[0] = nodalSigmaTot_xx;
        r_stress_tensor2D[1] = nodalSigmaTot_yy;
        r_stress_tensor2D[2] = nodalSigmaTot_xy;
        auto &r_dev_stress_tensor2D = itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0);
        r_dev_stress_tensor2D[0] = nodalSigmaDev_xx;
        r_dev_stress_tensor2D[1] = nodalSigmaDev_yy;
        r_dev_stress_tensor2D[2] = nodalSigmaDev_xy;
    }
    else if (dimension == 3)
    {
        // Voigt order [xx, yy, zz, xy, xz, yz].
        auto &r_stain_tensor3D = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
        r_stain_tensor3D[0] = SpatialVelocityGrad(0, 0);
        r_stain_tensor3D[1] = SpatialVelocityGrad(1, 1);
        r_stain_tensor3D[2] = SpatialVelocityGrad(2, 2);
        r_stain_tensor3D[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
        r_stain_tensor3D[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
        r_stain_tensor3D[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));
        const double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
        if (yieldShear > 0)
        {
            itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) =
                sqrt(2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
                     2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
                     2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] +
                     4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] +
                     4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] +
                     4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5]);
            const double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
            const double equivalentStrainRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE);
            const double exponent = -adaptiveExponent * equivalentStrainRate;
            if (equivalentStrainRate != 0)
            {
                deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
            }
            if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
            {
                // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
                deviatoricCoeff = adaptiveExponent * yieldShear;
            }
        }
        const double DefVol = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
                              itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
                              itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
        itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
        double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
        double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
        double nodalSigmaTot_zz = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
        double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3];
        double nodalSigmaTot_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4];
        double nodalSigmaTot_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5];
        double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
        double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
        double nodalSigmaDev_zz = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] - DefVol / 3.0);
        double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3];
        double nodalSigmaDev_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4];
        double nodalSigmaDev_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5];
        if (itNode->Is(SOLID))
        {
            // Accumulate onto the previous time step's stress (buffer index 1).
            nodalSigmaTot_xx += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[0];
            nodalSigmaTot_yy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[1];
            nodalSigmaTot_zz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[2];
            nodalSigmaTot_xy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[3];
            nodalSigmaTot_xz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[4];
            nodalSigmaTot_yz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[5];
            nodalSigmaDev_xx += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[0];
            nodalSigmaDev_yy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[1];
            nodalSigmaDev_zz += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[2];
            nodalSigmaDev_xy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[3];
            nodalSigmaDev_xz += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[4];
            nodalSigmaDev_yz +=
itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[5]; // completes the dangling `nodalSigmaDev_yz +=` above
        }
        auto &r_stress_tensor3D = itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0);
        r_stress_tensor3D[0] = nodalSigmaTot_xx;
        r_stress_tensor3D[1] = nodalSigmaTot_yy;
        r_stress_tensor3D[2] = nodalSigmaTot_zz;
        r_stress_tensor3D[3] = nodalSigmaTot_xy;
        r_stress_tensor3D[4] = nodalSigmaTot_xz;
        r_stress_tensor3D[5] = nodalSigmaTot_yz;
        auto &r_dev_stress_tensor3D = itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0);
        r_dev_stress_tensor3D[0] = nodalSigmaDev_xx;
        r_dev_stress_tensor3D[1] = nodalSigmaDev_yy;
        r_dev_stress_tensor3D[2] = nodalSigmaDev_zz;
        r_dev_stress_tensor3D[3] = nodalSigmaDev_xy;
        r_dev_stress_tensor3D[4] = nodalSigmaDev_xz;
        r_dev_stress_tensor3D[5] = nodalSigmaDev_yz;
    }
}

// Computes nodal strain rate and Cauchy stress for a NON-interface solid node.
// NOTE(review): near-duplicate of CalcNodalStrainsAndStressesForInterfaceSolidNode
// (same coefficients dt*lambda / dt*mu, same incremental accumulation onto the
// previous-step stress); candidates for extraction into one shared helper.
void CalcNodalStrainsAndStressesForSolidNode(ModelPart::NodeIterator itNode)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
    const double youngModulus = itNode->FastGetSolutionStepValue(YOUNG_MODULUS);
    const double poissonRatio = itNode->FastGetSolutionStepValue(POISSON_RATIO);
    // dt-scaled first Lame parameter and shear modulus built from E and nu.
    const double currFirstLame = timeInterval * poissonRatio * youngModulus / ((1.0 + poissonRatio) * (1.0 - 2.0 * poissonRatio));
    double deviatoricCoeff = timeInterval * youngModulus / (1.0 + poissonRatio) * 0.5;
    Matrix Fgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
    Matrix FgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
    double detFgrad = 1.0;
    Matrix InvFgrad = ZeroMatrix(dimension, dimension);
    Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);
    if (dimension == 2)
    {
        MathUtils<double>::InvertMatrix2(Fgrad, InvFgrad, detFgrad);
    }
    else if (dimension == 3)
    {
        MathUtils<double>::InvertMatrix3(Fgrad, InvFgrad, detFgrad);
    }
    // if(itNode->Is(SOLID)){
    // std::cout<<"solid node"<<std::endl;
    // }
    // if(itNode->Is(FLUID)){
    // std::cout<<"FLUID node"<<std::endl;
    // }
    // if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){
    // std::cout<<"currFirstLame "<<currFirstLame<<" deviatoricCoeff "<<deviatoricCoeff<<std::endl;
    // }else{
    // std::cout<<"NOT INTERFACE currFirstLame "<<currFirstLame<<" deviatoricCoeff "<<deviatoricCoeff<<std::endl;
    // }
    //it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj
    SpatialVelocityGrad = prod(FgradVel, InvFgrad);
    if (dimension == 2)
    {
        // "stain" is a typo for "strain" (name kept byte-identical).
        auto &r_stain_tensor2D = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
        r_stain_tensor2D[0] = SpatialVelocityGrad(0, 0);
        r_stain_tensor2D[1] = SpatialVelocityGrad(1, 1);
        r_stain_tensor2D[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
        // NOTE(review): fluid-style yield-shear regularization on a solid node
        // -- looks copy-pasted from the fluid routine; confirm intended.
        const double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
        if (yieldShear > 0)
        {
            itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) =
                sqrt((2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
                      2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
                      4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]));
            const double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
            const double equivalentStrainRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE);
            const double exponent = -adaptiveExponent * equivalentStrainRate;
            if (equivalentStrainRate != 0)
            {
                deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
            }
            if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
            {
                // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
                deviatoricCoeff = adaptiveExponent * yieldShear;
            }
        }
        const double DefVol = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
        itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
        // Stress increment dt*lambda*tr(d)*I + 2*dt*mu*d, Voigt [xx, yy, xy].
        double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
        double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
        double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
        double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
        double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
        double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
        if (itNode->Is(SOLID))
        {
            // Accumulate onto the previous time step's stress (buffer index 1).
            nodalSigmaTot_xx += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[0];
            nodalSigmaTot_yy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[1];
            nodalSigmaTot_xy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[2];
            nodalSigmaDev_xx += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[0];
            nodalSigmaDev_yy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[1];
            nodalSigmaDev_xy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[2];
        }
        auto &r_stress_tensor2D = itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0);
        r_stress_tensor2D[0] = nodalSigmaTot_xx;
        r_stress_tensor2D[1] = nodalSigmaTot_yy;
        r_stress_tensor2D[2] = nodalSigmaTot_xy;
        auto &r_dev_stress_tensor2D = itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0);
        r_dev_stress_tensor2D[0] = nodalSigmaDev_xx;
        r_dev_stress_tensor2D[1] = nodalSigmaDev_yy;
        r_dev_stress_tensor2D[2] = nodalSigmaDev_xy;
    }
    else if (dimension == 3)
    {
        // Voigt order [xx, yy, zz, xy, xz, yz].
        auto &r_stain_tensor3D = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
        r_stain_tensor3D[0] = SpatialVelocityGrad(0, 0);
        r_stain_tensor3D[1] = SpatialVelocityGrad(1, 1);
        r_stain_tensor3D[2] = SpatialVelocityGrad(2, 2);
        r_stain_tensor3D[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
        r_stain_tensor3D[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
        r_stain_tensor3D[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));
        const double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
        if (yieldShear > 0)
        {
            itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) =
                sqrt(2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
                     2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
                     2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] +
                     4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] +
                     4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] +
                     4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5]);
            const double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
            const double equivalentStrainRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE);
            const double exponent = -adaptiveExponent * equivalentStrainRate;
            if (equivalentStrainRate != 0)
            {
                deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
            }
            if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
            {
                // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
                deviatoricCoeff = adaptiveExponent * yieldShear;
            }
        }
        const double DefVol = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
                              itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
                              itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
        itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
        double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
        double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
        double nodalSigmaTot_zz = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
        double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3];
        double nodalSigmaTot_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4];
        double nodalSigmaTot_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5];
        double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
        double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
        double nodalSigmaDev_zz = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] - DefVol / 3.0);
        double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3];
        double nodalSigmaDev_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4];
        double nodalSigmaDev_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5];
        if (itNode->Is(SOLID))
        {
            // Accumulate onto the previous time step's stress (buffer index 1).
            nodalSigmaTot_xx += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[0];
            nodalSigmaTot_yy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[1];
            nodalSigmaTot_zz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[2];
            nodalSigmaTot_xy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[3];
            nodalSigmaTot_xz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[4];
            nodalSigmaTot_yz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[5];
            nodalSigmaDev_xx += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[0];
            nodalSigmaDev_yy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[1];
            nodalSigmaDev_zz += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[2];
            nodalSigmaDev_xy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[3];
            nodalSigmaDev_xz += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[4];
            nodalSigmaDev_yz += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[5];
        }
        auto &r_tensor3D = itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0);
        r_tensor3D[0] = nodalSigmaTot_xx;
        r_tensor3D[1] = nodalSigmaTot_yy;
        r_tensor3D[2] = nodalSigmaTot_zz;
        r_tensor3D[3] = nodalSigmaTot_xy;
        r_tensor3D[4] = nodalSigmaTot_xz;
        r_tensor3D[5] = nodalSigmaTot_yz;
        auto &r_dev_tensor3D = itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0);
        r_dev_tensor3D[0] = nodalSigmaDev_xx;
        r_dev_tensor3D[1] = nodalSigmaDev_yy;
        r_dev_tensor3D[2] = nodalSigmaDev_zz;
        r_dev_tensor3D[3] = nodalSigmaDev_xy;
        r_dev_tensor3D[4] = nodalSigmaDev_xz;
        r_dev_tensor3D[5] = nodalSigmaDev_yz;
    }
}

// Computes the nodal strain rate (no stresses) for a solid node; the body of
// this function continues past this chunk.
void CalcNodalStrainsForSolidNode(ModelPart::NodeIterator itNode)
{
    /* std::cout << "Calc Nodal Strains " << std::endl; */
    ModelPart &rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    // Matrix Fgrad=itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
// Matrix FgradVel=itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL); // double detFgrad=1.0; // Matrix InvFgrad=ZeroMatrix(dimension,dimension); // Matrix SpatialVelocityGrad=ZeroMatrix(dimension,dimension); double detFgrad = 1.0; Matrix nodalFgrad = ZeroMatrix(dimension, dimension); Matrix FgradVel = ZeroMatrix(dimension, dimension); Matrix InvFgrad = ZeroMatrix(dimension, dimension); Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension); nodalFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD); FgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL); //Inverse if (dimension == 2) { MathUtils<double>::InvertMatrix2(nodalFgrad, InvFgrad, detFgrad); } else if (dimension == 3) { MathUtils<double>::InvertMatrix3(nodalFgrad, InvFgrad, detFgrad); } //it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj SpatialVelocityGrad = prod(FgradVel, InvFgrad); if (dimension == 2) { itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0); itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1); itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1)); itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt((2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] + 2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] + 4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2])); const double DefX = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0]; const double DefY = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1]; const double DefVol = DefX + DefY; 
itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol; } else if (dimension == 3) { itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0); itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1); itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] = SpatialVelocityGrad(2, 2); itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1)); itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2)); itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2)); itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt(2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] + 2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] + 2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] + 4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] + 4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] + 4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5]); const double DefX = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0]; const double DefY = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1]; const double DefZ = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]; const double DefVol = DefX + DefY + DefZ; 
itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol; } } void CalcNodalStrainsForInterfaceSolidNode(ModelPart::NodeIterator itNode) { /* std::cout << "Calc Nodal Strains " << std::endl; */ ModelPart &rModelPart = BaseType::GetModelPart(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); Matrix Fgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD); Matrix FgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL); double detFgrad = 1.0; Matrix InvFgrad = ZeroMatrix(dimension, dimension); Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension); //Inverse if (dimension == 2) { MathUtils<double>::InvertMatrix2(Fgrad, InvFgrad, detFgrad); } else if (dimension == 3) { MathUtils<double>::InvertMatrix3(Fgrad, InvFgrad, detFgrad); } //it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj SpatialVelocityGrad = prod(FgradVel, InvFgrad); if (dimension == 2) { itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0); itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1); itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1)); itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt((2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] + 2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] + 4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2])); const double DefX = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0]; const double DefY = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1]; const double 
DefVol = DefX + DefY; itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol; } else if (dimension == 3) { itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0); itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1); itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] = SpatialVelocityGrad(2, 2); itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1)); itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2)); itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2)); itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt(2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] + 2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] + 2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] + 4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] + 4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] + 4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5]); const double DefX = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0]; const double DefY = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1]; const double DefZ = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]; const double DefVol = 
DefX + DefY + DefZ; itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol; } /* std::cout << "Calc Nodal Strains And Stresses DONE " << std::endl; */ } void CalcNodalStrains() { /* std::cout << "Calc Nodal Strains " << std::endl; */ ModelPart &rModelPart = BaseType::GetModelPart(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); // #pragma omp parallel // { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { const double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME); const double solidNodalVolume = itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME); double theta = 1.0; if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true) { if (nodalVolume > 0) { //I have to compute the strains two times because one time is for the solid and the other for the fluid Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER); Vector rNodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS); Matrix &interfaceFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD); Matrix &interfaceFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL); if (interfaceFgrad.size1() != dimension) interfaceFgrad.resize(dimension, dimension, false); if (interfaceFgradVel.size1() != dimension) interfaceFgradVel.resize(dimension, dimension, false); noalias(interfaceFgrad) = ZeroMatrix(dimension, dimension); noalias(interfaceFgradVel) = ZeroMatrix(dimension, dimension); // Matrix interfaceFgrad = ZeroMatrix(dimension,dimension); // Matrix interfaceFgradVel = ZeroMatrix(dimension,dimension); //the following function is more expensive than the general one because there is one loop more over neighbour nodes. This is why I do it here also for fluid interface nodes. 
ComputeAndStoreNodalDeformationGradientForInterfaceNode(itNode, nodalSFDneighboursId, rNodalSFDneigh, theta, interfaceFgrad, interfaceFgradVel); // itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD)=interfaceFgrad; // itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL)=interfaceFgradVel; this->CalcNodalStrainsForNode(itNode); } if (solidNodalVolume > 0) { Vector solidNodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER); Vector rSolidNodalSFDneigh = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS); Matrix &solidInterfaceFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD); Matrix &solidInterfaceFgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL); if (solidInterfaceFgrad.size1() != dimension) solidInterfaceFgrad.resize(dimension, dimension, false); if (solidInterfaceFgradVel.size1() != dimension) solidInterfaceFgradVel.resize(dimension, dimension, false); noalias(solidInterfaceFgrad) = ZeroMatrix(dimension, dimension); noalias(solidInterfaceFgradVel) = ZeroMatrix(dimension, dimension); // Matrix solidInterfaceFgrad = ZeroMatrix(dimension,dimension); // Matrix solidInterfaceFgradVel = ZeroMatrix(dimension,dimension); ComputeAndStoreNodalDeformationGradientForInterfaceNode(itNode, solidNodalSFDneighboursId, rSolidNodalSFDneigh, theta, solidInterfaceFgrad, solidInterfaceFgradVel); // itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD)=solidInterfaceFgrad; // itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL)=solidInterfaceFgradVel; CalcNodalStrainsForInterfaceSolidNode(itNode); } } else { if (itNode->Is(SOLID) && solidNodalVolume > 0) { ComputeAndStoreNodalDeformationGradientForSolidNode(itNode, theta); CalcNodalStrainsForSolidNode(itNode); } else if (nodalVolume > 0) { this->ComputeAndStoreNodalDeformationGradient(itNode, theta); this->CalcNodalStrainsForNode(itNode); } } if (nodalVolume == 0 && solidNodalVolume == 0) { // if 
nodalVolume==0 this->InitializeNodalVariablesForRemeshedDomain(itNode); InitializeNodalVariablesForSolidRemeshedDomain(itNode); } // if(itNode->Is(SOLID) && itNode->FastGetSolutionStepValue(INTERFACE_NODE)==false){ // CopyValuesToSolidNonInterfaceNodes(itNode); // } } // } /* std::cout << "Calc Nodal Strains And Stresses DONE " << std::endl; */ } void ComputeAndStoreNodalDeformationGradientForSolidNode(ModelPart::NodeIterator itNode, double theta) { KRATOS_TRY; ModelPart &rModelPart = BaseType::GetModelPart(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER); Vector rNodalSFDneigh = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS); /* unsigned int idThisNode=nodalSFDneighboursId[0]; */ const unsigned int neighSize = nodalSFDneighboursId.size(); Matrix Fgrad = ZeroMatrix(dimension, dimension); Matrix FgradVel = ZeroMatrix(dimension, dimension); NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); if (dimension == 2) { double dNdXi = rNodalSFDneigh[0]; double dNdYi = rNodalSFDneigh[1]; Fgrad(0, 0) += dNdXi * itNode->X(); Fgrad(0, 1) += dNdYi * itNode->X(); Fgrad(1, 0) += dNdXi * itNode->Y(); Fgrad(1, 1) += dNdYi * itNode->Y(); double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta); double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta); FgradVel(0, 0) += dNdXi * VelocityX; FgradVel(0, 1) += dNdYi * VelocityX; FgradVel(1, 0) += dNdXi * VelocityY; FgradVel(1, 1) += dNdYi * VelocityY; unsigned int firstRow = 2; if (neighSize > 0) { for (unsigned int i = 0; i < neighSize - 1; i++) //neigh_nodes has one cell less than nodalSFDneighboursId becuase this has also the considered node ID at the beginning { dNdXi = 
rNodalSFDneigh[firstRow]; dNdYi = rNodalSFDneigh[firstRow + 1]; unsigned int neigh_nodes_id = neighb_nodes[i].Id(); unsigned int other_neigh_nodes_id = nodalSFDneighboursId[i + 1]; if (neigh_nodes_id != other_neigh_nodes_id) { std::cout << "node (x,y)=(" << itNode->X() << "," << itNode->Y() << ") with neigh_nodes_id " << neigh_nodes_id << " different than other_neigh_nodes_id " << other_neigh_nodes_id << std::endl; } Fgrad(0, 0) += dNdXi * neighb_nodes[i].X(); Fgrad(0, 1) += dNdYi * neighb_nodes[i].X(); Fgrad(1, 0) += dNdXi * neighb_nodes[i].Y(); Fgrad(1, 1) += dNdYi * neighb_nodes[i].Y(); VelocityX = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta); VelocityY = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta); FgradVel(0, 0) += dNdXi * VelocityX; FgradVel(0, 1) += dNdYi * VelocityX; FgradVel(1, 0) += dNdXi * VelocityY; FgradVel(1, 1) += dNdYi * VelocityY; firstRow += 2; } } } else { double dNdXi = rNodalSFDneigh[0]; double dNdYi = rNodalSFDneigh[1]; double dNdZi = rNodalSFDneigh[2]; double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta); double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta); double VelocityZ = itNode->FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta); Fgrad(0, 0) += dNdXi * itNode->X(); Fgrad(0, 1) += dNdYi * itNode->X(); Fgrad(0, 2) += dNdZi * itNode->X(); Fgrad(1, 0) += dNdXi * itNode->Y(); Fgrad(1, 1) += dNdYi * itNode->Y(); Fgrad(1, 2) += dNdZi * itNode->Y(); Fgrad(2, 0) += dNdXi * itNode->Z(); Fgrad(2, 1) += dNdYi * itNode->Z(); Fgrad(2, 2) += dNdZi * itNode->Z(); FgradVel(0, 0) += dNdXi * VelocityX; FgradVel(0, 1) += dNdYi * VelocityX; 
FgradVel(0, 2) += dNdZi * VelocityX; FgradVel(1, 0) += dNdXi * VelocityY; FgradVel(1, 1) += dNdYi * VelocityY; FgradVel(1, 2) += dNdZi * VelocityY; FgradVel(2, 0) += dNdXi * VelocityZ; FgradVel(2, 1) += dNdYi * VelocityZ; FgradVel(2, 2) += dNdZi * VelocityZ; unsigned int firstRow = 3; if (neighSize > 0) { for (unsigned int i = 0; i < neighSize - 1; i++) { dNdXi = rNodalSFDneigh[firstRow]; dNdYi = rNodalSFDneigh[firstRow + 1]; dNdZi = rNodalSFDneigh[firstRow + 2]; VelocityX = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta); VelocityY = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta); VelocityZ = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta); Fgrad(0, 0) += dNdXi * neighb_nodes[i].X(); Fgrad(0, 1) += dNdYi * neighb_nodes[i].X(); Fgrad(0, 2) += dNdZi * neighb_nodes[i].X(); Fgrad(1, 0) += dNdXi * neighb_nodes[i].Y(); Fgrad(1, 1) += dNdYi * neighb_nodes[i].Y(); Fgrad(1, 2) += dNdZi * neighb_nodes[i].Y(); Fgrad(2, 0) += dNdXi * neighb_nodes[i].Z(); Fgrad(2, 1) += dNdYi * neighb_nodes[i].Z(); Fgrad(2, 2) += dNdZi * neighb_nodes[i].Z(); FgradVel(0, 0) += dNdXi * VelocityX; FgradVel(0, 1) += dNdYi * VelocityX; FgradVel(0, 2) += dNdZi * VelocityX; FgradVel(1, 0) += dNdXi * VelocityY; FgradVel(1, 1) += dNdYi * VelocityY; FgradVel(1, 2) += dNdZi * VelocityY; FgradVel(2, 0) += dNdXi * VelocityZ; FgradVel(2, 1) += dNdYi * VelocityZ; FgradVel(2, 2) += dNdZi * VelocityZ; firstRow += 3; } } } itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD) = Fgrad; itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL) = FgradVel; KRATOS_CATCH(""); } void ComputeAndStoreNodalDeformationGradientForInterfaceNode(ModelPart::NodeIterator itNode, Vector nodalSFDneighboursId, Vector rNodalSFDneigh, double theta, 
Matrix &Fgrad, Matrix &FgradVel) { KRATOS_TRY; ModelPart &rModelPart = BaseType::GetModelPart(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); /* unsigned int idThisNode=nodalSFDneighboursId[0]; */ const unsigned int neighSize = nodalSFDneighboursId.size(); noalias(Fgrad) = ZeroMatrix(dimension, dimension); noalias(FgradVel) = ZeroMatrix(dimension, dimension); NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); const unsigned int neighNodesSize = neighb_nodes.size(); if (dimension == 2) { double dNdXi = rNodalSFDneigh[0]; double dNdYi = rNodalSFDneigh[1]; Fgrad(0, 0) += dNdXi * itNode->X(); Fgrad(0, 1) += dNdYi * itNode->X(); Fgrad(1, 0) += dNdXi * itNode->Y(); Fgrad(1, 1) += dNdYi * itNode->Y(); double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta); double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta); FgradVel(0, 0) += dNdXi * VelocityX; FgradVel(0, 1) += dNdYi * VelocityX; FgradVel(1, 0) += dNdXi * VelocityY; FgradVel(1, 1) += dNdYi * VelocityY; unsigned int firstRow = 2; if (neighSize > 0) { for (unsigned int i = 0; i < neighSize - 1; i++) //neigh_nodes has one cell less than nodalSFDneighboursId becuase this has also the considered node ID at the beginning { unsigned int other_neigh_nodes_id = nodalSFDneighboursId[i + 1]; for (unsigned int k = 0; k < neighNodesSize; k++) { unsigned int neigh_nodes_id = neighb_nodes[k].Id(); if (neigh_nodes_id == other_neigh_nodes_id) { dNdXi = rNodalSFDneigh[firstRow]; dNdYi = rNodalSFDneigh[firstRow + 1]; Fgrad(0, 0) += dNdXi * neighb_nodes[k].X(); Fgrad(0, 1) += dNdYi * neighb_nodes[k].X(); Fgrad(1, 0) += dNdXi * neighb_nodes[k].Y(); Fgrad(1, 1) += dNdYi * neighb_nodes[k].Y(); VelocityX = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + 
neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta); VelocityY = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta); FgradVel(0, 0) += dNdXi * VelocityX; FgradVel(0, 1) += dNdYi * VelocityX; FgradVel(1, 0) += dNdXi * VelocityY; FgradVel(1, 1) += dNdYi * VelocityY; firstRow += 2; break; } } } } } else { double dNdXi = rNodalSFDneigh[0]; double dNdYi = rNodalSFDneigh[1]; double dNdZi = rNodalSFDneigh[2]; double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta); double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta); double VelocityZ = itNode->FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta); Fgrad(0, 0) += dNdXi * itNode->X(); Fgrad(0, 1) += dNdYi * itNode->X(); Fgrad(0, 2) += dNdZi * itNode->X(); Fgrad(1, 0) += dNdXi * itNode->Y(); Fgrad(1, 1) += dNdYi * itNode->Y(); Fgrad(1, 2) += dNdZi * itNode->Y(); Fgrad(2, 0) += dNdXi * itNode->Z(); Fgrad(2, 1) += dNdYi * itNode->Z(); Fgrad(2, 2) += dNdZi * itNode->Z(); FgradVel(0, 0) += dNdXi * VelocityX; FgradVel(0, 1) += dNdYi * VelocityX; FgradVel(0, 2) += dNdZi * VelocityX; FgradVel(1, 0) += dNdXi * VelocityY; FgradVel(1, 1) += dNdYi * VelocityY; FgradVel(1, 2) += dNdZi * VelocityY; FgradVel(2, 0) += dNdXi * VelocityZ; FgradVel(2, 1) += dNdYi * VelocityZ; FgradVel(2, 2) += dNdZi * VelocityZ; unsigned int firstRow = 3; if (neighSize > 0) { for (unsigned int i = 0; i < neighSize - 1; i++) { unsigned int other_neigh_nodes_id = nodalSFDneighboursId[i + 1]; for (unsigned int k = 0; k < neighNodesSize; k++) { unsigned int neigh_nodes_id = neighb_nodes[k].Id(); if (neigh_nodes_id == other_neigh_nodes_id) { dNdXi = rNodalSFDneigh[firstRow]; dNdYi = rNodalSFDneigh[firstRow + 1]; dNdZi = 
rNodalSFDneigh[firstRow + 2]; VelocityX = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta); VelocityY = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta); VelocityZ = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta); Fgrad(0, 0) += dNdXi * neighb_nodes[k].X(); Fgrad(0, 1) += dNdYi * neighb_nodes[k].X(); Fgrad(0, 2) += dNdZi * neighb_nodes[k].X(); Fgrad(1, 0) += dNdXi * neighb_nodes[k].Y(); Fgrad(1, 1) += dNdYi * neighb_nodes[k].Y(); Fgrad(1, 2) += dNdZi * neighb_nodes[k].Y(); Fgrad(2, 0) += dNdXi * neighb_nodes[k].Z(); Fgrad(2, 1) += dNdYi * neighb_nodes[k].Z(); Fgrad(2, 2) += dNdZi * neighb_nodes[k].Z(); FgradVel(0, 0) += dNdXi * VelocityX; FgradVel(0, 1) += dNdYi * VelocityX; FgradVel(0, 2) += dNdZi * VelocityX; FgradVel(1, 0) += dNdXi * VelocityY; FgradVel(1, 1) += dNdYi * VelocityY; FgradVel(1, 2) += dNdZi * VelocityY; FgradVel(2, 0) += dNdXi * VelocityZ; FgradVel(2, 1) += dNdYi * VelocityZ; FgradVel(2, 2) += dNdZi * VelocityZ; firstRow += 3; break; } } } } } // itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD)=Fgrad; // itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL)=FgradVel; KRATOS_CATCH(""); } void UpdateTopology(ModelPart &rModelPart, unsigned int echoLevel) { KRATOS_TRY; // std::cout<<" UpdateTopology ..."<<std::endl; /* this->CalculateDisplacements(); */ CalculateDisplacementsAndResetNodalVariables(); BaseType::MoveMesh(); BoundaryNormalsCalculationUtilities BoundaryComputation; BoundaryComputation.CalculateWeightedBoundaryNormals(rModelPart, echoLevel); // std::cout<<" UpdateTopology DONE"<<std::endl; KRATOS_CATCH(""); } void CalculateDisplacementsAndResetNodalVariables() { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = 
rModelPart.GetProcessInfo(); const double TimeStep = rCurrentProcessInfo[DELTA_TIME]; const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); unsigned int sizeStrains = 3 * (dimension - 1); // #pragma omp parallel // { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd); for (ModelPart::NodeIterator i = NodesBegin; i != NodesEnd; ++i) { array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0); array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1); array_1d<double, 3> &CurrentDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 0); array_1d<double, 3> &PreviousDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 1); CurrentDisplacement[0] = 0.5 * TimeStep * (CurrentVelocity[0] + PreviousVelocity[0]) + PreviousDisplacement[0]; CurrentDisplacement[1] = 0.5 * TimeStep * (CurrentVelocity[1] + PreviousVelocity[1]) + PreviousDisplacement[1]; if (dimension == 3) { CurrentDisplacement[2] = 0.5 * TimeStep * (CurrentVelocity[2] + PreviousVelocity[2]) + PreviousDisplacement[2]; } ///// reset Nodal variables ////// Vector &rNodalSFDneighbours = i->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS); unsigned int sizeSDFNeigh = rNodalSFDneighbours.size(); // unsigned int neighbourNodes=i->GetValue(NEIGHBOUR_NODES).size()+1; // unsigned int sizeSDFNeigh=neighbourNodes*dimension; i->FastGetSolutionStepValue(NODAL_VOLUME) = 0; i->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0; i->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0; i->FastGetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = 0; i->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = 0; noalias(rNodalSFDneighbours) = ZeroVector(sizeSDFNeigh); Vector &rSpatialDefRate = i->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE); noalias(rSpatialDefRate) = ZeroVector(sizeStrains); Matrix &rFgrad = 
// (continuation) completes `Matrix &rFgrad = ...` begun on the previous line:
// fetch and zero the fluid-side nodal deformation-gradient tensors.
i->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
noalias(rFgrad) = ZeroMatrix(dimension, dimension);
Matrix &rFgradVel = i->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
noalias(rFgradVel) = ZeroMatrix(dimension, dimension);
// if(i->FastGetSolutionStepValue(INTERFACE_NODE)==true){
// Reset of the SOLID_* nodal variables. NOTE(review): this runs for every
// node, not only interface nodes — see the commented-out guard above;
// confirm that is intentional.
Vector &rSolidNodalSFDneighbours = i->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
unsigned int solidSizeSDFNeigh = rSolidNodalSFDneighbours.size();
// unsigned int solidSizeSDFNeigh=solidNeighbourNodes*dimension;
i->FastGetSolutionStepValue(SOLID_NODAL_VOLUME) = 0;
i->FastGetSolutionStepValue(SOLID_NODAL_MEAN_MESH_SIZE) = 0;
i->FastGetSolutionStepValue(SOLID_NODAL_FREESURFACE_AREA) = 0;
i->FastGetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = 0;
i->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = 0;
noalias(rSolidNodalSFDneighbours) = ZeroVector(solidSizeSDFNeigh);
Vector &rSolidSpatialDefRate = i->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
// sizeStrains = 3*(dimension-1): 3 components in 2D, 6 in 3D (Voigt)
noalias(rSolidSpatialDefRate) = ZeroVector(sizeStrains);
Matrix &rSolidFgrad = i->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
noalias(rSolidFgrad) = ZeroMatrix(dimension, dimension);
Matrix &rSolidFgradVel = i->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
noalias(rSolidFgradVel) = ZeroMatrix(dimension, dimension);
// }
}
// }
}

/// Turn back information as a string.
std::string Info() const override
{
    std::stringstream buffer;
    buffer << "NodalTwoStepVPStrategyForFSI";
    return buffer.str();
}

/// Print information about this object.
void PrintInfo(std::ostream &rOStream) const override
{
    rOStream << "NodalTwoStepVPStrategyForFSI";
}

// /// Print object's data.
// void PrintData(std::ostream& rOStream) const override // { // } ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected Life Cycle ///@{ ///@} ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ /// Assignment operator. NodalTwoStepVPStrategyForFSI &operator=(NodalTwoStepVPStrategyForFSI const &rOther) {} /// Copy constructor. NodalTwoStepVPStrategyForFSI(NodalTwoStepVPStrategyForFSI const &rOther) {} ///@} }; /// Class NodalTwoStepVPStrategyForFSI ///@} ///@name Type Definitions ///@{ ///@} ///@} // addtogroup } // namespace Kratos. #endif // KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_H
transfer_utility.h
/* ============================================================================== KratosIncompressibleFluidApplication A library based on: Kratos A General Purpose Software for Multi-Physics Finite Element Analysis Version 1.0 (Released on march 05, 2007). Copyright 2007 Pooyan Dadvand, Riccardo Rossi pooyan@cimne.upc.edu rrossi@cimne.upc.edu - CIMNE (International Center for Numerical Methods in Engineering), Gran Capita' s/n, 08034 Barcelona, Spain Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following condition: Distribution of this code for any commercial purpose is permissible ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS. The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
============================================================================== */
//
//   Project Name:        Kratos
//   Last Modified by:    $Author: pbecker $
//   Date:                $Date: 2011-09-21 12:30:32 $
//   Revision:            $Revision: 1.0 $
//
//

#if !defined(KRATOS_MOVE_PART_UTILITY_FLUID_ONLY_DIFF2_INCLUDED )
#define KRATOS_MOVE_PART_UTILITY_FLUID_ONLY_DIFF2_INCLUDED

// System includes
#include <string>
#include <iostream>
#include <algorithm>

// External includes

// Project includes
#include "includes/define.h"
#include "includes/node.h"
///
#include "includes/dof.h"
#include "includes/variables.h"
#include "containers/array_1d.h"
#include "containers/data_value_container.h"
#include "includes/mesh.h"
#include "utilities/math_utils.h"
#include "processes/node_erase_process.h"
///
#include "utilities/geometry_utilities.h"
#include "includes/model_part.h"
#include "spatial_containers/spatial_containers.h"
#include "spatial_containers/cell.h"
#include "spatial_containers/bins_dynamic_objects.h"
#include "utilities/spatial_containers_configure.h"
#include "geometries/line_2d_2.h"
#include "geometries/triangle_2d_3.h"
#include "geometries/triangle_3d_3.h"
#include "geometries/point.h"
#include "pfem_2_application_variables.h"
#include "utilities/openmp_utils.h"
#include "time.h"
//#include "processes/process.h"

namespace Kratos
{

/// Utility to transfer data from a "topographic" model part onto a "calculation"
/// model part by locating each calculation node inside an element of the
/// topographic mesh (via a dynamic-bins spatial search plus barycentric tests).
///
/// NOTE(review): this header appears to be unfinished/abandoned copy-paste work:
///   - the class is declared `MoveParticleUtilityDiffFluidOnly`, but the
///     constructor, destructor and KRATOS_CLASS_POINTER_DEFINITION below all use
///     the name `TransferUtility` — as written this cannot compile; one of the
///     two names must be chosen (presumably `TransferUtility`, matching the
///     KRATOS_WATCH text "initializing transfer utility"). Confirm against the
///     rest of the file/application before fixing.
///   - GatherInformationFromTopographicDomain references several undeclared
///     identifiers (see notes inside the method).
//this class is to be modified by the user to customize the interpolation process
template< unsigned int TDim>
class MoveParticleUtilityDiffFluidOnly
{
public:

    // Spatial-search type aliases taken from the templated configure class.
    typedef SpatialContainersConfigure<TDim>        Configure;
    typedef typename Configure::PointType           PointType;
    //typedef PointType::CoordinatesArrayType       CoordinatesArrayType;
    typedef typename Configure::ContainerType       ContainerType;
    //typedef Configure::PointerType                PointerType;
    typedef typename Configure::IteratorType        IteratorType;
    typedef typename Configure::ResultContainerType ResultContainerType;
    //typedef Configure::ResultPointerType          ResultPointerType;
    typedef typename Configure::ResultIteratorType  ResultIteratorType;
    //typedef Configure::ContactPairType            ContactPairType;
    //typedef Configure::ContainerContactType       ContainerContactType;
    //typedef Configure::IteratorContactType        IteratorContactType;
    //typedef Configure::PointerContactType         PointerContactType;
    //typedef Configure::PointerTypeIterator        PointerTypeIterator;

    // NOTE(review): name mismatch with the enclosing class (see class doc).
    KRATOS_CLASS_POINTER_DEFINITION(TransferUtility);

    /// Constructor: stores references to both model parts and builds a dynamic
    /// bins structure over the TOPOGRAPHIC elements so they can later be
    /// searched by coordinate.
    /// @param calculation_model_part  model part whose nodes will receive data
    /// @param topographic_model_part  model part whose elements are searched
    //template<unsigned int TDim>
    TransferUtility(ModelPart& calculation_model_part, ModelPart& topographic_model_part)
        : mcalculation_model_part(calculation_model_part) , mtopographic_model_part(topographic_model_part)
    {
        KRATOS_WATCH("initializing transfer utility")
        // NOTE(review): CurrentProcessInfo is never used in this constructor.
        ProcessInfo& CurrentProcessInfo = mcalculation_model_part.GetProcessInfo();
        //loop in elements to change their ID to their position in the array. Easier to get information later.
        //DO NOT PARALELIZE THIS! IT MUST BE SERIAL!!!!!!!!!!!!!!!!!!!!!!
        /*
        ModelPart::ElementsContainerType::iterator ielembegin = mcalculation_model_part.ElementsBegin();
        for(unsigned int ii=0; ii<mr_model_part.Elements().size(); ii++)
        {
            ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
            ielem->SetId(ii+1);
        }
        mlast_elem_id= (mr_model_part.ElementsEnd()-1)->Id();
        */

        //CONSTRUCTING BIN STRUCTURE
        ContainerType& rElements = mtopographic_model_part.ElementsArray();
        IteratorType it_begin = rElements.begin();
        IteratorType it_end = rElements.end();
        //const int number_of_elem = rElements.size();
        // Build the bins over [it_begin, it_end) and move ownership into the
        // member pointer via swap.
        typename BinsObjectDynamic<Configure>::Pointer paux = typename BinsObjectDynamic<Configure>::Pointer(new BinsObjectDynamic<Configure>(it_begin, it_end ) );
        paux.swap(mpBinsObjectDynamic);
    }

    /// Destructor (nothing to release explicitly; members are references and a
    /// smart pointer). NOTE(review): name mismatch with the enclosing class.
    ~TransferUtility()
    {}

    /// Intended to locate every node of the calculation model part inside the
    /// topographic mesh (in parallel, one OpenMP partition per thread).
    ///
    /// NOTE(review): this method is clearly an unfinished migration from an
    /// element-based loop to a node-based loop and CANNOT compile as written:
    ///   - `ielem`, `position`, `is_found` and `MaxNumberOfResults` are never
    ///     declared (the loop declares `inode` but then keeps using `ielem`);
    ///   - `ParticlePointerVector` is not declared in this header;
    ///   - `delta_t` and `gravity` are read but never used;
    ///   - brace accounting looks off: KRATOS_TRY opens a try-block, yet three
    ///     closing braces appear before KRATOS_CATCH — verify balance.
    /// Nothing is done with the search result ("//good, now we know...").
    void GatherInformationFromTopographicDomain()
    {
        KRATOS_TRY
        KRATOS_WATCH("Gathering Information From Topographic Domain ")
        ProcessInfo& CurrentProcessInfo = mcalculation_model_part.GetProcessInfo();
        double delta_t = CurrentProcessInfo[DELTA_TIME];
        array_1d<double,3> & gravity= CurrentProcessInfo[GRAVITY];
        const unsigned int max_results = 1000;
        //array_1d<double,TDim+1> N;
        //const int max_results = 1000;
        ModelPart::NodesContainerType::iterator inodebegin = mcalculation_model_part.NodesBegin();

        // Partition the node range so each OpenMP thread works on a contiguous
        // slice [node_partition[k], node_partition[k+1]).
        vector<unsigned int> node_partition;
        #ifdef _OPENMP
        int number_of_threads = omp_get_max_threads();
        #else
        int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, mcalculation_model_part.Nodes().size(), node_partition);

        //before doing anything we must reset the vector of nodes contained by each element (particles that are inside each element.
        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            // Per-thread scratch: shape functions and bins search buffer.
            array_1d<double,TDim+1> N;
            ResultContainerType results(max_results);
            ResultIteratorType result_begin = results.begin();
            for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
            {
                // Restore the search buffer to full capacity before each query.
                if ( (results.size()) !=max_results)
                    results.resize(max_results);
                //const int & elem_id = ielem->Id();
                ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
                // NOTE(review): from here on `ielem` is undeclared — leftover
                // from the element-based version of this loop.
                Element::Pointer pelement(*ielem.base());
                Geometry<Node<3> >& geom = ielem->GetGeometry();
                ParticlePointerVector& element_particle_pointers = (ielem->GetValue(FLUID_PARTICLE_POINTERS));
                int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_PARTICLES);
                //std::cout << "elem " << ii << " with " << (unsigned int)number_of_particles_in_elem << " particles" << std::endl;
                // NOTE(review): `is_found`, `position`, `MaxNumberOfResults`
                // are undeclared; result is never consumed.
                is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults);
                //good, now we know where this point is:
            }
        }
        }
        KRATOS_CATCH("")
    }

protected:

private:

    ///this function should find the element into which a given node is located
    ///and return a pointer to the element and the vector containing the
    ///shape functions that define the position within the element
    ///if "false" is devolved the element is not found
    ///
    /// Search order: (1) the element currently held by `pelement` (best guess),
    /// (2) its neighbour elements, (3) the candidate elements returned by the
    /// dynamic bins for the cell containing `position`. On success `pelement`
    /// and `N` are updated and true is returned.
    bool FindNodeOnMesh( //int last_element,
        array_1d<double,3>& position,
        array_1d<double,TDim+1>& N,
        //Element::Pointer pelement,
        Element::Pointer & pelement,
        ResultIteratorType result_begin,
        const unsigned int MaxNumberOfResults)
    {
        typedef std::size_t SizeType;

        const array_1d<double,3>& coords = position;
        array_1d<double,TDim+1> aux_N;
        //before using the bin to search for possible elements we check first the last element in which the particle was.
        //ModelPart::ElementsContainerType::iterator i = mr_model_part.ElementsBegin()+last_element;
        Geometry<Node<3> >& geom_default = pelement->GetGeometry(); //(*(i))->GetGeometry();
        bool is_found_1 = CalculatePosition(geom_default,coords[0],coords[1],coords[2],N);
        if(is_found_1 == true)
        {
            //pelement = (*(i));
            return true;
        }

        //KRATOS_WATCH("will look in another element")
        //KRATOS_WATCH(TDim)

        //to begin with we check the neighbour elements:
        GlobalPointersVector< Element >& neighb_elems = pelement->GetValue(NEIGHBOUR_ELEMENTS);
        //the first we check is the one that has negative shape function, because it means it went outside in this direction:
        /*
        unsigned int checked_element=0;
        for (unsigned int i=0;i!=(TDim+1);i++)
        {
            if (N[i]<0.0)
            {
                checked_element=i;
                Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
                bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],aux_N);
                if (is_found_2)
                {
                    pelement=Element::Pointer(((neighb_elems(i))));
                    N=aux_N;
                    return true;
                }
                break;
            }
        }
        */
        // Linear scan over all neighbour elements of the current guess.
        for (unsigned int i=0;i!=(neighb_elems.size());i++)
        {
            Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
            bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
            if (is_found_2)
            {
                pelement=Element::Pointer(((neighb_elems(i))));
                return true;
            }
        }

        //ask to the container for the list of candidate elements
        SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(coords, result_begin, MaxNumberOfResults );
        //KRATOS_WATCH(results_found)
        if(results_found>0){
            //loop over the candidate elements and check if the particle falls within
            for(SizeType i = 0; i< results_found; i++)
            {
                //std::cout<< "KIIIIIIIIIIIIII" << std::endl;
                //KRATOS_WATCH((*(result_begin+i))->Id());
                Geometry<Node<3> >& geom = (*(result_begin+i))->GetGeometry();
                //find local position
                bool is_found = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
                //KRATOS_WATCH("ln243");
                //KRATOS_WATCH(N);
                if(is_found == true)
                {
                    //pelement.clear();
                    //pelement.push_back( Element::WeakPointer((*(result_begin+i).base())));
                    pelement=Element::Pointer((*(result_begin+i).base()));
                    return true;
                }
            }
        }

        //not found case
        return false;
    }

    //***************************************
    //***************************************

    /// 2D point-in-triangle test via barycentric (area) coordinates.
    /// Fills N with the three shape-function values; returns true when the
    /// point (xc, yc) lies inside (or on the boundary of) the triangle.
    /// `zc` is unused in this 2D overload. Throws on a degenerate (zero-area)
    /// triangle.
    inline bool CalculatePosition(Geometry<Node < 3 > >&geom,
            const double xc, const double yc, const double zc,
            array_1d<double, 3 > & N
                                 )
    {
        double x0 = geom[0].X();
        double y0 = geom[0].Y();
        double x1 = geom[1].X();
        double y1 = geom[1].Y();
        double x2 = geom[2].X();
        double y2 = geom[2].Y();

        double area = CalculateVol(x0, y0, x1, y1, x2, y2);
        double inv_area = 0.0;
        // NOTE(review): exact floating-point compare — a near-degenerate
        // triangle slips through; the 3D overload uses a 1e-13 tolerance.
        if (area == 0.0)
        {
            KRATOS_THROW_ERROR(std::logic_error, "element with zero area found", "");
        }
        else
        {
            inv_area = 1.0 / area;
        }

        // Barycentric coordinates: sub-triangle area opposite each vertex,
        // normalized by the total area.
        N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area;
        N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area;
        N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area;
        //KRATOS_WATCH(N);

        if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0) //if the xc yc is inside the triangle return true
            return true;

        return false;
    }

    //***************************************
    //***************************************

    /// 3D point-in-tetrahedron test via barycentric (volume) coordinates.
    /// Fills N with the four shape-function values; returns true when the
    /// point (xc, yc, zc) lies inside (or on the boundary of) the tetrahedron.
    /// Throws when the volume is below 1e-13 (also rejects negatively oriented
    /// elements, since signed volume < tolerance triggers the error path).
    inline bool CalculatePosition(Geometry<Node < 3 > >&geom,
            const double xc, const double yc, const double zc,
            array_1d<double, 4 > & N
                                 )
    {
        double x0 = geom[0].X();
        double y0 = geom[0].Y();
        double z0 = geom[0].Z();
        double x1 = geom[1].X();
        double y1 = geom[1].Y();
        double z1 = geom[1].Z();
        double x2 = geom[2].X();
        double y2 = geom[2].Y();
        double z2 = geom[2].Z();
        double x3 = geom[3].X();
        double y3 = geom[3].Y();
        double z3 = geom[3].Z();

        double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
        double inv_vol = 0.0;
        if (vol < 0.0000000000001)
        {
            KRATOS_THROW_ERROR(std::logic_error, "element with zero vol found", "");
        }
        else
        {
            inv_vol = 1.0 / vol;
        }

        // Barycentric coordinates: signed sub-tetrahedron volume opposite each
        // vertex, normalized by the total volume (vertex orderings chosen so
        // the signs come out consistent).
        N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol;
        N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol;
        N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol;
        N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol;

        if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 &&
            N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0) //if the xc yc zc is inside the tetrahedron return true
            return true;

        return false;
    }

    /// Signed area of the triangle (x0,y0)-(x1,y1)-(x2,y2): half the 2D cross
    /// product of the two edge vectors (positive for counter-clockwise order).
    inline double CalculateVol(const double x0, const double y0,
                               const double x1, const double y1,
                               const double x2, const double y2
                              )
    {
        return 0.5 * ((x1 - x0)*(y2 - y0)- (y1 - y0)*(x2 - x0));
    }

    //***************************************
    //***************************************

    /// Signed volume of the tetrahedron with the four given vertices: the
    /// determinant of the edge-vector Jacobian times 1/6.
    inline double CalculateVol(const double x0, const double y0, const double z0,
                               const double x1, const double y1, const double z1,
                               const double x2, const double y2, const double z2,
                               const double x3, const double y3, const double z3
                              )
    {
        double x10 = x1 - x0;
        double y10 = y1 - y0;
        double z10 = z1 - z0;

        double x20 = x2 - x0;
        double y20 = y2 - y0;
        double z20 = z2 - z0;

        double x30 = x3 - x0;
        double y30 = y3 - y0;
        double z30 = z3 - z0;

        double detJ = x10 * y20 * z30 - x10 * y30 * z20 + y10 * z20 * x30 - y10 * x20 * z30 + z10 * x20 * y30 - z10 * y20 * x30;
        return detJ * 0.1666666666666666666667;
    }

    // Model part whose nodes are to be filled with transferred data.
    ModelPart& mcalculation_model_part;
    // Model part whose elements are searched (source of the data).
    ModelPart& mtopographic_model_part;
    // Bins spatial-search structure built over the topographic elements.
    typename BinsObjectDynamic<Configure>::Pointer mpBinsObjectDynamic;

};

} // namespace Kratos.

#endif // KRATOS_MOVE_PART_UTILITY_FLUID_ONLY_DIFF2_INCLUDED defined