// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv1x1s1_sgemm_transform_kernel_pack8_avx(const Mat& kernel, Mat& weight_data_pack8, int num_input, int num_output)
{
// src = kw-kh-inch-outch
// dst = 8b-8a-kw-kh-inch/8a-outch/8b
Mat weight_data_r2 = kernel.reshape(1, num_input, num_output);
weight_data_pack8.create(1, num_input / 8, num_output / 8, (size_t)4 * 64, 64);
for (int q = 0; q + 7 < num_output; q += 8)
{
const Mat k0 = weight_data_r2.channel(q);
const Mat k1 = weight_data_r2.channel(q + 1);
const Mat k2 = weight_data_r2.channel(q + 2);
const Mat k3 = weight_data_r2.channel(q + 3);
const Mat k4 = weight_data_r2.channel(q + 4);
const Mat k5 = weight_data_r2.channel(q + 5);
const Mat k6 = weight_data_r2.channel(q + 6);
const Mat k7 = weight_data_r2.channel(q + 7);
Mat g0 = weight_data_pack8.channel(q / 8);
for (int p = 0; p + 7 < num_input; p += 8)
{
const float* k00 = k0.row(p);
const float* k01 = k0.row(p + 1);
const float* k02 = k0.row(p + 2);
const float* k03 = k0.row(p + 3);
const float* k04 = k0.row(p + 4);
const float* k05 = k0.row(p + 5);
const float* k06 = k0.row(p + 6);
const float* k07 = k0.row(p + 7);
const float* k10 = k1.row(p);
const float* k11 = k1.row(p + 1);
const float* k12 = k1.row(p + 2);
const float* k13 = k1.row(p + 3);
const float* k14 = k1.row(p + 4);
const float* k15 = k1.row(p + 5);
const float* k16 = k1.row(p + 6);
const float* k17 = k1.row(p + 7);
const float* k20 = k2.row(p);
const float* k21 = k2.row(p + 1);
const float* k22 = k2.row(p + 2);
const float* k23 = k2.row(p + 3);
const float* k24 = k2.row(p + 4);
const float* k25 = k2.row(p + 5);
const float* k26 = k2.row(p + 6);
const float* k27 = k2.row(p + 7);
const float* k30 = k3.row(p);
const float* k31 = k3.row(p + 1);
const float* k32 = k3.row(p + 2);
const float* k33 = k3.row(p + 3);
const float* k34 = k3.row(p + 4);
const float* k35 = k3.row(p + 5);
const float* k36 = k3.row(p + 6);
const float* k37 = k3.row(p + 7);
const float* k40 = k4.row(p);
const float* k41 = k4.row(p + 1);
const float* k42 = k4.row(p + 2);
const float* k43 = k4.row(p + 3);
const float* k44 = k4.row(p + 4);
const float* k45 = k4.row(p + 5);
const float* k46 = k4.row(p + 6);
const float* k47 = k4.row(p + 7);
const float* k50 = k5.row(p);
const float* k51 = k5.row(p + 1);
const float* k52 = k5.row(p + 2);
const float* k53 = k5.row(p + 3);
const float* k54 = k5.row(p + 4);
const float* k55 = k5.row(p + 5);
const float* k56 = k5.row(p + 6);
const float* k57 = k5.row(p + 7);
const float* k60 = k6.row(p);
const float* k61 = k6.row(p + 1);
const float* k62 = k6.row(p + 2);
const float* k63 = k6.row(p + 3);
const float* k64 = k6.row(p + 4);
const float* k65 = k6.row(p + 5);
const float* k66 = k6.row(p + 6);
const float* k67 = k6.row(p + 7);
const float* k70 = k7.row(p);
const float* k71 = k7.row(p + 1);
const float* k72 = k7.row(p + 2);
const float* k73 = k7.row(p + 3);
const float* k74 = k7.row(p + 4);
const float* k75 = k7.row(p + 5);
const float* k76 = k7.row(p + 6);
const float* k77 = k7.row(p + 7);
float* g00 = g0.row(p / 8);
g00[0] = k00[0];
g00[1] = k10[0];
g00[2] = k20[0];
g00[3] = k30[0];
g00[4] = k40[0];
g00[5] = k50[0];
g00[6] = k60[0];
g00[7] = k70[0];
g00 += 8;
g00[0] = k01[0];
g00[1] = k11[0];
g00[2] = k21[0];
g00[3] = k31[0];
g00[4] = k41[0];
g00[5] = k51[0];
g00[6] = k61[0];
g00[7] = k71[0];
g00 += 8;
g00[0] = k02[0];
g00[1] = k12[0];
g00[2] = k22[0];
g00[3] = k32[0];
g00[4] = k42[0];
g00[5] = k52[0];
g00[6] = k62[0];
g00[7] = k72[0];
g00 += 8;
g00[0] = k03[0];
g00[1] = k13[0];
g00[2] = k23[0];
g00[3] = k33[0];
g00[4] = k43[0];
g00[5] = k53[0];
g00[6] = k63[0];
g00[7] = k73[0];
g00 += 8;
g00[0] = k04[0];
g00[1] = k14[0];
g00[2] = k24[0];
g00[3] = k34[0];
g00[4] = k44[0];
g00[5] = k54[0];
g00[6] = k64[0];
g00[7] = k74[0];
g00 += 8;
g00[0] = k05[0];
g00[1] = k15[0];
g00[2] = k25[0];
g00[3] = k35[0];
g00[4] = k45[0];
g00[5] = k55[0];
g00[6] = k65[0];
g00[7] = k75[0];
g00 += 8;
g00[0] = k06[0];
g00[1] = k16[0];
g00[2] = k26[0];
g00[3] = k36[0];
g00[4] = k46[0];
g00[5] = k56[0];
g00[6] = k66[0];
g00[7] = k76[0];
g00 += 8;
g00[0] = k07[0];
g00[1] = k17[0];
g00[2] = k27[0];
g00[3] = k37[0];
g00[4] = k47[0];
g00[5] = k57[0];
g00[6] = k67[0];
g00[7] = k77[0];
g00 += 8;
}
}
}
static void conv1x1s1_sgemm_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
const int size = w * h;
const float* bias = _bias;
// interleave
Mat tmp(12, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + (size % 12 % 4) / 2 + size % 12 % 2, elemsize, elempack, opt.workspace_allocator);
{
int nn_size = size / 12;
int remain_size_start = nn_size * 12;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = ii * 12;
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
__m256 _r1 = _mm256_loadu_ps(img0 + 8);
__m256 _r2 = _mm256_loadu_ps(img0 + 16);
__m256 _r3 = _mm256_loadu_ps(img0 + 24);
__m256 _r4 = _mm256_loadu_ps(img0 + 32);
__m256 _r5 = _mm256_loadu_ps(img0 + 40);
__m256 _r6 = _mm256_loadu_ps(img0 + 48);
__m256 _r7 = _mm256_loadu_ps(img0 + 56);
__m256 _r8 = _mm256_loadu_ps(img0 + 64);
__m256 _r9 = _mm256_loadu_ps(img0 + 72);
__m256 _r10 = _mm256_loadu_ps(img0 + 80);
__m256 _r11 = _mm256_loadu_ps(img0 + 88);
_mm256_storeu_ps(tmpptr, _r0);
_mm256_storeu_ps(tmpptr + 8, _r1);
_mm256_storeu_ps(tmpptr + 16, _r2);
_mm256_storeu_ps(tmpptr + 24, _r3);
_mm256_storeu_ps(tmpptr + 32, _r4);
_mm256_storeu_ps(tmpptr + 40, _r5);
_mm256_storeu_ps(tmpptr + 48, _r6);
_mm256_storeu_ps(tmpptr + 56, _r7);
_mm256_storeu_ps(tmpptr + 64, _r8);
_mm256_storeu_ps(tmpptr + 72, _r9);
_mm256_storeu_ps(tmpptr + 80, _r10);
_mm256_storeu_ps(tmpptr + 88, _r11);
tmpptr += 96;
img0 += bottom_blob.cstep * 8;
}
}
nn_size = (size - remain_size_start) >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
__m256 _r1 = _mm256_loadu_ps(img0 + 8);
__m256 _r2 = _mm256_loadu_ps(img0 + 16);
__m256 _r3 = _mm256_loadu_ps(img0 + 24);
__m256 _r4 = _mm256_loadu_ps(img0 + 32);
__m256 _r5 = _mm256_loadu_ps(img0 + 40);
__m256 _r6 = _mm256_loadu_ps(img0 + 48);
__m256 _r7 = _mm256_loadu_ps(img0 + 56);
_mm256_storeu_ps(tmpptr, _r0);
_mm256_storeu_ps(tmpptr + 8, _r1);
_mm256_storeu_ps(tmpptr + 16, _r2);
_mm256_storeu_ps(tmpptr + 24, _r3);
_mm256_storeu_ps(tmpptr + 32, _r4);
_mm256_storeu_ps(tmpptr + 40, _r5);
_mm256_storeu_ps(tmpptr + 48, _r6);
_mm256_storeu_ps(tmpptr + 56, _r7);
tmpptr += 64;
img0 += bottom_blob.cstep * 8;
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
__m256 _r1 = _mm256_loadu_ps(img0 + 8);
__m256 _r2 = _mm256_loadu_ps(img0 + 16);
__m256 _r3 = _mm256_loadu_ps(img0 + 24);
_mm256_storeu_ps(tmpptr, _r0);
_mm256_storeu_ps(tmpptr + 8, _r1);
_mm256_storeu_ps(tmpptr + 16, _r2);
_mm256_storeu_ps(tmpptr + 24, _r3);
tmpptr += 32;
img0 += bottom_blob.cstep * 8;
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
__m256 _r1 = _mm256_loadu_ps(img0 + 8);
_mm256_storeu_ps(tmpptr, _r0);
_mm256_storeu_ps(tmpptr + 8, _r1);
tmpptr += 16;
img0 += bottom_blob.cstep * 8;
}
}
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
_mm256_storeu_ps(tmpptr, _r0);
tmpptr += 8;
img0 += bottom_blob.cstep * 8;
}
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out = top_blob.channel(p);
__m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f);
float* outptr = out;
int i = 0;
for (; i + 11 < size; i += 12)
{
const float* tmpptr = tmp.channel(i / 12);
__m256 _sum0 = _bias0;
__m256 _sum1 = _bias0;
__m256 _sum2 = _bias0;
__m256 _sum3 = _bias0;
__m256 _sum4 = _bias0;
__m256 _sum5 = _bias0;
__m256 _sum6 = _bias0;
__m256 _sum7 = _bias0;
__m256 _sum8 = _bias0;
__m256 _sum9 = _bias0;
__m256 _sum10 = _bias0;
__m256 _sum11 = _bias0;
const float* kptr = (const float*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _w0 = _mm256_loadu_ps(kptr);
__m256 _w1 = _mm256_loadu_ps(kptr + 8);
__m256 _w2 = _mm256_loadu_ps(kptr + 16);
__m256 _w3 = _mm256_loadu_ps(kptr + 24);
__m256 _w4 = _mm256_loadu_ps(kptr + 32);
__m256 _w5 = _mm256_loadu_ps(kptr + 40);
__m256 _w6 = _mm256_loadu_ps(kptr + 48);
__m256 _w7 = _mm256_loadu_ps(kptr + 56);
__m256 _val00 = _mm256_broadcast_ss(tmpptr);
__m256 _val01 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val02 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val03 = _mm256_broadcast_ss(tmpptr + 3);
__m256 _val04 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val05 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val06 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val07 = _mm256_broadcast_ss(tmpptr + 7);
__m256 _val10 = _mm256_broadcast_ss(tmpptr + 8);
__m256 _val11 = _mm256_broadcast_ss(tmpptr + 9);
__m256 _val12 = _mm256_broadcast_ss(tmpptr + 10);
__m256 _val13 = _mm256_broadcast_ss(tmpptr + 11);
__m256 _val14 = _mm256_broadcast_ss(tmpptr + 12);
__m256 _val15 = _mm256_broadcast_ss(tmpptr + 13);
__m256 _val16 = _mm256_broadcast_ss(tmpptr + 14);
__m256 _val17 = _mm256_broadcast_ss(tmpptr + 15);
_sum0 = _mm256_fmadd_ps(_w0, _val00, _sum0);
_sum0 = _mm256_fmadd_ps(_w1, _val01, _sum0);
_sum0 = _mm256_fmadd_ps(_w2, _val02, _sum0);
_sum0 = _mm256_fmadd_ps(_w3, _val03, _sum0);
_sum0 = _mm256_fmadd_ps(_w4, _val04, _sum0);
_sum0 = _mm256_fmadd_ps(_w5, _val05, _sum0);
_sum0 = _mm256_fmadd_ps(_w6, _val06, _sum0);
_sum0 = _mm256_fmadd_ps(_w7, _val07, _sum0);
_sum1 = _mm256_fmadd_ps(_w0, _val10, _sum1);
_sum1 = _mm256_fmadd_ps(_w1, _val11, _sum1);
_sum1 = _mm256_fmadd_ps(_w2, _val12, _sum1);
_sum1 = _mm256_fmadd_ps(_w3, _val13, _sum1);
_sum1 = _mm256_fmadd_ps(_w4, _val14, _sum1);
_sum1 = _mm256_fmadd_ps(_w5, _val15, _sum1);
_sum1 = _mm256_fmadd_ps(_w6, _val16, _sum1);
_sum1 = _mm256_fmadd_ps(_w7, _val17, _sum1);
__m256 _val20 = _mm256_broadcast_ss(tmpptr + 16);
__m256 _val21 = _mm256_broadcast_ss(tmpptr + 17);
__m256 _val22 = _mm256_broadcast_ss(tmpptr + 18);
__m256 _val23 = _mm256_broadcast_ss(tmpptr + 19);
__m256 _val24 = _mm256_broadcast_ss(tmpptr + 20);
__m256 _val25 = _mm256_broadcast_ss(tmpptr + 21);
__m256 _val26 = _mm256_broadcast_ss(tmpptr + 22);
__m256 _val27 = _mm256_broadcast_ss(tmpptr + 23);
__m256 _val30 = _mm256_broadcast_ss(tmpptr + 24);
__m256 _val31 = _mm256_broadcast_ss(tmpptr + 25);
__m256 _val32 = _mm256_broadcast_ss(tmpptr + 26);
__m256 _val33 = _mm256_broadcast_ss(tmpptr + 27);
__m256 _val34 = _mm256_broadcast_ss(tmpptr + 28);
__m256 _val35 = _mm256_broadcast_ss(tmpptr + 29);
__m256 _val36 = _mm256_broadcast_ss(tmpptr + 30);
__m256 _val37 = _mm256_broadcast_ss(tmpptr + 31);
_sum2 = _mm256_fmadd_ps(_w0, _val20, _sum2);
_sum2 = _mm256_fmadd_ps(_w1, _val21, _sum2);
_sum2 = _mm256_fmadd_ps(_w2, _val22, _sum2);
_sum2 = _mm256_fmadd_ps(_w3, _val23, _sum2);
_sum2 = _mm256_fmadd_ps(_w4, _val24, _sum2);
_sum2 = _mm256_fmadd_ps(_w5, _val25, _sum2);
_sum2 = _mm256_fmadd_ps(_w6, _val26, _sum2);
_sum2 = _mm256_fmadd_ps(_w7, _val27, _sum2);
_sum3 = _mm256_fmadd_ps(_w0, _val30, _sum3);
_sum3 = _mm256_fmadd_ps(_w1, _val31, _sum3);
_sum3 = _mm256_fmadd_ps(_w2, _val32, _sum3);
_sum3 = _mm256_fmadd_ps(_w3, _val33, _sum3);
_sum3 = _mm256_fmadd_ps(_w4, _val34, _sum3);
_sum3 = _mm256_fmadd_ps(_w5, _val35, _sum3);
_sum3 = _mm256_fmadd_ps(_w6, _val36, _sum3);
_sum3 = _mm256_fmadd_ps(_w7, _val37, _sum3);
__m256 _val40 = _mm256_broadcast_ss(tmpptr + 32);
__m256 _val41 = _mm256_broadcast_ss(tmpptr + 33);
__m256 _val42 = _mm256_broadcast_ss(tmpptr + 34);
__m256 _val43 = _mm256_broadcast_ss(tmpptr + 35);
__m256 _val44 = _mm256_broadcast_ss(tmpptr + 36);
__m256 _val45 = _mm256_broadcast_ss(tmpptr + 37);
__m256 _val46 = _mm256_broadcast_ss(tmpptr + 38);
__m256 _val47 = _mm256_broadcast_ss(tmpptr + 39);
__m256 _val50 = _mm256_broadcast_ss(tmpptr + 40);
__m256 _val51 = _mm256_broadcast_ss(tmpptr + 41);
__m256 _val52 = _mm256_broadcast_ss(tmpptr + 42);
__m256 _val53 = _mm256_broadcast_ss(tmpptr + 43);
__m256 _val54 = _mm256_broadcast_ss(tmpptr + 44);
__m256 _val55 = _mm256_broadcast_ss(tmpptr + 45);
__m256 _val56 = _mm256_broadcast_ss(tmpptr + 46);
__m256 _val57 = _mm256_broadcast_ss(tmpptr + 47);
_sum4 = _mm256_fmadd_ps(_w0, _val40, _sum4);
_sum4 = _mm256_fmadd_ps(_w1, _val41, _sum4);
_sum4 = _mm256_fmadd_ps(_w2, _val42, _sum4);
_sum4 = _mm256_fmadd_ps(_w3, _val43, _sum4);
_sum4 = _mm256_fmadd_ps(_w4, _val44, _sum4);
_sum4 = _mm256_fmadd_ps(_w5, _val45, _sum4);
_sum4 = _mm256_fmadd_ps(_w6, _val46, _sum4);
_sum4 = _mm256_fmadd_ps(_w7, _val47, _sum4);
_sum5 = _mm256_fmadd_ps(_w0, _val50, _sum5);
_sum5 = _mm256_fmadd_ps(_w1, _val51, _sum5);
_sum5 = _mm256_fmadd_ps(_w2, _val52, _sum5);
_sum5 = _mm256_fmadd_ps(_w3, _val53, _sum5);
_sum5 = _mm256_fmadd_ps(_w4, _val54, _sum5);
_sum5 = _mm256_fmadd_ps(_w5, _val55, _sum5);
_sum5 = _mm256_fmadd_ps(_w6, _val56, _sum5);
_sum5 = _mm256_fmadd_ps(_w7, _val57, _sum5);
__m256 _val60 = _mm256_broadcast_ss(tmpptr + 48);
__m256 _val61 = _mm256_broadcast_ss(tmpptr + 49);
__m256 _val62 = _mm256_broadcast_ss(tmpptr + 50);
__m256 _val63 = _mm256_broadcast_ss(tmpptr + 51);
__m256 _val64 = _mm256_broadcast_ss(tmpptr + 52);
__m256 _val65 = _mm256_broadcast_ss(tmpptr + 53);
__m256 _val66 = _mm256_broadcast_ss(tmpptr + 54);
__m256 _val67 = _mm256_broadcast_ss(tmpptr + 55);
__m256 _val70 = _mm256_broadcast_ss(tmpptr + 56);
__m256 _val71 = _mm256_broadcast_ss(tmpptr + 57);
__m256 _val72 = _mm256_broadcast_ss(tmpptr + 58);
__m256 _val73 = _mm256_broadcast_ss(tmpptr + 59);
__m256 _val74 = _mm256_broadcast_ss(tmpptr + 60);
__m256 _val75 = _mm256_broadcast_ss(tmpptr + 61);
__m256 _val76 = _mm256_broadcast_ss(tmpptr + 62);
__m256 _val77 = _mm256_broadcast_ss(tmpptr + 63);
_sum6 = _mm256_fmadd_ps(_w0, _val60, _sum6);
_sum6 = _mm256_fmadd_ps(_w1, _val61, _sum6);
_sum6 = _mm256_fmadd_ps(_w2, _val62, _sum6);
_sum6 = _mm256_fmadd_ps(_w3, _val63, _sum6);
_sum6 = _mm256_fmadd_ps(_w4, _val64, _sum6);
_sum6 = _mm256_fmadd_ps(_w5, _val65, _sum6);
_sum6 = _mm256_fmadd_ps(_w6, _val66, _sum6);
_sum6 = _mm256_fmadd_ps(_w7, _val67, _sum6);
_sum7 = _mm256_fmadd_ps(_w0, _val70, _sum7);
_sum7 = _mm256_fmadd_ps(_w1, _val71, _sum7);
_sum7 = _mm256_fmadd_ps(_w2, _val72, _sum7);
_sum7 = _mm256_fmadd_ps(_w3, _val73, _sum7);
_sum7 = _mm256_fmadd_ps(_w4, _val74, _sum7);
_sum7 = _mm256_fmadd_ps(_w5, _val75, _sum7);
_sum7 = _mm256_fmadd_ps(_w6, _val76, _sum7);
_sum7 = _mm256_fmadd_ps(_w7, _val77, _sum7);
__m256 _val80 = _mm256_broadcast_ss(tmpptr + 64);
__m256 _val81 = _mm256_broadcast_ss(tmpptr + 65);
__m256 _val82 = _mm256_broadcast_ss(tmpptr + 66);
__m256 _val83 = _mm256_broadcast_ss(tmpptr + 67);
__m256 _val84 = _mm256_broadcast_ss(tmpptr + 68);
__m256 _val85 = _mm256_broadcast_ss(tmpptr + 69);
__m256 _val86 = _mm256_broadcast_ss(tmpptr + 70);
__m256 _val87 = _mm256_broadcast_ss(tmpptr + 71);
__m256 _val90 = _mm256_broadcast_ss(tmpptr + 72);
__m256 _val91 = _mm256_broadcast_ss(tmpptr + 73);
__m256 _val92 = _mm256_broadcast_ss(tmpptr + 74);
__m256 _val93 = _mm256_broadcast_ss(tmpptr + 75);
__m256 _val94 = _mm256_broadcast_ss(tmpptr + 76);
__m256 _val95 = _mm256_broadcast_ss(tmpptr + 77);
__m256 _val96 = _mm256_broadcast_ss(tmpptr + 78);
__m256 _val97 = _mm256_broadcast_ss(tmpptr + 79);
_sum8 = _mm256_fmadd_ps(_w0, _val80, _sum8);
_sum8 = _mm256_fmadd_ps(_w1, _val81, _sum8);
_sum8 = _mm256_fmadd_ps(_w2, _val82, _sum8);
_sum8 = _mm256_fmadd_ps(_w3, _val83, _sum8);
_sum8 = _mm256_fmadd_ps(_w4, _val84, _sum8);
_sum8 = _mm256_fmadd_ps(_w5, _val85, _sum8);
_sum8 = _mm256_fmadd_ps(_w6, _val86, _sum8);
_sum8 = _mm256_fmadd_ps(_w7, _val87, _sum8);
_sum9 = _mm256_fmadd_ps(_w0, _val90, _sum9);
_sum9 = _mm256_fmadd_ps(_w1, _val91, _sum9);
_sum9 = _mm256_fmadd_ps(_w2, _val92, _sum9);
_sum9 = _mm256_fmadd_ps(_w3, _val93, _sum9);
_sum9 = _mm256_fmadd_ps(_w4, _val94, _sum9);
_sum9 = _mm256_fmadd_ps(_w5, _val95, _sum9);
_sum9 = _mm256_fmadd_ps(_w6, _val96, _sum9);
_sum9 = _mm256_fmadd_ps(_w7, _val97, _sum9);
__m256 _val100 = _mm256_broadcast_ss(tmpptr + 80);
__m256 _val101 = _mm256_broadcast_ss(tmpptr + 81);
__m256 _val102 = _mm256_broadcast_ss(tmpptr + 82);
__m256 _val103 = _mm256_broadcast_ss(tmpptr + 83);
__m256 _val104 = _mm256_broadcast_ss(tmpptr + 84);
__m256 _val105 = _mm256_broadcast_ss(tmpptr + 85);
__m256 _val106 = _mm256_broadcast_ss(tmpptr + 86);
__m256 _val107 = _mm256_broadcast_ss(tmpptr + 87);
__m256 _val110 = _mm256_broadcast_ss(tmpptr + 88);
__m256 _val111 = _mm256_broadcast_ss(tmpptr + 89);
__m256 _val112 = _mm256_broadcast_ss(tmpptr + 90);
__m256 _val113 = _mm256_broadcast_ss(tmpptr + 91);
__m256 _val114 = _mm256_broadcast_ss(tmpptr + 92);
__m256 _val115 = _mm256_broadcast_ss(tmpptr + 93);
__m256 _val116 = _mm256_broadcast_ss(tmpptr + 94);
__m256 _val117 = _mm256_broadcast_ss(tmpptr + 95);
_sum10 = _mm256_fmadd_ps(_w0, _val100, _sum10);
_sum10 = _mm256_fmadd_ps(_w1, _val101, _sum10);
_sum10 = _mm256_fmadd_ps(_w2, _val102, _sum10);
_sum10 = _mm256_fmadd_ps(_w3, _val103, _sum10);
_sum10 = _mm256_fmadd_ps(_w4, _val104, _sum10);
_sum10 = _mm256_fmadd_ps(_w5, _val105, _sum10);
_sum10 = _mm256_fmadd_ps(_w6, _val106, _sum10);
_sum10 = _mm256_fmadd_ps(_w7, _val107, _sum10);
_sum11 = _mm256_fmadd_ps(_w0, _val110, _sum11);
_sum11 = _mm256_fmadd_ps(_w1, _val111, _sum11);
_sum11 = _mm256_fmadd_ps(_w2, _val112, _sum11);
_sum11 = _mm256_fmadd_ps(_w3, _val113, _sum11);
_sum11 = _mm256_fmadd_ps(_w4, _val114, _sum11);
_sum11 = _mm256_fmadd_ps(_w5, _val115, _sum11);
_sum11 = _mm256_fmadd_ps(_w6, _val116, _sum11);
_sum11 = _mm256_fmadd_ps(_w7, _val117, _sum11);
tmpptr += 96;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum0);
_mm256_storeu_ps(outptr + 8, _sum1);
_mm256_storeu_ps(outptr + 16, _sum2);
_mm256_storeu_ps(outptr + 24, _sum3);
_mm256_storeu_ps(outptr + 32, _sum4);
_mm256_storeu_ps(outptr + 40, _sum5);
_mm256_storeu_ps(outptr + 48, _sum6);
_mm256_storeu_ps(outptr + 56, _sum7);
_mm256_storeu_ps(outptr + 64, _sum8);
_mm256_storeu_ps(outptr + 72, _sum9);
_mm256_storeu_ps(outptr + 80, _sum10);
_mm256_storeu_ps(outptr + 88, _sum11);
outptr += 96;
}
for (; i + 7 < size; i += 8)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
__m256 _sum0 = _bias0;
__m256 _sum1 = _bias0;
__m256 _sum2 = _bias0;
__m256 _sum3 = _bias0;
__m256 _sum4 = _bias0;
__m256 _sum5 = _bias0;
__m256 _sum6 = _bias0;
__m256 _sum7 = _bias0;
const float* kptr = (const float*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _w0 = _mm256_loadu_ps(kptr);
__m256 _w1 = _mm256_loadu_ps(kptr + 8);
__m256 _w2 = _mm256_loadu_ps(kptr + 16);
__m256 _w3 = _mm256_loadu_ps(kptr + 24);
__m256 _w4 = _mm256_loadu_ps(kptr + 32);
__m256 _w5 = _mm256_loadu_ps(kptr + 40);
__m256 _w6 = _mm256_loadu_ps(kptr + 48);
__m256 _w7 = _mm256_loadu_ps(kptr + 56);
__m256 _val00 = _mm256_broadcast_ss(tmpptr);
__m256 _val01 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val02 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val03 = _mm256_broadcast_ss(tmpptr + 3);
__m256 _val04 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val05 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val06 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val07 = _mm256_broadcast_ss(tmpptr + 7);
__m256 _val10 = _mm256_broadcast_ss(tmpptr + 8);
__m256 _val11 = _mm256_broadcast_ss(tmpptr + 9);
__m256 _val12 = _mm256_broadcast_ss(tmpptr + 10);
__m256 _val13 = _mm256_broadcast_ss(tmpptr + 11);
__m256 _val14 = _mm256_broadcast_ss(tmpptr + 12);
__m256 _val15 = _mm256_broadcast_ss(tmpptr + 13);
__m256 _val16 = _mm256_broadcast_ss(tmpptr + 14);
__m256 _val17 = _mm256_broadcast_ss(tmpptr + 15);
_sum0 = _mm256_fmadd_ps(_w0, _val00, _sum0);
_sum0 = _mm256_fmadd_ps(_w1, _val01, _sum0);
_sum0 = _mm256_fmadd_ps(_w2, _val02, _sum0);
_sum0 = _mm256_fmadd_ps(_w3, _val03, _sum0);
_sum0 = _mm256_fmadd_ps(_w4, _val04, _sum0);
_sum0 = _mm256_fmadd_ps(_w5, _val05, _sum0);
_sum0 = _mm256_fmadd_ps(_w6, _val06, _sum0);
_sum0 = _mm256_fmadd_ps(_w7, _val07, _sum0);
_sum1 = _mm256_fmadd_ps(_w0, _val10, _sum1);
_sum1 = _mm256_fmadd_ps(_w1, _val11, _sum1);
_sum1 = _mm256_fmadd_ps(_w2, _val12, _sum1);
_sum1 = _mm256_fmadd_ps(_w3, _val13, _sum1);
_sum1 = _mm256_fmadd_ps(_w4, _val14, _sum1);
_sum1 = _mm256_fmadd_ps(_w5, _val15, _sum1);
_sum1 = _mm256_fmadd_ps(_w6, _val16, _sum1);
_sum1 = _mm256_fmadd_ps(_w7, _val17, _sum1);
__m256 _val20 = _mm256_broadcast_ss(tmpptr + 16);
__m256 _val21 = _mm256_broadcast_ss(tmpptr + 17);
__m256 _val22 = _mm256_broadcast_ss(tmpptr + 18);
__m256 _val23 = _mm256_broadcast_ss(tmpptr + 19);
__m256 _val24 = _mm256_broadcast_ss(tmpptr + 20);
__m256 _val25 = _mm256_broadcast_ss(tmpptr + 21);
__m256 _val26 = _mm256_broadcast_ss(tmpptr + 22);
__m256 _val27 = _mm256_broadcast_ss(tmpptr + 23);
__m256 _val30 = _mm256_broadcast_ss(tmpptr + 24);
__m256 _val31 = _mm256_broadcast_ss(tmpptr + 25);
__m256 _val32 = _mm256_broadcast_ss(tmpptr + 26);
__m256 _val33 = _mm256_broadcast_ss(tmpptr + 27);
__m256 _val34 = _mm256_broadcast_ss(tmpptr + 28);
__m256 _val35 = _mm256_broadcast_ss(tmpptr + 29);
__m256 _val36 = _mm256_broadcast_ss(tmpptr + 30);
__m256 _val37 = _mm256_broadcast_ss(tmpptr + 31);
_sum2 = _mm256_fmadd_ps(_w0, _val20, _sum2);
_sum2 = _mm256_fmadd_ps(_w1, _val21, _sum2);
_sum2 = _mm256_fmadd_ps(_w2, _val22, _sum2);
_sum2 = _mm256_fmadd_ps(_w3, _val23, _sum2);
_sum2 = _mm256_fmadd_ps(_w4, _val24, _sum2);
_sum2 = _mm256_fmadd_ps(_w5, _val25, _sum2);
_sum2 = _mm256_fmadd_ps(_w6, _val26, _sum2);
_sum2 = _mm256_fmadd_ps(_w7, _val27, _sum2);
_sum3 = _mm256_fmadd_ps(_w0, _val30, _sum3);
_sum3 = _mm256_fmadd_ps(_w1, _val31, _sum3);
_sum3 = _mm256_fmadd_ps(_w2, _val32, _sum3);
_sum3 = _mm256_fmadd_ps(_w3, _val33, _sum3);
_sum3 = _mm256_fmadd_ps(_w4, _val34, _sum3);
_sum3 = _mm256_fmadd_ps(_w5, _val35, _sum3);
_sum3 = _mm256_fmadd_ps(_w6, _val36, _sum3);
_sum3 = _mm256_fmadd_ps(_w7, _val37, _sum3);
__m256 _val40 = _mm256_broadcast_ss(tmpptr + 32);
__m256 _val41 = _mm256_broadcast_ss(tmpptr + 33);
__m256 _val42 = _mm256_broadcast_ss(tmpptr + 34);
__m256 _val43 = _mm256_broadcast_ss(tmpptr + 35);
__m256 _val44 = _mm256_broadcast_ss(tmpptr + 36);
__m256 _val45 = _mm256_broadcast_ss(tmpptr + 37);
__m256 _val46 = _mm256_broadcast_ss(tmpptr + 38);
__m256 _val47 = _mm256_broadcast_ss(tmpptr + 39);
__m256 _val50 = _mm256_broadcast_ss(tmpptr + 40);
__m256 _val51 = _mm256_broadcast_ss(tmpptr + 41);
__m256 _val52 = _mm256_broadcast_ss(tmpptr + 42);
__m256 _val53 = _mm256_broadcast_ss(tmpptr + 43);
__m256 _val54 = _mm256_broadcast_ss(tmpptr + 44);
__m256 _val55 = _mm256_broadcast_ss(tmpptr + 45);
__m256 _val56 = _mm256_broadcast_ss(tmpptr + 46);
__m256 _val57 = _mm256_broadcast_ss(tmpptr + 47);
_sum4 = _mm256_fmadd_ps(_w0, _val40, _sum4);
_sum4 = _mm256_fmadd_ps(_w1, _val41, _sum4);
_sum4 = _mm256_fmadd_ps(_w2, _val42, _sum4);
_sum4 = _mm256_fmadd_ps(_w3, _val43, _sum4);
_sum4 = _mm256_fmadd_ps(_w4, _val44, _sum4);
_sum4 = _mm256_fmadd_ps(_w5, _val45, _sum4);
_sum4 = _mm256_fmadd_ps(_w6, _val46, _sum4);
_sum4 = _mm256_fmadd_ps(_w7, _val47, _sum4);
_sum5 = _mm256_fmadd_ps(_w0, _val50, _sum5);
_sum5 = _mm256_fmadd_ps(_w1, _val51, _sum5);
_sum5 = _mm256_fmadd_ps(_w2, _val52, _sum5);
_sum5 = _mm256_fmadd_ps(_w3, _val53, _sum5);
_sum5 = _mm256_fmadd_ps(_w4, _val54, _sum5);
_sum5 = _mm256_fmadd_ps(_w5, _val55, _sum5);
_sum5 = _mm256_fmadd_ps(_w6, _val56, _sum5);
_sum5 = _mm256_fmadd_ps(_w7, _val57, _sum5);
__m256 _val60 = _mm256_broadcast_ss(tmpptr + 48);
__m256 _val61 = _mm256_broadcast_ss(tmpptr + 49);
__m256 _val62 = _mm256_broadcast_ss(tmpptr + 50);
__m256 _val63 = _mm256_broadcast_ss(tmpptr + 51);
__m256 _val64 = _mm256_broadcast_ss(tmpptr + 52);
__m256 _val65 = _mm256_broadcast_ss(tmpptr + 53);
__m256 _val66 = _mm256_broadcast_ss(tmpptr + 54);
__m256 _val67 = _mm256_broadcast_ss(tmpptr + 55);
__m256 _val70 = _mm256_broadcast_ss(tmpptr + 56);
__m256 _val71 = _mm256_broadcast_ss(tmpptr + 57);
__m256 _val72 = _mm256_broadcast_ss(tmpptr + 58);
__m256 _val73 = _mm256_broadcast_ss(tmpptr + 59);
__m256 _val74 = _mm256_broadcast_ss(tmpptr + 60);
__m256 _val75 = _mm256_broadcast_ss(tmpptr + 61);
__m256 _val76 = _mm256_broadcast_ss(tmpptr + 62);
__m256 _val77 = _mm256_broadcast_ss(tmpptr + 63);
_sum6 = _mm256_fmadd_ps(_w0, _val60, _sum6);
_sum6 = _mm256_fmadd_ps(_w1, _val61, _sum6);
_sum6 = _mm256_fmadd_ps(_w2, _val62, _sum6);
_sum6 = _mm256_fmadd_ps(_w3, _val63, _sum6);
_sum6 = _mm256_fmadd_ps(_w4, _val64, _sum6);
_sum6 = _mm256_fmadd_ps(_w5, _val65, _sum6);
_sum6 = _mm256_fmadd_ps(_w6, _val66, _sum6);
_sum6 = _mm256_fmadd_ps(_w7, _val67, _sum6);
_sum7 = _mm256_fmadd_ps(_w0, _val70, _sum7);
_sum7 = _mm256_fmadd_ps(_w1, _val71, _sum7);
_sum7 = _mm256_fmadd_ps(_w2, _val72, _sum7);
_sum7 = _mm256_fmadd_ps(_w3, _val73, _sum7);
_sum7 = _mm256_fmadd_ps(_w4, _val74, _sum7);
_sum7 = _mm256_fmadd_ps(_w5, _val75, _sum7);
_sum7 = _mm256_fmadd_ps(_w6, _val76, _sum7);
_sum7 = _mm256_fmadd_ps(_w7, _val77, _sum7);
tmpptr += 64;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum0);
_mm256_storeu_ps(outptr + 8, _sum1);
_mm256_storeu_ps(outptr + 16, _sum2);
_mm256_storeu_ps(outptr + 24, _sum3);
_mm256_storeu_ps(outptr + 32, _sum4);
_mm256_storeu_ps(outptr + 40, _sum5);
_mm256_storeu_ps(outptr + 48, _sum6);
_mm256_storeu_ps(outptr + 56, _sum7);
outptr += 64;
}
for (; i + 3 < size; i += 4)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
__m256 _sum0 = _bias0;
__m256 _sum1 = _bias0;
__m256 _sum2 = _bias0;
__m256 _sum3 = _bias0;
const float* kptr = (const float*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _w0 = _mm256_loadu_ps(kptr);
__m256 _w1 = _mm256_loadu_ps(kptr + 8);
__m256 _w2 = _mm256_loadu_ps(kptr + 16);
__m256 _w3 = _mm256_loadu_ps(kptr + 24);
__m256 _w4 = _mm256_loadu_ps(kptr + 32);
__m256 _w5 = _mm256_loadu_ps(kptr + 40);
__m256 _w6 = _mm256_loadu_ps(kptr + 48);
__m256 _w7 = _mm256_loadu_ps(kptr + 56);
__m256 _val00 = _mm256_broadcast_ss(tmpptr);
__m256 _val01 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val02 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val03 = _mm256_broadcast_ss(tmpptr + 3);
__m256 _val04 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val05 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val06 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val07 = _mm256_broadcast_ss(tmpptr + 7);
__m256 _val10 = _mm256_broadcast_ss(tmpptr + 8);
__m256 _val11 = _mm256_broadcast_ss(tmpptr + 9);
__m256 _val12 = _mm256_broadcast_ss(tmpptr + 10);
__m256 _val13 = _mm256_broadcast_ss(tmpptr + 11);
__m256 _val14 = _mm256_broadcast_ss(tmpptr + 12);
__m256 _val15 = _mm256_broadcast_ss(tmpptr + 13);
__m256 _val16 = _mm256_broadcast_ss(tmpptr + 14);
__m256 _val17 = _mm256_broadcast_ss(tmpptr + 15);
_sum0 = _mm256_fmadd_ps(_w0, _val00, _sum0);
_sum0 = _mm256_fmadd_ps(_w1, _val01, _sum0);
_sum0 = _mm256_fmadd_ps(_w2, _val02, _sum0);
_sum0 = _mm256_fmadd_ps(_w3, _val03, _sum0);
_sum0 = _mm256_fmadd_ps(_w4, _val04, _sum0);
_sum0 = _mm256_fmadd_ps(_w5, _val05, _sum0);
_sum0 = _mm256_fmadd_ps(_w6, _val06, _sum0);
_sum0 = _mm256_fmadd_ps(_w7, _val07, _sum0);
_sum1 = _mm256_fmadd_ps(_w0, _val10, _sum1);
_sum1 = _mm256_fmadd_ps(_w1, _val11, _sum1);
_sum1 = _mm256_fmadd_ps(_w2, _val12, _sum1);
_sum1 = _mm256_fmadd_ps(_w3, _val13, _sum1);
_sum1 = _mm256_fmadd_ps(_w4, _val14, _sum1);
_sum1 = _mm256_fmadd_ps(_w5, _val15, _sum1);
_sum1 = _mm256_fmadd_ps(_w6, _val16, _sum1);
_sum1 = _mm256_fmadd_ps(_w7, _val17, _sum1);
__m256 _val20 = _mm256_broadcast_ss(tmpptr + 16);
__m256 _val21 = _mm256_broadcast_ss(tmpptr + 17);
__m256 _val22 = _mm256_broadcast_ss(tmpptr + 18);
__m256 _val23 = _mm256_broadcast_ss(tmpptr + 19);
__m256 _val24 = _mm256_broadcast_ss(tmpptr + 20);
__m256 _val25 = _mm256_broadcast_ss(tmpptr + 21);
__m256 _val26 = _mm256_broadcast_ss(tmpptr + 22);
__m256 _val27 = _mm256_broadcast_ss(tmpptr + 23);
__m256 _val30 = _mm256_broadcast_ss(tmpptr + 24);
__m256 _val31 = _mm256_broadcast_ss(tmpptr + 25);
__m256 _val32 = _mm256_broadcast_ss(tmpptr + 26);
__m256 _val33 = _mm256_broadcast_ss(tmpptr + 27);
__m256 _val34 = _mm256_broadcast_ss(tmpptr + 28);
__m256 _val35 = _mm256_broadcast_ss(tmpptr + 29);
__m256 _val36 = _mm256_broadcast_ss(tmpptr + 30);
__m256 _val37 = _mm256_broadcast_ss(tmpptr + 31);
_sum2 = _mm256_fmadd_ps(_w0, _val20, _sum2);
_sum2 = _mm256_fmadd_ps(_w1, _val21, _sum2);
_sum2 = _mm256_fmadd_ps(_w2, _val22, _sum2);
_sum2 = _mm256_fmadd_ps(_w3, _val23, _sum2);
_sum2 = _mm256_fmadd_ps(_w4, _val24, _sum2);
_sum2 = _mm256_fmadd_ps(_w5, _val25, _sum2);
_sum2 = _mm256_fmadd_ps(_w6, _val26, _sum2);
_sum2 = _mm256_fmadd_ps(_w7, _val27, _sum2);
_sum3 = _mm256_fmadd_ps(_w0, _val30, _sum3);
_sum3 = _mm256_fmadd_ps(_w1, _val31, _sum3);
_sum3 = _mm256_fmadd_ps(_w2, _val32, _sum3);
_sum3 = _mm256_fmadd_ps(_w3, _val33, _sum3);
_sum3 = _mm256_fmadd_ps(_w4, _val34, _sum3);
_sum3 = _mm256_fmadd_ps(_w5, _val35, _sum3);
_sum3 = _mm256_fmadd_ps(_w6, _val36, _sum3);
_sum3 = _mm256_fmadd_ps(_w7, _val37, _sum3);
tmpptr += 32;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum0);
_mm256_storeu_ps(outptr + 8, _sum1);
_mm256_storeu_ps(outptr + 16, _sum2);
_mm256_storeu_ps(outptr + 24, _sum3);
outptr += 32;
}
for (; i + 1 < size; i += 2)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
__m256 _sum0 = _bias0;
__m256 _sum1 = _bias0;
const float* kptr = (const float*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _val00 = _mm256_broadcast_ss(tmpptr);
__m256 _val01 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val02 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val03 = _mm256_broadcast_ss(tmpptr + 3);
__m256 _val04 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val05 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val06 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val07 = _mm256_broadcast_ss(tmpptr + 7);
__m256 _val10 = _mm256_broadcast_ss(tmpptr + 8);
__m256 _val11 = _mm256_broadcast_ss(tmpptr + 9);
__m256 _val12 = _mm256_broadcast_ss(tmpptr + 10);
__m256 _val13 = _mm256_broadcast_ss(tmpptr + 11);
__m256 _val14 = _mm256_broadcast_ss(tmpptr + 12);
__m256 _val15 = _mm256_broadcast_ss(tmpptr + 13);
__m256 _val16 = _mm256_broadcast_ss(tmpptr + 14);
__m256 _val17 = _mm256_broadcast_ss(tmpptr + 15);
__m256 _w0 = _mm256_loadu_ps(kptr);
__m256 _w1 = _mm256_loadu_ps(kptr + 8);
__m256 _w2 = _mm256_loadu_ps(kptr + 16);
__m256 _w3 = _mm256_loadu_ps(kptr + 24);
__m256 _w4 = _mm256_loadu_ps(kptr + 32);
__m256 _w5 = _mm256_loadu_ps(kptr + 40);
__m256 _w6 = _mm256_loadu_ps(kptr + 48);
__m256 _w7 = _mm256_loadu_ps(kptr + 56);
_sum0 = _mm256_fmadd_ps(_w0, _val00, _sum0);
_sum0 = _mm256_fmadd_ps(_w1, _val01, _sum0);
_sum0 = _mm256_fmadd_ps(_w2, _val02, _sum0);
_sum0 = _mm256_fmadd_ps(_w3, _val03, _sum0);
_sum0 = _mm256_fmadd_ps(_w4, _val04, _sum0);
_sum0 = _mm256_fmadd_ps(_w5, _val05, _sum0);
_sum0 = _mm256_fmadd_ps(_w6, _val06, _sum0);
_sum0 = _mm256_fmadd_ps(_w7, _val07, _sum0);
_sum1 = _mm256_fmadd_ps(_w0, _val10, _sum1);
_sum1 = _mm256_fmadd_ps(_w1, _val11, _sum1);
_sum1 = _mm256_fmadd_ps(_w2, _val12, _sum1);
_sum1 = _mm256_fmadd_ps(_w3, _val13, _sum1);
_sum1 = _mm256_fmadd_ps(_w4, _val14, _sum1);
_sum1 = _mm256_fmadd_ps(_w5, _val15, _sum1);
_sum1 = _mm256_fmadd_ps(_w6, _val16, _sum1);
_sum1 = _mm256_fmadd_ps(_w7, _val17, _sum1);
tmpptr += 16;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum0);
_mm256_storeu_ps(outptr + 8, _sum1);
outptr += 16;
}
for (; i < size; i++)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
__m256 _sum = _bias0;
const float* kptr = (const float*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _val0 = _mm256_broadcast_ss(tmpptr);
__m256 _val1 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val2 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val3 = _mm256_broadcast_ss(tmpptr + 3);
__m256 _val4 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val5 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val6 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val7 = _mm256_broadcast_ss(tmpptr + 7);
__m256 _w0 = _mm256_loadu_ps(kptr);
__m256 _w1 = _mm256_loadu_ps(kptr + 8);
__m256 _w2 = _mm256_loadu_ps(kptr + 16);
__m256 _w3 = _mm256_loadu_ps(kptr + 24);
__m256 _w4 = _mm256_loadu_ps(kptr + 32);
__m256 _w5 = _mm256_loadu_ps(kptr + 40);
__m256 _w6 = _mm256_loadu_ps(kptr + 48);
__m256 _w7 = _mm256_loadu_ps(kptr + 56);
_sum = _mm256_fmadd_ps(_w0, _val0, _sum);
_sum = _mm256_fmadd_ps(_w1, _val1, _sum);
_sum = _mm256_fmadd_ps(_w2, _val2, _sum);
_sum = _mm256_fmadd_ps(_w3, _val3, _sum);
_sum = _mm256_fmadd_ps(_w4, _val4, _sum);
_sum = _mm256_fmadd_ps(_w5, _val5, _sum);
_sum = _mm256_fmadd_ps(_w6, _val6, _sum);
_sum = _mm256_fmadd_ps(_w7, _val7, _sum);
tmpptr += 8;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum);
outptr += 8;
}
}
}
// Stride-2 1x1 convolution for pack8 AVX data.
// Strategy: compact the input by copying every second pack8 element of each
// used row into a dense temporary blob, then reuse the stride-1 sgemm kernel.
static void conv1x1s2_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    const int w = bottom_blob.w;
    const int channels = bottom_blob.c;
    const size_t elemsize = bottom_blob.elemsize;
    const int elempack = bottom_blob.elempack;
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    // After consuming outw columns (2 input packs each), skip the remainder of
    // the row plus one whole skipped row (vertical stride 2), in floats.
    const int tailstep = (w - 2 * outw + w) * 8;
    Mat bottom_blob_shrinked;
    bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < channels; p++)
    {
        const float* sptr = bottom_blob.channel(p);
        float* dptr = bottom_blob_shrinked.channel(p);
        for (int y = 0; y < outh; y++)
        {
            for (int x = 0; x < outw; x++)
            {
                // copy one pack8 element, then jump over the next (stride 2)
                _mm256_storeu_ps(dptr, _mm256_loadu_ps(sptr));
                sptr += 16;
                dptr += 8;
            }
            sptr += tailstep;
        }
    }
    conv1x1s1_sgemm_pack8_avx(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
data.h | /*!
* Copyright (c) 2015 by Contributors
* \file data.h
* \brief The input data structure of xgboost.
* \author Tianqi Chen
*/
#ifndef XGBOOST_DATA_H_
#define XGBOOST_DATA_H_
#include <dmlc/base.h>
#include <dmlc/data.h>
#include <rabit/rabit.h>
#include <cstring>
#include <memory>
#include <numeric>
#include <algorithm>
#include <string>
#include <vector>
#include "./base.h"
#include "../../src/common/span.h"
#include "../../src/common/group_data.h"
#include "../../src/common/host_device_vector.h"
namespace xgboost {
// forward declare learner.
class LearnerImpl;
/*! \brief data type accepted by xgboost interface */
enum DataType {
kFloat32 = 1,  // 32-bit floating point
kDouble = 2,   // 64-bit floating point
kUInt32 = 3,   // 32-bit unsigned integer
kUInt64 = 4    // 64-bit unsigned integer
// NOTE(review): values are explicit and presumably part of the external C API
// contract (SetInfo dispatch) — do not renumber; confirm against c_api.
};
/*!
* \brief Meta information about dataset, always sit in memory.
*/
class MetaInfo {
public:
/*! \brief number of rows in the data */
uint64_t num_row_{0};
/*! \brief number of columns in the data */
uint64_t num_col_{0};
/*! \brief number of nonzero entries in the data */
uint64_t num_nonzero_{0};
/*! \brief label of each instance */
HostDeviceVector<bst_float> labels_;
/*!
* \brief specified root index of each instance,
* can be used for multi task setting
*/
std::vector<bst_uint> root_index_;
/*!
* \brief the index of begin and end of a group
* needed when the learning task is ranking.
*/
std::vector<bst_uint> group_ptr_;
/*! \brief weights of each instance, optional */
HostDeviceVector<bst_float> weights_;
/*! \brief session-id of each instance, optional */
std::vector<uint64_t> qids_;
/*!
* \brief initialized margins,
* if specified, xgboost will start from this init margin
* can be used to specify initial prediction to boost from.
*/
HostDeviceVector<bst_float> base_margin_;
/*! \brief version flag, used to check version of this info */
static const int kVersion = 2;
/*! \brief version that introduced qid field */
static const int kVersionQidAdded = 2;
/*! \brief default constructor */
MetaInfo() = default;
/*!
* \brief Get weight of each instances.
* \param i Instance index.
* \return The weight.
*/
inline bst_float GetWeight(size_t i) const {
// Unweighted datasets (empty weights_) default every instance weight to 1.
return weights_.Size() != 0 ? weights_.HostVector()[i] : 1.0f;
}
/*!
* \brief Get the root index of i-th instance.
* \param i Instance index.
* \return The pre-defined root index of i-th instance.
*/
inline unsigned GetRoot(size_t i) const {
// Root index defaults to 0 when no per-instance roots were specified.
return root_index_.size() != 0 ? root_index_[i] : 0U;
}
/*! \brief get sorted indexes (argsort) of labels by absolute value (used by cox loss) */
inline const std::vector<size_t>& LabelAbsSort() const {
// Cached result is considered valid once its size matches the label vector;
// the cache is `mutable` so this const accessor can fill it lazily.
if (label_order_cache_.size() == labels_.Size()) {
return label_order_cache_;
}
label_order_cache_.resize(labels_.Size());
std::iota(label_order_cache_.begin(), label_order_cache_.end(), 0);
const auto& l = labels_.HostVector();
XGBOOST_PARALLEL_SORT(label_order_cache_.begin(), label_order_cache_.end(),
[&l](size_t i1, size_t i2) {return std::abs(l[i1]) < std::abs(l[i2]);});
return label_order_cache_;
}
/*! \brief clear all the information */
void Clear();
/*!
* \brief Load the Meta info from binary stream.
* \param fi The input stream
*/
void LoadBinary(dmlc::Stream* fi);
/*!
* \brief Save the Meta info to binary stream
* \param fo The output stream.
*/
void SaveBinary(dmlc::Stream* fo) const;
/*!
* \brief Set information in the meta info.
* \param key The key of the information.
* \param dptr The data pointer of the source array.
* \param dtype The type of the source data.
* \param num Number of elements in the source array.
*/
void SetInfo(const char* key, const void* dptr, DataType dtype, size_t num);
private:
/*! \brief argsort of labels, lazily computed by LabelAbsSort() */
mutable std::vector<size_t> label_order_cache_;
};
/*! \brief Element from a sparse vector */
struct Entry {
/*! \brief feature index */
bst_uint index;
/*! \brief feature value */
bst_float fvalue;
/*! \brief default constructor */
Entry() = default;
/*!
* \brief constructor with index and value
* \param index The feature or row index.
* \param fvalue The feature value.
*/
Entry(bst_uint index, bst_float fvalue) : index(index), fvalue(fvalue) {}
/*! \brief comparator ordering entries by feature value, ascending
* (a plain less-than on fvalue; the index field is ignored) */
inline static bool CmpValue(const Entry& a, const Entry& b) {
return a.fvalue < b.fvalue;
}
/*! \brief equality: both index and value must match exactly */
inline bool operator==(const Entry& other) const {
return (this->index == other.index && this->fvalue == other.fvalue);
}
};
/*!
* \brief In-memory storage unit of sparse batch, stored in CSR format.
*/
class SparsePage {
 public:
  // Offset of the start of each segment within `data` (CSR-style pointer
  // array; always holds one trailing sentinel, so Size() == offset.Size() - 1).
  HostDeviceVector<size_t> offset;
  /*! \brief the data of the segments */
  HostDeviceVector<Entry> data;
  // Row id of the first row held by this page (pages loaded from external
  // memory start at a non-zero base).
  size_t base_rowid;
  /*! \brief an instance of sparse vector in the batch */
  using Inst = common::Span<Entry const>;
  /*!
   * \brief Get i-th instance (row or column segment) from the batch.
   * \param i Segment index relative to this page.
   * \return Read-only span over the segment's entries.
   */
  inline Inst operator[](size_t i) const {
    const auto& data_vec = data.HostVector();
    const auto& offset_vec = offset.HostVector();
    size_t size;
    // in distributed mode, some partitions may not get any instance for a feature. Therefore
    // we should set the size as zero
    if (rabit::IsDistributed() && i + 1 >= offset_vec.size()) {
      size = 0;
    } else {
      size = offset_vec[i + 1] - offset_vec[i];
    }
    return {data_vec.data() + offset_vec[i],
            static_cast<Inst::index_type>(size)};
  }
  /*! \brief constructor */
  SparsePage() {
    this->Clear();
  }
  /*! \return number of instance in the page */
  inline size_t Size() const {
    return offset.Size() - 1;
  }
  // NOTE: a redundant non-const Size() overload with an identical body was
  // removed; the const overload serves all callers.
  /*! \return estimation of memory cost of this page */
  inline size_t MemCostBytes() const {
    return offset.Size() * sizeof(size_t) + data.Size() * sizeof(Entry);
  }
  /*! \brief clear the page; keeps the single 0 sentinel so Size() == 0 */
  inline void Clear() {
    base_rowid = 0;
    auto& offset_vec = offset.HostVector();
    offset_vec.clear();
    offset_vec.push_back(0);
    data.HostVector().clear();
  }
  /*!
   * \brief Transpose the page (e.g. CSR rows -> per-column segments).
   * Two parallel passes: first count entries per output group (budget),
   * then allocate storage and push entries.
   * \param num_columns Number of groups in the transposed page.
   */
  SparsePage GetTranspose(int num_columns) const {
    SparsePage transpose;
    common::ParallelGroupBuilder<Entry> builder(&transpose.offset.HostVector(),
                                                &transpose.data.HostVector());
    const int nthread = omp_get_max_threads();
    builder.InitBudget(num_columns, nthread);
    long batch_size = static_cast<long>(this->Size());  // NOLINT(*)
    #pragma omp parallel for schedule(static)
    for (long i = 0; i < batch_size; ++i) {  // NOLINT(*)
      int tid = omp_get_thread_num();
      auto inst = (*this)[i];
      for (bst_uint j = 0; j < inst.size(); ++j) {
        builder.AddBudget(inst[j].index, tid);
      }
    }
    builder.InitStorage();
    #pragma omp parallel for schedule(static)
    for (long i = 0; i < batch_size; ++i) {  // NOLINT(*)
      int tid = omp_get_thread_num();
      auto inst = (*this)[i];
      for (bst_uint j = 0; j < inst.size(); ++j) {
        // transposed entry: index becomes the global row id, value is kept
        builder.Push(
            inst[j].index,
            Entry(static_cast<bst_uint>(this->base_rowid + i), inst[j].fvalue),
            tid);
      }
    }
    return transpose;
  }
  /*! \brief Sort entries of every non-empty segment by feature value
   * (ascending, via Entry::CmpValue). Intended for column pages. */
  void SortRows() {
    auto ncol = static_cast<bst_omp_uint>(this->Size());
    #pragma omp parallel for schedule(dynamic, 1)
    for (bst_omp_uint i = 0; i < ncol; ++i) {
      if (this->offset.HostVector()[i] < this->offset.HostVector()[i + 1]) {
        std::sort(
            this->data.HostVector().begin() + this->offset.HostVector()[i],
            this->data.HostVector().begin() + this->offset.HostVector()[i + 1],
            Entry::CmpValue);
      }
    }
  }
  /*!
   * \brief Push row block into the page.
   * \param batch the row batch.
   */
  void Push(const dmlc::RowBlock<uint32_t>& batch);
  /*!
   * \brief Push a sparse page
   * \param batch the row page
   */
  void Push(const SparsePage &batch);
  /*!
   * \brief Push a SparsePage stored in CSC format
   * \param batch The row batch to be pushed
   */
  void PushCSC(const SparsePage& batch);
  /*!
   * \brief Push one instance into page
   * \param inst an instance row
   */
  void Push(const Inst &inst);
};
/*! \brief Abstract interface behind BatchIterator; concrete implementations
 * produce successive SparsePage batches. */
class BatchIteratorImpl {
public:
virtual ~BatchIteratorImpl() {}
/*! \brief Deep copy; caller takes ownership of the returned object. */
virtual BatchIteratorImpl* Clone() = 0;
/*! \brief Access the current batch. */
virtual SparsePage& operator*() = 0;
virtual const SparsePage& operator*() const = 0;
/*! \brief Advance to the next batch. */
virtual void operator++() = 0;
/*! \brief Whether iteration is exhausted. */
virtual bool AtEnd() const = 0;
};
/*! \brief Value-semantic forward iterator over SparsePage batches,
 * owning a BatchIteratorImpl. */
class BatchIterator {
public:
using iterator_category = std::forward_iterator_tag;
/*! \brief Takes ownership of impl (may be nullptr for the end sentinel). */
explicit BatchIterator(BatchIteratorImpl* impl) { impl_.reset(impl); }
/*! \brief Copying deep-copies the underlying impl via Clone(). */
BatchIterator(const BatchIterator& other) {
if (other.impl_) {
impl_.reset(other.impl_->Clone());
} else {
impl_.reset();
}
}
void operator++() {
CHECK(impl_ != nullptr);
++(*impl_);
}
SparsePage& operator*() {
CHECK(impl_ != nullptr);
return *(*impl_);
}
const SparsePage& operator*() const {
CHECK(impl_ != nullptr);
return *(*impl_);
}
// NOTE(review): rhs is ignored — this only tests !AtEnd(), so comparison is
// meaningful solely against the end sentinel (as in range-based for loops).
bool operator!=(const BatchIterator& rhs) const {
CHECK(impl_ != nullptr);
return !impl_->AtEnd();
}
bool AtEnd() const {
CHECK(impl_ != nullptr);
return impl_->AtEnd();
}
private:
std::unique_ptr<BatchIteratorImpl> impl_;
};
/*! \brief Range adapter so callers can iterate batches with a
 * range-based for loop; end() is a null-impl sentinel. */
class BatchSet {
public:
explicit BatchSet(BatchIterator begin_iter) : begin_iter_(begin_iter) {}
BatchIterator begin() { return begin_iter_; }
BatchIterator end() { return BatchIterator(nullptr); }
private:
BatchIterator begin_iter_;
};
/*!
* \brief This is data structure that user can pass to DMatrix::Create
* to create a DMatrix for training, user can create this data structure
* for customized Data Loading on single machine.
*
* On distributed setting, usually an customized dmlc::Parser is needed instead.
*/
/*! \brief User-implementable data iterator (dmlc::DataIter over SparsePage)
 * that carries dataset meta information alongside the batches. */
class DataSource : public dmlc::DataIter<SparsePage> {
public:
/*!
* \brief Meta information about the dataset
* The subclass need to be able to load this correctly from data.
*/
MetaInfo info;
};
/*!
* \brief Internal data structured used by XGBoost during training.
* There are two ways to create a customized DMatrix that reads in user defined-format.
*
* - Provide a dmlc::Parser and pass into the DMatrix::Create
* - Alternatively, if data can be represented by an URL, define a new dmlc::Parser and register by DMLC_REGISTER_DATA_PARSER;
* - This works best for user defined data input source, such as data-base, filesystem.
* - Provide a DataSource, that can be passed to DMatrix::Create
* This can be used to re-use inmemory data structure into DMatrix.
*/
class DMatrix {
public:
/*! \brief default constructor */
DMatrix() = default;
/*! \brief meta information of the dataset */
virtual MetaInfo& Info() = 0;
/*! \brief meta information of the dataset */
virtual const MetaInfo& Info() const = 0;
/**
* \brief Gets row batches. Use range based for loop over BatchSet to access individual batches.
*/
virtual BatchSet GetRowBatches() = 0;
// Column-major access. NOTE(review): the "Sorted" variant presumably yields
// columns with entries sorted by feature value — confirm with implementations.
virtual BatchSet GetSortedColumnBatches() = 0;
virtual BatchSet GetColumnBatches() = 0;
// the following are column meta data, should be able to answer them fast.
/*! \return Whether the data columns are stored in a single column block. */
virtual bool SingleColBlock() const = 0;
/*! \brief get column density */
virtual float GetColDensity(size_t cidx) = 0;
/*! \brief virtual destructor */
virtual ~DMatrix() = default;
/*!
* \brief Save DMatrix to local file.
* The saved file only works for non-sharded dataset(single machine training).
* This API is deprecated and dis-encouraged to use.
* \param fname The file name to be saved.
* \return The created DMatrix.
*/
virtual void SaveToLocalFile(const std::string& fname);
/*!
* \brief Load DMatrix from URI.
* \param uri The URI of input.
* \param silent Whether print information during loading.
* \param load_row_split Flag to read in part of rows, divided among the workers in distributed mode.
* \param file_format The format type of the file, used for dmlc::Parser::Create.
* By default "auto" will be able to load in both local binary file.
* \param page_size Page size for external memory.
* \return The created DMatrix.
*/
static DMatrix* Load(const std::string& uri,
bool silent,
bool load_row_split,
const std::string& file_format = "auto",
const size_t page_size = kPageSize);
/*!
* \brief create a new DMatrix, by wrapping a row_iterator, and meta info.
* \param source The source iterator of the data, the create function takes ownership of the source.
* \param cache_prefix The path to prefix of temporary cache file of the DMatrix when used in external memory mode.
* This can be nullptr for common cases, and in-memory mode will be used.
* \return a Created DMatrix.
*/
static DMatrix* Create(std::unique_ptr<DataSource>&& source,
const std::string& cache_prefix = "");
/*!
* \brief Create a DMatrix by loading data from parser.
* Parser can later be deleted after the DMatrix i created.
* \param parser The input data parser
* \param cache_prefix The path to prefix of temporary cache file of the DMatrix when used in external memory mode.
* This can be nullptr for common cases, and in-memory mode will be used.
* \param page_size Page size for external memory.
* \sa dmlc::Parser
* \note dmlc-core provides efficient distributed data parser for libsvm format.
* User can create and register customized parser to load their own format using DMLC_REGISTER_DATA_PARSER.
* See "dmlc-core/include/dmlc/data.h" for detail.
* \return A created DMatrix.
*/
static DMatrix* Create(dmlc::Parser<uint32_t>* parser,
const std::string& cache_prefix = "",
const size_t page_size = kPageSize);
/*! \brief page size 32 MB */
static const size_t kPageSize = 32UL << 20UL;
};
} // namespace xgboost
namespace dmlc {
DMLC_DECLARE_TRAITS(is_pod, xgboost::Entry, true);
}
#endif // XGBOOST_DATA_H_
|
transform.h | /*!
* Copyright 2018 XGBoost contributors
*/
#ifndef XGBOOST_COMMON_TRANSFORM_H_
#define XGBOOST_COMMON_TRANSFORM_H_
#include <dmlc/omp.h>
#include <xgboost/data.h>
#include <utility>
#include <vector>
#include <type_traits> // enable_if
#include "host_device_vector.h"
#include "common.h"
#include "span.h"
#if defined (__CUDACC__)
#include "device_helpers.cuh"
#endif // defined (__CUDACC__)
namespace xgboost {
namespace common {
constexpr size_t kBlockThreads = 256;
namespace detail {
#if defined(__CUDACC__)
/*! \brief CUDA kernel that applies _func to every index in _range,
 * passing the unpacked device spans; uses dh::GridStrideRange so any
 * grid size covers the whole range. */
template <typename Functor, typename... SpanType>
__global__ void LaunchCUDAKernel(Functor _func, Range _range,
SpanType... _spans) {
for (auto i : dh::GridStrideRange(*_range.begin(), *_range.end())) {
_func(i, _spans...);
}
}
#endif // defined(__CUDACC__)
} // namespace detail
/*! \brief Do Transformation on HostDeviceVectors.
*
* \tparam CompiledWithCuda A bool parameter used to distinguish compilation
* trajectories, users do not need to use it.
*
* Note: Using Transform is a VERY tricky thing to do. Transform uses template
* argument to duplicate itself into two different types, one for CPU,
* another for CUDA. The trick is not without its flaw:
*
* If you use it in a function that can be compiled by both nvcc and host
* compiler, the behaviour is un-defined! Because your function is NOT
* duplicated by `CompiledWithCuda`. At link time, cuda compiler resolution
* will merge functions with same signature.
*/
template <bool CompiledWithCuda = WITH_CUDA()>
class Transform {
private:
/*! \brief Bound functor + range + device distribution; Eval() dispatches
 * to CPU or CUDA based on whether the distribution is empty. */
template <typename Functor>
struct Evaluator {
public:
Evaluator(Functor func, Range range, GPUSet devices, bool reshard) :
func_(func), range_{std::move(range)},
reshard_{reshard},
distribution_{std::move(GPUDistribution::Block(devices))} {}
Evaluator(Functor func, Range range, GPUDistribution dist,
bool reshard) :
func_(func), range_{std::move(range)}, reshard_{reshard},
distribution_{std::move(dist)} {}
/*!
* \brief Evaluate the functor with input pointers to HostDeviceVector.
*
* \tparam HDV... HostDeviceVectors type.
* \param vectors Pointers to HostDeviceVector.
*/
template <typename... HDV>
void Eval(HDV... vectors) const {
// An empty distribution means no GPUs were requested -> run on CPU.
bool on_device = !distribution_.IsEmpty();
if (on_device) {
LaunchCUDA(func_, vectors...);
} else {
LaunchCPU(func_, vectors...);
}
}
private:
// CUDA UnpackHDV: hand the functor a device-side span for one device.
template <typename T>
Span<T> UnpackHDV(HostDeviceVector<T>* _vec, int _device) const {
auto span = _vec->DeviceSpan(_device);
return span;
}
template <typename T>
Span<T const> UnpackHDV(const HostDeviceVector<T>* _vec, int _device) const {
auto span = _vec->ConstDeviceSpan(_device);
return span;
}
// CPU UnpackHDV: span over the host buffer.
template <typename T>
Span<T> UnpackHDV(HostDeviceVector<T>* _vec) const {
return Span<T> {_vec->HostPointer(),
static_cast<typename Span<T>::index_type>(_vec->Size())};
}
template <typename T>
Span<T const> UnpackHDV(const HostDeviceVector<T>* _vec) const {
return Span<T const> {_vec->ConstHostPointer(),
static_cast<typename Span<T>::index_type>(_vec->Size())};
}
// Recursive unpack for Reshard: reshard every vector in the pack.
template <typename T>
void UnpackReshard(GPUDistribution dist, const HostDeviceVector<T>* vector) const {
vector->Reshard(dist);
}
template <typename Head, typename... Rest>
void UnpackReshard(GPUDistribution dist,
const HostDeviceVector<Head>* _vector,
const HostDeviceVector<Rest>*... _vectors) const {
_vector->Reshard(dist);
UnpackReshard(dist, _vectors...);
}
#if defined(__CUDACC__)
template <typename std::enable_if<CompiledWithCuda>::type* = nullptr,
typename... HDV>
void LaunchCUDA(Functor _func, HDV*... _vectors) const {
if (reshard_)
UnpackReshard(distribution_, _vectors...);
GPUSet devices = distribution_.Devices();
size_t range_size = *range_.end() - *range_.begin();
// Extract index to deal with possible old OpenMP.
size_t device_beg = *(devices.begin());
size_t device_end = *(devices.end());
#pragma omp parallel for schedule(static, 1) if (devices.Size() > 1)
for (omp_ulong device = device_beg; device < device_end; ++device) { // NOLINT
// Ignore other attributes of GPUDistribution for splitting index.
// This deals with situation like multi-class setting where
// granularity is used in data vector.
size_t shard_size = GPUDistribution::Block(devices).ShardSize(
range_size, devices.Index(device));
Range shard_range {0, static_cast<Range::DifferenceType>(shard_size)};
dh::safe_cuda(cudaSetDevice(device));
// NOTE(review): grid size is derived from the full range end rather than
// shard_size — over-provisioned for multi-GPU shards; harmless with the
// grid-stride loop in the kernel, but confirm this is intentional.
const int GRID_SIZE =
static_cast<int>(dh::DivRoundUp(*(range_.end()), kBlockThreads));
detail::LaunchCUDAKernel<<<GRID_SIZE, kBlockThreads>>>(
_func, shard_range, UnpackHDV(_vectors, device)...);
}
}
#else
/*! \brief Dummy function defined when compiling for CPU; reaching it at
 * runtime is a logic error. */
template <typename std::enable_if<!CompiledWithCuda>::type* = nullptr,
typename... HDV>
void LaunchCUDA(Functor _func, HDV*... _vectors) const {
LOG(FATAL) << "Not part of device code. WITH_CUDA: " << WITH_CUDA();
}
#endif // defined(__CUDACC__)
/*! \brief Apply the functor to every index of the range on the host. */
template <typename... HDV>
void LaunchCPU(Functor func, HDV*... vectors) const {
omp_ulong end = static_cast<omp_ulong>(*(range_.end()));
#pragma omp parallel for schedule(static)
for (omp_ulong idx = 0; idx < end; ++idx) {
func(idx, UnpackHDV(vectors)...);
}
}
private:
/*! \brief Callable object. */
Functor func_;
/*! \brief Range object specifying parallel threads index range. */
Range range_;
/*! \brief Whether resharding for vectors is required. */
bool reshard_;
GPUDistribution distribution_;
};
public:
/*!
* \brief Initialize a Transform object.
*
* \tparam Functor A callable object type.
* \return A Evaluator having one method Eval.
*
* \param func A callable object, accepting a size_t thread index,
* followed by a set of Span classes.
* \param range Range object specifying parallel threads index range.
* \param devices GPUSet specifying GPUs to use, when compiling for CPU,
* this should be GPUSet::Empty().
* \param reshard Whether Reshard for HostDeviceVector is needed.
*/
template <typename Functor>
static Evaluator<Functor> Init(Functor func, Range const range,
GPUSet const devices,
bool const reshard = true) {
return Evaluator<Functor> {func, std::move(range), std::move(devices), reshard};
}
template <typename Functor>
static Evaluator<Functor> Init(Functor func, Range const range,
GPUDistribution const dist,
bool const reshard = true) {
return Evaluator<Functor> {func, std::move(range), std::move(dist), reshard};
}
};
} // namespace common
} // namespace xgboost
#endif // XGBOOST_COMMON_TRANSFORM_H_
|
Example_target_ptr_map.2.c | /*
* @@name: target_ptr_map.2c
* @@type: C
* @@compilable: yes
* @@linkable: yes
* @@expect: success
* @@version: omp_5.0
*/
#include <stdio.h>
#include <stdlib.h>
#define N 100
#pragma omp declare target
int *p;
extern void use_arg_p(int *p, int n);
extern void use_global_p( int n);
#pragma omp end declare target
/* Demonstrates OpenMP 5.0 pointer attachment: mapping p[:N] attaches the
 * device copy of the declare-target pointer p to the mapped array section. */
int main()
{
int i;
/* NOTE(review): malloc result is not checked — fine for a spec example. */
p = (int *)malloc(sizeof(int)*N);
#pragma omp target map(p[:N]) // device p attached to array section
{
for (i=0; i<N; i++) p[i] = i;
use_arg_p(p, N);
use_global_p(N);
} // value of host p is preserved
/* Each element ends up as 3*i: set to i, doubled by use_arg_p, +i by
 * use_global_p; hence p[1] == 3 and p[99] == 297. */
printf(" %3.3d %3.3d\n", p[1], p[N-1]);
// 003 297 <- output
free(p);
return 0;
}
//#pragma omp declare target (optional here because of prototype spec)
/* Doubles each of the first n elements of q in place. */
void use_arg_p(int *q, int n)
{
    for (int k = 0; k < n; ++k) {
        q[k] = q[k] * 2;
    }
}
/* Adds its index to each of the first n elements of the global array p.
 * Valid inside a target region because the declare-target pointer p was
 * attached to valid device memory by the enclosing map(p[:N]). */
void use_global_p(int n)
{
    for (int k = 0; k < n; ++k) {
        p[k] += k;
    }
}
//#pragma omp end declare target (optional here because of prototype spec)
|
GB_unop__identity_fc32_uint8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc32_uint8)
// op(A') function: GB (_unop_tran__identity_fc32_uint8)
// C type: GxB_FC32_t
// A type: uint8_t
// cast: GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): cast each uint8 entry of A to single complex.
// (Generated source — upstream changes belong in the Generator/ template.)
GrB_Info GB (_unop_apply__identity_fc32_uint8)
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const uint8_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// dense/sparse case: every one of the anz entries is present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint8_t aij = Ax [p] ;
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint8_t aij = Ax [p] ;
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the body is textually included from the shared
// transpose template, which uses the GB_* macros defined above.
// (Generated source — upstream changes belong in the Generator/ template.)
GrB_Info GB (_unop_tran__identity_fc32_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
bondfree.c | /*
* This file is part of the GROMACS molecular simulation package.
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team,
* check out http://www.gromacs.org for more information.
* Copyright (c) 2012,2013, by the GROMACS development team, led by
* David van der Spoel, Berk Hess, Erik Lindahl, and including many
* others, as listed in the AUTHORS file in the top-level source
* directory and at http://www.gromacs.org.
*
* GROMACS is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either version 2.1
* of the License, or (at your option) any later version.
*
* GROMACS is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with GROMACS; if not, see
* http://www.gnu.org/licenses, or write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* If you want to redistribute modifications to GROMACS, please
* consider that scientific software is very special. Version
* control is crucial - bugs must be traceable. We will be happy to
* consider code for inclusion in the official distribution, but
* derived work must not be called official GROMACS. Details are found
* in the README & COPYING files - if they are missing, get the
* official version at http://www.gromacs.org.
*
* To help us fund GROMACS development, we humbly ask that you cite
* the research papers on the package. Check out http://www.gromacs.org.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <math.h>
#include <assert.h>
#include "physics.h"
#include "vec.h"
#include "maths.h"
#include "txtdump.h"
#include "bondf.h"
#include "smalloc.h"
#include "pbc.h"
#include "ns.h"
#include "macros.h"
#include "names.h"
#include "gmx_fatal.h"
#include "mshift.h"
#include "main.h"
#include "disre.h"
#include "orires.h"
#include "force.h"
#include "nonbonded.h"
/* Include the SIMD macro file and then check for support */
#include "gmx_simd_macros.h"
#if defined GMX_HAVE_SIMD_MACROS && defined GMX_SIMD_HAVE_TRIGONOMETRIC
#define SIMD_BONDEDS
#include "gmx_simd_vec.h"
#endif
/* Find a better place for this? */
/* 16x16 coefficient matrix used by the CMAP correction; presumably the
 * standard bicubic-patch interpolation coefficients — confirm against the
 * CMAP dihedral code that consumes it. */
const int cmap_coeff_matrix[] = {
1, 0, -3, 2, 0, 0, 0, 0, -3, 0, 9, -6, 2, 0, -6, 4,
0, 0, 0, 0, 0, 0, 0, 0, 3, 0, -9, 6, -2, 0, 6, -4,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, -6, 0, 0, -6, 4,
0, 0, 3, -2, 0, 0, 0, 0, 0, 0, -9, 6, 0, 0, 6, -4,
0, 0, 0, 0, 1, 0, -3, 2, -2, 0, 6, -4, 1, 0, -3, 2,
0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 3, -2, 1, 0, -3, 2,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -3, 2, 0, 0, 3, -2,
0, 0, 0, 0, 0, 0, 3, -2, 0, 0, -6, 4, 0, 0, 3, -2,
0, 1, -2, 1, 0, 0, 0, 0, 0, -3, 6, -3, 0, 2, -4, 2,
0, 0, 0, 0, 0, 0, 0, 0, 0, 3, -6, 3, 0, -2, 4, -2,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -3, 3, 0, 0, 2, -2,
0, 0, -1, 1, 0, 0, 0, 0, 0, 0, 3, -3, 0, 0, -2, 2,
0, 0, 0, 0, 0, 1, -2, 1, 0, -2, 4, -2, 0, 1, -2, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 2, -1, 0, 1, -2, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, 0, 0, -1, 1,
0, 0, 0, 0, 0, 0, -1, 1, 0, 0, 2, -2, 0, 0, -1, 1
};
/* Returns the 1-based global atom number for local atom i.
 * When no local-to-global lookup table is provided, atoms are assumed to be
 * numbered consecutively, so the result is simply i + 1. */
int glatnr(int *global_atom_index, int i)
{
    return (global_atom_index == NULL) ? i + 1 : global_atom_index[i] + 1;
}
/* Computes dx = xi - xj, applying periodic boundary conditions when pbc is
 * given. Returns the shift index (CENTRAL when PBC is not used). */
static int pbc_rvec_sub(const t_pbc *pbc, const rvec xi, const rvec xj, rvec dx)
{
    if (pbc != NULL)
    {
        return pbc_dx_aiuc(pbc, xi, xj, dx);
    }
    rvec_sub(xi, xj, dx);
    return CENTRAL;
}
#ifdef SIMD_BONDEDS
/* SIMD PBC data structure, containing 1/boxdiag and the box vectors */
typedef struct {
gmx_mm_pr inv_bzz; /* reciprocal of the box diagonal elements; zero when */
gmx_mm_pr inv_byy; /* PBC is disabled (see set_pbc_simd)                 */
gmx_mm_pr inv_bxx;
gmx_mm_pr bzx;     /* lower-triangular box matrix elements, broadcast    */
gmx_mm_pr bzy;     /* into SIMD registers; off-diagonal terms are only   */
gmx_mm_pr bzz;     /* non-zero for triclinic boxes                       */
gmx_mm_pr byx;
gmx_mm_pr byy;
gmx_mm_pr bxx;
} pbc_simd_t;
/* Set the SIMD pbc data from a normal t_pbc struct */
/* Broadcasts the (periodic dimensions of the) box into pbc_simd; with
 * pbc == NULL everything is zeroed, which makes pbc_dx_simd a no-op. */
static void set_pbc_simd(const t_pbc *pbc, pbc_simd_t *pbc_simd)
{
rvec inv_bdiag;
int d;
/* Setting inv_bdiag to 0 effectively turns off PBC */
clear_rvec(inv_bdiag);
if (pbc != NULL)
{
/* Only the dimensions with PBC get a non-zero reciprocal diagonal */
for (d = 0; d < pbc->ndim_ePBC; d++)
{
inv_bdiag[d] = 1.0/pbc->box[d][d];
}
}
pbc_simd->inv_bzz = gmx_set1_pr(inv_bdiag[ZZ]);
pbc_simd->inv_byy = gmx_set1_pr(inv_bdiag[YY]);
pbc_simd->inv_bxx = gmx_set1_pr(inv_bdiag[XX]);
if (pbc != NULL)
{
pbc_simd->bzx = gmx_set1_pr(pbc->box[ZZ][XX]);
pbc_simd->bzy = gmx_set1_pr(pbc->box[ZZ][YY]);
pbc_simd->bzz = gmx_set1_pr(pbc->box[ZZ][ZZ]);
pbc_simd->byx = gmx_set1_pr(pbc->box[YY][XX]);
pbc_simd->byy = gmx_set1_pr(pbc->box[YY][YY]);
pbc_simd->bxx = gmx_set1_pr(pbc->box[XX][XX]);
}
else
{
pbc_simd->bzx = gmx_setzero_pr();
pbc_simd->bzy = gmx_setzero_pr();
pbc_simd->bzz = gmx_setzero_pr();
pbc_simd->byx = gmx_setzero_pr();
pbc_simd->byy = gmx_setzero_pr();
pbc_simd->bxx = gmx_setzero_pr();
}
}
/* Correct distance vector *dx,*dy,*dz for PBC using SIMD.
 * The shifts are applied in z, y, x order: the box stored in pbc_simd
 * only has off-diagonal elements below the diagonal (bzx, bzy, byx),
 * so correcting z first keeps the subsequent y and x corrections valid.
 * With inv_b* set to zero (PBC off) every rounded shift is zero and the
 * vector is left unchanged.
 */
static gmx_inline void
pbc_dx_simd(gmx_mm_pr *dx, gmx_mm_pr *dy, gmx_mm_pr *dz,
            const pbc_simd_t *pbc)
{
    gmx_mm_pr sh;
    /* sh = nearest integer number of z-box shifts */
    sh = gmx_round_pr(gmx_mul_pr(*dz, pbc->inv_bzz));
    *dx = gmx_nmsub_pr(sh, pbc->bzx, *dx);
    *dy = gmx_nmsub_pr(sh, pbc->bzy, *dy);
    *dz = gmx_nmsub_pr(sh, pbc->bzz, *dz);
    sh = gmx_round_pr(gmx_mul_pr(*dy, pbc->inv_byy));
    *dx = gmx_nmsub_pr(sh, pbc->byx, *dx);
    *dy = gmx_nmsub_pr(sh, pbc->byy, *dy);
    sh = gmx_round_pr(gmx_mul_pr(*dx, pbc->inv_bxx));
    *dx = gmx_nmsub_pr(sh, pbc->bxx, *dx);
}
#endif /* SIMD_BONDEDS */
/*
* Morse potential bond by Frank Everdij
*
* Three parameters needed:
*
* b0 = equilibrium distance in nm
* be = beta in nm^-1 (actually, it's nu_e*Sqrt(2*pi*pi*mu/D_e))
* cb = well depth in kJ/mol
*
* Note: the potential is referenced to be +cb at infinite separation
* and zero at the equilibrium distance!
*/
/* Morse bond potential (see the comment block above for the parameters).
 * Returns the total bond energy; accumulates forces into f, shift
 * forces into fshift and the free-energy derivative into *dvdlambda.
 * b0, beta and cb are interpolated linearly between states A and B.
 */
real morse_bonds(int nbonds,
                 const t_iatom forceatoms[], const t_iparams forceparams[],
                 const rvec x[], rvec f[], rvec fshift[],
                 const t_pbc *pbc, const t_graph *g,
                 real lambda, real *dvdlambda,
                 const t_mdatoms *md, t_fcdata *fcd,
                 int *global_atom_index)
{
    const real one = 1.0;
    const real two = 2.0;
    real dr, dr2, temp, omtemp, cbomtemp, fbond, vbond, fij, vtot;
    real b0, be, cb, b0A, beA, cbA, b0B, beB, cbB, L1;
    rvec dx;
    int i, m, ki, type, ai, aj;
    ivec dt;
    vtot = 0.0;
    /* forceatoms is a flat (type, ai, aj) triplet list */
    for (i = 0; (i < nbonds); )
    {
        type = forceatoms[i++];
        ai = forceatoms[i++];
        aj = forceatoms[i++];
        b0A = forceparams[type].morse.b0A;
        beA = forceparams[type].morse.betaA;
        cbA = forceparams[type].morse.cbA;
        b0B = forceparams[type].morse.b0B;
        beB = forceparams[type].morse.betaB;
        cbB = forceparams[type].morse.cbB;
        L1 = one-lambda;                          /* 1 */
        b0 = L1*b0A + lambda*b0B;                 /* 3 */
        be = L1*beA + lambda*beB;                 /* 3 */
        cb = L1*cbA + lambda*cbB;                 /* 3 */
        ki = pbc_rvec_sub(pbc, x[ai], x[aj], dx); /* 3 */
        dr2 = iprod(dx, dx);                      /* 5 */
        dr = dr2*gmx_invsqrt(dr2);                /* 10 */
        temp = exp(-be*(dr-b0));                  /* 12 */
        if (temp == one)
        {
            /* bonds are constrained. This may _not_ include bond constraints
             * if they are lambda dependent.
             * dr == b0 here, so the energy and force are zero; only the
             * well-depth part of dV/dlambda remains. */
            *dvdlambda += cbB-cbA;
            continue;
        }
        omtemp = one-temp;                                                                                        /* 1 */
        cbomtemp = cb*omtemp;                                                                                     /* 1 */
        vbond = cbomtemp*omtemp;                                                                                  /* 1 */
        fbond = -two*be*temp*cbomtemp*gmx_invsqrt(dr2);                                                           /* 9 */
        vtot += vbond;                                                                                            /* 1 */
        *dvdlambda += (cbB - cbA) * omtemp * omtemp - (2-2*omtemp)*omtemp * cb * ((b0B-b0A)*be - (beB-beA)*(dr-b0)); /* 15 */
        if (g)
        {
            /* With a molecule graph, recompute the shift index for fshift */
            ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, aj), dt);
            ki = IVEC2IS(dt);
        }
        for (m = 0; (m < DIM); m++) /* 15 */
        {
            fij = fbond*dx[m];
            f[ai][m] += fij;
            f[aj][m] -= fij;
            fshift[ki][m] += fij;
            fshift[CENTRAL][m] -= fij;
        }
    } /* 83 TOTAL */
    return vtot;
}
/* Cubic bond potential: V = kb*dist^2 + kb*kcub*dist^3 with
 * dist = r - b0.  Returns the total energy; accumulates forces into f
 * and shift forces into fshift.  No free-energy perturbation.
 */
real cubic_bonds(int nbonds,
                 const t_iatom forceatoms[], const t_iparams forceparams[],
                 const rvec x[], rvec f[], rvec fshift[],
                 const t_pbc *pbc, const t_graph *g,
                 real lambda, real *dvdlambda,
                 const t_mdatoms *md, t_fcdata *fcd,
                 int *global_atom_index)
{
    const real three = 3.0;
    const real two = 2.0;
    real kb, b0, kcub;
    real dr, dr2, dist, kdist, kdist2, fbond, vbond, fij, vtot;
    rvec dx;
    int i, m, ki, type, ai, aj;
    ivec dt;
    vtot = 0.0;
    for (i = 0; (i < nbonds); )
    {
        type = forceatoms[i++];
        ai = forceatoms[i++];
        aj = forceatoms[i++];
        b0 = forceparams[type].cubic.b0;
        kb = forceparams[type].cubic.kb;
        kcub = forceparams[type].cubic.kcub;
        ki = pbc_rvec_sub(pbc, x[ai], x[aj], dx); /* 3 */
        dr2 = iprod(dx, dx);                      /* 5 */
        /* Zero distance: force direction undefined (and fbond divides
         * by dr), so skip this bond entirely */
        if (dr2 == 0.0)
        {
            continue;
        }
        dr = dr2*gmx_invsqrt(dr2); /* 10 */
        dist = dr-b0;
        kdist = kb*dist;
        kdist2 = kdist*dist;
        vbond = kdist2 + kcub*kdist2*dist;
        /* fbond = -(dV/dr)/r, applied per component below */
        fbond = -(two*kdist + three*kdist2*kcub)/dr;
        vtot += vbond; /* 21 */
        if (g)
        {
            ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, aj), dt);
            ki = IVEC2IS(dt);
        }
        for (m = 0; (m < DIM); m++) /* 15 */
        {
            fij = fbond*dx[m];
            f[ai][m] += fij;
            f[aj][m] -= fij;
            fshift[ki][m] += fij;
            fshift[CENTRAL][m] -= fij;
        }
    } /* 54 TOTAL */
    return vtot;
}
/* FENE (finitely extensible nonlinear elastic) bond:
 * V = -0.5*kb*bm^2*ln(1 - r^2/bm^2).  Diverges as r -> bm; a fatal
 * error is raised when r^2 >= bm^2.  Returns the total energy and
 * accumulates forces/shift forces.  No free-energy perturbation.
 */
real FENE_bonds(int nbonds,
                const t_iatom forceatoms[], const t_iparams forceparams[],
                const rvec x[], rvec f[], rvec fshift[],
                const t_pbc *pbc, const t_graph *g,
                real lambda, real *dvdlambda,
                const t_mdatoms *md, t_fcdata *fcd,
                int *global_atom_index)
{
    const real half = 0.5;
    const real one = 1.0;
    real bm, kb;
    real dr, dr2, bm2, omdr2obm2, fbond, vbond, fij, vtot;
    rvec dx;
    int i, m, ki, type, ai, aj;
    ivec dt;
    vtot = 0.0;
    for (i = 0; (i < nbonds); )
    {
        type = forceatoms[i++];
        ai = forceatoms[i++];
        aj = forceatoms[i++];
        bm = forceparams[type].fene.bm;
        kb = forceparams[type].fene.kb;
        ki = pbc_rvec_sub(pbc, x[ai], x[aj], dx); /* 3 */
        dr2 = iprod(dx, dx);                      /* 5 */
        if (dr2 == 0.0)
        {
            continue;
        }
        bm2 = bm*bm;
        if (dr2 >= bm2)
        {
            gmx_fatal(FARGS,
                      "r^2 (%f) >= bm^2 (%f) in FENE bond between atoms %d and %d",
                      dr2, bm2,
                      glatnr(global_atom_index, ai),
                      glatnr(global_atom_index, aj));
        }
        omdr2obm2 = one - dr2/bm2;
        vbond = -half*kb*bm2*log(omdr2obm2);
        /* fbond is already force/distance: F_m = -kb/(1-r^2/bm^2) * dx[m],
         * so no extra 1/r factor is needed below */
        fbond = -kb/omdr2obm2;
        vtot += vbond; /* 35 */
        if (g)
        {
            ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, aj), dt);
            ki = IVEC2IS(dt);
        }
        for (m = 0; (m < DIM); m++) /* 15 */
        {
            fij = fbond*dx[m];
            f[ai][m] += fij;
            f[aj][m] -= fij;
            fshift[ki][m] += fij;
            fshift[CENTRAL][m] -= fij;
        }
    } /* 58 TOTAL */
    return vtot;
}
/* Generic harmonic potential with linear free-energy interpolation
 * between state A (kA, xA) and state B (kB, xB).
 * On return *V holds the energy 0.5*k*(x-x0)^2 and *F the (scalar)
 * force -k*(x-x0); the return value is dV/dlambda.
 * That's 19 flops.
 */
real harmonic(real kA, real kB, real xA, real xB, real x, real lambda,
              real *V, real *F)
{
    real L1, kk, x0, dx, dx2;

    L1  = 1.0 - lambda;
    kk  = L1*kA + lambda*kB;
    x0  = L1*xA + lambda*xB;
    dx  = x - x0;
    dx2 = dx*dx;

    *F = -kk*dx;
    *V = 0.5*kk*dx2;

    return 0.5*(kB - kA)*dx2 + (xA - xB)*kk*dx;
}
/* Harmonic bonds: V = 0.5*k*(r - r0)^2 with k and r0 interpolated
 * between states A and B by harmonic().  Returns the total energy;
 * accumulates forces, shift forces and *dvdlambda.
 */
real bonds(int nbonds,
           const t_iatom forceatoms[], const t_iparams forceparams[],
           const rvec x[], rvec f[], rvec fshift[],
           const t_pbc *pbc, const t_graph *g,
           real lambda, real *dvdlambda,
           const t_mdatoms *md, t_fcdata *fcd,
           int *global_atom_index)
{
    int i, m, ki, ai, aj, type;
    real dr, dr2, fbond, vbond, fij, vtot;
    rvec dx;
    ivec dt;
    vtot = 0.0;
    for (i = 0; (i < nbonds); )
    {
        type = forceatoms[i++];
        ai = forceatoms[i++];
        aj = forceatoms[i++];
        ki = pbc_rvec_sub(pbc, x[ai], x[aj], dx); /* 3 */
        dr2 = iprod(dx, dx);                      /* 5 */
        dr = dr2*gmx_invsqrt(dr2);                /* 10 */
        *dvdlambda += harmonic(forceparams[type].harmonic.krA,
                               forceparams[type].harmonic.krB,
                               forceparams[type].harmonic.rA,
                               forceparams[type].harmonic.rB,
                               dr, lambda, &vbond, &fbond); /* 19 */
        /* Zero distance: the force direction is undefined, skip */
        if (dr2 == 0.0)
        {
            continue;
        }
        vtot += vbond;             /* 1*/
        fbond *= gmx_invsqrt(dr2); /* 6 */
#ifdef DEBUG
        if (debug)
        {
            fprintf(debug, "BONDS: dr = %10g vbond = %10g fbond = %10g\n",
                    dr, vbond, fbond);
        }
#endif
        if (g)
        {
            ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, aj), dt);
            ki = IVEC2IS(dt);
        }
        for (m = 0; (m < DIM); m++) /* 15 */
        {
            fij = fbond*dx[m];
            f[ai][m] += fij;
            f[aj][m] -= fij;
            fshift[ki][m] += fij;
            fshift[CENTRAL][m] -= fij;
        }
    } /* 59 TOTAL */
    return vtot;
}
/* Flat-bottom distance restraints with four regions defined by
 * low <= up1 <= up2 (all lambda-interpolated):
 *   r < low        harmonic, centered at low
 *   low..up1       zero energy and force (the flat bottom)
 *   up1..up2       harmonic, centered at up1
 *   r > up2        linear continuation of the up1..up2 branch
 * Returns the total energy; accumulates forces, shift forces and
 * *dvdlambda.
 */
real restraint_bonds(int nbonds,
                     const t_iatom forceatoms[], const t_iparams forceparams[],
                     const rvec x[], rvec f[], rvec fshift[],
                     const t_pbc *pbc, const t_graph *g,
                     real lambda, real *dvdlambda,
                     const t_mdatoms *md, t_fcdata *fcd,
                     int *global_atom_index)
{
    int i, m, ki, ai, aj, type;
    real dr, dr2, fbond, vbond, fij, vtot;
    real L1;
    real low, dlow, up1, dup1, up2, dup2, k, dk;
    real drh, drh2;
    rvec dx;
    ivec dt;
    L1 = 1.0 - lambda;
    vtot = 0.0;
    for (i = 0; (i < nbonds); )
    {
        type = forceatoms[i++];
        ai = forceatoms[i++];
        aj = forceatoms[i++];
        ki = pbc_rvec_sub(pbc, x[ai], x[aj], dx); /* 3 */
        dr2 = iprod(dx, dx);                      /* 5 */
        dr = dr2*gmx_invsqrt(dr2);                /* 10 */
        /* Interpolate the region bounds and force constant; d* are
         * their lambda-derivatives (B - A) */
        low = L1*forceparams[type].restraint.lowA + lambda*forceparams[type].restraint.lowB;
        dlow = -forceparams[type].restraint.lowA + forceparams[type].restraint.lowB;
        up1 = L1*forceparams[type].restraint.up1A + lambda*forceparams[type].restraint.up1B;
        dup1 = -forceparams[type].restraint.up1A + forceparams[type].restraint.up1B;
        up2 = L1*forceparams[type].restraint.up2A + lambda*forceparams[type].restraint.up2B;
        dup2 = -forceparams[type].restraint.up2A + forceparams[type].restraint.up2B;
        k = L1*forceparams[type].restraint.kA + lambda*forceparams[type].restraint.kB;
        dk = -forceparams[type].restraint.kA + forceparams[type].restraint.kB;
        /* 24 */
        if (dr < low)
        {
            drh = dr - low;
            drh2 = drh*drh;
            vbond = 0.5*k*drh2;
            fbond = -k*drh;
            *dvdlambda += 0.5*dk*drh2 - k*dlow*drh;
        } /* 11 */
        else if (dr <= up1)
        {
            /* Inside the flat bottom: no restraint contribution */
            vbond = 0;
            fbond = 0;
        }
        else if (dr <= up2)
        {
            drh = dr - up1;
            drh2 = drh*drh;
            vbond = 0.5*k*drh2;
            fbond = -k*drh;
            *dvdlambda += 0.5*dk*drh2 - k*dup1*drh;
        } /* 11 */
        else
        {
            /* Beyond up2: constant force, linear potential */
            drh = dr - up2;
            vbond = k*(up2 - up1)*(0.5*(up2 - up1) + drh);
            fbond = -k*(up2 - up1);
            *dvdlambda += dk*(up2 - up1)*(0.5*(up2 - up1) + drh)
                + k*(dup2 - dup1)*(up2 - up1 + drh)
                - k*(up2 - up1)*dup2;
        }
        if (dr2 == 0.0)
        {
            continue;
        }
        vtot += vbond;             /* 1*/
        fbond *= gmx_invsqrt(dr2); /* 6 */
#ifdef DEBUG
        if (debug)
        {
            fprintf(debug, "BONDS: dr = %10g vbond = %10g fbond = %10g\n",
                    dr, vbond, fbond);
        }
#endif
        if (g)
        {
            ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, aj), dt);
            ki = IVEC2IS(dt);
        }
        for (m = 0; (m < DIM); m++) /* 15 */
        {
            fij = fbond*dx[m];
            f[ai][m] += fij;
            f[aj][m] -= fij;
            fshift[ki][m] += fij;
            fshift[CENTRAL][m] -= fij;
        }
    } /* 59 TOTAL */
    return vtot;
}
/* Isotropic shell (Drude) polarization: a harmonic spring between core
 * ai and shell aj with force constant ksh = q_shell^2/(4*pi*eps0*alpha)
 * and zero equilibrium length.  Returns the total energy; accumulates
 * forces, shift forces and *dvdlambda.
 */
real polarize(int nbonds,
              const t_iatom forceatoms[], const t_iparams forceparams[],
              const rvec x[], rvec f[], rvec fshift[],
              const t_pbc *pbc, const t_graph *g,
              real lambda, real *dvdlambda,
              const t_mdatoms *md, t_fcdata *fcd,
              int *global_atom_index)
{
    int i, m, ki, ai, aj, type;
    real dr, dr2, fbond, vbond, fij, vtot, ksh;
    rvec dx;
    ivec dt;
    vtot = 0.0;
    for (i = 0; (i < nbonds); )
    {
        type = forceatoms[i++];
        ai = forceatoms[i++];
        aj = forceatoms[i++];
        ksh = sqr(md->chargeA[aj])*ONE_4PI_EPS0/forceparams[type].polarize.alpha;
        if (debug)
        {
            fprintf(debug, "POL: local ai = %d aj = %d ksh = %.3f\n", ai, aj, ksh);
        }
        ki = pbc_rvec_sub(pbc, x[ai], x[aj], dx);                            /* 3 */
        dr2 = iprod(dx, dx);                                                 /* 5 */
        dr = dr2*gmx_invsqrt(dr2);                                           /* 10 */
        *dvdlambda += harmonic(ksh, ksh, 0, 0, dr, lambda, &vbond, &fbond);  /* 19 */
        if (dr2 == 0.0)
        {
            continue;
        }
        vtot += vbond;             /* 1*/
        fbond *= gmx_invsqrt(dr2); /* 6 */
        if (g)
        {
            ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, aj), dt);
            ki = IVEC2IS(dt);
        }
        for (m = 0; (m < DIM); m++) /* 15 */
        {
            fij = fbond*dx[m];
            f[ai][m] += fij;
            f[aj][m] -= fij;
            fshift[ki][m] += fij;
            fshift[CENTRAL][m] -= fij;
        }
    } /* 59 TOTAL */
    return vtot;
}
/* Anharmonic shell polarization: like polarize(), but beyond the
 * cutoff displacement drcut a quartic wall khyp*(dr-drcut)^4 is added
 * to keep the shell from drifting too far from its core.
 */
real anharm_polarize(int nbonds,
                     const t_iatom forceatoms[], const t_iparams forceparams[],
                     const rvec x[], rvec f[], rvec fshift[],
                     const t_pbc *pbc, const t_graph *g,
                     real lambda, real *dvdlambda,
                     const t_mdatoms *md, t_fcdata *fcd,
                     int *global_atom_index)
{
    int i, m, ki, ai, aj, type;
    real dr, dr2, fbond, vbond, fij, vtot, ksh, khyp, drcut, ddr, ddr3;
    rvec dx;
    ivec dt;
    vtot = 0.0;
    for (i = 0; (i < nbonds); )
    {
        type = forceatoms[i++];
        ai = forceatoms[i++];
        aj = forceatoms[i++];
        /* Harmonic spring constant from the shell charge and polarizability */
        ksh = sqr(md->chargeA[aj])*ONE_4PI_EPS0/forceparams[type].anharm_polarize.alpha; /* 7*/
        khyp = forceparams[type].anharm_polarize.khyp;
        drcut = forceparams[type].anharm_polarize.drcut;
        if (debug)
        {
            fprintf(debug, "POL: local ai = %d aj = %d ksh = %.3f\n", ai, aj, ksh);
        }
        ki = pbc_rvec_sub(pbc, x[ai], x[aj], dx);                            /* 3 */
        dr2 = iprod(dx, dx);                                                 /* 5 */
        dr = dr2*gmx_invsqrt(dr2);                                           /* 10 */
        *dvdlambda += harmonic(ksh, ksh, 0, 0, dr, lambda, &vbond, &fbond);  /* 19 */
        if (dr2 == 0.0)
        {
            continue;
        }
        if (dr > drcut)
        {
            /* Quartic wall: V += khyp*ddr^4, F -= 4*khyp*ddr^3 */
            ddr = dr-drcut;
            ddr3 = ddr*ddr*ddr;
            vbond += khyp*ddr*ddr3;
            fbond -= 4*khyp*ddr3;
        }
        fbond *= gmx_invsqrt(dr2); /* 6 */
        vtot += vbond;             /* 1*/
        if (g)
        {
            ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, aj), dt);
            ki = IVEC2IS(dt);
        }
        for (m = 0; (m < DIM); m++) /* 15 */
        {
            fij = fbond*dx[m];
            f[ai][m] += fij;
            f[aj][m] -= fij;
            fshift[ki][m] += fij;
            fshift[CENTRAL][m] -= fij;
        }
    } /* 72 TOTAL */
    return vtot;
}
/* Anisotropic water polarization.  Each entry couples six atoms
 * (O, H1, H2, dummy D, shell S); the shell displacement relative to
 * the dummy is decomposed in a molecular frame (normal, HH, OD axes)
 * and restrained with per-axis force constants kk[XX/YY/ZZ] derived
 * from the shell charge and the three polarizabilities.  Returns
 * 0.5 * sum(dx.k.dx); forces act only on S and D (no shift forces).
 * All entries must share the same interaction type (type0).
 */
real water_pol(int nbonds,
               const t_iatom forceatoms[], const t_iparams forceparams[],
               const rvec x[], rvec f[], rvec fshift[],
               const t_pbc *pbc, const t_graph *g,
               real lambda, real *dvdlambda,
               const t_mdatoms *md, t_fcdata *fcd,
               int *global_atom_index)
{
    /* This routine implements anisotropic polarizibility for water, through
     * a shell connected to a dummy with spring constant that differ in the
     * three spatial dimensions in the molecular frame.
     */
    int i, m, aO, aH1, aH2, aD, aS, type, type0;
    rvec dOH1, dOH2, dHH, dOD, dDS, nW, kk, dx, kdx, proj;
#ifdef DEBUG
    rvec df;
#endif
    real vtot, fij, r_HH, r_OD, r_nW, tx, ty, tz, qS;
    vtot = 0.0;
    if (nbonds > 0)
    {
        /* Parameters are taken once from the first entry */
        type0 = forceatoms[0];
        aS = forceatoms[5];
        qS = md->chargeA[aS];
        kk[XX] = sqr(qS)*ONE_4PI_EPS0/forceparams[type0].wpol.al_x;
        kk[YY] = sqr(qS)*ONE_4PI_EPS0/forceparams[type0].wpol.al_y;
        kk[ZZ] = sqr(qS)*ONE_4PI_EPS0/forceparams[type0].wpol.al_z;
        r_HH = 1.0/forceparams[type0].wpol.rHH;
        r_OD = 1.0/forceparams[type0].wpol.rOD;
        if (debug)
        {
            fprintf(debug, "WPOL: qS  = %10.5f aS = %5d\n", qS, aS);
            fprintf(debug, "WPOL: kk  = %10.3f        %10.3f        %10.3f\n",
                    kk[XX], kk[YY], kk[ZZ]);
            fprintf(debug, "WPOL: rOH = %10.3f  rHH = %10.3f  rOD = %10.3f\n",
                    forceparams[type0].wpol.rOH,
                    forceparams[type0].wpol.rHH,
                    forceparams[type0].wpol.rOD);
        }
        for (i = 0; (i < nbonds); i += 6)
        {
            type = forceatoms[i];
            if (type != type0)
            {
                gmx_fatal(FARGS, "Sorry, type = %d, type0 = %d, file = %s, line = %d",
                          type, type0, __FILE__, __LINE__);
            }
            aO  = forceatoms[i+1];
            aH1 = forceatoms[i+2];
            aH2 = forceatoms[i+3];
            aD  = forceatoms[i+4];
            aS  = forceatoms[i+5];
            /* Compute vectors describing the water frame */
            rvec_sub(x[aH1], x[aO], dOH1);
            rvec_sub(x[aH2], x[aO], dOH2);
            rvec_sub(x[aH2], x[aH1], dHH);
            rvec_sub(x[aD], x[aO], dOD);
            rvec_sub(x[aS], x[aD], dDS);
            cprod(dOH1, dOH2, nW);
            /* Compute inverse length of normal vector
             * (this one could be precomputed, but I'm too lazy now)
             */
            r_nW = gmx_invsqrt(iprod(nW, nW));
            /* This is for precision, but does not make a big difference,
             * it can go later.
             */
            r_OD = gmx_invsqrt(iprod(dOD, dOD));
            /* Normalize the vectors in the water frame */
            svmul(r_nW, nW, nW);
            svmul(r_HH, dHH, dHH);
            svmul(r_OD, dOD, dOD);
            /* Compute displacement of shell along components of the vector */
            dx[ZZ] = iprod(dDS, dOD);
            /* Compute projection on the XY plane: dDS - dx[ZZ]*dOD */
            for (m = 0; (m < DIM); m++)
            {
                proj[m] = dDS[m]-dx[ZZ]*dOD[m];
            }
            /*dx[XX] = iprod(dDS,nW);
               dx[YY] = iprod(dDS,dHH);*/
            dx[XX] = iprod(proj, nW);
            for (m = 0; (m < DIM); m++)
            {
                proj[m] -= dx[XX]*nW[m];
            }
            dx[YY] = iprod(proj, dHH);
            /*#define DEBUG*/
#ifdef DEBUG
            if (debug)
            {
                fprintf(debug, "WPOL: dx2=%10g  dy2=%10g  dz2=%10g  sum=%10g  dDS^2=%10g\n",
                        sqr(dx[XX]), sqr(dx[YY]), sqr(dx[ZZ]), iprod(dx, dx), iprod(dDS, dDS));
                fprintf(debug, "WPOL: dHH=(%10g,%10g,%10g)\n", dHH[XX], dHH[YY], dHH[ZZ]);
                fprintf(debug, "WPOL: dOD=(%10g,%10g,%10g), 1/r_OD = %10g\n",
                        dOD[XX], dOD[YY], dOD[ZZ], 1/r_OD);
                fprintf(debug, "WPOL: nW =(%10g,%10g,%10g), 1/r_nW = %10g\n",
                        nW[XX], nW[YY], nW[ZZ], 1/r_nW);
                fprintf(debug, "WPOL: dx  =%10g, dy  =%10g, dz  =%10g\n",
                        dx[XX], dx[YY], dx[ZZ]);
                fprintf(debug, "WPOL: dDSx=%10g, dDSy=%10g, dDSz=%10g\n",
                        dDS[XX], dDS[YY], dDS[ZZ]);
            }
#endif
            /* Now compute the forces and energy */
            kdx[XX] = kk[XX]*dx[XX];
            kdx[YY] = kk[YY]*dx[YY];
            kdx[ZZ] = kk[ZZ]*dx[ZZ];
            vtot += iprod(dx, kdx);
            for (m = 0; (m < DIM); m++)
            {
                /* This is a tensor operation but written out for speed */
                tx = nW[m]*kdx[XX];
                ty = dHH[m]*kdx[YY];
                tz = dOD[m]*kdx[ZZ];
                fij = -tx-ty-tz;
#ifdef DEBUG
                df[m] = fij;
#endif
                f[aS][m] += fij;
                f[aD][m] -= fij;
            }
#ifdef DEBUG
            if (debug)
            {
                fprintf(debug, "WPOL: vwpol=%g\n", 0.5*iprod(dx, kdx));
                fprintf(debug, "WPOL: df = (%10g, %10g, %10g)\n", df[XX], df[YY], df[ZZ]);
            }
#endif
        }
    }
    return 0.5*vtot;
}
/* One damped (Thole-screened) Coulomb interaction between particles at
 * xi and xj with charge product qq and damping factor afac.
 * Adds the pair forces to fi/fj and the shift forces to fshift.
 * Returns the interaction energy v0*v1, where v0 is the bare Coulomb
 * energy and v1 the Thole damping factor 1 - (1 + r12bar/2)*exp(-r12bar).
 */
static real do_1_thole(const rvec xi, const rvec xj, rvec fi, rvec fj,
                       const t_pbc *pbc, real qq,
                       rvec fshift[], real afac)
{
    rvec r12;
    real r12sq, r12_1, r12n, r12bar, v0, v1, fscal, ebar, fff;
    int m, t;
    t = pbc_rvec_sub(pbc, xi, xj, r12);                            /* 3 */
    r12sq = iprod(r12, r12);                                       /* 5 */
    r12_1 = gmx_invsqrt(r12sq);                                    /* 5 */
    r12bar = afac/r12_1;                                           /* 5 */
    v0 = qq*ONE_4PI_EPS0*r12_1;                                    /* 2 */
    ebar = exp(-r12bar);                                           /* 5 */
    v1 = (1-(1+0.5*r12bar)*ebar);                                  /* 4 */
    /* fscal = -(dV/dr)/r, applied per component below */
    fscal = ((v0*r12_1)*v1 - v0*0.5*afac*ebar*(r12bar+1))*r12_1;   /* 9 */
    if (debug)
    {
        fprintf(debug, "THOLE: v0 = %.3f v1 = %.3f r12= % .3f r12bar = %.3f fscal = %.3f  ebar = %.3f\n", v0, v1, 1/r12_1, r12bar, fscal, ebar);
    }
    for (m = 0; (m < DIM); m++)
    {
        fff = fscal*r12[m];
        fi[m] += fff;
        fj[m] -= fff;
        fshift[t][m] += fff;
        fshift[CENTRAL][m] -= fff;
    } /* 15 */
    return v0*v1; /* 1 */
    /* 54 */
}
/* Thole polarization: damped electrostatic interaction between two
 * core/shell pairs (a1,da1) and (a2,da2) with opposite charges.
 * Each entry contributes four screened pair interactions, computed
 * by do_1_thole(); returns the summed energy.
 */
real thole_pol(int nbonds,
               const t_iatom forceatoms[], const t_iparams forceparams[],
               const rvec x[], rvec f[], rvec fshift[],
               const t_pbc *pbc, const t_graph *g,
               real lambda, real *dvdlambda,
               const t_mdatoms *md, t_fcdata *fcd,
               int *global_atom_index)
{
    int  i, type, a1, da1, a2, da2;
    real qq, a, al1, al2, afac;
    real vtot = 0;

    for (i = 0; (i < nbonds); )
    {
        type = forceatoms[i++];
        a1   = forceatoms[i++];
        da1  = forceatoms[i++];
        a2   = forceatoms[i++];
        da2  = forceatoms[i++];

        /* Charge product of the two shells */
        qq   = md->chargeA[da1]*md->chargeA[da2];
        a    = forceparams[type].thole.a;
        al1  = forceparams[type].thole.alpha1;
        al2  = forceparams[type].thole.alpha2;
        /* Damping factor a/(al1*al2)^(1/6) */
        afac = a*pow(al1*al2, -1.0/6.0);

        /* core-core and shell-shell attract (+qq); mixed pairs repel (-qq) */
        vtot += do_1_thole(x[a1],  x[a2],  f[a1],  f[a2],  pbc,  qq, fshift, afac);
        vtot += do_1_thole(x[da1], x[a2],  f[da1], f[a2],  pbc, -qq, fshift, afac);
        vtot += do_1_thole(x[a1],  x[da2], f[a1],  f[da2], pbc, -qq, fshift, afac);
        vtot += do_1_thole(x[da1], x[da2], f[da1], f[da2], pbc,  qq, fshift, afac);
    }
    /* 290 flops */
    return vtot;
}
/* Compute the angle i-j-k (j is the vertex).
 * Outputs: r_ij and r_kj are the PBC-corrected bond vectors, *costh the
 * cosine of the angle, *t1/*t2 the shift indices of the two vectors.
 */
real bond_angle(const rvec xi, const rvec xj, const rvec xk, const t_pbc *pbc,
                rvec r_ij, rvec r_kj, real *costh,
                int *t1, int *t2)
/* Return value is the angle between the bonds i-j and j-k */
{
    /* 41 FLOPS */
    real th;
    *t1 = pbc_rvec_sub(pbc, xi, xj, r_ij); /*  3		*/
    *t2 = pbc_rvec_sub(pbc, xk, xj, r_kj); /*  3		*/
    *costh = cos_angle(r_ij, r_kj);        /* 25		*/
    th = acos(*costh);                     /* 10		*/
    /* 41 TOTAL	*/
    return th;
}
/* Harmonic angle potential V = 0.5*k*(theta - theta0)^2, with k and
 * theta0 interpolated between states A and B by harmonic().
 * Returns the total energy; accumulates forces, shift forces and
 * *dvdlambda.  The force loop is skipped for straight/degenerate
 * angles (cos^2 >= 1), where the angular force direction is undefined.
 */
real angles(int nbonds,
            const t_iatom forceatoms[], const t_iparams forceparams[],
            const rvec x[], rvec f[], rvec fshift[],
            const t_pbc *pbc, const t_graph *g,
            real lambda, real *dvdlambda,
            const t_mdatoms *md, t_fcdata *fcd,
            int *global_atom_index)
{
    int i, ai, aj, ak, t1, t2, type;
    rvec r_ij, r_kj;
    real cos_theta, cos_theta2, theta, dVdt, va, vtot;
    ivec jt, dt_ij, dt_kj;
    vtot = 0.0;
    for (i = 0; i < nbonds; )
    {
        type = forceatoms[i++];
        ai = forceatoms[i++];
        aj = forceatoms[i++];
        ak = forceatoms[i++];
        theta = bond_angle(x[ai], x[aj], x[ak], pbc,
                           r_ij, r_kj, &cos_theta, &t1, &t2); /* 41 */
        *dvdlambda += harmonic(forceparams[type].harmonic.krA,
                               forceparams[type].harmonic.krB,
                               forceparams[type].harmonic.rA*DEG2RAD,
                               forceparams[type].harmonic.rB*DEG2RAD,
                               theta, lambda, &va, &dVdt); /* 21 */
        vtot += va;
        cos_theta2 = sqr(cos_theta);
        if (cos_theta2 < 1)
        {
            int m;
            real st, sth;
            real cik, cii, ckk;
            real nrkj2, nrij2;
            real nrkj_1, nrij_1;
            rvec f_i, f_j, f_k;
            /* st = -dV/dtheta / sin(theta) */
            st = dVdt*gmx_invsqrt(1 - cos_theta2); /* 12 */
            sth = st*cos_theta;                    /* 1 */
#ifdef DEBUG
            if (debug)
            {
                fprintf(debug, "ANGLES: theta = %10g  vth = %10g  dV/dtheta = %10g\n",
                        theta*RAD2DEG, va, dVdt);
            }
#endif
            nrij2 = iprod(r_ij, r_ij); /* 5 */
            nrkj2 = iprod(r_kj, r_kj); /* 5 */
            nrij_1 = gmx_invsqrt(nrij2); /* 10 */
            nrkj_1 = gmx_invsqrt(nrkj2); /* 10 */
            cik = st*nrij_1*nrkj_1;  /* 2 */
            cii = sth*nrij_1*nrij_1; /* 2 */
            ckk = sth*nrkj_1*nrkj_1; /* 2 */
            /* f_j follows from Newton's third law: f_i + f_j + f_k = 0 */
            for (m = 0; m < DIM; m++)
            { /* 39 */
                f_i[m] = -(cik*r_kj[m] - cii*r_ij[m]);
                f_k[m] = -(cik*r_ij[m] - ckk*r_kj[m]);
                f_j[m] = -f_i[m] - f_k[m];
                f[ai][m] += f_i[m];
                f[aj][m] += f_j[m];
                f[ak][m] += f_k[m];
            }
            if (g != NULL)
            {
                copy_ivec(SHIFT_IVEC(g, aj), jt);
                ivec_sub(SHIFT_IVEC(g, ai), jt, dt_ij);
                ivec_sub(SHIFT_IVEC(g, ak), jt, dt_kj);
                t1 = IVEC2IS(dt_ij);
                t2 = IVEC2IS(dt_kj);
            }
            rvec_inc(fshift[t1], f_i);
            rvec_inc(fshift[CENTRAL], f_j);
            rvec_inc(fshift[t2], f_k);
        } /* 161 TOTAL */
    }
    return vtot;
}
#ifdef SIMD_BONDEDS
/* As angles, but using SIMD to calculate many angles at once.
 * This routine does not calculate energies and shift forces.
 */
/* SIMD version of angles(): processes UNROLL angles per iteration,
 * using only the state-A parameters (no free-energy perturbation) and
 * accumulating forces into f only (no energies, no shift forces).
 * The tail of the list is padded by repeating the last angle entry,
 * but duplicate results are only scattered back once (see the final
 * do/while loop, which stops at nbonds).
 */
static gmx_inline void
angles_noener_simd(int nbonds,
                   const t_iatom forceatoms[], const t_iparams forceparams[],
                   const rvec x[], rvec f[],
                   const t_pbc *pbc, const t_graph *g,
                   real lambda,
                   const t_mdatoms *md, t_fcdata *fcd,
                   int *global_atom_index)
{
#define UNROLL GMX_SIMD_WIDTH_HERE
    const int nfa1 = 4;
    int i, iu, s, m;
    int type, ai[UNROLL], aj[UNROLL], ak[UNROLL];
    real coeff_array[2*UNROLL+UNROLL], *coeff;
    real dr_array[2*DIM*UNROLL+UNROLL], *dr;
    real f_buf_array[6*UNROLL+UNROLL], *f_buf;
    gmx_mm_pr k_S, theta0_S;
    gmx_mm_pr rijx_S, rijy_S, rijz_S;
    gmx_mm_pr rkjx_S, rkjy_S, rkjz_S;
    gmx_mm_pr one_S;
    gmx_mm_pr min_one_plus_eps_S;
    gmx_mm_pr rij_rkj_S;
    gmx_mm_pr nrij2_S, nrij_1_S;
    gmx_mm_pr nrkj2_S, nrkj_1_S;
    gmx_mm_pr cos_S, invsin_S;
    gmx_mm_pr theta_S;
    gmx_mm_pr st_S, sth_S;
    gmx_mm_pr cik_S, cii_S, ckk_S;
    gmx_mm_pr f_ix_S, f_iy_S, f_iz_S;
    gmx_mm_pr f_kx_S, f_ky_S, f_kz_S;
    pbc_simd_t pbc_simd;
    /* Ensure register memory alignment */
    coeff = gmx_simd_align_real(coeff_array);
    dr = gmx_simd_align_real(dr_array);
    f_buf = gmx_simd_align_real(f_buf_array);
    set_pbc_simd(pbc,&pbc_simd);
    one_S = gmx_set1_pr(1.0);
    /* The smallest number > -1 */
    min_one_plus_eps_S = gmx_set1_pr(-1.0 + 2*GMX_REAL_EPS);
    /* nbonds is the number of angles times nfa1, here we step UNROLL angles */
    for (i = 0; (i < nbonds); i += UNROLL*nfa1)
    {
        /* Collect atoms for UNROLL angles.
         * iu indexes into forceatoms, we should not let iu go beyond nbonds.
         */
        iu = i;
        for (s = 0; s < UNROLL; s++)
        {
            type = forceatoms[iu];
            ai[s] = forceatoms[iu+1];
            aj[s] = forceatoms[iu+2];
            ak[s] = forceatoms[iu+3];
            coeff[s] = forceparams[type].harmonic.krA;
            coeff[UNROLL+s] = forceparams[type].harmonic.rA*DEG2RAD;
            /* If you can't use pbc_dx_simd below for PBC, e.g. because
             * you can't round in SIMD, use pbc_rvec_sub here.
             */
            /* Store the non PBC corrected distances packed and aligned */
            for (m = 0; m < DIM; m++)
            {
                dr[s + m *UNROLL] = x[ai[s]][m] - x[aj[s]][m];
                dr[s + (DIM+m)*UNROLL] = x[ak[s]][m] - x[aj[s]][m];
            }
            /* At the end fill the arrays with identical entries */
            if (iu + nfa1 < nbonds)
            {
                iu += nfa1;
            }
        }
        k_S = gmx_load_pr(coeff);
        theta0_S = gmx_load_pr(coeff+UNROLL);
        rijx_S = gmx_load_pr(dr + 0*UNROLL);
        rijy_S = gmx_load_pr(dr + 1*UNROLL);
        rijz_S = gmx_load_pr(dr + 2*UNROLL);
        rkjx_S = gmx_load_pr(dr + 3*UNROLL);
        rkjy_S = gmx_load_pr(dr + 4*UNROLL);
        rkjz_S = gmx_load_pr(dr + 5*UNROLL);
        pbc_dx_simd(&rijx_S, &rijy_S, &rijz_S, &pbc_simd);
        pbc_dx_simd(&rkjx_S, &rkjy_S, &rkjz_S, &pbc_simd);
        rij_rkj_S = gmx_iprod_pr(rijx_S, rijy_S, rijz_S,
                                 rkjx_S, rkjy_S, rkjz_S);
        nrij2_S = gmx_norm2_pr(rijx_S, rijy_S, rijz_S);
        nrkj2_S = gmx_norm2_pr(rkjx_S, rkjy_S, rkjz_S);
        nrij_1_S = gmx_invsqrt_pr(nrij2_S);
        nrkj_1_S = gmx_invsqrt_pr(nrkj2_S);
        cos_S = gmx_mul_pr(rij_rkj_S, gmx_mul_pr(nrij_1_S, nrkj_1_S));
        /* To allow for 180 degrees, we take the max of cos and -1 + 1bit,
         * so we can safely get the 1/sin from 1/sqrt(1 - cos^2).
         * This also ensures that rounding errors can not cause the argument
         * of gmx_acos_pr to be < -1.
         * Note that we do not take precautions for cos(0)=1, so the outer
         * atoms in an angle should not be on top of each other.
         */
        cos_S = gmx_max_pr(cos_S, min_one_plus_eps_S);
        theta_S = gmx_acos_pr(cos_S);
        invsin_S = gmx_invsqrt_pr(gmx_sub_pr(one_S, gmx_mul_pr(cos_S, cos_S)));
        st_S = gmx_mul_pr(gmx_mul_pr(k_S, gmx_sub_pr(theta0_S, theta_S)),
                          invsin_S);
        sth_S = gmx_mul_pr(st_S, cos_S);
        cik_S = gmx_mul_pr(st_S, gmx_mul_pr(nrij_1_S, nrkj_1_S));
        cii_S = gmx_mul_pr(sth_S, gmx_mul_pr(nrij_1_S, nrij_1_S));
        ckk_S = gmx_mul_pr(sth_S, gmx_mul_pr(nrkj_1_S, nrkj_1_S));
        f_ix_S = gmx_mul_pr(cii_S, rijx_S);
        f_ix_S = gmx_nmsub_pr(cik_S, rkjx_S, f_ix_S);
        f_iy_S = gmx_mul_pr(cii_S, rijy_S);
        f_iy_S = gmx_nmsub_pr(cik_S, rkjy_S, f_iy_S);
        f_iz_S = gmx_mul_pr(cii_S, rijz_S);
        f_iz_S = gmx_nmsub_pr(cik_S, rkjz_S, f_iz_S);
        f_kx_S = gmx_mul_pr(ckk_S, rkjx_S);
        f_kx_S = gmx_nmsub_pr(cik_S, rijx_S, f_kx_S);
        f_ky_S = gmx_mul_pr(ckk_S, rkjy_S);
        f_ky_S = gmx_nmsub_pr(cik_S, rijy_S, f_ky_S);
        f_kz_S = gmx_mul_pr(ckk_S, rkjz_S);
        f_kz_S = gmx_nmsub_pr(cik_S, rijz_S, f_kz_S);
        gmx_store_pr(f_buf + 0*UNROLL, f_ix_S);
        gmx_store_pr(f_buf + 1*UNROLL, f_iy_S);
        gmx_store_pr(f_buf + 2*UNROLL, f_iz_S);
        gmx_store_pr(f_buf + 3*UNROLL, f_kx_S);
        gmx_store_pr(f_buf + 4*UNROLL, f_ky_S);
        gmx_store_pr(f_buf + 5*UNROLL, f_kz_S);
        /* Scatter the packed forces back; stop at nbonds so padded
         * duplicate entries are not added twice */
        iu = i;
        s = 0;
        do
        {
            for (m = 0; m < DIM; m++)
            {
                f[ai[s]][m] += f_buf[s + m*UNROLL];
                f[aj[s]][m] -= f_buf[s + m*UNROLL] + f_buf[s + (DIM+m)*UNROLL];
                f[ak[s]][m] += f_buf[s + (DIM+m)*UNROLL];
            }
            s++;
            iu += nfa1;
        }
        while (s < UNROLL && iu < nbonds);
    }
#undef UNROLL
}
#endif /* SIMD_BONDEDS */
/* Linear angle potential: the central atom j is restrained towards the
 * point a*x_i + (1-a)*x_k on the i-k axis with force constant klin;
 * both a and klin are lambda-interpolated.  Returns the total energy;
 * accumulates forces, shift forces and *dvdlambda.
 */
real linear_angles(int nbonds,
                   const t_iatom forceatoms[], const t_iparams forceparams[],
                   const rvec x[], rvec f[], rvec fshift[],
                   const t_pbc *pbc, const t_graph *g,
                   real lambda, real *dvdlambda,
                   const t_mdatoms *md, t_fcdata *fcd,
                   int *global_atom_index)
{
    int i, m, ai, aj, ak, t1, t2, type;
    rvec f_i, f_j, f_k;
    real L1, kA, kB, aA, aB, dr, dr2, va, vtot, a, b, klin;
    ivec jt, dt_ij, dt_kj;
    rvec r_ij, r_kj, r_ik, dx;
    L1 = 1-lambda;
    vtot = 0.0;
    for (i = 0; (i < nbonds); )
    {
        type = forceatoms[i++];
        ai = forceatoms[i++];
        aj = forceatoms[i++];
        ak = forceatoms[i++];
        kA = forceparams[type].linangle.klinA;
        kB = forceparams[type].linangle.klinB;
        klin = L1*kA + lambda*kB;
        aA = forceparams[type].linangle.aA;
        aB = forceparams[type].linangle.aB;
        a = L1*aA+lambda*aB;
        b = 1-a;
        t1 = pbc_rvec_sub(pbc, x[ai], x[aj], r_ij);
        t2 = pbc_rvec_sub(pbc, x[ak], x[aj], r_kj);
        rvec_sub(r_ij, r_kj, r_ik);
        dr2 = 0;
        for (m = 0; (m < DIM); m++)
        {
            /* dx = displacement of aj from the weighted i-k point */
            dr = -a * r_ij[m] - b * r_kj[m];
            dr2 += dr*dr;
            dx[m] = dr;
            f_i[m] = a*klin*dr;
            f_k[m] = b*klin*dr;
            f_j[m] = -(f_i[m]+f_k[m]);
            f[ai][m] += f_i[m];
            f[aj][m] += f_j[m];
            f[ak][m] += f_k[m];
        }
        va = 0.5*klin*dr2;
        *dvdlambda += 0.5*(kB-kA)*dr2 + klin*(aB-aA)*iprod(dx, r_ik);
        vtot += va;
        if (g)
        {
            copy_ivec(SHIFT_IVEC(g, aj), jt);
            ivec_sub(SHIFT_IVEC(g, ai), jt, dt_ij);
            ivec_sub(SHIFT_IVEC(g, ak), jt, dt_kj);
            t1 = IVEC2IS(dt_ij);
            t2 = IVEC2IS(dt_kj);
        }
        rvec_inc(fshift[t1], f_i);
        rvec_inc(fshift[CENTRAL], f_j);
        rvec_inc(fshift[t2], f_k);
    } /* 57 TOTAL	*/
    return vtot;
}
/* Urey-Bradley interaction: a harmonic angle term (ktheta, theta0) plus
 * a harmonic 1-3 distance term (kUB, r13) between the outer atoms i and
 * k.  All parameters are lambda-interpolated via harmonic().  Returns
 * the total energy; accumulates forces, shift forces and *dvdlambda.
 */
real urey_bradley(int nbonds,
                  const t_iatom forceatoms[], const t_iparams forceparams[],
                  const rvec x[], rvec f[], rvec fshift[],
                  const t_pbc *pbc, const t_graph *g,
                  real lambda, real *dvdlambda,
                  const t_mdatoms *md, t_fcdata *fcd,
                  int *global_atom_index)
{
    int i, m, ai, aj, ak, t1, t2, type, ki;
    rvec r_ij, r_kj, r_ik;
    real cos_theta, cos_theta2, theta;
    real dVdt, va, vtot, dr, dr2, vbond, fbond, fik;
    real kthA, th0A, kUBA, r13A, kthB, th0B, kUBB, r13B;
    ivec jt, dt_ij, dt_kj, dt_ik;
    vtot = 0.0;
    for (i = 0; (i < nbonds); )
    {
        type = forceatoms[i++];
        ai = forceatoms[i++];
        aj = forceatoms[i++];
        ak = forceatoms[i++];
        th0A = forceparams[type].u_b.thetaA*DEG2RAD;
        kthA = forceparams[type].u_b.kthetaA;
        r13A = forceparams[type].u_b.r13A;
        kUBA = forceparams[type].u_b.kUBA;
        th0B = forceparams[type].u_b.thetaB*DEG2RAD;
        kthB = forceparams[type].u_b.kthetaB;
        r13B = forceparams[type].u_b.r13B;
        kUBB = forceparams[type].u_b.kUBB;
        /* The angle part, as in angles() */
        theta = bond_angle(x[ai], x[aj], x[ak], pbc,
                           r_ij, r_kj, &cos_theta, &t1, &t2); /* 41 */
        *dvdlambda += harmonic(kthA, kthB, th0A, th0B, theta, lambda, &va, &dVdt); /* 21 */
        vtot += va;
        /* The 1-3 bond part */
        ki = pbc_rvec_sub(pbc, x[ai], x[ak], r_ik); /* 3 */
        dr2 = iprod(r_ik, r_ik);                    /* 5 */
        dr = dr2*gmx_invsqrt(dr2);                  /* 10 */
        *dvdlambda += harmonic(kUBA, kUBB, r13A, r13B, dr, lambda, &vbond, &fbond); /* 19 */
        cos_theta2 = sqr(cos_theta); /* 1 */
        if (cos_theta2 < 1)
        {
            real st, sth;
            real cik, cii, ckk;
            real nrkj2, nrij2;
            rvec f_i, f_j, f_k;
            st = dVdt*gmx_invsqrt(1 - cos_theta2); /* 12 */
            sth = st*cos_theta;                    /* 1 */
#ifdef DEBUG
            if (debug)
            {
                fprintf(debug, "ANGLES: theta = %10g  vth = %10g  dV/dtheta = %10g\n",
                        theta*RAD2DEG, va, dVdt);
            }
#endif
            nrkj2 = iprod(r_kj, r_kj); /* 5 */
            nrij2 = iprod(r_ij, r_ij);
            cik = st*gmx_invsqrt(nrkj2*nrij2); /* 12 */
            cii = sth/nrij2;                   /* 10 */
            ckk = sth/nrkj2;                   /* 10 */
            for (m = 0; (m < DIM); m++) /* 39 */
            {
                f_i[m] = -(cik*r_kj[m]-cii*r_ij[m]);
                f_k[m] = -(cik*r_ij[m]-ckk*r_kj[m]);
                f_j[m] = -f_i[m]-f_k[m];
                f[ai][m] += f_i[m];
                f[aj][m] += f_j[m];
                f[ak][m] += f_k[m];
            }
            if (g)
            {
                copy_ivec(SHIFT_IVEC(g, aj), jt);
                ivec_sub(SHIFT_IVEC(g, ai), jt, dt_ij);
                ivec_sub(SHIFT_IVEC(g, ak), jt, dt_kj);
                t1 = IVEC2IS(dt_ij);
                t2 = IVEC2IS(dt_kj);
            }
            rvec_inc(fshift[t1], f_i);
            rvec_inc(fshift[CENTRAL], f_j);
            rvec_inc(fshift[t2], f_k);
        } /* 161 TOTAL */
        /* Time for the bond calculations */
        if (dr2 == 0.0)
        {
            continue;
        }
        vtot += vbond;             /* 1*/
        fbond *= gmx_invsqrt(dr2); /* 6 */
        if (g)
        {
            ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, ak), dt_ik);
            ki = IVEC2IS(dt_ik);
        }
        for (m = 0; (m < DIM); m++) /* 15 */
        {
            fik = fbond*r_ik[m];
            f[ai][m] += fik;
            f[ak][m] -= fik;
            fshift[ki][m] += fik;
            fshift[CENTRAL][m] -= fik;
        }
    }
    return vtot;
}
/* Quartic angle potential: V = sum_{j=0..4} c[j]*(theta - theta0)^j.
 * Returns the total energy; accumulates forces and shift forces.
 * No free-energy perturbation.
 */
real quartic_angles(int nbonds,
                    const t_iatom forceatoms[], const t_iparams forceparams[],
                    const rvec x[], rvec f[], rvec fshift[],
                    const t_pbc *pbc, const t_graph *g,
                    real lambda, real *dvdlambda,
                    const t_mdatoms *md, t_fcdata *fcd,
                    int *global_atom_index)
{
    int i, j, ai, aj, ak, t1, t2, type;
    rvec r_ij, r_kj;
    real cos_theta, cos_theta2, theta, dt, dVdt, va, dtp, c, vtot;
    ivec jt, dt_ij, dt_kj;
    vtot = 0.0;
    for (i = 0; (i < nbonds); )
    {
        type = forceatoms[i++];
        ai = forceatoms[i++];
        aj = forceatoms[i++];
        ak = forceatoms[i++];
        theta = bond_angle(x[ai], x[aj], x[ak], pbc,
                           r_ij, r_kj, &cos_theta, &t1, &t2); /* 41 */
        dt = theta - forceparams[type].qangle.theta*DEG2RAD;  /* 2 */
        /* Evaluate the polynomial and its (negated) derivative by
         * accumulating powers of dt; dtp holds dt^(j-1) when the
         * dVdt term for power j is added */
        dVdt = 0;
        va = forceparams[type].qangle.c[0];
        dtp = 1.0;
        for (j = 1; j <= 4; j++)
        {
            c = forceparams[type].qangle.c[j];
            dVdt -= j*c*dtp;
            dtp *= dt;
            va += c*dtp;
        }
        /* 20 */
        vtot += va;
        cos_theta2 = sqr(cos_theta); /* 1 */
        if (cos_theta2 < 1)
        {
            int m;
            real st, sth;
            real cik, cii, ckk;
            real nrkj2, nrij2;
            rvec f_i, f_j, f_k;
            st = dVdt*gmx_invsqrt(1 - cos_theta2); /* 12 */
            sth = st*cos_theta;                    /* 1 */
#ifdef DEBUG
            if (debug)
            {
                fprintf(debug, "ANGLES: theta = %10g  vth = %10g  dV/dtheta = %10g\n",
                        theta*RAD2DEG, va, dVdt);
            }
#endif
            nrkj2 = iprod(r_kj, r_kj); /* 5 */
            nrij2 = iprod(r_ij, r_ij);
            cik = st*gmx_invsqrt(nrkj2*nrij2); /* 12 */
            cii = sth/nrij2;                   /* 10 */
            ckk = sth/nrkj2;                   /* 10 */
            for (m = 0; (m < DIM); m++) /* 39 */
            {
                f_i[m] = -(cik*r_kj[m]-cii*r_ij[m]);
                f_k[m] = -(cik*r_ij[m]-ckk*r_kj[m]);
                f_j[m] = -f_i[m]-f_k[m];
                f[ai][m] += f_i[m];
                f[aj][m] += f_j[m];
                f[ak][m] += f_k[m];
            }
            if (g)
            {
                copy_ivec(SHIFT_IVEC(g, aj), jt);
                ivec_sub(SHIFT_IVEC(g, ai), jt, dt_ij);
                ivec_sub(SHIFT_IVEC(g, ak), jt, dt_kj);
                t1 = IVEC2IS(dt_ij);
                t2 = IVEC2IS(dt_kj);
            }
            rvec_inc(fshift[t1], f_i);
            rvec_inc(fshift[CENTRAL], f_j);
            rvec_inc(fshift[t2], f_k);
        } /* 153 TOTAL */
    }
    return vtot;
}
/* Compute the dihedral angle i-j-k-l.
 * Outputs: r_ij, r_kj, r_kl are the PBC-corrected bond vectors; m and n
 * the normals of the (i,j,k) and (j,k,l) planes; *sign the sign of the
 * angle (from the projection of r_ij on n); *t1/*t2/*t3 the shift
 * indices.  Returns the signed angle in radians.
 */
real dih_angle(const rvec xi, const rvec xj, const rvec xk, const rvec xl,
               const t_pbc *pbc,
               rvec r_ij, rvec r_kj, rvec r_kl, rvec m, rvec n,
               real *sign, int *t1, int *t2, int *t3)
{
    real ipr, phi;
    *t1 = pbc_rvec_sub(pbc, xi, xj, r_ij); /*  3 		*/
    *t2 = pbc_rvec_sub(pbc, xk, xj, r_kj); /*  3		*/
    *t3 = pbc_rvec_sub(pbc, xk, xl, r_kl); /*  3		*/
    cprod(r_ij, r_kj, m);                  /*  9        */
    cprod(r_kj, r_kl, n);                  /*  9		*/
    phi = gmx_angle(m, n);                 /* 49 (assuming 25 for atan2) */
    ipr = iprod(r_ij, n);                  /*  5        */
    (*sign) = (ipr < 0.0) ? -1.0 : 1.0;
    phi = (*sign)*phi;                     /*  1		*/
    /* 82 TOTAL	*/
    return phi;
}
#ifdef SIMD_BONDEDS
/* As dih_angle above, but calculates 4 dihedral angles at once using SIMD,
 * also calculates the pre-factor required for the dihedral force update.
 * Note that bv and buf should be register aligned.
 */
static gmx_inline void
dih_angle_simd(const rvec *x,
const int *ai, const int *aj, const int *ak, const int *al,
const pbc_simd_t *pbc,
real *dr,
gmx_mm_pr *phi_S,
gmx_mm_pr *mx_S, gmx_mm_pr *my_S, gmx_mm_pr *mz_S,
gmx_mm_pr *nx_S, gmx_mm_pr *ny_S, gmx_mm_pr *nz_S,
gmx_mm_pr *nrkj_m2_S,
gmx_mm_pr *nrkj_n2_S,
real *p,
real *q)
{
#define UNROLL GMX_SIMD_WIDTH_HERE
int s, m;
gmx_mm_pr rijx_S, rijy_S, rijz_S;
gmx_mm_pr rkjx_S, rkjy_S, rkjz_S;
gmx_mm_pr rklx_S, rkly_S, rklz_S;
gmx_mm_pr cx_S, cy_S, cz_S;
gmx_mm_pr cn_S;
gmx_mm_pr s_S;
gmx_mm_pr ipr_S;
gmx_mm_pr iprm_S, iprn_S;
gmx_mm_pr nrkj2_S, nrkj_1_S, nrkj_2_S, nrkj_S;
gmx_mm_pr toler_S;
gmx_mm_pr p_S, q_S;
gmx_mm_pr nrkj2_min_S;
gmx_mm_pr real_eps_S;
/* Used to avoid division by zero.
 * We take into acount that we multiply the result by real_eps_S.
 */
nrkj2_min_S = gmx_set1_pr(GMX_REAL_MIN/(2*GMX_REAL_EPS));
/* The value of the last significant bit (GMX_REAL_EPS is half of that) */
real_eps_S = gmx_set1_pr(2*GMX_REAL_EPS);
/* Gather the three connection vectors for UNROLL dihedrals into dr,
 * laid out as 9 contiguous SIMD-width arrays (x,y,z of ij, kj, kl).
 */
for (s = 0; s < UNROLL; s++)
{
/* If you can't use pbc_dx_simd below for PBC, e.g. because
 * you can't round in SIMD, use pbc_rvec_sub here.
 */
for (m = 0; m < DIM; m++)
{
dr[s + (0*DIM + m)*UNROLL] = x[ai[s]][m] - x[aj[s]][m];
dr[s + (1*DIM + m)*UNROLL] = x[ak[s]][m] - x[aj[s]][m];
dr[s + (2*DIM + m)*UNROLL] = x[ak[s]][m] - x[al[s]][m];
}
}
rijx_S = gmx_load_pr(dr + 0*UNROLL);
rijy_S = gmx_load_pr(dr + 1*UNROLL);
rijz_S = gmx_load_pr(dr + 2*UNROLL);
rkjx_S = gmx_load_pr(dr + 3*UNROLL);
rkjy_S = gmx_load_pr(dr + 4*UNROLL);
rkjz_S = gmx_load_pr(dr + 5*UNROLL);
rklx_S = gmx_load_pr(dr + 6*UNROLL);
rkly_S = gmx_load_pr(dr + 7*UNROLL);
rklz_S = gmx_load_pr(dr + 8*UNROLL);
/* Apply minimum-image correction in SIMD */
pbc_dx_simd(&rijx_S, &rijy_S, &rijz_S, pbc);
pbc_dx_simd(&rkjx_S, &rkjy_S, &rkjz_S, pbc);
pbc_dx_simd(&rklx_S, &rkly_S, &rklz_S, pbc);
/* m = r_ij x r_kj, n = r_kj x r_kl: the two plane normals */
gmx_cprod_pr(rijx_S, rijy_S, rijz_S,
rkjx_S, rkjy_S, rkjz_S,
mx_S, my_S, mz_S);
gmx_cprod_pr(rkjx_S, rkjy_S, rkjz_S,
rklx_S, rkly_S, rklz_S,
nx_S, ny_S, nz_S);
gmx_cprod_pr(*mx_S, *my_S, *mz_S,
*nx_S, *ny_S, *nz_S,
&cx_S, &cy_S, &cz_S);
cn_S = gmx_sqrt_pr(gmx_norm2_pr(cx_S, cy_S, cz_S));
s_S = gmx_iprod_pr(*mx_S, *my_S, *mz_S, *nx_S, *ny_S, *nz_S);
/* Determine the dihedral angle, the sign might need correction */
*phi_S = gmx_atan2_pr(cn_S, s_S);
ipr_S = gmx_iprod_pr(rijx_S, rijy_S, rijz_S,
*nx_S, *ny_S, *nz_S);
iprm_S = gmx_norm2_pr(*mx_S, *my_S, *mz_S);
iprn_S = gmx_norm2_pr(*nx_S, *ny_S, *nz_S);
nrkj2_S = gmx_norm2_pr(rkjx_S, rkjy_S, rkjz_S);
/* Avoid division by zero. When zero, the result is multiplied by 0
 * anyhow, so the 3 max below do not affect the final result.
 */
nrkj2_S = gmx_max_pr(nrkj2_S, nrkj2_min_S);
nrkj_1_S = gmx_invsqrt_pr(nrkj2_S);
nrkj_2_S = gmx_mul_pr(nrkj_1_S, nrkj_1_S);
nrkj_S = gmx_mul_pr(nrkj2_S, nrkj_1_S);
toler_S = gmx_mul_pr(nrkj2_S, real_eps_S);
/* Here the plain-C code uses a conditional, but we can't do that in SIMD.
 * So we take a max with the tolerance instead. Since we multiply with
 * m or n later, the max does not affect the results.
 */
iprm_S = gmx_max_pr(iprm_S, toler_S);
iprn_S = gmx_max_pr(iprn_S, toler_S);
/* Force pre-factors |r_kj|/|m|^2 and |r_kj|/|n|^2 */
*nrkj_m2_S = gmx_mul_pr(nrkj_S, gmx_inv_pr(iprm_S));
*nrkj_n2_S = gmx_mul_pr(nrkj_S, gmx_inv_pr(iprn_S));
/* Set sign of phi_S with the sign of ipr_S; phi_S is currently positive */
*phi_S = gmx_cpsgn_nonneg_pr(ipr_S, *phi_S);
/* p and q are the projection factors used to distribute f_i/f_l onto j/k */
p_S = gmx_iprod_pr(rijx_S, rijy_S, rijz_S,
rkjx_S, rkjy_S, rkjz_S);
p_S = gmx_mul_pr(p_S, nrkj_2_S);
q_S = gmx_iprod_pr(rklx_S, rkly_S, rklz_S,
rkjx_S, rkjy_S, rkjz_S);
q_S = gmx_mul_pr(q_S, nrkj_2_S);
gmx_store_pr(p, p_S);
gmx_store_pr(q, q_S);
#undef UNROLL
}
#endif /* SIMD_BONDEDS */
void do_dih_fup(int i, int j, int k, int l, real ddphi,
rvec r_ij, rvec r_kj, rvec r_kl,
rvec m, rvec n, rvec f[], rvec fshift[],
const t_pbc *pbc, const t_graph *g,
const rvec x[], int t1, int t2, int t3)
{
/* Distribute the dihedral force -dV/dphi (= ddphi) over the four atoms
 * i,j,k,l and accumulate the corresponding shift forces for the virial.
 * m, n and r_ij, r_kj, r_kl must come from dih_angle for the same atoms.
 */
/* 143 FLOPS */
rvec f_i, f_j, f_k, f_l;
rvec uvec, vvec, svec, dx_jl;
real iprm, iprn, nrkj, nrkj2, nrkj_1, nrkj_2;
real a, b, p, q, toler;
ivec jt, dt_ij, dt_kj, dt_lj;
iprm = iprod(m, m); /* 5 */
iprn = iprod(n, n); /* 5 */
nrkj2 = iprod(r_kj, r_kj); /* 5 */
toler = nrkj2*GMX_REAL_EPS;
/* Skip the update when m or n is (numerically) zero: the dihedral is
 * ill-defined (three collinear atoms) and the force would blow up.
 */
if ((iprm > toler) && (iprn > toler))
{
nrkj_1 = gmx_invsqrt(nrkj2); /* 10 */
nrkj_2 = nrkj_1*nrkj_1; /* 1 */
nrkj = nrkj2*nrkj_1; /* 1 */
a = -ddphi*nrkj/iprm; /* 11 */
svmul(a, m, f_i); /* 3 */
b = ddphi*nrkj/iprn; /* 11 */
svmul(b, n, f_l); /* 3 */
/* p and q project f_i and f_l onto the central bond for f_j, f_k */
p = iprod(r_ij, r_kj); /* 5 */
p *= nrkj_2; /* 1 */
q = iprod(r_kl, r_kj); /* 5 */
q *= nrkj_2; /* 1 */
svmul(p, f_i, uvec); /* 3 */
svmul(q, f_l, vvec); /* 3 */
rvec_sub(uvec, vvec, svec); /* 3 */
rvec_sub(f_i, svec, f_j); /* 3 */
rvec_add(f_l, svec, f_k); /* 3 */
rvec_inc(f[i], f_i); /* 3 */
rvec_dec(f[j], f_j); /* 3 */
rvec_dec(f[k], f_k); /* 3 */
rvec_inc(f[l], f_l); /* 3 */
/* Determine the shift indices; with a graph they are recomputed here,
 * otherwise t3 (l relative to j) still has to be determined.
 */
if (g)
{
copy_ivec(SHIFT_IVEC(g, j), jt);
ivec_sub(SHIFT_IVEC(g, i), jt, dt_ij);
ivec_sub(SHIFT_IVEC(g, k), jt, dt_kj);
ivec_sub(SHIFT_IVEC(g, l), jt, dt_lj);
t1 = IVEC2IS(dt_ij);
t2 = IVEC2IS(dt_kj);
t3 = IVEC2IS(dt_lj);
}
else if (pbc)
{
t3 = pbc_rvec_sub(pbc, x[l], x[j], dx_jl);
}
else
{
t3 = CENTRAL;
}
rvec_inc(fshift[t1], f_i);
rvec_dec(fshift[CENTRAL], f_j);
rvec_dec(fshift[t2], f_k);
rvec_inc(fshift[t3], f_l);
}
/* 112 TOTAL */
}
/* As do_dih_fup above, but without shift forces */
static void
do_dih_fup_noshiftf(int i, int j, int k, int l, real ddphi,
rvec r_ij, rvec r_kj, rvec r_kl,
rvec m, rvec n, rvec f[])
{
/* Distribute the dihedral force -dV/dphi (= ddphi) over atoms i,j,k,l.
 * Identical maths to do_dih_fup, minus the fshift/virial bookkeeping,
 * so the shift-index variables of that routine are not needed here.
 */
rvec f_i, f_j, f_k, f_l;
rvec uvec, vvec, svec;
real iprm, iprn, nrkj, nrkj2, nrkj_1, nrkj_2;
real a, b, p, q, toler;
iprm = iprod(m, m); /* 5 */
iprn = iprod(n, n); /* 5 */
nrkj2 = iprod(r_kj, r_kj); /* 5 */
toler = nrkj2*GMX_REAL_EPS;
/* Skip ill-defined (collinear) dihedrals to avoid division blow-up */
if ((iprm > toler) && (iprn > toler))
{
nrkj_1 = gmx_invsqrt(nrkj2); /* 10 */
nrkj_2 = nrkj_1*nrkj_1; /* 1 */
nrkj = nrkj2*nrkj_1; /* 1 */
a = -ddphi*nrkj/iprm; /* 11 */
svmul(a, m, f_i); /* 3 */
b = ddphi*nrkj/iprn; /* 11 */
svmul(b, n, f_l); /* 3 */
p = iprod(r_ij, r_kj); /* 5 */
p *= nrkj_2; /* 1 */
q = iprod(r_kl, r_kj); /* 5 */
q *= nrkj_2; /* 1 */
svmul(p, f_i, uvec); /* 3 */
svmul(q, f_l, vvec); /* 3 */
rvec_sub(uvec, vvec, svec); /* 3 */
rvec_sub(f_i, svec, f_j); /* 3 */
rvec_add(f_l, svec, f_k); /* 3 */
rvec_inc(f[i], f_i); /* 3 */
rvec_dec(f[j], f_j); /* 3 */
rvec_dec(f[k], f_k); /* 3 */
rvec_inc(f[l], f_l); /* 3 */
}
}
/* As do_dih_fup_noshiftf above, but with pre-calculated pre-factors */
static gmx_inline void
do_dih_fup_noshiftf_precalc(int i, int j, int k, int l,
real p, real q,
real f_i_x, real f_i_y, real f_i_z,
real mf_l_x, real mf_l_y, real mf_l_z,
rvec f[])
{
/* SIMD companion of do_dih_fup_noshiftf: f_i and -f_l were already
 * computed per-lane, so only the p/q redistribution onto j and k remains.
 * Note the l-force is passed negated (mf_l_*) and flipped back here.
 */
rvec f_i, f_j, f_k, f_l;
rvec uvec, vvec, svec;
f_i[XX] = f_i_x;
f_i[YY] = f_i_y;
f_i[ZZ] = f_i_z;
f_l[XX] = -mf_l_x;
f_l[YY] = -mf_l_y;
f_l[ZZ] = -mf_l_z;
svmul(p, f_i, uvec);
svmul(q, f_l, vvec);
rvec_sub(uvec, vvec, svec);
rvec_sub(f_i, svec, f_j);
rvec_add(f_l, svec, f_k);
rvec_inc(f[i], f_i);
rvec_dec(f[j], f_j);
rvec_dec(f[k], f_k);
rvec_inc(f[l], f_l);
}
real dopdihs(real cpA, real cpB, real phiA, real phiB, int mult,
real phi, real lambda, real *V, real *F)
{
/* Proper dihedral V = cp*(1 + cos(mult*phi - phi0)), free-energy
 * interpolated between state A and B parameters.
 * Outputs the energy in *V and -dV/dphi in *F; returns dV/dlambda.
 */
real v, dvdlambda, mdphi, v1, sdphi, ddphi;
real L1 = 1.0 - lambda;
real ph0 = (L1*phiA + lambda*phiB)*DEG2RAD;
real dph0 = (phiB - phiA)*DEG2RAD;
real cp = L1*cpA + lambda*cpB;
mdphi = mult*phi - ph0;
sdphi = sin(mdphi);
ddphi = -cp*mult*sdphi;
v1 = 1.0 + cos(mdphi);
v = cp*v1;
dvdlambda = (cpB - cpA)*v1 + cp*dph0*sdphi;
*V = v;
*F = ddphi;
return dvdlambda;
/* That was 40 flops */
}
static void
dopdihs_noener(real cpA, real cpB, real phiA, real phiB, int mult,
real phi, real lambda, real *F)
{
/* Force-only variant of dopdihs: computes -dV/dphi for the proper
 * dihedral V = cp*(1 + cos(mult*phi - phi0)), without energy or
 * dV/dlambda. Parameters are lambda-interpolated between A and B.
 */
const real oml = 1.0 - lambda;
const real ph0 = (oml*phiA + lambda*phiB)*DEG2RAD;
const real cp = oml*cpA + lambda*cpB;
const real mdphi = mult*phi - ph0;
const real sdphi = sin(mdphi);
*F = -cp*mult*sdphi;
/* That was 20 flops */
}
static void
dopdihs_mdphi(real cpA, real cpB, real phiA, real phiB, int mult,
real phi, real lambda, real *cp, real *mdphi)
{
/* Interpolate the proper-dihedral force constant (*cp) and compute
 * mult*phi - phi0 (*mdphi) without evaluating any trigonometry;
 * used where sin/cos are batched elsewhere (e.g. the SIMD path).
 */
const real oml = 1.0 - lambda;
const real ph0 = (oml*phiA + lambda*phiB)*DEG2RAD;
*cp = oml*cpA + lambda*cpB;
*mdphi = mult*phi - ph0;
}
static real dopdihs_min(real cpA, real cpB, real phiA, real phiB, int mult,
real phi, real lambda, real *V, real *F)
/* similar to dopdihs, except for a minus sign *
 * and a different treatment of mult/phi0 */
{
/* V = cp*(1 - cos(mult*(phi - phi0))); outputs energy in *V and
 * -dV/dphi in *F, returns dV/dlambda. Used for angle restraints.
 */
real v, dvdlambda, mdphi, v1, sdphi, ddphi;
real L1 = 1.0 - lambda;
real ph0 = (L1*phiA + lambda*phiB)*DEG2RAD;
real dph0 = (phiB - phiA)*DEG2RAD;
real cp = L1*cpA + lambda*cpB;
mdphi = mult*(phi-ph0);
sdphi = sin(mdphi);
ddphi = cp*mult*sdphi;
v1 = 1.0-cos(mdphi);
v = cp*v1;
dvdlambda = (cpB-cpA)*v1 + cp*dph0*sdphi;
*V = v;
*F = ddphi;
return dvdlambda;
/* That was 40 flops */
}
real pdihs(int nbonds,
const t_iatom forceatoms[], const t_iparams forceparams[],
const rvec x[], rvec f[], rvec fshift[],
const t_pbc *pbc, const t_graph *g,
real lambda, real *dvdlambda,
const t_mdatoms *md, t_fcdata *fcd,
int *global_atom_index)
{
/* Proper dihedral potential: loops over all dihedrals in forceatoms
 * (5 ints each: type, ai, aj, ak, al), accumulates forces, shift
 * forces and dV/dlambda; returns the total energy.
 */
int i, type, ai, aj, ak, al;
int t1, t2, t3;
rvec r_ij, r_kj, r_kl, m, n;
real phi, sign, ddphi, vpd, vtot;
vtot = 0.0;
for (i = 0; (i < nbonds); )
{
type = forceatoms[i++];
ai = forceatoms[i++];
aj = forceatoms[i++];
ak = forceatoms[i++];
al = forceatoms[i++];
phi = dih_angle(x[ai], x[aj], x[ak], x[al], pbc, r_ij, r_kj, r_kl, m, n,
&sign, &t1, &t2, &t3); /* 84 */
*dvdlambda += dopdihs(forceparams[type].pdihs.cpA,
forceparams[type].pdihs.cpB,
forceparams[type].pdihs.phiA,
forceparams[type].pdihs.phiB,
forceparams[type].pdihs.mult,
phi, lambda, &vpd, &ddphi);
vtot += vpd;
do_dih_fup(ai, aj, ak, al, ddphi, r_ij, r_kj, r_kl, m, n,
f, fshift, pbc, g, x, t1, t2, t3); /* 112 */
#ifdef DEBUG
/* Guard against a NULL debug stream, consistent with the other
 * DEBUG blocks in this file (angles, idihs, g96bonds).
 */
if (debug)
{
fprintf(debug, "pdih: (%d,%d,%d,%d) phi=%g\n",
ai, aj, ak, al, phi);
}
#endif
} /* 223 TOTAL */
return vtot;
}
void make_dp_periodic(real *dp) /* 1 flop? */
{
/* Fold an angle difference into the interval [-pi, pi).
 * At most one correction is needed, since the input is a difference
 * of two angles that are themselves within one period.
 */
real d = *dp;
if (d >= M_PI)
{
d -= 2*M_PI;
}
else if (d < -M_PI)
{
d += 2*M_PI;
}
*dp = d;
}
/* As pdihs above, but without calculating energies and shift forces */
static void
pdihs_noener(int nbonds,
const t_iatom forceatoms[], const t_iparams forceparams[],
const rvec x[], rvec f[],
const t_pbc *pbc, const t_graph *g,
real lambda,
const t_mdatoms *md, t_fcdata *fcd,
int *global_atom_index)
{
int i, type, ai, aj, ak, al;
int t1, t2, t3;
rvec r_ij, r_kj, r_kl, m, n;
real phi, sign, ddphi_tot, ddphi;
for (i = 0; (i < nbonds); )
{
/* Peek at the atoms without consuming the entry; the inner loop
 * below advances i by 5 per dihedral term.
 */
ai = forceatoms[i+1];
aj = forceatoms[i+2];
ak = forceatoms[i+3];
al = forceatoms[i+4];
phi = dih_angle(x[ai], x[aj], x[ak], x[al], pbc, r_ij, r_kj, r_kl, m, n,
&sign, &t1, &t2, &t3);
ddphi_tot = 0;
/* Loop over dihedrals working on the same atoms,
 * so we avoid recalculating angles and force distributions.
 */
do
{
type = forceatoms[i];
dopdihs_noener(forceparams[type].pdihs.cpA,
forceparams[type].pdihs.cpB,
forceparams[type].pdihs.phiA,
forceparams[type].pdihs.phiB,
forceparams[type].pdihs.mult,
phi, lambda, &ddphi);
ddphi_tot += ddphi;
i += 5;
}
while (i < nbonds &&
forceatoms[i+1] == ai &&
forceatoms[i+2] == aj &&
forceatoms[i+3] == ak &&
forceatoms[i+4] == al);
/* Apply the summed force for all terms on this quadruplet at once */
do_dih_fup_noshiftf(ai, aj, ak, al, ddphi_tot, r_ij, r_kj, r_kl, m, n, f);
}
}
#ifdef SIMD_BONDEDS
/* As pdihs_noner above, but using SIMD to calculate many dihedrals at once */
static void
pdihs_noener_simd(int nbonds,
const t_iatom forceatoms[], const t_iparams forceparams[],
const rvec x[], rvec f[],
const t_pbc *pbc, const t_graph *g,
real lambda,
const t_mdatoms *md, t_fcdata *fcd,
int *global_atom_index)
{
#define UNROLL GMX_SIMD_WIDTH_HERE
const int nfa1 = 5;
int i, iu, s;
int type, ai[UNROLL], aj[UNROLL], ak[UNROLL], al[UNROLL];
int t1[UNROLL], t2[UNROLL], t3[UNROLL];
real ddphi;
real dr_array[3*DIM*UNROLL+UNROLL], *dr;
real buf_array[7*UNROLL+UNROLL], *buf;
real *cp, *phi0, *mult, *phi, *p, *q, *sf_i, *msf_l;
gmx_mm_pr phi0_S, phi_S;
gmx_mm_pr mx_S, my_S, mz_S;
gmx_mm_pr nx_S, ny_S, nz_S;
gmx_mm_pr nrkj_m2_S, nrkj_n2_S;
gmx_mm_pr cp_S, mdphi_S, mult_S;
gmx_mm_pr sin_S, cos_S;
gmx_mm_pr mddphi_S;
gmx_mm_pr sf_i_S, msf_l_S;
pbc_simd_t pbc_simd;
/* Ensure SIMD register alignment */
dr = gmx_simd_align_real(dr_array);
buf = gmx_simd_align_real(buf_array);
/* Extract aligned pointer for parameters and variables */
cp = buf + 0*UNROLL;
phi0 = buf + 1*UNROLL;
mult = buf + 2*UNROLL;
p = buf + 3*UNROLL;
q = buf + 4*UNROLL;
sf_i = buf + 5*UNROLL;
msf_l = buf + 6*UNROLL;
set_pbc_simd(pbc, &pbc_simd);
/* nbonds is the number of dihedrals times nfa1, here we step UNROLL dihs */
for (i = 0; (i < nbonds); i += UNROLL*nfa1)
{
/* Collect atoms quadruplets for UNROLL dihedrals.
 * iu indexes into forceatoms, we should not let iu go beyond nbonds.
 */
iu = i;
for (s = 0; s < UNROLL; s++)
{
type = forceatoms[iu];
ai[s] = forceatoms[iu+1];
aj[s] = forceatoms[iu+2];
ak[s] = forceatoms[iu+3];
al[s] = forceatoms[iu+4];
/* Only the A-state parameters are used: this no-energy path is
 * not called for perturbed dihedrals.
 */
cp[s] = forceparams[type].pdihs.cpA;
phi0[s] = forceparams[type].pdihs.phiA*DEG2RAD;
mult[s] = forceparams[type].pdihs.mult;
/* At the end fill the arrays with identical entries */
if (iu + nfa1 < nbonds)
{
iu += nfa1;
}
}
/* Caclulate UNROLL dihedral angles at once */
dih_angle_simd(x, ai, aj, ak, al, &pbc_simd,
dr,
&phi_S,
&mx_S, &my_S, &mz_S,
&nx_S, &ny_S, &nz_S,
&nrkj_m2_S,
&nrkj_n2_S,
p, q);
cp_S = gmx_load_pr(cp);
phi0_S = gmx_load_pr(phi0);
mult_S = gmx_load_pr(mult);
mdphi_S = gmx_sub_pr(gmx_mul_pr(mult_S, phi_S), phi0_S);
/* Calculate UNROLL sines at once */
gmx_sincos_pr(mdphi_S, &sin_S, &cos_S);
mddphi_S = gmx_mul_pr(gmx_mul_pr(cp_S, mult_S), sin_S);
sf_i_S = gmx_mul_pr(mddphi_S, nrkj_m2_S);
msf_l_S = gmx_mul_pr(mddphi_S, nrkj_n2_S);
/* After this m?_S will contain f[i] */
mx_S = gmx_mul_pr(sf_i_S, mx_S);
my_S = gmx_mul_pr(sf_i_S, my_S);
mz_S = gmx_mul_pr(sf_i_S, mz_S);
/* After this m?_S will contain -f[l] */
nx_S = gmx_mul_pr(msf_l_S, nx_S);
ny_S = gmx_mul_pr(msf_l_S, ny_S);
nz_S = gmx_mul_pr(msf_l_S, nz_S);
/* Park the per-lane forces in dr so the scalar update below can
 * read them back one dihedral at a time.
 */
gmx_store_pr(dr + 0*UNROLL, mx_S);
gmx_store_pr(dr + 1*UNROLL, my_S);
gmx_store_pr(dr + 2*UNROLL, mz_S);
gmx_store_pr(dr + 3*UNROLL, nx_S);
gmx_store_pr(dr + 4*UNROLL, ny_S);
gmx_store_pr(dr + 5*UNROLL, nz_S);
iu = i;
s = 0;
do
{
do_dih_fup_noshiftf_precalc(ai[s], aj[s], ak[s], al[s],
p[s], q[s],
dr[ XX *UNROLL+s],
dr[ YY *UNROLL+s],
dr[ ZZ *UNROLL+s],
dr[(DIM+XX)*UNROLL+s],
dr[(DIM+YY)*UNROLL+s],
dr[(DIM+ZZ)*UNROLL+s],
f);
s++;
iu += nfa1;
}
while (s < UNROLL && iu < nbonds);
}
#undef UNROLL
}
#endif /* SIMD_BONDEDS */
real idihs(int nbonds,
const t_iatom forceatoms[], const t_iparams forceparams[],
const rvec x[], rvec f[], rvec fshift[],
const t_pbc *pbc, const t_graph *g,
real lambda, real *dvdlambda,
const t_mdatoms *md, t_fcdata *fcd,
int *global_atom_index)
{
/* Harmonic improper dihedrals: V = 0.5*k*(phi - phi0)^2 with the
 * difference folded into (-pi, pi). Returns the total energy and
 * accumulates forces, shift forces and dV/dlambda.
 */
int i, type, ai, aj, ak, al;
int t1, t2, t3;
real phi, phi0, dphi0, ddphi, sign, vtot;
rvec r_ij, r_kj, r_kl, m, n;
real L1, kk, dp, dp2, kA, kB, pA, pB, dvdl_term;
L1 = 1.0-lambda;
dvdl_term = 0;
vtot = 0.0;
for (i = 0; (i < nbonds); )
{
type = forceatoms[i++];
ai = forceatoms[i++];
aj = forceatoms[i++];
ak = forceatoms[i++];
al = forceatoms[i++];
phi = dih_angle(x[ai], x[aj], x[ak], x[al], pbc, r_ij, r_kj, r_kl, m, n,
&sign, &t1, &t2, &t3); /* 84 */
/* phi can jump if phi0 is close to Pi/-Pi, which will cause huge
 * force changes if we just apply a normal harmonic.
 * Instead, we first calculate phi-phi0 and take it modulo (-Pi,Pi).
 * This means we will never have the periodicity problem, unless
 * the dihedral is Pi away from phi0, which is very unlikely due to
 * the potential.
 */
kA = forceparams[type].harmonic.krA;
kB = forceparams[type].harmonic.krB;
pA = forceparams[type].harmonic.rA;
pB = forceparams[type].harmonic.rB;
kk = L1*kA + lambda*kB;
phi0 = (L1*pA + lambda*pB)*DEG2RAD;
dphi0 = (pB - pA)*DEG2RAD;
dp = phi-phi0;
make_dp_periodic(&dp);
dp2 = dp*dp;
vtot += 0.5*kk*dp2;
ddphi = -kk*dp;
dvdl_term += 0.5*(kB - kA)*dp2 - kk*dphi0*dp;
/* ddphi holds -dV/dphi, so negate it again for do_dih_fup */
do_dih_fup(ai, aj, ak, al, (real)(-ddphi), r_ij, r_kj, r_kl, m, n,
f, fshift, pbc, g, x, t1, t2, t3); /* 112 */
/* 218 TOTAL */
#ifdef DEBUG
if (debug)
{
fprintf(debug, "idih: (%d,%d,%d,%d) phi=%g\n",
ai, aj, ak, al, phi);
}
#endif
}
*dvdlambda += dvdl_term;
return vtot;
}
real posres(int nbonds,
const t_iatom forceatoms[], const t_iparams forceparams[],
const rvec x[], rvec f[], rvec vir_diag,
t_pbc *pbc,
real lambda, real *dvdlambda,
int refcoord_scaling, int ePBC, rvec comA, rvec comB)
{
/* Position restraints: harmonic restraint of each listed atom to a
 * reference position, with per-dimension force constants, optional
 * box scaling of the reference coordinates (refcoord_scaling), and
 * A/B-state interpolation via lambda. Returns the total energy and
 * accumulates forces and the diagonal virial contribution.
 */
int i, ai, m, d, type, ki, npbcdim = 0;
const t_iparams *pr;
real L1;
real vtot, kk, fm;
real posA, posB, ref = 0;
rvec comA_sc, comB_sc, rdist, dpdl, pos, dx;
gmx_bool bForceValid = TRUE;
if ((f == NULL) || (vir_diag == NULL)) /* should both be null together! */
{
bForceValid = FALSE;
}
npbcdim = ePBC2npbcdim(ePBC);
if (refcoord_scaling == erscCOM)
{
/* Pre-scale the A and B reference centers of mass by the box */
clear_rvec(comA_sc);
clear_rvec(comB_sc);
for (m = 0; m < npbcdim; m++)
{
for (d = m; d < npbcdim; d++)
{
comA_sc[m] += comA[d]*pbc->box[d][m];
comB_sc[m] += comB[d]*pbc->box[d][m];
}
}
}
L1 = 1.0 - lambda;
vtot = 0.0;
for (i = 0; (i < nbonds); )
{
type = forceatoms[i++];
ai = forceatoms[i++];
pr = &forceparams[type];
/* Build the (possibly scaled) reference position for this atom */
for (m = 0; m < DIM; m++)
{
posA = forceparams[type].posres.pos0A[m];
posB = forceparams[type].posres.pos0B[m];
if (m < npbcdim)
{
switch (refcoord_scaling)
{
case erscNO:
ref = 0;
rdist[m] = L1*posA + lambda*posB;
dpdl[m] = posB - posA;
break;
case erscALL:
/* Box relative coordinates are stored for dimensions with pbc */
posA *= pbc->box[m][m];
posB *= pbc->box[m][m];
for (d = m+1; d < npbcdim; d++)
{
posA += forceparams[type].posres.pos0A[d]*pbc->box[d][m];
posB += forceparams[type].posres.pos0B[d]*pbc->box[d][m];
}
ref = L1*posA + lambda*posB;
rdist[m] = 0;
dpdl[m] = posB - posA;
break;
case erscCOM:
ref = L1*comA_sc[m] + lambda*comB_sc[m];
rdist[m] = L1*posA + lambda*posB;
dpdl[m] = comB_sc[m] - comA_sc[m] + posB - posA;
break;
default:
gmx_fatal(FARGS, "No such scaling method implemented");
}
}
else
{
ref = L1*posA + lambda*posB;
rdist[m] = 0;
dpdl[m] = posB - posA;
}
/* We do pbc_dx with ref+rdist,
 * since with only ref we can be up to half a box vector wrong.
 */
pos[m] = ref + rdist[m];
}
if (pbc)
{
pbc_dx(pbc, x[ai], pos, dx);
}
else
{
rvec_sub(x[ai], pos, dx);
}
/* Harmonic energy/force per dimension, plus dV/dlambda */
for (m = 0; (m < DIM); m++)
{
kk = L1*pr->posres.fcA[m] + lambda*pr->posres.fcB[m];
fm = -kk*dx[m];
vtot += 0.5*kk*dx[m]*dx[m];
*dvdlambda +=
0.5*(pr->posres.fcB[m] - pr->posres.fcA[m])*dx[m]*dx[m]
-fm*dpdl[m];
/* Here we correct for the pbc_dx which included rdist */
if (bForceValid)
{
f[ai][m] += fm;
vir_diag[m] -= 0.5*(dx[m] + rdist[m])*fm;
}
}
}
return vtot;
}
static real low_angres(int nbonds,
const t_iatom forceatoms[], const t_iparams forceparams[],
const rvec x[], rvec f[], rvec fshift[],
const t_pbc *pbc, const t_graph *g,
real lambda, real *dvdlambda,
gmx_bool bZAxis)
{
/* Angle restraints: restrain the angle between bond i-j and either
 * bond k-l (bZAxis FALSE, 4 atoms per entry) or the fixed z-axis
 * (bZAxis TRUE, 2 atoms per entry), using the dopdihs_min potential.
 */
int i, m, type, ai, aj, ak, al;
int t1, t2;
real phi, cos_phi, cos_phi2, vid, vtot, dVdphi;
rvec r_ij, r_kl, f_i, f_k = {0, 0, 0};
real st, sth, nrij2, nrkl2, c, cij, ckl;
ivec dt;
t2 = 0; /* avoid warning with gcc-3.3. It is never used uninitialized */
vtot = 0.0;
ak = al = 0; /* to avoid warnings */
for (i = 0; i < nbonds; )
{
type = forceatoms[i++];
ai = forceatoms[i++];
aj = forceatoms[i++];
t1 = pbc_rvec_sub(pbc, x[aj], x[ai], r_ij); /* 3 */
if (!bZAxis)
{
ak = forceatoms[i++];
al = forceatoms[i++];
t2 = pbc_rvec_sub(pbc, x[al], x[ak], r_kl); /* 3 */
}
else
{
/* Restrain against the laboratory z-axis instead of a bond */
r_kl[XX] = 0;
r_kl[YY] = 0;
r_kl[ZZ] = 1;
}
cos_phi = cos_angle(r_ij, r_kl); /* 25 */
phi = acos(cos_phi); /* 10 */
*dvdlambda += dopdihs_min(forceparams[type].pdihs.cpA,
forceparams[type].pdihs.cpB,
forceparams[type].pdihs.phiA,
forceparams[type].pdihs.phiB,
forceparams[type].pdihs.mult,
phi, lambda, &vid, &dVdphi); /* 40 */
vtot += vid;
cos_phi2 = sqr(cos_phi); /* 1 */
/* Force is undefined when the vectors are (anti)parallel: skip */
if (cos_phi2 < 1)
{
st = -dVdphi*gmx_invsqrt(1 - cos_phi2); /* 12 */
sth = st*cos_phi; /* 1 */
nrij2 = iprod(r_ij, r_ij); /* 5 */
nrkl2 = iprod(r_kl, r_kl); /* 5 */
c = st*gmx_invsqrt(nrij2*nrkl2); /* 11 */
cij = sth/nrij2; /* 10 */
ckl = sth/nrkl2; /* 10 */
for (m = 0; m < DIM; m++) /* 18+18 */
{
f_i[m] = (c*r_kl[m]-cij*r_ij[m]);
f[ai][m] += f_i[m];
f[aj][m] -= f_i[m];
if (!bZAxis)
{
f_k[m] = (c*r_ij[m]-ckl*r_kl[m]);
f[ak][m] += f_k[m];
f[al][m] -= f_k[m];
}
}
if (g)
{
ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, aj), dt);
t1 = IVEC2IS(dt);
}
rvec_inc(fshift[t1], f_i);
rvec_dec(fshift[CENTRAL], f_i);
if (!bZAxis)
{
if (g)
{
ivec_sub(SHIFT_IVEC(g, ak), SHIFT_IVEC(g, al), dt);
t2 = IVEC2IS(dt);
}
rvec_inc(fshift[t2], f_k);
rvec_dec(fshift[CENTRAL], f_k);
}
}
}
return vtot; /* 184 / 157 (bZAxis) total */
}
real angres(int nbonds,
const t_iatom forceatoms[], const t_iparams forceparams[],
const rvec x[], rvec f[], rvec fshift[],
const t_pbc *pbc, const t_graph *g,
real lambda, real *dvdlambda,
const t_mdatoms *md, t_fcdata *fcd,
int *global_atom_index)
{
/* Angle restraint between two bonds (4 atoms per entry) */
return low_angres(nbonds, forceatoms, forceparams, x, f, fshift, pbc, g,
lambda, dvdlambda, FALSE);
}
real angresz(int nbonds,
const t_iatom forceatoms[], const t_iparams forceparams[],
const rvec x[], rvec f[], rvec fshift[],
const t_pbc *pbc, const t_graph *g,
real lambda, real *dvdlambda,
const t_mdatoms *md, t_fcdata *fcd,
int *global_atom_index)
{
/* Angle restraint between a bond and the z-axis (2 atoms per entry) */
return low_angres(nbonds, forceatoms, forceparams, x, f, fshift, pbc, g,
lambda, dvdlambda, TRUE);
}
real dihres(int nbonds,
const t_iatom forceatoms[], const t_iparams forceparams[],
const rvec x[], rvec f[], rvec fshift[],
const t_pbc *pbc, const t_graph *g,
real lambda, real *dvdlambda,
const t_mdatoms *md, t_fcdata *fcd,
int *global_atom_index)
{
/* Flat-bottomed dihedral restraints: harmonic only outside a band of
 * width +/- dphi around phi0; A/B parameters interpolated by lambda.
 */
real vtot = 0;
int ai, aj, ak, al, i, k, type, t1, t2, t3;
real phi0A, phi0B, dphiA, dphiB, kfacA, kfacB, phi0, dphi, kfac;
real phi, ddphi, ddp, ddp2, dp, sign, d2r, fc, L1;
rvec r_ij, r_kj, r_kl, m, n;
L1 = 1.0-lambda;
d2r = DEG2RAD;
k = 0;
for (i = 0; (i < nbonds); )
{
type = forceatoms[i++];
ai = forceatoms[i++];
aj = forceatoms[i++];
ak = forceatoms[i++];
al = forceatoms[i++];
phi0A = forceparams[type].dihres.phiA*d2r;
dphiA = forceparams[type].dihres.dphiA*d2r;
kfacA = forceparams[type].dihres.kfacA;
phi0B = forceparams[type].dihres.phiB*d2r;
dphiB = forceparams[type].dihres.dphiB*d2r;
kfacB = forceparams[type].dihres.kfacB;
phi0 = L1*phi0A + lambda*phi0B;
dphi = L1*dphiA + lambda*dphiB;
kfac = L1*kfacA + lambda*kfacB;
phi = dih_angle(x[ai], x[aj], x[ak], x[al], pbc, r_ij, r_kj, r_kl, m, n,
&sign, &t1, &t2, &t3);
/* 84 flops */
if (debug)
{
fprintf(debug, "dihres[%d]: %d %d %d %d : phi=%f, dphi=%f, kfac=%f\n",
k++, ai, aj, ak, al, phi0, dphi, kfac);
}
/* phi can jump if phi0 is close to Pi/-Pi, which will cause huge
 * force changes if we just apply a normal harmonic.
 * Instead, we first calculate phi-phi0 and take it modulo (-Pi,Pi).
 * This means we will never have the periodicity problem, unless
 * the dihedral is Pi away from phi0, which is very unlikely due to
 * the potential.
 */
dp = phi-phi0;
make_dp_periodic(&dp);
/* ddp is the violation: distance outside the flat region */
if (dp > dphi)
{
ddp = dp-dphi;
}
else if (dp < -dphi)
{
ddp = dp+dphi;
}
else
{
ddp = 0;
}
if (ddp != 0.0)
{
ddp2 = ddp*ddp;
vtot += 0.5*kfac*ddp2;
ddphi = kfac*ddp;
*dvdlambda += 0.5*(kfacB - kfacA)*ddp2;
/* lambda dependence from changing restraint distances */
if (ddp > 0)
{
*dvdlambda -= kfac*ddp*((dphiB - dphiA)+(phi0B - phi0A));
}
else if (ddp < 0)
{
*dvdlambda += kfac*ddp*((dphiB - dphiA)-(phi0B - phi0A));
}
do_dih_fup(ai, aj, ak, al, ddphi, r_ij, r_kj, r_kl, m, n,
f, fshift, pbc, g, x, t1, t2, t3); /* 112 */
}
}
return vtot;
}
real unimplemented(int nbonds,
const t_iatom forceatoms[], const t_iparams forceparams[],
const rvec x[], rvec f[], rvec fshift[],
const t_pbc *pbc, const t_graph *g,
real lambda, real *dvdlambda,
const t_mdatoms *md, t_fcdata *fcd,
int *global_atom_index)
{
/* Placeholder for interaction types in the function table that have
 * no implementation; calling it is a fatal error via gmx_impl.
 */
gmx_impl("*** you are using a not implemented function");
return 0.0; /* To make the compiler happy */
}
real rbdihs(int nbonds,
const t_iatom forceatoms[], const t_iparams forceparams[],
const rvec x[], rvec f[], rvec fshift[],
const t_pbc *pbc, const t_graph *g,
real lambda, real *dvdlambda,
const t_mdatoms *md, t_fcdata *fcd,
int *global_atom_index)
{
/* Ryckaert-Bellemans dihedrals: V = sum_j parm[j]*cos(psi)^j with
 * psi = phi - pi (polymer convention). The power series and its
 * derivative are accumulated with an explicitly unrolled Horner-style
 * recurrence over the 6 coefficients.
 */
const real c0 = 0.0, c1 = 1.0, c2 = 2.0, c3 = 3.0, c4 = 4.0, c5 = 5.0;
int type, ai, aj, ak, al, i, j;
int t1, t2, t3;
rvec r_ij, r_kj, r_kl, m, n;
real parmA[NR_RBDIHS];
real parmB[NR_RBDIHS];
real parm[NR_RBDIHS];
real cos_phi, phi, rbp, rbpBA;
real v, sign, ddphi, sin_phi;
real cosfac, vtot;
real L1 = 1.0-lambda;
real dvdl_term = 0;
vtot = 0.0;
for (i = 0; (i < nbonds); )
{
type = forceatoms[i++];
ai = forceatoms[i++];
aj = forceatoms[i++];
ak = forceatoms[i++];
al = forceatoms[i++];
phi = dih_angle(x[ai], x[aj], x[ak], x[al], pbc, r_ij, r_kj, r_kl, m, n,
&sign, &t1, &t2, &t3); /* 84 */
/* Change to polymer convention */
if (phi < c0)
{
phi += M_PI;
}
else
{
phi -= M_PI; /* 1 */
}
cos_phi = cos(phi);
/* Beware of accuracy loss, cannot use 1-sqrt(cos^2) ! */
sin_phi = sin(phi);
/* Lambda-interpolate the 6 RB coefficients */
for (j = 0; (j < NR_RBDIHS); j++)
{
parmA[j] = forceparams[type].rbdihs.rbcA[j];
parmB[j] = forceparams[type].rbdihs.rbcB[j];
parm[j] = L1*parmA[j]+lambda*parmB[j];
}
/* Calculate cosine powers */
/* Calculate the energy */
/* Calculate the derivative */
v = parm[0];
dvdl_term += (parmB[0]-parmA[0]);
ddphi = c0;
cosfac = c1;
/* Unrolled accumulation: before each term j, ddphi picks up
 * j*parm[j]*cos^(j-1), then cosfac advances to cos^j for v/dvdl.
 */
rbp = parm[1];
rbpBA = parmB[1]-parmA[1];
ddphi += rbp*cosfac;
cosfac *= cos_phi;
v += cosfac*rbp;
dvdl_term += cosfac*rbpBA;
rbp = parm[2];
rbpBA = parmB[2]-parmA[2];
ddphi += c2*rbp*cosfac;
cosfac *= cos_phi;
v += cosfac*rbp;
dvdl_term += cosfac*rbpBA;
rbp = parm[3];
rbpBA = parmB[3]-parmA[3];
ddphi += c3*rbp*cosfac;
cosfac *= cos_phi;
v += cosfac*rbp;
dvdl_term += cosfac*rbpBA;
rbp = parm[4];
rbpBA = parmB[4]-parmA[4];
ddphi += c4*rbp*cosfac;
cosfac *= cos_phi;
v += cosfac*rbp;
dvdl_term += cosfac*rbpBA;
rbp = parm[5];
rbpBA = parmB[5]-parmA[5];
ddphi += c5*rbp*cosfac;
cosfac *= cos_phi;
v += cosfac*rbp;
dvdl_term += cosfac*rbpBA;
/* dV/dphi = -sin(psi) * dV/dcos(psi) */
ddphi = -ddphi*sin_phi; /* 11 */
do_dih_fup(ai, aj, ak, al, ddphi, r_ij, r_kj, r_kl, m, n,
f, fshift, pbc, g, x, t1, t2, t3); /* 112 */
vtot += v;
}
*dvdlambda += dvdl_term;
return vtot;
}
int cmap_setup_grid_index(int ip, int grid_spacing, int *ipm1, int *ipp1, int *ipp2)
{
/* Normalize a CMAP grid index onto the periodic grid and return it,
 * also producing the periodically wrapped neighbour indices:
 * *ipm1 = ip-1, *ipp1 = ip+1, *ipp2 = ip+2 (all wrapped).
 */
int im1, ip1, ip2;
/* Fold an out-of-range index back into the grid */
if (ip < 0)
{
ip += grid_spacing - 1;
}
else if (ip > grid_spacing)
{
ip -= grid_spacing + 1;
}
im1 = ip - 1;
ip1 = ip + 1;
ip2 = ip + 2;
/* Wrap the neighbours that fall off either edge of the grid */
if (ip == 0)
{
im1 = grid_spacing - 1;
}
else if (ip == grid_spacing - 2)
{
ip2 = 0;
}
else if (ip == grid_spacing - 1)
{
ip1 = 0;
ip2 = 1;
}
*ipm1 = im1;
*ipp1 = ip1;
*ipp2 = ip2;
return ip;
}
real cmap_dihs(int nbonds,
const t_iatom forceatoms[], const t_iparams forceparams[],
const gmx_cmap_t *cmap_grid,
const rvec x[], rvec f[], rvec fshift[],
const t_pbc *pbc, const t_graph *g,
real lambda, real *dvdlambda,
const t_mdatoms *md, t_fcdata *fcd,
int *global_atom_index)
{
/* CMAP correction term (CHARMM): a 2D grid-based potential on the
 * pair of consecutive backbone dihedrals (phi1: i-j-k-l and
 * phi2: j-k-l-m, 5 atoms per entry). The energy and its derivatives
 * are obtained by bicubic interpolation of the tabulated grid.
 * Returns the total CMAP energy; accumulates forces and shift forces.
 */
int i, j, k, n, idx;
int ai, aj, ak, al, am;
int a1i, a1j, a1k, a1l, a2i, a2j, a2k, a2l;
int type, cmapA;
int t11, t21, t31, t12, t22, t32;
int iphi1, ip1m1, ip1p1, ip1p2;
int iphi2, ip2m1, ip2p1, ip2p2;
int l1, l2, l3, l4;
int pos1, pos2, pos3, pos4, tmp;
real ty[4], ty1[4], ty2[4], ty12[4], tc[16], tx[16];
real phi1, psi1, cos_phi1, sin_phi1, sign1, xphi1;
real phi2, psi2, cos_phi2, sin_phi2, sign2, xphi2;
real dx, xx, tt, tu, e, df1, df2, ddf1, ddf2, ddf12, vtot;
real ra21, rb21, rg21, rg1, rgr1, ra2r1, rb2r1, rabr1;
real ra22, rb22, rg22, rg2, rgr2, ra2r2, rb2r2, rabr2;
real fg1, hg1, fga1, hgb1, gaa1, gbb1;
real fg2, hg2, fga2, hgb2, gaa2, gbb2;
real fac;
rvec r1_ij, r1_kj, r1_kl, m1, n1;
rvec r2_ij, r2_kj, r2_kl, m2, n2;
rvec f1_i, f1_j, f1_k, f1_l;
rvec f2_i, f2_j, f2_k, f2_l;
rvec a1, b1, a2, b2;
rvec f1, g1, h1, f2, g2, h2;
rvec dtf1, dtg1, dth1, dtf2, dtg2, dth2;
ivec jt1, dt1_ij, dt1_kj, dt1_lj;
ivec jt2, dt2_ij, dt2_kj, dt2_lj;
const real *cmapd;
/* Column-major traversal order of the 4x4 coefficient matrix,
 * used for the df1/ddf1 accumulation below.
 */
int loop_index[4][4] = {
{0, 4, 8, 12},
{1, 5, 9, 13},
{2, 6, 10, 14},
{3, 7, 11, 15}
};
/* Total CMAP energy */
vtot = 0;
for (n = 0; n < nbonds; )
{
/* Five atoms are involved in the two torsions */
type = forceatoms[n++];
ai = forceatoms[n++];
aj = forceatoms[n++];
ak = forceatoms[n++];
al = forceatoms[n++];
am = forceatoms[n++];
/* Which CMAP type is this */
cmapA = forceparams[type].cmap.cmapA;
cmapd = cmap_grid->cmapdata[cmapA].cmap;
/* First torsion */
a1i = ai;
a1j = aj;
a1k = ak;
a1l = al;
phi1 = dih_angle(x[a1i], x[a1j], x[a1k], x[a1l], pbc, r1_ij, r1_kj, r1_kl, m1, n1,
&sign1, &t11, &t21, &t31); /* 84 */
cos_phi1 = cos(phi1);
/* a1 = r_ij x r_kj, b1 = r_kl x r_kj: plane normals for torsion 1 */
a1[0] = r1_ij[1]*r1_kj[2]-r1_ij[2]*r1_kj[1];
a1[1] = r1_ij[2]*r1_kj[0]-r1_ij[0]*r1_kj[2];
a1[2] = r1_ij[0]*r1_kj[1]-r1_ij[1]*r1_kj[0]; /* 9 */
b1[0] = r1_kl[1]*r1_kj[2]-r1_kl[2]*r1_kj[1];
b1[1] = r1_kl[2]*r1_kj[0]-r1_kl[0]*r1_kj[2];
b1[2] = r1_kl[0]*r1_kj[1]-r1_kl[1]*r1_kj[0]; /* 9 */
tmp = pbc_rvec_sub(pbc, x[a1l], x[a1k], h1);
ra21 = iprod(a1, a1); /* 5 */
rb21 = iprod(b1, b1); /* 5 */
rg21 = iprod(r1_kj, r1_kj); /* 5 */
rg1 = sqrt(rg21);
rgr1 = 1.0/rg1;
ra2r1 = 1.0/ra21;
rb2r1 = 1.0/rb21;
rabr1 = sqrt(ra2r1*rb2r1);
sin_phi1 = rg1 * rabr1 * iprod(a1, h1) * (-1);
/* Recompute phi1 from whichever of asin/acos is better conditioned */
if (cos_phi1 < -0.5 || cos_phi1 > 0.5)
{
phi1 = asin(sin_phi1);
if (cos_phi1 < 0)
{
if (phi1 > 0)
{
phi1 = M_PI - phi1;
}
else
{
phi1 = -M_PI - phi1;
}
}
}
else
{
phi1 = acos(cos_phi1);
if (sin_phi1 < 0)
{
phi1 = -phi1;
}
}
xphi1 = phi1 + M_PI; /* 1 */
/* Second torsion */
a2i = aj;
a2j = ak;
a2k = al;
a2l = am;
phi2 = dih_angle(x[a2i], x[a2j], x[a2k], x[a2l], pbc, r2_ij, r2_kj, r2_kl, m2, n2,
&sign2, &t12, &t22, &t32); /* 84 */
cos_phi2 = cos(phi2);
a2[0] = r2_ij[1]*r2_kj[2]-r2_ij[2]*r2_kj[1];
a2[1] = r2_ij[2]*r2_kj[0]-r2_ij[0]*r2_kj[2];
a2[2] = r2_ij[0]*r2_kj[1]-r2_ij[1]*r2_kj[0]; /* 9 */
b2[0] = r2_kl[1]*r2_kj[2]-r2_kl[2]*r2_kj[1];
b2[1] = r2_kl[2]*r2_kj[0]-r2_kl[0]*r2_kj[2];
b2[2] = r2_kl[0]*r2_kj[1]-r2_kl[1]*r2_kj[0]; /* 9 */
tmp = pbc_rvec_sub(pbc, x[a2l], x[a2k], h2);
ra22 = iprod(a2, a2); /* 5 */
rb22 = iprod(b2, b2); /* 5 */
rg22 = iprod(r2_kj, r2_kj); /* 5 */
rg2 = sqrt(rg22);
rgr2 = 1.0/rg2;
ra2r2 = 1.0/ra22;
rb2r2 = 1.0/rb22;
rabr2 = sqrt(ra2r2*rb2r2);
sin_phi2 = rg2 * rabr2 * iprod(a2, h2) * (-1);
if (cos_phi2 < -0.5 || cos_phi2 > 0.5)
{
phi2 = asin(sin_phi2);
if (cos_phi2 < 0)
{
if (phi2 > 0)
{
phi2 = M_PI - phi2;
}
else
{
phi2 = -M_PI - phi2;
}
}
}
else
{
phi2 = acos(cos_phi2);
if (sin_phi2 < 0)
{
phi2 = -phi2;
}
}
xphi2 = phi2 + M_PI; /* 1 */
/* Range mangling: shift both angles into [0, 2*pi) */
if (xphi1 < 0)
{
xphi1 = xphi1 + 2*M_PI;
}
else if (xphi1 >= 2*M_PI)
{
xphi1 = xphi1 - 2*M_PI;
}
if (xphi2 < 0)
{
xphi2 = xphi2 + 2*M_PI;
}
else if (xphi2 >= 2*M_PI)
{
xphi2 = xphi2 - 2*M_PI;
}
/* Number of grid points */
dx = 2*M_PI / cmap_grid->grid_spacing;
/* Where on the grid are we */
iphi1 = (int)(xphi1/dx);
iphi2 = (int)(xphi2/dx);
iphi1 = cmap_setup_grid_index(iphi1, cmap_grid->grid_spacing, &ip1m1, &ip1p1, &ip1p2);
iphi2 = cmap_setup_grid_index(iphi2, cmap_grid->grid_spacing, &ip2m1, &ip2p1, &ip2p2);
/* The four corners of the enclosing grid cell; each grid point
 * stores 4 values: V, dV/dphi1, dV/dphi2, d2V/dphi1dphi2.
 */
pos1 = iphi1*cmap_grid->grid_spacing+iphi2;
pos2 = ip1p1*cmap_grid->grid_spacing+iphi2;
pos3 = ip1p1*cmap_grid->grid_spacing+ip2p1;
pos4 = iphi1*cmap_grid->grid_spacing+ip2p1;
ty[0] = cmapd[pos1*4];
ty[1] = cmapd[pos2*4];
ty[2] = cmapd[pos3*4];
ty[3] = cmapd[pos4*4];
ty1[0] = cmapd[pos1*4+1];
ty1[1] = cmapd[pos2*4+1];
ty1[2] = cmapd[pos3*4+1];
ty1[3] = cmapd[pos4*4+1];
ty2[0] = cmapd[pos1*4+2];
ty2[1] = cmapd[pos2*4+2];
ty2[2] = cmapd[pos3*4+2];
ty2[3] = cmapd[pos4*4+2];
ty12[0] = cmapd[pos1*4+3];
ty12[1] = cmapd[pos2*4+3];
ty12[2] = cmapd[pos3*4+3];
ty12[3] = cmapd[pos4*4+3];
/* Switch to degrees */
dx = 360.0 / cmap_grid->grid_spacing;
xphi1 = xphi1 * RAD2DEG;
xphi2 = xphi2 * RAD2DEG;
/* Scale the derivatives by the grid spacing for the bicubic fit */
for (i = 0; i < 4; i++) /* 16 */
{
tx[i] = ty[i];
tx[i+4] = ty1[i]*dx;
tx[i+8] = ty2[i]*dx;
tx[i+12] = ty12[i]*dx*dx;
}
/* Compute the 16 bicubic coefficients tc from the corner data */
idx = 0;
for (i = 0; i < 4; i++) /* 1056 */
{
for (j = 0; j < 4; j++)
{
xx = 0;
for (k = 0; k < 16; k++)
{
xx = xx + cmap_coeff_matrix[k*16+idx]*tx[k];
}
idx++;
tc[i*4+j] = xx;
}
}
/* Fractional position inside the cell */
tt = (xphi1-iphi1*dx)/dx;
tu = (xphi2-iphi2*dx)/dx;
e = 0;
df1 = 0;
df2 = 0;
ddf1 = 0;
ddf2 = 0;
ddf12 = 0;
/* Horner evaluation of energy and first/second derivatives */
for (i = 3; i >= 0; i--)
{
l1 = loop_index[i][3];
l2 = loop_index[i][2];
l3 = loop_index[i][1];
e = tt * e + ((tc[i*4+3]*tu+tc[i*4+2])*tu + tc[i*4+1])*tu+tc[i*4];
df1 = tu * df1 + (3.0*tc[l1]*tt+2.0*tc[l2])*tt+tc[l3];
df2 = tt * df2 + (3.0*tc[i*4+3]*tu+2.0*tc[i*4+2])*tu+tc[i*4+1];
ddf1 = tu * ddf1 + 2.0*3.0*tc[l1]*tt+2.0*tc[l2];
ddf2 = tt * ddf2 + 2.0*3.0*tc[4*i+3]*tu+2.0*tc[4*i+2];
}
ddf12 = tc[5] + 2.0*tc[9]*tt + 3.0*tc[13]*tt*tt + 2.0*tu*(tc[6]+2.0*tc[10]*tt+3.0*tc[14]*tt*tt) +
3.0*tu*tu*(tc[7]+2.0*tc[11]*tt+3.0*tc[15]*tt*tt);
/* Convert derivatives from per-cell units back to per-radian */
fac = RAD2DEG/dx;
df1 = df1 * fac;
df2 = df2 * fac;
ddf1 = ddf1 * fac * fac;
ddf2 = ddf2 * fac * fac;
ddf12 = ddf12 * fac * fac;
/* CMAP energy */
vtot += e;
/* Do forces - first torsion */
fg1 = iprod(r1_ij, r1_kj);
hg1 = iprod(r1_kl, r1_kj);
fga1 = fg1*ra2r1*rgr1;
hgb1 = hg1*rb2r1*rgr1;
gaa1 = -ra2r1*rg1;
gbb1 = rb2r1*rg1;
for (i = 0; i < DIM; i++)
{
dtf1[i] = gaa1 * a1[i];
dtg1[i] = fga1 * a1[i] - hgb1 * b1[i];
dth1[i] = gbb1 * b1[i];
f1[i] = df1 * dtf1[i];
g1[i] = df1 * dtg1[i];
h1[i] = df1 * dth1[i];
f1_i[i] = f1[i];
f1_j[i] = -f1[i] - g1[i];
f1_k[i] = h1[i] + g1[i];
f1_l[i] = -h1[i];
f[a1i][i] = f[a1i][i] + f1_i[i];
f[a1j][i] = f[a1j][i] + f1_j[i]; /* - f1[i] - g1[i] */
f[a1k][i] = f[a1k][i] + f1_k[i]; /* h1[i] + g1[i] */
f[a1l][i] = f[a1l][i] + f1_l[i]; /* h1[i] */
}
/* Do forces - second torsion */
fg2 = iprod(r2_ij, r2_kj);
hg2 = iprod(r2_kl, r2_kj);
fga2 = fg2*ra2r2*rgr2;
hgb2 = hg2*rb2r2*rgr2;
gaa2 = -ra2r2*rg2;
gbb2 = rb2r2*rg2;
for (i = 0; i < DIM; i++)
{
dtf2[i] = gaa2 * a2[i];
dtg2[i] = fga2 * a2[i] - hgb2 * b2[i];
dth2[i] = gbb2 * b2[i];
f2[i] = df2 * dtf2[i];
g2[i] = df2 * dtg2[i];
h2[i] = df2 * dth2[i];
f2_i[i] = f2[i];
f2_j[i] = -f2[i] - g2[i];
f2_k[i] = h2[i] + g2[i];
f2_l[i] = -h2[i];
f[a2i][i] = f[a2i][i] + f2_i[i]; /* f2[i] */
f[a2j][i] = f[a2j][i] + f2_j[i]; /* - f2[i] - g2[i] */
f[a2k][i] = f[a2k][i] + f2_k[i]; /* h2[i] + g2[i] */
f[a2l][i] = f[a2l][i] + f2_l[i]; /* - h2[i] */
}
/* Shift forces */
if (g)
{
copy_ivec(SHIFT_IVEC(g, a1j), jt1);
ivec_sub(SHIFT_IVEC(g, a1i), jt1, dt1_ij);
ivec_sub(SHIFT_IVEC(g, a1k), jt1, dt1_kj);
ivec_sub(SHIFT_IVEC(g, a1l), jt1, dt1_lj);
t11 = IVEC2IS(dt1_ij);
t21 = IVEC2IS(dt1_kj);
t31 = IVEC2IS(dt1_lj);
copy_ivec(SHIFT_IVEC(g, a2j), jt2);
ivec_sub(SHIFT_IVEC(g, a2i), jt2, dt2_ij);
ivec_sub(SHIFT_IVEC(g, a2k), jt2, dt2_kj);
ivec_sub(SHIFT_IVEC(g, a2l), jt2, dt2_lj);
t12 = IVEC2IS(dt2_ij);
t22 = IVEC2IS(dt2_kj);
t32 = IVEC2IS(dt2_lj);
}
else if (pbc)
{
t31 = pbc_rvec_sub(pbc, x[a1l], x[a1j], h1);
t32 = pbc_rvec_sub(pbc, x[a2l], x[a2j], h2);
}
else
{
t31 = CENTRAL;
t32 = CENTRAL;
}
rvec_inc(fshift[t11], f1_i);
rvec_inc(fshift[CENTRAL], f1_j);
rvec_inc(fshift[t21], f1_k);
rvec_inc(fshift[t31], f1_l);
rvec_inc(fshift[t21], f2_i);
rvec_inc(fshift[CENTRAL], f2_j);
rvec_inc(fshift[t22], f2_k);
rvec_inc(fshift[t32], f2_l);
}
return vtot;
}
/***********************************************************
*
* G R O M O S 9 6 F U N C T I O N S
*
***********************************************************/
real g96harmonic(real kA, real kB, real xA, real xB, real x, real lambda,
real *V, real *F)
{
/* GROMOS-96 style harmonic term: V = 0.5*kk*(x - x0)^2 with kk and
 * x0 lambda-interpolated between the A and B states. Note that for
 * G96 bonds x is the squared bond length. Writes the energy to *V
 * and the force factor to *F; returns dV/dlambda.
 */
const real oml = 1.0 - lambda;
const real kk = oml*kA + lambda*kB;
const real x0 = oml*xA + lambda*xB;
const real dx = x - x0;
const real dx2 = dx*dx;
*F = -kk*dx;
*V = 0.5*kk*dx2;
return 0.5*(kB-kA)*dx2 + (xA-xB)*kk*dx;
/* That was 21 flops */
}
/* GROMOS-96 bonds: the "harmonic" term is evaluated on the squared bond
 * length dr2, so no square root is needed per bond (the rA/rB parameters
 * hold the reference squared lengths).
 *
 * Accumulates forces into f and shift (virial) forces into fshift,
 * adds dV/dlambda to *dvdlambda and returns the total potential.
 */
real g96bonds(int nbonds,
              const t_iatom forceatoms[], const t_iparams forceparams[],
              const rvec x[], rvec f[], rvec fshift[],
              const t_pbc *pbc, const t_graph *g,
              real lambda, real *dvdlambda,
              const t_mdatoms *md, t_fcdata *fcd,
              int *global_atom_index)
{
    int  i, m, ki, ai, aj, type;
    real dr2, fbond, vbond, fij, vtot;
    rvec dx;
    ivec dt;

    vtot = 0.0;
    /* forceatoms is a flat list of (type, ai, aj) triplets */
    for (i = 0; (i < nbonds); )
    {
        type = forceatoms[i++];
        ai   = forceatoms[i++];
        aj   = forceatoms[i++];

        /* PBC-corrected bond vector; ki is the shift index for the virial */
        ki  = pbc_rvec_sub(pbc, x[ai], x[aj], dx); /* 3 */
        dr2 = iprod(dx, dx);                       /* 5 */
        *dvdlambda += g96harmonic(forceparams[type].harmonic.krA,
                                  forceparams[type].harmonic.krB,
                                  forceparams[type].harmonic.rA,
                                  forceparams[type].harmonic.rB,
                                  dr2, lambda, &vbond, &fbond);
        vtot += 0.5*vbond;                         /* 1*/
#ifdef DEBUG
        if (debug)
        {
            fprintf(debug, "G96-BONDS: dr = %10g vbond = %10g fbond = %10g\n",
                    sqrt(dr2), vbond, fbond);
        }
#endif
        if (g)
        {
            /* With a connectivity graph, take the shift from the graph */
            ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, aj), dt);
            ki = IVEC2IS(dt);
        }
        for (m = 0; (m < DIM); m++)                /* 15 */
        {
            fij                 = fbond*dx[m];
            f[ai][m]           += fij;
            f[aj][m]           -= fij;
            fshift[ki][m]      += fij;
            fshift[CENTRAL][m] -= fij;
        }
    }               /* 44 TOTAL */
    return vtot;
}
/* Compute the cosine of the angle formed by bonds i-j and j-k.
 * Also returns the PBC-corrected bond vectors r_ij and r_kj and their
 * shift indices t1, t2 for the virial bookkeeping of the caller. */
real g96bond_angle(const rvec xi, const rvec xj, const rvec xk, const t_pbc *pbc,
                   rvec r_ij, rvec r_kj,
                   int *t1, int *t2)
/* Return value is the angle between the bonds i-j and j-k */
{
    *t1 = pbc_rvec_sub(pbc, xi, xj, r_ij); /* 3  */
    *t2 = pbc_rvec_sub(pbc, xk, xj, r_kj); /* 3  */

    return cos_angle(r_ij, r_kj);          /* 25 */
    /* 41 TOTAL */
}
/* GROMOS-96 angles: harmonic in cos(theta) rather than in theta itself,
 * which avoids an acos per angle.
 *
 * Accumulates atomic forces into f, shift forces into fshift, adds
 * dV/dlambda to *dvdlambda and returns the total potential.
 */
real g96angles(int nbonds,
               const t_iatom forceatoms[], const t_iparams forceparams[],
               const rvec x[], rvec f[], rvec fshift[],
               const t_pbc *pbc, const t_graph *g,
               real lambda, real *dvdlambda,
               const t_mdatoms *md, t_fcdata *fcd,
               int *global_atom_index)
{
    int  i, ai, aj, ak, type, m, t1, t2;
    rvec r_ij, r_kj;
    real cos_theta, dVdt, va, vtot;
    real rij_1, rij_2, rkj_1, rkj_2, rijrkj_1;
    rvec f_i, f_j, f_k;
    ivec jt, dt_ij, dt_kj;

    vtot = 0.0;
    /* forceatoms is a flat list of (type, ai, aj, ak) quadruplets */
    for (i = 0; (i < nbonds); )
    {
        type = forceatoms[i++];
        ai   = forceatoms[i++];
        aj   = forceatoms[i++];
        ak   = forceatoms[i++];

        cos_theta  = g96bond_angle(x[ai], x[aj], x[ak], pbc, r_ij, r_kj, &t1, &t2);
        /* The "harmonic" acts on cos(theta); rA/rB hold reference cosines */
        *dvdlambda += g96harmonic(forceparams[type].harmonic.krA,
                                  forceparams[type].harmonic.krB,
                                  forceparams[type].harmonic.rA,
                                  forceparams[type].harmonic.rB,
                                  cos_theta, lambda, &va, &dVdt);
        vtot    += va;

        /* Inverse bond lengths and their products, needed for the
         * derivative of cos(theta) w.r.t. the atomic positions */
        rij_1    = gmx_invsqrt(iprod(r_ij, r_ij));
        rkj_1    = gmx_invsqrt(iprod(r_kj, r_kj));
        rij_2    = rij_1*rij_1;
        rkj_2    = rkj_1*rkj_1;
        rijrkj_1 = rij_1*rkj_1;                 /* 23 */

#ifdef DEBUG
        if (debug)
        {
            fprintf(debug, "G96ANGLES: costheta = %10g vth = %10g dV/dct = %10g\n",
                    cos_theta, va, dVdt);
        }
#endif
        for (m = 0; (m < DIM); m++)             /* 42	*/
        {
            /* Chain rule: f = -dV/dcos(theta) * dcos(theta)/dx */
            f_i[m]    = dVdt*(r_kj[m]*rijrkj_1 - r_ij[m]*rij_2*cos_theta);
            f_k[m]    = dVdt*(r_ij[m]*rijrkj_1 - r_kj[m]*rkj_2*cos_theta);
            f_j[m]    = -f_i[m]-f_k[m];         /* Newton's third law */
            f[ai][m] += f_i[m];
            f[aj][m] += f_j[m];
            f[ak][m] += f_k[m];
        }
        if (g)
        {
            /* Shift indices from the connectivity graph, relative to aj */
            copy_ivec(SHIFT_IVEC(g, aj), jt);

            ivec_sub(SHIFT_IVEC(g, ai), jt, dt_ij);
            ivec_sub(SHIFT_IVEC(g, ak), jt, dt_kj);
            t1 = IVEC2IS(dt_ij);
            t2 = IVEC2IS(dt_kj);
        }
        rvec_inc(fshift[t1], f_i);
        rvec_inc(fshift[CENTRAL], f_j);
        rvec_inc(fshift[t2], f_k);              /* 9 */
        /* 163 TOTAL	*/
    }
    return vtot;
}
/* Bond-bond cross term: V = krr * (r1 - r1e) * (r2 - r2e), coupling the
 * lengths of the two bonds i-j and k-j that share atom j.
 *
 * Accumulates forces into f and shift forces into fshift; returns the
 * total potential (which can be negative). No lambda dependence.
 */
real cross_bond_bond(int nbonds,
                     const t_iatom forceatoms[], const t_iparams forceparams[],
                     const rvec x[], rvec f[], rvec fshift[],
                     const t_pbc *pbc, const t_graph *g,
                     real lambda, real *dvdlambda,
                     const t_mdatoms *md, t_fcdata *fcd,
                     int *global_atom_index)
{
    /* Potential from Lawrence and Skimmer, Chem. Phys. Lett. 372 (2003)
     * pp. 842-847
     */
    int  i, ai, aj, ak, type, m, t1, t2;
    rvec r_ij, r_kj;
    real vtot, vrr, s1, s2, r1, r2, r1e, r2e, krr;
    rvec f_i, f_j, f_k;
    ivec jt, dt_ij, dt_kj;

    vtot = 0.0;
    for (i = 0; (i < nbonds); )
    {
        type = forceatoms[i++];
        ai   = forceatoms[i++];
        aj   = forceatoms[i++];
        ak   = forceatoms[i++];
        r1e  = forceparams[type].cross_bb.r1e;
        r2e  = forceparams[type].cross_bb.r2e;
        krr  = forceparams[type].cross_bb.krr;

        /* Compute distance vectors ... */
        t1 = pbc_rvec_sub(pbc, x[ai], x[aj], r_ij);
        t2 = pbc_rvec_sub(pbc, x[ak], x[aj], r_kj);

        /* ... and their lengths */
        r1 = norm(r_ij);
        r2 = norm(r_kj);

        /* Deviations from ideality */
        s1 = r1-r1e;
        s2 = r2-r2e;

        /* Energy (can be negative!) */
        vrr   = krr*s1*s2;
        vtot += vrr;

        /* Forces: each bond's force is scaled by the other bond's deviation */
        svmul(-krr*s2/r1, r_ij, f_i);
        svmul(-krr*s1/r2, r_kj, f_k);

        for (m = 0; (m < DIM); m++)             /* 12	*/
        {
            f_j[m]    = -f_i[m] - f_k[m];       /* Newton's third law */
            f[ai][m] += f_i[m];
            f[aj][m] += f_j[m];
            f[ak][m] += f_k[m];
        }

        /* Virial stuff */
        if (g)
        {
            copy_ivec(SHIFT_IVEC(g, aj), jt);

            ivec_sub(SHIFT_IVEC(g, ai), jt, dt_ij);
            ivec_sub(SHIFT_IVEC(g, ak), jt, dt_kj);
            t1 = IVEC2IS(dt_ij);
            t2 = IVEC2IS(dt_kj);
        }
        rvec_inc(fshift[t1], f_i);
        rvec_inc(fshift[CENTRAL], f_j);
        rvec_inc(fshift[t2], f_k);              /* 9 */
        /* 163 TOTAL	*/
    }
    return vtot;
}
/* Bond-angle cross term: V = krt * (r3 - r3e) * ((r1 - r1e) + (r2 - r2e)),
 * coupling the i-k distance to the two bonds of the angle i-j-k.
 *
 * Accumulates forces into f and shift forces into fshift; returns the
 * total potential (which can be negative). No lambda dependence.
 */
real cross_bond_angle(int nbonds,
                      const t_iatom forceatoms[], const t_iparams forceparams[],
                      const rvec x[], rvec f[], rvec fshift[],
                      const t_pbc *pbc, const t_graph *g,
                      real lambda, real *dvdlambda,
                      const t_mdatoms *md, t_fcdata *fcd,
                      int *global_atom_index)
{
    /* Potential from Lawrence and Skimmer, Chem. Phys. Lett. 372 (2003)
     * pp. 842-847
     */
    int  i, ai, aj, ak, type, m, t1, t2, t3;
    rvec r_ij, r_kj, r_ik;
    real vtot, vrt, s1, s2, s3, r1, r2, r3, r1e, r2e, r3e, krt, k1, k2, k3;
    rvec f_i, f_j, f_k;
    ivec jt, dt_ij, dt_kj;

    vtot = 0.0;
    for (i = 0; (i < nbonds); )
    {
        type = forceatoms[i++];
        ai   = forceatoms[i++];
        aj   = forceatoms[i++];
        ak   = forceatoms[i++];
        r1e  = forceparams[type].cross_ba.r1e;
        r2e  = forceparams[type].cross_ba.r2e;
        r3e  = forceparams[type].cross_ba.r3e;
        krt  = forceparams[type].cross_ba.krt;

        /* Compute distance vectors ... */
        t1 = pbc_rvec_sub(pbc, x[ai], x[aj], r_ij);
        t2 = pbc_rvec_sub(pbc, x[ak], x[aj], r_kj);
        /* NOTE(review): t3 (the i-k shift index) is computed but never used
         * for the shift forces below; the r_ik contribution is credited to
         * t1/t2 only -- confirm this is intended for the virial. */
        t3 = pbc_rvec_sub(pbc, x[ai], x[ak], r_ik);

        /* ... and their lengths */
        r1 = norm(r_ij);
        r2 = norm(r_kj);
        r3 = norm(r_ik);

        /* Deviations from ideality */
        s1 = r1-r1e;
        s2 = r2-r2e;
        s3 = r3-r3e;

        /* Energy (can be negative!) */
        vrt   = krt*s3*(s1+s2);
        vtot += vrt;

        /* Forces */
        k1 = -krt*(s3/r1);
        k2 = -krt*(s3/r2);
        k3 = -krt*(s1+s2)/r3;
        for (m = 0; (m < DIM); m++)
        {
            f_i[m] = k1*r_ij[m] + k3*r_ik[m];
            f_k[m] = k2*r_kj[m] - k3*r_ik[m];
            f_j[m] = -f_i[m] - f_k[m];          /* Newton's third law */
        }

        for (m = 0; (m < DIM); m++)             /* 12	*/
        {
            f[ai][m] += f_i[m];
            f[aj][m] += f_j[m];
            f[ak][m] += f_k[m];
        }

        /* Virial stuff */
        if (g)
        {
            copy_ivec(SHIFT_IVEC(g, aj), jt);

            ivec_sub(SHIFT_IVEC(g, ai), jt, dt_ij);
            ivec_sub(SHIFT_IVEC(g, ak), jt, dt_kj);
            t1 = IVEC2IS(dt_ij);
            t2 = IVEC2IS(dt_kj);
        }
        rvec_inc(fshift[t1], f_i);
        rvec_inc(fshift[CENTRAL], f_j);
        rvec_inc(fshift[t2], f_k);              /* 9 */
        /* 163 TOTAL	*/
    }
    return vtot;
}
/* Evaluate a tabulated bonded interaction by cubic-spline interpolation.
 *
 * The table stores 4 values per grid point (Y, F, G, H; hence nnn = 4*n0).
 * The force constant is linearly interpolated between kA and kB with lambda.
 *
 * type     name of the interaction, only used in the fatal-error message
 * table_nr table number, only used in the fatal-error message
 * table    the table data and scale (points per unit of r)
 * r        the distance/angle at which to evaluate
 * V, F     output potential and force (F includes the -dV/dr sign and scale)
 *
 * Returns dV/dlambda = (kB - kA) * V_table(r).
 */
static real bonded_tab(const char *type, int table_nr,
                       const bondedtable_t *table, real kA, real kB, real r,
                       real lambda, real *V, real *F)
{
    real k, tabscale, *VFtab, rt, eps, eps2, Yt, Ft, Geps, Heps2, Fp, VV, FF;
    int  n0, nnn;
    real v, f, dvdlambda;

    k = (1.0 - lambda)*kA + lambda*kB;

    tabscale = table->scale;
    VFtab    = table->data;

    /* Locate the table interval: n0 is the lower grid point,
     * eps the fractional position within the interval */
    rt    = r*tabscale;
    n0    = rt;
    if (n0 >= table->n)
    {
        gmx_fatal(FARGS, "A tabulated %s interaction table number %d is out of the table range: r %f, between table indices %d and %d, table length %d",
                  type, table_nr, r, n0, n0+1, table->n);
    }
    eps   = rt - n0;
    eps2  = eps*eps;
    nnn   = 4*n0;
    Yt    = VFtab[nnn];
    Ft    = VFtab[nnn+1];
    Geps  = VFtab[nnn+2]*eps;
    Heps2 = VFtab[nnn+3]*eps2;
    /* Cubic spline: V = Y + eps*(F + eps*G + eps^2*H) */
    Fp    = Ft + Geps + Heps2;
    VV    = Yt + Fp*eps;
    FF    = Fp + Geps + 2.0*Heps2;  /* dV/d(eps) */

    *F         = -k*FF*tabscale;    /* convert d/d(eps) to d/dr */
    *V         = k*VV;
    dvdlambda  = (kB - kA)*VV;

    return dvdlambda;

    /* That was 22 flops */
}
/* Tabulated bonds: look up V(r) and F(r) in a cubic-spline table.
 *
 * Accumulates forces into f, shift forces into fshift, adds dV/dlambda
 * to *dvdlambda and returns the total potential.
 */
real tab_bonds(int nbonds,
               const t_iatom forceatoms[], const t_iparams forceparams[],
               const rvec x[], rvec f[], rvec fshift[],
               const t_pbc *pbc, const t_graph *g,
               real lambda, real *dvdlambda,
               const t_mdatoms *md, t_fcdata *fcd,
               int *global_atom_index)
{
    int  i, m, ki, ai, aj, type, table;
    real dr, dr2, fbond, vbond, fij, vtot;
    rvec dx;
    ivec dt;

    vtot = 0.0;
    /* forceatoms is a flat list of (type, ai, aj) triplets */
    for (i = 0; (i < nbonds); )
    {
        type = forceatoms[i++];
        ai   = forceatoms[i++];
        aj   = forceatoms[i++];

        ki   = pbc_rvec_sub(pbc, x[ai], x[aj], dx); /* 3 */
        dr2  = iprod(dx, dx);                       /* 5 */

        if (dr2 == 0.0)
        {
            /* Skip zero-length bonds BEFORE the table lookup: previously
             * this check came after it, but dr = dr2*gmx_invsqrt(dr2)
             * evaluates to 0*inf = NaN at dr2 == 0, making the table index
             * undefined. There is no force at exactly zero distance. */
            continue;
        }

        dr   = dr2*gmx_invsqrt(dr2);                /* 10 */

        table = forceparams[type].tab.table;

        *dvdlambda += bonded_tab("bond", table,
                                 &fcd->bondtab[table],
                                 forceparams[type].tab.kA,
                                 forceparams[type].tab.kB,
                                 dr, lambda, &vbond, &fbond); /* 22 */

        vtot  += vbond;                             /* 1*/
        fbond *= gmx_invsqrt(dr2);                  /* 6 */
#ifdef DEBUG
        if (debug)
        {
            fprintf(debug, "TABBONDS: dr = %10g vbond = %10g fbond = %10g\n",
                    dr, vbond, fbond);
        }
#endif
        if (g)
        {
            /* With a connectivity graph, take the shift from the graph */
            ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, aj), dt);
            ki = IVEC2IS(dt);
        }
        for (m = 0; (m < DIM); m++)                 /* 15 */
        {
            fij                 = fbond*dx[m];
            f[ai][m]           += fij;
            f[aj][m]           -= fij;
            fshift[ki][m]      += fij;
            fshift[CENTRAL][m] -= fij;
        }
    }               /* 62 TOTAL */
    return vtot;
}
/* Tabulated angles: look up V(theta) and dV/dtheta in a cubic-spline table.
 *
 * Accumulates forces into f, shift forces into fshift, adds dV/dlambda
 * to *dvdlambda and returns the total potential.
 */
real tab_angles(int nbonds,
                const t_iatom forceatoms[], const t_iparams forceparams[],
                const rvec x[], rvec f[], rvec fshift[],
                const t_pbc *pbc, const t_graph *g,
                real lambda, real *dvdlambda,
                const t_mdatoms *md, t_fcdata *fcd,
                int *global_atom_index)
{
    int  i, ai, aj, ak, t1, t2, type, table;
    rvec r_ij, r_kj;
    real cos_theta, cos_theta2, theta, dVdt, va, vtot;
    ivec jt, dt_ij, dt_kj;

    vtot = 0.0;
    /* forceatoms is a flat list of (type, ai, aj, ak) quadruplets */
    for (i = 0; (i < nbonds); )
    {
        type = forceatoms[i++];
        ai   = forceatoms[i++];
        aj   = forceatoms[i++];
        ak   = forceatoms[i++];

        theta  = bond_angle(x[ai], x[aj], x[ak], pbc,
                            r_ij, r_kj, &cos_theta, &t1, &t2); /* 41 */

        table = forceparams[type].tab.table;

        *dvdlambda += bonded_tab("angle", table,
                                 &fcd->angletab[table],
                                 forceparams[type].tab.kA,
                                 forceparams[type].tab.kB,
                                 theta, lambda, &va, &dVdt); /* 22  */
        vtot += va;

        cos_theta2 = sqr(cos_theta);            /* 1 */
        /* Only apply forces when sin(theta) != 0; at theta = 0 or pi the
         * angle force direction is undefined (1/sin(theta) diverges). */
        if (cos_theta2 < 1)
        {
            int  m;
            real snt, st, sth;
            real cik, cii, ckk;
            real nrkj2, nrij2;
            rvec f_i, f_j, f_k;

            /* Chain rule: dV/dcos(theta) = -dV/dtheta / sin(theta) */
            st  = dVdt*gmx_invsqrt(1 - cos_theta2); /* 12		*/
            sth = st*cos_theta;                     /* 1		*/
#ifdef DEBUG
            if (debug)
            {
                fprintf(debug, "ANGLES: theta = %10g  vth = %10g  dV/dtheta = %10g\n",
                        theta*RAD2DEG, va, dVdt);
            }
#endif
            nrkj2 = iprod(r_kj, r_kj);              /* 5		*/
            nrij2 = iprod(r_ij, r_ij);

            cik = st*gmx_invsqrt(nrkj2*nrij2);      /* 12		*/
            cii = sth/nrij2;                        /* 10		*/
            ckk = sth/nrkj2;                        /* 10		*/

            for (m = 0; (m < DIM); m++)             /* 39		*/
            {
                f_i[m]    = -(cik*r_kj[m]-cii*r_ij[m]);
                f_k[m]    = -(cik*r_ij[m]-ckk*r_kj[m]);
                f_j[m]    = -f_i[m]-f_k[m];         /* Newton's third law */
                f[ai][m] += f_i[m];
                f[aj][m] += f_j[m];
                f[ak][m] += f_k[m];
            }
            if (g)
            {
                /* Shift indices from the connectivity graph, relative to aj */
                copy_ivec(SHIFT_IVEC(g, aj), jt);

                ivec_sub(SHIFT_IVEC(g, ai), jt, dt_ij);
                ivec_sub(SHIFT_IVEC(g, ak), jt, dt_kj);
                t1 = IVEC2IS(dt_ij);
                t2 = IVEC2IS(dt_kj);
            }
            rvec_inc(fshift[t1], f_i);
            rvec_inc(fshift[CENTRAL], f_j);
            rvec_inc(fshift[t2], f_k);
        }                                           /* 169 TOTAL	*/
    }
    return vtot;
}
/* Tabulated proper dihedrals: look up V(phi) and dV/dphi in a cubic-spline
 * table indexed on phi + pi (i.e. shifted to [0, 2 pi)).
 *
 * Forces are spread over the four atoms by do_dih_fup; dV/dlambda is added
 * to *dvdlambda and the total potential is returned.
 */
real tab_dihs(int nbonds,
              const t_iatom forceatoms[], const t_iparams forceparams[],
              const rvec x[], rvec f[], rvec fshift[],
              const t_pbc *pbc, const t_graph *g,
              real lambda, real *dvdlambda,
              const t_mdatoms *md, t_fcdata *fcd,
              int *global_atom_index)
{
    int  i, type, ai, aj, ak, al, table;
    int  t1, t2, t3;
    rvec r_ij, r_kj, r_kl, m, n;
    real phi, sign, ddphi, vpd, vtot;

    vtot = 0.0;
    /* forceatoms is a flat list of (type, ai, aj, ak, al) quintets */
    for (i = 0; (i < nbonds); )
    {
        type = forceatoms[i++];
        ai   = forceatoms[i++];
        aj   = forceatoms[i++];
        ak   = forceatoms[i++];
        al   = forceatoms[i++];

        phi = dih_angle(x[ai], x[aj], x[ak], x[al], pbc, r_ij, r_kj, r_kl, m, n,
                        &sign, &t1, &t2, &t3);  /* 84  */

        table = forceparams[type].tab.table;

        /* Hopefully phi+M_PI never results in values < 0 */
        *dvdlambda += bonded_tab("dihedral", table,
                                 &fcd->dihtab[table],
                                 forceparams[type].tab.kA,
                                 forceparams[type].tab.kB,
                                 phi+M_PI, lambda, &vpd, &ddphi);

        vtot += vpd;
        do_dih_fup(ai, aj, ak, al, -ddphi, r_ij, r_kj, r_kl, m, n,
                   f, fshift, pbc, g, x, t1, t2, t3); /* 112	*/

#ifdef DEBUG
        /* Bug fix: guard on debug like the sibling kernels do; debug can be
         * NULL when no debug file is open, and fprintf(NULL, ...) crashes. */
        if (debug)
        {
            fprintf(debug, "pdih: (%d,%d,%d,%d) phi=%g\n",
                    ai, aj, ak, al, phi);
        }
#endif
    } /* 227 TOTAL  */

    return vtot;
}
/* Return if this is a potential calculated in bondfree.c,
* i.e. an interaction that actually calculates a potential and
* works on multiple atoms (not e.g. a connection or a position restraint).
*/
/* Return whether ftype is a potential calculated in bondfree.c, i.e. an
 * interaction that actually calculates a potential and works on multiple
 * atoms (not e.g. a connection or a position restraint). */
static gmx_inline gmx_bool ftype_is_bonded_potential(int ftype)
{
    if (!(interaction_function[ftype].flags & IF_BOND))
    {
        return FALSE;
    }
    /* Connections and position restraints are bookkeeping, not potentials */
    if (ftype == F_CONNBONDS || ftype == F_POSRES)
    {
        return FALSE;
    }
    /* The GB 1-2/1-3/1-4 terms are handled elsewhere */
    return !(ftype >= F_GB12 && ftype <= F_GB14);
}
/* Divide the bonded interactions of each type over nthreads threads.
 *
 * Fills idef->il_thread_division with, for every bonded function type,
 * nthreads+1 boundaries into the iatoms list; thread t works on the
 * half-open range [boundary[t], boundary[t+1]).
 */
static void divide_bondeds_over_threads(t_idef *idef, int nthreads)
{
    int ftype;
    int nat1;
    int t;
    int il_nr_thread;

    idef->nthreads = nthreads;

    /* Grow the division array if needed: F_NRE types, nthreads+1 bounds each */
    if (F_NRE*(nthreads+1) > idef->il_thread_division_nalloc)
    {
        idef->il_thread_division_nalloc = F_NRE*(nthreads+1);
        snew(idef->il_thread_division, idef->il_thread_division_nalloc);
    }

    for (ftype = 0; ftype < F_NRE; ftype++)
    {
        if (ftype_is_bonded_potential(ftype))
        {
            /* Stride in iatoms: one type index plus nratoms atom indices */
            nat1 = interaction_function[ftype].nratoms + 1;

            for (t = 0; t <= nthreads; t++)
            {
                /* Divide the interactions equally over the threads.
                 * When the different types of bonded interactions
                 * are distributed roughly equally over the threads,
                 * this should lead to well localized output into
                 * the force buffer on each thread.
                 * If this is not the case, a more advanced scheme
                 * (not implemented yet) will do better.
                 */
                il_nr_thread = (((idef->il[ftype].nr/nat1)*t)/nthreads)*nat1;

                /* Ensure that distance restraint pairs with the same label
                 * end up on the same thread.
                 * This is slighlty tricky code, since the next for iteration
                 * may have an initial il_nr_thread lower than the final value
                 * in the previous iteration, but this will anyhow be increased
                 * to the approriate value again by this while loop.
                 */
                while (ftype == F_DISRES &&
                       il_nr_thread > 0 &&
                       il_nr_thread < idef->il[ftype].nr &&
                       idef->iparams[idef->il[ftype].iatoms[il_nr_thread]].disres.label ==
                       idef->iparams[idef->il[ftype].iatoms[il_nr_thread-nat1]].disres.label)
                {
                    il_nr_thread += nat1;
                }

                idef->il_thread_division[ftype*(nthreads+1)+t] = il_nr_thread;
            }
        }
    }
}
/* Compute the force-buffer block mask for thread t out of nt threads.
 *
 * Bit b of the returned mask is set when this thread's share of the bonded
 * interactions writes forces to any atom in block b, where an atom's block
 * is its index right-shifted by 'shift' (block size 2^shift atoms).
 * NOTE(review): assumes atom_index>>shift < 32 (one bit per block in an
 * unsigned) -- guaranteed by the shift chosen in setup_bonded_threading.
 */
static unsigned
calc_bonded_reduction_mask(const t_idef *idef,
                           int shift,
                           int t, int nt)
{
    unsigned mask;
    int      ftype, nb, nat1, nb0, nb1, i, a;

    mask = 0;

    for (ftype = 0; ftype < F_NRE; ftype++)
    {
        if (ftype_is_bonded_potential(ftype))
        {
            nb = idef->il[ftype].nr;
            if (nb > 0)
            {
                nat1 = interaction_function[ftype].nratoms + 1;

                /* Divide this interaction equally over the threads.
                 * This is not stored: should match division in calc_bonds.
                 */
                nb0 = idef->il_thread_division[ftype*(nt+1)+t];
                nb1 = idef->il_thread_division[ftype*(nt+1)+t+1];

                for (i = nb0; i < nb1; i += nat1)
                {
                    /* a = 1 skips the interaction-type entry */
                    for (a = 1; a < nat1; a++)
                    {
                        mask |= (1U << (idef->il[ftype].iatoms[i+a]>>shift));
                    }
                }
            }
        }
    }

    return mask;
}
/* Set up the thread division and force-reduction bookkeeping for
 * multi-threaded bonded force calculation.
 *
 * Divides the bondeds over fr->nthreads threads, chooses a block shift
 * such that the force array fits in at most 32 blocks, and computes for
 * each non-master thread a bit mask of which blocks it writes to.
 */
void setup_bonded_threading(t_forcerec *fr, t_idef *idef)
{
#define MAX_BLOCK_BITS 32
    int t;
    int ctot, c, b;

#ifndef NDEBUG
    assert(fr->nthreads >= 1);
#endif

    /* Divide the bonded interaction over the threads */
    divide_bondeds_over_threads(idef, fr->nthreads);

    if (fr->nthreads == 1)
    {
        /* Single-threaded: no reduction needed */
        fr->red_nblock = 0;

        return;
    }

    /* We divide the force array in a maximum of 32 blocks.
     * Minimum force block reduction size is 2^6=64.
     */
    fr->red_ashift = 6;
    while (fr->natoms_force > (int)(MAX_BLOCK_BITS*(1U<<fr->red_ashift)))
    {
        fr->red_ashift++;
    }
    if (debug)
    {
        fprintf(debug, "bonded force buffer block atom shift %d bits\n",
                fr->red_ashift);
    }

    /* Determine to which blocks each thread's bonded force calculation
     * contributes. Store this is a mask for each thread.
     */
#pragma omp parallel for num_threads(fr->nthreads) schedule(static)
    for (t = 1; t < fr->nthreads; t++)
    {
        fr->f_t[t].red_mask =
            calc_bonded_reduction_mask(idef, fr->red_ashift, t, fr->nthreads);
    }

    /* Determine the maximum number of blocks we need to reduce over */
    fr->red_nblock = 0;
    ctot           = 0;
    for (t = 0; t < fr->nthreads; t++)
    {
        c = 0;
        for (b = 0; b < MAX_BLOCK_BITS; b++)
        {
            if (fr->f_t[t].red_mask & (1U<<b))
            {
                fr->red_nblock = max(fr->red_nblock, b+1);
                c++;
            }
        }
        if (debug)
        {
            fprintf(debug, "thread %d flags %x count %d\n",
                    t, fr->f_t[t].red_mask, c);
        }
        ctot += c;
    }
    if (debug)
    {
        /* Reduction density: how much of the force array is touched */
        fprintf(debug, "Number of blocks to reduce: %d of size %d\n",
                fr->red_nblock, 1<<fr->red_ashift);
        fprintf(debug, "Reduction density %.2f density/#thread %.2f\n",
                ctot*(1<<fr->red_ashift)/(double)fr->natoms_force,
                ctot*(1<<fr->red_ashift)/(double)(fr->natoms_force*fr->nthreads));
    }
}
/* Clear a thread's private force accumulation buffers before use.
 *
 * Only the force blocks this thread actually writes to (bits set in
 * f_t->red_mask) need clearing. Also zeroes the shift forces, per-type
 * energies, group-pair energies and dV/dlambda accumulators.
 *
 * n         number of atoms in the force array
 * nblock    number of reduction blocks
 * blocksize atoms per reduction block
 */
static void zero_thread_forces(f_thread_t *f_t, int n,
                               int nblock, int blocksize)
{
    int b, a0, a1, a, i, j;

    if (n > f_t->f_nalloc)
    {
        f_t->f_nalloc = over_alloc_large(n);
        srenew(f_t->f, f_t->f_nalloc);
    }

    if (f_t->red_mask != 0)
    {
        for (b = 0; b < nblock; b++)
        {
            /* Bug fix: test this block's bit with bitwise &. The previous
             * logical && was true for every b whenever the mask was
             * non-zero, so ALL blocks were cleared and the sparse-clearing
             * optimization was silently defeated. */
            if (f_t->red_mask & (1U<<b))
            {
                a0 = b*blocksize;
                a1 = min((b+1)*blocksize, n);
                for (a = a0; a < a1; a++)
                {
                    clear_rvec(f_t->f[a]);
                }
            }
        }
    }
    for (i = 0; i < SHIFTS; i++)
    {
        clear_rvec(f_t->fshift[i]);
    }
    for (i = 0; i < F_NRE; i++)
    {
        f_t->ener[i] = 0;
    }
    for (i = 0; i < egNR; i++)
    {
        for (j = 0; j < f_t->grpp.nener; j++)
        {
            f_t->grpp.ener[i][j] = 0;
        }
    }
    for (i = 0; i < efptNR; i++)
    {
        f_t->dvdl[i] = 0;
    }
}
/* Reduce the per-thread force buffers into the main force array f.
 *
 * The reduction is done block-wise and in parallel over blocks; for each
 * block only the threads whose red_mask has that block's bit set are read.
 * Thread 0 writes directly into f, so the loop over threads starts at 1.
 */
static void reduce_thread_force_buffer(int n, rvec *f,
                                       int nthreads, f_thread_t *f_t,
                                       int nblock, int block_size)
{
    /* The max thread number is arbitrary: a fixed upper bound lets us use a
     * stack array of pointers below and avoid memory management. */
#define MAX_BONDED_THREADS 256
    int b;

    if (nthreads > MAX_BONDED_THREADS)
    {
        gmx_fatal(FARGS, "Can not reduce bonded forces on more than %d threads",
                  MAX_BONDED_THREADS);
    }

    /* This reduction can run on any number of threads,
     * independently of nthreads.
     */
#pragma omp parallel for num_threads(nthreads) schedule(static)
    for (b = 0; b < nblock; b++)
    {
        rvec *fp[MAX_BONDED_THREADS];
        int   nfb, ft, fb;
        int   a0, a1, a;

        /* Determine which threads contribute to this block */
        nfb = 0;
        for (ft = 1; ft < nthreads; ft++)
        {
            if (f_t[ft].red_mask & (1U<<b))
            {
                fp[nfb++] = f_t[ft].f;
            }
        }
        if (nfb > 0)
        {
            /* Reduce force buffers for threads that contribute */
            a0 =  b   *block_size;
            a1 = (b+1)*block_size;
            a1 = min(a1, n);    /* the last block can be partial */
            for (a = a0; a < a1; a++)
            {
                for (fb = 0; fb < nfb; fb++)
                {
                    rvec_inc(f[a], fp[fb][a]);
                }
            }
        }
    }
}
/* Reduce all per-thread bonded outputs into the global accumulators:
 * forces (block-wise, parallel), and -- when requested -- shift forces,
 * per-type energies, group-pair energies and dV/dlambda (serially).
 * Thread 0 accumulated directly into the globals, so t starts at 1.
 */
static void reduce_thread_forces(int n, rvec *f, rvec *fshift,
                                 real *ener, gmx_grppairener_t *grpp, real *dvdl,
                                 int nthreads, f_thread_t *f_t,
                                 int nblock, int block_size,
                                 gmx_bool bCalcEnerVir,
                                 gmx_bool bDHDL)
{
    if (nblock > 0)
    {
        /* Reduce the bonded force buffer */
        reduce_thread_force_buffer(n, f, nthreads, f_t, nblock, block_size);
    }

    /* When necessary, reduce energy and virial using one thread only */
    if (bCalcEnerVir)
    {
        int t, i, j;

        for (i = 0; i < SHIFTS; i++)
        {
            for (t = 1; t < nthreads; t++)
            {
                rvec_inc(fshift[i], f_t[t].fshift[i]);
            }
        }
        for (i = 0; i < F_NRE; i++)
        {
            for (t = 1; t < nthreads; t++)
            {
                ener[i] += f_t[t].ener[i];
            }
        }
        for (i = 0; i < egNR; i++)
        {
            /* All threads share the same nener, so f_t[1] is representative */
            for (j = 0; j < f_t[1].grpp.nener; j++)
            {
                for (t = 1; t < nthreads; t++)
                {

                    grpp->ener[i][j] += f_t[t].grpp.ener[i][j];
                }
            }
        }
        if (bDHDL)
        {
            for (i = 0; i < efptNR; i++)
            {

                for (t = 1; t < nthreads; t++)
                {
                    dvdl[i] += f_t[t].dvdl[i];
                }
            }
        }
    }
}
/* Calculate one bonded interaction type for one thread's share of the work.
 *
 * Dispatches to the appropriate kernel: CMAP, SIMD/no-energy fast paths for
 * angles and proper dihedrals, the generic ifunc table, or the listed
 * nonbonded (1-4 pair) routine. The thread's range comes from
 * idef->il_thread_division. Returns the potential energy contribution;
 * dV/dlambda is accumulated into dvdl.
 */
static real calc_one_bond(FILE *fplog, int thread,
                          int ftype, const t_idef *idef,
                          rvec x[], rvec f[], rvec fshift[],
                          t_forcerec *fr,
                          const t_pbc *pbc, const t_graph *g,
                          gmx_grppairener_t *grpp,
                          t_nrnb *nrnb,
                          real *lambda, real *dvdl,
                          const t_mdatoms *md, t_fcdata *fcd,
                          gmx_bool bCalcEnerVir,
                          int *global_atom_index, gmx_bool bPrintSepPot)
{
    int      nat1, nbonds, efptFTYPE;
    real     v = 0;
    t_iatom *iatoms;
    int      nb0, nbn;

    /* Restraints use a separate lambda coupling component */
    if (IS_RESTRAINT_TYPE(ftype))
    {
        efptFTYPE = efptRESTRAINT;
    }
    else
    {
        efptFTYPE = efptBONDED;
    }

    nat1      = interaction_function[ftype].nratoms + 1;
    nbonds    = idef->il[ftype].nr/nat1;
    iatoms    = idef->il[ftype].iatoms;

    /* This thread's half-open range [nb0, nb0+nbn) in iatoms */
    nb0 = idef->il_thread_division[ftype*(idef->nthreads+1)+thread];
    nbn = idef->il_thread_division[ftype*(idef->nthreads+1)+thread+1] - nb0;

    if (!IS_LISTED_LJ_C(ftype))
    {
        if (ftype == F_CMAP)
        {
            /* CMAP needs the correction grid, so it has its own call */
            v = cmap_dihs(nbn, iatoms+nb0,
                          idef->iparams, &idef->cmap_grid,
                          (const rvec*)x, f, fshift,
                          pbc, g, lambda[efptFTYPE], &(dvdl[efptFTYPE]),
                          md, fcd, global_atom_index);
        }
#ifdef SIMD_BONDEDS
        else if (ftype == F_ANGLES &&
                 !bCalcEnerVir && fr->efep == efepNO)
        {
            /* No energies, shift forces, dvdl */
            angles_noener_simd(nbn, idef->il[ftype].iatoms+nb0,
                               idef->iparams,
                               (const rvec*)x, f,
                               pbc, g, lambda[efptFTYPE], md, fcd,
                               global_atom_index);
            v = 0;
        }
#endif
        else if (ftype == F_PDIHS &&
                 !bCalcEnerVir && fr->efep == efepNO)
        {
            /* No energies, shift forces, dvdl */
#ifndef SIMD_BONDEDS
            pdihs_noener
#else
            pdihs_noener_simd
#endif
                (nbn, idef->il[ftype].iatoms+nb0,
                idef->iparams,
                (const rvec*)x, f,
                pbc, g, lambda[efptFTYPE], md, fcd,
                global_atom_index);
            v = 0;
        }
        else
        {
            /* Generic path: dispatch through the interaction table */
            v = interaction_function[ftype].ifunc(nbn, iatoms+nb0,
                                                  idef->iparams,
                                                  (const rvec*)x, f, fshift,
                                                  pbc, g, lambda[efptFTYPE], &(dvdl[efptFTYPE]),
                                                  md, fcd, global_atom_index);
        }
        if (bPrintSepPot)
        {
            fprintf(fplog, "  %-23s #%4d  V %12.5e  dVdl %12.5e\n",
                    interaction_function[ftype].longname,
                    nbonds, v, lambda[efptFTYPE]);
        }
    }
    else
    {
        /* Listed 1-4 LJ/Coulomb pairs */
        v = do_nonbonded_listed(ftype, nbn, iatoms+nb0, idef->iparams, (const rvec*)x, f, fshift,
                                pbc, g, lambda, dvdl, md, fr, grpp, global_atom_index);

        if (bPrintSepPot)
        {
            fprintf(fplog, "  %-5s + %-15s #%4d                  dVdl %12.5e\n",
                    interaction_function[ftype].longname,
                    interaction_function[F_LJ14].longname, nbonds, dvdl[efptVDW]);
            fprintf(fplog, "  %-5s + %-15s #%4d                  dVdl %12.5e\n",
                    interaction_function[ftype].longname,
                    interaction_function[F_COUL14].longname, nbonds, dvdl[efptCOUL]);
        }
    }

    /* Flop accounting only once, on the master thread */
    if (thread == 0)
    {
        inc_nrnb(nrnb, interaction_function[ftype].nrnb_ind, nbonds);
    }

    return v;
}
/* Calculate all bonded forces and energies.
 *
 * First performs pre-calculation that may require inter-simulation
 * communication (orientation and distance restraints), then computes all
 * bonded types in an OpenMP parallel region where each thread accumulates
 * into its own buffers (thread 0 writes directly into the global arrays),
 * and finally reduces the thread buffers.
 */
void calc_bonds(FILE *fplog, const gmx_multisim_t *ms,
                const t_idef *idef,
                rvec x[], history_t *hist,
                rvec f[], t_forcerec *fr,
                const t_pbc *pbc, const t_graph *g,
                gmx_enerdata_t *enerd, t_nrnb *nrnb,
                real *lambda,
                const t_mdatoms *md,
                t_fcdata *fcd, int *global_atom_index,
                t_atomtypes *atype, gmx_genborn_t *born,
                int force_flags,
                gmx_bool bPrintSepPot, gmx_large_int_t step)
{
    gmx_bool      bCalcEnerVir;
    int           i;
    real          v, dvdl[efptNR], dvdl_dum[efptNR]; /* The dummy array is to have a place to store the dhdl at other values
                                                        of lambda, which will be thrown away in the end*/
    const  t_pbc *pbc_null;
    char          buf[22];
    int           thread;

#ifndef NDEBUG
    assert(fr->nthreads == idef->nthreads);
#endif

    bCalcEnerVir = (force_flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY));

    for (i = 0; i < efptNR; i++)
    {
        dvdl[i] = 0.0;
    }

    /* Only apply PBC to bondeds when molecules can be split over cells */
    if (fr->bMolPBC)
    {
        pbc_null = pbc;
    }
    else
    {
        pbc_null = NULL;
    }
    if (bPrintSepPot)
    {
        fprintf(fplog, "Step %s: bonded V and dVdl for this node\n",
                gmx_step_str(step, buf));
    }

#ifdef DEBUG
    if (g && debug)
    {
        p_graph(debug, "Bondage is fun", g);
    }
#endif

    /* Do pre force calculation stuff which might require communication */
    if (idef->il[F_ORIRES].nr)
    {
        enerd->term[F_ORIRESDEV] =
            calc_orires_dev(ms, idef->il[F_ORIRES].nr,
                            idef->il[F_ORIRES].iatoms,
                            idef->iparams, md, (const rvec*)x,
                            pbc_null, fcd, hist);
    }
    if (idef->il[F_DISRES].nr)
    {
        calc_disres_R_6(ms, idef->il[F_DISRES].nr,
                        idef->il[F_DISRES].iatoms,
                        idef->iparams, (const rvec*)x, pbc_null,
                        fcd, hist);
    }

#pragma omp parallel for num_threads(fr->nthreads) schedule(static)
    for (thread = 0; thread < fr->nthreads; thread++)
    {
        int                ftype;
        real              *epot, v;
        /* thread stuff */
        rvec              *ft, *fshift;
        real              *dvdlt;
        gmx_grppairener_t *grpp;

        if (thread == 0)
        {
            /* Master thread accumulates directly into the global arrays */
            ft     = f;
            fshift = fr->fshift;
            epot   = enerd->term;
            grpp   = &enerd->grpp;
            dvdlt  = dvdl;
        }
        else
        {
            /* Other threads use private buffers, reduced afterwards */
            zero_thread_forces(&fr->f_t[thread], fr->natoms_force,
                               fr->red_nblock, 1<<fr->red_ashift);

            ft     = fr->f_t[thread].f;
            fshift = fr->f_t[thread].fshift;
            epot   = fr->f_t[thread].ener;
            grpp   = &fr->f_t[thread].grpp;
            dvdlt  = fr->f_t[thread].dvdl;
        }
        /* Loop over all bonded force types to calculate the bonded forces */
        for (ftype = 0; (ftype < F_NRE); ftype++)
        {
            if (idef->il[ftype].nr > 0 && ftype_is_bonded_potential(ftype))
            {
                v = calc_one_bond(fplog, thread, ftype, idef, x,
                                  ft, fshift, fr, pbc_null, g, grpp,
                                  nrnb, lambda, dvdlt,
                                  md, fcd, bCalcEnerVir,
                                  global_atom_index, bPrintSepPot);
                epot[ftype] += v;
            }
        }
    }
    if (fr->nthreads > 1)
    {
        reduce_thread_forces(fr->natoms_force, f, fr->fshift,
                             enerd->term, &enerd->grpp, dvdl,
                             fr->nthreads, fr->f_t,
                             fr->red_nblock, 1<<fr->red_ashift,
                             bCalcEnerVir,
                             force_flags & GMX_FORCE_DHDL);
    }
    if (force_flags & GMX_FORCE_DHDL)
    {
        for (i = 0; i < efptNR; i++)
        {
            enerd->dvdl_nonlin[i] += dvdl[i];
        }
    }

    /* Copy the sum of violations for the distance restraints from fcd */
    if (fcd)
    {
        enerd->term[F_DISRESVIOL] = fcd->disres.sumviol;
    }
}
/* Calculate the bonded energies at a foreign lambda value.
 *
 * Runs single-threaded over only the perturbed interactions (the
 * non-perturbed ones are lambda-independent). Forces are computed into
 * temporary buffers and discarded; only the energies in epot matter.
 */
void calc_bonds_lambda(FILE *fplog,
                       const t_idef *idef,
                       rvec x[],
                       t_forcerec *fr,
                       const t_pbc *pbc, const t_graph *g,
                       gmx_grppairener_t *grpp, real *epot, t_nrnb *nrnb,
                       real *lambda,
                       const t_mdatoms *md,
                       t_fcdata *fcd,
                       int *global_atom_index)
{
    int           i, ftype, nr_nonperturbed, nr;
    real          v;
    real          dvdl_dum[efptNR];   /* dV/dlambda is thrown away here */
    rvec         *f, *fshift;
    const  t_pbc *pbc_null;
    t_idef        idef_fe;

    if (fr->bMolPBC)
    {
        pbc_null = pbc;
    }
    else
    {
        pbc_null = NULL;
    }

    /* Copy the whole idef, so we can modify the contents locally */
    idef_fe          = *idef;
    idef_fe.nthreads = 1;
    snew(idef_fe.il_thread_division, F_NRE*(idef_fe.nthreads+1));

    /* We already have the forces, so we use temp buffers here */
    snew(f, fr->natoms_force);
    snew(fshift, SHIFTS);

    /* Loop over all bonded force types to calculate the bonded energies */
    for (ftype = 0; (ftype < F_NRE); ftype++)
    {
        if (ftype_is_bonded_potential(ftype))
        {
            /* Set the work range of thread 0 to the perturbed bondeds only */
            nr_nonperturbed                       = idef->il[ftype].nr_nonperturbed;
            nr                                    = idef->il[ftype].nr;
            idef_fe.il_thread_division[ftype*2+0] = nr_nonperturbed;
            idef_fe.il_thread_division[ftype*2+1] = nr;

            /* This is only to get the flop count correct */
            idef_fe.il[ftype].nr = nr - nr_nonperturbed;

            if (nr - nr_nonperturbed > 0)
            {
                v = calc_one_bond(fplog, 0, ftype, &idef_fe,
                                  x, f, fshift, fr, pbc_null, g,
                                  grpp, nrnb, lambda, dvdl_dum,
                                  md, fcd, TRUE,
                                  global_atom_index, FALSE);
                epot[ftype] += v;
            }
        }
    }

    sfree(fshift);
    sfree(f);

    sfree(idef_fe.il_thread_division);
}
|
GB_unaryop__minv_uint16_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint16_fp32
// op(A') function: GB_tran__minv_uint16_fp32
// C type: uint16_t
// A type: float
// cast: uint16_t cij ; GB_CAST_UNSIGNED(cij,aij,16)
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 16)
#define GB_ATYPE \
float
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 16) ;
// casting
#define GB_CASTING(z, x) \
uint16_t z ; GB_CAST_UNSIGNED(z,x,16) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT16 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx = minv (cast (Ax)): for each of the anz entries, cast the float aij to
 * uint16_t and apply the integer multiplicative-inverse operator (see the
 * GB_CAST_OP macro above). Embarrassingly parallel over entries.
 * Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE. */
GrB_Info GB_unop__minv_uint16_fp32
(
    uint16_t *restrict Cx,
    const float *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = minv (cast (A')): transpose A, casting float to uint16_t and applying
 * the minv operator. The actual loop lives in the shared template
 * GB_unaryop_transpose.c, specialized here via the GB_* macros above.
 * Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE. */
GrB_Info GB_tran__minv_uint16_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
ANC.h | /*******************************************************************************
The block below describes the properties of this PIP. A PIP is a short snippet
of code that can be read by the Projucer and used to generate a JUCE project.
BEGIN_JUCE_PIP_METADATA
name: Active Noise Cancelling
version: 1.0.0
vendor: Michał Berdzik
website:
description: Perform ANC on collected data
dependencies: juce_audio_basics, juce_audio_devices, juce_audio_formats,
juce_audio_processors, juce_audio_utils, juce_core,
juce_data_structures, juce_events, juce_graphics,
juce_gui_basics, juce_gui_extra
exporters: vs2017, linux_make
moduleFlags: JUCE_STRICT_REFCOUNTEDPOINTER=1
type: Component
mainClass: ANCInstance
useLocalCopy: 1
END_JUCE_PIP_METADATA
*******************************************************************************/
#pragma once
#include "../Assets/DemoUtilities.h"
#include "../Assets/AudioLiveScrollingDisplay.h"
#include "../DemoRunner/Source/SpectrumAnalyser.h"
#include "../DemoRunner/Source/FilterVisualizer.h"
#include "../DemoRunner/Source/ARM_NLMS.h"
#include "../DemoRunner/Source/config.h"
#include "../DemoRunner/Source/pa_ringbuffer.h"
#include <future>
//#include <omp.h>
#include <string.h>
//==============================================================================
/** A simple class that acts as an AudioIODeviceCallback and writes the
incoming audio data to a WAV file.
*/
class AudioRecorder : public AudioIODeviceCallback
{
public:
AudioRecorder(AudioThumbnail& thumbnailToUpdate)
: thumbnail(thumbnailToUpdate)
{
backgroundThread.startThread();
}
~AudioRecorder() override
{
stop();
}
//==============================================================================
void startRecording(const File& file)
{
stop();
if (sampleRate > 0)
{
// Create an OutputStream to write to our destination file...
file.deleteFile();
if (auto fileStream = std::unique_ptr<FileOutputStream>(file.createOutputStream()))
{
// Now create a WAV writer object that writes to our output stream...
WavAudioFormat wavFormat;
if (auto writer = wavFormat.createWriterFor(fileStream.get(), sampleRate, 2, 16, {}, 0))
{
fileStream.release(); // (passes responsibility for deleting the stream to the writer object that is now using it)
// Now we'll create one of these helper objects which will act as a FIFO buffer, and will
// write the data to disk on our background thread.
threadedWriter.reset(new AudioFormatWriter::ThreadedWriter(writer, backgroundThread, 32768));
// Reset our recording thumbnail
thumbnail.reset(writer->getNumChannels(), writer->getSampleRate());
nextSampleNum = 0;
// And now, swap over our active writer pointer so that the audio callback will start using it..
const ScopedLock sl(writerLock);
activeWriter = threadedWriter.get();
}
}
}
}
void stop()
{
// First, clear this pointer to stop the audio callback from using our writer object..
{
const ScopedLock sl(writerLock);
activeWriter = nullptr;
}
// Now we can delete the writer object. It's done in this order because the deletion could
// take a little time while remaining data gets flushed to disk, so it's best to avoid blocking
// the audio callback while this happens.
threadedWriter.reset();
}
bool isRecording() const
{
return activeWriter.load() != nullptr;
}
//==============================================================================
void audioDeviceAboutToStart(AudioIODevice* device) override
{
sampleRate = device->getCurrentSampleRate();
}
void audioDeviceStopped() override
{
sampleRate = 0;
}
void audioDeviceIOCallback(const float** inputChannelData, int numInputChannels,
float** outputChannelData, int numOutputChannels,
int numSamples) override
{
(void)numOutputChannels;
(void)outputChannelData;
const ScopedLock sl(writerLock);
if (activeWriter.load() != nullptr && numInputChannels >= thumbnail.getNumChannels())
{
activeWriter.load()->write(inputChannelData, numSamples);
// Create an AudioBuffer to wrap our incoming data, note that this does no allocations or copies, it simply references our input data
AudioBuffer<float> buffer(const_cast<float**> (inputChannelData), thumbnail.getNumChannels(), numSamples);
thumbnail.addBlock(nextSampleNum, buffer, 0, numSamples);
nextSampleNum += numSamples;
}
}
private:
// Thumbnail owned by the GUI; this recorder only feeds it sample blocks.
AudioThumbnail& thumbnail;
TimeSliceThread backgroundThread{ "Audio Recorder Thread" }; // the thread that will write our audio data to disk
std::unique_ptr<AudioFormatWriter::ThreadedWriter> threadedWriter; // the FIFO used to buffer the incoming data
// Sample rate of the current device; 0 while no device is running.
double sampleRate = 0.0;
// Running index of the next sample to append to the thumbnail.
int64 nextSampleNum = 0;
// Guards activeWriter handover between the GUI thread and the audio callback.
CriticalSection writerLock;
// Non-owning pointer read by the audio callback; threadedWriter owns the object.
std::atomic<AudioFormatWriter::ThreadedWriter*> activeWriter{ nullptr };
};
//==============================================================================
//==============================================================================
// Simple component that renders a live AudioThumbnail and repaints itself
// whenever the thumbnail broadcasts a change.
class RecordingThumbnail : public Component,
                           private ChangeListener
{
public:
    RecordingThumbnail()
    {
        formatManager.registerBasicFormats();
        thumbnail.addChangeListener (this);
    }

    ~RecordingThumbnail() override
    {
        thumbnail.removeChangeListener (this);
    }

    // Exposes the thumbnail so the recorder can push sample blocks into it.
    AudioThumbnail& getAudioThumbnail()             { return thumbnail; }

    // When true, paint() scales the view to the full recorded length.
    void setDisplayFullThumbnail (bool displayFull)
    {
        displayFullThumb = displayFull;
        repaint();
    }

    void paint (Graphics& g) override
    {
        g.fillAll (Colours::darkgrey);
        g.setColour (Colours::lightgrey);

        auto totalLength = thumbnail.getTotalLength();

        if (totalLength <= 0.0)
        {
            g.setFont (14.0f);
            g.drawFittedText ("(No file recorded)", getLocalBounds(),
                              Justification::centred, 2);
            return;
        }

        // Full view shows the entire recording; otherwise keep the end time
        // at no less than 30 seconds.
        auto drawEndTime = displayFullThumb ? totalLength
                                            : jmax (30.0, totalLength);
        thumbnail.drawChannels (g, getLocalBounds().reduced (2),
                                0.0, drawEndTime, 1.0f);
    }

private:
    // Declaration order matters: thumbnail's constructor references both
    // formatManager and thumbnailCache.
    AudioFormatManager formatManager;
    AudioThumbnailCache thumbnailCache{ 10 };
    AudioThumbnail thumbnail{ 512, formatManager, thumbnailCache };
    bool displayFullThumb = false;

    void changeListenerCallback (ChangeBroadcaster* source) override
    {
        if (source == &thumbnail)
            repaint();
    }

    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR(RecordingThumbnail)
};
//==============================================================================
class ANCInstance : public AudioIODeviceCallback,
private Thread
{
public:
/***************************************************************************//**
* @brief Default constructor of ANCInstance class
* @author Michał Berdzik
* @version 1.0 26/09/2019
* @param
* @return
******************************************************************************/
ANCInstance() :Thread("NLMS Processing Thread")
{
setPriority(realtimeAudioPriority);
}
/***************************************************************************//**
* @brief Parametrized constructor of ANCInstance class
* @author Michał Berdzik
* @version 1.0 26/09/2019
* @param
* @return
******************************************************************************/
ANCInstance(int _filterSize, float _muVal) :Thread("NLMS Processing Thread")
{
filterSize = _filterSize;
muValue = _muVal;
setPriority(realtimeAudioPriority);
}
/***************************************************************************//**
* @brief Default destructor of ANCInstance class
* @author Michał Berdzik
* @version 1.0 26/09/2019
* @param Reference to graphics module
* @return
******************************************************************************/
~ANCInstance() {
stopThread(-1);
}
/***************************************************************************//**
* @brief Overriden function of Thread's run() function
* @author Michał Berdzik
* @version 1.0 26/09/2019
* @param
* @return
******************************************************************************/
void run() override
{
volatile ring_buffer_size_t availableSamples_L = 0;
volatile ring_buffer_size_t readedSamples_L = 0;
volatile ring_buffer_size_t availableSamples_R = 0;
volatile ring_buffer_size_t readedSamples_R = 0;
volatile ring_buffer_size_t processedSamples = 0;
volatile float buffer_L[FRAMES_PER_BUFFER * 4];
volatile float *bufferPtr_L = (float *)buffer_L;
volatile float buffer_R[FRAMES_PER_BUFFER * 4];
volatile float *bufferPtr_R = (float *)buffer_R;
volatile float buffer[FRAMES_PER_BUFFER * 4];
volatile float *bufferPtr = (float *)buffer;
volatile int sampleDelay = 0;
volatile int sampleCount = 48000 * 20;
while (!threadShouldExit())
{
#pragma omp parallel sections
{
#pragma omp section
{
availableSamples_L = PaUtil_GetRingBufferReadAvailable(&ringBufferIn_L);
}
if (availableSamples_L >= numOfSamples)
{
//pthread_mutex_lock( &count_mutex );
readedSamples_L = PaUtil_ReadRingBuffer(&ringBufferIn_L, (float*)bufferPtr_L, numOfSamples);
readedSamples_R = PaUtil_ReadRingBuffer(&ringBufferIn_R, (float*)bufferPtr_R, numOfSamples);
//do processing here
//monoIIRHP.processSamples((float*)bufferPtr_L, readedSamples_L);
//monoIIRHP.processSamples((float*)bufferPtr_R, readedSamples_R);
// monoIIR.processSamples((float*)bufferPtr_L, readedSamples_L);
// monoIIR.processSamples((float*)bufferPtr_R, readedSamples_L);
sampleDelay += readedSamples_L;
if (sampleDelay > FRAMES_PER_BUFFER)
{
if (sampleCount > 0)
{
for (int n = 0; n < readedSamples_L; n++)
{
bufferPtr[n] = -.2f + static_cast <float> (rand()) / (static_cast <float> (RAND_MAX / (0.4f)));
}
arm_lms_norm_f32(&lmsNorm_instanceSecPath, (const float*)bufferPtr, (float*)bufferPtr_L, Out, errOutput, readedSamples_L);
sampleCount -= readedSamples_L;
}
else
{
arm_fir_f32(&fir_instanceSecPath, (const float*)bufferPtr_R, SecPathFirOut, readedSamples_L);
//arm_fir_f32(&fir_instanceANC, (const float*)bufferPtr_R, (float*)bufferPtr, readedSamples_L);
arm_lms_anc(&lms_instance, (const float*)SecPathFirOut, (float*)bufferPtr_L, ANCFirOut, errOutput, readedSamples_L);
}
}
// WITH THIS WORK !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
//arm_lms_norm_f32(
// &lmsNorm_instance, /* LMSNorm instance */
// (float*)bufferPtr_R, /* Input signal */
// (float*)bufferPtr_L, /* Reference-Error Signal */
// bufferPtr, /* Converged Signal */
// errOutput, /* Error Signal, this will become small as the signal converges */
// readedSamples_L); /* BlockSize */
//NLMSFilter.processNLMS(
// (float*)bufferPtr_R,
// (float*)bufferPtr_L,
// bufferPtr,
// readedSamples_L
//);
//monoIIR.processSamples((float*)bufferPtr, readedSamples_L);
#pragma omp section
{
memcpy(stereoFIR.state->coefficients.begin(), lmsNormCoeff_f32, filterSize * sizeof(float));
memcpy(lmsNormFIRCoeff_f32, lmsNormCoeff_f32, filterSize * sizeof(float));
//processedSamples = PaUtil_WriteRingBuffer(&ringBufferOut, (float*)bufferPtr, readedSamples_L);
//pthread_mutex_unlock( &count_mutex );
}
}
}
}
}
/***************************************************************************//**
* @brief Function to get current coefficients of NLMS filter
* @author Michał Berdzik
* @version 1.0 26/09/2019
* @param
* @return NLMS FIR Coefficients
******************************************************************************/
#if JUCE_USE_SIMD
dsp::FIR::Coefficients<float>* getCoeffs() {
return stereoFIR.state.getObject();
}
#else
dsp::FIR::Coefficients<float>* getCoeffs() {
return &coeffs;
}
#endif
/***************************************************************************//**
* @brief DELETE THIS FUNCTION
* @author Michał Berdzik
* @version 1.0 26/09/2019
* @param
* @return
******************************************************************************/
void beginTest()
{
//resultsBox.moveCaretToEnd();
//resultsBox.insertTextAtCaret (newLine + newLine + "Starting test..." + newLine);
//resultsBox.moveCaretToEnd();
playingSampleNum = recordedSampleNum = 0;
testIsRunning = true;
}
/***************************************************************************//**
* @brief Function to set output volume level
* @author Michał Berdzik
* @version 1.0 26/09/2019
* @param New volume value
* @return
******************************************************************************/
void setVolume(float vol) {
volume = vol;
}
/***************************************************************************//**
* @brief Function to return current SNR value
* @author Michał Berdzik
* @version 1.0 26/09/2019
* @param
* @return Float SNR value
******************************************************************************/
float getSNR() {
return 0;
}
/***************************************************************************//**
* @brief Function to put new data od FIFO queue
* @author Michał Berdzik
* @version 1.0 26/09/2019
* @param New sample value
* @param Channel to put sample to
* @return
******************************************************************************/
void pushNextSamplesIntoFifo(float *samples, bool channel, int dataSize) noexcept
{
// if the fifo contains enough data, set a flag to say
// that the next frame should now be rendered..
if (!channel) {
if (fifoIndex_L + dataSize >= 88200)
{
if (!nextSNRBlockReady_L)
{
nextSNRBlockReady_L = true;
}
}
else {
//fifo_L[fifoIndex_L++] = sample;
memcpy(&fifo_L[0] + fifoIndex_L, samples, dataSize * sizeof(float));
fifoIndex_L += dataSize;
}
}
if (channel) {
if (fifoIndex_P + dataSize >= 88200)
{
if (!nextSNRBlockReady_P)
{
nextSNRBlockReady_P = true;
}
}
else {
// fifo_P[fifoIndex_P++] = sample;
memcpy(&fifo_P[0] + fifoIndex_P, samples, dataSize * sizeof(float));
fifoIndex_P += dataSize;
}
}
if (nextSNRBlockReady_L && nextSNRBlockReady_P)
{
//SNR = arm_snr_f32(fifo_L, fifo_P, 88200);
zeromem(fifo_L, sizeof(fifo_L));
zeromem(fifo_P, sizeof(fifo_P));
fifoIndex_L = 0;
fifoIndex_P = 0;
nextSNRBlockReady_L = false;
nextSNRBlockReady_P = false;
}
}
/***************************************************************************//**
* @brief Overriden function to set audio device parameters before it starts
* @author Michał Berdzik
* @version 1.0 26/09/2019
* @param Pointer to IO Audio Device
* @return
******************************************************************************/
static unsigned NextPowerOf2(unsigned val)
{
val--;
val = (val >> 1) | val;
val = (val >> 2) | val;
val = (val >> 4) | val;
val = (val >> 8) | val;
val = (val >> 16) | val;
return ++val;
}
void audioDeviceAboutToStart (AudioIODevice* device) override
{
//omp_set_num_threads(3);
numOfSamples = device->getCurrentBufferSizeSamples();
testIsRunning = false;
playingSampleNum = recordedSampleNum = 0;
sampleRate = device->getCurrentSampleRate();
deviceInputLatency = device->getInputLatencyInSamples();
deviceOutputLatency = device->getOutputLatencyInSamples();
#if !JUCE_USE_SIMD
inData.setSize(2, numOfSamples);
outData.setSize(2,numOfSamples);
inData.clear();
outData.clear();
arm_lms_norm_init_f32(&lmsNorm_instance, filterSize, lmsNormCoeff_f32, lmsStateF32, muValue, numOfSamples);
coeffs = dsp::FIR::Coefficients<float>((const float*)lmsNormCoeff_f32, filterSize);
filter = dsp::FIR::Filter<float>(coeffs);
#else
interleaved = dsp::AudioBlock<dsp::SIMDRegister<float>>(interleavedBlockData, 2, numOfSamples);
zero = dsp::AudioBlock<float>(zeroData, dsp::SIMDRegister<float>::size(), numOfSamples); // [6]
zero.clear();
dsp::ProcessSpec spec;
spec.numChannels = 2;
spec.maximumBlockSize = numOfSamples;
spec.sampleRate = sampleRate;
arm_lms_norm_init_f32(&lmsNorm_instance, filterSize, lmsNormCoeff_f32, lmsStateF32, muValue, numOfSamples);
arm_lms_init_f32(&lms_instance, filterSize, lmsNormCoeff_f32, lmsStateF32, muValue, numOfSamples);
arm_lms_init_f32(&lms_instanceSecPath, filterSize, SecPathlmsNormCoeff_f32, SecPathlmsStateF32, muValue / 10.0f, numOfSamples);
arm_lms_norm_init_f32(&lmsNorm_instanceSecPath, filterSize, SecPathlmsNormCoeff_f32, SecPathlmsStateF32, muValue / 10.0f, numOfSamples);
arm_fir_init_f32(&fir_instanceSecPath, filterSize, SecPathlmsNormCoeff_f32, SecPathFirState, numOfSamples);
arm_fir_init_f32(&fir_instanceANC, filterSize, lmsNormFIRCoeff_f32, ANCFirState, numOfSamples);
stereoFIR.state = new dsp::FIR::Coefficients<float>((const float*)lmsNormCoeff_f32, filterSize);
stereoFIR.prepare(spec);
stereoIIR.state = dsp::IIR::Coefficients<float>::makeLowPass(sampleRate, 14000.0f, 20);
stereoIIR.prepare(spec);
monoIIR.setCoefficients((IIRCoefficients::makeLowPass(sampleRate, 2000, 0.7f)));
monoIIRHP.setCoefficients(IIRCoefficients::makeHighPass(sampleRate, 30, 0.7f));
NLMSFilter = Adaptive::NLMS(filterSize, muValue, 0.00000001f);
FbNLMSFilter = Adaptive::FbLMS(filterSize, muValue);
int numSamples = NextPowerOf2((unsigned)(SAMPLE_RATE * 0.5 * NR_OF_CHANNELS));
int numBytes = numSamples * sizeof(float);
ringBufferDataIn_L = (float *)malloc(numBytes);
printf("Creating ringBuffIn array \n");
if (ringBufferDataIn_L == NULL)
{
printf("Could not allocate input ring buffer data.\n");
}
printf("Initializing ringBuffIn\n");
if (PaUtil_InitializeRingBuffer(&ringBufferIn_L, sizeof(float), numSamples, ringBufferDataIn_L) < 0)
{
printf("Failed to initialize input ring buffer. Size is not power of 2 ??\n");
}
ringBufferDataIn_R = (float *)malloc(numBytes);
printf("Creating ringBuffIn array \n");
if (ringBufferDataIn_R == NULL)
{
printf("Could not allocate input ring buffer data.\n");
}
printf("Initializing ringBuffIn\n");
if (PaUtil_InitializeRingBuffer(&ringBufferIn_R, sizeof(float), numSamples, ringBufferDataIn_R) < 0)
{
printf("Failed to initialize input ring buffer. Size is not power of 2 ??\n");
}
printf("Creating ringBuffOut array \n");
ringBufferDataOut = (float *)malloc(numBytes);
if (ringBufferDataOut == NULL)
{
printf("Could not allocate output ring buffer data.\n");
}
printf("Initializing ringBuffOut\n");
if (PaUtil_InitializeRingBuffer(&ringBufferOut, sizeof(float), numSamples, ringBufferDataOut) < 0)
{
printf("Failed to initialize output ring buffer. Size is not power of 2 ??\n");
}
#endif
startThread();
}
/***************************************************************************//**
* @brief Overriden function to clean audio device parameters after it stops
* @author Michał Berdzik
* @version 1.0 26/09/2019
* @param
* @return
******************************************************************************/
void audioDeviceStopped() override
{
sampleRate = 0;
}
/***************************************************************************//**
* @brief Callback of IO Audio Device
* @author Michał Berdzik
* @version 1.0 26/09/2019
* @param Double pointer to input data
* @param Number of input channels
* @param Double pointer to input data
* @param Number of output channels
* @param Number of samples
* @return
******************************************************************************/
static ring_buffer_size_t rbs_min(ring_buffer_size_t a, ring_buffer_size_t b)
{
return (a < b) ? a : b;
}
void dcBlocker(float *in, float *out, int blockSize)
{
for (int i = 0; i < blockSize; i++)
{
out[i] = in[i] - xm1 + 0.995f * ym1;
xm1 = in[i];
ym1 = out[i];
}
}
void audioDeviceIOCallback(const float** inputChannelData, int numInputChannels,
float** outputChannelData, int numOutputChannels, int numSamples) override
{
static volatile ring_buffer_size_t availableSamples = 0;
static volatile ring_buffer_size_t readedSamples = 0;
static volatile ring_buffer_size_t processedSamples = 0;
static volatile float buffer[FRAMES_PER_BUFFER * 4];
volatile static int sampleDelayCall = 0;
volatile static int sampleCountCall = 48000 * 20;
float *bufferPtr = (float *)buffer;
const ScopedLock s1(lock);
(void)numInputChannels;
(void)numOutputChannels;
//=========================================================================================================================================================
#pragma omp parallel sections
{
const float *rptr_L = (const float *)inputChannelData[0];
ring_buffer_size_t elementsWriteable_L = PaUtil_GetRingBufferWriteAvailable(&ringBufferIn_L);
ring_buffer_size_t elementsToWrite_L = rbs_min(elementsWriteable_L, (ring_buffer_size_t)(numSamples));
PaUtil_WriteRingBuffer(&ringBufferIn_L, rptr_L, elementsToWrite_L);
const float *rptr_R = (const float *)inputChannelData[1];
ring_buffer_size_t elementsWriteable_R = PaUtil_GetRingBufferWriteAvailable(&ringBufferIn_R);
ring_buffer_size_t elementsToWrite_R = rbs_min(elementsWriteable_R, (ring_buffer_size_t)(numSamples));
PaUtil_WriteRingBuffer(&ringBufferIn_R, rptr_R, elementsToWrite_R);
//#pragma omp section
// {
// float *wptr = (float *)outputChannelData[0];
// ring_buffer_size_t elementsToPlay = PaUtil_GetRingBufferReadAvailable(&ringBufferOut);
// ring_buffer_size_t elementsToRead = rbs_min(elementsToPlay, (ring_buffer_size_t)(numSamples));
// readedSamples = PaUtil_ReadRingBuffer(&ringBufferOut, wptr, elementsToRead);
// }
// if (readedSamples < numOfSamples)
// {
//#pragma omp parallel for shedule(static, 4)
// for (int i = readedSamples; i < numOfSamples; i++)
// {
// outputChannelData[0][i] = 0;
// }
// }
}
#pragma omp parallel sections
{
//#pragma omp section
// {
// pushNextSamplesIntoFifo((float*)inputChannelData[0], 0, numSamples);
// pushNextSamplesIntoFifo((float*)inputChannelData[1], 1, numSamples);
// }
//static std::vector<float> previous_reference_samples(numSamples, 0.0f);
//static FxLMSFilter<FX_FILTER_LENGTH, FILTER_LENGTH> fxlms_filter(muValue,
// FX_FILTER_COEFFS, filterSize);
//for (unsigned long i = 1; i < numSamples; i++) {
// float error_sample = inputChannelData[0][i];
// float reference_sample = inputChannelData[1][i];
//
// float correction_sample = fxlms_filter.lms_step(previous_reference_samples.at(i), error_sample);
// previous_reference_samples.at(i) = reference_sample;
// float fixed_correction_sample = (correction_sample * volume);
// outputChannelData[0][i] = fixed_correction_sample;
//}
sampleDelayCall += numSamples;
if (sampleDelayCall > FRAMES_PER_BUFFER)
{
if (sampleCountCall > 0)
{
for (int n = 0; n < numSamples; n++)
{
outputChannelData[0][n] = -.2f + static_cast <float> (rand()) / (static_cast <float> (RAND_MAX / (0.4f)));
}
sampleCountCall -= numSamples;
}
else
{
arm_fir_f32(&fir_instanceANC, (const float*)inputChannelData[1], (float*)outputChannelData[0], numSamples);
}
}
//arm_lms_norm_f32(
// &lmsNorm_instance, /* LMSNorm instance */
// (float*)inputChannelData[1], /* Input signal */
// (float*)inputChannelData[0], /* Reference-Error Signal */
// outputChannelData[0], /* Converged Signal */
// errOutput, /* Error Signal, this will become small as the signal converges */
// numSamples); /* BlockSize */
//arm_lms_norm_anc(
// &lmsNorm_instance, /* LMSNorm instance */
// (const float*)inputChannelData[1], /* Input signal */
// (float*)inputChannelData[0], /* Reference-Error Signal */
// outputChannelData[0], /* Converged Signal */
// errOutput, /* Error Signal, this will become small as the signal converges */
// numSamples); /* BlockSize */
//NLMSFilter.processNLMS(
// (float*)inputChannelData[1],
// (float*)inputChannelData[0],
// outputChannelData[0],
// numSamples
//);
#pragma omp section
{
// stereoFIR.state = new dsp::FIR::Coefficients<float>((const float*)lmsNormCoeff_f32, NUM_OF_TAPS);
//memcpy(stereoFIR.state->coefficients.begin(), lmsNormCoeff_f32, filterSize * sizeof(float));
//memcpy(stereoFIR.state->coefficients.begin(), NLMSFilter.getCoeff(), filterSize * sizeof(float));
//memcpy(stereoFIR.state->coefficients.begin(), FbNLMSFilter.getCoeff(), filterSize * sizeof(float));
//memcpy(stereoFIR.state->coefficients.begin(), fxlms_filter.fir_filter.get_coefficients().data(), filterSize * sizeof(float));
//std::copy(fxlms_filter.fir_filter.get_coefficients().data(), fxlms_filter.fir_filter.get_coefficients().data() + FILTER_LENGTH, stereoFIR.state->coefficients.begin());
}
#pragma omp section
{
FloatVectorOperations::multiply((float*)outputChannelData[0], volume, numSamples);
//monoIIR.processSamples(outputChannelData[0], numSamples);
FloatVectorOperations::negate(outputChannelData[0], outputChannelData[0], numSamples);
FloatVectorOperations::copy((float*)outputChannelData[1], (float*)outputChannelData[0], numSamples);
}
//=========================================================================================================================================================
// auto end = std::chrono::high_resolution_clock::now();
// auto time = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();
}
}
/***************************************************************************//**
* @brief Function to process new pack of input data
* @author Michał Berdzik
* @version 1.0 26/09/2019
* @param Pointer to array of noise audio
* @param Pointer to array of destiny audio
* @return
******************************************************************************/
void processSamples(float *x, float *d) {
arm_lms_norm_f32(
&lmsNorm_instance, /* LMSNorm instance */
x, /* Input signal */
d, /* Reference Signal */
Out, /* Converged Signal */
errOutput, /* Error Signal, this will become small as the signal converges */
numOfSamples); /* BlockSize */
#if JUCE_USE_SIMD
// stereoFIR.state = new dsp::FIR::Coefficients<float>((const float*)lmsNormCoeff_f32, NUM_OF_TAPS);
memcpy(stereoFIR.state->coefficients.begin(), lmsNormCoeff_f32, filterSize * sizeof(float));
#else
memcpy(coeffs.coefficients.begin(), lmsNormCoeff_f32, filterSize * sizeof(float));
#endif
}
private:
CriticalSection lock;
int filterSize = 1;
float muValue = 1;
int playingSampleNum = 0;
int recordedSampleNum = -1;
double sampleRate = 0.0;
bool testIsRunning = false;
int deviceInputLatency, deviceOutputLatency, numOfSamples;
float volume = 1.0f;
arm_lms_norm_instance_f32 lmsNorm_instance;
arm_lms_instance_f32 lms_instance;
arm_lms_instance_f32 lms_instanceSecPath;
arm_lms_norm_instance_f32 lmsNorm_instanceSecPath;
arm_fir_instance_f32 fir_instanceSecPath;
arm_fir_instance_f32 fir_instanceANC;
float y[FRAMES_PER_BUFFER] = { 0.0f }; // Output data
float e[FRAMES_PER_BUFFER] = { 0.0f }; // Error data
float Out[FRAMES_PER_BUFFER] = { 0.0f }; // Output data
float errOutput[FRAMES_PER_BUFFER] = { 0.0f }; // Error data
float errOutput2[FRAMES_PER_BUFFER] = { 0.0f }; // Error data
float lmsStateF32[NUM_OF_TAPS + FRAMES_PER_BUFFER] = { 0.0f }; // Array for NLMS algorithm
float lmsNormCoeff_f32[NUM_OF_TAPS] = { 0.0f }; // NLMS Coefficients
float lmsNormFIRCoeff_f32[NUM_OF_TAPS] = { 0.0f }; // NLMS Coefficients
float SecPathlmsStateF32[NUM_OF_TAPS + FRAMES_PER_BUFFER] = { 0.0f }; // Array for NLMS algorithm
float SecPathlmsNormCoeff_f32[NUM_OF_TAPS] = { 0.0f }; // NLMS Coefficients
float SecPathFirState[NUM_OF_TAPS] = { 0.0f };
float SecPathFirOut[FRAMES_PER_BUFFER] = { 0.0f };
float ANCFirState[NUM_OF_TAPS] = { 0.0f };
float ANCFirOut[FRAMES_PER_BUFFER] = { 0.0f };
float zeros[FRAMES_PER_BUFFER] = { 0.0f };
float xm1 = 0;
float ym1 = 0;
Adaptive::NLMS NLMSFilter;
Adaptive::FbLMS FbNLMSFilter;
float fifo_L[88200];
int fifoIndex_L = 0;
bool nextSNRBlockReady_L = false;
float fifo_P[88200];
int fifoIndex_P = 0;
bool nextSNRBlockReady_P = false;
PaUtilRingBuffer ringBufferIn_L;
PaUtilRingBuffer ringBufferIn_R;
PaUtilRingBuffer ringBufferOut;
float *ringBufferDataIn_L;
float *ringBufferDataIn_R;
float *ringBufferDataOut;
#if JUCE_USE_SIMD
dsp::AudioBlock<float> inBlock;
dsp::AudioBlock<float> outBlock;
dsp::AudioBlock<dsp::SIMDRegister<float>> interleaved;
dsp::AudioBlock<float> zero;
HeapBlock<char> interleavedBlockData, zeroData;
HeapBlock<const float*> channelPointers{ dsp::SIMDRegister<float>::size() };
dsp::ProcessorDuplicator<dsp::FIR::Filter<dsp::SIMDRegister<float>>, dsp::FIR::Coefficients<float>> stereoFIR;
dsp::ProcessorDuplicator<dsp::IIR::Filter<dsp::SIMDRegister<float>>, dsp::IIR::Coefficients<float>> stereoIIR;
IIRFilter monoIIR;
IIRFilter monoIIRHP;
#else
AudioBuffer<float> inData, outData;
dsp::FIR::Coefficients<float> coeffs;
dsp::FIR::Filter<float> filter;
#endif
JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR(ANCInstance)
};
//==============================================================================
class ActiveNoiseCancelling : public Component,
private Timer,
public Slider::Listener
{
public:
/***************************************************************************//**
* @brief Default constructor of ActiveNoiseCancelling class
* @author Michał Berdzik
* @version 1.0 26/09/2019
* @param
* @return
******************************************************************************/
ActiveNoiseCancelling()
{
setOpaque (true);
startTimerHz(3);
// Set up Volume Label text box
volumeLabel.setText("Volume: ", dontSendNotification);
volumeLabel.attachToComponent(&volumeSlider, true);
volumeLabel.setColour(volumeLabel.textColourId, Colour(255, 255, 255));
// Set up Volume Slider box
volumeSlider.setRange(0, 10);
volumeSlider.addListener(this);
volumeSlider.setValue(1);
volumeSlider.setTextBoxStyle(Slider::TextBoxLeft, false, 120, volumeSlider.getTextBoxHeight());
// Set up Filter size Label text box
filterSizeLabel.setText("Filter size: ", dontSendNotification);
filterSizeLabel.attachToComponent(&filterSizeSlider, true);
filterSizeLabel.setColour(volumeLabel.textColourId, Colour(255, 255, 255));
// Set up Filter Size Slider box
filterSizeSlider.setRange(256.0, 4096.0, 128.0);
filterSizeSlider.addListener(this);
filterSizeSlider.setValue(512);
filterSizeSlider.setTextBoxStyle(Slider::TextBoxLeft, false, 120, volumeSlider.getTextBoxHeight());
// Set up Filter size Label text box
filterMULabel.setText("Filter mu: ", dontSendNotification);
filterMULabel.attachToComponent(&filterMUSlider, true);
filterMULabel.setColour(volumeLabel.textColourId, Colour(255, 255, 255));
// Set up Filter Size Slider box
filterMUSlider.setRange(0.00001, 1.0, 0.00001);
filterMUSlider.addListener(this);
filterMUSlider.setValue(0.25);
filterMUSlider.setTextBoxStyle(Slider::TextBoxLeft, false, 120, volumeSlider.getTextBoxHeight());
// Set up FFT Scale Slider box
FFTScaleSlider.setTextBoxStyle(Slider::TextBoxBelow, false, 30, FFTScaleSlider.getTextBoxHeight());
FFTScaleSlider.setSliderStyle(Slider::TwoValueVertical);
FFTScaleSlider.setRange(-99, 0, 1);
FFTScaleSlider.setTextValueSuffix(" dB");
FFTScaleSlider.addListener(this);
FFTScaleSlider.setMinValue(-99);
FFTScaleSlider.setMaxValue(0);
// Add components to be visible
addAndMakeVisible(FFTScaleSlider);
addAndMakeVisible(volumeLabel);
addAndMakeVisible(volumeSlider);
addAndMakeVisible(filterSizeSlider);
addAndMakeVisible(filterMUSlider);
// Reset and add main audio processing components
liveAudioScroller.reset (new LiveScrollingAudioDisplay());
spectrumAnalyser.reset(new AnalyserComponent());
addAndMakeVisible(spectrumAnalyser.get());
addAndMakeVisible (liveAudioScroller.get());
filterVisualizer.reset(new FilterVisualizer());
filterVisualizer->addCoefficients(&elo, Colours::white);
addAndMakeVisible(filterVisualizer.get());
addAndMakeVisible(SNR_Value);
SNR_Value.setColour(SNR_Value.backgroundColourId, Colour(39, 50, 56));
SNR_Value.setColour(SNR_Value.textColourId, Colour(137, 176, 196));
SNR_Value.setJustificationType(Justification::centred);
SNR_Value.setEditable(false);
SNR_Value.setText("Run ANC to see results", dontSendNotification);
addAndMakeVisible (startTestButton);
startTestButton.onClick = [this] { startTest(); };
addAndMakeVisible(startVisualizigData);
startVisualizigData.onClick = [this]
{
if (isVisualisingRunning) {
audioDeviceManager.removeAudioCallback(liveAudioScroller.get());
audioDeviceManager.removeAudioCallback(spectrumAnalyser.get());
isVisualisingRunning = false;
}
else {
audioDeviceManager.addAudioCallback(liveAudioScroller.get());
audioDeviceManager.addAudioCallback(spectrumAnalyser.get());
isVisualisingRunning = true;
}
};
#ifndef JUCE_DEMO_RUNNER
RuntimePermissions::request (RuntimePermissions::recordAudio,
[this] (bool granted)
{
int numInputChannels = granted ? 2 : 0;
audioDeviceManager.initialise (numInputChannels, 2, nullptr, true, {}, nullptr);
});
#endif
addAndMakeVisible(explanationLabel);
explanationLabel.setFont(Font(15.0f, Font::plain));
explanationLabel.setJustificationType(Justification::topLeft);
explanationLabel.setEditable(false, false, false);
explanationLabel.setColour(TextEditor::textColourId, Colours::black);
explanationLabel.setColour(TextEditor::backgroundColourId, Colour(0x00000000));
addAndMakeVisible(recordButton);
recordButton.setColour(TextButton::buttonColourId, Colour(0xffff5c5c));
recordButton.setColour(TextButton::textColourOnId, Colours::black);
recordButton.onClick = [this]
{
if (recorder.isRecording())
stopRecording();
else
startRecording();
};
addAndMakeVisible(recordingThumbnail);
#ifndef JUCE_DEMO_RUNNER
RuntimePermissions::request(RuntimePermissions::recordAudio,
[this](bool granted)
{
int numInputChannels = granted ? 2 : 0;
audioDeviceManager.initialise(numInputChannels, 2, nullptr, true, {}, nullptr);
});
#endif
// audioDeviceManager.addAudioCallback(latencyTester.get());
audioDeviceManager.addAudioCallback (liveAudioScroller.get());
audioDeviceManager.addAudioCallback(spectrumAnalyser.get());
audioDeviceManager.addAudioCallback(&recorder);
setSize (500, 800);
}
/***************************************************************************//**
* @brief Default destructor of ActiveNoiseCancelling class
* @author Michał Berdzik
* @version 1.0 26/09/2019
* @param
* @return
******************************************************************************/
~ActiveNoiseCancelling()
{
audioDeviceManager.removeAudioCallback(&recorder);
audioDeviceManager.removeAudioCallback (liveAudioScroller.get());
audioDeviceManager.removeAudioCallback(spectrumAnalyser.get());
audioDeviceManager.removeAudioCallback(ANC.get());
ANC.reset();
liveAudioScroller.reset();
spectrumAnalyser.reset();
filterVisualizer.reset();
}
/***************************************************************************//**
* @brief function of timer callback - specifies what to do when timer is called
* @author Michał Berdzik
* @version 1.0 26/09/2019
* @param
* @return
******************************************************************************/
void timerCallback() override
{
if (ANC.get() != nullptr)
{
ScopedLock sl(lock);
SNR_Value.setText(String("SNR = ") + String(ANC->getSNR()) + String(" dB"), NotificationType::dontSendNotification);
elo = ANC->getCoeffs();
}
}
/***************************************************************************//**
* @brief Overriden function of Slider's class
* @author Michał Berdzik
* @version 1.0 26/09/2019
* @param Pointer to Slider object
* @return
******************************************************************************/
void sliderValueChanged(Slider* slider) override
{
if (slider == &volumeSlider) {
if (ANC.get() != nullptr) {
ANC->setVolume((float)volumeSlider.getValue());
}
}
else if (slider == &FFTScaleSlider) {
if (spectrumAnalyser.get() != nullptr) {
spectrumAnalyser->setScaleValue(FFTScaleSlider.getMinValue(), FFTScaleSlider.getMaxValue());
}
}
}
/***************************************************************************//**
* @brief Function to start ANC
* @author Michał Berdzik
* @version 1.0 26/09/2019
* @param
* @return
******************************************************************************/
void startTest()
{
if (ANC.get() == nullptr)
{
ANC.reset (new ANCInstance(filterSizeSlider.getValue(),filterMUSlider.getValue()));
audioDeviceManager.addAudioCallback (ANC.get());
filterSizeSlider.setEnabled(false);
filterMUSlider.setEnabled(false);
}
ANC->beginTest();
}
/***************************************************************************//**
* @brief Overriden function to draw new data on screen
* @author Michał Berdzik
* @version 1.0 26/09/2019
* @param Reference to graphic module
* @return
******************************************************************************/
void paint (Graphics& g) override
{
g.fillAll (findColour (ResizableWindow::backgroundColourId));
}
/***************************************************************************//**
* @brief Overriden function called when application window is resized
* @author Michał Berdzik
* @version 1.0 26/09/2019
* @param
* @return
******************************************************************************/
void resized() override
{
auto b = getLocalBounds().reduced (5);
volumeSlider.setBounds(b.getX() + 80, b.getY(), b.getWidth() / 2, b.getHeight() / 20);
startVisualizigData.setBounds(b.getX() + 80 + b.getWidth() / 2, b.getY(), b.getWidth() / 4 - 40, b.getHeight() / 20);
recordButton.setBounds(b.getX() + 40 + b.getWidth() / 2 + b.getWidth() / 4, b.getY(), b.getWidth() / 4 - 40, b.getHeight() / 20);
b.removeFromTop(b.getHeight() / 20);
b.removeFromTop(3);
filterSizeSlider.setBounds(b.getX() + 80, b.getY(), b.getWidth() / 2 - 80, b.getHeight() / 20);
filterMUSlider.setBounds(b.getX() + b.getWidth() / 2 + 80, b.getY(), b.getWidth() / 2 - 80, b.getHeight() / 20);
b.removeFromTop(b.getHeight() / 20);
b.removeFromTop(3);
if (liveAudioScroller.get() != nullptr)
{
liveAudioScroller->setBounds (b.removeFromTop (b.getHeight() / 8));
b.removeFromTop (3);
}
startTestButton.setBounds (b.removeFromBottom (b.getHeight() / 15));
b.removeFromBottom (10);
SNR_Value.setBounds(b.removeFromBottom(b.getHeight() / 15));
b.removeFromBottom(10);
FFTScaleSlider.setBounds(b.getX(), b.getY(), 30, b.getHeight() / 2);
spectrumAnalyser->setBounds(b.getX() + 30, b.getY(), b.getWidth() - 30, b.getHeight() / 2);
b.removeFromTop(b.getHeight() / 2 + 3);
filterVisualizer->setBounds(b);
}
private:
// if this PIP is running inside the demo runner, we'll use the shared device manager instead
#ifndef JUCE_DEMO_RUNNER
AudioDeviceManager audioDeviceManager;
#else
AudioDeviceManager& audioDeviceManager { getSharedAudioDeviceManager (2, 2) };
#endif
CriticalSection lock;
bool isVisualisingRunning = true;
std::unique_ptr<ANCInstance> ANC;
std::unique_ptr<LiveScrollingAudioDisplay> liveAudioScroller;
std::unique_ptr<AnalyserComponent> spectrumAnalyser;
std::unique_ptr<FilterVisualizer> filterVisualizer;
RecordingThumbnail recordingThumbnail;
AudioRecorder recorder{ recordingThumbnail.getAudioThumbnail() };
Label explanationLabel{ {}, "This page demonstrates how to record a wave file from the live audio input..\n\n"
#if (JUCE_ANDROID || JUCE_IOS)
"After you are done with your recording you can share with other apps."
#else
"Pressing record will start recording a file in your \"Documents\" folder."
#endif
};
TextButton recordButton{ "Record" };
File lastRecording;
TextButton startTestButton { "Run ANC" };
TextButton startVisualizigData{ "Run/Stop Charts" };
Label SNR_Value;
//TextEditor resultsBox;
Slider volumeSlider;
Label volumeLabel;
Slider filterSizeSlider;
Label filterSizeLabel;
Slider filterMUSlider;
Label filterMULabel;
Slider FFTScaleSlider;
dsp::FIR::Coefficients<float>::Ptr elo;
void startRecording()
{
if (!RuntimePermissions::isGranted(RuntimePermissions::writeExternalStorage))
{
SafePointer<ActiveNoiseCancelling> safeThis(this);
RuntimePermissions::request(RuntimePermissions::writeExternalStorage,
[safeThis](bool granted) mutable
{
if (granted)
safeThis->startRecording();
});
return;
}
auto parentDir = File::getSpecialLocation(File::userDocumentsDirectory);
lastRecording = parentDir.getNonexistentChildFile("JUCE Demo Audio Recording", ".wav");
recorder.startRecording(lastRecording);
recordButton.setButtonText("Stop");
recordingThumbnail.setDisplayFullThumbnail(false);
}
void stopRecording()
{
recorder.stop();
lastRecording = File();
recordButton.setButtonText("Record");
recordingThumbnail.setDisplayFullThumbnail(true);
}
JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (ActiveNoiseCancelling)
};
|
top_k_op.h | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <algorithm>
#include <iostream>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename DeviceContext, typename T>
class TopkKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
// Get the top k elements of each row of input tensor
auto* input = ctx.Input<Tensor>("X");
auto* output = ctx.Output<Tensor>("Out");
auto* indices = ctx.Output<Tensor>("Indices");
size_t k = static_cast<int>(ctx.Attr<int>("k"));
auto* k_t = ctx.Input<Tensor>("K");
if (k_t) {
k = k_t->data<int>()[0];
framework::DDim output_dims = output->dims();
output_dims[output_dims.size() - 1] = k;
output->Resize(output_dims);
indices->Resize(output_dims);
}
T* output_data = output->mutable_data<T>(ctx.GetPlace());
int64_t* indices_data = indices->mutable_data<int64_t>(ctx.GetPlace());
// reshape input to a flattern matrix(like flat_inner_dims)
framework::DDim inputdims = input->dims();
const size_t row =
pten::product(pten::slice_ddim(inputdims, 0, inputdims.size() - 1));
const size_t col = inputdims[inputdims.size() - 1];
Eigen::DSizes<int, 2> flat2dims(row, col);
// NOTE: eigen shape doesn't affect paddle tensor.
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
for (size_t i = 0; i < row; i++) {
std::vector<std::pair<T, size_t>> vec;
vec.reserve(col);
// 1D vector
if (inputdims.size() == 1) {
auto eg_input = framework::EigenVector<T>::Flatten(*input);
for (size_t j = 0; j < col; j++) {
vec.push_back(std::pair<T, size_t>(eg_input(j), j));
}
} else {
auto eg_input =
framework::EigenMatrix<T>::Reshape(*input, inputdims.size() - 1);
for (size_t j = 0; j < col; j++) {
vec.push_back(std::pair<T, size_t>(eg_input(i, j), j));
}
}
std::partial_sort(
vec.begin(), vec.begin() + k, vec.end(),
[](const std::pair<T, size_t>& l, const std::pair<T, size_t>& r) {
return l.first > r.first;
});
for (size_t j = 0; j < k; j++) {
output_data[i * k + j] = vec[j].first;
indices_data[i * k + j] = int64_t(vec[j].second);
}
}
}
};
template <typename DeviceContext, typename T>
class TopkGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* x = context.Input<Tensor>("X");
auto* out_grad = context.Input<Tensor>(framework::GradVarName("Out"));
auto* indices = context.Input<Tensor>("Indices");
auto* x_grad = context.Output<Tensor>(framework::GradVarName("X"));
T* x_grad_data = x_grad->mutable_data<T>(context.GetPlace());
const T* out_grad_data = out_grad->data<T>();
const int64_t* indices_data = indices->data<int64_t>();
size_t k = indices->dims()[indices->dims().size() - 1];
framework::DDim xdims = x->dims();
const size_t row =
pten::product(pten::slice_ddim(xdims, 0, xdims.size() - 1));
const size_t col = xdims[xdims.size() - 1];
memset(x_grad_data, 0, row * col * sizeof(T));
for (size_t i = 0; i < row; ++i) {
for (size_t j = 0; j < k; ++j) {
size_t idx = indices_data[i * k + j];
x_grad_data[i * col + idx] = out_grad_data[i * k + j];
}
}
}
};
} // namespace operators
} // namespace paddle
|
GB_emult_template.c | //------------------------------------------------------------------------------
// GB_emult_template: phase1 and phase2 for C=A.*B, C<M>=A.*B
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Computes C=A.*B (no mask) or C<M>=A.*B (mask present and not complemented).
// Does not handle the case C<!M>=A.*B. The complemented mask is handled in
// GB_mask instead. If present, the mask M is assumed to be very sparse
// compared with A and B.
// phase1: does not compute C itself, but just counts the # of entries in each
// vector of C. Fine tasks compute the # of entries in their slice of a
// single vector of C, and the results are cumsum'd.
// phase2: computes C, using the counts computed by phase1.
{
// iB_first is unused if the operator is FIRST or PAIR
#include "GB_unused.h"
//--------------------------------------------------------------------------
// get A, B, M, and C
//--------------------------------------------------------------------------
const int64_t *GB_RESTRICT Ap = A->p ;
const int64_t *GB_RESTRICT Ah = A->h ;
const int64_t *GB_RESTRICT Ai = A->i ;
const int64_t vlen = A->vlen ;
const int64_t *GB_RESTRICT Bp = B->p ;
const int64_t *GB_RESTRICT Bh = B->h ;
const int64_t *GB_RESTRICT Bi = B->i ;
const int64_t *GB_RESTRICT Mp = NULL ;
const int64_t *GB_RESTRICT Mh = NULL ;
const int64_t *GB_RESTRICT Mi = NULL ;
const GB_void *GB_RESTRICT Mx = NULL ;
size_t msize = 0 ;
if (M != NULL)
{
Mp = M->p ;
Mh = M->h ;
Mi = M->i ;
Mx = (Mask_struct ? NULL : (M->x)) ;
msize = M->type->size ;
}
#if defined ( GB_PHASE_2_OF_2 )
const GB_ATYPE *GB_RESTRICT Ax = A->x ;
const GB_ATYPE *GB_RESTRICT Bx = B->x ;
const int64_t *GB_RESTRICT Cp = C->p ;
const int64_t *GB_RESTRICT Ch = C->h ;
int64_t *GB_RESTRICT Ci = C->i ;
GB_CTYPE *GB_RESTRICT Cx = C->x ;
#endif
//--------------------------------------------------------------------------
// phase1: count entries in each C(:,j); phase2: compute C
//--------------------------------------------------------------------------
int taskid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
int64_t kfirst = TaskList [taskid].kfirst ;
int64_t klast = TaskList [taskid].klast ;
bool fine_task = (klast == -1) ;
int64_t len ;
if (fine_task)
{
// a fine task operates on a slice of a single vector
klast = kfirst ;
len = TaskList [taskid].len ;
}
else
{
// a coarse task operates on one or more whole vectors
len = vlen ;
}
for (int64_t k = kfirst ; k <= klast ; k++)
{
//------------------------------------------------------------------
// get j, the kth vector of C
//------------------------------------------------------------------
int64_t j = (Ch == NULL) ? k : Ch [k] ;
#if defined ( GB_PHASE_1_OF_2 )
int64_t cjnz = 0 ;
#else
int64_t pC, pC_end ;
if (fine_task)
{
// A fine task computes a slice of C(:,j)
pC = TaskList [taskid ].pC ;
pC_end = TaskList [taskid+1].pC ;
ASSERT (Cp [k] <= pC && pC <= pC_end && pC_end <= Cp [k+1]) ;
}
else
{
// The vectors of C are never sliced for a coarse task.
pC = Cp [k] ;
pC_end = Cp [k+1] ;
}
int64_t cjnz = pC_end - pC ;
if (cjnz == 0) continue ;
#endif
//------------------------------------------------------------------
// get A(:,j)
//------------------------------------------------------------------
int64_t pA = -1, pA_end = -1 ;
if (fine_task)
{
// A fine task operates on Ai,Ax [pA...pA_end-1], which is
// A fine task operates on Ai,Ax [pA...pA_end-1], which is
// a subset of the vector A(:,j)
pA = TaskList [taskid].pA ;
pA_end = TaskList [taskid].pA_end ;
}
else
{
// A coarse task operates on the entire vector A (:,j)
int64_t kA = (Ch == Ah) ? k :
((C_to_A == NULL) ? j : C_to_A [k]) ;
if (kA >= 0)
{
pA = Ap [kA] ;
pA_end = Ap [kA+1] ;
}
}
int64_t ajnz = pA_end - pA ; // nnz in A(:,j) for this slice
bool adense = (ajnz == len) ;
int64_t pA_start = pA ;
// get the first and last indices in A(:,j) for this vector
int64_t iA_first = -1 ;
if (ajnz > 0)
{
iA_first = Ai [pA] ;
}
#if defined ( GB_PHASE_1_OF_2 ) || defined ( GB_DEBUG )
int64_t iA_last = -1 ;
if (ajnz > 0)
{
iA_last = Ai [pA_end-1] ;
}
#endif
//------------------------------------------------------------------
// get B(:,j)
//------------------------------------------------------------------
int64_t pB = -1, pB_end = -1 ;
if (fine_task)
{
// A fine task operates on Bi,Bx [pB...pB_end-1], which is
// a subset of the vector B(:,j)
pB = TaskList [taskid].pB ;
pB_end = TaskList [taskid].pB_end ;
}
else
{
// A coarse task operates on the entire vector B (:,j)
int64_t kB = (Ch == Bh) ? k :
((C_to_B == NULL) ? j : C_to_B [k]) ;
if (kB >= 0)
{
pB = Bp [kB] ;
pB_end = Bp [kB+1] ;
}
}
int64_t bjnz = pB_end - pB ; // nnz in B(:,j) for this slice
bool bdense = (bjnz == len) ;
int64_t pB_start = pB ;
// get the first and last indices in B(:,j) for this vector
int64_t iB_first = -1 ;
if (bjnz > 0)
{
iB_first = Bi [pB] ;
}
#if defined ( GB_PHASE_1_OF_2 ) || defined ( GB_DEBUG )
int64_t iB_last = -1 ;
if (bjnz > 0)
{
iB_last = Bi [pB_end-1] ;
}
#endif
//------------------------------------------------------------------
// phase1: count nnz (C (:,j)); phase2: compute C(:,j)
//------------------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
if (ajnz == 0 || bjnz == 0)
{
//--------------------------------------------------------------
// A(:,j) and/or B(:,j) are empty
//--------------------------------------------------------------
;
}
else if (iA_last < iB_first || iB_last < iA_first)
{
//--------------------------------------------------------------
// intersection of A(:,j) and B(:,j) is empty
//--------------------------------------------------------------
// the last entry of A(:,j) comes before the first entry
// of B(:,j), or visa versa
;
}
else
#endif
if (M == NULL)
{
if (adense && bdense)
{
//----------------------------------------------------------
// A(:,j) and B(:,j) dense: thus C(:,j) dense
//----------------------------------------------------------
ASSERT (ajnz == bjnz) ;
ASSERT (iA_first == iB_first) ;
ASSERT (iA_last == iB_last ) ;
#if defined ( GB_PHASE_1_OF_2 )
cjnz = ajnz ;
#else
ASSERT (cjnz == ajnz) ;
for (int64_t p = 0 ; p < ajnz ; p++)
{
Ci [pC + p] = p + iA_first ;
GB_GETA (aij, Ax, pA + p) ;
GB_GETB (bij, Bx, pB + p) ;
GB_BINOP (GB_CX (pC + p), aij, bij) ;
}
#endif
}
else if (adense)
{
//----------------------------------------------------------
// A(:,j) is dense, B(:,j) is sparse: thus C(:,j) sparse
//----------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
cjnz = bjnz ;
#else
ASSERT (cjnz == bjnz) ;
for (int64_t p = 0 ; p < bjnz ; p++)
{
int64_t i = Bi [pB + p] ;
Ci [pC + p] = i ;
GB_GETA (aij, Ax, pA + i - iA_first) ;
GB_GETB (bij, Bx, pB + p) ;
GB_BINOP (GB_CX (pC + p), aij, bij) ;
}
#endif
}
else if (bdense)
{
//----------------------------------------------------------
// A(:,j) is sparse, B(:,j) is dense: thus C(:,j) sparse
//----------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
cjnz = ajnz ;
#else
ASSERT (cjnz == ajnz) ;
for (int64_t p = 0 ; p < ajnz ; p++)
{
int64_t i = Ai [pA + p] ;
Ci [pC + p] = i ;
GB_GETA (aij, Ax, pA + p) ;
GB_GETB (bij, Bx, pB + i - iB_first) ;
GB_BINOP (GB_CX (pC + p), aij, bij) ;
}
#endif
}
else if (ajnz > 32 * bjnz)
{
//----------------------------------------------------------
// A(:,j) is much denser than B(:,j)
//----------------------------------------------------------
for ( ; pB < pB_end ; pB++)
{
int64_t i = Bi [pB] ;
// find i in A(:,j)
int64_t pright = pA_end - 1 ;
bool found ;
GB_BINARY_SEARCH (i, Ai, pA, pright, found) ;
if (found)
{
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
ASSERT (pC < pC_end) ;
Ci [pC] = i ;
GB_GETA (aij, Ax, pA) ;
GB_GETB (bij, Bx, pB) ;
GB_BINOP (GB_CX (pC), aij, bij) ;
pC++ ;
#endif
}
}
#if defined ( GB_PHASE_2_OF_2 )
ASSERT (pC == pC_end) ;
#endif
}
else if (bjnz > 32 * ajnz)
{
//----------------------------------------------------------
// B(:,j) is much denser than A(:,j)
//----------------------------------------------------------
for ( ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ;
// find i in B(:,j)
int64_t pright = pB_end - 1 ;
bool found ;
GB_BINARY_SEARCH (i, Bi, pB, pright, found) ;
if (found)
{
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
ASSERT (pC < pC_end) ;
Ci [pC] = i ;
GB_GETA (aij, Ax, pA) ;
GB_GETB (bij, Bx, pB) ;
GB_BINOP (GB_CX (pC), aij, bij) ;
pC++ ;
#endif
}
}
#if defined ( GB_PHASE_2_OF_2 )
ASSERT (pC == pC_end) ;
#endif
}
else
{
//----------------------------------------------------------
// A(:,j) and B(:,j) have about the same # of entries
//----------------------------------------------------------
// linear-time scan of A(:,j) and B(:,j)
while (pA < pA_end && pB < pB_end)
{
int64_t iA = Ai [pA] ;
int64_t iB = Bi [pB] ;
if (iA < iB)
{
// A(i,j) exists but not B(i,j)
pA++ ;
}
else if (iB < iA)
{
// B(i,j) exists but not A(i,j)
pB++ ;
}
else
{
// both A(i,j) and B(i,j) exist
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
ASSERT (pC < pC_end) ;
Ci [pC] = iB ;
GB_GETA (aij, Ax, pA) ;
GB_GETB (bij, Bx, pB) ;
GB_BINOP (GB_CX (pC), aij, bij) ;
pC++ ;
#endif
pA++ ;
pB++ ;
}
}
#if defined ( GB_PHASE_2_OF_2 )
ASSERT (pC == pC_end) ;
#endif
}
}
else
{
//--------------------------------------------------------------
// Mask is present
//--------------------------------------------------------------
int64_t pM = -1 ;
int64_t pM_end = -1 ;
if (fine_task)
{
// A fine task operates on Mi,Mx [pM...pM_end-1], which is
// a subset of the vector M(:,j)
pM = TaskList [taskid].pM ;
pM_end = TaskList [taskid].pM_end ;
}
else
{
int64_t kM = -1 ;
if (Ch == Mh)
{
// Ch is the same as Mh (a shallow copy), or both NULL
kM = k ;
}
else
{
kM = (C_to_M == NULL) ? j : C_to_M [k] ;
}
if (kM >= 0)
{
pM = Mp [kM] ;
pM_end = Mp [kM+1] ;
}
}
//--------------------------------------------------------------
// C(:,j)<M(:,j) = A(:,j) .* B (:,j)
//--------------------------------------------------------------
for ( ; pM < pM_end ; pM++)
{
//----------------------------------------------------------
// get M(i,j) for A(i,j) .* B (i,j)
//----------------------------------------------------------
int64_t i = Mi [pM] ;
bool mij = GB_mcast (Mx, pM, msize) ;
if (!mij) continue ;
//----------------------------------------------------------
// get A(i,j)
//----------------------------------------------------------
if (adense)
{
// A(:,j) is dense; use direct lookup for A(i,j)
pA = pA_start + i - iA_first ;
}
else
{
// A(:,j) is sparse; use binary search for A(i,j)
int64_t apright = pA_end - 1 ;
bool afound ;
GB_BINARY_SEARCH (i, Ai, pA, apright, afound) ;
if (!afound) continue ;
}
ASSERT (Ai [pA] == i) ;
//----------------------------------------------------------
// get B(i,j)
//----------------------------------------------------------
if (bdense)
{
// B(:,j) is dense; use direct lookup for B(i,j)
pB = pB_start + i - iB_first ;
}
else
{
// B(:,j) is sparse; use binary search for B(i,j)
int64_t bpright = pB_end - 1 ;
bool bfound ;
GB_BINARY_SEARCH (i, Bi, pB, bpright, bfound) ;
if (!bfound) continue ;
}
ASSERT (Bi [pB] == i) ;
//----------------------------------------------------------
// C(i,j) = A(i,j) .* B(i,j)
//----------------------------------------------------------
// C (i,j) = A (i,j) .* B (i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
GB_GETA (aij, Ax, pA) ;
GB_GETB (bij, Bx, pB) ;
GB_BINOP (GB_CX (pC), aij, bij) ;
pC++ ;
#endif
}
#if defined ( GB_PHASE_2_OF_2 )
ASSERT (pC == pC_end) ;
#endif
}
//------------------------------------------------------------------
// final count of nnz (C (:,j))
//------------------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
if (fine_task)
{
TaskList [taskid].pC = cjnz ;
}
else
{
Cp [k] = cjnz ;
}
#endif
}
}
}
|
nstream.c | /*
Copyright (c) 2013, Intel Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* Copyright 1991-2013: John D. McCalpin */
/*-----------------------------------------------------------------------*/
/* License: */
/* 1. You are free to use this program and/or to redistribute */
/* this program. */
/* 2. You are free to modify this program for your own use, */
/* including commercial use, subject to the publication */
/* restrictions in item 3. */
/* 3. You are free to publish results obtained from running this */
/* program, or from works that you derive from this program, */
/* with the following limitations: */
/* 3a. In order to be referred to as "STREAM benchmark results", */
/* published results must be in conformance to the STREAM */
/* Run Rules, (briefly reviewed below) published at */
/* http://www.cs.virginia.edu/stream/ref.html */
/* and incorporated herein by reference. */
/* As the copyright holder, John McCalpin retains the */
/* right to determine conformity with the Run Rules. */
/* 3b. Results based on modified source code or on runs not in */
/* accordance with the STREAM Run Rules must be clearly */
/* labelled whenever they are published. Examples of */
/* proper labelling include: */
/* "tuned STREAM benchmark results" */
/* "based on a variant of the STREAM benchmark code" */
/* Other comparable, clear, and reasonable labelling is */
/* acceptable. */
/* 3c. Submission of results to the STREAM benchmark web site */
/* is encouraged, but not required. */
/* 4. Use of this program or creation of derived works based on this */
/* program constitutes acceptance of these licensing restrictions. */
/* 5. Absolutely no warranty is expressed or implied. */
/*-----------------------------------------------------------------------*/
/**********************************************************************
NAME: nstream
PURPOSE: To compute memory bandwidth when adding a vector of a given
number of double precision values to the scalar multiple of
another vector of the same length, and storing the result in
a third vector.
USAGE: The program takes as input the number of threads, the number
of iterations to loop over the triad vectors, the length of the
vectors, and the offset between vectors
<progname> <# threads> <# iterations> <vector length> <offset>
The output consists of diagnostics to make sure the
algorithm worked, and of timing statistics.
FUNCTIONS CALLED:
Other than OpenMP or standard C functions, the following
external functions are used in this program:
wtime()
bail_out()
checkTRIADresults()
NOTES: Bandwidth is determined as the number of words read, plus the
number of words written, times the size of the words, divided
by the execution time. For a vector length of N, the total
number of words read and written is 3*N*sizeof(double).
HISTORY: This code is loosely based on the Stream benchmark by John
McCalpin, but does not follow all the Stream rules. Hence,
reported results should not be associated with Stream in
external publications
REVISION: Modified by Tim Mattson to handle OpenMP correctly
REVISION: Modified by Rob Van der Wijngaart, December 2005, to
parameterize vector size and offsets through compiler flags.
Also removed all Stream cases except TRIAD.
REVISION: Modified by Rob Van der Wijngaart, May 2006, to introduce
dependence between successive triad operations. This is
necessary to avoid dead code elimination
**********************************************************************/
#include <par-res-kern_general.h>
#include <par-res-kern_omp.h>
#define DEFAULTMAXLENGTH 2000000
#ifdef MAXLENGTH
#if MAXLENGTH > 0
#define N MAXLENGTH
#else
#define N DEFAULTMAXLENGTH
#endif
#else
#define N DEFAULTMAXLENGTH
#endif
#ifdef STATIC_ALLOCATION
/* use static to make sure it goes on the heap, not the stack */
static double a[N];
#else
static double * RESTRICT a;
#endif
static double * RESTRICT b;
static double * RESTRICT c;
#define SCALAR 3.0
static int checkTRIADresults(int, long int);
int main(int argc, char **argv)
{
long int j, k; /* dummies */
double scalar; /* constant used in Triad operation */
int iterations; /* number of times vector loop gets repeated */
long int length, /* total vector length */
offset; /* offset between vectors a and b, and b and c */
double bytes; /* memory IO size */
size_t space; /* memory used for a single vector */
double nstream_time, /* timing parameters */
avgtime = 0.0,
maxtime = 0.0,
mintime = 366.0*24.0*3600.0; /* set the minimum time to a
large value; one leap year should be enough */
int nthread_input; /* thread parameters */
int nthread;
int num_error=0; /* flag that signals that requested and
obtained numbers of threads are the same */
/**********************************************************************************
* process and test input parameters
***********************************************************************************/
if (argc != 5){
printf("Usage: %s <# threads> <# iterations> <vector length> <offset>\n", *argv);
exit(EXIT_FAILURE);
}
nthread_input = atoi(*++argv);
iterations = atoi(*++argv);
length = atol(*++argv);
offset = atol(*++argv);
if ((nthread_input < 1) || (nthread_input > MAX_THREADS)) {
printf("ERROR: Invalid number of threads: %d\n", nthread_input);
exit(EXIT_FAILURE);
}
if ((iterations < 1)) {
printf("ERROR: Invalid number of iterations: %d\n", iterations);
exit(EXIT_FAILURE);
}
if (length < 0) {
printf("ERROR: Invalid vector length: %ld\n", length);
exit(EXIT_FAILURE);
}
if (offset < 0) {
printf("ERROR: Incvalid array offset: %ld\n", offset);
exit(EXIT_FAILURE);
}
#ifdef STATIC_ALLOCATION
if ((3*length + 2*offset) > N) {
printf("ERROR: vector length/offset %ld/%ld too ", length, offset);
printf("large; increase MAXLENGTH in Makefile or decrease vector length\n");
exit(EXIT_FAILURE);
}
#endif
omp_set_num_threads(nthread_input);
#ifndef STATIC_ALLOCATION
space = (3*length + 2*offset)*sizeof(double);
a = (double *) malloc(space);
if (!a) {
printf("ERROR: Could not allocate %ld words for vectors\n",
3*length+2*offset);
exit(EXIT_FAILURE);
}
#endif
b = a + length + offset;
c = b + length + offset;
#pragma omp parallel private(j,k)
{
#pragma omp master
{
nthread = omp_get_num_threads();
printf("OpenMP stream triad: A = B + scalar*C\n");
if (nthread != nthread_input) {
num_error = 1;
printf("ERROR: number of requested threads %d does not equal ",
nthread_input);
printf("number of spawned threads %d\n", nthread);
}
else {
printf("Number of threads = %i;\n",nthread_input);
printf("Vector length = %ld\n", length);
printf("Offset = %ld\n", offset);
printf("Number of iterations = %d\n", iterations);
}
}
bail_out(num_error);
#pragma omp for
#pragma vector always
for (j=0; j<length; j++) {
a[j] = 0.0;
b[j] = 2.0;
c[j] = 2.0;
}
/* --- MAIN LOOP --- repeat Triad iterations times --- */
scalar = SCALAR;
for (k=0; k<iterations; k++) {
#pragma omp barrier
#pragma omp master
{
nstream_time = wtime();
}
#pragma omp for
#pragma vector always
for (j=0; j<length; j++) a[j] = b[j]+scalar*c[j];
#pragma omp master
if (k>0 || iterations==1) { /* skip the first iteration */
nstream_time = wtime() - nstream_time;
avgtime = avgtime + nstream_time;
mintime = MIN(mintime, nstream_time);
maxtime = MAX(maxtime, nstream_time);
}
/* insert a dependency between iterations to avoid dead-code elimination */
#pragma omp for
#pragma vector always
for (j=0; j<length; j++) b[j] = a[j];
}
} /* end of OpenMP parallel region */
/*********************************************************************
** Analyze and output results.
*********************************************************************/
bytes = 3.0 * sizeof(double) * length;
if (checkTRIADresults(iterations, length)) {
avgtime = avgtime/(double)(MAX(iterations-1,1));
printf("Rate (MB/s): %lf, Avg time (s): %lf, Min time (s): %lf",
1.0E-06 * bytes/mintime, avgtime, mintime);
printf(", Max time (s): %lf\n", maxtime);
}
else exit(EXIT_FAILURE);
return 0;
}
int checkTRIADresults (int iterations, long int length) {
double aj, bj, cj, scalar, asum;
double epsilon = 1.e-8;
long int j,k;
/* reproduce initialization */
aj = 0.0;
bj = 2.0;
cj = 2.0;
/* now execute timing loop */
scalar = SCALAR;
for (k=0; k<iterations; k++) {
aj = bj+scalar*cj;
bj = aj;
}
aj = aj * (double) (length);
asum = 0.0;
for (j=0; j<length; j++) asum += a[j];
#ifdef VERBOSE
printf ("Results Comparison: \n");
printf (" Expected checksum: %f\n",aj);
printf (" Observed checksum: %f\n",asum);
#endif
if (ABS(aj-asum)/asum > epsilon) {
printf ("Failed Validation on output array\n");
#ifndef VERBOSE
printf (" Expected checksum: %f \n",aj);
printf (" Observed checksum: %f \n",asum);
#endif
return (0);
}
else {
printf ("Solution Validates\n");
return (1);
}
}
|
sphkmeans.c | /*!
\file
\brief A parallel spherical k-means program
\date Started 4/20/2013
\author George
*/
#define _SVID_SOURCE
#define _DEFAULT_SOURCE
#if 0
int main() {}
#else
#include <GKlib.h>
#include <bdmpi.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/mman.h>
#include <unistd.h>
#include <malloc.h>
/**************************************************************************/
/* data structures */
/**************************************************************************/
typedef struct {
int npes, mype, nthreads;
BDMPI_Comm comm;
int nclusters;
int ntrials, niters;
char *filename;
int mlock;
/* the total number of rows and their overall distribution */
int nrows, ncols;
int *rowdist;
/* timers */
double totalTmr;
double compTmr;
double commTmr;
} params_t;
typedef struct {
int val;
int loc;
} vlp_ii_t;
/**************************************************************************/
/* prototypes */
/**************************************************************************/
gk_csr_t *LoadData(params_t *params);
void WriteClustering(params_t *params, gk_csr_t *mat, int *cvec);
void PreprocessData(params_t *params, gk_csr_t *mat);
int *ClusterData(params_t *params, gk_csr_t *mat);
void ComputeClusteringStatistics(params_t *params, gk_csr_t *mat, int *cpart);
void printInCoreInfo(char *msg, int mype, gk_csr_t *mat);
/**************************************************************************/
/**************************************************************************/
/* Driver: parse arguments, load and normalize the matrix, run the k-way
 * clustering, and report the per-timer maxima across all pes.
 * Fix: the three BDMPI_Reduce calls had their "&current" argument garbled
 * into "¤t" (an HTML-entity round-trip of "&curren"); restored. */
int main(int argc, char **argv)
{
  params_t *params;
  gk_csr_t *mat;
  int *cvec;
  double max, current;

  /* unbuffered streams so per-process progress lines appear immediately */
  setbuf(stdout, NULL);
  setbuf(stderr, NULL);

  BDMPI_Init(&argc, &argv);

  params = (params_t *)gk_malloc(sizeof(params_t), "params");
  memset(params, 0, sizeof(params_t));

  params->comm = BDMPI_COMM_WORLD;
  BDMPI_Comm_size(params->comm, &(params->npes));
  BDMPI_Comm_rank(params->comm, &(params->mype));

  if (argc != 7) {
    if (params->mype == 0)
      fprintf(stderr, "Usage: %s filename #clusters #trials #iters #threads mlock[0;1]\n", argv[0]);
    goto DONE;
  }

  params->filename  = strdup(argv[1]);
  params->nclusters = atoi(argv[2]);
  params->ntrials   = atoi(argv[3]);
  params->niters    = atoi(argv[4]);
  params->nthreads  = atoi(argv[5]);
  params->mlock     = atoi(argv[6]);

  printf("[%3d] nclusters: %d, ntrials: %d, niters: %d, nthreads: %d, mlock: %d\n",
      params->mype, params->nclusters, params->ntrials, params->niters, params->nthreads,
      params->mlock);

  omp_set_num_threads(params->nthreads);

  gk_clearwctimer(params->totalTmr);
  gk_clearwctimer(params->compTmr);
  gk_clearwctimer(params->commTmr);

  /* synchronize everybody before the timed section starts */
  BDMPI_Barrier(params->comm);
  BDMPI_Barrier(params->comm);
  gk_startwctimer(params->totalTmr);

  mat = LoadData(params);
  PreprocessData(params, mat);

  /* per-pe seed so the pes draw different random numbers */
  srand(params->mype+101);

  printf("[%03d] timestamp01: %zu\n", params->mype, (size_t)time(NULL));
  cvec = ClusterData(params, mat);
  printf("[%03d] timestamp02: %zu\n", params->mype, (size_t)time(NULL));
  //WriteClustering(params, mat, cvec);

  BDMPI_Barrier(params->comm);
  BDMPI_Barrier(params->comm);
  gk_stopwctimer(params->totalTmr);

  /* print timing stats: the max across pes of each timer */
  current = gk_getwctimer(params->compTmr);
  BDMPI_Reduce(&current, &max, 1, BDMPI_DOUBLE, BDMPI_MAX, 0, params->comm);
  if (params->mype == 0)
    printf(" compTmr: %10.4lf\n", max);

  current = gk_getwctimer(params->commTmr);
  BDMPI_Reduce(&current, &max, 1, BDMPI_DOUBLE, BDMPI_MAX, 0, params->comm);
  if (params->mype == 0)
    printf(" commTmr: %10.4lf\n", max);

  current = gk_getwctimer(params->totalTmr);
  BDMPI_Reduce(&current, &max, 1, BDMPI_DOUBLE, BDMPI_MAX, 0, params->comm);
  if (params->mype == 0)
    printf(" totalTmr: %10.4lf\n", max);

DONE:
  BDMPI_Finalize();

  return EXIT_SUCCESS;
}
/**************************************************************************/
/*! Reads a sparse matrix in binary CSR format. The same matrix is read
by everybody in order to simulate a large file clustering.
\returns the local portion of the matrix.
*/
/**************************************************************************/
/* Reads the sparse matrix in binary CSR format.  Every process reads the
 * SAME file; the "global" matrix is simulated as npes stacked copies of
 * the local one.  Returns the local portion (caller owns it). */
gk_csr_t *LoadData(params_t *params)
{
  int mype=params->mype, npes=params->npes;
  int lrank, lsize, lnrows, flag;
  size_t lnnz;
  gk_csr_t *mat=NULL;
  BDMPI_Status status;

  /* node-local rank/size are used to serialize disk reads within a node */
  BDMPI_Comm_lrank(params->comm, &lrank);
  BDMPI_Comm_lsize(params->comm, &lsize);

  if (mype == 0) {
    if (!gk_fexists(params->filename))
      gk_errexit(SIGERR, "File %s does not exist!\n", params->filename);
  }

  /* wait your turn: a token travels through the node-local processes so
     only one of them hits the disk at a time.
     NOTE(review): the token is sent to global ranks mype-1/mype+1, which
     assumes node-local processes have contiguous global ranks -- confirm */
  if (lrank != 0)
    BDMPI_Recv(&flag, 1, BDMPI_INT, mype-1, 1, params->comm, &status);

  mat = gk_csr_Read(params->filename, GK_CSR_FMT_BINROW, 1, 0);
  lnrows = mat->nrows;
  lnnz = mat->rowptr[mat->nrows];  /* local number of non-zeros */

  /* global dimensions: npes copies of the local matrix stacked row-wise */
  params->nrows = lnrows*npes;
  params->ncols = mat->ncols;

  if (mype == 0)
    printf("[%3d] lnrows: %d, lnnz: %zu [ts: %d]\n", mype, mat->nrows, lnnz,
        (int)time(NULL));

  /* pass the read token to the next process on this node */
  if (lrank != lsize-1)
    BDMPI_Send(&flag, 1, BDMPI_INT, mype+1, 1, params->comm);

  if (mype == 0)
    printf("[%3d] params->nrows: %d, params->ncols: %d, tnnz: %zu [ts: %d]\n",
        mype, params->nrows, params->ncols, npes*lnnz, (int)time(NULL));

  return mat;
}
/**************************************************************************/
/*! Writes a clustering vector. It just let each process write its portion
to the file in a round-robin fashion.
*/
/**************************************************************************/
/* Writes the clustering vector to <filename>.part.<nclusters>.  The pes
 * write their portions round-robin: pe 0 creates/truncates the file,
 * every other pe waits for a token from its predecessor and appends, and
 * the token is then passed on.
 * Changes vs. original: the two nearly-identical branches are unified,
 * the token payload is consistently `dummy` (the original sent `&mype`
 * from non-root pes; the value is never inspected), and the unchecked
 * sprintf is replaced by snprintf. */
void WriteClustering(params_t *params, gk_csr_t *mat, int *cvec)
{
  int npes=params->npes, mype=params->mype, dummy=0;
  size_t i;
  BDMPI_Status status;
  FILE *fpout;
  char outfile[1024];

  snprintf(outfile, sizeof(outfile), "%s.part.%d", params->filename, params->nclusters);

  /* wait for the predecessor to finish writing its portion */
  if (mype != 0)
    BDMPI_Recv(&dummy, 1, BDMPI_INT, mype-1, 1, params->comm, &status);

  /* pe 0 creates the file; everybody else appends */
  fpout = gk_fopen(outfile, (mype == 0 ? "w" : "a"), "outfile");
  for (i=0; i<mat->nrows; i++)
    fprintf(fpout, "%d\n", cvec[i]);
  gk_fclose(fpout);

  /* pass the token to the successor */
  if (mype+1 < npes)
    BDMPI_Send(&dummy, 1, BDMPI_INT, mype+1, 1, params->comm);
}
/**************************************************************************/
/*! This function performs various pre-processing steps on the matrix.
*/
/**************************************************************************/
/* Pre-processing: L2-normalize every row of the matrix, so that the dot
 * product of a row with a unit-norm centroid is their cosine similarity
 * (the quantity maximized in ClusterData). */
void PreprocessData(params_t *params, gk_csr_t *mat)
{
  gk_csr_Normalize(mat, GK_CSR_ROW, 2);
}
/**************************************************************************/
/*! This function computes the k-way clustering solution.
*/
/**************************************************************************/
/* Computes the k-way clustering via repeated randomized spherical k-means
 * trials and returns the best local clustering vector (caller frees).
 * Fix: two gk_free calls had their "&centers" argument garbled into
 * "¢ers" (an HTML-entity round-trip of "&cent"); restored. */
int *ClusterData(params_t *params, gk_csr_t *mat)
{
  int npes=params->npes, mype=params->mype;
  size_t trial, iter, i, j, k, offset;
  int nrows, ncols, nclusters, rnum, *rowind, *cpart=NULL, *bcpart=NULL, *tptr;
  int lnmoves, gnmoves;
  ssize_t *rowptr;
  float *rowval, *centers=NULL;
  float dnorms[params->nclusters], crval, bcrval;  /* VLA: per-cluster norms */
  vlp_ii_t lmaxloc, gmaxloc;

  nclusters = params->nclusters;

  nrows  = mat->nrows;
  ncols  = mat->ncols;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  bcpart = gk_imalloc(nrows, "bcpart");

  /* perform a number of random trials */
  for (bcrval=0.0, trial=0; trial<params->ntrials; trial++) {
    /* elect the pe with the globally largest random draw to seed centers */
    lmaxloc.val = rand();
    lmaxloc.loc = mype;
    BDMPI_Allreduce(&lmaxloc, &gmaxloc, 1, BDMPI_2INT, BDMPI_MAXLOC, params->comm);

    if (mype == gmaxloc.loc) { /* this pe will be selecting the initial centers */
      centers = gk_fsmalloc(nclusters*ncols, 0.0, "centers");

      /* pick the centers from equally-spaced local rows */
      rnum = RandomInRange(nrows/nclusters);
      for (k=0; k<nclusters; k++) {
        i = ((k+1)*rnum)%nrows;
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          centers[rowind[j]*nclusters+k] = rowval[j];
      }
    }

    /* get into the iterative refinement */
    cpart = gk_ismalloc(nrows, -1, "cpart");
    for (iter=0; iter<params->niters; iter++) {
      if (mype == gmaxloc.loc)
        printf("Working on trial: %zu, iter: %zu\n", trial, iter);
      else /* non-root pes allocate a fresh buffer to receive the centers */
        centers = gk_fmalloc(nclusters*ncols, "centers");

      BDMPI_Bcast(centers, ncols*nclusters, BDMPI_FLOAT, gmaxloc.loc, params->comm);

      printf("[%03d]%04zu.%04zu.0 ts: %d\n", mype, trial, iter, (int)time(NULL));
      if (params->mlock)
        GKWARN(BDMPI_mlockall(MCL_CURRENT) == 0);
      printf("[%03d]%04zu.%04zu.1 ts: %d\n", mype, trial, iter, (int)time(NULL));

      /* assign each local row to the closest cluster (max dot product) */
      gk_startwctimer(params->compTmr);
      lnmoves = 0;
      #pragma omp parallel default(none),\
                  shared(nrows, nclusters, rowptr, rowind, rowval, centers, cpart),\
                  private(i, j, k, offset),\
                  reduction(+:lnmoves)
      {
        float sims[nclusters];  /* per-thread similarity accumulator */

        #pragma omp for schedule(dynamic,32)
        for (i=0; i<nrows; i++) {
          for (k=0; k<nclusters; k++)
            sims[k] = 0.0;
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            offset = rowind[j]*nclusters;
            for (k=0; k<nclusters; k++)
              sims[k] += rowval[j]*centers[offset+k];
          }
          k = gk_fargmax(nclusters, sims, 1);
          if (k != cpart[i])
            lnmoves++;
          cpart[i] = k;
        }
      }

      /* compute the new local centers */
      gk_fset(nclusters*ncols, 0.0, centers);
      for (i=0; i<nrows; i++) {
        k = cpart[i];
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          centers[rowind[j]*nclusters+k] += rowval[j];
      }
      gk_stopwctimer(params->compTmr);

      if (params->mlock)
        GKWARN(BDMPI_munlockall() == 0);
      printf("[%03d]%04zu.%04zu.2 ts: %d\n", mype, trial, iter, (int)time(NULL));

      /* compute the new global centers.
         NOTE(review): send and receive buffers alias here; assumes BDMPI
         permits in-place reductions -- confirm against its documentation */
      BDMPI_Reduce(centers, centers, nclusters*ncols, BDMPI_FLOAT, BDMPI_SUM,
          gmaxloc.loc, params->comm);

      if (mype == gmaxloc.loc) {
        if (params->mlock)
          GKWARN(BDMPI_mlock(centers, ncols*nclusters*sizeof(float)) == 0);

        /* normalize the centers to unit length; crval accumulates the
           clustering objective (sum of cluster norms) */
        for (k=0; k<nclusters; k++)
          dnorms[k] = 0.0;
        for (i=0; i<ncols; i++) {
          offset = i*nclusters;
          for (k=0; k<nclusters; k++)
            dnorms[k] += centers[offset+k]*centers[offset+k];
        }
        for (crval=0.0, k=0; k<nclusters; k++) {
          if (dnorms[k] > 0) {
            crval += sqrt(dnorms[k]);
            dnorms[k] = 1.0/sqrt(dnorms[k]);
          }
        }
        for (i=0; i<ncols; i++) {
          offset = i*nclusters;
          for (k=0; k<nclusters; k++)
            centers[offset+k] *= dnorms[k];
        }

        if (params->mlock)
          GKWARN(BDMPI_munlock(centers, ncols*nclusters*sizeof(float)) == 0);
        //printf("trial: %2zd; iter: %3zd; crval: %.8e; bcrval: %.8e\n", trial, iter, crval, bcrval);
      }
      else {
        /* non-root pes are done with the centers for this iteration */
        gk_free((void **)&centers, LTERM);
      }

      /* see if you are done refining */
      if (iter > 0) {
        BDMPI_Allreduce(&lnmoves, &gnmoves, 1, BDMPI_INT, BDMPI_SUM, params->comm);
        if (gnmoves == 0)
          break;
      }
    }

    /* keep the clustering of the best trial so far */
    BDMPI_Bcast(&crval, 1, BDMPI_FLOAT, gmaxloc.loc, params->comm);
    if (crval > bcrval) {
      gk_SWAP(cpart, bcpart, tptr);
      bcrval = crval;
    }
    gk_free((void **)&cpart, LTERM);

    if (mype == gmaxloc.loc)
      printf("[%3zu:%3zu] gnmoves: %8d; crval: %8.4e; bcrval: %8.4e [ts: %d]\n",
          trial, iter, gnmoves, crval, bcrval, (int)time(NULL));
  }

  gk_free((void **)&centers, LTERM);

  return bcpart;
}
/**************************************************************************/
/*! This function prints final statistics for the clustering solution. */
/**************************************************************************/
/* Prints final statistics (per-cluster sizes and norms, overall objective)
 * for the clustering solution; output happens on pe 0 only.
 * Fix: the final gk_free had its "&centers" argument garbled into "¢ers"
 * (HTML-entity corruption); restored.  The unused local `npes` is gone. */
void ComputeClusteringStatistics(params_t *params, gk_csr_t *mat, int *cpart)
{
  int mype=params->mype, nclusters=params->nclusters;
  size_t i, j, k, offset;
  int nrows, ncols, *rowind, *pwgts;
  ssize_t *rowptr;
  float *rowval, *centers, *dnorms;
  float crval, tcrval;

  nrows  = mat->nrows;
  ncols  = mat->ncols;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  centers = gk_fsmalloc(nclusters*ncols, 0.0, "centers");
  pwgts   = gk_ismalloc(nclusters, 0, "pwgts");

  /* compute the local centers and local partition weights */
  gk_fset(nclusters*ncols, 0.0, centers);
  for (i=0; i<nrows; i++) {
    k = cpart[i];
    pwgts[k]++;
    for (j=rowptr[i]; j<rowptr[i+1]; j++)
      centers[rowind[j]*nclusters+k] += rowval[j];
  }

  /* compute global centroids and partition weights.
     NOTE(review): send/recv buffers alias; assumes BDMPI allows in-place
     reductions -- confirm */
  BDMPI_Reduce(pwgts, pwgts, nclusters, BDMPI_INT, BDMPI_SUM, 0, params->comm);
  BDMPI_Reduce(centers, centers, nclusters*ncols, BDMPI_FLOAT, BDMPI_SUM, 0, params->comm);

  if (mype == 0) {
    dnorms = gk_fsmalloc(nclusters, 0.0, "dnorms");
    for (i=0; i<ncols; i++) {
      offset = i*nclusters;
      for (k=0; k<nclusters; k++)
        dnorms[k] += centers[offset+k]*centers[offset+k];
    }
    /* per-cluster norm and the overall objective (their sum) */
    for (tcrval=0.0, k=0; k<nclusters; k++) {
      crval = (dnorms[k] > 0 ? sqrt(dnorms[k]) : 0.0);
      tcrval += crval;
      printf("Cluster: %4zu %6d %.4e\n", k, pwgts[k], crval);
    }
    printf("Overall: %.4e\n", tcrval);
    gk_free((void **)&dnorms, LTERM);
  }

  gk_free((void **)&centers, (void **)&pwgts, LTERM);
}
/**************************************************************************/
/*! Print residency information about the various elements of the matrix */
/**************************************************************************/
/* Counts how many pages of the region [ptr, ptr+size) are resident in
 * memory using mincore(): counts[1] accumulates resident pages, counts[0]
 * non-resident ones.  On failure an error line is printed and the counts
 * are left untouched. */
static void CountResidentPages(char *ptr, size_t size, unsigned char *vec,
         size_t pagesize, const char *name, size_t counts[2])
{
  size_t i, vlen, addr, offset;

  addr   = (size_t)ptr;
  offset = addr%pagesize;
  ptr  -= offset;   /* mincore requires a page-aligned start address */
  size += offset;

  if (mincore(ptr, size, vec) == -1)
    printf("mincore error for %s: %s [%zu %zu %zu]\n", name, strerror(errno),
        pagesize, addr, addr%pagesize);
  else {
    vlen = (size+pagesize-1)/pagesize;
    for (i=0; i<vlen; i++)
      counts[vec[i]&1]++;   /* bit 0 of each vec entry = page is resident */
  }
}

/* Prints residency information (resident/non-resident page counts) for the
 * rowptr, rowind, and rowval arrays of the matrix, plus rusage counters.
 * Changes vs. original: the three copy-pasted mincore stanzas are factored
 * into CountResidentPages(); the mincore vector is sized from the LARGEST
 * of the three regions (it was sized from rowind only) and is allocated
 * with sizeof(unsigned char) -- the original multiplied by
 * sizeof(unsigned char *), an 8x over-allocation. */
void printInCoreInfo(char *msg, int mype, gk_csr_t *mat)
{
  size_t vlen, pagesize, maxsize, nnzsize, counts1[2], counts2[2], counts3[2];
  unsigned char *vec;
  struct rusage usage;

  pagesize = sysconf(_SC_PAGESIZE);

  /* one byte per page of the largest region; +10 pages of slack covers the
     alignment spill-over inside CountResidentPages */
  maxsize = sizeof(ssize_t)*(mat->nrows+1);
  nnzsize = sizeof(int)*(size_t)mat->rowptr[mat->nrows];
  if (nnzsize > maxsize)
    maxsize = nnzsize;
  vlen = maxsize/pagesize + 10;
  vec  = (unsigned char *)gk_malloc(sizeof(unsigned char)*vlen, "vec");

  counts1[0] = counts1[1] = 0;
  counts2[0] = counts2[1] = 0;
  counts3[0] = counts3[1] = 0;

  CountResidentPages((char *)mat->rowptr, sizeof(ssize_t)*(mat->nrows+1),
      vec, pagesize, "rowptr", counts1);
  CountResidentPages((char *)mat->rowind, sizeof(int)*(mat->rowptr[mat->nrows]),
      vec, pagesize, "rowind", counts2);
  /* NOTE(review): rowval holds floats; sizeof(int) is used as in the
     original and happens to equal sizeof(float) */
  CountResidentPages((char *)mat->rowval, sizeof(int)*(mat->rowptr[mat->nrows]),
      vec, pagesize, "rowval", counts3);

  gk_free((void **)&vec, LTERM);

  if (getrusage(RUSAGE_SELF, &usage) == -1) {
    printf("getrusage error: %s\n", strerror(errno));
  }
  else {
    printf("[%03d]%s [%5zu %5zu] [%5zu %5zu] [%5zu %5zu] mrss: %ld minf: %ld majf: %ld\n",
        mype, msg,
        counts1[0], counts1[1],
        counts2[0], counts2[1],
        counts3[0], counts3[1],
        usage.ru_maxrss, usage.ru_minflt, usage.ru_majflt);
  }
}
#endif
|
mm.c | /*
Henrique, eu resolvi testar no meu proprio PC ao inves do server da PUC, porque
lá demora demais, e se um unico aluno alem de mim resolver testar, o programa dele
vai ocupar CPU time junto com o meu e os tempos vao ficar diferentes. Preferi testar no meu PC
tambem porque assim eu consigo ter controle de quais programas rodam em background e saber que eu fechei tudo,
pra minha CPU ficar exclusivamente pra rodar esse codigo.
Config do meu PC:
AMD Ryzen 5 5600X (6 cores 12 threads)
Arch Linux - 5.13.12-arch1-1
GCC 11.1.0
Tempo - Sequencial
real 0m24.447s
user 0m24.413s
sys 0m0.023s
real 0m24.433s
user 0m24.406s
sys 0m0.017s
real 0m24.379s
user 0m24.231s
sys 0m0.093s
real 0m24.330s
user 0m24.173s
sys 0m0.083s
real 0m24.346s
user 0m24.319s
sys 0m0.017s
real 0m24.370s
user 0m24.340s
sys 0m0.020s
real 0m24.416s
user 0m24.382s
sys 0m0.023s
real 0m24.415s
user 0m24.379s
sys 0m0.027s
==========================
Tempo - Paralelo
real 0m3.762s
user 0m43.229s
sys 0m1.277s
real 0m3.746s
user 0m43.324s
sys 0m1.323s
real 0m3.726s
user 0m42.869s
sys 0m1.232s
real 0m3.761s
user 0m43.387s
sys 0m1.387s
real 0m3.791s
user 0m43.662s
sys 0m1.340s
real 0m3.748s
user 0m43.290s
sys 0m1.284s
real 0m3.765s
user 0m43.465s
sys 0m1.289s
real 0m3.801s
user 0m43.572s
sys 0m1.314s
*/
#include <stdio.h>
#include <stdlib.h>
/*
 * mm: dense square matrix multiply, c = a * b; all matrices are
 * width x width, row-major, caller-allocated.
 *
 * The two outer loops are collapsed and parallelized; the dot-product
 * loop runs sequentially inside each thread.  The original code also put
 * "#pragma omp parallel for" on the k-loop, which is a nested parallel
 * region AND a data race on `sum` (no reduction clause); that pragma has
 * been removed.
 */
void mm(double* a, double* b, double* c, int width)
{
    #pragma omp parallel for collapse(2)
    for (int i = 0; i < width; i++) {
        for (int j = 0; j < width; j++) {
            double sum = 0;
            for (int k = 0; k < width; k++) {
                sum += a[i * width + k] * b[k * width + j];
            }
            c[i * width + j] = sum;
        }
    }
}
/* Driver: allocates three width x width matrices, initializes a[i][j]=i,
 * b[i][j]=j, c=0 in parallel, and multiplies them.
 * Changes vs. original: malloc results are checked (the original
 * dereferenced them unconditionally) and the buffers are freed. */
int main()
{
    const int width = 2000;
    size_t n = (size_t)width * width;

    double *a = malloc(n * sizeof *a);
    double *b = malloc(n * sizeof *b);
    double *c = malloc(n * sizeof *c);
    if (!a || !b || !c) {
        fprintf(stderr, "out of memory\n");
        free(a);
        free(b);
        free(c);
        return EXIT_FAILURE;
    }

    #pragma omp parallel for collapse(2)
    for (int i = 0; i < width; i++) {
        for (int j = 0; j < width; j++) {
            a[i*width+j] = i;
            b[i*width+j] = j;
            c[i*width+j] = 0;
        }
    }

    mm(a, b, c, width);

    // for(int i = 0; i < width; i++) {
    //     for(int j = 0; j < width; j++) {
    //         printf("\n c[%d][%d] = %f",i,j,c[i*width+j]);
    //     }
    // }

    free(a);
    free(b);
    free(c);
    return 0;
}
|
GB_unop__identity_int16_int16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__(none))
// op(A') function: GB (_unop_tran__identity_int16_int16)
// C type: int16_t
// A type: int16_t
// cast: int16_t cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int16_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
#if 0
// Cx = op (cast (Ax)): apply the identity unary operator to every entry.
// This instance is compiled out (the enclosing "#if 0"); per the file
// header the code is auto-generated and should not be edited by hand.
GrB_Info GB (_unop_apply__(none))
(
    int16_t *Cx,                // Cx and Ax may be aliased
    const int16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int16_t aij = Ax [p] ;
            int16_t z = aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries that are not present in the bitmap
            if (!Ab [p]) continue ;
            int16_t aij = Ax [p] ;
            int16_t z = aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the identity unary
// operator.  The implementation is the shared template included below,
// specialized through the GB_* macros defined earlier in this file.
GrB_Info GB (_unop_tran__identity_int16_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
dropout_op.h | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <cstring>
#include <random>
#include <string>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/generator.h"
#include "paddle/fluid/framework/op_registry.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
// CPU forward kernel for the dropout op.
//
// Training (is_test == false): draws a uniform [0,1) number per element;
// elements with draw < dropout_prob are zeroed (Mask = 0), the rest are
// kept (Mask = 1) and, under "upscale_in_train", divided by
// (1 - dropout_prob).  Inference: either an identity copy
// ("upscale_in_train") or a scale by (1 - dropout_prob).
template <typename DeviceContext, typename T>
class CPUDropoutKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* x = context.Input<Tensor>("X");
    // Optional seed tensor takes precedence over the seed attributes.
    auto* seed =
        context.HasInput("Seed") ? context.Input<Tensor>("Seed") : nullptr;
    auto* y = context.Output<Tensor>("Out");
    const auto* x_data = x->data<T>();
    auto* y_data = y->mutable_data<T>(context.GetPlace());
    float dropout_prob = context.Attr<float>("dropout_prob");

    auto& dropout_implementation =
        context.Attr<std::string>("dropout_implementation");
    bool upscale_in_train = (dropout_implementation == "upscale_in_train");
    if (!context.Attr<bool>("is_test")) {
      auto* mask = context.Output<Tensor>("Mask");
      auto* mask_data = mask->mutable_data<uint8_t>(context.GetPlace());
      size_t size = framework::product(mask->dims());

      // Special case when dropout_prob is 1.0: every element is dropped.
      if (dropout_prob == 1.0f) {
        std::memset(y_data, 0, size * sizeof(*y_data));        // NOLINT
        std::memset(mask_data, 0, size * sizeof(*mask_data));  // NOLINT
        return;
      }
      // If the Python-side global generator was initialized, draw from its
      // engine instead of the locally seeded one.
      bool init_generator_py = framework::Generator::GetInstance()->is_init_py;

      // NOTE: fixed seed should only be used in unittest or for debug.
      // Guarantee to use random seed in training.
      std::random_device rnd;
      std::minstd_rand engine;
      int seed_data;
      if (seed) {
        seed_data = *(seed->data<int>());
      } else {
        seed_data =
            context.Attr<bool>("fix_seed") ? context.Attr<int>("seed") : rnd();
      }
      engine.seed(seed_data);

      std::uniform_real_distribution<float> dist(0, 1);

      for (size_t i = 0; i < size; ++i) {
        float cur_random =
            init_generator_py
                ? dist(framework::Generator::GetInstance()->GetCPUEngine())
                : dist(engine);
        if (cur_random < dropout_prob) {
          // dropped element
          mask_data[i] = 0;
          y_data[i] = 0;
        } else {
          // kept element, optionally rescaled so the expectation matches
          // the inference-time output
          mask_data[i] = 1;
          if (upscale_in_train) {
            y_data[i] = x_data[i] / static_cast<T>(1.0f - dropout_prob);
          } else {
            y_data[i] = x_data[i];
          }
        }
      }
    } else {
      if (upscale_in_train) {
        // Inference with upscale_in_train: plain element-wise copy.
        const auto* X_data = x->data<T>();
        auto* Y_data = y->mutable_data<T>(context.GetPlace());
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
        for (int i = 0; i < x->numel(); i++) {
          Y_data[i] = X_data[i];
        }
      } else {
        // Inference without upscaling: scale by the keep probability.
        auto X = EigenMatrix<T>::Reshape(*x, 1);
        auto Y = EigenMatrix<T>::Reshape(*y, 1);
        auto& place =
            *context.template device_context<DeviceContext>().eigen_device();
        Y.device(place) = X * static_cast<T>(1.0f - dropout_prob);
      }
    }
  }
};
// CPU backward kernel for dropout: dX = dY * Mask, with the same rescaling
// convention as the forward pass.
template <typename DeviceContext, typename T>
class DropoutGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    // Dropout is stochastic; its gradient is only defined in training mode.
    PADDLE_ENFORCE_EQ(!context.Attr<bool>("is_test"), true,
                      platform::errors::PreconditionNotMet(
                          "GradOp is only callable when is_test is false"));
    auto* grad_x = context.Output<Tensor>(framework::GradVarName("X"));
    auto* grad_y = context.Input<Tensor>(framework::GradVarName("Out"));
    auto* mask = context.Input<Tensor>("Mask");
    grad_x->mutable_data<T>(context.GetPlace());

    auto M = EigenMatrix<uint8_t>::Reshape(*mask, 1);
    auto dX = EigenMatrix<T>::Reshape(*grad_x, 1);
    auto dY = EigenMatrix<T>::Reshape(*grad_y, 1);

    auto& place =
        *context.template device_context<DeviceContext>().eigen_device();
    auto& dropout_implementation =
        context.Attr<std::string>("dropout_implementation");
    if (dropout_implementation == "upscale_in_train") {
      float dropout_prob = context.Attr<float>("dropout_prob");
      if (dropout_prob == 1.0f) {
        // Everything was dropped in the forward pass; avoid the division
        // by zero below and propagate a zero gradient.
        dX.device(place) = static_cast<T>(0) * dY;
      } else {
        // Mirror the forward upscaling: gradient flows only through kept
        // elements and is divided by the keep probability.
        dX.device(place) =
            dY * M.cast<T>() / static_cast<T>(1.0f - dropout_prob);
      }
    } else {
      dX.device(place) = dY * M.cast<T>();
    }
  }
};
} // namespace operators
} // namespace paddle
|
omp_taskyield.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
#include "omp_my_sleep.h"
/* Verifies that "#pragma omp taskyield" can let an untied task resume on a
 * different thread: NUM_TASKS untied tasks record the executing thread id
 * before the taskyield, and tasks whose first id is even record it again
 * afterwards; sleeps encourage the runtime to reschedule.  Returns nonzero
 * (pass) iff at least one task observed different ids.
 * NUM_TASKS, SLEEPTIME, and my_sleep come from omp_testsuite.h /
 * omp_my_sleep.h. */
int test_omp_taskyield()
{
  int i;
  int count = 0;
  int start_tid[NUM_TASKS];
  int current_tid[NUM_TASKS];

  for (i=0; i< NUM_TASKS; i++) {
    start_tid[i]=0;
    current_tid[i]=0;
  }

  #pragma omp parallel
  {
    #pragma omp single
    {
      for (i = 0; i < NUM_TASKS; i++) {
        /* capture the loop index by value for this task */
        int myi = i;
        #pragma omp task untied
        {
          my_sleep(SLEEPTIME);
          start_tid[myi] = omp_get_thread_num();
          #pragma omp taskyield
          /* only tasks that started on an even-numbered thread record the
             post-yield thread id; the others leave current_tid at 0 */
          if((start_tid[myi] %2) ==0){
            my_sleep(SLEEPTIME);
            current_tid[myi] = omp_get_thread_num();
          } /*end of if*/
        } /* end of omp task */
      } /* end of for */
    } /* end of single */
  } /* end of parallel */

  /* count tasks whose recorded ids match; pass if at least one differs */
  for (i=0;i<NUM_TASKS; i++) {
    //printf("start_tid[%d]=%d, current_tid[%d]=%d\n",
    //i, start_tid[i], i , current_tid[i]);
    if (current_tid[i] == start_tid[i])
      count++;
  }
  return (count<NUM_TASKS);
}
/* Driver: run the taskyield test REPETITIONS times and use the number of
 * failing repetitions as the exit status (0 == all passed). */
int main()
{
  int failures = 0;

  /* taskyield needs at least two threads to have any effect */
  if (omp_get_max_threads() < 2)
    omp_set_num_threads(8);

  for (int rep = 0; rep < REPETITIONS; rep++) {
    if (!test_omp_taskyield())
      failures++;
  }

  return failures;
}
|
CutPursuit_L2.h | #pragma once
#include "CutPursuit.h"
#include "Common.h"
namespace CP {
template <typename T>
class CutPursuit_L2 : public CutPursuit<T>
{
public:
// Empty destructor: this class adds no owned resources beyond CutPursuit<T>.
~CutPursuit_L2(){
};
//=============================================================================================
//============================= COMPUTE ENERGY ===========================================
//=============================================================================================
virtual std::pair<T,T> compute_energy() override
{
VertexAttributeMap<T> vertex_attribute_map
= boost::get(boost::vertex_bundle, this->main_graph);
EdgeAttributeMap<T> edge_attribute_map
= boost::get(boost::edge_bundle, this->main_graph);
//the first element pair_energy of is the fidelity and the second the penalty
std::pair<T,T> pair_energy;
T energy = 0;
//#pragma omp parallel for private(i_dim) if (this->parameter.parallel) schedule(static) reduction(+:energy,i)
for (uint32_t ind_ver = 0; ind_ver < this->nVertex; ind_ver++)
{
VertexDescriptor<T> i_ver = boost::vertex(ind_ver, this->main_graph);
for(uint32_t i_dim=0; i_dim<this->dim; i_dim++)
{
energy += .5*vertex_attribute_map(i_ver).weight
* pow(vertex_attribute_map(i_ver).observation[i_dim]
- vertex_attribute_map(i_ver).value[i_dim],2);
}
}
pair_energy.first = energy;
energy = 0;
EdgeIterator<T> i_edg, i_edg_end = boost::edges(this->main_graph).second;
for (i_edg = boost::edges(this->main_graph).first; i_edg != i_edg_end; ++i_edg)
{
if (!edge_attribute_map(*i_edg).realEdge)
{
continue;
}
energy += .5 * edge_attribute_map(*i_edg).isActive * this->parameter.reg_strenth
* edge_attribute_map(*i_edg).weight;
}
pair_energy.second = energy;
return pair_energy;
}
//=============================================================================================
//============================= SPLIT ===========================================
//=============================================================================================
virtual uint32_t split() override
{ // split the graph by trying to find the best binary partition
  // each component is split into B and notB
  // for each component we associate the value h_1 and h_2 to vertices in B or notB
  // the affectation as well as h_1 and h_2 are computed alternatively
  //tic();
  //--------loading structures---------------------------------------------------------------
  uint32_t nb_comp = this->components.size();
  VertexAttributeMap<T> vertex_attribute_map
          = boost::get(boost::vertex_bundle, this->main_graph);
  VertexIndexMap<T> vertex_index_map = boost::get(boost::vertex_index, this->main_graph);
  uint32_t saturation;
  //stores whether each vertex is B or not
  std::vector<bool> binary_label(this->nVertex);
  //initialize the binary partition with kmeans
  this->init_labels(binary_label);
  //centers is the value of each binary component in the optimal partition
  VectorOfCentroids<T> centers(nb_comp, this->dim);
  //-----main loop----------------------------------------------------------------
  // the optimal flow is iteratively approximated
  for (uint32_t i_step = 1; i_step <= this->parameter.flow_steps; i_step++)
  {
    //the regularization strength at this step
    //compute h_1 and h_2 for the current labelling, then rebuild the
    //flow-graph capacities from them
    centers = VectorOfCentroids<T>(nb_comp, this->dim);
    this->compute_centers(centers, nb_comp, binary_label);
    this->set_capacities(centers);
    // update the capacities of the flow graph and solve the min-cut with
    // Boykov-Kolmogorov max-flow; afterwards each vertex's color encodes
    // which side of the cut it lies on
    boost::boykov_kolmogorov_max_flow(
         this->main_graph,
         get(&EdgeAttribute<T>::capacity        , this->main_graph),
         get(&EdgeAttribute<T>::residualCapacity, this->main_graph),
         get(&EdgeAttribute<T>::edge_reverse    , this->main_graph),
         get(&VertexAttribute<T>::color         , this->main_graph),
         get(boost::vertex_index                , this->main_graph),
         this->source,
         this->sink);
    // re-label the vertices of every non-saturated component from the cut:
    // a vertex joins B iff it ended up on the sink side
    for (uint32_t ind_com = 0; ind_com < nb_comp; ind_com++)
    {
      if (this->saturated_components[ind_com])
      {
        continue;
      }
      for (uint32_t i_ver = 0; i_ver < this->components[ind_com].size(); i_ver++)
      {
        binary_label[vertex_index_map(this->components[ind_com][i_ver])]
              = (vertex_attribute_map(this->components[ind_com][i_ver]).color
              == vertex_attribute_map(this->sink).color);
      }
    }
  }
  // activate the edges crossing the final partition and report saturation
  saturation = this->activate_edges();
  return saturation;
}
//=============================================================================================
//============================= INIT_L2 ====== ===========================================
//=============================================================================================
//! Initialize the binary labelling of every non-saturated component with a
//! 2-means clustering (KM++-style seeding, several resamplings).
//! Fixes vs. the original: the two energy-accumulation loops used
//! "#pragma omp parallel for ... shared(...)" with "+=" into a shared
//! scalar -- a data race; they now carry reduction clauses.  The
//! kernel-recomputation loop accumulated into the shared kernels[] /
//! total_weight[] arrays from several threads (also a race) and is now
//! sequential.  Labelling results are unchanged for single-threaded runs.
inline void init_labels(std::vector<bool> & binary_label)
{ //-----initialize the labelling for each components with kmeans------------------------------
    VertexAttributeMap<T> vertex_attribute_map
            = boost::get(boost::vertex_bundle, this->main_graph);
    VertexIndexMap<T> vertex_index_map = boost::get(boost::vertex_index, this->main_graph);
    uint32_t nb_comp = this->components.size();
    // Components are independent: parallelize across them when there are
    // enough of them; otherwise the inner loops are parallelized instead.
    #pragma omp parallel for if (nb_comp >= omp_get_num_threads()) schedule(dynamic)
    for (uint32_t ind_com = 0; ind_com < nb_comp; ind_com++)
    {
        std::vector< std::vector<T> > kernels(2, std::vector<T>(this->dim));
        T total_weight[2];
        T best_energy;
        T current_energy;
        uint32_t comp_size = this->components[ind_com].size();
        std::vector<bool> potential_label(comp_size);
        std::vector<T> energy_array(comp_size);
        if (this->saturated_components[ind_com] || comp_size <= 1)
        {
            continue;
        }
        for (uint32_t init_kmeans = 0; init_kmeans < this->parameter.kmeans_resampling; init_kmeans++)
        {   //proceed to several initializations of kmeans and pick up the best one
            //----- initialization with KM++ ------------------
            // NOTE(review): std::rand() is not thread-safe; kept as in the
            // original -- confirm whether reproducibility matters here.
            uint32_t first_kernel = std::rand() % comp_size, second_kernel = 0; // first kernel attributed
            for (uint32_t i_dim = 0; i_dim < this->dim; i_dim++)
            {
                kernels[0][i_dim] = vertex_attribute_map(this->components[ind_com][first_kernel]).observation[i_dim];
            }
            // weighted squared distance of each point to the first kernel;
            // the reduction makes the shared-sum accumulation race-free
            best_energy = 0;
            #pragma omp parallel for if (nb_comp < omp_get_num_threads()) schedule(static) reduction(+:best_energy)
            for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++)
            {
                energy_array[i_ver] = 0;
                for (uint32_t i_dim = 0; i_dim < this->dim; i_dim++)
                {
                    energy_array[i_ver] += pow(vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim]
                                  - kernels[0][i_dim],2) * vertex_attribute_map(this->components[ind_com][i_ver]).weight;
                }
                best_energy += energy_array[i_ver];
            }
            // draw the second kernel with probability proportional to the
            // squared distance (the KM++ seeding rule)
            T random_sample = ((T)(rand())) / ((T)(RAND_MAX));
            current_energy = best_energy * random_sample;
            for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++)
            {
                current_energy -= energy_array[i_ver];
                if (current_energy < 0)
                {   //we have selected the second kernel
                    second_kernel = i_ver;
                    break;
                }
            }
            for (uint32_t i_dim = 0; i_dim < this->dim; i_dim++)
            {   // now fill the second kernel
                kernels[1][i_dim] = vertex_attribute_map(this->components[ind_com][second_kernel]).observation[i_dim];
            }
            //----main kmeans loop-----
            for (uint32_t ite_kmeans = 0; ite_kmeans < this->parameter.kmeans_ite; ite_kmeans++)
            {
                //--affectation step: associate each node with its closest kernel
                // (each iteration writes a distinct index of potential_label,
                // so no reduction is needed here)
                #pragma omp parallel for if (nb_comp < omp_get_num_threads()) shared(potential_label) schedule(static)
                for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++)
                {
                    std::vector<T> distance_kernels(2);
                    for (uint32_t i_dim = 0; i_dim < this->dim; i_dim++)
                    {
                        distance_kernels[0] += pow(vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim]
                                  - kernels[0][i_dim],2);
                        distance_kernels[1] += pow(vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim]
                                  - kernels[1][i_dim],2);
                    }
                    potential_label[i_ver] = distance_kernels[0] > distance_kernels[1];
                }
                //-----computation of the new kernels----------------------------
                // sequential on purpose: concurrent "+=" into the shared
                // kernels[] / total_weight[] arrays would be a data race
                total_weight[0] = 0.;
                total_weight[1] = 0.;
                for (uint32_t i_dim = 0; i_dim < this->dim; i_dim++)
                {
                    kernels[0][i_dim] = 0;
                    kernels[1][i_dim] = 0;
                }
                for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++)
                {
                    if (vertex_attribute_map(this->components[ind_com][i_ver]).weight==0)
                    {
                        continue;
                    }
                    if (potential_label[i_ver])
                    {
                        total_weight[0] += vertex_attribute_map(this->components[ind_com][i_ver]).weight;
                        for (uint32_t i_dim = 0; i_dim < this->dim; i_dim++)
                        {
                            kernels[0][i_dim] += vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim]
                                     * vertex_attribute_map(this->components[ind_com][i_ver]).weight ;
                        }
                    }
                    else
                    {
                        total_weight[1] += vertex_attribute_map(this->components[ind_com][i_ver]).weight;
                        for (uint32_t i_dim = 0; i_dim < this->dim; i_dim++)
                        {
                            kernels[1][i_dim] += vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim]
                                     * vertex_attribute_map(this->components[ind_com][i_ver]).weight;
                        }
                    }
                }
                if ((total_weight[0] == 0)||(total_weight[1] == 0))
                {
                    // one cluster emptied out: abandon this kmeans run
                    //std::cout << "kmeans error : " << comp_size << std::endl;
                    break;
                }
                for (uint32_t i_dim = 0; i_dim < this->dim; i_dim++)
                {
                    kernels[0][i_dim] = kernels[0][i_dim] / total_weight[0];
                    kernels[1][i_dim] = kernels[1][i_dim] / total_weight[1];
                }
            }
            //----compute the associated energy ------
            current_energy = 0;
            #pragma omp parallel for if (nb_comp < omp_get_num_threads()) schedule(static) reduction(+:current_energy)
            for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++)
            {
                for (uint32_t i_dim = 0; i_dim < this->dim; i_dim++)
                {
                    if (potential_label[i_ver])
                    {
                        current_energy += pow(vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim]
                                  - kernels[0][i_dim],2) * vertex_attribute_map(this->components[ind_com][i_ver]).weight;
                    }
                    else
                    {
                        current_energy += pow(vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim]
                                  - kernels[1][i_dim],2) * vertex_attribute_map(this->components[ind_com][i_ver]).weight;
                    }
                }
            }
            // NOTE(review): best_energy here holds the one-kernel seeding
            // energy of THIS resampling, not the best two-means energy seen
            // across resamplings, so "pick up the best one" may not track
            // what the comment claims -- confirm intended semantics before
            // changing it.
            if (current_energy < best_energy)
            {
                best_energy = current_energy;
                for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++)
                {
                    binary_label[vertex_index_map(this->components[ind_com][i_ver])] = potential_label[i_ver];
                }
            }
        }
    }
}
//=============================================================================================
//============================= COMPUTE_CENTERS_L2 ==========================================
//=============================================================================================
inline void compute_centers(VectorOfCentroids<T> & centers, const uint32_t & nb_comp
            , const std::vector<bool> & binary_label)
{
    // Refresh the pair of centroids (h_1, h_2) of every non-saturated
    // component according to the current binary partition; saturated
    // components are left untouched. Components are independent, so the
    // loop is parallelized when there are at least as many components
    // as threads.
    #pragma omp parallel for if (nb_comp >= omp_get_num_threads()) schedule(dynamic)
    for (uint32_t i_com = 0; i_com < nb_comp; i_com++)
    {
        if (!this->saturated_components[i_com])
        {
            compute_center(centers.centroids[i_com], i_com, binary_label);
        }
    }
}
//=============================================================================================
//============================= COMPUTE_CENTERS_L2 ==========================================
//=============================================================================================
inline void compute_center( std::vector< std::vector<T> > & center, const uint32_t & ind_com
            , const std::vector<bool> & binary_label)
{
    // Compute the two centroids of component ind_com induced by the binary
    // partition: center[0] becomes the weighted mean observation of the
    // vertices labeled true, center[1] of those labeled false (zero-weight
    // vertices are ignored). If either side has zero total weight the cut
    // is degenerate: the component is marked saturated and both centroids
    // are set to the component's current value.
    VertexAttributeMap<T> vertex_attribute_map
            = boost::get(boost::vertex_bundle, this->main_graph);
    VertexIndexMap<T> vertex_index_map = boost::get(boost::vertex_index, this->main_graph);
    T total_weight[2];
    total_weight[0] = 0.;
    total_weight[1] = 0.;
    // Fix: clear the accumulators before the += loop below. The original
    // accumulated into whatever `center` already held, so a reused or
    // non-zero buffer corrupted the centroids (the analogous k-means code
    // zeroes its kernels before accumulating). Idempotent when the caller
    // already passes zeroed centroids.
    for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
    {
        center[0][i_dim] = 0;
        center[1][i_dim] = 0;
    }
    for (uint32_t i_ver = 0; i_ver < this->components[ind_com].size(); i_ver++)
    {
        if (vertex_attribute_map(this->components[ind_com][i_ver]).weight==0)
        {
            continue;
        }
        if (binary_label[vertex_index_map(this->components[ind_com][i_ver])])
        {
            total_weight[0] += vertex_attribute_map(this->components[ind_com][i_ver]).weight;
            for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
            {
                center[0][i_dim] += vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim]
                        * vertex_attribute_map(this->components[ind_com][i_ver]).weight;
            }
        }
        else
        {
            total_weight[1] += vertex_attribute_map(this->components[ind_com][i_ver]).weight;
            for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
            {
                center[1][i_dim] += vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim]
                        * vertex_attribute_map(this->components[ind_com][i_ver]).weight;
            }
        }
    }
    if ((total_weight[0] == 0)||(total_weight[1] == 0))
    {
        // One side of the cut is empty (by weight): the component is saturated.
        this->saturateComponent(ind_com);
        for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
        {
            center[0][i_dim] = vertex_attribute_map(this->components[ind_com][0]).value[i_dim];
            center[1][i_dim] = vertex_attribute_map(this->components[ind_com][0]).value[i_dim];
        }
    }
    else
    {
        // Normalize the weighted sums into weighted means.
        for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
        {
            center[0][i_dim] = center[0][i_dim] / total_weight[0];
            center[1][i_dim] = center[1][i_dim] / total_weight[1];
        }
    }
    return;
}
//=============================================================================================
//============================= SET_CAPACITIES ==========================================
//=============================================================================================
// Set the edge capacities of the flow graph for the next min-cut:
// source->vertex / vertex->sink capacities encode each vertex's data cost of
// joining centroid 0 vs centroid 1 of its component, and vertex->vertex
// capacities encode the regularization (edge weight * reg_strenth) on
// inactive real edges.
inline void set_capacities(const VectorOfCentroids<T> & centers)
{
VertexAttributeMap<T> vertex_attribute_map
= boost::get(boost::vertex_bundle, this->main_graph);
EdgeAttributeMap<T> edge_attribute_map
= boost::get(boost::edge_bundle, this->main_graph);
//----first compute the capacity in sink/node edges------------------------------------
//#pragma omp parallel for if (this->parameter.parallel) schedule(dynamic)
uint32_t nb_comp = this->components.size();
// Components are independent here: each iteration only touches the
// source/sink edges of its own component's vertices.
#pragma omp parallel for if (nb_comp >= omp_get_num_threads()) schedule(dynamic)
for (uint32_t ind_com = 0; ind_com < nb_comp; ind_com++)
{
VertexDescriptor<T> desc_v;
EdgeDescriptor desc_source2v, desc_v2sink, desc_v2source;
T cost_B, cost_notB; //the cost of being in B or not B, local for each component
if (this->saturated_components[ind_com])
{
continue;
}
for (uint32_t i_ver = 0; i_ver < this->components[ind_com].size(); i_ver++)
{
desc_v = this->components[ind_com][i_ver];
// because of the adjacency structure NEVER access edge (source,v) directly!
desc_v2source = boost::edge(desc_v, this->source,this->main_graph).first;
desc_source2v = edge_attribute_map(desc_v2source).edge_reverse; //use edge_reverse instead
desc_v2sink = boost::edge(desc_v, this->sink,this->main_graph).first;
cost_B = 0;
cost_notB = 0;
if (vertex_attribute_map(desc_v).weight==0)
{ //no observation - no cut
edge_attribute_map(desc_source2v).capacity = 0;
edge_attribute_map(desc_v2sink).capacity = 0;
continue;
}
// 0.5*w*(c^2 - 2*c*obs) = 0.5*w*((obs - c)^2 - obs^2): the obs^2 term
// is common to both labels, so it is omitted — only the cost
// difference matters for the cut below.
for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
{
cost_B += 0.5*vertex_attribute_map(desc_v).weight
* (pow(centers.centroids[ind_com][0][i_dim],2) - 2 * (centers.centroids[ind_com][0][i_dim]
* vertex_attribute_map(desc_v).observation[i_dim]));
cost_notB += 0.5*vertex_attribute_map(desc_v).weight
* (pow(centers.centroids[ind_com][1][i_dim],2) - 2 * (centers.centroids[ind_com][1][i_dim]
* vertex_attribute_map(desc_v).observation[i_dim]));
}
// Only the positive part of the cost difference is carried, on one of
// the two terminal edges (standard min-cut normalization).
if (cost_B>cost_notB)
{
edge_attribute_map(desc_source2v).capacity = cost_B - cost_notB;
edge_attribute_map(desc_v2sink).capacity = 0.;
}
else
{
edge_attribute_map(desc_source2v).capacity = 0.;
edge_attribute_map(desc_v2sink).capacity = cost_notB - cost_B;
}
}
}
//----then set the vertex to vertex edges ---------------------------------------------
// Regularization term: inactive real edges can be cut at a price
// proportional to their weight; already-active edges cost nothing.
EdgeIterator<T> i_edg, i_edg_end;
for (boost::tie(i_edg, i_edg_end) = boost::edges(this->main_graph);
i_edg != i_edg_end; ++i_edg)
{
if (!edge_attribute_map(*i_edg).realEdge)
{
continue;
}
if (!edge_attribute_map(*i_edg).isActive)
{
edge_attribute_map(*i_edg).capacity
= edge_attribute_map(*i_edg).weight * this->parameter.reg_strenth;
}
else
{
edge_attribute_map(*i_edg).capacity = 0;
}
}
}
//=============================================================================================
//================================= COMPUTE_VALUE =========================================
//=============================================================================================
virtual std::pair<std::vector<T>, T> compute_value(const uint32_t & ind_com) override
{
VertexAttributeMap<T> vertex_attribute_map
= boost::get(boost::vertex_bundle, this->main_graph);
T total_weight = 0;
std::vector<T> compValue(this->dim);
std::fill((compValue.begin()),(compValue.end()),0);
#pragma omp parallel for if (this->parameter.parallel) schedule(static)
for (uint32_t ind_ver = 0; ind_ver < this->components[ind_com].size(); ++ind_ver)
{
total_weight += vertex_attribute_map(this->components[ind_com][ind_ver]).weight;
for(uint32_t i_dim=0; i_dim<this->dim; i_dim++)
{
compValue[i_dim] += vertex_attribute_map(this->components[ind_com][ind_ver]).observation[i_dim]
* vertex_attribute_map(this->components[ind_com][ind_ver]).weight;
}
vertex_attribute_map(this->components[ind_com][ind_ver]).in_component = ind_com;
}
for(uint32_t i_dim=0; i_dim<this->dim; i_dim++)
{
compValue[i_dim] = compValue[i_dim] / total_weight;
}
for (uint32_t ind_ver = 0; ind_ver < this->components[ind_com].size(); ++ind_ver)
{
for(uint32_t i_dim=0; i_dim<this->dim; i_dim++)
{
vertex_attribute_map(this->components[ind_com][ind_ver]).value[i_dim] = compValue[i_dim];
}
}
return std::pair<std::vector<T>, T>(compValue, total_weight);
}
//=============================================================================================
//================================= COMPUTE_MERGE_GAIN =========================================
//=============================================================================================
virtual std::pair<std::vector<T>, T> compute_merge_gain(const VertexDescriptor<T> & comp1
, const VertexDescriptor<T> & comp2) override
{
VertexAttributeMap<T> reduced_vertex_attribute_map
= boost::get(boost::vertex_bundle, this->reduced_graph);
std::vector<T> merge_value(this->dim);
T gain = 0;
// compute the value obtained by mergeing the two connected components
for(uint32_t i_dim=0; i_dim<this->dim; i_dim++)
{
merge_value[i_dim] =
(reduced_vertex_attribute_map(comp1).weight *
reduced_vertex_attribute_map(comp1).value[i_dim]
+reduced_vertex_attribute_map(comp2).weight *
reduced_vertex_attribute_map(comp2).value[i_dim])
/(reduced_vertex_attribute_map(comp1).weight
+reduced_vertex_attribute_map(comp2).weight);
gain += 0.5 * (pow(merge_value[i_dim],2)
* (reduced_vertex_attribute_map(comp1).weight
+reduced_vertex_attribute_map(comp2).weight)
- pow(reduced_vertex_attribute_map(comp1).value[i_dim],2)
* reduced_vertex_attribute_map(comp1).weight
- pow(reduced_vertex_attribute_map(comp2).value[i_dim],2)
* reduced_vertex_attribute_map(comp2).weight);
}
return std::pair<std::vector<T>, T>(merge_value, gain);
}
};
}
|
flux_avx512.c |
/*
Author: Mohammed Al Farhan
Email: mohammed.farhan@kaust.edu.sa
*/
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <omp.h>
#include <mathimf.h>
#include <immintrin.h>
#include "inc/ktime.h"
#include "inc/geometry.h"
#include "inc/ker/phy.h"
#define MAG0 (0.5 / 3)
#define MAG1 (-MAG0)
/*
Calculates the residual
*/
void
compute_flux(struct flux *restrict flux)
{
struct ktime ktime;
setktime(&ktime);
const size_t bsz = flux->bsz;
const size_t nfnodes = flux->nfnodes;
const size_t dofs = flux->dofs;
const uint32_t snfc = flux->snfc;
const double pressure = flux->pressure;
const double velocity_u = flux->velocity_u;
const double velocity_v = flux->velocity_v;
const double velocity_w = flux->velocity_w;
const double *restrict f_xyz0 = flux->f_xyz0;
const double *restrict f_xyz1 = flux->f_xyz1;
const double *restrict f_xyz2 = flux->f_xyz2;
const double *restrict xyz0 = flux->xyz0;
const double *restrict xyz1 = flux->xyz1;
const double *restrict xyz2 = flux->xyz2;
const double *restrict x0 = flux->x0;
const double *restrict x1 = flux->x1;
const double *restrict x2 = flux->x2;
const double *restrict x3 = flux->x3;
const double *restrict q = flux->q;
const double *restrict gradx0 = flux->gradx0;
const double *restrict gradx1 = flux->gradx1;
const double *restrict gradx2 = flux->gradx2;
const uint32_t *restrict ie = flux->ie;
const uint32_t *restrict part = flux->part;
const uint32_t *restrict snfic = flux->snfic;
const uint32_t *restrict n0 = flux->n0;
const uint32_t *restrict n1 = flux->n1;
const uint32_t *restrict nfptr = flux->nfptr;
const uint32_t *restrict sn0 = flux->sn0;
const uint32_t *restrict sn1 = flux->sn1;
const uint32_t *restrict sn2 = flux->sn2;
double *restrict r = flux->r;
memset(r, 0, dofs * sizeof(double));
__assume_aligned(x0, 64);
__assume_aligned(x1, 64);
__assume_aligned(x2, 64);
__assume_aligned(x3, 64);
__assume_aligned(gradx0, 64);
__assume_aligned(gradx1, 64);
__assume_aligned(gradx2, 64);
__assume_aligned(r, 64);
/* AVX512 Registers */
const __m512d _zero = _mm512_set1_pd(0);
const __m512d _pos1 = _mm512_set1_pd(1.0);
const __m512d _pos2 = _mm512_set1_pd(2.0);
const __m512d _half = _mm512_set1_pd(0.5);
const __m512d _nhalf = _mm512_set1_pd(-0.5);
const __m512d _nu95 = _mm512_set1_pd(0.95);
const __m512d _beta = _mm512_set1_pd(BETA);
#ifdef __USE_SKX
const __m512d _rbeta = _mm512_rcp14_pd(_beta);
#else
const __m512d _rbeta = _mm512_rcp28_pd(_beta);
#endif
const __m256i _bsz = _mm256_set1_epi32(bsz);
const __m256i _shift1 = _mm256_set1_epi32(1);
const __m256i _shift2 = _mm256_set1_epi32(2);
const __m256i _shift3 = _mm256_set1_epi32(3);
const __m512i _ng = _mm512_set1_epi32(-1);
const __m512d _und = _mm512_undefined_pd();
/*
Calculates the fluxes on the face and performs the flux balance
*/
#pragma omp parallel
{
const uint32_t t = omp_get_thread_num();
const uint32_t ie0 = ie[t];
const uint32_t ie1 = ie[t+1];
const uint32_t lim = ie1 - ((ie1-ie0) % 8);
const __m512i _t = _mm512_set1_epi32(t);
uint32_t i;
for(i = ie0; i < lim; i+=8)
{
const __m512d _xn = _mm512_load_pd((void const *) &x0[i]);
const __m512d _yn = _mm512_load_pd((void const *) &x1[i]);
const __m512d _zn = _mm512_load_pd((void const *) &x2[i]);
const __m512d _ln = _mm512_load_pd((void const *) &x3[i]);
/*
Now lets get our other 2 vectors
For first vector, use {1,0,0} and subtract off the component
in the direction of the face normal. If the inner product of
{1,0,0} is close to unity, use {0,1,0}
*/
const __m512d _fdot = _mm512_abs_pd(_xn);
__mmask _k0;
__m512d _dot, _X1, _Y1, _Z1;
_k0 = _mm512_cmp_pd_mask(_fdot, _nu95, _CMP_LT_OS);
_X1 = _mm512_mask_fnmadd_pd(_xn, _k0, _xn, _pos1);
_Y1 = _mm512_mask_fnmadd_pd(_yn, _k0, _xn, _zero);
_Z1 = _mm512_mask_fnmadd_pd(_zn, _k0, _xn, _zero);
_k0 = _mm512_cmp_pd_mask(_fdot, _nu95, _CMP_GE_OS);
_X1 = _mm512_mask_fnmadd_pd(_X1, _k0, _yn, _zero);
_Y1 = _mm512_mask_fnmadd_pd(_Y1, _k0, _yn, _pos1);
_Z1 = _mm512_mask_fnmadd_pd(_Z1, _k0, _yn, _zero);
/*
Normalize the first vector
*/
__m512d _size;
_size = _mm512_mul_pd(_X1, _X1);
_size = _mm512_fmadd_pd(_Y1, _Y1, _size);
_size = _mm512_fmadd_pd(_Z1, _Z1, _size);
#ifdef __USE_SKX
_size = _mm512_rsqrt14_pd(_size);
#else
_size = _mm512_rsqrt28_pd(_size);
#endif
_X1 = _mm512_mul_pd(_X1, _size);
_Y1 = _mm512_mul_pd(_Y1, _size);
_Z1 = _mm512_mul_pd(_Z1, _size);
const __m256i _n0 = _mm256_load_si256((__m256i const *) &n0[i]);
const __m256i _n1 = _mm256_load_si256((__m256i const *) &n1[i]);
const __m512d _x00 = _mm512_i32gather_pd(_n0, &xyz0[0], 8);
const __m512d _x01 = _mm512_i32gather_pd(_n0, &xyz1[0], 8);
const __m512d _x02 = _mm512_i32gather_pd(_n0, &xyz2[0], 8);
const __m512d _x10 = _mm512_i32gather_pd(_n1, &xyz0[0], 8);
const __m512d _x11 = _mm512_i32gather_pd(_n1, &xyz1[0], 8);
const __m512d _x12 = _mm512_i32gather_pd(_n1, &xyz2[0], 8);
const __m512d _xmean = _mm512_mul_pd(_half, _mm512_add_pd(_x00, _x10));
const __m512d _ymean = _mm512_mul_pd(_half, _mm512_add_pd(_x01, _x11));
const __m512d _zmean = _mm512_mul_pd(_half, _mm512_add_pd(_x02, _x12));
/*
Take cross-product of normal and V1 to get V2
*/
const __m512d _X2 = _mm512_fmsub_pd(_yn, _Z1, _mm512_mul_pd(_zn, _Y1));
const __m512d _Y2 = _mm512_fmsub_pd(_zn, _X1, _mm512_mul_pd(_xn, _Z1));
const __m512d _Z2 = _mm512_fmsub_pd(_xn, _Y1, _mm512_mul_pd(_yn, _X1));
/*
Compute the stride indices
*/
const __m256i _idx0 = _mm256_mullo_epi32(_bsz, _n0);
const __m256i _idx1 = _mm256_mullo_epi32(_bsz, _n1);
const __m256i _idx01 = _mm256_add_epi32(_idx0, _shift1);
const __m256i _idx11 = _mm256_add_epi32(_idx1, _shift1);
const __m256i _idx02 = _mm256_add_epi32(_idx0, _shift2);
const __m256i _idx12 = _mm256_add_epi32(_idx1, _shift2);
const __m256i _idx03 = _mm256_add_epi32(_idx0, _shift3);
const __m256i _idx13 = _mm256_add_epi32(_idx1, _shift3);
/*
Get variables on "left" and "right" side of face
*/
__m512d _q;
__m512d _ubarL, _ubarR;
__m512d _rx, _ry, _rz;
__m512d _g0, _g1, _g2;
__m512d _pL, _uL, _vL, _wL;
__m512d _pR, _uR, _vR, _wR;
/* Left */
_rx = _mm512_sub_pd(_xmean, _x00);
_ry = _mm512_sub_pd(_ymean, _x01);
_rz = _mm512_sub_pd(_zmean, _x02);
/* Pressure */
_g0 = _mm512_i32gather_pd(_idx0, &gradx0[0], 8);
_g1 = _mm512_i32gather_pd(_idx0, &gradx1[0], 8);
_g2 = _mm512_i32gather_pd(_idx0, &gradx2[0], 8);
_q = _mm512_i32gather_pd(_idx0, &q[0], 8);
_pL = _mm512_fmadd_pd(_g0, _rx, _q);
_pL = _mm512_fmadd_pd(_g1, _ry, _pL);
_pL = _mm512_fmadd_pd(_g2, _rz, _pL);
/* Velocity u */
_g0 = _mm512_i32gather_pd(_idx01, &gradx0[0], 8);
_g1 = _mm512_i32gather_pd(_idx01, &gradx1[0], 8);
_g2 = _mm512_i32gather_pd(_idx01, &gradx2[0], 8);
_q = _mm512_i32gather_pd(_idx01, &q[0], 8);
_uL = _mm512_fmadd_pd(_g0, _rx, _q);
_uL = _mm512_fmadd_pd(_g1, _ry, _uL);
_uL = _mm512_fmadd_pd(_g2, _rz, _uL);
/* Velocity v */
_g0 = _mm512_i32gather_pd(_idx02, &gradx0[0], 8);
_g1 = _mm512_i32gather_pd(_idx02, &gradx1[0], 8);
_g2 = _mm512_i32gather_pd(_idx02, &gradx2[0], 8);
_q = _mm512_i32gather_pd(_idx02, &q[0], 8);
_vL = _mm512_fmadd_pd(_g0, _rx, _q);
_vL = _mm512_fmadd_pd(_g1, _ry, _vL);
_vL = _mm512_fmadd_pd(_g2, _rz, _vL);
/* Velocity w */
_g0 = _mm512_i32gather_pd(_idx03, &gradx0[0], 8);
_g1 = _mm512_i32gather_pd(_idx03, &gradx1[0], 8);
_g2 = _mm512_i32gather_pd(_idx03, &gradx2[0], 8);
_q = _mm512_i32gather_pd(_idx03, &q[0], 8);
_wL = _mm512_fmadd_pd(_g0, _rx, _q);
_wL = _mm512_fmadd_pd(_g1, _ry, _wL);
_wL = _mm512_fmadd_pd(_g2, _rz, _wL);
_ubarL = _mm512_mul_pd(_xn, _uL);
_ubarL = _mm512_fmadd_pd(_yn, _vL, _ubarL);
_ubarL = _mm512_fmadd_pd(_zn, _wL, _ubarL);
/* Right */
_rx = _mm512_sub_pd(_xmean, _x10);
_ry = _mm512_sub_pd(_ymean, _x11);
_rz = _mm512_sub_pd(_zmean, _x12);
/* Pressure */
_g0 = _mm512_i32gather_pd(_idx1, &gradx0[0], 8);
_g1 = _mm512_i32gather_pd(_idx1, &gradx1[0], 8);
_g2 = _mm512_i32gather_pd(_idx1, &gradx2[0], 8);
_q = _mm512_i32gather_pd(_idx1, &q[0], 8);
_pR = _mm512_fmadd_pd(_g0, _rx, _q);
_pR = _mm512_fmadd_pd(_g1, _ry, _pR);
_pR = _mm512_fmadd_pd(_g2, _rz, _pR);
/* Velocity u */
_g0 = _mm512_i32gather_pd(_idx11, &gradx0[0], 8);
_g1 = _mm512_i32gather_pd(_idx11, &gradx1[0], 8);
_g2 = _mm512_i32gather_pd(_idx11, &gradx2[0], 8);
_q = _mm512_i32gather_pd(_idx11, &q[0], 8);
_uR = _mm512_fmadd_pd(_g0, _rx, _q);
_uR = _mm512_fmadd_pd(_g1, _ry, _uR);
_uR = _mm512_fmadd_pd(_g2, _rz, _uR);
/* Velocity v */
_g0 = _mm512_i32gather_pd(_idx12, &gradx0[0], 8);
_g1 = _mm512_i32gather_pd(_idx12, &gradx1[0], 8);
_g2 = _mm512_i32gather_pd(_idx12, &gradx2[0], 8);
_q = _mm512_i32gather_pd(_idx12, &q[0], 8);
_vR = _mm512_fmadd_pd(_g0, _rx, _q);
_vR = _mm512_fmadd_pd(_g1, _ry, _vR);
_vR = _mm512_fmadd_pd(_g2, _rz, _vR);
/* Velocity w */
_g0 = _mm512_i32gather_pd(_idx13, &gradx0[0], 8);
_g1 = _mm512_i32gather_pd(_idx13, &gradx1[0], 8);
_g2 = _mm512_i32gather_pd(_idx13, &gradx2[0], 8);
_q = _mm512_i32gather_pd(_idx13, &q[0], 8);
_wR = _mm512_fmadd_pd(_g0, _rx, _q);
_wR = _mm512_fmadd_pd(_g1, _ry, _wR);
_wR = _mm512_fmadd_pd(_g2, _rz, _wR);
_ubarR = _mm512_mul_pd(_xn, _uR);
_ubarR = _mm512_fmadd_pd(_yn, _vR, _ubarR);
_ubarR = _mm512_fmadd_pd(_zn, _wR, _ubarR);
const __m512d _dp = _mm512_sub_pd(_pR, _pL);
const __m512d _du = _mm512_sub_pd(_uR, _uL);
const __m512d _dv = _mm512_sub_pd(_vR, _vL);
const __m512d _dw = _mm512_sub_pd(_wR, _wL);
/* Compute averages for velocity variables only */
const __m512d _u = _mm512_mul_pd(_half, _mm512_add_pd(_uL, _uR));
const __m512d _v = _mm512_mul_pd(_half, _mm512_add_pd(_vL, _vR));
const __m512d _w = _mm512_mul_pd(_half, _mm512_add_pd(_wL, _wR));
__m512d _ubar;
_ubar = _mm512_mul_pd(_xn, _u);
_ubar = _mm512_fmadd_pd(_yn, _v, _ubar);
_ubar = _mm512_fmadd_pd(_zn, _w, _ubar);
/* Compute Phi's */
__m512d _phi1;
_phi1 = _mm512_mul_pd(_xn, _beta);
_phi1 = _mm512_fmadd_pd(_u, _ubar, _phi1);
__m512d _phi2;
_phi2 = _mm512_mul_pd(_yn, _beta);
_phi2 = _mm512_fmadd_pd(_v, _ubar, _phi2);
__m512d _phi3;
_phi3 = _mm512_mul_pd(_zn, _beta);
_phi3 = _mm512_fmadd_pd(_w, _ubar, _phi3);
__m512d _phi4;
_phi4 = _mm512_mul_pd(_Z2, _phi2);
_phi4 = _mm512_fmsub_pd(_Y2, _phi3, _phi4);
__m512d _phi5;
_phi5 = _mm512_mul_pd(_X2, _phi3);
_phi5 = _mm512_fmsub_pd(_Z2, _phi1, _phi5);
__m512d _phi6;
_phi6 = _mm512_mul_pd(_Y2, _phi1);
_phi6 = _mm512_fmsub_pd(_X2, _phi2, _phi6);
__m512d _phi7;
_phi7 = _mm512_mul_pd(_Y1, _phi3);
_phi7 = _mm512_fmsub_pd(_Z1, _phi2, _phi7);
__m512d _phi8;
_phi8 = _mm512_mul_pd(_Z1, _phi1);
_phi8 = _mm512_fmsub_pd(_X1, _phi3, _phi8);
__m512d _phi9;
_phi9 = _mm512_mul_pd(_X1, _phi2);
_phi9 = _mm512_fmsub_pd(_Y1, _phi1, _phi9);
/*
Compute eigenvalues, eigenvectors, and strengths
*/
const __m512d _c2 = _mm512_fmadd_pd(_ubar, _ubar, _beta);
#ifdef __USE_SKX
const __m512d _c = _mm512_mul_pd(_mm512_rsqrt14_pd(_c2), _c2);
const __m512d _c2r = _mm512_rcp14_pd(_c2);
#else
const __m512d _c = _mm512_mul_pd(_mm512_rsqrt28_pd(_c2), _c2);
const __m512d _c2r = _mm512_rcp28_pd(_c2);
#endif
const __m512d _bac = _mm512_add_pd(_ubar, _c);
const __m512d _bsc = _mm512_sub_pd(_ubar, _c);
/*
Components of T(inverse)
*/
__m512d _ti11;
_ti11 = _mm512_mul_pd(_u, _phi4);
_ti11 = _mm512_fmadd_pd(_v, _phi5, _ti11);
_ti11 = _mm512_fmadd_pd(_w, _phi6, _ti11);
_ti11 = _mm512_fnmadd_pd(_ti11, _rbeta, _zero);
__m512d _ti21;
_ti21 = _mm512_mul_pd(_u, _phi7);
_ti21 = _mm512_fmadd_pd(_v, _phi8, _ti21);
_ti21 = _mm512_fmadd_pd(_w, _phi9, _ti21);
_ti21 = _mm512_fnmadd_pd(_ti21, _rbeta, _zero);
__m512d _ti31;
_ti31 = _mm512_mul_pd(_half, _mm512_sub_pd(_c, _ubar));
_ti31 = _mm512_mul_pd(_ti31, _rbeta);
__m512d _ti41;
_ti41 = _mm512_mul_pd(_nhalf, _bac);
_ti41 = _mm512_mul_pd(_ti41, _rbeta);
/*
jumps (T(inverse) * dq)
*/
__m512d _dv1;
_dv1 = _mm512_mul_pd(_ti11, _dp);
_dv1 = _mm512_fmadd_pd(_phi4, _du, _dv1);
_dv1 = _mm512_fmadd_pd(_phi5, _dv, _dv1);
_dv1 = _mm512_fmadd_pd(_phi6, _dw, _dv1);
_dv1 = _mm512_mul_pd(_dv1, _c2r);
__m512d _dv2;
_dv2 = _mm512_mul_pd(_ti21, _dp);
_dv2 = _mm512_fmadd_pd(_phi7, _du, _dv2);
_dv2 = _mm512_fmadd_pd(_phi8, _dv, _dv2);
_dv2 = _mm512_fmadd_pd(_phi9, _dw, _dv2);
_dv2 = _mm512_mul_pd(_dv2, _c2r);
__m512d _dv34;
_dv34 = _mm512_mul_pd(_xn, _du);
_dv34 = _mm512_fmadd_pd(_yn, _dv, _dv34);
_dv34 = _mm512_fmadd_pd(_zn, _dw, _dv34);
__m512d _dv3;
_dv3 = _mm512_fmadd_pd(_mm512_mul_pd(_pos2, _ti31), _dp, _dv34);
_dv3 = _mm512_mul_pd(_dv3, _mm512_mul_pd(_half, _c2r));
__m512d _dv4;
_dv4 = _mm512_fmadd_pd(_mm512_mul_pd(_pos2, _ti41), _dp, _dv34);
_dv4 = _mm512_mul_pd(_dv4, _mm512_mul_pd(_half, _c2r));
/*
Now get elements of T
*/
const __m512d _r13 = _mm512_mul_pd(_c, _beta);
__m512d _r23;
_r23 = _mm512_mul_pd(_u, _bac);
_r23 = _mm512_fmadd_pd(_xn, _beta, _r23);
__m512d _r33;
_r33 = _mm512_mul_pd(_v, _bac);
_r33 = _mm512_fmadd_pd(_yn, _beta, _r33);
__m512d _r43;
_r43 = _mm512_mul_pd(_w, _bac);
_r43 = _mm512_fmadd_pd(_zn, _beta, _r43);
const __m512d _r14 = _mm512_fnmadd_pd(_c, _beta, _zero);
__m512d _r24;
_r24 = _mm512_mul_pd(_u, _bsc);
_r24 = _mm512_fmadd_pd(_xn, _beta, _r24);
__m512d _r34;
_r34 = _mm512_mul_pd(_v, _bsc);
_r34 = _mm512_fmadd_pd(_yn, _beta, _r34);
__m512d _r44;
_r44 = _mm512_mul_pd(_w, _bsc);
_r44 = _mm512_fmadd_pd(_zn, _beta, _r44);
/*
Calculate T* |lambda| * T(inverse)
*/
const __m512d _eig1 = _mm512_abs_pd(_ubar);
const __m512d _eig2 = _mm512_abs_pd(_bac);
const __m512d _eig3 = _mm512_abs_pd(_bsc);
__m512d _t1;
_t1 = _mm512_mul_pd(_mm512_mul_pd(_eig2, _r13), _dv3);
_t1 = _mm512_fmadd_pd(_mm512_mul_pd(_eig3, _r14), _dv4, _t1);
__m512d _t2;
_t2 = _mm512_mul_pd(_mm512_mul_pd(_eig1, _X1), _dv1);
_t2 = _mm512_fmadd_pd(_mm512_mul_pd(_eig1, _X2), _dv2, _t2);
_t2 = _mm512_fmadd_pd(_mm512_mul_pd(_eig2, _r23), _dv3, _t2);
_t2 = _mm512_fmadd_pd(_mm512_mul_pd(_eig3, _r24), _dv4, _t2);
__m512d _t3;
_t3 = _mm512_mul_pd(_mm512_mul_pd(_eig1, _Y1), _dv1);
_t3 = _mm512_fmadd_pd(_mm512_mul_pd(_eig1, _Y2), _dv2, _t3);
_t3 = _mm512_fmadd_pd(_mm512_mul_pd(_eig2, _r33), _dv3, _t3);
_t3 = _mm512_fmadd_pd(_mm512_mul_pd(_eig3, _r34), _dv4, _t3);
__m512d _t4;
_t4 = _mm512_mul_pd(_mm512_mul_pd(_eig1, _Z1), _dv1);
_t4 = _mm512_fmadd_pd(_mm512_mul_pd(_eig1, _Z2), _dv2, _t4);
_t4 = _mm512_fmadd_pd(_mm512_mul_pd(_eig2, _r43), _dv3, _t4);
_t4 = _mm512_fmadd_pd(_mm512_mul_pd(_eig3, _r44), _dv4, _t4);
/*
Modify to calculate .5(fl +fr) from nodes
instead of extrapolated ones
*/
/* Left Side */
__m512d _fluxp1;
_fluxp1 = _mm512_mul_pd(_mm512_mul_pd(_ln, _beta), _ubarL);
__m512d _fluxp2;
_fluxp2 = _mm512_mul_pd(_uL, _ubarL);
_fluxp2 = _mm512_fmadd_pd(_xn, _pL, _fluxp2);
_fluxp2 = _mm512_mul_pd(_ln, _fluxp2);
__m512d _fluxp3;
_fluxp3 = _mm512_mul_pd(_vL, _ubarL);
_fluxp3 = _mm512_fmadd_pd(_yn, _pL, _fluxp3);
_fluxp3 = _mm512_mul_pd(_ln, _fluxp3);
__m512d _fluxp4;
_fluxp4 = _mm512_mul_pd(_wL, _ubarL);
_fluxp4 = _mm512_fmadd_pd(_zn, _pL, _fluxp4);
_fluxp4 = _mm512_mul_pd(_ln, _fluxp4);
/* Right Side */
__m512d _fluxm1;
_fluxm1 = _mm512_mul_pd(_mm512_mul_pd(_ln, _beta), _ubarR);
__m512d _fluxm2;
_fluxm2 = _mm512_mul_pd(_uR, _ubarR);
_fluxm2 = _mm512_fmadd_pd(_xn, _pR, _fluxm2);
_fluxm2 = _mm512_mul_pd(_ln, _fluxm2);
__m512d _fluxm3;
_fluxm3 = _mm512_mul_pd(_vR, _ubarR);
_fluxm3 = _mm512_fmadd_pd(_yn, _pR, _fluxm3);
_fluxm3 = _mm512_mul_pd(_ln, _fluxm3);
__m512d _fluxm4;
_fluxm4 = _mm512_mul_pd(_wR, _ubarR);
_fluxm4 = _mm512_fmadd_pd(_zn, _pR, _fluxm4);
_fluxm4 = _mm512_mul_pd(_ln, _fluxm4);
__m512d _res1;
_res1 = _mm512_fnmadd_pd(_ln, _t1, _mm512_add_pd(_fluxm1, _fluxp1));
__m512d _res2;
_res2 = _mm512_fnmadd_pd(_ln, _t2, _mm512_add_pd(_fluxm2, _fluxp2));
__m512d _res3;
_res3 = _mm512_fnmadd_pd(_ln, _t3, _mm512_add_pd(_fluxm3, _fluxp3));
__m512d _res4;
_res4 = _mm512_fnmadd_pd(_ln, _t4, _mm512_add_pd(_fluxm4, _fluxp4));
/* Update the residual */
__m512i _node, _part;
__mmask _next;
_node = _mm512_castsi256_si512(_n0);
_part = _mm512_i32gather_epi32(_node, &part[0], 4);
_next = _mm512_cmpeq_epi32_mask(_part, _t);
/* Conflict detection instructions with multiple node update */
/* Node 0 Contributions */
do {
__m512i _cd, _bnext;
__m512d _v, _d;
__mmask _crt;
_cd = _mm512_mask_conflict_epi32(_ng, _next, _node);
_bnext = _mm512_broadcastmw_epi32(_next);
_crt = _mm512_mask_testn_epi32_mask(_next, _cd, _bnext);
_v = _mm512_mask_i32gather_pd(_und, _crt, _idx0, &r[0], 8);
_d = _mm512_mask_fmadd_pd(_res1, _crt, _half, _v);
_mm512_mask_i32scatter_pd(&r[0], _crt, _idx0, _d, 8);
_v = _mm512_mask_i32gather_pd(_und, _crt, _idx01, &r[0], 8);
_d = _mm512_mask_fmadd_pd(_res2, _crt, _half, _v);
_mm512_mask_i32scatter_pd(&r[0], _crt, _idx01, _d, 8);
_v = _mm512_mask_i32gather_pd(_und, _crt, _idx02, &r[0], 8);
_d = _mm512_mask_fmadd_pd(_res3, _crt, _half, _v);
_mm512_mask_i32scatter_pd(&r[0], _crt, _idx02, _d, 8);
_v = _mm512_mask_i32gather_pd(_und, _crt, _idx03, &r[0], 8);
_d = _mm512_mask_fmadd_pd(_res4, _crt, _half, _v);
_mm512_mask_i32scatter_pd(&r[0], _crt, _idx03, _d, 8);
_next = _mm512_kxor(_next, _crt);
} while(_next);
_node = _mm512_castsi256_si512(_n1);
_part = _mm512_i32gather_epi32(_node, &part[0], 4);
_next = _mm512_cmpeq_epi32_mask(_part, _t);
/* Node 1 Contributions */
do {
__m512i _cd, _bnext;
__m512d _v, _d;
__mmask _crt;
_cd = _mm512_mask_conflict_epi32(_ng, _next, _node);
_bnext = _mm512_broadcastmw_epi32(_next);
_crt = _mm512_mask_testn_epi32_mask(_next, _cd, _bnext);
_v = _mm512_mask_i32gather_pd(_und, _crt, _idx1, &r[0], 8);
_d = _mm512_mask_fnmadd_pd(_res1, _crt, _half, _v);
_mm512_mask_i32scatter_pd(&r[0], _crt, _idx1, _d, 8);
_v = _mm512_mask_i32gather_pd(_und, _crt, _idx11, &r[0], 8);
_d = _mm512_mask_fnmadd_pd(_res2, _crt, _half, _v);
_mm512_mask_i32scatter_pd(&r[0], _crt, _idx11, _d, 8);
_v = _mm512_mask_i32gather_pd(_und, _crt, _idx12, &r[0], 8);
_d = _mm512_mask_fnmadd_pd(_res3, _crt, _half, _v);
_mm512_mask_i32scatter_pd(&r[0], _crt, _idx12, _d, 8);
_v = _mm512_mask_i32gather_pd(_und, _crt, _idx13, &r[0], 8);
_d = _mm512_mask_fnmadd_pd(_res4, _crt, _half, _v);
_mm512_mask_i32scatter_pd(&r[0], _crt, _idx13, _d, 8);
_next = _mm512_kxor(_next, _crt);
} while(_next);
}
/* Remainder loop */
for(i = lim; i < ie1; i++)
{
const uint32_t node0 = n0[i];
const uint32_t node1 = n1[i];
const double xn = x0[i];
const double yn = x1[i];
const double zn = x2[i];
const double ln = x3[i];
const double xmean = 0.5f * (xyz0[node0] + xyz0[node1]);
const double ymean = 0.5f * (xyz1[node0] + xyz1[node1]);
const double zmean = 0.5f * (xyz2[node0] + xyz2[node1]);
/*
Now lets get our other 2 vectors
For first vector, use {1,0,0} and subtract off the component
in the direction of the face normal. If the inner product of
{1,0,0} is close to unity, use {0,1,0}
*/
double X1 = (fabs(xn) < 0.95) ? (1 - xn * xn) : (- yn * xn);
double Y1 = (fabs(xn) < 0.95) ? (- xn * yn) : (1 - yn * yn);
double Z1 = (fabs(xn) < 0.95) ? (- xn * zn) : (- yn * zn);
/*
Normalize the first vector
*/
double size = X1 * X1;
size += Y1 * Y1;
size += Z1 * Z1;
size = sqrt(size);
X1 /= size;
Y1 /= size;
Z1 /= size;
/*
Take cross-product of normal and V1 to get V2
*/
const double X2 = yn * Z1 - zn * Y1;
const double Y2 = zn * X1 - xn * Z1;
const double Z2 = xn * Y1 - yn * X1;
/*
Get variables on "left" and "right" side of face
*/
double rx = xmean - xyz0[node0];
double ry = ymean - xyz1[node0];
double rz = zmean - xyz2[node0];
const uint32_t idx0 = bsz * node0;
const uint32_t idx1 = bsz * node1;
// Pressure
double pL = q[idx0 + 0] + gradx0[idx0 + 0] * rx;
pL += gradx1[idx0 + 0] * ry;
pL += gradx2[idx0 + 0] * rz;
// Velocity u
double uL = q[idx0 + 1] + gradx0[idx0 + 1] * rx;
uL += gradx1[idx0 + 1] * ry;
uL += gradx2[idx0 + 1] * rz;
// Velocity v
double vL = q[idx0 + 2] + gradx0[idx0 + 2] * rx;
vL += gradx1[idx0 + 2] * ry;
vL += gradx2[idx0 + 2] * rz;
// Velocity w
double wL = q[idx0 + 3] + gradx0[idx0 + 3] * rx;
wL += gradx1[idx0 + 3] * ry;
wL += gradx2[idx0 + 3] * rz;
double ubarL = xn * uL;
ubarL += yn * vL;
ubarL += zn * wL;
rx = xmean - xyz0[node1];
ry = ymean - xyz1[node1];
rz = zmean - xyz2[node1];
// Pressure
double pR = q[idx1 + 0] + gradx0[idx1 + 0] * rx;
pR += gradx1[idx1 + 0] * ry;
pR += gradx2[idx1 + 0] * rz;
// Velocity u
double uR = q[idx1 + 1] + gradx0[idx1 + 1] * rx;
uR += gradx1[idx1 + 1] * ry;
uR += gradx2[idx1 + 1] * rz;
// Velocity v
double vR = q[idx1 + 2] + gradx0[idx1 + 2] * rx;
vR += gradx1[idx1 + 2] * ry;
vR += gradx2[idx1 + 2] * rz;
// Velocity w
double wR = q[idx1 + 3] + gradx0[idx1 + 3] * rx;
wR += gradx1[idx1 + 3] * ry;
wR += gradx2[idx1 + 3] * rz;
double ubarR = xn * uR;
ubarR += yn * vR;
ubarR += zn * wR;
/* Compute averages */
const double u = 0.5f * (uL + uR);
const double v = 0.5f * (vL + vR);
const double w = 0.5f * (wL + wR);
double ubar = xn * u;
ubar += yn * v;
ubar += zn * w;
double phi1 = xn * BETA;
phi1 += u * ubar;
double phi2 = yn * BETA;
phi2 += v * ubar;
double phi3 = zn * BETA;
phi3 += w * ubar;
double phi4 = Y2 * phi3;
phi4 -= Z2 * phi2;
double phi5 = Z2 * phi1;
phi5 -= X2 * phi3;
double phi6 = X2 * phi2;
phi6 -= Y2 * phi1;
double phi7 = Z1 * phi2;
phi7 -= Y1 * phi3;
double phi8 = X1 * phi3;
phi8 -= Z1 * phi1;
double phi9 = Y1 * phi1;
phi9 -= X1 * phi2;
double c2 = ubar * ubar + BETA;
double c = sqrt(c2);
/*
Now compute eigenvalues, eigenvectors, and strengths
*/
const double uac = ubar + c;
const double usc = ubar - c;
const double eig1 = fabs(ubar);
const double eig2 = fabs(uac);
const double eig3 = fabs(usc);
const double dp = pR - pL;
const double du = uR - uL;
const double dv = vR - vL;
const double dw = wR - wL;
/*
Components of T(inverse)
*/
double ti11 = u * phi4;
ti11 += v * phi5;
ti11 += w * phi6;
ti11 = -ti11 / BETA;
double ti21 = u * phi7;
ti21 += v * phi8;
ti21 += w * phi9;
ti21 = -ti21 / BETA;
double ti31 = 0.5f * (c - ubar);
ti31 /= BETA;
double ti41 = -0.5f * uac;
ti41 /= BETA;
/*
jumps (T(inverse) * dq)
*/
double dv1 = ti11 * dp;
dv1 += phi4 * du;
dv1 += phi5 * dv;
dv1 += phi6 * dw;
dv1 /= c2;
double dv2 = ti21 * dp;
dv2 += phi7 * du;
dv2 += phi8 * dv;
dv2 += phi9 * dw;
dv2 /= c2;
double dv3 = 2.f * ti31 * dp;
dv3 += xn * du;
dv3 += yn * dv;
dv3 += zn * dw;
dv3 *= 0.5f / c2;
double dv4 = 2.f * ti41 * dp;
dv4 += xn * du;
dv4 += yn * dv;
dv4 += zn * dw;
dv4 *= 0.5f / c2;
/*
Now get elements of T
*/
const double r13 = c * BETA;
const double r23 = u * uac + xn * BETA;
const double r33 = v * uac + yn * BETA;
const double r43 = w * uac + zn * BETA;
const double r14 = -c * BETA;
const double r24 = u * usc + xn * BETA;
const double r34 = v * usc + yn * BETA;
const double r44 = w * usc + zn * BETA;
/*
Calculate T* |lambda| * T(inverse)
*/
double t1 = eig2 * r13 * dv3 + eig3 * r14 * dv4;
double t2 = eig1 * X1 * dv1 + eig1 * X2 * dv2;
t2 += eig2 * r23 * dv3 + eig3 * r24 * dv4;
double t3 = eig1 * Y1 * dv1 + eig1 * Y2 * dv2;
t3 += eig2 * r33 * dv3 + eig3 * r34 * dv4;
double t4 = eig1 * Z1 * dv1 + eig1 * Z2 * dv2;
t4 += eig2 * r43 * dv3 + eig3 * r44 * dv4;
/*
Modify to calculate .5(fl +fr) from nodes
instead of extrapolated ones
*/
const double fluxp1 = ln * BETA * ubarL;
const double fluxp2 = ln * (uL * ubarL + xn * pL);
const double fluxp3 = ln * (vL * ubarL + yn * pL);
const double fluxp4 = ln * (wL * ubarL + zn * pL);
/*
Now the right side
*/
const double fluxm1 = ln * BETA * ubarR;
const double fluxm2 = ln * (uR * ubarR + xn * pR);
const double fluxm3 = ln * (vR * ubarR + yn * pR);
const double fluxm4 = ln * (wR * ubarR + zn * pR);
const double res1 = 0.5f * (fluxp1 + fluxm1 - ln * t1);
const double res2 = 0.5f * (fluxp2 + fluxm2 - ln * t2);
const double res3 = 0.5f * (fluxp3 + fluxm3 - ln * t3);
const double res4 = 0.5f * (fluxp4 + fluxm4 - ln * t4);
r[idx0 + 0] = (part[node0] == t) ? (r[idx0 + 0] + res1) : r[idx0 + 0];
r[idx0 + 1] = (part[node0] == t) ? (r[idx0 + 1] + res2) : r[idx0 + 1];
r[idx0 + 2] = (part[node0] == t) ? (r[idx0 + 2] + res3) : r[idx0 + 2];
r[idx0 + 3] = (part[node0] == t) ? (r[idx0 + 3] + res4) : r[idx0 + 3];
r[idx1 + 0] = (part[node1] == t) ? (r[idx1 + 0] - res1) : r[idx1 + 0];
r[idx1 + 1] = (part[node1] == t) ? (r[idx1 + 1] - res2) : r[idx1 + 1];
r[idx1 + 2] = (part[node1] == t) ? (r[idx1 + 2] - res3) : r[idx1 + 2];
r[idx1 + 3] = (part[node1] == t) ? (r[idx1 + 3] - res4) : r[idx1 + 3];
}
}
uint32_t i;
for(i = 0; i < snfc; i++)
{
const uint32_t if0 = snfic[i];
const uint32_t if1 = snfic[i+1];
uint32_t j;
#pragma omp parallel for
for(j = if0; j < if1; j++)
{
const uint32_t node0 = sn0[j];
const uint32_t node1 = sn1[j];
const uint32_t node2 = sn2[j];
const double p1 = q[bsz * node0];
const double p2 = q[bsz * node1];
const double p3 = q[bsz * node2];
const double ax = xyz0[node1] - xyz0[node0];
const double ay = xyz1[node1] - xyz1[node0];
const double az = xyz2[node1] - xyz2[node0];
const double bx = xyz0[node2] - xyz0[node0];
const double by = xyz1[node2] - xyz1[node0];
const double bz = xyz2[node2] - xyz2[node0];
/*
Normal points away from grid interior.
Magnitude is 1/3 area of surface triangle.
*/
double xn = ay * bz;
xn -= az * by;
xn *= MAG1;
double yn = ax * bz;
yn -= az * bx;
yn *= MAG0;
double zn = ax * by;
zn -= ay * bx;
zn *= MAG1;
double pa = 0.125f * (p2 + p3);
pa += 0.75f * p1;
double pb = 0.125f * (p3 + p1);
pb += 0.75f * p2;
double pc = 0.125f * (p1 + p2);
pc += 0.75f * p3;
uint32_t idx;
idx = bsz * node0;
r[idx + 1] += xn * pa;
r[idx + 2] += yn * pa;
r[idx + 3] += zn * pa;
idx = bsz * node1;
r[idx + 1] += xn * pb;
r[idx + 2] += yn * pb;
r[idx + 3] += zn * pb;
idx = bsz * node2;
r[idx + 1] += xn * pc;
r[idx + 2] += yn * pc;
r[idx + 3] += zn * pc;
}
}
/* Do the free boundaries */
#pragma omp parallel for
for(i = 0; i < nfnodes; i++)
{
uint32_t n = nfptr[i];
/*
Get normal and "other" 2 vectors. Remember that fxn,fyn and fzn
has the magnitude of the face contained in it.
*/
double xn = f_xyz0[i];
double yn = f_xyz1[i];
double zn = f_xyz2[i];
double area = xn * xn;
area += yn * yn;
area += zn * zn;
area = sqrt(area);
xn /= area;
yn /= area;
zn /= area;
/*
Now lets get our other 2 vectors
For first vector, use {1,0,0} and subtract off the component
in the direction of the face normal. If the inner product of
{1,0,0} is close to unity, use {0,1,0}
*/
double X1, Y1, Z1;
double dot = xn;
if(fabs(dot) < 0.95f)
{
X1 = 1.f - dot * xn;
Y1 = -dot * yn;
Z1 = -dot * zn;
}
else
{
dot = yn;
X1 = -dot * xn;
Y1 = 1.f - dot * yn;
Z1 = -dot * zn;
}
/*
Normalize the first vector (V1)
*/
double size = X1 * X1;
size += Y1 * Y1;
size += Z1 * Z1;
size = sqrt(size);
X1 /= size;
Y1 /= size;
Z1 /= size;
/*
Take cross-product of normal with V1 to get V2
*/
double X2 = yn * Z1;
X2 -= zn * Y1;
double Y2 = zn * X1;
Y2 -= xn * Z1;
double Z2 = xn * Y1;
Z2 -= yn * X1;
/*
Calculate elements of T and T(inverse) evaluated at free-stream
*/
double ubar0 = xn * velocity_u;
ubar0 += yn * velocity_v;
ubar0 += zn * velocity_w;
double c20 = ubar0 * ubar0 + BETA;
double c0 = sqrt(c20);
double phi1 = xn * BETA;
phi1 += velocity_u * ubar0;
double phi2 = yn * BETA;
phi2 += velocity_v * ubar0;
double phi3 = zn * BETA;
phi3 += velocity_w * ubar0;
double phi4 = Y2 * phi3;
phi4 -= Z2 * phi2;
double phi5 = Z2 * phi1;
phi5 -= X2 * phi3;
double phi6 = X2 * phi2;
phi6 -= Y2 * phi1;
double phi7 = Z1 * phi2;
phi7 -= Y1 * phi3;
double phi8 = X1 * phi3;
phi8 -= Z1 * phi1;
double phi9 = Y1 * phi1;
phi9 -= X1 * phi2;
double t13 = c0 * BETA;
double t23 = velocity_u * (ubar0 + c0);
t23 += xn * BETA;
double t33 = velocity_v * (ubar0 + c0);
t33 += yn * BETA;
double t43 = velocity_w * (ubar0 + c0);
t43 += zn * BETA;
double t14 = -c0 * BETA;
double t24 = velocity_u * (ubar0 - c0);
t24 += xn * BETA;
double t34 = velocity_v * (ubar0 - c0);
t34 += yn * BETA;
double t44 = velocity_w * (ubar0 - c0);
t44 += zn * BETA;
double ti11 = velocity_u * phi4;
ti11 += velocity_v * phi5;
ti11 += velocity_w * phi6;
ti11 = -ti11/BETA;
double ti21 = velocity_u * phi7;
ti21 += velocity_v * phi8;
ti21 += velocity_w * phi9;
ti21 = -ti21/BETA;
double ti31 = 0.5f * (c0 - ubar0);
ti31 /= BETA;
double ti41 = -0.5f * (c0 + ubar0);
ti41 /= BETA;
/*
Now, get the variables on the "inside"
*/
double pi = q[bsz * n + 0];
double ui = q[bsz * n + 1];
double vi = q[bsz * n + 2];
double wi = q[bsz * n + 3];
double un = xn * ui;
un += yn * vi;
un += zn * wi;
/*
If ubar is negative, take the reference condition from outside
*/
double pr, ur, vr, wr;
if(un > 0.f)
{
pr = pi;
ur = ui;
vr = vi;
wr = wi;
}
else
{
pr = pressure;
ur = velocity_u;
vr = velocity_v;
wr = velocity_w;
}
/*
Set rhs
*/
double rhs1 = ti11 * pr;
rhs1 += phi4 * ur;
rhs1 += phi5 * vr;
rhs1 += phi6 * wr;
rhs1 /= c20;
double rhs2 = ti21 * pr;
rhs2 += phi7 * ur;
rhs2 += phi8 * vr;
rhs2 += phi9 * wr;
rhs2 /= c20;
double rhs3 = 2.f * ti31 * pi;
rhs3 += xn * ui;
rhs3 += yn * vi;
rhs3 += zn * wi;
rhs3 = 0.5f * rhs3 / c20;
double rhs4 = 2.f * ti41 * pressure;
rhs4 += xn * velocity_u;
rhs4 += yn * velocity_v;
rhs4 += zn * velocity_w;
rhs4 = 0.5f * rhs4 / c20;
/*
Now do matrix multiplication to get values on boundary
*/
double pb = t13 * rhs3;
pb += t14 * rhs4;
double ub = X1 * rhs1;
ub += X2 * rhs2;
ub += t23 * rhs3;
ub += t24 * rhs4;
double vb = Y1 * rhs1;
vb += Y2 * rhs2;
vb += t33 * rhs3;
vb += t34 * rhs4;
double wb = Z1 * rhs1;
wb += Z2 * rhs2;
wb += t43 * rhs3;
wb += t44 * rhs4;
double ubar = xn * ub;
ubar += yn * vb;
ubar += zn * wb;
uint32_t idx = bsz * n;
r[idx + 0] += area * BETA * ubar;
r[idx + 1] += area * (ub * ubar + xn * pb);
r[idx + 2] += area * (vb * ubar + yn * pb);
r[idx + 3] += area * (wb * ubar + zn * pb);
}
compute_time(&ktime, flux->t);
}
|
par_relax.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* Relaxation scheme
*
*****************************************************************************/
#include "_hypre_parcsr_ls.h"
/*--------------------------------------------------------------------------
* hypre_BoomerAMGRelax
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_BoomerAMGRelax( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
HYPRE_Int *cf_marker,
HYPRE_Int relax_type,
HYPRE_Int relax_points,
HYPRE_Real relax_weight,
HYPRE_Real omega,
HYPRE_Real *l1_norms,
hypre_ParVector *u,
hypre_ParVector *Vtemp,
hypre_ParVector *Ztemp )
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
HYPRE_Int n_global= hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int first_index = hypre_ParVectorFirstIndex(u);
hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
HYPRE_Real *u_data = hypre_VectorData(u_local);
hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
HYPRE_Real *f_data = hypre_VectorData(f_local);
hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local);
HYPRE_Real *Vext_data = NULL;
HYPRE_Real *v_buf_data;
HYPRE_Real *tmp_data;
hypre_Vector *Ztemp_local;
HYPRE_Real *Ztemp_data;
hypre_CSRMatrix *A_CSR;
HYPRE_Int *A_CSR_i;
HYPRE_Int *A_CSR_j;
HYPRE_Real *A_CSR_data;
hypre_Vector *f_vector;
HYPRE_Real *f_vector_data;
HYPRE_Int i, j, jr;
HYPRE_Int ii, jj;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int column;
HYPRE_Int relax_error = 0;
HYPRE_Int num_sends;
HYPRE_Int num_recvs;
HYPRE_Int index, start;
HYPRE_Int num_procs, num_threads, my_id, ip, p;
HYPRE_Int vec_start, vec_len;
hypre_MPI_Status *status;
hypre_MPI_Request *requests;
HYPRE_Real *A_mat;
HYPRE_Real *b_vec;
HYPRE_Real zero = 0.0;
HYPRE_Real res, res0, res2;
HYPRE_Real one_minus_weight;
HYPRE_Real one_minus_omega;
HYPRE_Real prod;
one_minus_weight = 1.0 - relax_weight;
one_minus_omega = 1.0 - omega;
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
/*-----------------------------------------------------------------------
* Switch statement to direct control based on relax_type:
* relax_type = 0 -> Jacobi or CF-Jacobi
* relax_type = 1 -> Gauss-Seidel <--- very slow, sequential
* relax_type = 2 -> Gauss_Seidel: interior points in parallel ,
* boundary sequential
* relax_type = 3 -> hybrid: SOR-J mix off-processor, SOR on-processor
* with outer relaxation parameters (forward solve)
* relax_type = 4 -> hybrid: SOR-J mix off-processor, SOR on-processor
* with outer relaxation parameters (backward solve)
* relax_type = 5 -> hybrid: GS-J mix off-processor, chaotic GS on-node
* relax_type = 6 -> hybrid: SSOR-J mix off-processor, SSOR on-processor
* with outer relaxation parameters
* relax_type = 7 -> Jacobi (uses Matvec), only needed in CGNR
* relax_type = 19-> Direct Solve, (old version)
* relax_type = 29-> Direct solve: use gaussian elimination & BLAS
* (with pivoting) (old version)
*-----------------------------------------------------------------------*/
switch (relax_type)
{
case 0: /* Weighted Jacobi */
{
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
v_buf_data = hypre_CTAlloc(HYPRE_Real,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));
Vext_data = hypre_CTAlloc(HYPRE_Real,num_cols_offd);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
v_buf_data[index++]
= u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data,
Vext_data);
}
/*-----------------------------------------------------------------
* Copy current approximation into temporary vector.
*-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
Vtemp_data[i] = u_data[i];
}
if (num_procs > 1)
{
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
/*-----------------------------------------------------------------
* Relax all points.
*-----------------------------------------------------------------*/
if (relax_points == 0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= one_minus_weight;
u_data[i] += relax_weight * res / A_diag_data[A_diag_i[i]];
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= one_minus_weight;
u_data[i] += relax_weight * res / A_diag_data[A_diag_i[i]];
}
}
}
if (num_procs > 1)
{
hypre_TFree(Vext_data);
hypre_TFree(v_buf_data);
}
}
break;
case 5: /* Hybrid: Jacobi off-processor,
chaotic Gauss-Seidel on-processor */
{
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
v_buf_data = hypre_CTAlloc(HYPRE_Real,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));
Vext_data = hypre_CTAlloc(HYPRE_Real,num_cols_offd);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
v_buf_data[index++]
= u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data,
Vext_data);
/*-----------------------------------------------------------------
* Copy current approximation into temporary vector.
*-----------------------------------------------------------------*/
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
/*-----------------------------------------------------------------
* Relax all points.
*-----------------------------------------------------------------*/
if (relax_points == 0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
if (num_procs > 1)
{
hypre_TFree(Vext_data);
hypre_TFree(v_buf_data);
}
}
break;
case 3: /* Hybrid: Jacobi off-processor,
Gauss-Seidel on-processor
(forward loop) */
{
if (num_threads > 1)
{
Ztemp_local = hypre_ParVectorLocalVector(Ztemp);
Ztemp_data = hypre_VectorData(Ztemp_local);
}
#ifdef HYPRE_USING_PERSISTENT_COMM
// JSP: persistent comm can be similarly used for other smoothers
hypre_ParCSRPersistentCommHandle *persistent_comm_handle;
#endif
if (num_procs > 1)
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
#ifdef HYPRE_USING_PERSISTENT_COMM
persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(1, comm_pkg);
v_buf_data = (HYPRE_Real *)persistent_comm_handle->send_data;
Vext_data = (HYPRE_Real *)persistent_comm_handle->recv_data;
#else
v_buf_data = hypre_CTAlloc(HYPRE_Real,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));
Vext_data = hypre_CTAlloc(HYPRE_Real,num_cols_offd);
#endif
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
HYPRE_Int begin = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
for (i = begin; i < end; i++)
{
v_buf_data[i - begin]
= u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,i)];
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif
#ifdef HYPRE_USING_PERSISTENT_COMM
hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle);
#else
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data,
Vext_data);
#endif
/*-----------------------------------------------------------------
* Copy current approximation into temporary vector.
*-----------------------------------------------------------------*/
#ifdef HYPRE_USING_PERSISTENT_COMM
hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle);
#else
hypre_ParCSRCommHandleDestroy(comm_handle);
#endif
comm_handle = NULL;
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
#endif
}
/*-----------------------------------------------------------------
* Relax all points.
*-----------------------------------------------------------------*/
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RELAX] -= hypre_MPI_Wtime();
#endif
if (relax_weight == 1 && omega == 1)
{
if (relax_points == 0)
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
res -= A_diag_data[jj] * u_data[ii];
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
}
else
{
for (i = 0; i < n; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
res -= A_diag_data[jj] * u_data[ii];
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
}
else
{
for (i = 0; i < n; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
}
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
Vtemp_data[i] = u_data[i];
}
prod = (1.0-relax_weight*omega);
if (relax_points == 0)
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
res0 = 0.0;
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
}
}
else
{
for (i = 0; i < n; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
}
}
else
{
for (i = 0; i < n; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
res0 = 0.0;
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
}
}
}
#ifndef HYPRE_USING_PERSISTENT_COMM
if (num_procs > 1)
{
hypre_TFree(Vext_data);
hypre_TFree(v_buf_data);
}
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RELAX] += hypre_MPI_Wtime();
#endif
}
break;
      case 1: /* Gauss-Seidel VERY SLOW */
      {
         /* True lexicographic Gauss-Seidel over the distributed matrix:
          * ranks relax strictly in order of MPI rank.  The p-loop below
          * makes every process wait for all lower-ranked processes (via
          * the Isend/Irecv ordering and the barriers), so the sweep is
          * fully serialized across the machine -- hence "VERY SLOW". */
         if (num_procs > 1)
         {
            /* Allocate communication workspace: one send buffer slot per
             * mapped send element, one slot of Vext_data per off-processor
             * column, and request/status arrays sized for all transfers. */
            num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
            num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
            v_buf_data = hypre_CTAlloc(HYPRE_Real,
                                       hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));
            Vext_data = hypre_CTAlloc(HYPRE_Real,num_cols_offd);
            status = hypre_CTAlloc(hypre_MPI_Status,num_recvs+num_sends);
            requests= hypre_CTAlloc(hypre_MPI_Request, num_recvs+num_sends);
            if (num_cols_offd)
            {
               A_offd_j = hypre_CSRMatrixJ(A_offd);
               A_offd_data = hypre_CSRMatrixData(A_offd);
            }
            /*-----------------------------------------------------------------
             * Copy current approximation into temporary vector.
             *-----------------------------------------------------------------*/
            /*
            for (i = 0; i < n; i++)
            {
               Vtemp_data[i] = u_data[i];
            } */
         }
         /*-----------------------------------------------------------------
          * Relax all points.
          *-----------------------------------------------------------------*/
         /* One pass of the loop per MPI rank: when p is not my rank I send
          * my (possibly already updated) boundary values to p; when p is my
          * rank I receive everyone's current values and relax my rows. */
         for (p = 0; p < num_procs; p++)
         {
            jr = 0;
            if (p != my_id)
            {
               /* Post sends only for the send maps that target rank p. */
               for (i = 0; i < num_sends; i++)
               {
                  ip = hypre_ParCSRCommPkgSendProc(comm_pkg, i);
                  if (ip == p)
                  {
                     vec_start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
                     vec_len = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1)-vec_start;
                     for (j=vec_start; j < vec_start+vec_len; j++)
                        v_buf_data[j] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
                     hypre_MPI_Isend(&v_buf_data[vec_start], vec_len, HYPRE_MPI_REAL,
                                     ip, 0, comm, &requests[jr++]);
                  }
               }
               hypre_MPI_Waitall(jr,requests,status);
               /* Barrier keeps all ranks in lockstep with the rank that is
                * currently relaxing -- this is what serializes the sweep. */
               hypre_MPI_Barrier(comm);
            }
            else
            {
               /* It is my turn: gather boundary values from all neighbors
                * into Vext_data, then relax my local rows in order. */
               if (num_procs > 1)
               {
                  for (i = 0; i < num_recvs; i++)
                  {
                     ip = hypre_ParCSRCommPkgRecvProc(comm_pkg, i);
                     vec_start = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i);
                     vec_len = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i+1)-vec_start;
                     hypre_MPI_Irecv(&Vext_data[vec_start], vec_len, HYPRE_MPI_REAL,
                                     ip, 0, comm, &requests[jr++]);
                  }
                  hypre_MPI_Waitall(jr,requests,status);
               }
               if (relax_points == 0)
               {
                  /* Relax every local row (standard GS update:
                   * u_i = (f_i - sum_{j!=i} a_ij u_j) / a_ii). */
                  for (i = 0; i < n; i++)
                  {
                     /*-----------------------------------------------------------
                      * If diagonal is nonzero, relax point i; otherwise, skip it.
                      *-----------------------------------------------------------*/
                     if ( A_diag_data[A_diag_i[i]] != zero)
                     {
                        res = f_data[i];
                        for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                        {
                           ii = A_diag_j[jj];
                           res -= A_diag_data[jj] * u_data[ii];
                        }
                        for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                        {
                           ii = A_offd_j[jj];
                           res -= A_offd_data[jj] * Vext_data[ii];
                        }
                        u_data[i] = res / A_diag_data[A_diag_i[i]];
                     }
                  }
               }
               /*-----------------------------------------------------------------
                * Relax only C or F points as determined by relax_points.
                *-----------------------------------------------------------------*/
               else
               {
                  for (i = 0; i < n; i++)
                  {
                     /*-----------------------------------------------------------
                      * If i is of the right type ( C or F ) and diagonal is
                      * nonzero, relax point i; otherwise, skip it.
                      *-----------------------------------------------------------*/
                     if (cf_marker[i] == relax_points
                         && A_diag_data[A_diag_i[i]] != zero)
                     {
                        res = f_data[i];
                        for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                        {
                           ii = A_diag_j[jj];
                           res -= A_diag_data[jj] * u_data[ii];
                        }
                        for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                        {
                           ii = A_offd_j[jj];
                           res -= A_offd_data[jj] * Vext_data[ii];
                        }
                        u_data[i] = res / A_diag_data[A_diag_i[i]];
                     }
                  }
               }
               if (num_procs > 1)
                  hypre_MPI_Barrier(comm);
            }
         }
         /* Release the per-sweep communication workspace. */
         if (num_procs > 1)
         {
            hypre_TFree(Vext_data);
            hypre_TFree(v_buf_data);
            hypre_TFree(status);
            hypre_TFree(requests);
         }
      }
      break;
      case 2: /* Gauss-Seidel: relax interior points in parallel, boundary
                 sequentially */
      {
         /* Two-phase Gauss-Seidel: rows with NO off-processor couplings
          * ("interior" rows, detected by an empty A_offd row) are relaxed
          * by every rank concurrently; rows that do couple off-processor
          * ("boundary" rows) are then relaxed one rank at a time, using
          * the same rank-serialized communication pattern as case 1. */
         if (num_procs > 1)
         {
            num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
            num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
            v_buf_data = hypre_CTAlloc(HYPRE_Real,
                                       hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));
            Vext_data = hypre_CTAlloc(HYPRE_Real,num_cols_offd);
            status = hypre_CTAlloc(hypre_MPI_Status,num_recvs+num_sends);
            requests= hypre_CTAlloc(hypre_MPI_Request, num_recvs+num_sends);
            if (num_cols_offd)
            {
               A_offd_j = hypre_CSRMatrixJ(A_offd);
               A_offd_data = hypre_CSRMatrixData(A_offd);
            }
         }
         /*-----------------------------------------------------------------
          * Copy current approximation into temporary vector.
          *-----------------------------------------------------------------*/
         /*
         for (i = 0; i < n; i++)
         {
            Vtemp_data[i] = u_data[i];
         } */
         /*-----------------------------------------------------------------
          * Relax interior points first
          *-----------------------------------------------------------------*/
         if (relax_points == 0)
         {
            for (i = 0; i < n; i++)
            {
               /*-----------------------------------------------------------
                * If diagonal is nonzero, relax point i; otherwise, skip it.
                *-----------------------------------------------------------*/
               /* Empty A_offd row  =>  purely local row; safe to relax
                * without any communication. */
               if ((A_offd_i[i+1]-A_offd_i[i]) == zero &&
                   A_diag_data[A_diag_i[i]] != zero)
               {
                  res = f_data[i];
                  for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                  {
                     ii = A_diag_j[jj];
                     res -= A_diag_data[jj] * u_data[ii];
                  }
                  u_data[i] = res / A_diag_data[A_diag_i[i]];
               }
            }
         }
         else
         {
            for (i = 0; i < n; i++)
            {
               /*-----------------------------------------------------------
                * If i is of the right type ( C or F ) and diagonal is
                * nonzero, relax point i; otherwise, skip it.
                *-----------------------------------------------------------*/
               if (cf_marker[i] == relax_points
                   && (A_offd_i[i+1]-A_offd_i[i]) == zero
                   && A_diag_data[A_diag_i[i]] != zero)
               {
                  res = f_data[i];
                  for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                  {
                     ii = A_diag_j[jj];
                     res -= A_diag_data[jj] * u_data[ii];
                  }
                  u_data[i] = res / A_diag_data[A_diag_i[i]];
               }
            }
         }
         /* Phase 2: boundary rows, serialized over ranks exactly as in
          * case 1 (send when it is not my turn, receive+relax when it is). */
         for (p = 0; p < num_procs; p++)
         {
            jr = 0;
            if (p != my_id)
            {
               for (i = 0; i < num_sends; i++)
               {
                  ip = hypre_ParCSRCommPkgSendProc(comm_pkg, i);
                  if (ip == p)
                  {
                     vec_start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
                     vec_len = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1)-vec_start;
                     for (j=vec_start; j < vec_start+vec_len; j++)
                        v_buf_data[j] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
                     hypre_MPI_Isend(&v_buf_data[vec_start], vec_len, HYPRE_MPI_REAL,
                                     ip, 0, comm, &requests[jr++]);
                  }
               }
               hypre_MPI_Waitall(jr,requests,status);
               hypre_MPI_Barrier(comm);
            }
            else
            {
               if (num_procs > 1)
               {
                  for (i = 0; i < num_recvs; i++)
                  {
                     ip = hypre_ParCSRCommPkgRecvProc(comm_pkg, i);
                     vec_start = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i);
                     vec_len = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i+1)-vec_start;
                     hypre_MPI_Irecv(&Vext_data[vec_start], vec_len, HYPRE_MPI_REAL,
                                     ip, 0, comm, &requests[jr++]);
                  }
                  hypre_MPI_Waitall(jr,requests,status);
               }
               if (relax_points == 0)
               {
                  for (i = 0; i < n; i++)
                  {
                     /*-----------------------------------------------------------
                      * If diagonal is nonzero, relax point i; otherwise, skip it.
                      *-----------------------------------------------------------*/
                     /* Non-empty A_offd row => boundary row skipped in phase 1. */
                     if ((A_offd_i[i+1]-A_offd_i[i]) != zero &&
                         A_diag_data[A_diag_i[i]] != zero)
                     {
                        res = f_data[i];
                        for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                        {
                           ii = A_diag_j[jj];
                           res -= A_diag_data[jj] * u_data[ii];
                        }
                        for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                        {
                           ii = A_offd_j[jj];
                           res -= A_offd_data[jj] * Vext_data[ii];
                        }
                        u_data[i] = res / A_diag_data[A_diag_i[i]];
                     }
                  }
               }
               /*-----------------------------------------------------------------
                * Relax only C or F points as determined by relax_points.
                *-----------------------------------------------------------------*/
               else
               {
                  for (i = 0; i < n; i++)
                  {
                     /*-----------------------------------------------------------
                      * If i is of the right type ( C or F ) and diagonal is
                      * nonzero, relax point i; otherwise, skip it.
                      *-----------------------------------------------------------*/
                     if (cf_marker[i] == relax_points
                         && (A_offd_i[i+1]-A_offd_i[i]) != zero
                         && A_diag_data[A_diag_i[i]] != zero)
                     {
                        res = f_data[i];
                        for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                        {
                           ii = A_diag_j[jj];
                           res -= A_diag_data[jj] * u_data[ii];
                        }
                        for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                        {
                           ii = A_offd_j[jj];
                           res -= A_offd_data[jj] * Vext_data[ii];
                        }
                        u_data[i] = res / A_diag_data[A_diag_i[i]];
                     }
                  }
               }
               if (num_procs > 1)
                  hypre_MPI_Barrier(comm);
            }
         }
         if (num_procs > 1)
         {
            hypre_TFree(Vext_data);
            hypre_TFree(v_buf_data);
            hypre_TFree(status);
            hypre_TFree(requests);
         }
      }
      break;
      case 4: /* Hybrid: Jacobi off-processor,
                 Gauss-Seidel/SOR on-processor
                 (backward loop) */
      {
         /* Hybrid smoother: off-processor couplings are treated Jacobi-style
          * (Vext_data is exchanged once, up front, and stays fixed for the
          * whole sweep) while on-processor rows get a BACKWARD Gauss-Seidel
          * or SOR sweep (i runs from high to low).  With num_threads > 1 the
          * local rows are split into contiguous strips [ns,ne); each thread
          * sweeps its strip backward and uses tmp_data (a snapshot of u at
          * sweep start) for points outside its strip, i.e. Jacobi across
          * thread boundaries. */
         if (num_procs > 1)
         {
            num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
            v_buf_data = hypre_CTAlloc(HYPRE_Real,
                                       hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));
            Vext_data = hypre_CTAlloc(HYPRE_Real,num_cols_offd);
            if (num_cols_offd)
            {
               A_offd_j = hypre_CSRMatrixJ(A_offd);
               A_offd_data = hypre_CSRMatrixData(A_offd);
            }
            /* Pack current boundary values and exchange them once; the
             * sweep below never refreshes Vext_data (Jacobi off-processor). */
            index = 0;
            for (i = 0; i < num_sends; i++)
            {
               start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
               for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
                  v_buf_data[index++]
                     = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
            }
            comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data,
                                                        Vext_data);
            /*-----------------------------------------------------------------
             * Copy current approximation into temporary vector.
             *-----------------------------------------------------------------*/
            hypre_ParCSRCommHandleDestroy(comm_handle);
            comm_handle = NULL;
         }
         /*-----------------------------------------------------------------
          * Relax all points.
          *-----------------------------------------------------------------*/
         /* Unweighted path: relax_weight == omega == 1 reduces SOR to plain
          * Gauss-Seidel, so the cheaper update without Vtemp/prod is used. */
         if (relax_weight == 1 && omega == 1)
         {
            if (relax_points == 0)
            {
               if (num_threads > 1)
               {
                  /* Snapshot u for cross-strip (Jacobi) references. */
                  tmp_data = hypre_CTAlloc(HYPRE_Real,n);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
                  for (i = 0; i < n; i++)
                     tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
                  for (j = 0; j < num_threads; j++)
                  {
                     /* Strip [ns,ne) for thread j; the first `rest` threads
                      * get one extra row so the strips cover all n rows. */
                     size = n/num_threads;
                     rest = n - size*num_threads;
                     if (j < rest)
                     {
                        ns = j*size+j;
                        ne = (j+1)*size+j+1;
                     }
                     else
                     {
                        ns = j*size+rest;
                        ne = (j+1)*size+rest;
                     }
                     for (i = ne-1; i > ns-1; i--)     /* interior points first */
                     {
                        /*-----------------------------------------------------------
                         * If diagonal is nonzero, relax point i; otherwise, skip it.
                         *-----------------------------------------------------------*/
                        if ( A_diag_data[A_diag_i[i]] != zero)
                        {
                           res = f_data[i];
                           for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                           {
                              ii = A_diag_j[jj];
                              /* In-strip neighbors use fresh values (GS);
                               * out-of-strip neighbors use the snapshot. */
                              if (ii >= ns && ii < ne)
                                 res -= A_diag_data[jj] * u_data[ii];
                              else
                                 res -= A_diag_data[jj] * tmp_data[ii];
                           }
                           for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                           {
                              ii = A_offd_j[jj];
                              res -= A_offd_data[jj] * Vext_data[ii];
                           }
                           u_data[i] = res / A_diag_data[A_diag_i[i]];
                        }
                     }
                  }
                  hypre_TFree(tmp_data);
               }
               else
               {
                  /* Single-threaded backward GS over all rows. */
                  for (i = n-1; i > -1; i--)     /* interior points first */
                  {
                     /*-----------------------------------------------------------
                      * If diagonal is nonzero, relax point i; otherwise, skip it.
                      *-----------------------------------------------------------*/
                     if ( A_diag_data[A_diag_i[i]] != zero)
                     {
                        res = f_data[i];
                        for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                        {
                           ii = A_diag_j[jj];
                           res -= A_diag_data[jj] * u_data[ii];
                        }
                        for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                        {
                           ii = A_offd_j[jj];
                           res -= A_offd_data[jj] * Vext_data[ii];
                        }
                        u_data[i] = res / A_diag_data[A_diag_i[i]];
                     }
                  }
               }
            }
            /*-----------------------------------------------------------------
             * Relax only C or F points as determined by relax_points.
             *-----------------------------------------------------------------*/
            else
            {
               if (num_threads > 1)
               {
                  tmp_data = hypre_CTAlloc(HYPRE_Real,n);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
                  for (i = 0; i < n; i++)
                     tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
                  for (j = 0; j < num_threads; j++)
                  {
                     size = n/num_threads;
                     rest = n - size*num_threads;
                     if (j < rest)
                     {
                        ns = j*size+j;
                        ne = (j+1)*size+j+1;
                     }
                     else
                     {
                        ns = j*size+rest;
                        ne = (j+1)*size+rest;
                     }
                     for (i = ne-1; i > ns-1; i--) /* relax interior points */
                     {
                        /*-----------------------------------------------------------
                         * If i is of the right type ( C or F ) and diagonal is
                         * nonzero, relax point i; otherwise, skip it.
                         *-----------------------------------------------------------*/
                        if (cf_marker[i] == relax_points
                            && A_diag_data[A_diag_i[i]] != zero)
                        {
                           res = f_data[i];
                           for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                           {
                              ii = A_diag_j[jj];
                              if (ii >= ns && ii < ne)
                                 res -= A_diag_data[jj] * u_data[ii];
                              else
                                 res -= A_diag_data[jj] * tmp_data[ii];
                           }
                           for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                           {
                              ii = A_offd_j[jj];
                              res -= A_offd_data[jj] * Vext_data[ii];
                           }
                           u_data[i] = res / A_diag_data[A_diag_i[i]];
                        }
                     }
                  }
                  hypre_TFree(tmp_data);
               }
               else
               {
                  for (i = n-1; i > -1; i--) /* relax interior points */
                  {
                     /*-----------------------------------------------------------
                      * If i is of the right type ( C or F ) and diagonal is
                      * nonzero, relax point i; otherwise, skip it.
                      *-----------------------------------------------------------*/
                     if (cf_marker[i] == relax_points
                         && A_diag_data[A_diag_i[i]] != zero)
                     {
                        res = f_data[i];
                        for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                        {
                           ii = A_diag_j[jj];
                           res -= A_diag_data[jj] * u_data[ii];
                        }
                        for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                        {
                           ii = A_offd_j[jj];
                           res -= A_offd_data[jj] * Vext_data[ii];
                        }
                        u_data[i] = res / A_diag_data[A_diag_i[i]];
                     }
                  }
               }
            }
         }
         else
         {
            /* Weighted (SOR) path.  Vtemp_data keeps the pre-sweep iterate;
             * the update is
             *   u_i <- prod*u_i + relax_weight*(omega*res + res0
             *                     + one_minus_omega*res2) / a_ii
             * with prod = 1 - relax_weight*omega.  res0 accumulates -A*u over
             * fresh values, res2 accumulates A*Vtemp over old values, and res
             * carries f minus off-strip/off-processor contributions.
             * NOTE(review): one_minus_omega is set outside this chunk --
             * presumably 1-omega; confirm against the enclosing function. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
            for (i = 0; i < n; i++)
            {
               Vtemp_data[i] = u_data[i];
            }
            prod = (1.0-relax_weight*omega);
            if (relax_points == 0)
            {
               if (num_threads > 1)
               {
                  tmp_data = hypre_CTAlloc(HYPRE_Real,n);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
                  for (i = 0; i < n; i++)
                     tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
                  for (j = 0; j < num_threads; j++)
                  {
                     size = n/num_threads;
                     rest = n - size*num_threads;
                     if (j < rest)
                     {
                        ns = j*size+j;
                        ne = (j+1)*size+j+1;
                     }
                     else
                     {
                        ns = j*size+rest;
                        ne = (j+1)*size+rest;
                     }
                     for (i = ne-1; i > ns-1; i--)     /* interior points first */
                     {
                        /*-----------------------------------------------------------
                         * If diagonal is nonzero, relax point i; otherwise, skip it.
                         *-----------------------------------------------------------*/
                        if ( A_diag_data[A_diag_i[i]] != zero)
                        {
                           res = f_data[i];
                           res0 = 0.0;
                           res2 = 0.0;
                           for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                           {
                              ii = A_diag_j[jj];
                              if (ii >= ns && ii < ne)
                              {
                                 res0 -= A_diag_data[jj] * u_data[ii];
                                 res2 += A_diag_data[jj] * Vtemp_data[ii];
                              }
                              else
                                 res -= A_diag_data[jj] * tmp_data[ii];
                           }
                           for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                           {
                              ii = A_offd_j[jj];
                              res -= A_offd_data[jj] * Vext_data[ii];
                           }
                           u_data[i] *= prod;
                           u_data[i] += relax_weight*(omega*res + res0 +
                                                      one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
                           /*u_data[i] += omega*(relax_weight*res + res0 +
                             one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
                        }
                     }
                  }
                  hypre_TFree(tmp_data);
               }
               else
               {
                  for (i = n-1; i > -1; i--)     /* interior points first */
                  {
                     /*-----------------------------------------------------------
                      * If diagonal is nonzero, relax point i; otherwise, skip it.
                      *-----------------------------------------------------------*/
                     if ( A_diag_data[A_diag_i[i]] != zero)
                     {
                        res0 = 0.0;
                        res2 = 0.0;
                        res = f_data[i];
                        for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                        {
                           ii = A_diag_j[jj];
                           res0 -= A_diag_data[jj] * u_data[ii];
                           res2 += A_diag_data[jj] * Vtemp_data[ii];
                        }
                        for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                        {
                           ii = A_offd_j[jj];
                           res -= A_offd_data[jj] * Vext_data[ii];
                        }
                        u_data[i] *= prod;
                        u_data[i] += relax_weight*(omega*res + res0 +
                                                   one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
                        /*u_data[i] += omega*(relax_weight*res + res0 +
                          one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
                     }
                  }
               }
            }
            /*-----------------------------------------------------------------
             * Relax only C or F points as determined by relax_points.
             *-----------------------------------------------------------------*/
            else
            {
               if (num_threads > 1)
               {
                  tmp_data = hypre_CTAlloc(HYPRE_Real,n);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
                  for (i = 0; i < n; i++)
                     tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
                  for (j = 0; j < num_threads; j++)
                  {
                     size = n/num_threads;
                     rest = n - size*num_threads;
                     if (j < rest)
                     {
                        ns = j*size+j;
                        ne = (j+1)*size+j+1;
                     }
                     else
                     {
                        ns = j*size+rest;
                        ne = (j+1)*size+rest;
                     }
                     for (i = ne-1; i > ns-1; i--) /* relax interior points */
                     {
                        /*-----------------------------------------------------------
                         * If i is of the right type ( C or F ) and diagonal is
                         * nonzero, relax point i; otherwise, skip it.
                         *-----------------------------------------------------------*/
                        if (cf_marker[i] == relax_points
                            && A_diag_data[A_diag_i[i]] != zero)
                        {
                           res0 = 0.0;
                           res2 = 0.0;
                           res = f_data[i];
                           for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                           {
                              ii = A_diag_j[jj];
                              if (ii >= ns && ii < ne)
                              {
                                 res0 -= A_diag_data[jj] * u_data[ii];
                                 res2 += A_diag_data[jj] * Vtemp_data[ii];
                              }
                              else
                                 res -= A_diag_data[jj] * tmp_data[ii];
                           }
                           for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                           {
                              ii = A_offd_j[jj];
                              res -= A_offd_data[jj] * Vext_data[ii];
                           }
                           u_data[i] *= prod;
                           u_data[i] += relax_weight*(omega*res + res0 +
                                                      one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
                           /*u_data[i] += omega*(relax_weight*res + res0 +
                             one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
                        }
                     }
                  }
                  hypre_TFree(tmp_data);
               }
               else
               {
                  for (i = n-1; i > -1; i--) /* relax interior points */
                  {
                     /*-----------------------------------------------------------
                      * If i is of the right type ( C or F ) and diagonal is
                      * nonzero, relax point i; otherwise, skip it.
                      *-----------------------------------------------------------*/
                     if (cf_marker[i] == relax_points
                         && A_diag_data[A_diag_i[i]] != zero)
                     {
                        res = f_data[i];
                        res0 = 0.0;
                        res2 = 0.0;
                        for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                        {
                           ii = A_diag_j[jj];
                           res0 -= A_diag_data[jj] * u_data[ii];
                           res2 += A_diag_data[jj] * Vtemp_data[ii];
                        }
                        for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                        {
                           ii = A_offd_j[jj];
                           res -= A_offd_data[jj] * Vext_data[ii];
                        }
                        u_data[i] *= prod;
                        u_data[i] += relax_weight*(omega*res + res0 +
                                                   one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
                        /*u_data[i] += omega*(relax_weight*res + res0 +
                          one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
                     }
                  }
               }
            }
         }
         if (num_procs > 1)
         {
            hypre_TFree(Vext_data);
            hypre_TFree(v_buf_data);
         }
      }
      break;
case 6: /* Hybrid: Jacobi off-processor,
Symm. Gauss-Seidel/ SSOR on-processor
with outer relaxation parameter */
{
if (num_threads > 1)
{
Ztemp_local = hypre_ParVectorLocalVector(Ztemp);
Ztemp_data = hypre_VectorData(Ztemp_local);
}
/*-----------------------------------------------------------------
* Copy current approximation into temporary vector.
*-----------------------------------------------------------------*/
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
v_buf_data = hypre_CTAlloc(HYPRE_Real,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));
Vext_data = hypre_CTAlloc(HYPRE_Real,num_cols_offd);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
v_buf_data[index++]
= u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data,
Vext_data);
/*-----------------------------------------------------------------
* Copy current approximation into temporary vector.
*-----------------------------------------------------------------*/
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
/*-----------------------------------------------------------------
* Relax all points.
*-----------------------------------------------------------------*/
if (relax_weight == 1 && omega == 1)
{
if (relax_points == 0)
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
for (i = ne-1; i > ns-1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
}
else
{
for (i = 0; i < n; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
for (i = n-1; i > -1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
for (i = ne-1; i > ns-1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
}
else
{
for (i = 0; i < n; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
for (i = n-1; i > -1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
}
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
Vtemp_data[i] = u_data[i];
}
prod = (1.0-relax_weight*omega);
if (relax_points == 0)
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
for (i = ne-1; i > ns-1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
}
}
else
{
for (i = 0; i < n; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res0 = 0.0;
res = f_data[i];
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
for (i = n-1; i > -1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res0 = 0.0;
res = f_data[i];
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res2 += A_diag_data[jj] * Vtemp_data[ii];
res0 -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
for (i = ne-1; i > ns-1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res2 += A_diag_data[jj] * Vtemp_data[ii];
res0 -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
}
}
else
{
for (i = 0; i < n; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
res0 = 0.0;
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
for (i = n-1; i > -1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
res0 = 0.0;
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
}
}
}
if (num_procs > 1)
{
hypre_TFree(Vext_data);
hypre_TFree(v_buf_data);
}
}
break;
      case 7: /* Jacobi (uses ParMatvec) */
      {
         /* L1-scaled weighted Jacobi sweep:
          *    u += relax_weight * D_l1^{-1} * (f - A*u)
          * where D_l1 is the diagonal of l1 row norms (l1_norms). */
         /*-----------------------------------------------------------------
          * Copy f into temporary vector.
          *-----------------------------------------------------------------*/
         hypre_ParVectorCopy(f,Vtemp);
         /*-----------------------------------------------------------------
          * Perform Matvec Vtemp = relax_weight*(f - A*u)
          * (alpha = -relax_weight, beta = relax_weight, Vtemp holds f).
          *-----------------------------------------------------------------*/
         hypre_ParCSRMatrixMatvec(-relax_weight,A, u, relax_weight, Vtemp);
         for (i = 0; i < n; i++)
         {
            /*-----------------------------------------------------------
             * Scale the weighted residual by the l1 norm of row i.
             * NOTE(review): unlike the Gauss-Seidel cases there is no
             * zero check here -- this assumes l1_norms[i] != 0 for every
             * local row; confirm against the l1_norms setup code.
             *-----------------------------------------------------------*/
            u_data[i] += Vtemp_data[i] / l1_norms[i];
         }
      }
      break;
      case 8: /* hybrid L1 Symm. Gauss-Seidel */
      {
         /* Hybrid symmetric l1-Gauss-Seidel smoother: one forward sweep
          * over the local rows followed by one backward sweep.  "Hybrid"
          * means Gauss-Seidel ordering inside each thread/process block and
          * Jacobi-style coupling across block boundaries (off-block values
          * are read from the tmp_data / Vext_data snapshots taken before
          * the sweep).  The l1 row norms (l1_norms) replace the diagonal
          * as the smoothing scale. */
         if (num_threads > 1)
         {
            /* Ztemp supplies scratch space for the pre-sweep copy of u
             * used for inter-thread (Jacobi) coupling below. */
            Ztemp_local = hypre_ParVectorLocalVector(Ztemp);
            Ztemp_data = hypre_VectorData(Ztemp_local);
         }
         /*-----------------------------------------------------------------
          * Copy current approximation into temporary vector.
          *-----------------------------------------------------------------*/
         if (num_procs > 1)
         {
            num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
            /* SendMapStart(comm_pkg, num_sends) is the total number of
             * entries this rank sends, i.e. the required send-buffer size
             * (presumably -- verify against the comm-pkg documentation). */
            v_buf_data = hypre_CTAlloc(HYPRE_Real,
                                       hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));
            Vext_data = hypre_CTAlloc(HYPRE_Real,num_cols_offd);
            if (num_cols_offd)
            {
               A_offd_j = hypre_CSRMatrixJ(A_offd);
               A_offd_data = hypre_CSRMatrixData(A_offd);
            }
            index = 0;
            for (i = 0; i < num_sends; i++)
            {
               start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
               for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
                  v_buf_data[index++]
                     = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
            }
            comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data,
                                                        Vext_data);
            /*-----------------------------------------------------------------
             * Complete the halo exchange; afterwards Vext_data holds the
             * off-processor entries of u referenced by A_offd.
             *-----------------------------------------------------------------*/
            hypre_ParCSRCommHandleDestroy(comm_handle);
            comm_handle = NULL;
         }
         /*-----------------------------------------------------------------
          * Relax all points.
          *-----------------------------------------------------------------*/
         if (relax_weight == 1 && omega == 1)
         {
            if (relax_points == 0)
            {
               if (num_threads > 1)
               {
                  tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
                  for (i = 0; i < n; i++)
                     tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
                  for (j = 0; j < num_threads; j++)
                  {
                     /* Partition rows [0,n) into num_threads contiguous
                      * chunks [ns,ne); the first `rest` chunks get one
                      * extra row each. */
                     size = n/num_threads;
                     rest = n - size*num_threads;
                     if (j < rest)
                     {
                        ns = j*size+j;
                        ne = (j+1)*size+j+1;
                     }
                     else
                     {
                        ns = j*size+rest;
                        ne = (j+1)*size+rest;
                     }
                     for (i = ns; i < ne; i++) /* interior points first */
                     {
                        /*-----------------------------------------------------------
                         * If diagonal is nonzero, relax point i; otherwise, skip it.
                         *-----------------------------------------------------------*/
                        if ( l1_norms[i] != zero)
                        {
                           res = f_data[i];
                           for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                           {
                              ii = A_diag_j[jj];
                              if (ii >= ns && ii < ne)
                              {
                                 res -= A_diag_data[jj] * u_data[ii];
                              }
                              else
                                 res -= A_diag_data[jj] * tmp_data[ii];
                           }
                           for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                           {
                              ii = A_offd_j[jj];
                              res -= A_offd_data[jj] * Vext_data[ii];
                           }
                           u_data[i] += res / l1_norms[i];
                        }
                     }
                     for (i = ne-1; i > ns-1; i--) /* interior points first */
                     {
                        /*-----------------------------------------------------------
                         * If diagonal is nonzero, relax point i; otherwise, skip it.
                         *-----------------------------------------------------------*/
                        if ( l1_norms[i] != zero)
                        {
                           res = f_data[i];
                           for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                           {
                              ii = A_diag_j[jj];
                              if (ii >= ns && ii < ne)
                              {
                                 res -= A_diag_data[jj] * u_data[ii];
                              }
                              else
                                 res -= A_diag_data[jj] * tmp_data[ii];
                           }
                           for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                           {
                              ii = A_offd_j[jj];
                              res -= A_offd_data[jj] * Vext_data[ii];
                           }
                           u_data[i] += res / l1_norms[i];
                        }
                     }
                  }
               }
               else
               {
                  for (i = 0; i < n; i++) /* interior points first */
                  {
                     /*-----------------------------------------------------------
                      * If diagonal is nonzero, relax point i; otherwise, skip it.
                      *-----------------------------------------------------------*/
                     if ( l1_norms[i] != zero)
                     {
                        res = f_data[i];
                        for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                        {
                           ii = A_diag_j[jj];
                           res -= A_diag_data[jj] * u_data[ii];
                        }
                        for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                        {
                           ii = A_offd_j[jj];
                           res -= A_offd_data[jj] * Vext_data[ii];
                        }
                        u_data[i] += res / l1_norms[i];
                     }
                  }
                  for (i = n-1; i > -1; i--) /* interior points first */
                  {
                     /*-----------------------------------------------------------
                      * If diagonal is nonzero, relax point i; otherwise, skip it.
                      *-----------------------------------------------------------*/
                     if ( l1_norms[i] != zero)
                     {
                        res = f_data[i];
                        for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                        {
                           ii = A_diag_j[jj];
                           res -= A_diag_data[jj] * u_data[ii];
                        }
                        for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                        {
                           ii = A_offd_j[jj];
                           res -= A_offd_data[jj] * Vext_data[ii];
                        }
                        u_data[i] += res / l1_norms[i];
                     }
                  }
               }
            }
            /*-----------------------------------------------------------------
             * Relax only C or F points as determined by relax_points.
             *-----------------------------------------------------------------*/
            else
            {
               if (num_threads > 1)
               {
                  tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
                  for (i = 0; i < n; i++)
                     tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
                  for (j = 0; j < num_threads; j++)
                  {
                     size = n/num_threads;
                     rest = n - size*num_threads;
                     if (j < rest)
                     {
                        ns = j*size+j;
                        ne = (j+1)*size+j+1;
                     }
                     else
                     {
                        ns = j*size+rest;
                        ne = (j+1)*size+rest;
                     }
                     for (i = ns; i < ne; i++) /* relax interior points */
                     {
                        /*-----------------------------------------------------------
                         * If i is of the right type ( C or F ) and diagonal is
                         * nonzero, relax point i; otherwise, skip it.
                         *-----------------------------------------------------------*/
                        if (cf_marker[i] == relax_points
                            && l1_norms[i] != zero)
                        {
                           res = f_data[i];
                           for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                           {
                              ii = A_diag_j[jj];
                              if (ii >= ns && ii < ne)
                              {
                                 res -= A_diag_data[jj] * u_data[ii];
                              }
                              else
                                 res -= A_diag_data[jj] * tmp_data[ii];
                           }
                           for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                           {
                              ii = A_offd_j[jj];
                              res -= A_offd_data[jj] * Vext_data[ii];
                           }
                           u_data[i] += res / l1_norms[i];
                        }
                     }
                     for (i = ne-1; i > ns-1; i--) /* relax interior points */
                     {
                        /*-----------------------------------------------------------
                         * If i is of the right type ( C or F ) and diagonal is
                         * nonzero, relax point i; otherwise, skip it.
                         *-----------------------------------------------------------*/
                        if (cf_marker[i] == relax_points
                            && l1_norms[i] != zero)
                        {
                           res = f_data[i];
                           for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                           {
                              ii = A_diag_j[jj];
                              if (ii >= ns && ii < ne)
                              {
                                 res -= A_diag_data[jj] * u_data[ii];
                              }
                              else
                                 res -= A_diag_data[jj] * tmp_data[ii];
                           }
                           for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                           {
                              ii = A_offd_j[jj];
                              res -= A_offd_data[jj] * Vext_data[ii];
                           }
                           u_data[i] += res / l1_norms[i];
                        }
                     }
                  }
               }
               else
               {
                  for (i = 0; i < n; i++) /* relax interior points */
                  {
                     /*-----------------------------------------------------------
                      * If i is of the right type ( C or F ) and diagonal is
                      * nonzero, relax point i; otherwise, skip it.
                      *-----------------------------------------------------------*/
                     if (cf_marker[i] == relax_points
                         && l1_norms[i] != zero)
                     {
                        res = f_data[i];
                        for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                        {
                           ii = A_diag_j[jj];
                           res -= A_diag_data[jj] * u_data[ii];
                        }
                        for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                        {
                           ii = A_offd_j[jj];
                           res -= A_offd_data[jj] * Vext_data[ii];
                        }
                        u_data[i] += res / l1_norms[i];
                     }
                  }
                  for (i = n-1; i > -1; i--) /* relax interior points */
                  {
                     /*-----------------------------------------------------------
                      * If i is of the right type ( C or F ) and diagonal is
                      * nonzero, relax point i; otherwise, skip it.
                      *-----------------------------------------------------------*/
                     if (cf_marker[i] == relax_points
                         && l1_norms[i] != zero)
                     {
                        res = f_data[i];
                        for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                        {
                           ii = A_diag_j[jj];
                           res -= A_diag_data[jj] * u_data[ii];
                        }
                        for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                        {
                           ii = A_offd_j[jj];
                           res -= A_offd_data[jj] * Vext_data[ii];
                        }
                        u_data[i] += res / l1_norms[i];
                     }
                  }
               }
            }
         }
         else
         {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
            for (i = 0; i < n; i++)
            {
               Vtemp_data[i] = u_data[i];
            }
            /* Weighted SSOR-type update: prod scales the previous iterate
             * in u_new = prod*u_old + relax_weight*(...)/l1_norms[i]. */
            prod = (1.0-relax_weight*omega);
            if (relax_points == 0)
            {
               if (num_threads > 1)
               {
                  tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
                  for (i = 0; i < n; i++)
                     tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
                  for (j = 0; j < num_threads; j++)
                  {
                     size = n/num_threads;
                     rest = n - size*num_threads;
                     if (j < rest)
                     {
                        ns = j*size+j;
                        ne = (j+1)*size+j+1;
                     }
                     else
                     {
                        ns = j*size+rest;
                        ne = (j+1)*size+rest;
                     }
                     for (i = ns; i < ne; i++) /* interior points first */
                     {
                        /*-----------------------------------------------------------
                         * If diagonal is nonzero, relax point i; otherwise, skip it.
                         *-----------------------------------------------------------*/
                        if ( l1_norms[i] != zero)
                        {
                           res0 = 0.0;
                           res2 = 0.0;
                           res = f_data[i];
                           for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                           {
                              ii = A_diag_j[jj];
                              if (ii >= ns && ii < ne)
                              {
                                 res0 -= A_diag_data[jj] * u_data[ii];
                                 res2 += A_diag_data[jj] * Vtemp_data[ii];
                              }
                              else
                                 res -= A_diag_data[jj] * tmp_data[ii];
                           }
                           for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                           {
                              ii = A_offd_j[jj];
                              res -= A_offd_data[jj] * Vext_data[ii];
                           }
                           u_data[i] *= prod;
                           u_data[i] += relax_weight*(omega*res + res0 +
                                                      one_minus_omega*res2) / l1_norms[i];
                           /*u_data[i] += omega*(relax_weight*res + res0 +
                                                one_minus_weight*res2) / l1_norms[i];*/
                        }
                     }
                     for (i = ne-1; i > ns-1; i--) /* interior points first */
                     {
                        /*-----------------------------------------------------------
                         * If diagonal is nonzero, relax point i; otherwise, skip it.
                         *-----------------------------------------------------------*/
                        if ( l1_norms[i] != zero)
                        {
                           res0 = 0.0;
                           res2 = 0.0;
                           res = f_data[i];
                           for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                           {
                              ii = A_diag_j[jj];
                              if (ii >= ns && ii < ne)
                              {
                                 res0 -= A_diag_data[jj] * u_data[ii];
                                 res2 += A_diag_data[jj] * Vtemp_data[ii];
                              }
                              else
                                 res -= A_diag_data[jj] * tmp_data[ii];
                           }
                           for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                           {
                              ii = A_offd_j[jj];
                              res -= A_offd_data[jj] * Vext_data[ii];
                           }
                           u_data[i] *= prod;
                           u_data[i] += relax_weight*(omega*res + res0 +
                                                      one_minus_omega*res2) / l1_norms[i];
                           /*u_data[i] += omega*(relax_weight*res + res0 +
                                                one_minus_weight*res2) / l1_norms[i];*/
                        }
                     }
                  }
               }
               else
               {
                  for (i = 0; i < n; i++) /* interior points first */
                  {
                     /*-----------------------------------------------------------
                      * If diagonal is nonzero, relax point i; otherwise, skip it.
                      *-----------------------------------------------------------*/
                     if ( l1_norms[i] != zero)
                     {
                        res0 = 0.0;
                        res = f_data[i];
                        res2 = 0.0;
                        for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                        {
                           ii = A_diag_j[jj];
                           res0 -= A_diag_data[jj] * u_data[ii];
                           res2 += A_diag_data[jj] * Vtemp_data[ii];
                        }
                        for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                        {
                           ii = A_offd_j[jj];
                           res -= A_offd_data[jj] * Vext_data[ii];
                        }
                        u_data[i] *= prod;
                        u_data[i] += relax_weight*(omega*res + res0 +
                                                   one_minus_omega*res2) / l1_norms[i];
                        /*u_data[i] += omega*(relax_weight*res + res0 +
                                             one_minus_weight*res2) / l1_norms[i];*/
                     }
                  }
                  for (i = n-1; i > -1; i--) /* interior points first */
                  {
                     /*-----------------------------------------------------------
                      * If diagonal is nonzero, relax point i; otherwise, skip it.
                      *-----------------------------------------------------------*/
                     if ( l1_norms[i] != zero)
                     {
                        res0 = 0.0;
                        res = f_data[i];
                        res2 = 0.0;
                        for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                        {
                           ii = A_diag_j[jj];
                           res0 -= A_diag_data[jj] * u_data[ii];
                           res2 += A_diag_data[jj] * Vtemp_data[ii];
                        }
                        for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                        {
                           ii = A_offd_j[jj];
                           res -= A_offd_data[jj] * Vext_data[ii];
                        }
                        u_data[i] *= prod;
                        u_data[i] += relax_weight*(omega*res + res0 +
                                                   one_minus_omega*res2) / l1_norms[i];
                        /*u_data[i] += omega*(relax_weight*res + res0 +
                                             one_minus_weight*res2) / l1_norms[i];*/
                     }
                  }
               }
            }
            /*-----------------------------------------------------------------
             * Relax only C or F points as determined by relax_points.
             *-----------------------------------------------------------------*/
            else
            {
               if (num_threads > 1)
               {
                  tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
                  for (i = 0; i < n; i++)
                     tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
                  for (j = 0; j < num_threads; j++)
                  {
                     size = n/num_threads;
                     rest = n - size*num_threads;
                     if (j < rest)
                     {
                        ns = j*size+j;
                        ne = (j+1)*size+j+1;
                     }
                     else
                     {
                        ns = j*size+rest;
                        ne = (j+1)*size+rest;
                     }
                     for (i = ns; i < ne; i++) /* relax interior points */
                     {
                        /*-----------------------------------------------------------
                         * If i is of the right type ( C or F ) and diagonal is
                         * nonzero, relax point i; otherwise, skip it.
                         *-----------------------------------------------------------*/
                        if (cf_marker[i] == relax_points
                            && l1_norms[i] != zero)
                        {
                           res0 = 0.0;
                           res2 = 0.0;
                           res = f_data[i];
                           for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                           {
                              ii = A_diag_j[jj];
                              if (ii >= ns && ii < ne)
                              {
                                 res2 += A_diag_data[jj] * Vtemp_data[ii];
                                 res0 -= A_diag_data[jj] * u_data[ii];
                              }
                              else
                                 res -= A_diag_data[jj] * tmp_data[ii];
                           }
                           for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                           {
                              ii = A_offd_j[jj];
                              res -= A_offd_data[jj] * Vext_data[ii];
                           }
                           u_data[i] *= prod;
                           u_data[i] += relax_weight*(omega*res + res0 +
                                                      one_minus_omega*res2) / l1_norms[i];
                           /*u_data[i] += omega*(relax_weight*res + res0 +
                                                one_minus_weight*res2) / l1_norms[i];*/
                        }
                     }
                     for (i = ne-1; i > ns-1; i--) /* relax interior points */
                     {
                        /*-----------------------------------------------------------
                         * If i is of the right type ( C or F ) and diagonal is
                         * nonzero, relax point i; otherwise, skip it.
                         *-----------------------------------------------------------*/
                        if (cf_marker[i] == relax_points
                            && l1_norms[i] != zero)
                        {
                           res0 = 0.0;
                           res2 = 0.0;
                           res = f_data[i];
                           for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                           {
                              ii = A_diag_j[jj];
                              if (ii >= ns && ii < ne)
                              {
                                 res2 += A_diag_data[jj] * Vtemp_data[ii];
                                 res0 -= A_diag_data[jj] * u_data[ii];
                              }
                              else
                                 res -= A_diag_data[jj] * tmp_data[ii];
                           }
                           for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                           {
                              ii = A_offd_j[jj];
                              res -= A_offd_data[jj] * Vext_data[ii];
                           }
                           u_data[i] *= prod;
                           u_data[i] += relax_weight*(omega*res + res0 +
                                                      one_minus_omega*res2) / l1_norms[i];
                           /*u_data[i] += omega*(relax_weight*res + res0 +
                                                one_minus_weight*res2) / l1_norms[i];*/
                        }
                     }
                  }
               }
               else
               {
                  for (i = 0; i < n; i++) /* relax interior points */
                  {
                     /*-----------------------------------------------------------
                      * If i is of the right type ( C or F ) and diagonal is
                      * nonzero, relax point i; otherwise, skip it.
                      *-----------------------------------------------------------*/
                     if (cf_marker[i] == relax_points
                         && l1_norms[i] != zero)
                     {
                        res = f_data[i];
                        res0 = 0.0;
                        res2 = 0.0;
                        for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                        {
                           ii = A_diag_j[jj];
                           res0 -= A_diag_data[jj] * u_data[ii];
                           res2 += A_diag_data[jj] * Vtemp_data[ii];
                        }
                        for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                        {
                           ii = A_offd_j[jj];
                           res -= A_offd_data[jj] * Vext_data[ii];
                        }
                        u_data[i] *= prod;
                        u_data[i] += relax_weight*(omega*res + res0 +
                                                   one_minus_omega*res2) / l1_norms[i];
                        /*u_data[i] += omega*(relax_weight*res + res0 +
                                             one_minus_weight*res2) / l1_norms[i];*/
                     }
                  }
                  for (i = n-1; i > -1; i--) /* relax interior points */
                  {
                     /*-----------------------------------------------------------
                      * If i is of the right type ( C or F ) and diagonal is
                      * nonzero, relax point i; otherwise, skip it.
                      *-----------------------------------------------------------*/
                     if (cf_marker[i] == relax_points
                         && l1_norms[i] != zero)
                     {
                        res = f_data[i];
                        res0 = 0.0;
                        res2 = 0.0;
                        for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                        {
                           ii = A_diag_j[jj];
                           res0 -= A_diag_data[jj] * u_data[ii];
                           res2 += A_diag_data[jj] * Vtemp_data[ii];
                        }
                        for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                        {
                           ii = A_offd_j[jj];
                           res -= A_offd_data[jj] * Vext_data[ii];
                        }
                        u_data[i] *= prod;
                        u_data[i] += relax_weight*(omega*res + res0 +
                                                   one_minus_omega*res2) / l1_norms[i];
                        /*u_data[i] += omega*(relax_weight*res + res0 +
                                             one_minus_weight*res2) / l1_norms[i];*/
                     }
                  }
               }
            }
         }
         if (num_procs > 1)
         {
            hypre_TFree(Vext_data);
            hypre_TFree(v_buf_data);
         }
      }
      break;
      case 13: /* hybrid L1 Gauss-Seidel forward solve */
      {
         /* Forward-only half of the hybrid l1-Gauss-Seidel smoother
          * (cf. case 8, which does forward + backward): Gauss-Seidel
          * ordering inside each thread/process block, Jacobi-style
          * coupling across block boundaries via the tmp_data / Vext_data
          * snapshots, with the l1 row norms as the diagonal scale. */
         if (num_threads > 1)
         {
            /* Ztemp supplies scratch space for the pre-sweep copy of u
             * used for inter-thread (Jacobi) coupling below. */
            Ztemp_local = hypre_ParVectorLocalVector(Ztemp);
            Ztemp_data = hypre_VectorData(Ztemp_local);
         }
         /*-----------------------------------------------------------------
          * Copy current approximation into temporary vector.
          *-----------------------------------------------------------------*/
         if (num_procs > 1)
         {
            num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
            /* SendMapStart(comm_pkg, num_sends) is the total number of
             * entries this rank sends, i.e. the required send-buffer size
             * (presumably -- verify against the comm-pkg documentation). */
            v_buf_data = hypre_CTAlloc(HYPRE_Real,
                                       hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));
            Vext_data = hypre_CTAlloc(HYPRE_Real,num_cols_offd);
            if (num_cols_offd)
            {
               A_offd_j = hypre_CSRMatrixJ(A_offd);
               A_offd_data = hypre_CSRMatrixData(A_offd);
            }
            index = 0;
            for (i = 0; i < num_sends; i++)
            {
               start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
               for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
                  v_buf_data[index++]
                     = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
            }
            comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data,
                                                        Vext_data);
            /*-----------------------------------------------------------------
             * Complete the halo exchange; afterwards Vext_data holds the
             * off-processor entries of u referenced by A_offd.
             *-----------------------------------------------------------------*/
            hypre_ParCSRCommHandleDestroy(comm_handle);
            comm_handle = NULL;
         }
         /*-----------------------------------------------------------------
          * Relax all points.
          *-----------------------------------------------------------------*/
         if (relax_weight == 1 && omega == 1)
         {
            if (relax_points == 0)
            {
               if (num_threads > 1)
               {
                  tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
                  for (i = 0; i < n; i++)
                     tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
                  for (j = 0; j < num_threads; j++)
                  {
                     /* Partition rows [0,n) into num_threads contiguous
                      * chunks [ns,ne); the first `rest` chunks get one
                      * extra row each. */
                     size = n/num_threads;
                     rest = n - size*num_threads;
                     if (j < rest)
                     {
                        ns = j*size+j;
                        ne = (j+1)*size+j+1;
                     }
                     else
                     {
                        ns = j*size+rest;
                        ne = (j+1)*size+rest;
                     }
                     for (i = ns; i < ne; i++) /* interior points first */
                     {
                        /*-----------------------------------------------------------
                         * If diagonal is nonzero, relax point i; otherwise, skip it.
                         *-----------------------------------------------------------*/
                        if ( l1_norms[i] != zero)
                        {
                           res = f_data[i];
                           for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                           {
                              ii = A_diag_j[jj];
                              if (ii >= ns && ii < ne)
                              {
                                 res -= A_diag_data[jj] * u_data[ii];
                              }
                              else
                                 res -= A_diag_data[jj] * tmp_data[ii];
                           }
                           for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                           {
                              ii = A_offd_j[jj];
                              res -= A_offd_data[jj] * Vext_data[ii];
                           }
                           u_data[i] += res / l1_norms[i];
                        }
                     }
                  }
               }
               else
               {
                  for (i = 0; i < n; i++) /* interior points first */
                  {
                     /*-----------------------------------------------------------
                      * If diagonal is nonzero, relax point i; otherwise, skip it.
                      *-----------------------------------------------------------*/
                     if ( l1_norms[i] != zero)
                     {
                        res = f_data[i];
                        for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                        {
                           ii = A_diag_j[jj];
                           res -= A_diag_data[jj] * u_data[ii];
                        }
                        for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                        {
                           ii = A_offd_j[jj];
                           res -= A_offd_data[jj] * Vext_data[ii];
                        }
                        u_data[i] += res / l1_norms[i];
                     }
                  }
               }
            }
            /*-----------------------------------------------------------------
             * Relax only C or F points as determined by relax_points.
             *-----------------------------------------------------------------*/
            else
            {
               if (num_threads > 1)
               {
                  tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
                  for (i = 0; i < n; i++)
                     tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
                  for (j = 0; j < num_threads; j++)
                  {
                     size = n/num_threads;
                     rest = n - size*num_threads;
                     if (j < rest)
                     {
                        ns = j*size+j;
                        ne = (j+1)*size+j+1;
                     }
                     else
                     {
                        ns = j*size+rest;
                        ne = (j+1)*size+rest;
                     }
                     for (i = ns; i < ne; i++) /* relax interior points */
                     {
                        /*-----------------------------------------------------------
                         * If i is of the right type ( C or F ) and diagonal is
                         * nonzero, relax point i; otherwise, skip it.
                         *-----------------------------------------------------------*/
                        if (cf_marker[i] == relax_points
                            && l1_norms[i] != zero)
                        {
                           res = f_data[i];
                           for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                           {
                              ii = A_diag_j[jj];
                              if (ii >= ns && ii < ne)
                              {
                                 res -= A_diag_data[jj] * u_data[ii];
                              }
                              else
                                 res -= A_diag_data[jj] * tmp_data[ii];
                           }
                           for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                           {
                              ii = A_offd_j[jj];
                              res -= A_offd_data[jj] * Vext_data[ii];
                           }
                           u_data[i] += res / l1_norms[i];
                        }
                     }
                  }
               }
               else
               {
                  for (i = 0; i < n; i++) /* relax interior points */
                  {
                     /*-----------------------------------------------------------
                      * If i is of the right type ( C or F ) and diagonal is
                      * nonzero, relax point i; otherwise, skip it.
                      *-----------------------------------------------------------*/
                     if (cf_marker[i] == relax_points
                         && l1_norms[i] != zero)
                     {
                        res = f_data[i];
                        for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                        {
                           ii = A_diag_j[jj];
                           res -= A_diag_data[jj] * u_data[ii];
                        }
                        for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                        {
                           ii = A_offd_j[jj];
                           res -= A_offd_data[jj] * Vext_data[ii];
                        }
                        u_data[i] += res / l1_norms[i];
                     }
                  }
               }
            }
         }
         else
         {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
            for (i = 0; i < n; i++)
            {
               Vtemp_data[i] = u_data[i];
            }
            /* Weighted SOR-type update: prod scales the previous iterate
             * in u_new = prod*u_old + relax_weight*(...)/l1_norms[i]. */
            prod = (1.0-relax_weight*omega);
            if (relax_points == 0)
            {
               if (num_threads > 1)
               {
                  tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
                  for (i = 0; i < n; i++)
                     tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
                  for (j = 0; j < num_threads; j++)
                  {
                     size = n/num_threads;
                     rest = n - size*num_threads;
                     if (j < rest)
                     {
                        ns = j*size+j;
                        ne = (j+1)*size+j+1;
                     }
                     else
                     {
                        ns = j*size+rest;
                        ne = (j+1)*size+rest;
                     }
                     for (i = ns; i < ne; i++) /* interior points first */
                     {
                        /*-----------------------------------------------------------
                         * If diagonal is nonzero, relax point i; otherwise, skip it.
                         *-----------------------------------------------------------*/
                        if ( l1_norms[i] != zero)
                        {
                           res0 = 0.0;
                           res2 = 0.0;
                           res = f_data[i];
                           for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                           {
                              ii = A_diag_j[jj];
                              if (ii >= ns && ii < ne)
                              {
                                 res0 -= A_diag_data[jj] * u_data[ii];
                                 res2 += A_diag_data[jj] * Vtemp_data[ii];
                              }
                              else
                                 res -= A_diag_data[jj] * tmp_data[ii];
                           }
                           for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                           {
                              ii = A_offd_j[jj];
                              res -= A_offd_data[jj] * Vext_data[ii];
                           }
                           u_data[i] *= prod;
                           u_data[i] += relax_weight*(omega*res + res0 +
                                                      one_minus_omega*res2) / l1_norms[i];
                           /*u_data[i] += omega*(relax_weight*res + res0 +
                                                one_minus_weight*res2) / l1_norms[i];*/
                        }
                     }
                  }
               }
               else
               {
                  for (i = 0; i < n; i++) /* interior points first */
                  {
                     /*-----------------------------------------------------------
                      * If diagonal is nonzero, relax point i; otherwise, skip it.
                      *-----------------------------------------------------------*/
                     if ( l1_norms[i] != zero)
                     {
                        res0 = 0.0;
                        res = f_data[i];
                        res2 = 0.0;
                        for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                        {
                           ii = A_diag_j[jj];
                           res0 -= A_diag_data[jj] * u_data[ii];
                           res2 += A_diag_data[jj] * Vtemp_data[ii];
                        }
                        for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                        {
                           ii = A_offd_j[jj];
                           res -= A_offd_data[jj] * Vext_data[ii];
                        }
                        u_data[i] *= prod;
                        u_data[i] += relax_weight*(omega*res + res0 +
                                                   one_minus_omega*res2) / l1_norms[i];
                        /*u_data[i] += omega*(relax_weight*res + res0 +
                                             one_minus_weight*res2) / l1_norms[i];*/
                     }
                  }
               }
            }
            /*-----------------------------------------------------------------
             * Relax only C or F points as determined by relax_points.
             *-----------------------------------------------------------------*/
            else
            {
               if (num_threads > 1)
               {
                  tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
                  for (i = 0; i < n; i++)
                     tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
                  for (j = 0; j < num_threads; j++)
                  {
                     size = n/num_threads;
                     rest = n - size*num_threads;
                     if (j < rest)
                     {
                        ns = j*size+j;
                        ne = (j+1)*size+j+1;
                     }
                     else
                     {
                        ns = j*size+rest;
                        ne = (j+1)*size+rest;
                     }
                     for (i = ns; i < ne; i++) /* relax interior points */
                     {
                        /*-----------------------------------------------------------
                         * If i is of the right type ( C or F ) and diagonal is
                         * nonzero, relax point i; otherwise, skip it.
                         *-----------------------------------------------------------*/
                        if (cf_marker[i] == relax_points
                            && l1_norms[i] != zero)
                        {
                           res0 = 0.0;
                           res2 = 0.0;
                           res = f_data[i];
                           for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                           {
                              ii = A_diag_j[jj];
                              if (ii >= ns && ii < ne)
                              {
                                 res2 += A_diag_data[jj] * Vtemp_data[ii];
                                 res0 -= A_diag_data[jj] * u_data[ii];
                              }
                              else
                                 res -= A_diag_data[jj] * tmp_data[ii];
                           }
                           for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                           {
                              ii = A_offd_j[jj];
                              res -= A_offd_data[jj] * Vext_data[ii];
                           }
                           u_data[i] *= prod;
                           u_data[i] += relax_weight*(omega*res + res0 +
                                                      one_minus_omega*res2) / l1_norms[i];
                           /*u_data[i] += omega*(relax_weight*res + res0 +
                                                one_minus_weight*res2) / l1_norms[i];*/
                        }
                     }
                  }
               }
               else
               {
                  for (i = 0; i < n; i++) /* relax interior points */
                  {
                     /*-----------------------------------------------------------
                      * If i is of the right type ( C or F ) and diagonal is
                      * nonzero, relax point i; otherwise, skip it.
                      *-----------------------------------------------------------*/
                     if (cf_marker[i] == relax_points
                         && l1_norms[i] != zero)
                     {
                        res = f_data[i];
                        res0 = 0.0;
                        res2 = 0.0;
                        for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                        {
                           ii = A_diag_j[jj];
                           res0 -= A_diag_data[jj] * u_data[ii];
                           res2 += A_diag_data[jj] * Vtemp_data[ii];
                        }
                        for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                        {
                           ii = A_offd_j[jj];
                           res -= A_offd_data[jj] * Vext_data[ii];
                        }
                        u_data[i] *= prod;
                        u_data[i] += relax_weight*(omega*res + res0 +
                                                   one_minus_omega*res2) / l1_norms[i];
                        /*u_data[i] += omega*(relax_weight*res + res0 +
                                             one_minus_weight*res2) / l1_norms[i];*/
                     }
                  }
               }
            }
         }
         if (num_procs > 1)
         {
            hypre_TFree(Vext_data);
            hypre_TFree(v_buf_data);
         }
      }
      break;
case 14: /* hybrid L1 Gauss-Seidel backward solve */
{
if (num_threads > 1)
{
Ztemp_local = hypre_ParVectorLocalVector(Ztemp);
Ztemp_data = hypre_VectorData(Ztemp_local);
}
/*-----------------------------------------------------------------
* Copy current approximation into temporary vector.
*-----------------------------------------------------------------*/
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
v_buf_data = hypre_CTAlloc(HYPRE_Real,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));
Vext_data = hypre_CTAlloc(HYPRE_Real,num_cols_offd);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
v_buf_data[index++]
= u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data,
Vext_data);
/*-----------------------------------------------------------------
* Copy current approximation into temporary vector.
*-----------------------------------------------------------------*/
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
/*-----------------------------------------------------------------
* Relax all points.
*-----------------------------------------------------------------*/
if (relax_weight == 1 && omega == 1)
{
if (relax_points == 0)
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ne-1; i > ns-1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( l1_norms[i] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
}
}
else
{
for (i = n-1; i > -1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( l1_norms[i] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ne-1; i > ns-1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& l1_norms[i] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
}
}
else
{
for (i = n-1; i > -1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& l1_norms[i] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
}
}
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
Vtemp_data[i] = u_data[i];
}
prod = (1.0-relax_weight*omega);
if (relax_points == 0)
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ne-1; i > ns-1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( l1_norms[i] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / l1_norms[i];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / l1_norms[i];*/
}
}
}
}
else
{
for (i = n-1; i > -1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( l1_norms[i] != zero)
{
res0 = 0.0;
res = f_data[i];
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / l1_norms[i];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / l1_norms[i];*/
}
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ne-1; i > ns-1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& l1_norms[i] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res2 += A_diag_data[jj] * Vtemp_data[ii];
res0 -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / l1_norms[i];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / l1_norms[i];*/
}
}
}
}
else
{
for (i = n-1; i > -1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& l1_norms[i] != zero)
{
res = f_data[i];
res0 = 0.0;
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / l1_norms[i];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / l1_norms[i];*/
}
}
}
}
}
if (num_procs > 1)
{
hypre_TFree(Vext_data);
hypre_TFree(v_buf_data);
}
}
break;
case 19: /* Direct solve: use gaussian elimination */
{
/*-----------------------------------------------------------------
* Generate CSR matrix from ParCSRMatrix A
*-----------------------------------------------------------------*/
#ifdef HYPRE_NO_GLOBAL_PARTITION
/* all processors are needed for these routines */
A_CSR = hypre_ParCSRMatrixToCSRMatrixAll(A);
f_vector = hypre_ParVectorToVectorAll(f);
if (n)
{
#else
if (n)
{
A_CSR = hypre_ParCSRMatrixToCSRMatrixAll(A);
f_vector = hypre_ParVectorToVectorAll(f);
#endif
A_CSR_i = hypre_CSRMatrixI(A_CSR);
A_CSR_j = hypre_CSRMatrixJ(A_CSR);
A_CSR_data = hypre_CSRMatrixData(A_CSR);
f_vector_data = hypre_VectorData(f_vector);
A_mat = hypre_CTAlloc(HYPRE_Real, n_global*n_global);
b_vec = hypre_CTAlloc(HYPRE_Real, n_global);
/*---------------------------------------------------------------
* Load CSR matrix into A_mat.
*---------------------------------------------------------------*/
for (i = 0; i < n_global; i++)
{
for (jj = A_CSR_i[i]; jj < A_CSR_i[i+1]; jj++)
{
column = A_CSR_j[jj];
A_mat[i*n_global+column] = A_CSR_data[jj];
}
b_vec[i] = f_vector_data[i];
}
relax_error = gselim(A_mat,b_vec,n_global);
for (i = 0; i < n; i++)
{
u_data[i] = b_vec[first_index+i];
}
hypre_TFree(A_mat);
hypre_TFree(b_vec);
hypre_CSRMatrixDestroy(A_CSR);
A_CSR = NULL;
hypre_SeqVectorDestroy(f_vector);
f_vector = NULL;
}
#ifdef HYPRE_NO_GLOBAL_PARTITION
else
{
hypre_CSRMatrixDestroy(A_CSR);
A_CSR = NULL;
hypre_SeqVectorDestroy(f_vector);
f_vector = NULL;
}
#endif
}
break;
}
return(relax_error);
}
/*-------------------------------------------------------------------------
*
* Gaussian Elimination
*
*------------------------------------------------------------------------ */
HYPRE_Int hypre_GaussElimSetup (hypre_ParAMGData *amg_data, HYPRE_Int level, HYPRE_Int relax_type)
{
/* Assemble the level-`level` ParCSR matrix as a dense, fully replicated
 * n_global x n_global array and cache it in amg_data (together with a
 * communication layout and a global RHS buffer) for the sequential direct
 * solve performed later by hypre_GaussElimSolve.
 * For relax_type == 99 the transpose of the gathered matrix is stored;
 * otherwise the gathered matrix itself is kept.
 * Ranks that own no rows are split off via a sub-communicator and skip
 * the assembly entirely. */
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_GS_ELIM_SETUP] -= hypre_MPI_Wtime();
#endif
/* Par Data Structure variables */
hypre_ParCSRMatrix *A = hypre_ParAMGDataAArray(amg_data)[level];
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
MPI_Comm new_comm;
/* Generate sub communicator containing only the ranks that own rows */
hypre_GenerateSubComm(comm, num_rows, &new_comm);
if (num_rows)
{
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Real *A_mat, *A_mat_local;
HYPRE_Int *comm_info, *info, *displs;
HYPRE_Int *mat_info, *mat_displs;
HYPRE_Int new_num_procs, A_mat_local_size, i, jj, column;
HYPRE_Int first_row_index = hypre_ParCSRMatrixFirstRowIndex(A);
hypre_MPI_Comm_size(new_comm, &new_num_procs);
/* comm_info packs info[] (row counts) and displs[] (row offsets) into one
 * allocation; it is stored in amg_data below and reused at solve time. */
comm_info = hypre_CTAlloc(HYPRE_Int, 2*new_num_procs+1);
mat_info = hypre_CTAlloc(HYPRE_Int, new_num_procs);
mat_displs = hypre_CTAlloc(HYPRE_Int, new_num_procs+1);
info = &comm_info[0];
displs = &comm_info[new_num_procs];
/* Exchange local row counts: info[i] = number of rows owned by rank i. */
hypre_MPI_Allgather(&num_rows, 1, HYPRE_MPI_INT, info, 1, HYPRE_MPI_INT, new_comm);
displs[0] = 0;
mat_displs[0] = 0;
/* mat_info/mat_displs express the same layout in units of matrix entries
 * (global_num_rows entries per row) for the Allgatherv of dense rows. */
for (i=0; i < new_num_procs; i++)
{
displs[i+1] = displs[i]+info[i];
mat_displs[i+1] = global_num_rows*displs[i+1];
mat_info[i] = global_num_rows*info[i];
}
/* Persistent buffer for the gathered global right-hand side. */
hypre_ParAMGDataBVec(amg_data) = hypre_CTAlloc(HYPRE_Real, global_num_rows);
A_mat_local_size = global_num_rows*num_rows;
A_mat_local = hypre_CTAlloc(HYPRE_Real, A_mat_local_size);
A_mat = hypre_CTAlloc(HYPRE_Real, global_num_rows*global_num_rows);
/* load local matrix into A_mat_local */
for (i = 0; i < num_rows; i++)
{
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
/* need col major */
column = A_diag_j[jj]+first_row_index;
A_mat_local[i*global_num_rows + column] = A_diag_data[jj];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
/* need col major */
column = col_map_offd[A_offd_j[jj]];
A_mat_local[i*global_num_rows + column] = A_offd_data[jj];
}
}
/* Gather every rank's dense row block into the full replicated matrix. */
hypre_MPI_Allgatherv( A_mat_local, A_mat_local_size, HYPRE_MPI_REAL, A_mat,
mat_info, mat_displs, HYPRE_MPI_REAL, new_comm);
if (relax_type == 99)
{
/* Store the transpose instead of A_mat for the relax_type 99 path. */
HYPRE_Real *AT_mat;
AT_mat = hypre_CTAlloc(HYPRE_Real, global_num_rows*global_num_rows);
for (i=0; i < global_num_rows; i++)
for (jj=0; jj < global_num_rows; jj++)
AT_mat[i*global_num_rows + jj] = A_mat[i+ jj*global_num_rows];
hypre_ParAMGDataAMat(amg_data) = AT_mat;
hypre_TFree (A_mat);
}
else
hypre_ParAMGDataAMat(amg_data) = A_mat;
/* comm_info and the sub-communicator are kept for hypre_GaussElimSolve. */
hypre_ParAMGDataCommInfo(amg_data) = comm_info;
hypre_ParAMGDataNewComm(amg_data) = new_comm;
hypre_TFree(mat_info);
hypre_TFree(mat_displs);
hypre_TFree(A_mat_local);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_GS_ELIM_SETUP] += hypre_MPI_Wtime();
#endif
return hypre_error_flag;
}
HYPRE_Int hypre_GaussElimSolve (hypre_ParAMGData *amg_data, HYPRE_Int level, HYPRE_Int relax_type)
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_GS_ELIM_SOLVE] -= hypre_MPI_Wtime();
#endif
   /* Direct solve on level `level` using the dense replicated matrix
    * assembled by hypre_GaussElimSetup: gather the full right-hand side on
    * every participating rank, factor a scratch copy of the matrix with
    * Gaussian elimination (relax_type == 9), and copy this rank's rows of
    * the global solution back into the local u vector.
    * Returns hypre_error_flag; a singular system raises HYPRE_ERROR_GENERIC. */
   hypre_ParCSRMatrix *A = hypre_ParAMGDataAArray(amg_data)[level];
   HYPRE_Int n = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   HYPRE_Int error_flag = 0;

   if (n)
   {
      MPI_Comm new_comm = hypre_ParAMGDataNewComm(amg_data);
      hypre_ParVector *f = hypre_ParAMGDataFArray(amg_data)[level];
      hypre_ParVector *u = hypre_ParAMGDataUArray(amg_data)[level];
      HYPRE_Real *A_mat = hypre_ParAMGDataAMat(amg_data);
      HYPRE_Real *b_vec = hypre_ParAMGDataBVec(amg_data);
      HYPRE_Real *f_data = hypre_VectorData(hypre_ParVectorLocalVector(f));
      HYPRE_Real *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
      HYPRE_Real *A_tmp;
      HYPRE_Int *comm_info = hypre_ParAMGDataCommInfo(amg_data);
      HYPRE_Int *displs, *info;
      HYPRE_Int n_global = hypre_ParCSRMatrixGlobalNumRows(A);
      HYPRE_Int new_num_procs, i;
      HYPRE_Int first_index = hypre_ParCSRMatrixFirstRowIndex(A);

      hypre_MPI_Comm_size(new_comm, &new_num_procs);
      /* Layout cached by hypre_GaussElimSetup: row counts, then offsets. */
      info = &comm_info[0];
      displs = &comm_info[new_num_procs];

      /* Assemble the full right-hand side on every rank. */
      hypre_MPI_Allgatherv ( f_data, n, HYPRE_MPI_REAL,
                             b_vec, info, displs,
                             HYPRE_MPI_REAL, new_comm );

      /* gselim factors in place, so work on a scratch copy and keep the
       * stored matrix intact for later cycles. */
      A_tmp = hypre_CTAlloc (HYPRE_Real, n_global*n_global);
      for (i=0; i < n_global*n_global; i++)
      {
         A_tmp[i] = A_mat[i];
      }

      if (relax_type == 9)
      {
         error_flag = gselim(A_tmp, b_vec, n_global);
      }

      /* Extract this rank's portion of the global solution. */
      for (i = 0; i < n; i++)
      {
         u_data[i] = b_vec[first_index+i];
      }
      hypre_TFree(A_tmp);
   }
   if (error_flag) hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_GS_ELIM_SOLVE] += hypre_MPI_Wtime();
#endif
   return hypre_error_flag;
}
HYPRE_Int gselim(HYPRE_Real *A,
                 HYPRE_Real *x,
                 HYPRE_Int n)
{
   /* Solve the dense n x n system A * sol = x in place: A is overwritten by
    * its elimination factors and x is overwritten by the solution.
    * No pivoting is performed; a zero diagonal entry is simply skipped.
    * Returns 0 on success; 1 only for a singular 1x1 system. */
   HYPRE_Int err_flag = 0;
   HYPRE_Int piv, row, col;
   HYPRE_Real inv_pivot, mult;

   /* 1x1 system: direct division, or flag the singular case. */
   if (n == 1)
   {
      if (A[0] == 0.0)
      {
         err_flag = 1;
      }
      else
      {
         x[0] = x[0]/A[0];
      }
      return(err_flag);
   }

   /* Forward elimination: clear the sub-diagonal, column by column. */
   for (piv = 0; piv < n-1; piv++)
   {
      if (A[piv*n+piv] == 0.0)
      {
         continue; /* zero pivot: column is skipped (historical behavior) */
      }
      inv_pivot = 1.0/A[piv*n+piv];
      for (row = piv+1; row < n; row++)
      {
         if (A[row*n+piv] == 0.0)
         {
            continue;
         }
         mult = A[row*n+piv]*inv_pivot;
         for (col = piv+1; col < n; col++)
         {
            A[row*n+col] -= mult * A[piv*n+col];
         }
         /* Apply the same elimination step to the right-hand side. */
         x[row] -= mult * x[piv];
      }
   }

   /* Back substitution, from the last row up to row 1. */
   for (piv = n-1; piv > 0; piv--)
   {
      if (A[piv*n+piv] == 0.0)
      {
         continue;
      }
      x[piv] /= A[piv*n+piv];
      for (row = 0; row < piv; row++)
      {
         if (A[row*n+piv] != 0.0)
         {
            x[row] -= x[piv] * A[row*n+piv];
         }
      }
   }
   /* Row 0 only needs the final division. */
   if (A[0] != 0.0)
   {
      x[0] /= A[0];
   }
   return(err_flag);
}
|
a.24.1.c | /* { dg-do compile } */
/* { dg-require-effective-target tls } */
extern int omp_get_num_threads (void);
int x, y, t, z[1000];
#pragma omp threadprivate(x)
void
a24 (int a)
{
const int c = 1;
int i = 0;
int l = 0;
#pragma omp parallel default(none) private(a) shared(z)
{
int j = omp_get_num_threads ();
/* O.K. - j is declared inside the parallel region, so it is private */
/* O.K. - a is listed in the private clause */
/* - z is listed in the shared clause */
x = c; /* O.K. - x is threadprivate */
/* - c has const-qualified type, so needs no clause */
z[i] = y;
/* { dg-error "'i' not specified" "" { target *-*-* } .-1 } */
/* { dg-error "enclosing 'parallel'" "" { target *-*-* } 13 } */
/* { dg-error "'y' not specified" "" { target *-*-* } 21 } */
#pragma omp for firstprivate(y)
for (i = 0; i < 10; i++)
{
z[i] = y; /* O.K. - i is the loop iteration variable (private) */
/* - y is listed in the firstprivate clause */
}
z[l] = t;
/* { dg-error "'l' not specified" "" { target *-*-* } .-1 } */
/* { dg-error "'t' not specified" "" { target *-*-* } .-2 } */
}
}
|
2d_simple_v2.c | #include <stdlib.h>
#include <stdio.h>
#include <omp.h>
int main()
{
   /* Allocate a 1x1 jagged 2-D array: arr[0] points at data. */
   int* data = malloc(sizeof(int));
   int** arr = malloc(sizeof(int*));
   if (data == NULL || arr == NULL)
   {
      /* free(NULL) is a no-op, so both calls are safe. */
      free(data);
      free(arr);
      return 1;
   }
   arr[0] = data;
#pragma omp parallel
   {
      /* NOTE(review): every thread writes and then reads arr[0][0] with no
       * synchronization. This data race appears to be the point of the file
       * (a race-detection benchmark), so it is deliberately kept. */
      arr[0][0] = 42;
      printf("%d\n", arr[0][0]);
   }
   free(data);
   free(arr);
   return 0;
}
|
38f9a6e_so12_advfsg_gcc.c | #define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include "omp.h"
#include <stdio.h>
#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))
/* Devito runtime descriptor for a gridded field: a raw payload pointer plus
 * per-dimension size metadata (the generated kernels cast `data` using the
 * entries of `size` as array extents). */
struct dataobj
{
void *restrict data;  /* base pointer to the field's payload */
int *size;            /* allocated extent per dimension (used for the casts below) */
int *npsize;          /* presumably non-padded size per dimension -- TODO confirm */
int *dsize;           /* presumably domain size per dimension -- TODO confirm */
int *hsize;           /* presumably halo size per dimension -- TODO confirm */
int *hofs;            /* presumably halo offsets per dimension -- TODO confirm */
int *oofs;            /* presumably owned-region offsets -- TODO confirm */
};
/* Wall-clock accumulators (seconds) for the timed code sections. */
struct profiler
{
double section0; /* coefficient precomputation (see ForwardTTI section0) */
double section1; /* stencil time loop (see ForwardTTI section1) */
double section2; /* not referenced in the visible code */
};
void bf0(float *restrict r118_vec, float *restrict r119_vec, float *restrict r74_vec, float *restrict r75_vec, float *restrict r76_vec, float *restrict r77_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, const int x_size, const int y_size, const int z_size, const int time, const int t0, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int tw);
void bf1(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r118_vec, float *restrict r119_vec, float *restrict r73_vec, float *restrict r74_vec, float *restrict r75_vec, float *restrict r76_vec, float *restrict r77_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int x_size, const int y_size, const int z_size, const int time, const int t0, const int t1, const int t2, const int x1_blk0_size, const int x_M, const int x_m, const int y1_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int tw);
int ForwardTTI(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, struct dataobj *restrict delta_vec, const float dt, struct dataobj *restrict epsilon_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict phi_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict theta_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, const int x_size, const int y_size, const int z_size, const int sp_zi_m, const int time_M, const int time_m, struct profiler *timers, const int x1_blk0_size, const int x_M, const int x_m, const int y1_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int nthreads_nonaffine)
{
/* Devito-generated forward TTI wave-propagation driver.
 * Section 0 precomputes trigonometric/anisotropy coefficient arrays
 * (r73..r77) from the model fields delta, theta and phi; the main loop then
 * sweeps time-skewed space tiles, calling bf0 (rotated first derivatives
 * into r118/r119) and bf1 (stencil update + source injection) per step.
 * NOTE(review): generated code -- kept byte-for-byte; comments only.
 * NOTE(review): several of the local casts below (nnz_sp_source_mask,
 * save_src_u/v, source_id, source_mask, sp_source_mask, u, v) are not
 * referenced in this function; the *_vec structs are forwarded to bf0/bf1
 * instead. */
int(*restrict block_sizes) __attribute__((aligned(64))) = (int(*))block_sizes_vec->data;
float(*restrict delta)[delta_vec->size[1]][delta_vec->size[2]] __attribute__((aligned(64))) = (float(*)[delta_vec->size[1]][delta_vec->size[2]])delta_vec->data;
int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
float(*restrict phi)[phi_vec->size[1]][phi_vec->size[2]] __attribute__((aligned(64))) = (float(*)[phi_vec->size[1]][phi_vec->size[2]])phi_vec->data;
float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
float(*restrict save_src_v)[save_src_v_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_v_vec->size[1]])save_src_v_vec->data;
int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
float(*restrict theta)[theta_vec->size[1]][theta_vec->size[2]] __attribute__((aligned(64))) = (float(*)[theta_vec->size[1]][theta_vec->size[2]])theta_vec->data;
float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
float(*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data;
/* Scratch arrays sized (x_size+6)*(y_size+6)*(z_size+6): a 3-point halo on
 * each side.
 * NOTE(review): posix_memalign return values are not checked -- on failure
 * the pointers would be used uninitialized; confirm allocation cannot fail
 * in the intended deployment. */
float (*r73)[y_size + 3 + 3][z_size + 3 + 3];
posix_memalign((void**)&r73, 64, sizeof(float[x_size + 3 + 3][y_size + 3 + 3][z_size + 3 + 3]));
float (*r74)[y_size + 3 + 3][z_size + 3 + 3];
posix_memalign((void**)&r74, 64, sizeof(float[x_size + 3 + 3][y_size + 3 + 3][z_size + 3 + 3]));
float (*r75)[y_size + 3 + 3][z_size + 3 + 3];
posix_memalign((void**)&r75, 64, sizeof(float[x_size + 3 + 3][y_size + 3 + 3][z_size + 3 + 3]));
float (*r76)[y_size + 3 + 3][z_size + 3 + 3];
posix_memalign((void**)&r76, 64, sizeof(float[x_size + 3 + 3][y_size + 3 + 3][z_size + 3 + 3]));
float (*r77)[y_size + 3 + 3][z_size + 3 + 3];
posix_memalign((void**)&r77, 64, sizeof(float[x_size + 3 + 3][y_size + 3 + 3][z_size + 3 + 3]));
float (*r118)[y_size + 3 + 3][z_size + 3 + 3];
posix_memalign((void**)&r118, 64, sizeof(float[x_size + 3 + 3][y_size + 3 + 3][z_size + 3 + 3]));
float (*r119)[y_size + 3 + 3][z_size + 3 + 3];
posix_memalign((void**)&r119, 64, sizeof(float[x_size + 3 + 3][y_size + 3 + 3][z_size + 3 + 3]));
/* Flush denormal numbers to zero in hardware */
_MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
_MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
struct timeval start_section0, end_section0;
gettimeofday(&start_section0, NULL);
/* Begin section0: fill the rotation/anisotropy coefficient arrays once. */
#pragma omp parallel num_threads(nthreads)
{
#pragma omp for collapse(1) schedule(static,1)
for (int x = x_m - 3; x <= x_M + 3; x += 1)
{
for (int y = y_m - 3; y <= y_M + 3; y += 1)
{
#pragma omp simd aligned(delta,phi,theta:32)
for (int z = z_m - 3; z <= z_M + 3; z += 1)
{
r73[x + 3][y + 3][z + 3] = sqrt(2*delta[x + 12][y + 12][z + 12] + 1);
r74[x + 3][y + 3][z + 3] = cos(theta[x + 12][y + 12][z + 12]);
r75[x + 3][y + 3][z + 3] = sin(phi[x + 12][y + 12][z + 12]);
r76[x + 3][y + 3][z + 3] = sin(theta[x + 12][y + 12][z + 12]);
r77[x + 3][y + 3][z + 3] = cos(phi[x + 12][y + 12][z + 12]);
}
}
}
}
/* End section0 */
gettimeofday(&end_section0, NULL);
timers->section0 += (double)(end_section0.tv_sec - start_section0.tv_sec) + (double)(end_section0.tv_usec - start_section0.tv_usec) / 1000000;
/* Tile/block geometry comes from the runtime block_sizes array (not from
 * the x1_blk0_size / y1_blk0_size parameters, which are unused here). */
int y0_blk0_size = block_sizes[3];
int x0_blk0_size = block_sizes[2];
int yb_size = block_sizes[1];
int xb_size = block_sizes[0];
int sf = 6;
int t_blk_size = 2 * sf * (time_M - time_m);
printf(" Tiles: %d, %d ::: Blocks %d, %d \n", xb_size, yb_size, x0_blk0_size, y0_blk0_size);
/* Time-tiled wavefront sweep: spatial tile origins (xb, yb) are skewed by
 * sf per time step; t0/t1/t2 cycle through the 3 time buffers of u and v,
 * and tw is the wrapped (un-skewed) time index passed to the kernels. */
for (int t_blk = time_m; t_blk <= 1 + sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block
{
for (int xb = x_m - 3 ; xb <= (x_M + 3 + sf * (time_M - time_m)); xb += xb_size)
{
//printf(" Change of outer xblock %d \n", xb);
for (int yb = y_m - 3 ; yb <= (y_M + 3 + sf * (time_M - time_m)); yb += yb_size)
{
//printf(" Timestep tw: %d, Updating x: %d y: %d \n", xb, yb);
for (int time = t_blk, t0 = (time) % (3), t1 = (time + 2) % (3), t2 = (time + 1) % (3); time <= 2 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1))) % (3), t1 = (((time / sf) % (time_M - time_m + 1)) + 2) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3))
{
int tw = ((time / sf) % (time_M - time_m + 1));
struct timeval start_section1, end_section1;
gettimeofday(&start_section1, NULL);
/* Begin section1 */
bf0((float *)r118, (float *)r119, (float *)r74, (float *)r75, (float *)r76, (float *)r77, u_vec, v_vec, x_size, y_size, z_size, time, t0, x0_blk0_size, x_M + 3, x_m - 3, y0_blk0_size, y_M + 3, y_m - 3, z_M, z_m, nthreads, xb, yb, xb_size, yb_size, tw);
//printf("\n BF0 - 1 IS OVER");
/*==============================================*/
bf1(damp_vec, dt, epsilon_vec, (float *)r118, (float *)r119, (float *)r73, (float *)r74, (float *)r75, (float *)r76, (float *)r77, u_vec, v_vec, vp_vec, nnz_sp_source_mask_vec, sp_source_mask_vec, save_src_u_vec, save_src_v_vec, source_id_vec, source_mask_vec, x_size, y_size, z_size, time, t0, t1, t2, x0_blk0_size, x_M, x_m, y0_blk0_size, y_M, y_m , z_M, z_m, sp_zi_m, nthreads, xb, yb, xb_size, yb_size, tw);
//printf("\n BF1 - 1 IS OVER");
/* End section1 */
gettimeofday(&end_section1, NULL);
timers->section1 += (double)(end_section1.tv_sec - start_section1.tv_sec) + (double)(end_section1.tv_usec - start_section1.tv_usec) / 1000000;
}
}
}
}
free(r77);
free(r76);
free(r75);
free(r74);
free(r73);
free(r118);
free(r119);
return 0;
}
void bf0(float *restrict r118_vec, float *restrict r119_vec, float *restrict r74_vec, float *restrict r75_vec, float *restrict r76_vec, float *restrict r77_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, const int x_size, const int y_size, const int z_size, const int time, const int t0, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int tw)
{
/* Computes rotated first spatial derivatives of u and v into the scratch
 * arrays r118 and r119 for one time-skewed (xb, yb) tile: 6th-order
 * one-sided finite-difference stencils along x, y and z are combined with
 * the precomputed rotation coefficients r74..r77 (cos/sin of theta/phi).
 * The x/y loop bounds are skewed by `time`; array indices subtract `time`
 * again to address the unskewed data. Reads u/v at time slot t0 only. */
float (*restrict r118)[y_size + 3 + 3][z_size + 3 + 3] __attribute__ ((aligned (64))) = (float (*)[y_size + 3 + 3][z_size + 3 + 3]) r118_vec;
float (*restrict r119)[y_size + 3 + 3][z_size + 3 + 3] __attribute__ ((aligned (64))) = (float (*)[y_size + 3 + 3][z_size + 3 + 3]) r119_vec;
float (*restrict r74)[y_size + 3 + 3][z_size + 3 + 3] __attribute__ ((aligned (64))) = (float (*)[y_size + 3 + 3][z_size + 3 + 3]) r74_vec;
float (*restrict r75)[y_size + 3 + 3][z_size + 3 + 3] __attribute__ ((aligned (64))) = (float (*)[y_size + 3 + 3][z_size + 3 + 3]) r75_vec;
float (*restrict r76)[y_size + 3 + 3][z_size + 3 + 3] __attribute__ ((aligned (64))) = (float (*)[y_size + 3 + 3][z_size + 3 + 3]) r76_vec;
float (*restrict r77)[y_size + 3 + 3][z_size + 3 + 3] __attribute__ ((aligned (64))) = (float (*)[y_size + 3 + 3][z_size + 3 + 3]) r77_vec;
float (*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]]) u_vec->data;
float (*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]]) v_vec->data;
#pragma omp parallel num_threads(nthreads)
{
/* Cache blocks within the tile, clipped against both the tile edge and
 * the (skewed) domain bounds. */
#pragma omp for collapse(1) schedule(dynamic, 1)
for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size)
{
for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size)
{
//printf(" Change of inner x0_blk0 %d \n", x0_blk0);
for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size - 1)), (x0_blk0 + x0_blk0_size - 1)); x++)
{
//printf(" bf0 Timestep tw: %d, Updating x: %d \n", tw, x - time + 1);
for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size - 1)), (y0_blk0 + y0_blk0_size - 1)); y++)
{
#pragma omp simd aligned(u, v : 32)
for (int z = z_m - 3; z <= z_M + 3; z += 1)
{
//printf(" bf0 Updating x: %d y: %d z: %d \n", x - time + 2, y - time + 2, z + 2);
r118[x - time + 3][y - time + 3][z + 3] = -(1.66666669e-3F*(-u[t0][x - time + 9][y - time + 12][z + 12] + u[t0][x - time + 15][y - time + 12][z + 12]) + 1.50000002e-2F*(u[t0][x - time + 10][y - time + 12][z + 12] - u[t0][x - time + 14][y - time + 12][z + 12]) + 7.50000011e-2F*(-u[t0][x - time + 11][y - time + 12][z + 12] + u[t0][x - time + 13][y - time + 12][z + 12]))*r76[x - time + 3][y - time + 3][z + 3]*r77[x - time + 3][y - time + 3][z + 3] - (1.66666669e-3F*(-u[t0][x - time + 12][y - time + 9][z + 12] + u[t0][x - time + 12][y - time + 15][z + 12]) + 1.50000002e-2F*(u[t0][x - time + 12][y - time + 10][z + 12] - u[t0][x - time + 12][y - time + 14][z + 12]) + 7.50000011e-2F*(-u[t0][x - time + 12][y - time + 11][z + 12] + u[t0][x - time + 12][y - time + 13][z + 12]))*r75[x - time + 3][y - time + 3][z + 3]*r76[x - time + 3][y - time + 3][z + 3] - (1.66666669e-3F*(-u[t0][x - time + 12][y - time + 12][z + 9] + u[t0][x - time + 12][y - time + 12][z + 15]) + 1.50000002e-2F*(u[t0][x - time + 12][y - time + 12][z + 10] - u[t0][x - time + 12][y - time + 12][z + 14]) + 7.50000011e-2F*(-u[t0][x - time + 12][y - time + 12][z + 11] + u[t0][x - time + 12][y - time + 12][z + 13]))*r74[x - time + 3][y - time + 3][z + 3];
r119[x - time + 3][y - time + 3][z + 3] = -(1.66666669e-3F*(-v[t0][x - time + 9][y - time + 12][z + 12] + v[t0][x - time + 15][y - time + 12][z + 12]) + 1.50000002e-2F*(v[t0][x - time + 10][y - time + 12][z + 12] - v[t0][x - time + 14][y - time + 12][z + 12]) + 7.50000011e-2F*(-v[t0][x - time + 11][y - time + 12][z + 12] + v[t0][x - time + 13][y - time + 12][z + 12]))*r76[x - time + 3][y - time + 3][z + 3]*r77[x - time + 3][y - time + 3][z + 3] - (1.66666669e-3F*(-v[t0][x - time + 12][y - time + 9][z + 12] + v[t0][x - time + 12][y - time + 15][z + 12]) + 1.50000002e-2F*(v[t0][x - time + 12][y - time + 10][z + 12] - v[t0][x - time + 12][y - time + 14][z + 12]) + 7.50000011e-2F*(-v[t0][x - time + 12][y - time + 11][z + 12] + v[t0][x - time + 12][y - time + 13][z + 12]))*r75[x - time + 3][y - time + 3][z + 3]*r76[x - time + 3][y - time + 3][z + 3] - (1.66666669e-3F*(-v[t0][x - time + 12][y - time + 12][z + 9] + v[t0][x - time + 12][y - time + 12][z + 15]) + 1.50000002e-2F*(v[t0][x - time + 12][y - time + 12][z + 10] - v[t0][x - time + 12][y - time + 12][z + 14]) + 7.50000011e-2F*(-v[t0][x - time + 12][y - time + 12][z + 11] + v[t0][x - time + 12][y - time + 12][z + 13]))*r74[x - time + 3][y - time + 3][z + 3];
//printf("bf0 Timestep tw: %d, Updating x: %d y: %d value: %f \n", tw, x - time + 2, y - time + 2, v[t0][x - time + 9][y - time + 8][z + 8]);
}
}
}
}
}
}
}
// bf1: second pass of a Devito-generated, time-blocked wave-equation update.
// Consumes the spatial-derivative scratch arrays r118 (for u) and r119 (for v)
// produced by the preceding bf0 pass, computes the damped second-order time
// update of u and v into time slot t2, then injects precomputed source terms
// (save_src_u / save_src_v) at the sparse z locations encoded by
// nnz_sp_source_mask / sp_source_mask / source_id / source_mask.
// NOTE(review): auto-generated code — do not hand-edit the stencil
// expressions; regenerate from the Devito operator instead.
void bf1(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r118_vec, float *restrict r119_vec, float *restrict r73_vec, float *restrict r74_vec, float *restrict r75_vec, float *restrict r76_vec, float *restrict r77_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int x_size, const int y_size, const int z_size, const int time, const int t0, const int t1, const int t2, const int x1_blk0_size, const int x_M, const int x_m, const int y1_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int tw)
{
// Cast the flat dataobj buffers to typed multidimensional views.  The r*
// scratch arrays carry a halo of 3 on each side; u/v/damp/epsilon/vp are
// indexed with their own (larger) padding offsets below.
float (*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[damp_vec->size[1]][damp_vec->size[2]]) damp_vec->data;
float (*restrict epsilon)[epsilon_vec->size[1]][epsilon_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[epsilon_vec->size[1]][epsilon_vec->size[2]]) epsilon_vec->data;
float (*restrict r118)[y_size + 3 + 3][z_size + 3 + 3] __attribute__ ((aligned (64))) = (float (*)[y_size + 3 + 3][z_size + 3 + 3]) r118_vec;
float (*restrict r119)[y_size + 3 + 3][z_size + 3 + 3] __attribute__ ((aligned (64))) = (float (*)[y_size + 3 + 3][z_size + 3 + 3]) r119_vec;
float (*restrict r73)[y_size + 3 + 3][z_size + 3 + 3] __attribute__ ((aligned (64))) = (float (*)[y_size + 3 + 3][z_size + 3 + 3]) r73_vec;
float (*restrict r74)[y_size + 3 + 3][z_size + 3 + 3] __attribute__ ((aligned (64))) = (float (*)[y_size + 3 + 3][z_size + 3 + 3]) r74_vec;
float (*restrict r75)[y_size + 3 + 3][z_size + 3 + 3] __attribute__ ((aligned (64))) = (float (*)[y_size + 3 + 3][z_size + 3 + 3]) r75_vec;
float (*restrict r76)[y_size + 3 + 3][z_size + 3 + 3] __attribute__ ((aligned (64))) = (float (*)[y_size + 3 + 3][z_size + 3 + 3]) r76_vec;
float (*restrict r77)[y_size + 3 + 3][z_size + 3 + 3] __attribute__ ((aligned (64))) = (float (*)[y_size + 3 + 3][z_size + 3 + 3]) r77_vec;
float (*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]]) u_vec->data;
float (*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]]) v_vec->data;
float (*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[vp_vec->size[1]][vp_vec->size[2]]) vp_vec->data;
int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
float(*restrict save_src_v)[save_src_v_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_v_vec->size[1]])save_src_v_vec->data;
int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
//printf("In bf1 \n");
#pragma omp parallel num_threads(nthreads)
{
// Loop indices are shifted by `time` (time-skewed wavefront blocking);
// the intersection with [xb, xb+xb_size] / [yb, yb+yb_size] restricts the
// update to the current space-time tile.
#pragma omp for collapse(1) schedule(dynamic, 1)
for (int x1_blk0 = max((x_m + time), xb - 0); x1_blk0 <= +min((x_M + time), (xb - 0 + xb_size)); x1_blk0 += x1_blk0_size)
{
//printf(" Change of inner x1_blk0 %d \n", x1_blk0);
for (int y1_blk0 = max((y_m + time), yb - 0); y1_blk0 <= +min((y_M + time), (yb - 0 + yb_size)); y1_blk0 += y1_blk0_size)
{
for (int x = x1_blk0; x <= min(min((x_M + time), (xb - 0 + xb_size - 1)), (x1_blk0 + x1_blk0_size - 1)); x++)
{
//printf(" bf1 Timestep tw: %d, Updating x: %d \n", tw, x - time + 4);
for (int y = y1_blk0; y <= min(min((y_M + time), (yb - 0 + yb_size - 1)), (y1_blk0 + y1_blk0_size - 1)); y++)
{
//printf(" bf1 Timestep tw: %d, Updating x: %d y: %d \n", tw, x - time + 4, y - time + 4);
#pragma omp simd aligned(damp, epsilon, u, v, vp : 32)
for (int z = z_m; z <= z_M; z += 1)
{
//printf(" bf1 Updating x: %d y: %d z: %d \n", x - time + 4, y - time + 4, z + 4);
//printf(" bf1 Updating x: %d y: %d z: %d \n", x - time + 4, y - time + 4, z + 4);
// r130 = 1/dt, r129 = 1/dt^2 (time-derivative denominators).
float r130 = 1.0/dt;
float r129 = 1.0/(dt*dt);
// r126/r127/r128: 6th-order finite-difference combinations of the
// r119 (v-derivative) scratch array, weighted by the r74..r77
// rotation/metric factors.
float r128 = 1.50000002e-2F*(-r119[x - time + 1][y - time + 3][z + 3]*r76[x - time + 1][y - time + 3][z + 3]*r77[x - time + 1][y - time + 3][z + 3] - r119[x - time + 3][y - time + 1][z + 3]*r75[x - time + 3][y - time + 1][z + 3]*r76[x - time + 3][y - time + 1][z + 3] - r119[x - time + 3][y - time + 3][z + 1]*r74[x - time + 3][y - time + 3][z + 1] + r119[x - time + 3][y - time + 3][z + 5]*r74[x - time + 3][y - time + 3][z + 5] + r119[x - time + 3][y - time + 5][z + 3]*r75[x - time + 3][y - time + 5][z + 3]*r76[x - time + 3][y - time + 5][z + 3] + r119[x - time + 5][y - time + 3][z + 3]*r76[x - time + 5][y - time + 3][z + 3]*r77[x - time + 5][y - time + 3][z + 3]);
float r127 = 1.66666669e-3F*(r119[x - time][y - time + 3][z + 3]*r76[x - time][y - time + 3][z + 3]*r77[x - time][y - time + 3][z + 3] + r119[x - time + 3][y - time][z + 3]*r75[x - time + 3][y - time][z + 3]*r76[x - time + 3][y - time][z + 3] + r119[x - time + 3][y - time + 3][z]*r74[x - time + 3][y - time + 3][z] - r119[x - time + 3][y - time + 3][z + 6]*r74[x - time + 3][y - time + 3][z + 6] - r119[x - time + 3][y - time + 6][z + 3]*r75[x - time + 3][y - time + 6][z + 3]*r76[x - time + 3][y - time + 6][z + 3] - r119[x - time + 6][y - time + 3][z + 3]*r76[x - time + 6][y - time + 3][z + 3]*r77[x - time + 6][y - time + 3][z + 3]);
float r126 = 7.50000011e-2F*(r119[x - time + 2][y - time + 3][z + 3]*r76[x - time + 2][y - time + 3][z + 3]*r77[x - time + 2][y - time + 3][z + 3] + r119[x - time + 3][y - time + 2][z + 3]*r75[x - time + 3][y - time + 2][z + 3]*r76[x - time + 3][y - time + 2][z + 3] + r119[x - time + 3][y - time + 3][z + 2]*r74[x - time + 3][y - time + 3][z + 2] - r119[x - time + 3][y - time + 3][z + 4]*r74[x - time + 3][y - time + 3][z + 4] - r119[x - time + 3][y - time + 4][z + 3]*r75[x - time + 3][y - time + 4][z + 3]*r76[x - time + 3][y - time + 4][z + 3] - r119[x - time + 4][y - time + 3][z + 3]*r76[x - time + 4][y - time + 3][z + 3]*r77[x - time + 4][y - time + 3][z + 3]);
// r125 = 1/vp^2 (slowness squared); r124 = update denominator
// combining the mass term and the damping term.
float r125 = pow(vp[x - time + 12][y - time + 12][z + 12], -2);
float r124 = 1.0/(r125*r129 + r130*damp[x - time + 1][y - time + 1][z + 1]);
// r123: the analogous derivative combination for u (r118) plus a
// 12th-order isotropic Laplacian stencil applied directly to u.
float r123 = 1.66666669e-3F*(-r118[x - time][y - time + 3][z + 3]*r76[x - time][y - time + 3][z + 3]*r77[x - time][y - time + 3][z + 3] - r118[x - time + 3][y - time][z + 3]*r75[x - time + 3][y - time][z + 3]*r76[x - time + 3][y - time][z + 3] - r118[x - time + 3][y - time + 3][z]*r74[x - time + 3][y - time + 3][z] + r118[x - time + 3][y - time + 3][z + 6]*r74[x - time + 3][y - time + 3][z + 6] + r118[x - time + 3][y - time + 6][z + 3]*r75[x - time + 3][y - time + 6][z + 3]*r76[x - time + 3][y - time + 6][z + 3] + r118[x - time + 6][y - time + 3][z + 3]*r76[x - time + 6][y - time + 3][z + 3]*r77[x - time + 6][y - time + 3][z + 3]) + 1.50000002e-2F*(r118[x - time + 1][y - time + 3][z + 3]*r76[x - time + 1][y - time + 3][z + 3]*r77[x - time + 1][y - time + 3][z + 3] + r118[x - time + 3][y - time + 1][z + 3]*r75[x - time + 3][y - time + 1][z + 3]*r76[x - time + 3][y - time + 1][z + 3] + r118[x - time + 3][y - time + 3][z + 1]*r74[x - time + 3][y - time + 3][z + 1] - r118[x - time + 3][y - time + 3][z + 5]*r74[x - time + 3][y - time + 3][z + 5] - r118[x - time + 3][y - time + 5][z + 3]*r75[x - time + 3][y - time + 5][z + 3]*r76[x - time + 3][y - time + 5][z + 3] - r118[x - time + 5][y - time + 3][z + 3]*r76[x - time + 5][y - time + 3][z + 3]*r77[x - time + 5][y - time + 3][z + 3]) + 7.50000011e-2F*(-r118[x - time + 2][y - time + 3][z + 3]*r76[x - time + 2][y - time + 3][z + 3]*r77[x - time + 2][y - time + 3][z + 3] - r118[x - time + 3][y - time + 2][z + 3]*r75[x - time + 3][y - time + 2][z + 3]*r76[x - time + 3][y - time + 2][z + 3] - r118[x - time + 3][y - time + 3][z + 2]*r74[x - time + 3][y - time + 3][z + 2] + r118[x - time + 3][y - time + 3][z + 4]*r74[x - time + 3][y - time + 3][z + 4] + r118[x - time + 3][y - time + 4][z + 3]*r75[x - time + 3][y - time + 4][z + 3]*r76[x - time + 3][y - time + 4][z + 3] + r118[x - time + 4][y - time + 3][z + 3]*r76[x - time + 4][y - time + 3][z + 3]*r77[x - time + 4][y - time + 3][z + 3]) - 6.01250588e-7F*(u[t0][x - time + 6][y - 
time + 12][z + 12] + u[t0][x - time + 12][y - time + 6][z + 12] + u[t0][x - time + 12][y - time + 12][z + 6] + u[t0][x - time + 12][y - time + 12][z + 18] + u[t0][x - time + 12][y - time + 18][z + 12] + u[t0][x - time + 18][y - time + 12][z + 12]) + 1.03896102e-5F*(u[t0][x - time + 7][y - time + 12][z + 12] + u[t0][x - time + 12][y - time + 7][z + 12] + u[t0][x - time + 12][y - time + 12][z + 7] + u[t0][x - time + 12][y - time + 12][z + 17] + u[t0][x - time + 12][y - time + 17][z + 12] + u[t0][x - time + 17][y - time + 12][z + 12]) - 8.92857123e-5F*(u[t0][x - time + 8][y - time + 12][z + 12] + u[t0][x - time + 12][y - time + 8][z + 12] + u[t0][x - time + 12][y - time + 12][z + 8] + u[t0][x - time + 12][y - time + 12][z + 16] + u[t0][x - time + 12][y - time + 16][z + 12] + u[t0][x - time + 16][y - time + 12][z + 12]) + 5.29100517e-4F*(u[t0][x - time + 9][y - time + 12][z + 12] + u[t0][x - time + 12][y - time + 9][z + 12] + u[t0][x - time + 12][y - time + 12][z + 9] + u[t0][x - time + 12][y - time + 12][z + 15] + u[t0][x - time + 12][y - time + 15][z + 12] + u[t0][x - time + 15][y - time + 12][z + 12]) - 2.67857137e-3F*(u[t0][x - time + 10][y - time + 12][z + 12] + u[t0][x - time + 12][y - time + 10][z + 12] + u[t0][x - time + 12][y - time + 12][z + 10] + u[t0][x - time + 12][y - time + 12][z + 14] + u[t0][x - time + 12][y - time + 14][z + 12] + u[t0][x - time + 14][y - time + 12][z + 12]) + 1.71428568e-2F*(u[t0][x - time + 11][y - time + 12][z + 12] + u[t0][x - time + 12][y - time + 11][z + 12] + u[t0][x - time + 12][y - time + 12][z + 11] + u[t0][x - time + 12][y - time + 12][z + 13] + u[t0][x - time + 12][y - time + 13][z + 12] + u[t0][x - time + 13][y - time + 12][z + 12]) - 8.94833313e-2F*u[t0][x - time + 12][y - time + 12][z + 12];
// r116/r117: discrete second time derivatives of u and v (missing
// the t2 term, which is solved for below).
float r116 = r129*(-2.0F*u[t0][x - time + 12][y - time + 12][z + 12] + u[t1][x - time + 12][y - time + 12][z + 12]);
float r117 = r129*(-2.0F*v[t0][x - time + 12][y - time + 12][z + 12] + v[t1][x - time + 12][y - time + 12][z + 12]);
// Solve the damped update for the new time level t2; the u equation
// scales the spatial term by (1 + 2*epsilon) (anisotropy).
u[t2][x - time + 12][y - time + 12][z + 12] = r124*((-r116)*r125 + r123*(2*epsilon[x - time + 12][y - time + 12][z + 12] + 1) + r130*(damp[x - time + 1][y - time + 1][z + 1]*u[t0][x - time + 12][y - time + 12][z + 12]) + (r126 + r127 + r128)*r73[x - time + 3][y - time + 3][z + 3]);
v[t2][x - time + 12][y - time + 12][z + 12] = r124*((-r117)*r125 + r123*r73[x - time + 3][y - time + 3][z + 3] + r126 + r127 + r128 + r130*(damp[x - time + 1][y - time + 1][z + 1]*v[x - time + 12][y - time + 12][z + 12]));
}
// Sparse source injection: iterate only over the nonzero z entries
// recorded for this (x, y) column.
//int sp_zi_M = nnz_sp_source_mask[x - time][y - time] - 1;
for (int sp_zi = sp_zi_m; sp_zi <= nnz_sp_source_mask[x - time][y - time] - 1; sp_zi += 1)
{
int zind = sp_source_mask[x - time][y - time][sp_zi];
float r0 = save_src_u[tw][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
//#pragma omp atomic update
u[t2][x - time + 12][y - time + 12][zind + 12] += r0;
float r1 = save_src_v[tw][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
//#pragma omp atomic update
v[t2][x - time + 12][y - time + 12][zind + 12] += r1;
// NOTE(review): this debug print reports zind + 8 but the injection
// above writes at zind + 12 — confirm which offset is intended.
printf("Source injection at time %d , at : x: %d, y: %d, %d, %f, %f \n", tw, x - time + 12, y - time + 12, zind + 8, r0, r1);
}
}
}
}
}
}
}
|
GB_unaryop__abs_uint8_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint8_uint8
// op(A') function: GB_tran__abs_uint8_uint8
// C type: uint8_t
// A type: uint8_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint8_t z = (uint8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = op (cast (Ax [p])) for p = 0..anz-1, parallelized with OpenMP.
// For this generated instance both the cast and the operator are the
// identity on uint8_t (GB_CASTING / GB_OP above), so effectively
// Cx [p] = Ax [p].  Returns GrB_NO_VALUE when the operator is disabled
// at compile time via GB_DISABLE, otherwise GrB_SUCCESS.
GrB_Info GB_unop__abs_uint8_uint8
(
uint8_t *restrict Cx,       // output array, anz entries
const uint8_t *restrict Ax, // input array, anz entries
int64_t anz,                // number of entries to process
int nthreads                // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast, and apply the unary operator
// (identity here).  The actual transpose loop is textually included from
// GB_unaryop_transpose.c, specialized by the GB_* macros defined above.
// Returns GrB_NO_VALUE when disabled via GB_DISABLE, else GrB_SUCCESS.
GrB_Info GB_tran__abs_uint8_uint8
(
GrB_Matrix C,                     // output matrix
const GrB_Matrix A,               // input matrix
int64_t **Rowcounts,              // per-slice row counts — presumably one array per slice; see GB_unaryop_transpose.c (TODO confirm)
GBI_single_iterator Iter,         // iterator over A — semantics defined in GB_iterator.h
const int64_t *restrict A_slice,  // defines how A is sliced across naslice tasks — TODO confirm
int naslice                       // number of slices
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
binarySearch_openmp.c | #include "stdio.h"
#include "stdlib.h"
#include "string.h"
#include <stdint.h>   // uint32_t / uint64_t used by rdtsc()
#include <sys/time.h> // High resolution timer
#include <omp.h>
const int ARR_SIZE = 200000000; // Add 200,000,000 elements into array
int randomNums[ARR_SIZE]; // Array of random integers
int parallel_search_found = 0;
double omp_get_wtime(void);
// High resolution timer
// High resolution timer: serializing CPUID followed by RDTSC (x86 only).
// Returns the CPU time-stamp counter as a 64-bit value.
// Fixes (review): the file never included <stdint.h> although this function
// uses uint32_t/uint64_t (added at the top of the file); and plain C99
// `inline` without `static` provides no external definition, which can fail
// to link if the compiler declines to inline — `static inline` is the
// portable form for a file-local helper.
static inline uint64_t rdtsc()
{
    uint32_t lo, hi;
    __asm__ __volatile__(
        "xorl %%eax, %%eax\n"
        "cpuid\n"   /* serialize: prevents out-of-order execution past here */
        "rdtsc\n"   /* EDX:EAX = time-stamp counter */
        : "=a"(lo), "=d"(hi)
        :
        : "%ebx", "%ecx");
    return (uint64_t)hi << 32 | lo;
}
/*
void readInputFile() -
Takes contents from file input1.txt and places it into array randomNums[]
*/
/*
void readInputFile() -
Takes contents from file input1.txt and places it into array randomNums[].
Exits with a message if the file cannot be opened, or (fix) if the file
runs out of parseable integers before ARR_SIZE values have been read —
the original ignored the fscanf return value, silently leaving the tail
of randomNums[] uninitialized on a short or malformed file.
*/
void readInputFile()
{
    FILE *inputFile = fopen("input1.txt", "r");
    int i;

    if (inputFile == NULL)
    {
        printf("Error Reading File input1.txt\n");
        exit(0);
    }

    printf("Populating randomNums[] with data from input1.txt...\n");
    printf("(This may take a while)\n");

    // Place file contents into array randomNums[]
    for (i = 0; i < ARR_SIZE; i++)
    {
        if (fscanf(inputFile, "%d,", &randomNums[i]) != 1)
        {
            printf("Error: input1.txt ran out of data at element %d\n", i);
            fclose(inputFile);
            exit(0);
        }
    }

    printf("Finished inserting %d elements into randomNums[]\n", ARR_SIZE);
    fclose(inputFile);
    printf("\n");
}
/*
void binarySearch_serial() -
Performs serial binary search on randomNums[]
*/
/*
int binarySearch_serial(int searchVal) -
Classic iterative binary search over the whole (sorted) global
randomNums[] array.  Returns the index of searchVal, or -1 if absent.
*/
int binarySearch_serial(int searchVal)
{
    int lo = 0;
    int hi = ARR_SIZE - 1;

    while (lo <= hi)
    {
        // Overflow-safe midpoint
        int mid = lo + (hi - lo) / 2;

        if (randomNums[mid] == searchVal)
        {
            return mid; // found it
        }
        else if (randomNums[mid] < searchVal)
        {
            lo = mid + 1; // discard left half
        }
        else
        {
            hi = mid - 1; // discard right half
        }
    }

    // Element is not present
    return -1;
}
/*
void binarySearch_openmp() -
Performs parallel binary search using OpenMP on randomNums[]
*/
/*
int binarySearch_openmp(int first, int last, int searchVal) -
Iterative binary search restricted to randomNums[first..last]; each
OpenMP section calls this on its own slice of the (sorted) array.
Returns the matching index, or -1 when searchVal is not in the slice.
*/
int binarySearch_openmp(int first, int last, int searchVal)
{
    while (first <= last)
    {
        int middle = first + (last - first) / 2;
        int probe = randomNums[middle];

        if (probe == searchVal)
        {
            return middle; // found it
        }

        if (probe < searchVal)
        {
            first = middle + 1; // discard left half of the slice
        }
        else
        {
            last = middle - 1;  // discard right half of the slice
        }
    }

    // Element is not present in this slice
    return -1;
}
/*
void serial_work() -
Performs all work related to Serial code, including:
- All print statements
- Timing of Serial work
- Binary search function calls
*/
/*
void serial_work(int searchVal) -
Performs all work related to Serial code, including:
- All print statements
- Timing of Serial work
- Binary search function calls
*/
void serial_work(int searchVal)
{
    printf("\n****** Now beginning Serial work ******\n\n");
    printf("Starting binary search...\n");

    // Time the serial binary search
    double t_begin = omp_get_wtime();
    int idx = binarySearch_serial(searchVal); // Binary search here
    double t_end = omp_get_wtime();

    printf("Work took %f seconds\n", t_end - t_begin);

    // Report the outcome of the serial binary search
    if (idx == -1)
    {
        printf("Element %d not found\n", searchVal);
    }
    else
    {
        printf("Element %d found! At index %d\n", searchVal, idx);
    }
    printf("\n");
}
/*
void parallel_work() -
Performs all work related to Parallelized code, including:
- All print statements
- Timing of parallel work
- Binary search function calls
*/
/*
void parallel_work(int searchVal, int num_threads) -
Performs all work related to Parallelized code, including:
- All print statements
- Timing of parallel work
- Binary search function calls

Bug fix (review): `result` was read uninitialized (undefined behavior) and
the four per-section results were computed but never combined.  Each
section's result is now initialized to -1 ("not found in my slice") and the
first hit, if any, is selected after the parallel region.
*/
void parallel_work(int searchVal, int num_threads)
{
    int result = -1;
    double start, end, total_time;

    // For use with Parallel search
    int first = 0;
    int last = ARR_SIZE - 1;
    int middle = first + (last - first) / 2;

    // Array will be sliced into sections; -1 means "not found in my slice"
    int thread_one = -1, thread_two = -1, thread_three = -1, thread_four = -1;
    int quarter_slice = middle / 2;

    printf("\n****** Now beginning Parallel work with OpenMP ******\n\n");
    printf("Starting binary search...\n");

    start = omp_get_wtime();
#pragma omp parallel num_threads(num_threads)
    {
#pragma omp sections
        {
            /* Function parameters:
               binarySearch_openmp(first_index, last_index, search_value);
            */
#pragma omp section
            thread_one = binarySearch_openmp(0, quarter_slice, searchVal);
#pragma omp section
            thread_two = binarySearch_openmp(quarter_slice + 1, middle, searchVal);
#pragma omp section
            thread_three = binarySearch_openmp(middle + 1, quarter_slice * 3, searchVal);
#pragma omp section
            thread_four = binarySearch_openmp((quarter_slice * 3) + 1, last, searchVal);
        }
    }
    end = omp_get_wtime();
    total_time = end - start;
    printf("Work took %f seconds\n", total_time);

    // Combine the per-slice results: the slices partition the array, so at
    // most one of them can report a hit.
    if (thread_one != -1)
        result = thread_one;
    else if (thread_two != -1)
        result = thread_two;
    else if (thread_three != -1)
        result = thread_three;
    else if (thread_four != -1)
        result = thread_four;

    // Print results of parallel Binary search
    if (result != -1)
    {
        printf("Element %d found! At index %d\n", searchVal, result);
    }
    else
    {
        printf("Element %d not found\n", searchVal);
    }
    printf("\n");
}
/*
Program entry point: prompts for a search value and a thread count, then
runs the serial and OpenMP-parallel binary searches over the data read
from input1.txt.
Fix (review): the two scanf calls were unchecked, so non-numeric input
left searchVal/num_threads uninitialized (undefined behavior); both are
now validated, and num_threads must be >= 1.
*/
int main(int argc, char *argv[])
{
    int searchVal, num_threads;

    printf("Enter value to search for: \n");
    if (scanf("%d", &searchVal) != 1)
    {
        printf("Invalid input\n");
        return 1;
    }

    printf("How many threads to run on: \n");
    if (scanf("%d", &num_threads) != 1 || num_threads < 1)
    {
        printf("Invalid input\n");
        return 1;
    }

    // Read contents of input file and populate array
    readInputFile();
    // Perform all serial work
    serial_work(searchVal);
    // Rewrite contents to array for consistency purposes
    readInputFile();
    // Perform all parallelized work
    parallel_work(searchVal, num_threads);
    return 0;
}
|
direct.h | #include <math.h>
#include <stdio.h>
#include <iostream>
#include <sys/time.h>
#include <omp.h>
#define REAL double
double get_time (void);
REAL norm(REAL *x);
void cross(REAL *x, REAL *y, REAL *z);
void MV(REAL *M, REAL *V, REAL *res);
REAL dot_prod(REAL *x, REAL *y);
void axpy(REAL *x, REAL *y, REAL *z, REAL alpha, int sign, int N);
void ax(REAL *x, REAL *y, REAL alpha, int N);
void lineInt(REAL &PHI_K, REAL &PHI_V, REAL z, REAL x, REAL v1, REAL v2, REAL kappa, REAL *xk, REAL *wk, int K, int LorY);
void intSide(REAL &PHI_K, REAL &PHI_V, REAL *v1, REAL *v2, REAL p, REAL kappa, REAL *xk, REAL *wk, int K, int LorY);
void SA(REAL &PHI_K, REAL &PHI_V, REAL *y, REAL *x, REAL kappa, int same,
REAL K_diag, REAL V_diag, int LorY, REAL *xk, int xkSize, REAL *wk);
void computeDiagonal_cy(REAL *VL, int VLSize, REAL *KL, int KLSize, REAL *VY, int VYSize, REAL *KY, int KYSize,
REAL *triangle, int triangleSize, REAL *centers, int centersSize, REAL kappa,
REAL K_diag, REAL V_diag, REAL *xk, int xkSize, REAL *wk, int wkSize);
void GQ_fine(REAL &PHI_K, REAL &PHI_V, REAL *panel, REAL xi, REAL yi, REAL zi,
REAL kappa, REAL *Xk, REAL *Wk, int K_fine, REAL Area, int LorY);
void GQ_fineKt(REAL &PHI_Ktx, REAL &PHI_Kty, REAL &PHI_Ktz, REAL *panel,
REAL xi, REAL yi, REAL zi, REAL kappa, REAL *Xk, REAL *Wk,
int K_fine, REAL Area, int LorY);
void direct_c_cy(int LorY, REAL K_diag, REAL V_diag, int IorE, REAL *triangle, int triangleSize,
int *tri, int triSize, int *k, int kSize, REAL *xi, int xiSize, REAL *yi, int yiSize,
REAL *zi, int ziSize, REAL *s_xj, int s_xjSize, REAL *s_yj, int s_yjSize,
REAL *s_zj, int s_zjSize, REAL *xt, int xtSize, REAL *yt, int ytSize, REAL *zt, int ztSize,
REAL *m, int mSize, REAL *mx, int mxSize, REAL *my, int mySize, REAL *mz, int mzSize, REAL *mKclean, int mKcleanSize, REAL *mVclean, int mVcleanSize,
int *target, int targetSize,REAL *Area, int AreaSize, REAL *sglInt_int, int sglInt_intSize, REAL *sglInt_ext, int sglInt_extSize,
REAL *xk, int xkSize, REAL *wk, int wkSize, REAL *Xsk, int XskSize, REAL *Wsk, int WskSize,
REAL kappa, REAL threshold, REAL eps, REAL w0, int AI_int, REAL *phi_reac, int phi_reacSize);
void direct_sort_cy(REAL *K_aux, int K_auxSize, REAL *V_aux, int V_auxSize, int LorY, REAL K_diag, REAL V_diag, int IorE, REAL *triangle, int triangleSize,
int *tri, int triSize, int *k, int kSize, REAL *xi, int xiSize, REAL *yi, int yiSize,
REAL *zi, int ziSize, REAL *s_xj, int s_xjSize, REAL *s_yj, int s_yjSize,
REAL *s_zj, int s_zjSize, REAL *xt, int xtSize, REAL *yt, int ytSize, REAL *zt, int ztSize,
REAL *m, int mSize, REAL *mx, int mxSize, REAL *my, int mySize, REAL *mz, int mzSize, REAL *mKclean, int mKcleanSize, REAL *mVclean, int mVcleanSize,
int *interList, int interListSize, int *offTar, int offTarSize, int *sizeTar, int sizeTarSize, int *offSrc, int offSrcSize, int *offTwg, int offTwgSize,
int *target, int targetSize,REAL *Area, int AreaSize, REAL *sglInt_int, int sglInt_intSize, REAL *sglInt_ext, int sglInt_extSize,
REAL *xk, int xkSize, REAL *wk, int wkSize, REAL *Xsk, int XskSize, REAL *Wsk, int WskSize,
REAL kappa, REAL threshold, REAL eps, REAL w0, REAL *aux, int auxSize);
void directKt_sort_cy(REAL *Ktx_aux, int Ktx_auxSize, REAL *Kty_aux, int Kty_auxSize, REAL *Ktz_aux, int Ktz_auxSize,
int LorY, REAL *triangle, int triangleSize,
int *k, int kSize, REAL *s_xj, int s_xjSize, REAL *s_yj, int s_yjSize, REAL *s_zj, int s_zjSize,
REAL *xt, int xtSize, REAL *yt, int ytSize, REAL *zt, int ztSize,
REAL *m, int mSize, REAL *mKclean, int mKcleanSize,
int *interList, int interListSize, int *offTar, int offTarSize, int *sizeTar, int sizeTarSize,
int *offSrc, int offSrcSize, int *offTwg, int offTwgSize, REAL *Area, int AreaSize,
REAL *Xsk, int XskSize, REAL *Wsk, int WskSize, REAL kappa, REAL threshold, REAL eps, REAL *aux, int auxSize);
void coulomb_direct_cy(REAL *xt, int xtSize, REAL *yt, int ytSize, REAL *zt, int ztSize,
REAL *m, int mSize, REAL *K_aux, int K_auxSize);
void direct_c_derivative_cy(REAL *dKx_aux, int dKx_auxSize, REAL *dKy_aux, int dKy_auxSize, REAL *dKz_aux, int dKz_auxSize,
REAL *dVx_aux, int dVx_auxSize, REAL *dVy_aux, int dVy_auxSize, REAL *dVz_aux, int dVz_auxSize,
int LorY, REAL K_diag, REAL V_diag, int IorE, REAL *triangle, int triangleSize,
int *tri, int triSize, int *k, int kSize, REAL *xi, int xiSize, REAL *yi, int yiSize,
REAL *zi, int ziSize, REAL *s_xj, int s_xjSize, REAL *s_yj, int s_yjSize,
REAL *s_zj, int s_zjSize, REAL *xt, int xtSize, REAL *yt, int ytSize, REAL *zt, int ztSize,
REAL *m, int mSize, REAL *mx, int mxSize, REAL *my, int mySize, REAL *mz, int mzSize, REAL *mKclean, int mKcleanSize, REAL *mVclean, int mVcleanSize,
int *target, int targetSize,REAL *Area, int AreaSize, REAL *sglInt_int, int sglInt_intSize, REAL *sglInt_ext, int sglInt_extSize,
REAL *xk, int xkSize, REAL *wk, int wkSize, REAL *Xsk, int XskSize, REAL *Wsk, int WskSize,
REAL kappa, REAL threshold, REAL eps, REAL w0, REAL *aux, int auxSize);
// Wall-clock time in seconds since the epoch, with microsecond resolution
// (gettimeofday).  Used for coarse timing of the solver routines.
double get_time (void)
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)(now.tv_sec + 1e-6*now.tv_usec);
};
// Euclidean length of the 3-vector x.
REAL norm(REAL *x)
{
    REAL sum_sq = x[0]*x[0];
    sum_sq += x[1]*x[1];
    sum_sq += x[2]*x[2];
    return sqrt(sum_sq);
};
// z = x × y, the 3-D cross product.  z must not alias x or y: the inputs
// are re-read after z[0] has been written.
void cross(REAL *x, REAL *y, REAL *z) // z is the resulting array
{
z[0] = x[1]*y[2] - x[2]*y[1];
z[1] = x[2]*y[0] - x[0]*y[2];
z[2] = x[0]*y[1] - x[1]*y[0];
};
// res = M * V for a row-major 3x3 matrix M and 3-vector V.
// V is copied first, so res may alias V safely.
void MV(REAL *M, REAL *V, REAL *res) // 3x3 mat-vec
{
    REAL in[3] = {V[0], V[1], V[2]};
    for (int row = 0; row < 3; row++)
    {
        REAL acc = 0.;
        acc += M[3*row + 0]*in[0];
        acc += M[3*row + 1]*in[1];
        acc += M[3*row + 2]*in[2];
        res[row] = acc;
    }
};
// Dot product of two length-3 vectors.
REAL dot_prod(REAL *x, REAL *y) // len(3) vector dot product
{
    REAL acc = x[0]*y[0];
    acc += x[1]*y[1];
    acc += x[2]*y[2];
    return acc;
};
// z[i] = sign*alpha*x[i] + y[i] for i = 0..N-1 (signed scaled vector add).
void axpy(REAL *x, REAL *y, REAL *z, REAL alpha, int sign, int N)
{
    int i = 0;
    while (i < N)
    {
        z[i] = sign*alpha*x[i] + y[i];
        ++i;
    }
};
// y[i] = alpha*x[i] for i = 0..N-1 (vector scale; y may alias x).
void ax(REAL *x, REAL *y, REAL alpha, int N)
{
    int i = 0;
    while (i < N)
    {
        y[i] = alpha*x[i];
        ++i;
    }
};
// Gauss-quadrature line integral of the single-layer (PHI_V) and
// double-layer (PHI_K) potentials over one edge segment, parameterized by
// the angle theta seen from the projection of the evaluation point onto
// the panel plane.  z: signed normal distance to the plane; x: in-plane
// distance to the edge; v1, v2: signed endpoint coordinates along the
// edge; xk/wk: K Gauss nodes/weights on [-1,1]; LorY selects Laplace (1)
// or Yukawa (2, screened by kappa).  Results ACCUMULATE into PHI_K/PHI_V.
void lineInt(REAL &PHI_K, REAL &PHI_V, REAL z, REAL x, REAL v1, REAL v2, REAL kappa, REAL *xk, REAL *wk, int K, int LorY)
{
REAL theta1 = atan2(v1,x);
REAL theta2 = atan2(v2,x);
REAL dtheta = theta2 - theta1;
REAL thetam = (theta2 + theta1)/2;
REAL absZ = fabs(z), signZ;
if (absZ<1e-10) signZ = 0; // evaluation point on the panel plane: sign(z) = 0
else signZ = z/absZ;
// Loop over gauss points
REAL thetak, Rtheta, R, expKr, expKz;
if (LorY==2)
expKz = exp(-kappa*absZ); // hoisted: independent of the quadrature node
for (int i=0; i<K; i++)
{
thetak = dtheta/2*xk[i] + thetam; // map node from [-1,1] onto [theta1,theta2]
Rtheta = x/cos(thetak);           // in-plane distance to the edge point
R = sqrt(Rtheta*Rtheta + z*z);    // 3-D distance to the evaluation point
expKr = exp(-kappa*R);
if (LorY==2) // Yukawa
{
if (kappa>1e-12)
{
PHI_V += -wk[i]*(expKr - expKz)/kappa * dtheta/2;
PHI_K += wk[i]*(z/R*expKr - expKz*signZ) * dtheta/2;
}
else // kappa -> 0 limit: same closed form as the Laplace branch
{
PHI_V += wk[i]*(R-absZ) * dtheta/2;
PHI_K += wk[i]*(z/R - signZ) * dtheta/2;
}
}
if (LorY==1) // Laplace
{
PHI_V += wk[i]*(R-absZ) * dtheta/2;
PHI_K += wk[i]*(z/R - signZ) * dtheta/2;
}
}
};
// Contribution of one triangle side (v1 -> v2) to the panel potentials.
// Builds a rotation that places the side as a vertical line at distance
// x from the origin, then delegates the 1-D integration to lineInt().
// p is the normal offset of the collocation point above the panel plane.
// Results ACCUMULATE into PHI_K / PHI_V.
void intSide(REAL &PHI_K, REAL &PHI_V, REAL *v1, REAL *v2, REAL p, REAL kappa, REAL *xk, REAL *wk, int K, int LorY)
{
REAL v21[3];
for (int i=0; i<3; i++)
{
v21[i] = v2[i] - v1[i];
}
REAL L21 = norm(v21);
REAL v21u[3];
ax(v21, v21u, 1/L21, 3); // unit vector along the side
REAL unit[3] = {0.,0.,1.};
REAL orthog[3];
cross(unit, v21u, orthog); // in-plane direction orthogonal to the side
// alpha: parameter of the foot of the perpendicular from the origin onto
// the (infinite) line through the side; rOrthog = v1 - alpha*v21 is the
// perpendicular component of v1.
REAL alpha = dot_prod(v21,v1)/(L21*L21);
REAL rOrthog[3];
axpy(v21, v1, rOrthog, alpha, -1, 3);
//REAL d_toEdge = norm(rOrthog);
REAL v1_neg[3];
ax(v1, v1_neg, -1, 3);
REAL side_vec[3];
cross(v21, v1_neg, side_vec);
// Rotation whose rows are (orthog, v21u, unit): maps the side onto a
// vertical line x = const in the rotated frame.
REAL rotateToVertLine[9];
for(int i=0; i<3; i++)
{
rotateToVertLine[3*i] = orthog[i];
rotateToVertLine[3*i+1] = v21u[i];
rotateToVertLine[3*i+2] = unit[i];
}
REAL v1new[3];
MV(rotateToVertLine,v1,v1new);
if (v1new[0]<0) // flip the frame so the side sits at positive x
{
ax(v21u, v21u, -1, 3);
ax(orthog, orthog, -1, 3);
ax(rotateToVertLine, rotateToVertLine, -1, 9);
rotateToVertLine[8] = 1.;
MV(rotateToVertLine,v1,v1new);
}
REAL v2new[3], rOrthognew[3];
MV(rotateToVertLine,v2,v2new);
MV(rotateToVertLine,rOrthog,rOrthognew);
REAL x = v1new[0];
if ((v1new[1]>0 && v2new[1]<0) || (v1new[1]<0 && v2new[1]>0))
{
// The perpendicular foot lies between the endpoints: split the integral
// at y = 0 so each piece has a monotone angular parameterization.
REAL PHI1_K = 0. , PHI2_K = 0.;
REAL PHI1_V = 0. , PHI2_V = 0.;
lineInt(PHI1_K, PHI1_V, p, x, 0, v1new[1], kappa, xk, wk, K, LorY);
lineInt(PHI2_K, PHI2_V, p, x, v2new[1], 0, kappa, xk, wk, K, LorY);
PHI_K += PHI1_K + PHI2_K;
PHI_V += PHI1_V + PHI2_V;
}
else
{
// Endpoints on the same side of the foot: single integral, subtracted
// to keep a consistent orientation over the triangle boundary.
REAL PHI_Kaux = 0., PHI_Vaux = 0.;
lineInt(PHI_Kaux, PHI_Vaux, p, x, v1new[1], v2new[1], kappa, xk, wk, K, LorY);
PHI_K -= PHI_Kaux;
PHI_V -= PHI_Vaux;
}
};
// Semi-analytical integration of the single-layer (PHI_V) and double-layer
// (PHI_K) potentials over one triangular panel y (9 coords: 3 vertices),
// evaluated at collocation point x.  The panel is translated so vertex 0 is
// at the origin, rotated into its own plane, shifted so the in-plane origin
// matches the collocation point, and the boundary integral is accumulated
// side by side via intSide().  When same==1 (self-interaction) the analytic
// diagonal contributions K_diag / V_diag are added.  LorY selects Laplace
// (1) or Yukawa (2, screened by kappa); xk/wk are the xkSize Gauss nodes
// and weights.  Results ACCUMULATE into PHI_K / PHI_V.
void SA(REAL &PHI_K, REAL &PHI_V, REAL *y, REAL *x, REAL kappa, int same,
REAL K_diag, REAL V_diag, int LorY, REAL *xk, int xkSize, REAL *wk)
{
// Put first panel at origin
REAL y0_panel[3], y1_panel[3], y2_panel[3], x_panel[3];
REAL X[3], Y[3], Z[3];
for (int i=0; i<3;i++)
{
x_panel[i] = x[i] - y[i];
y0_panel[i] = 0.;
y1_panel[i] = y[3+i] - y[i];
y2_panel[i] = y[6+i] - y[i];
X[i] = y1_panel[i];
}
// Find panel coordinate system X: 0->1
cross(y1_panel, y2_panel, Z); // Z: panel normal (edge1 x edge2)
REAL Xnorm = norm(X);
REAL Znorm = norm(Z);
for (int i=0; i<3; i++)
{
X[i] /= Xnorm;
Z[i] /= Znorm;
}
cross(Z,X,Y); // Y completes the right-handed in-plane basis
// Rotate the coordinate system to match panel plane
REAL rot_matrix[9];
for (int i=0; i<3; i++)
{
rot_matrix[i] = X[i];
rot_matrix[i+3] = Y[i];
rot_matrix[i+6] = Z[i];
}
REAL panel0_plane[3], panel1_plane[3], panel2_plane[3], x_plane[3];
MV(rot_matrix, y0_panel, panel0_plane);
MV(rot_matrix, y1_panel, panel1_plane);
MV(rot_matrix, y2_panel, panel2_plane);
MV(rot_matrix, x_panel, x_plane);
// Shift origin so it matches collocation point
// (only the in-plane components i<2 are shifted; the normal offset
// x_plane[2] is passed separately to intSide below)
REAL panel0_final[3], panel1_final[3], panel2_final[3];
for (int i=0; i<3; i++)
{
if (i<2)
{
panel0_final[i] = panel0_plane[i] - x_plane[i];
panel1_final[i] = panel1_plane[i] - x_plane[i];
panel2_final[i] = panel2_plane[i] - x_plane[i];
}
else
{
panel0_final[i] = panel0_plane[i];
panel1_final[i] = panel1_plane[i];
panel2_final[i] = panel2_plane[i];
}
}
// Loop over sides
intSide(PHI_K, PHI_V, panel0_final, panel1_final, x_plane[2], kappa, xk, wk, xkSize, LorY); // Side 0
intSide(PHI_K, PHI_V, panel1_final, panel2_final, x_plane[2], kappa, xk, wk, xkSize, LorY); // Side 1
intSide(PHI_K, PHI_V, panel2_final, panel0_final, x_plane[2], kappa, xk, wk, xkSize, LorY); // Side 2
if (same==1)
{
PHI_K += K_diag;
PHI_V += V_diag;
}
};
// Fills the per-panel self-interaction ("diagonal") terms for all N panels
// (N = VLSize): VL/KL for the Laplace kernel and VY/KY for the Yukawa
// kernel with parameter kappa.  triangle holds 9 coords per panel and
// centers holds the 3-coord collocation point of each panel.  Each entry
// is computed with the semi-analytic SA() routine using same==1, so the
// analytic K_diag/V_diag contributions are included.
void computeDiagonal_cy(REAL *VL, int VLSize, REAL *KL, int KLSize, REAL *VY, int VYSize, REAL *KY, int KYSize,
REAL *triangle, int triangleSize, REAL *centers, int centersSize, REAL kappa,
REAL K_diag, REAL V_diag, REAL *xk, int xkSize, REAL *wk, int wkSize)
{
int N = VLSize, LorY;
REAL PHI_K, PHI_V;
for(int i=0; i<N; i++)
{
REAL panel[9] = {triangle[9*i], triangle[9*i+1], triangle[9*i+2],
triangle[9*i+3], triangle[9*i+4], triangle[9*i+5],
triangle[9*i+6], triangle[9*i+7], triangle[9*i+8]};
REAL center[3] = {centers[3*i], centers[3*i+1], centers[3*i+2]};
PHI_K = 0.;
PHI_V = 0.;
LorY = 1; // Laplace (kappa passed as 1e-12, effectively zero)
SA(PHI_K, PHI_V, panel, center, 1e-12, 1,
K_diag, V_diag, LorY, xk, xkSize, wk);
VL[i] = PHI_V;
KL[i] = PHI_K;
PHI_K = 0.;
PHI_V = 0.;
LorY = 2; // Yukawa
SA(PHI_K, PHI_V, panel, center, kappa, 1,
K_diag, V_diag, LorY, xk, xkSize, wk);
VY[i] = PHI_V;
KY[i] = PHI_K;
}
};
// Fine Gauss quadrature of the single-layer (PHI_V) and double-layer
// (PHI_K) potentials over one triangular panel, evaluated at the point
// (xi, yi, zi).  panel: 9 coords of the 3 vertices; Xk: K_fine barycentric
// node triples; Wk: the matching weights; LorY==1 selects the Laplace
// kernel, otherwise the Yukawa kernel screened by kappa.  Unlike lineInt/
// intSide, the outputs are OVERWRITTEN (zeroed on entry), not accumulated.
void GQ_fine(REAL &PHI_K, REAL &PHI_V, REAL *panel, REAL xi, REAL yi, REAL zi,
REAL kappa, REAL *Xk, REAL *Wk, int K_fine, REAL Area, int LorY)
{
REAL nx, ny, nz;
REAL dx, dy, dz, r, aux;
PHI_K = 0.;
PHI_V = 0.;
// Unit panel normal: cross product of the two edge vectors divided by
// twice the panel area.
aux = 1/(2*Area);
nx = ((panel[4]-panel[1])*(panel[2]-panel[8]) - (panel[5]-panel[2])*(panel[1]-panel[7])) * aux;
ny = ((panel[5]-panel[2])*(panel[0]-panel[6]) - (panel[3]-panel[0])*(panel[2]-panel[8])) * aux;
nz = ((panel[3]-panel[0])*(panel[1]-panel[7]) - (panel[4]-panel[1])*(panel[0]-panel[6])) * aux;
#pragma unroll
for (int kk=0; kk<K_fine; kk++)
{
// Quadrature point: barycentric combination of the three vertices.
dx = xi - (panel[0]*Xk[3*kk] + panel[3]*Xk[3*kk+1] + panel[6]*Xk[3*kk+2]);
dy = yi - (panel[1]*Xk[3*kk] + panel[4]*Xk[3*kk+1] + panel[7]*Xk[3*kk+2]);
dz = zi - (panel[2]*Xk[3*kk] + panel[5]*Xk[3*kk+1] + panel[8]*Xk[3*kk+2]);
r = 1/sqrt(dx*dx + dy*dy + dz*dz); // r is 1/r!!!
if (LorY==1) // Laplace: 1/r kernel and its normal derivative
{
aux = Wk[kk]*Area*r;
PHI_V += aux;
PHI_K += aux*(nx*dx+ny*dy+nz*dz)*(r*r);
}
else // Yukawa: exp(-kappa*R)/R kernel and its normal derivative
{
aux = Wk[kk]*Area*exp(-kappa*1/r)*r;
PHI_V += aux;
PHI_K += aux*(nx*dx+ny*dy+nz*dz)*r*(kappa+r);
}
}
};
void GQ_fineKt(REAL &PHI_Ktx, REAL &PHI_Kty, REAL &PHI_Ktz, REAL *panel,
               REAL xi, REAL yi, REAL zi, REAL kappa, REAL *Xk, REAL *Wk,
               int K_fine, REAL Area, int LorY)
{
    /* Fine Gauss quadrature of the three Cartesian components of the adjoint
       double-layer (K-transpose) potential of one panel at (xi, yi, zi).
       LorY==1 selects the Laplace kernel, otherwise the Yukawa kernel. */
    PHI_Ktx = 0.;
    PHI_Kty = 0.;
    PHI_Ktz = 0.;

    #pragma unroll
    for (int q = 0; q < K_fine; q++)
    {
        /* Quadrature node mapped onto the panel with barycentric weights Xk. */
        REAL rx = xi - (panel[0]*Xk[3*q] + panel[3]*Xk[3*q+1] + panel[6]*Xk[3*q+2]);
        REAL ry = yi - (panel[1]*Xk[3*q] + panel[4]*Xk[3*q+1] + panel[7]*Xk[3*q+2]);
        REAL rz = zi - (panel[2]*Xk[3*q] + panel[5]*Xk[3*q+1] + panel[8]*Xk[3*q+2]);
        REAL invr = 1/sqrt(rx*rx + ry*ry + rz*rz);   /* reciprocal distance */

        REAL w;
        if (LorY==1)
            w = Wk[q]*Area*invr*invr*invr;                            /* Laplace */
        else
            w = Wk[q]*Area*exp(-kappa/invr)*invr*invr*(kappa+invr);   /* Yukawa */

        PHI_Ktx -= w*rx;
        PHI_Kty -= w*ry;
        PHI_Ktz -= w*rz;
    }
};
/* Fine Gauss quadrature of the gradient (w.r.t. the target point) of the
 * single-layer (dPHI_V*) and double-layer (dPHI_K*) potentials of one panel
 * at (xi, yi, zi). Xk holds K_fine barycentric quadrature nodes, Wk weights.
 * LorY==1 is the Laplace kernel; the Yukawa branch is dead (see below). */
void GQ_fine_derivative(REAL &dPHI_Kx, REAL &dPHI_Ky, REAL &dPHI_Kz,
                        REAL &dPHI_Vx, REAL &dPHI_Vy, REAL &dPHI_Vz,
                        REAL *panel, REAL xi, REAL yi, REAL zi, REAL kappa,
                        REAL *Xk, REAL *Wk, int K_fine, REAL Area, int LorY)
{
    REAL nx, ny, nz;
    REAL dx, dy, dz, r, r3, aux;
    dPHI_Kx = 0.;
    dPHI_Ky = 0.;
    dPHI_Kz = 0.;
    dPHI_Vx = 0.;
    dPHI_Vy = 0.;
    dPHI_Vz = 0.;
    /* Panel unit normal: cross product of two edges over 2*Area. */
    aux = 1/(2*Area);
    nx = ((panel[4]-panel[1])*(panel[2]-panel[8]) - (panel[5]-panel[2])*(panel[1]-panel[7])) * aux;
    ny = ((panel[5]-panel[2])*(panel[0]-panel[6]) - (panel[3]-panel[0])*(panel[2]-panel[8])) * aux;
    nz = ((panel[3]-panel[0])*(panel[1]-panel[7]) - (panel[4]-panel[1])*(panel[0]-panel[6])) * aux;
    #pragma unroll
    for (int kk=0; kk<K_fine; kk++)
    {
        /* Quadrature node mapped onto the panel with barycentric weights Xk. */
        dx = xi - (panel[0]*Xk[3*kk] + panel[3]*Xk[3*kk+1] + panel[6]*Xk[3*kk+2]);
        dy = yi - (panel[1]*Xk[3*kk] + panel[4]*Xk[3*kk+1] + panel[7]*Xk[3*kk+2]);
        dz = zi - (panel[2]*Xk[3*kk] + panel[5]*Xk[3*kk+1] + panel[8]*Xk[3*kk+2]);
        r = 1/sqrt(dx*dx + dy*dy + dz*dz); // r is 1/r!!!
        r3 = r*r*r;
        if (LorY==1)
        {
            /* Laplace: grad(1/R) and grad of the dipole term. */
            aux = Wk[kk]*Area*r3;
            dPHI_Vx -= dx*aux;
            dPHI_Vy -= dy*aux;
            dPHI_Vz -= dz*aux;
            dPHI_Kx += aux*nx-3*aux*dx*(nx*dx+ny*dy+nz*dz)*(r*r);
            dPHI_Ky += aux*ny-3*aux*dy*(nx*dx+ny*dy+nz*dz)*(r*r);
            dPHI_Kz += aux*nz-3*aux*dz*(nx*dx+ny*dy+nz*dz)*(r*r);
        }
        else // this else will never fire as this function is only used to calculate energy (always Laplace)
        {
            /* NOTE(review): only the x components are touched here; harmless
             * since the branch is documented as unreachable. */
            aux = Wk[kk]*Area*exp(-kappa*1/r)*r;
            dPHI_Vx += aux;
            dPHI_Kx += aux*(nx*dx+ny*dy+nz*dz)*r*(kappa+r);
        }
    }
};
/* Direct evaluation of the reaction potential phi_reac at every target point:
 * far sources are summed as point charges/dipoles (Gauss-quadrature
 * collocation points s_*j), while panels that are too close (ratio of panel
 * size to distance above `threshold`) are integrated with fine quadrature. */
void direct_c_cy(int LorY, REAL K_diag, REAL V_diag, int IorE, REAL *triangle, int triangleSize,
                 int *tri, int triSize, int *k, int kSize, REAL *xi, int xiSize, REAL *yi, int yiSize,
                 REAL *zi, int ziSize, REAL *s_xj, int s_xjSize, REAL *s_yj, int s_yjSize,
                 REAL *s_zj, int s_zjSize, REAL *xt, int xtSize, REAL *yt, int ytSize, REAL *zt, int ztSize,
                 REAL *m, int mSize, REAL *mx, int mxSize, REAL *my, int mySize, REAL *mz, int mzSize, REAL *mKclean, int mKcleanSize, REAL *mVclean, int mVcleanSize,
                 int *target, int targetSize, REAL *Area, int AreaSize, REAL *sglInt_int, int sglInt_intSize, REAL *sglInt_ext, int sglInt_extSize,
                 REAL *xk, int xkSize, REAL *wk, int wkSize, REAL *Xsk, int XskSize, REAL *Wsk, int WskSize,
                 REAL kappa, REAL threshold, REAL eps, REAL w0, int AI_int, REAL *phi_reac, int phi_reacSize)
{
    int N_source = s_xjSize;
    REAL dx, dy, dz, dx_tri, dy_tri, dz_tri, R, R2, R3, R_tri, expKr;
    bool L_d, same, condition_an, condition_gq;
    #pragma omp parallel for default(none) shared(N_source, xt, xi, yt, yi, zt, zi, tri, Area, eps, threshold, k, s_xj, s_yj, s_zj, LorY, kappa, m, mx, my, mz, triangle, K_diag, sglInt_int, sglInt_ext, Xsk, Wsk, WskSize, mVclean, mKclean, IorE, xtSize, AI_int, phi_reac) private(dx_tri, dy_tri, dz_tri, R_tri, L_d, same, condition_an, condition_gq, dx, dy, dz, R, R2, R3, expKr)
    for(int i_tar = 0; i_tar < xtSize; i_tar++)
    {
        REAL K_aux = 0.0;
        REAL V_aux = 0.0;
        int aux = 0;            /* per-target count of analytic integrations */
        /* NOTE(review): `i` is never reassigned, so `same` below is always
         * false and the singular (same==1) branch appears unreachable here —
         * TODO confirm against the original implementation. */
        int i = -1;
        REAL V_red = 0.0;
        REAL K_red = 0.0;
        for(int j=0; j<N_source; j++)
        {
            // Check if panels are far enough for Gauss quadrature
            dx_tri = xt[i_tar] - xi[tri[j]];
            dy_tri = yt[i_tar] - yi[tri[j]];
            dz_tri = zt[i_tar] - zi[tri[j]];
            R_tri = sqrt(dx_tri*dx_tri + dy_tri*dy_tri + dz_tri*dz_tri);
            /* Near-field criterion: panel size / distance above threshold. */
            L_d = (sqrt(2*Area[tri[j]])/(R_tri+eps)>=threshold);
            same = (i==tri[j]);
            /* Analytic/fine integration only for the first quadrature point
             * of a panel (k[j]==0) to avoid integrating the panel k times. */
            condition_an = ((same || L_d) && (k[j]==0));
            condition_gq = (!L_d);
            if(condition_gq)
            {
                /* Far field: treat the collocation point as a point source. */
                dx = xt[i_tar] - s_xj[j];
                dy = yt[i_tar] - s_yj[j];
                dz = zt[i_tar] - s_zj[j];
                R = sqrt(dx*dx + dy*dy + dz*dz + eps*eps);   /* eps regularizes R */
                R2 = R*R;
                R3 = R2*R;
                if (LorY==2)   /* Yukawa (screened) kernel */
                {
                    expKr = exp(-kappa*R);
                    V_red += m[j]*expKr/R;
                    K_red += expKr/R2*(kappa+1/R) * (dx*mx[j] + dy*my[j] + dz*mz[j]);
                }
                if (LorY==1)   /* Laplace kernel */
                {
                    V_red += m[j]/R;
                    K_red += 1/R3*(dx*mx[j] + dy*my[j] + dz*mz[j]);
                }
            }
            if(condition_an)
            {
                /* NOTE(review): `aux` is loop-local (private per iteration),
                 * so this atomic is unnecessary overhead. */
                #pragma omp atomic
                aux += 1;
                REAL panel[9] = {triangle[9*tri[j]], triangle[9*tri[j]+1], triangle[9*tri[j]+2],
                                 triangle[9*tri[j]+3], triangle[9*tri[j]+4], triangle[9*tri[j]+5],
                                 triangle[9*tri[j]+6], triangle[9*tri[j]+7], triangle[9*tri[j]+8]};
                REAL PHI_K = 0., PHI_V = 0.;
                if (same==1)
                {
                    /* Singular self term: precomputed analytic values. */
                    PHI_K = K_diag;
                    if (IorE==1)
                        PHI_V = sglInt_int[j];
                    else
                        PHI_V = sglInt_ext[j];
                }
                else
                {
                    GQ_fine(PHI_K, PHI_V, panel, xt[i_tar], yt[i_tar], zt[i_tar], kappa, Xsk, Wsk, WskSize, Area[tri[j]], LorY);
                }
                V_red += PHI_V * mVclean[j];
                K_red += PHI_K * mKclean[j];
            }
        }
        V_aux += K_aux == K_aux ? V_red : V_red; /* NOTE(review): kept as-is below */
        V_aux = V_aux; /* no-op */
        V_aux += 0;
        K_aux += K_red;
        /* NOTE(review): AI_int is passed by value and incremented here by
         * multiple threads without synchronization — the count both races and
         * is invisible to the caller. Verify intent. */
        AI_int += aux;
        phi_reac[i_tar] = (-K_aux + V_aux) / (4 * 3.14159265358979323846);
    }
};
/* Treecode near-field ("P2P") pass over sorted/clustered data: for each target
 * twig (offTar/sizeTar) and each source cluster in its interaction list
 * (interList/offTwg/offSrc), accumulate single-layer (V_aux) and double-layer
 * (K_aux) contributions per target. Far-enough panels use their quadrature
 * point values; too-close panels use fine Gauss quadrature (GQ_fine).
 * aux[0] counts fine integrations, aux[1] accumulates their wall time. */
void direct_sort_cy(REAL *K_aux, int K_auxSize, REAL *V_aux, int V_auxSize, int LorY, REAL K_diag, REAL V_diag, int IorE, REAL *triangle, int triangleSize,
                    int *tri, int triSize, int *k, int kSize, REAL *xi, int xiSize, REAL *yi, int yiSize,
                    REAL *zi, int ziSize, REAL *s_xj, int s_xjSize, REAL *s_yj, int s_yjSize,
                    REAL *s_zj, int s_zjSize, REAL *xt, int xtSize, REAL *yt, int ytSize, REAL *zt, int ztSize,
                    REAL *m, int mSize, REAL *mx, int mxSize, REAL *my, int mySize, REAL *mz, int mzSize, REAL *mKclean, int mKcleanSize, REAL *mVclean, int mVcleanSize,
                    int *interList, int interListSize, int *offTar, int offTarSize, int *sizeTar, int sizeTarSize, int *offSrc, int offSrcSize, int *offTwg, int offTwgSize,
                    int *target, int targetSize, REAL *Area, int AreaSize, REAL *sglInt_int, int sglInt_intSize, REAL *sglInt_ext, int sglInt_extSize,
                    REAL *xk, int xkSize, REAL *wk, int wkSize, REAL *Xsk, int XskSize, REAL *Wsk, int WskSize,
                    REAL kappa, REAL threshold, REAL eps, REAL w0, REAL *aux, int auxSize)
{
    double start, stop;
    int CI_start, CI_end, CJ_start, CJ_end, list_start, list_end, CJ;
    REAL dx, dy, dz, dx_tri, dy_tri, dz_tri, R, R2, R3, R_tri, expKr, sum_K, sum_V;
    bool L_d, same, condition_an, condition_gq;
    for (int tarTwg=0; tarTwg<offTarSize; tarTwg++)
    {
        /* Targets of this twig and its cluster interaction list. */
        CI_start = offTar[tarTwg];
        CI_end = offTar[tarTwg] + sizeTar[tarTwg];
        list_start = offTwg[tarTwg];
        list_end = offTwg[tarTwg+1];
        #pragma omp parallel for private(sum_K, sum_V, CJ, CJ_start, CJ_end, dx_tri, dy_tri, dz_tri, R_tri, L_d, same, condition_an, condition_gq, dx, dy, dz, R, R2, R3, expKr, start, stop) shared(list_start, list_end, interList, offSrc, triangle, xt, yt, zt, Area, eps, threshold, k, s_xj, s_yj, s_zj, LorY, kappa, m, mx, my, mz, aux, K_diag, IorE, sglInt_int, sglInt_ext, Xsk, XskSize, Wsk, WskSize, mVclean, mKclean, V_aux, K_aux) schedule(runtime)
        for(int i=CI_start; i<CI_end; i++)
        {
            sum_K = 0.;
            sum_V = 0.;
            for (int lst=list_start; lst<list_end; lst++)
            {
                CJ = interList[lst];
                CJ_start = offSrc[CJ];
                CJ_end = offSrc[CJ+1];
                for(int j=CJ_start; j<CJ_end; j++)
                {
                    // Check if panels are far enough for Gauss quadrature
                    int ptr = 9*j;
                    REAL panel[9] = {triangle[ptr], triangle[ptr+1], triangle[ptr+2],
                                     triangle[ptr+3], triangle[ptr+4], triangle[ptr+5],
                                     triangle[ptr+6], triangle[ptr+7], triangle[ptr+8]};
                    /* Distance from the target to the panel centroid. */
                    dx_tri = xt[i] - (panel[0]+panel[3]+panel[6])/3;
                    dy_tri = yt[i] - (panel[1]+panel[4]+panel[7])/3;
                    dz_tri = zt[i] - (panel[2]+panel[5]+panel[8])/3;
                    R_tri = sqrt(dx_tri*dx_tri + dy_tri*dy_tri + dz_tri*dz_tri);
                    L_d = (sqrt(2*Area[j])/(R_tri+eps)>=threshold);
                    same = (R_tri<1e-12);   /* target sits on the panel centroid */
                    /* Integrate a panel only once (first quadrature point). */
                    condition_an = ((L_d) && (k[j]==0));
                    condition_gq = (!L_d);
                    if(condition_gq)
                    {
                        /* Far field: point source/dipole at the quadrature point. */
                        dx = xt[i] - s_xj[j];
                        dy = yt[i] - s_yj[j];
                        dz = zt[i] - s_zj[j];
                        R = sqrt(dx*dx + dy*dy + dz*dz + eps*eps);
                        R2 = R*R;
                        R3 = R2*R;
                        if (LorY==2)   /* Yukawa */
                        {
                            expKr = exp(-kappa*R);
                            sum_V += m[j]*expKr/R;
                            sum_K += expKr/R2*(kappa+1/R) * (dx*mx[j] + dy*my[j] + dz*mz[j]);
                        }
                        if (LorY==1)   /* Laplace */
                        {
                            sum_V += m[j]/R;
                            sum_K += 1/R3*(dx*mx[j] + dy*my[j] + dz*mz[j]);
                        }
                    }
                    if(condition_an)
                    {
                        start = get_time();
                        /* NOTE(review): aux[0]/aux[1] are shared profiling
                         * counters updated inside the parallel for without
                         * atomics — this is a data race; counts/timings are
                         * approximate at best. */
                        aux[0] += 1;
                        REAL PHI_K = 0., PHI_V = 0.;
                        if (same==1)
                        {
                            /* Singular self term: precomputed analytic values. */
                            PHI_K = K_diag;
                            if (IorE==1)
                                PHI_V = sglInt_int[j];
                            else
                                PHI_V = sglInt_ext[j];
                        }
                        else
                        {
                            GQ_fine(PHI_K, PHI_V, panel, xt[i], yt[i], zt[i], kappa, Xsk, Wsk, WskSize, Area[j], LorY);
                        }
                        sum_V += PHI_V * mVclean[j];
                        sum_K += PHI_K * mKclean[j];
                        stop = get_time();
                        aux[1] += stop - start;
                    }
                }
            }
            V_aux[i] += sum_V;
            K_aux[i] += sum_K;
        }
    }
};
/* Treecode near-field pass for the three Cartesian components of the adjoint
 * double-layer (K-transpose) operator. Same clustering scheme as
 * direct_sort_cy, but sequential (no OpenMP pragma here). The exactly
 * singular case (target on the panel centroid) contributes zero.
 * aux[0] counts fine integrations, aux[1] accumulates their wall time. */
void directKt_sort_cy(REAL *Ktx_aux, int Ktx_auxSize, REAL *Kty_aux, int Kty_auxSize, REAL *Ktz_aux, int Ktz_auxSize,
                      int LorY, REAL *triangle, int triangleSize,
                      int *k, int kSize, REAL *s_xj, int s_xjSize, REAL *s_yj, int s_yjSize, REAL *s_zj, int s_zjSize,
                      REAL *xt, int xtSize, REAL *yt, int ytSize, REAL *zt, int ztSize,
                      REAL *m, int mSize, REAL *mKclean, int mKcleanSize,
                      int *interList, int interListSize, int *offTar, int offTarSize, int *sizeTar, int sizeTarSize,
                      int *offSrc, int offSrcSize, int *offTwg, int offTwgSize, REAL *Area, int AreaSize,
                      REAL *Xsk, int XskSize, REAL *Wsk, int WskSize, REAL kappa, REAL threshold, REAL eps, REAL *aux, int auxSize)
{
    double start,stop;
    int CI_start, CI_end, CJ_start, CJ_end, list_start, list_end, CJ;
    REAL dx, dy, dz, dx_tri, dy_tri, dz_tri, R, R2, R3, R_tri, expKr, sum_Ktx, sum_Kty, sum_Ktz;
    bool L_d, same, condition_an, condition_gq;
    for (int tarTwg=0; tarTwg<offTarSize; tarTwg++)
    {
        /* Targets of this twig and its cluster interaction list. */
        CI_start = offTar[tarTwg];
        CI_end = offTar[tarTwg] + sizeTar[tarTwg];
        list_start = offTwg[tarTwg];
        list_end = offTwg[tarTwg+1];
        for(int i=CI_start; i<CI_end; i++)
        {
            sum_Ktx = 0.;
            sum_Kty = 0.;
            sum_Ktz = 0.;
            for (int lst=list_start; lst<list_end; lst++)
            {
                CJ = interList[lst];
                CJ_start = offSrc[CJ];
                CJ_end = offSrc[CJ+1];
                for(int j=CJ_start; j<CJ_end; j++)
                {
                    // Check if panels are far enough for Gauss quadrature
                    //start = get_time();
                    int ptr = 9*j;
                    REAL panel[9] = {triangle[ptr], triangle[ptr+1], triangle[ptr+2],
                                     triangle[ptr+3], triangle[ptr+4], triangle[ptr+5],
                                     triangle[ptr+6], triangle[ptr+7], triangle[ptr+8]};
                    /* Distance from the target to the panel centroid. */
                    dx_tri = xt[i] - (panel[0]+panel[3]+panel[6])/3;
                    dy_tri = yt[i] - (panel[1]+panel[4]+panel[7])/3;
                    dz_tri = zt[i] - (panel[2]+panel[5]+panel[8])/3;
                    R_tri = sqrt(dx_tri*dx_tri + dy_tri*dy_tri + dz_tri*dz_tri);
                    L_d = (sqrt(2*Area[j])/(R_tri+eps)>=threshold);
                    same = (R_tri<1e-12);   /* target sits on the panel centroid */
                    /* Integrate a panel only once (first quadrature point). */
                    condition_an = ((L_d) && (k[j]==0));
                    condition_gq = (!L_d);
                    if(condition_gq)
                    {
                        /* Far field: point source at the quadrature point. */
                        dx = xt[i] - s_xj[j];
                        dy = yt[i] - s_yj[j];
                        dz = zt[i] - s_zj[j];
                        R = sqrt(dx*dx + dy*dy + dz*dz + eps*eps);
                        R2 = R*R;
                        R3 = R2*R;
                        if (LorY==2)   /* Yukawa */
                        {
                            expKr = m[j]*exp(-kappa*R)/R2*(kappa+1/R);
                            sum_Ktx -= expKr * dx;
                            sum_Kty -= expKr * dy;
                            sum_Ktz -= expKr * dz;
                        }
                        if (LorY==1)   /* Laplace */
                        {
                            expKr = m[j]/R3;
                            sum_Ktx -= expKr*dx;
                            sum_Kty -= expKr*dy;
                            sum_Ktz -= expKr*dz;
                        }
                    }
                    if(condition_an)
                    {
                        start = get_time();
                        aux[0] += 1;   /* profiling: count of fine integrations */
                        REAL PHI_Ktx = 0.;
                        REAL PHI_Kty = 0.;
                        REAL PHI_Ktz = 0.;
                        if (same==1)
                        {
                            /* Singular self term of K-transpose is zero. */
                            PHI_Ktx = 0;
                            PHI_Kty = 0;
                            PHI_Ktz = 0;
                        }
                        else
                        {
                            GQ_fineKt(PHI_Ktx, PHI_Kty, PHI_Ktz, panel, xt[i], yt[i], zt[i], kappa, Xsk, Wsk, WskSize, Area[j], LorY);
                        }
                        sum_Ktx += PHI_Ktx * mKclean[j];
                        sum_Kty += PHI_Kty * mKclean[j];
                        sum_Ktz += PHI_Ktz * mKclean[j];
                        stop = get_time();
                        aux[1] += stop - start;   /* profiling: time in fine quadrature */
                    }
                }
            }
            Ktx_aux[i] += sum_Ktx;
            Kty_aux[i] += sum_Kty;
            Ktz_aux[i] += sum_Ktz;
        }
    }
};
void coulomb_direct_cy(REAL *xt, int xtSize, REAL *yt, int ytSize, REAL *zt, int ztSize,
                       REAL *m, int mSize, REAL *K_aux, int K_auxSize)
{
    /* Direct O(N^2) pairwise Coulomb sum: K_aux[i] = m[i] * sum_{j!=i} m[j]/r_ij.
       Locals are declared inside the loop, so they are implicitly private. */
    #pragma omp parallel for default(none) shared(xtSize, K_aux, xt, yt, zt, m) schedule(runtime)
    for (int i = 0; i < xtSize; i++)
    {
        REAL acc = 0.;
        for (int j = 0; j < xtSize; j++)
        {
            if (j == i)
                continue;   /* skip the self-interaction */
            REAL ddx = xt[i] - xt[j];
            REAL ddy = yt[i] - yt[j];
            REAL ddz = zt[i] - zt[j];
            REAL dist = sqrt(ddx*ddx + ddy*ddy + ddz*ddz);
            acc += m[j]/dist;
        }
        K_aux[i] = m[i]*acc;
    }
};
/* Direct evaluation of the gradients of the single-layer (dV*_aux) and
 * double-layer (dK*_aux) potentials at the listed target points. Used for
 * (Laplace-only) energy calculations, hence the documented dead branches.
 * aux[0] counts fine integrations, aux[1] accumulates their wall time.
 * NOTE: unlike direct_c_cy, here R stores the RECIPROCAL distance. */
void direct_c_derivative_cy(REAL *dKx_aux, int dKx_auxSize, REAL *dKy_aux, int dKy_auxSize, REAL *dKz_aux, int dKz_auxSize,
                            REAL *dVx_aux, int dVx_auxSize, REAL *dVy_aux, int dVy_auxSize, REAL *dVz_aux, int dVz_auxSize,
                            int LorY, REAL K_diag, REAL V_diag, int IorE, REAL *triangle, int triangleSize,
                            int *tri, int triSize, int *k, int kSize, REAL *xi, int xiSize, REAL *yi, int yiSize,
                            REAL *zi, int ziSize, REAL *s_xj, int s_xjSize, REAL *s_yj, int s_yjSize,
                            REAL *s_zj, int s_zjSize, REAL *xt, int xtSize, REAL *yt, int ytSize, REAL *zt, int ztSize,
                            REAL *m, int mSize, REAL *mx, int mxSize, REAL *my, int mySize, REAL *mz, int mzSize, REAL *mKclean, int mKcleanSize, REAL *mVclean, int mVcleanSize,
                            int *target, int targetSize, REAL *Area, int AreaSize, REAL *sglInt_int, int sglInt_intSize, REAL *sglInt_ext, int sglInt_extSize,
                            REAL *xk, int xkSize, REAL *wk, int wkSize, REAL *Xsk, int XskSize, REAL *Wsk, int WskSize,
                            REAL kappa, REAL threshold, REAL eps, REAL w0, REAL *aux, int auxSize)
{
    double start,stop;
    int N_target = targetSize;
    int N_source = s_xjSize;
    REAL dx, dy, dz, dx_tri, dy_tri, dz_tri, R, R2, R3, R_tri, expKr;
    bool L_d, same, condition_an, condition_gq;
    for(int i_aux=0; i_aux<N_target; i_aux++)
    {
        int i = target[i_aux];   /* global index of this target's panel */
        for(int j=0; j<N_source; j++)
        {
            // Check if panels are far enough for Gauss quadrature
            dx_tri = xt[i_aux] - xi[tri[j]];
            dy_tri = yt[i_aux] - yi[tri[j]];
            dz_tri = zt[i_aux] - zi[tri[j]];
            R_tri = sqrt(dx_tri*dx_tri + dy_tri*dy_tri + dz_tri*dz_tri);
            /* Near-field criterion: panel size / distance above threshold. */
            L_d = (sqrt(2*Area[tri[j]])/(R_tri+eps)>=threshold);
            same = (i==tri[j]);
            /* Integrate a panel only once (first quadrature point, k[j]==0). */
            condition_an = ((same || L_d) && (k[j]==0));
            condition_gq = (!L_d);
            if(condition_gq)
            {
                //start = get_time();
                dx = xt[i_aux] - s_xj[j];
                dy = yt[i_aux] - s_yj[j];
                dz = zt[i_aux] - s_zj[j];
                R = 1/sqrt(dx*dx + dy*dy + dz*dz + eps*eps);   /* R is 1/r here */
                R2 = R*R;
                R3 = R2*R;
                if (LorY==2) // this if never fires as this function is only used for energy calculations (only laplace)
                {
                    expKr = exp(-kappa*R);
                    dVx_aux[i_aux] += m[j]*expKr*R;
                    dKx_aux[i_aux] += expKr*R2*(kappa+1*R) * (dx*mx[j] + dy*my[j] + dz*mz[j]);
                }
                if (LorY==1)
                {
                    /* grad of 1/r and of the dipole term, using R = 1/r. */
                    dVx_aux[i_aux] -= m[j]*dx*R3;
                    dVy_aux[i_aux] -= m[j]*dy*R3;
                    dVz_aux[i_aux] -= m[j]*dz*R3;
                    dKx_aux[i_aux] += mx[j]*R3-3*dx*R3*R2*(dx*mx[j] + dy*my[j] + dz*mz[j]);
                    dKy_aux[i_aux] += my[j]*R3-3*dy*R3*R2*(dx*mx[j] + dy*my[j] + dz*mz[j]);
                    dKz_aux[i_aux] += mz[j]*R3-3*dz*R3*R2*(dx*mx[j] + dy*my[j] + dz*mz[j]);
                }
                //stop = get_time();
                //aux[1] += stop - start;
            }
            if(condition_an)
            {
                aux[0] += 1;   /* profiling: count of fine integrations */
                REAL center[3] = {xt[i_aux], yt[i_aux], zt[i_aux]};
                REAL panel[9] = {triangle[9*tri[j]], triangle[9*tri[j]+1], triangle[9*tri[j]+2],
                                 triangle[9*tri[j]+3], triangle[9*tri[j]+4], triangle[9*tri[j]+5],
                                 triangle[9*tri[j]+6], triangle[9*tri[j]+7], triangle[9*tri[j]+8]};
                REAL dPHI_Kx = 0., dPHI_Ky = 0., dPHI_Kz = 0., dPHI_Vx = 0., dPHI_Vy = 0., dPHI_Vz = 0.;
                start = get_time();
                if (same==1) // So far, this if will never fire, as we only use this function for energy calculation (never singular)
                {
                    dPHI_Kx = K_diag;
                    if (IorE==1)
                        dPHI_Vx = sglInt_int[j];
                    else
                        dPHI_Vx = sglInt_ext[j];
                }
                else
                {
                    GQ_fine_derivative(dPHI_Kx, dPHI_Ky, dPHI_Kz, dPHI_Vx, dPHI_Vy, dPHI_Vz, panel, xt[i_aux], yt[i_aux], zt[i_aux], kappa, Xsk, Wsk, WskSize, Area[tri[j]], LorY);
                }
                stop = get_time();
                aux[1] += stop - start;   /* profiling: time in fine quadrature */
                // printf("%f \t %f\n",PHI_V,mVclean[j]);
                dVx_aux[i_aux] += dPHI_Vx * mVclean[j];
                dVy_aux[i_aux] += dPHI_Vy * mVclean[j];
                dVz_aux[i_aux] += dPHI_Vz * mVclean[j];
                dKx_aux[i_aux] += dPHI_Kx * mKclean[j];
                dKy_aux[i_aux] += dPHI_Ky * mKclean[j];
                dKz_aux[i_aux] += dPHI_Kz * mKclean[j];
            }
        }
    }
}
|
core_strsm.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_ztrsm.c, normal z -> s, Fri Sep 28 17:38:19 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"
/***************************************************************************//**
*
* @ingroup core_trsm
*
* Solves one of the matrix equations
*
* \f[ op( A )\times X = \alpha B, \f] or
* \f[ X \times op( A ) = \alpha B, \f]
*
* where op( A ) is one of:
 * \f[ op( A ) = A, \f]
 * \f[ op( A ) = A^T, \f]
 * \f[ op( A ) = A^H, \f]
*
* alpha is a scalar, X and B are m-by-n matrices, and
* A is a unit or non-unit, upper or lower triangular matrix.
* The matrix X overwrites B.
*
*******************************************************************************
*
* @param[in] side
* - PlasmaLeft: op(A)*X = B,
* - PlasmaRight: X*op(A) = B.
*
* @param[in] uplo
* - PlasmaUpper: A is upper triangular,
* - PlasmaLower: A is lower triangular.
*
* @param[in] transa
* - PlasmaNoTrans: A is not transposed,
* - PlasmaTrans: A is transposed,
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] diag
* - PlasmaNonUnit: A has non-unit diagonal,
* - PlasmaUnit: A has unit diagonal.
*
* @param[in] m
* The number of rows of the matrix B. m >= 0.
*
* @param[in] n
* The number of columns of the matrix B. n >= 0.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* The lda-by-ka triangular matrix,
* where ka = m if side = PlasmaLeft,
* and ka = n if side = PlasmaRight.
* If uplo = PlasmaUpper, the leading k-by-k upper triangular part
* of the array A contains the upper triangular matrix, and the
* strictly lower triangular part of A is not referenced.
* If uplo = PlasmaLower, the leading k-by-k lower triangular part
* of the array A contains the lower triangular matrix, and the
* strictly upper triangular part of A is not referenced.
* If diag = PlasmaUnit, the diagonal elements of A are also not
* referenced and are assumed to be 1.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,k).
*
* @param[in,out] B
* On entry, the ldb-by-n right hand side matrix B.
* On exit, if return value = 0, the ldb-by-n solution matrix X.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,m).
*
******************************************************************************/
__attribute__((weak))
void plasma_core_strsm(plasma_enum_t side, plasma_enum_t uplo,
                       plasma_enum_t transa, plasma_enum_t diag,
                       int m, int n,
                       float alpha, const float *A, int lda,
                       float *B, int ldb)
{
    /* Thin shim over the CBLAS triangular solve. The PLASMA enum values are
     * cast straight to their CBLAS counterparts, as elsewhere in core_blas. */
    const CBLAS_SIDE      cblas_side  = (CBLAS_SIDE)side;
    const CBLAS_UPLO      cblas_uplo  = (CBLAS_UPLO)uplo;
    const CBLAS_TRANSPOSE cblas_trans = (CBLAS_TRANSPOSE)transa;
    const CBLAS_DIAG      cblas_diag  = (CBLAS_DIAG)diag;

    cblas_strsm(CblasColMajor,
                cblas_side, cblas_uplo, cblas_trans, cblas_diag,
                m, n,
                alpha, A, lda,
                B, ldb);
}
/******************************************************************************/
void plasma_core_omp_strsm(
    plasma_enum_t side, plasma_enum_t uplo,
    plasma_enum_t transa, plasma_enum_t diag,
    int m, int n,
    float alpha, const float *A, int lda,
    float *B, int ldb,
    plasma_sequence_t *sequence, plasma_request_t *request)
{
    /* Order of the triangular factor A: m when applied from the left,
     * n when applied from the right. */
    const int ak = (side == PlasmaLeft) ? m : n;

    #pragma omp task depend(in:A[0:lda*ak]) \
                     depend(inout:B[0:ldb*n])
    {
        /* Skip the work if an earlier task in this sequence already failed. */
        if (sequence->status == PlasmaSuccess) {
            plasma_core_strsm(side, uplo,
                              transa, diag,
                              m, n,
                              alpha, A, lda,
                              B, ldb);
        }
    }
}
|
sample1.c | /******************************************************************************
* FILE: omp_workshare1.c
* DESCRIPTION:
* OpenMP Example - Loop Work-sharing - C/C++ Version
* In this example, the iterations of a loop are scheduled dynamically
* across the team of threads. A thread will perform CHUNK iterations
* at a time before being scheduled for the next CHUNK of work.
* AUTHOR: Blaise Barney 5/99
* LAST REVISED: 04/06/05
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define CHUNKSIZE 10
#define N 100
int main (int argc, char *argv[])
{
    float a[N], b[N], c[N];
    int nthreads, tid, idx;
    int chunk = CHUNKSIZE;   /* iterations a thread takes per scheduling step */

    /* Seed the input vectors: a[i] = b[i] = i. */
    for (idx = 0; idx < N; idx++)
        a[idx] = b[idx] = idx * 1.0;

    #pragma omp parallel shared(a,b,c,nthreads,chunk) private(idx,tid)
    {
        tid = omp_get_thread_num();

        /* Only thread 0 reports the team size. */
        if (tid == 0)
        {
            nthreads = omp_get_num_threads();
            printf("Number of threads = %d\n", nthreads);
        }
        printf("Thread %d starting...\n",tid);

        /* Hand out CHUNK-sized batches of the vector add dynamically. */
        #pragma omp for schedule(dynamic,chunk)
        for (idx = 0; idx < N; idx++)
        {
            c[idx] = a[idx] + b[idx];
            printf("Thread %d: c[%d]= %f\n",tid,idx,c[idx]);
        }
    }   /* end of parallel section */

    return 0;
}
|
kernel_parallel.c | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/libxsmm/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include <libxsmm.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <string.h>
# if defined(_OPENMP)
#include <omp.h>
# endif
# if defined(__APPLE__) && defined(__arm64__)
#include <pthread.h>
# endif
/* Description of one (batch-reduce) GEMM test case: problem shape, datatypes,
 * layout flags and batch-reduce configuration consumed by ref_matmul and the
 * JIT driver below. */
typedef struct gemm_def {
  libxsmm_datatype in_type;    /* element type of A and B */
  libxsmm_datatype out_type;   /* element type of C */
  libxsmm_datatype comp_type;  /* type used for accumulation */
  libxsmm_blasint m;           /* rows of C (and of A) */
  libxsmm_blasint n;           /* columns of C (and of B) */
  libxsmm_blasint k;           /* contraction (inner) dimension */
  libxsmm_blasint lda;         /* leading dimension of A */
  libxsmm_blasint ldb;         /* leading dimension of B */
  libxsmm_blasint ldc;         /* leading dimension of C */
  double alpha;
  double beta;                 /* beta == 0: C is overwritten, not accumulated */
  int trans_a;                 /* nonzero: A transposed (rejected by the JIT path) */
  int trans_b;                 /* nonzero: B transposed */
  int vnni_a;                  /* nonzero: A stored in VNNI-blocked layout */
  int vnni_b;
  int vnni_c;
  int unsigned_a;              /* nonzero: A elements are unsigned (integer paths) */
  int unsigned_b;
  int unsigned_c;
  int aligned_a;
  int aligned_c;
  int prefetch;
  int br_type;                 /* batch-reduce flavor; 2 = offset-based (see jit_matmul) */
  libxsmm_blasint br_count;    /* number of batch-reduce terms */
  int br_unroll;
  int tc_config;               /* presumably AMX tile-config handling mode — confirm */
  float scf;                   /* scaling factor applied in the i8 -> u8 output path */
} gemm_def;
/* Fill a batch of br column-major ld x n matrices at `data` with random
 * values, interpreting the buffer according to `dtype`. Floating-point types
 * get uniform [0,1) values; integer types get values scaled to [0,20).
 * Unsupported datatypes leave the buffer untouched (empty else branch). */
void init_random_matrix( libxsmm_datatype dtype, void* data, libxsmm_blasint br, libxsmm_blasint ld, libxsmm_blasint n ) {
  /* One alias per supported element type; only the one matching dtype is used. */
  double* d_data = (double*) data;
  float* f_data = (float*) data;
  libxsmm_bfloat16* bf_data = (libxsmm_bfloat16*) data;
  int* i_data = (int*) data;
  short* s_data = (short*) data;
  char* c_data = (char*) data;
  unsigned int l_r, l_i, l_j;
  for (l_r = 0; l_r < br; l_r++) {            /* batch-reduce slice */
    for (l_i = 0; l_i < ld; l_i++) {          /* row (covers the full leading dim) */
      for (l_j = 0; l_j < n; l_j++) {         /* column */
        /* Column-major element (l_i, l_j) of slice l_r. */
        if ( dtype == LIBXSMM_DATATYPE_F64 ) {
          d_data[(l_r * ld * n) + (l_j * ld) + l_i] = libxsmm_rng_f64();
        } else if ( dtype == LIBXSMM_DATATYPE_F32 ) {
          f_data[(l_r * ld * n) + (l_j * ld) + l_i] = (float)libxsmm_rng_f64();
        } else if ( dtype == LIBXSMM_DATATYPE_BF16 ) {
          /* Truncate an f32 to bf16 by keeping its high 16 bits. */
          union libxsmm_bfloat16_hp tmp;
          tmp.f = (float)libxsmm_rng_f64();
          bf_data[(l_r * ld * n) + (l_j * ld) + l_i] = tmp.i[1];
        } else if ( dtype == LIBXSMM_DATATYPE_I32 ) {
          i_data[(l_r * ld * n) + (l_j * ld) + l_i] = (int) (libxsmm_rng_f64() * 20.0);
        } else if ( dtype == LIBXSMM_DATATYPE_I16 ) {
          s_data[(l_r * ld * n) + (l_j * ld) + l_i] = (short)(libxsmm_rng_f64() * 20.0);
        } else if ( dtype == LIBXSMM_DATATYPE_I8 ) {
          c_data[(l_r * ld * n) + (l_j * ld) + l_i] = (char) (libxsmm_rng_f64() * 20.0);
        } else {
          /* unsupported datatype: intentionally left uninitialized */
        }
      }
    }
  }
}
/* Zero-fill a batch of br column-major ld x n matrices at `data`.
 * The byte count is computed in size_t: the previous br*ld*n product was
 * evaluated in libxsmm_blasint (typically 32-bit signed), which overflows —
 * undefined behavior — for large buffers. */
void init_zero_matrix( libxsmm_datatype dtype, void* data, libxsmm_blasint br, libxsmm_blasint ld, libxsmm_blasint n ) {
  memset( data, 0x0, (size_t)br * (size_t)ld * (size_t)n * LIBXSMM_TYPESIZE( dtype ) );
}
/* Poison a batch of br column-major ld x n matrices so reads of data that was
 * never written are noticeable in results.
 * Fixes: memset only uses the LOW BYTE of its fill value, so the previous
 * 0xdeadbeef literal was silently truncated to 0xef — state that byte
 * explicitly (same bytes in memory, honest code). The size is computed in
 * size_t to avoid signed-int overflow of br*ld*n for large buffers. */
void init_garbage_matrix( libxsmm_datatype dtype, void* data, libxsmm_blasint br, libxsmm_blasint ld, libxsmm_blasint n ) {
  memset( data, 0xef, (size_t)br * (size_t)ld * (size_t)n * LIBXSMM_TYPESIZE( dtype ) );
}
/* Scalar reference batch-reduce GEMM: C (+)= sum_r A_r * B_r for all
 * batch-reduce slices, dispatching on the (in, out, comp) datatype triple of
 * i_gemm_def. Matrices are column-major; for low-precision inputs A is
 * addressed in VNNI-blocked layout with block size l_k_block. Used as the
 * gold result against the JIT kernels. Unsupported triples fall through
 * silently (no else at the end). */
void ref_matmul( gemm_def* i_gemm_def, void* a, void* b, void* c ) {
  unsigned int l_r, l_j, l_i, l_s, l_k2;
  unsigned int lda = i_gemm_def->lda;
  unsigned int ldb = i_gemm_def->ldb;
  unsigned int ldc = i_gemm_def->ldc;
  unsigned int m = i_gemm_def->m;
  unsigned int n = i_gemm_def->n;
  unsigned int k = i_gemm_def->k;
  /* --- f64 x f64 -> f64 --- */
  if ( (i_gemm_def->in_type == LIBXSMM_DATATYPE_F64) &&
       (i_gemm_def->out_type == LIBXSMM_DATATYPE_F64) &&
       (i_gemm_def->comp_type == LIBXSMM_DATATYPE_F64) ) {
    double* d_a = (double*)a;
    double* d_b = (double*)b;
    double* d_c = (double*)c;
    for (l_r = 0; l_r < i_gemm_def->br_count; l_r++) {
      for (l_j = 0; l_j < n; l_j++) {
        for (l_i = 0; l_i < m; l_i++) {
          /* beta == 0: clear C once, before the first batch-reduce slice. */
          if ( (i_gemm_def->beta == 0) && (l_r == 0) ) {
            d_c[(l_j * ldc) + l_i] = 0.0;
          }
          for (l_s = 0; l_s < k; l_s++) {
            if ( i_gemm_def->trans_b == 0 ) {
              d_c[(l_j * ldc) + l_i] += d_a[(l_r * lda * k) + ((l_s * lda) + l_i)] * d_b[(l_r * ldb * n) + ((l_j * ldb) + l_s)];
            } else {
              /* B transposed: swap row/column roles of B's indexing. */
              d_c[(l_j * ldc) + l_i] += d_a[(l_r * lda * k) + ((l_s * lda) + l_i)] * d_b[(l_r * ldb * k) + ((l_s * ldb) + l_j)];
            }
          }
        }
      }
    }
  /* --- f32 x f32 -> f32 --- */
  } else if ( (i_gemm_def->in_type == LIBXSMM_DATATYPE_F32) &&
              (i_gemm_def->out_type == LIBXSMM_DATATYPE_F32) &&
              (i_gemm_def->comp_type == LIBXSMM_DATATYPE_F32) ) {
    float* f_a = (float*)a;
    float* f_b = (float*)b;
    float* f_c = (float*)c;
    for (l_r = 0; l_r < i_gemm_def->br_count; l_r++) {
      for (l_j = 0; l_j < n; l_j++) {
        for (l_i = 0; l_i < m; l_i++) {
          if ( (i_gemm_def->beta == 0) && (l_r == 0) ) {
            f_c[(l_j * ldc) + l_i] = 0.0;
          }
          for (l_s = 0; l_s < k; l_s++) {
            if ( i_gemm_def->trans_b == 0 ) {
              f_c[(l_j * ldc) + l_i] += f_a[(l_r * lda * k) + ((l_s * lda) + l_i)] * f_b[(l_r * ldb * n) + ((l_j * ldb) + l_s)];
            } else {
              f_c[(l_j * ldc) + l_i] += f_a[(l_r * lda * k) + ((l_s * lda) + l_i)] * f_b[(l_r * ldb * k) + ((l_s * ldb) + l_j)];
            }
          }
        }
      }
    }
  /* --- i16 x i16 -> i32, A in VNNI2 layout --- */
  } else if ( (i_gemm_def->in_type == LIBXSMM_DATATYPE_I16) &&
              (i_gemm_def->out_type == LIBXSMM_DATATYPE_I32) &&
              (i_gemm_def->comp_type == LIBXSMM_DATATYPE_I32) ) {
    short* s_a = (short*)a;
    short* s_b = (short*)b;
    int* i_c = (int*)c;
    int l_k_block = 2;   /* VNNI block of A along k */
    for (l_r = 0; l_r < i_gemm_def->br_count; l_r++) {
      for (l_j = 0; l_j < n; l_j++) {
        for (l_i = 0; l_i < m; l_i++) {
          if ( (i_gemm_def->beta == 0) && (l_r == 0) ) {
            i_c[(l_j * ldc) + l_i] = 0;
          }
          for (l_s = 0; l_s < (k / l_k_block); l_s++) {
            for (l_k2 = 0; l_k2 < l_k_block; l_k2++) {
              i_c[(l_j * ldc) + l_i] += s_a[(l_r * lda * k) + (l_s * (lda*l_k_block)) + (l_i*l_k_block) + l_k2] *
                                        s_b[(l_r * ldb * n) + (l_j * ldb) + (l_s*l_k_block) + l_k2];
            }
          }
        }
      }
    }
  /* --- u8 x s8 -> i32, A in VNNI4 layout --- */
  } else if ( (i_gemm_def->in_type == LIBXSMM_DATATYPE_I8) &&
              (i_gemm_def->out_type == LIBXSMM_DATATYPE_I32) &&
              (i_gemm_def->comp_type == LIBXSMM_DATATYPE_I32) &&
              (i_gemm_def->unsigned_a == 1) && (i_gemm_def->unsigned_b == 0) ) {
    unsigned char* c_a = (unsigned char*)a;
    char* c_b = (char*)b;
    int* i_c = (int*)c;
    int l_k_block = 4;   /* VNNI block of A along k */
    for (l_r = 0; l_r < i_gemm_def->br_count; l_r++) {
      for (l_j = 0; l_j < n; l_j++) {
        for (l_i = 0; l_i < m; l_i++) {
          if ( (i_gemm_def->beta == 0) && (l_r == 0) ) {
            i_c[(l_j * ldc) + l_i] = 0;
          }
          for (l_s = 0; l_s < (k / l_k_block); l_s++) {
            for (l_k2 = 0; l_k2 < l_k_block; l_k2++) {
              i_c[(l_j * ldc) + l_i] += c_a[(l_r * lda * k) + (l_s * (lda*l_k_block)) + (l_i*l_k_block) + l_k2] *
                                        c_b[(l_r * ldb * n) + (l_j * ldb) + (l_s*l_k_block) + l_k2];
            }
          }
        }
      }
    }
  /* --- s8 x u8 -> i32, A in VNNI4 layout --- */
  } else if ( (i_gemm_def->in_type == LIBXSMM_DATATYPE_I8) &&
              (i_gemm_def->out_type == LIBXSMM_DATATYPE_I32) &&
              (i_gemm_def->comp_type == LIBXSMM_DATATYPE_I32) &&
              (i_gemm_def->unsigned_a == 0) && (i_gemm_def->unsigned_b == 1) ) {
    char* c_a = (char*)a;
    unsigned char* c_b = (unsigned char*)b;
    int* i_c = (int*)c;
    int l_k_block = 4;
    for (l_r = 0; l_r < i_gemm_def->br_count; l_r++) {
      for (l_j = 0; l_j < n; l_j++) {
        for (l_i = 0; l_i < m; l_i++) {
          if ( (i_gemm_def->beta == 0) && (l_r == 0) ) {
            i_c[(l_j * ldc) + l_i] = 0;
          }
          for (l_s = 0; l_s < (k / l_k_block); l_s++) {
            for (l_k2 = 0; l_k2 < l_k_block; l_k2++) {
              i_c[(l_j * ldc) + l_i] += c_a[(l_r * lda * k) + (l_s * (lda*l_k_block)) + (l_i*l_k_block) + l_k2] *
                                        c_b[(l_r * ldb * n) + (l_j * ldb) + (l_s*l_k_block) + l_k2];
            }
          }
        }
      }
    }
  /* --- s8 x u8 -> u8 with i32 accumulation and scf-based downconvert --- */
  } else if ( (i_gemm_def->in_type == LIBXSMM_DATATYPE_I8) &&
              (i_gemm_def->out_type == LIBXSMM_DATATYPE_I8) &&
              (i_gemm_def->comp_type == LIBXSMM_DATATYPE_I32) &&
              (i_gemm_def->unsigned_a == 0) && (i_gemm_def->unsigned_b == 1) && (i_gemm_def->unsigned_c == 1) ) {
    char* c_a = (char*)a;
    unsigned char* c_b = (unsigned char*)b;
    unsigned char* c_c = (unsigned char*)c;
    int l_k_block = 4;
    for (l_r = 0; l_r < i_gemm_def->br_count; l_r++) {
      for (l_j = 0; l_j < n; l_j++) {
        for (l_i = 0; l_i < m; l_i++) {
          int tmp;     /* i32 accumulator */
          float ftmp;  /* scaled value before the u8 store */
          if ( (i_gemm_def->beta == 0) && (l_r == 0) ) {
            tmp = 0;
          } else {
            tmp = (int)c_c[(l_j * ldc) + l_i];
          }
          for (l_s = 0; l_s < (k / l_k_block); l_s++) {
            for (l_k2 = 0; l_k2 < l_k_block; l_k2++) {
              tmp += c_a[(l_r * lda * k) + (l_s * (lda*l_k_block)) + (l_i*l_k_block) + l_k2] *
                     c_b[(l_r * ldb * n) + (l_j * ldb) + (l_s*l_k_block) + l_k2];
            }
          }
          /* Scale the i32 result and truncate to unsigned char. */
          ftmp = (float)tmp;
          ftmp *= i_gemm_def->scf;
          c_c[(l_j * ldc) + l_i] = (unsigned char)ftmp;
        }
      }
    }
  /* --- bf16 x bf16 -> f32; A in VNNI2 layout only when vnni_a is set --- */
  } else if ( (i_gemm_def->in_type == LIBXSMM_DATATYPE_BF16) &&
              (i_gemm_def->out_type == LIBXSMM_DATATYPE_F32) &&
              (i_gemm_def->comp_type == LIBXSMM_DATATYPE_F32) ) {
    libxsmm_bfloat16* h_a = (libxsmm_bfloat16*)a;
    libxsmm_bfloat16* h_b = (libxsmm_bfloat16*)b;
    float* f_c = (float*)c;
    int l_k_block = ( i_gemm_def->vnni_a != 0) ? 2 : 1;
    for (l_r = 0; l_r < i_gemm_def->br_count; l_r++) {
      for (l_j = 0; l_j < n; l_j++) {
        for (l_i = 0; l_i < m; l_i++) {
          if ( (i_gemm_def->beta == 0) && (l_r == 0) ) {
            f_c[(l_j * ldc) + l_i] = 0.0f;
          }
          for (l_s = 0; l_s < (k / l_k_block); l_s++) {
            for (l_k2 = 0; l_k2 < l_k_block; l_k2++) {
              /* Widen bf16 to f32 by placing it in the high half of an f32. */
              union libxsmm_bfloat16_hp tmp_a_f;
              union libxsmm_bfloat16_hp tmp_b_f;
              tmp_a_f.i[0] = 0;
              tmp_a_f.i[1] = h_a[(l_r * lda * k) + (l_s * (lda*l_k_block)) + (l_i*l_k_block) + l_k2];
              tmp_b_f.i[0] = 0;
              tmp_b_f.i[1] = h_b[(l_r * ldb * n) + (l_j * ldb) + (l_s*l_k_block) + l_k2];
              f_c[(l_j * ldc) + l_i] += tmp_a_f.f * tmp_b_f.f;
            }
          }
        }
      }
    }
  /* --- bf16 x bf16 -> bf16 with f32 accumulation and RNE downconvert --- */
  } else if ( (i_gemm_def->in_type == LIBXSMM_DATATYPE_BF16) &&
              (i_gemm_def->out_type == LIBXSMM_DATATYPE_BF16) &&
              (i_gemm_def->comp_type == LIBXSMM_DATATYPE_F32) ) {
    libxsmm_bfloat16* h_a = (libxsmm_bfloat16*)a;
    libxsmm_bfloat16* h_b = (libxsmm_bfloat16*)b;
    libxsmm_bfloat16* h_c = (libxsmm_bfloat16*)c;
    int l_k_block = ( i_gemm_def->vnni_a != 0) ? 2 : 1;
    float acc = 0.0f;
    libxsmm_bfloat16 h_acc;
    for (l_r = 0; l_r < i_gemm_def->br_count; l_r++) {
      for (l_j = 0; l_j < n; l_j++) {
        for (l_i = 0; l_i < m; l_i++) {
          if ( (i_gemm_def->beta == 0) && (l_r == 0) ) {
            acc = 0.0f;
          } else {
            /* Re-widen the current bf16 C value into the f32 accumulator. */
            union libxsmm_bfloat16_hp tmp;
            tmp.i[0] = 0;
            tmp.i[1] = h_c[(l_j * ldc) + l_i];
            acc = tmp.f;
          }
          for (l_s = 0; l_s < (k / l_k_block); l_s++) {
            for (l_k2 = 0; l_k2 < l_k_block; l_k2++) {
              union libxsmm_bfloat16_hp tmp_a_f;
              union libxsmm_bfloat16_hp tmp_b_f;
              tmp_a_f.i[0] = 0;
              tmp_a_f.i[1] = h_a[(l_r * lda * k) + (l_s * (lda*l_k_block)) + (l_i*l_k_block) + l_k2];
              tmp_b_f.i[0] = 0;
              tmp_b_f.i[1] = h_b[(l_r * ldb * n) + (l_j * ldb) + (l_s*l_k_block) + l_k2];
              acc += tmp_a_f.f * tmp_b_f.f;
            }
          }
          /* Round-to-nearest-even conversion back to bf16. */
          libxsmm_rne_convert_fp32_bf16( &acc, &h_acc, 1 );
          h_c[(l_j * ldc) + l_i] = h_acc;
        }
      }
    }
  }
}
/* Maximum absolute elementwise error between the m x n gold matrix and the
 * computed matrix (both with leading dimension ld). F64/F32 delegate to
 * libxsmm_matdiff; BF16/I32/I8 are compared by hand. An unsupported dtype
 * returns the sentinel 100.0 so callers treat it as a failure. */
double check_matrix( libxsmm_datatype dtype, void* data_gold, void* data, libxsmm_blasint ld, libxsmm_blasint m, libxsmm_blasint n ) {
  libxsmm_matdiff_info l_diff;
  double max_error = 0.0;
  libxsmm_matdiff_clear(&l_diff);
  if ( dtype == LIBXSMM_DATATYPE_F64 ) {
    libxsmm_matdiff(&l_diff, LIBXSMM_DATATYPE_F64, m, n, data_gold, data, &ld, &ld);
    max_error = l_diff.linf_abs;   /* L-infinity norm of the difference */
  } else if ( dtype == LIBXSMM_DATATYPE_F32 ) {
    libxsmm_matdiff(&l_diff, LIBXSMM_DATATYPE_F32, m, n, data_gold, data, &ld, &ld);
    max_error = l_diff.linf_abs;
  } else if ( dtype == LIBXSMM_DATATYPE_BF16 ) {
    /* Widen each bf16 to f32 (high half of the union) and compare in double. */
    unsigned int l_i, l_j;
    libxsmm_bfloat16* h_data = (libxsmm_bfloat16*)data;
    libxsmm_bfloat16* h_data_gold = (libxsmm_bfloat16*)data_gold;
    for (l_i = 0; l_i < m; l_i++) {
      for (l_j = 0; l_j < n; l_j++) {
        union libxsmm_bfloat16_hp tmp_c;
        union libxsmm_bfloat16_hp tmp_gold;
        double l_fabs;
        tmp_c.i[1] = h_data[(l_j * ld) + l_i];
        tmp_c.i[0] = 0;
        tmp_gold.i[1] = h_data_gold[(l_j * ld) + l_i];
        tmp_gold.i[0] = 0;
        l_fabs = fabs((double)tmp_gold.f - (double)tmp_c.f);
        if (max_error < l_fabs) max_error = l_fabs;
      }
    }
  } else if ( dtype == LIBXSMM_DATATYPE_I32 ) {
    unsigned int l_i, l_j;
    int* l_data = (int*)data;
    int* l_data_gold = (int*)data_gold;
    for (l_i = 0; l_i < m; l_i++) {
      for (l_j = 0; l_j < n; l_j++) {
        const double l_fabs = fabs((double)l_data_gold[(l_j * ld) + l_i] - (double)l_data[(l_j * ld) + l_i]);
        if (max_error < l_fabs) max_error = l_fabs;
      }
    }
  } else if ( dtype == LIBXSMM_DATATYPE_I8 ) {
    /* I8 results are compared as unsigned bytes (matches the u8 output path). */
    unsigned int l_i, l_j;
    unsigned char* l_data = (unsigned char*)data;
    unsigned char* l_data_gold = (unsigned char*)data_gold;
    for (l_i = 0; l_i < m; l_i++) {
      for (l_j = 0; l_j < n; l_j++) {
        const double l_fabs = fabs((double)l_data_gold[(l_j * ld) + l_i] - (double)l_data[(l_j * ld) + l_i]);
        if (max_error < l_fabs) max_error = l_fabs;
      }
    }
  } else {
    max_error = 100.0;   /* sentinel: unsupported datatype */
  }
  return max_error;
}
double jit_matmul( const gemm_def* i_gemm_def,
const void* i_a,
const void* i_b,
void* o_c,
void* o_c_perf,
const int i_reps,
const unsigned int i_print_jit_info ) {
/* define function pointer */
libxsmm_xmmfunction l_test_jit = { NULL };
libxsmm_xmmfunction cfg_tr = { NULL };
libxsmm_xmmfunction rls_tr = { NULL };
libxsmm_timer_tickint l_start;
libxsmm_mmkernel_info l_info;
libxsmm_gemm_shape l_shape;
libxsmm_gemm_batch_reduce_config l_brconfig;
libxsmm_gemm_ext_unary_argops l_argops;
libxsmm_gemm_ext_binary_postops l_postops;
libxsmm_bitfield l_flags = LIBXSMM_GEMM_FLAGS('N', 'N');
libxsmm_bitfield l_prefetch_flags = 0;
#if defined(USE_GEMM_EXT_FRONTEND)
libxsmm_gemm_ext_param gemm_param;
#else
libxsmm_gemm_param gemm_param;
#endif
double l_jittime, l_runtime;
size_t l_t, l_r;
char** l_a_addr = (char**)malloc(i_gemm_def->br_count*sizeof(char*));
char** l_b_addr = (char**)malloc(i_gemm_def->br_count*sizeof(char*));
unsigned long long* l_a_offs = (unsigned long long*)malloc(i_gemm_def->br_count*sizeof(unsigned long long));
unsigned long long* l_b_offs = (unsigned long long*)malloc(i_gemm_def->br_count*sizeof(unsigned long long));
double l_beta = i_gemm_def->beta;
unsigned long long l_br = (unsigned long long)i_gemm_def->br_count;
int l_cfg_flags = 0;
int l_rls_flags = 0;
if (0 == i_gemm_def) {
fprintf(stderr, "JIT: unsupported descriptor arguments or data type!\n");
return EXIT_FAILURE;
}
/* setup brgemm offsets */
if ( i_gemm_def->br_type == 2 ) {
for ( l_r = 0 ; l_r < i_gemm_def->br_count; l_r++ ) {
l_a_offs[l_r] = l_r * (size_t)i_gemm_def->lda * (size_t)i_gemm_def->k * LIBXSMM_TYPESIZE(i_gemm_def->in_type);
if (i_gemm_def->trans_b == 0) {
l_b_offs[l_r] = l_r * (size_t)i_gemm_def->ldb * (size_t)i_gemm_def->n * LIBXSMM_TYPESIZE(i_gemm_def->in_type);
} else {
l_b_offs[l_r] = l_r * (size_t)i_gemm_def->ldb * (size_t)i_gemm_def->k * LIBXSMM_TYPESIZE(i_gemm_def->in_type);
}
}
}
/* set up the flags */
if ( i_gemm_def->trans_b != 0 ) {
l_flags |= LIBXSMM_GEMM_FLAG_TRANS_B;
}
if ( i_gemm_def->trans_a != 0 ) {
fprintf(stderr, "trans_a needs to be 0\n");
return EXIT_FAILURE;
}
if ( i_gemm_def->vnni_a != 0 ) {
l_flags |= LIBXSMM_GEMM_FLAG_VNNI_A;
}
if ( i_gemm_def->unsigned_a != 0 ) {
l_flags |= LIBXSMM_GEMM_FLAG_A_UNSIGNED;
}
if ( i_gemm_def->unsigned_b != 0 ) {
l_flags |= LIBXSMM_GEMM_FLAG_B_UNSIGNED;
}
l_flags |= (0 != i_gemm_def->aligned_a ? LIBXSMM_GEMM_FLAG_ALIGN_A : 0);
l_flags |= (0 != i_gemm_def->aligned_c ? LIBXSMM_GEMM_FLAG_ALIGN_C : 0);
l_flags |= ( l_beta == 0 ) ? LIBXSMM_GEMM_FLAG_BETA_0 : 0;
/* setting update GEMM struct */
l_shape = libxsmm_create_gemm_shape( i_gemm_def->m, i_gemm_def->n, i_gemm_def->k,
i_gemm_def->lda, i_gemm_def->ldb, i_gemm_def->ldc,
i_gemm_def->in_type, i_gemm_def->in_type, i_gemm_def->out_type, i_gemm_def->comp_type );
/* setting BRGEMM config struct */
if (i_gemm_def->br_type == 1) {
l_brconfig.br_type = LIBXSMM_GEMM_BATCH_REDUCE_ADDRESS;
l_brconfig.br_stride_a_hint = 0;
l_brconfig.br_stride_b_hint = 0;
l_brconfig.br_unroll_hint = ( i_gemm_def->br_unroll == 0 ) ? 0 : i_gemm_def->br_count;
} else if (i_gemm_def->br_type == 2) {
l_brconfig.br_type = LIBXSMM_GEMM_BATCH_REDUCE_OFFSET;
l_brconfig.br_stride_a_hint = 0;
l_brconfig.br_stride_b_hint = 0;
l_brconfig.br_unroll_hint = ( i_gemm_def->br_unroll == 0 ) ? 0 : i_gemm_def->br_count;
} else if (i_gemm_def->br_type == 3) {
l_brconfig.br_type = LIBXSMM_GEMM_BATCH_REDUCE_STRIDE;
l_brconfig.br_stride_a_hint = i_gemm_def->lda*i_gemm_def->k*LIBXSMM_TYPESIZE(i_gemm_def->in_type);
l_brconfig.br_stride_b_hint = (i_gemm_def->trans_b == 0) ? i_gemm_def->ldb*i_gemm_def->n*LIBXSMM_TYPESIZE(i_gemm_def->in_type) : i_gemm_def->ldb*i_gemm_def->k*LIBXSMM_TYPESIZE(i_gemm_def->in_type);
l_brconfig.br_unroll_hint = ( i_gemm_def->br_unroll == 0 ) ? 0 : i_gemm_def->br_count;
} else {
l_brconfig.br_type = LIBXSMM_GEMM_BATCH_REDUCE_NONE;
l_brconfig.br_stride_a_hint = 0;
l_brconfig.br_stride_b_hint = 0;
l_brconfig.br_unroll_hint = 0;
}
/* setting prefetch flags */
l_prefetch_flags = i_gemm_def->prefetch;
/* setting ext structs to 0 */
memset( &l_argops, 0, sizeof(libxsmm_gemm_ext_unary_argops) );
memset( &l_postops, 0, sizeof(libxsmm_gemm_ext_binary_postops) );
l_start = libxsmm_timer_tick();
if (i_gemm_def->tc_config) {
l_cfg_flags = LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | l_flags;
l_rls_flags = LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG | l_flags;
l_flags |= (LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG | LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG);
cfg_tr.gemm = libxsmm_dispatch_brgemm_v2( l_shape, l_cfg_flags, l_prefetch_flags, l_brconfig );
rls_tr.gemm = libxsmm_dispatch_brgemm_v2( l_shape, l_rls_flags, l_prefetch_flags, l_brconfig );
}
#if defined(USE_GEMM_EXT_FRONTEND)
l_test_jit.gemm_ext = libxsmm_dispatch_brgemm_ext_v2( l_shape, l_flags, l_prefetch_flags, l_brconfig, l_argops, l_postops );
#else
l_test_jit.gemm = libxsmm_dispatch_brgemm_v2( l_shape, l_flags, l_prefetch_flags, l_brconfig );
#endif
l_jittime = libxsmm_timer_duration(l_start, libxsmm_timer_tick());
if (l_test_jit.xmm == 0) {
printf("JIT failed, please run with LIBXSMM_VERBOSE=-1 and/or with debug mode LIBXSMM library!\n");
exit(EXIT_FAILURE);
}
/* receive kernel information */
libxsmm_get_mmkernel_info(l_test_jit, &l_info);
/* run external tileconfig */
if (i_gemm_def->tc_config) {
cfg_tr.gemm( NULL );
}
/* reset GEMM parameter */
#if defined(USE_GEMM_EXT_FRONTEND)
memset( &gemm_param, 0, sizeof(libxsmm_gemm_ext_param) );
#else
memset( &gemm_param, 0, sizeof(libxsmm_gemm_param) );
#endif
gemm_param.op.tertiary = &l_br;
gemm_param.c.primary = (void*)o_c;
gemm_param.c.tertiary = (void*)(( i_gemm_def->unsigned_c != 0 ) ? &(i_gemm_def->scf) : NULL);
/* run correctness */
if (i_gemm_def->br_type == 0) {
gemm_param.a.primary = (void*)i_a;
gemm_param.b.primary = (void*)i_b;
if ( l_info.prefetch != LIBXSMM_GEMM_PREFETCH_NONE ) {
gemm_param.a.quaternary = (void*)i_a;
gemm_param.b.quaternary = (void*)i_b;
gemm_param.c.quaternary = (void*)o_c;
}
#if defined(USE_GEMM_EXT_FRONTEND)
l_test_jit.gemm_ext( &gemm_param );
#else
l_test_jit.gemm( &gemm_param );
#endif
} else if (i_gemm_def->br_type == 1) {
gemm_param.a.primary = l_a_addr;
gemm_param.b.primary = l_b_addr;
for ( l_r = 0 ; l_r < i_gemm_def->br_count; l_r++ ) {
l_a_addr[l_r] = (char*)i_a + (l_r * (size_t)i_gemm_def->lda * (size_t)i_gemm_def->k * LIBXSMM_TYPESIZE(i_gemm_def->in_type));
if (i_gemm_def->trans_b == 0) {
l_b_addr[l_r] = (char*)i_b + (l_r * (size_t)i_gemm_def->ldb * (size_t)i_gemm_def->n * LIBXSMM_TYPESIZE(i_gemm_def->in_type));
} else {
l_b_addr[l_r] = (char*)i_b + (l_r * (size_t)i_gemm_def->ldb * (size_t)i_gemm_def->k * LIBXSMM_TYPESIZE(i_gemm_def->in_type));
}
}
#if defined(USE_GEMM_EXT_FRONTEND)
l_test_jit.gemm_ext( &gemm_param );
#else
l_test_jit.gemm( &gemm_param );
#endif
} else if (i_gemm_def->br_type == 2) {
gemm_param.a.primary = (void*)i_a;
gemm_param.a.secondary = l_a_offs;
gemm_param.b.primary = (void*)i_b;
gemm_param.b.secondary = l_b_offs;
#if defined(USE_GEMM_EXT_FRONTEND)
l_test_jit.gemm_ext( &gemm_param );
#else
l_test_jit.gemm( &gemm_param );
#endif
} else if (i_gemm_def->br_type == 3) {
gemm_param.a.primary = (void*)i_a;
gemm_param.b.primary = (void*)i_b;
#if defined(USE_GEMM_EXT_FRONTEND)
test_jit.gemm_ext( &gemm_param );
#else
l_test_jit.gemm( &gemm_param );
#endif
}
/* run performance */
gemm_param.c.primary = (void*)o_c_perf;
l_start = libxsmm_timer_tick();
if (i_gemm_def->br_type == 0) {
gemm_param.a.primary = (void*)i_a;
gemm_param.b.primary = (void*)i_b;
if ( l_info.prefetch != LIBXSMM_GEMM_PREFETCH_NONE ) {
gemm_param.a.quaternary = (void*)i_a;
gemm_param.b.quaternary = (void*)i_b;
gemm_param.c.quaternary = (void*)o_c_perf;
}
for (l_t = 0; l_t < i_reps; l_t++) {
#if defined(USE_GEMM_EXT_FRONTEND)
l_test_jit.gemm_ext( &gemm_param );
#else
l_test_jit.gemm( &gemm_param );
#endif
}
} else if (i_gemm_def->br_type == 1) {
gemm_param.a.primary = l_a_addr;
gemm_param.b.primary = l_b_addr;
for (l_t = 0; l_t < i_reps; l_t++) {
for ( l_r = 0 ; l_r < i_gemm_def->br_count; l_r++ ) {
l_a_addr[l_r] = (char*)i_a + (l_r * (size_t)i_gemm_def->lda * (size_t)i_gemm_def->k * LIBXSMM_TYPESIZE(i_gemm_def->in_type));
if (i_gemm_def->trans_b == 0) {
l_b_addr[l_r] = (char*)i_b + (l_r * (size_t)i_gemm_def->ldb * (size_t)i_gemm_def->n * LIBXSMM_TYPESIZE(i_gemm_def->in_type));
} else {
l_b_addr[l_r] = (char*)i_b + (l_r * (size_t)i_gemm_def->ldb * (size_t)i_gemm_def->k * LIBXSMM_TYPESIZE(i_gemm_def->in_type));
}
}
#if defined(USE_GEMM_EXT_FRONTEND)
l_test_jit.gemm_ext( &gemm_param );
#else
l_test_jit.gemm( &gemm_param );
#endif
}
} else if (i_gemm_def->br_type == 2) {
gemm_param.a.primary = (void*)i_a;
gemm_param.a.secondary = l_a_offs;
gemm_param.b.primary = (void*)i_b;
gemm_param.b.secondary = l_b_offs;
for (l_t = 0; l_t < i_reps; l_t++) {
#if defined(USE_GEMM_EXT_FRONTEND)
l_test_jit.gemm_ext( &gemm_param );
#else
l_test_jit.gemm( &gemm_param );
#endif
}
} else if (i_gemm_def->br_type == 3) {
gemm_param.a.primary = (void*)i_a;
gemm_param.b.primary = (void*)i_b;
for (l_t = 0; l_t < i_reps; l_t++) {
#if defined(USE_GEMM_EXT_FRONTEND)
l_test_jit.gemm_ext( &gemm_param );
#else
l_test_jit.gemm( &gemm_param );
#endif
}
}
l_runtime = libxsmm_timer_duration(l_start, libxsmm_timer_tick());
/* run external tilerelease */
if (i_gemm_def->tc_config) {
rls_tr.gemm( NULL );
}
if ( i_print_jit_info == 0 ) {
printf("function pointer address: %llx\n", (unsigned long long)l_test_jit.xmm);
printf("%fs for creating jit\n", l_jittime);
}
free( (void*)l_a_addr );
free( (void*)l_b_addr );
free( (void*)l_a_offs );
free( (void*)l_b_offs );
return l_runtime;
}
/* Print command-line usage for both invocation modes:
 * (1) explicit sizes with correctness + performance check;
 * (2) sizes read from a file, performance-focused. */
void print_help(void) {
  printf("\n\n");
  printf("1. Usage (dense*dense=dense, correctness and performance):\n");
  printf("    M\n");
  printf("    N\n");
  printf("    K\n");
  printf("    LDA\n");
  printf("    LDB\n");
  printf("    LDC\n");
  printf("    alpha: 1\n");
  printf("    beta: 0 or 1\n");
  printf("    0: unaligned A, otherwise aligned\n");
  printf("    0: unaligned C, otherwise aligned\n");
  printf("    0: A normal, 1: A trans\n");
  printf("    0: B normal, 1: B trans\n");
  printf("    PREFETCH: nopf (none), pfsigonly, BL2viaC, AL2, curAL2, AL2_BL2viaC, curAL2_BL2viaC\n");
  printf("    PRECISION: SP, DP, I16I32, USI8I32, SUI8I32, SUI8UI8, BF16F32, BF16, BF16F32_FLAT, BF16_FLAT\n");
  printf("    BRGEMM: nobr, addrbr, offsbr, strdbr\n");
  printf("    BRsize: 1 - N\n");
  printf("    BRunroll: 0/1\n");
  printf("    #repetitions\n");
  printf("    tile configuration: 1 - external, 0 - internal\n");
  printf("\n\n");
  printf("2. Usage (dense*dense=dense, performance only option available):\n");
  /* fixed typo: "space-sperated" -> "space-separated" */
  printf("    filename with space-separated sizes (M N K LDA LDB LDC)\n");
  printf("    alpha: 1\n");
  printf("    beta: 0 or 1\n");
  printf("    0: unaligned A, otherwise aligned\n");
  printf("    0: unaligned C, otherwise aligned\n");
  printf("    0: A normal, 1: A trans\n");
  printf("    0: B normal, 1: B trans\n");
  printf("    PRECISION: SP, DP, I16I32, USI8I32, SUI8I32, SUI8UI8, BF16F32, BF16, BF16F32_FLAT, BF16_FLAT\n");
  printf("    BRGEMM: nobr, addrbr, offsbr, strdbr\n");
  printf("    BRsize: 1 - N\n");
  printf("    BRunroll: 0/1\n");
  printf("    #repetitions\n");
  printf("    0: no check, otherwise: run check\n");
  printf("    tile configuration: 1 - external, 0 - internal\n");
  printf("\n\n");
}
/* Test driver: parse the command line (two modes, see print_help), build the
 * GEMM descriptor, then for each problem size allocate/initialize matrices,
 * run the reference and JIT kernels (optionally multi-threaded via OpenMP),
 * and report performance plus the maximum observed error. */
int main(int argc, char* argv []) {
  char* l_precision = NULL;
  libxsmm_blasint l_lda = 0, l_ldb = 0, l_ldc = 0;
  libxsmm_blasint l_m = 0, l_n = 0, l_k = 0;
  int l_aligned_a = 0;
  int l_aligned_c = 0;
  int l_trans_a = 0;
  int l_trans_b = 0;
  double l_alpha = 0;
  double l_beta = 0;
  int l_br = 1;
  int l_br_type = 0;
  int l_br_unroll = 0;
  double l_runtime_libxsmm = 0;
  int l_file_input = 0;
  char* l_file_name = NULL;
  FILE *l_file_handle = NULL;
  int l_run_check = 0;
  double l_total_max_error = 0.0;
  int l_tc_config = 0;
  int l_reps;
  libxsmm_gemm_prefetch_type l_prefetch = LIBXSMM_GEMM_PREFETCH_NONE;
  gemm_def l_gemm_def;
  int l_n_threads = 1;
# if defined(__APPLE__) && defined(__arm64__)
# if 1
  pthread_set_qos_class_self_np( QOS_CLASS_USER_INTERACTIVE, 0 );
# else
  pthread_set_qos_class_self_np( QOS_CLASS_BACKGROUND, 0 );
# endif
# endif
  /* check argument count for a valid range */
  if ( argc == 20 || argc == 19 ) {
    /* mode 1: explicit sizes on the command line */
    l_m = atoi(argv[1]);
    l_n = atoi(argv[2]);
    l_k = atoi(argv[3]);
    l_lda = atoi(argv[4]);
    l_ldb = atoi(argv[5]);
    l_ldc = atoi(argv[6]);
    /* some sugar */
    l_alpha = atof(argv[7]);
    l_beta = atof(argv[8]);
    l_aligned_a = atoi(argv[9]);
    l_aligned_c = atoi(argv[10]);
    l_trans_a = atoi(argv[11]);
    l_trans_b = atoi(argv[12]);
    /* arch specific stuff */
    l_precision = argv[14];
    l_br = atoi(argv[16]);
    l_br_unroll = atoi(argv[17]);
    l_reps = atoi(argv[18]);
    if ( argc == 20 ) {
      l_tc_config = atoi(argv[19]);
    } else {
      l_tc_config = 0;
    }
    /* set value of prefetch flag */
    if (strcmp("nopf", argv[13]) == 0) {
      l_prefetch = LIBXSMM_GEMM_PREFETCH_NONE;
    }
    else if (strcmp("pfsigonly", argv[13]) == 0) {
      l_prefetch = LIBXSMM_GEMM_PREFETCH_SIGONLY;
    }
    else if (strcmp("BL2viaC", argv[13]) == 0) {
      l_prefetch = LIBXSMM_GEMM_PREFETCH_BL2_VIA_C;
    }
    else if (strcmp("curAL2", argv[13]) == 0) {
      l_prefetch = LIBXSMM_GEMM_PREFETCH_AL2_AHEAD;
    }
    else if (strcmp("curAL2_BL2viaC", argv[13]) == 0) {
      l_prefetch = LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD;
    }
    else if (strcmp("AL2", argv[13]) == 0) {
      l_prefetch = LIBXSMM_GEMM_PREFETCH_AL2;
    }
    else if (strcmp("AL2_BL2viaC", argv[13]) == 0) {
      l_prefetch = LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C;
    }
    else {
      print_help();
      return EXIT_FAILURE;
    }
    /* batch-reduce variant */
    if (strcmp("nobr", argv[15]) == 0) {
      l_br_type = 0;
    }
    else if (strcmp("addrbr", argv[15]) == 0) {
      l_br_type = 1;
    }
    else if (strcmp("offsbr", argv[15]) == 0) {
      l_br_type = 2;
    }
    else if (strcmp("strdbr", argv[15]) == 0) {
      l_br_type = 3;
    }
    else {
      print_help();
      return EXIT_FAILURE;
    }
    l_file_input = 0;
    l_run_check = 1;
  } else if ( argc == 15 || argc == 14 ) {
    /* mode 2: sizes read from a file, one "M N K LDA LDB LDC" line per problem */
    l_file_input = 1;
    l_file_name = argv[1];
    l_alpha = atof(argv[2]);
    l_beta = atof(argv[3]);
    l_aligned_a = atoi(argv[4]);
    l_aligned_c = atoi(argv[5]);
    l_trans_a = atoi(argv[6]);
    l_trans_b = atoi(argv[7]);
    l_precision = argv[8];
    l_br = atoi(argv[10]);
    l_br_unroll = atoi(argv[11]);
    if ( argc == 15 ) {
      l_tc_config = atoi(argv[14]);
    } else {
      l_tc_config = 0;
    }
    if (strcmp("nobr", argv[9]) == 0) {
      l_br_type = 0;
    }
    else if (strcmp("addrbr", argv[9]) == 0) {
      l_br_type = 1;
    }
    else if (strcmp("offsbr", argv[9]) == 0) {
      l_br_type = 2;
    }
    else if (strcmp("strdbr", argv[9]) == 0) {
      l_br_type = 3;
    }
    else {
      print_help();
      return EXIT_FAILURE;
    }
    l_reps = atoi(argv[12]);
    l_run_check = atoi(argv[13]);
    l_prefetch = LIBXSMM_GEMM_PREFETCH_NONE;
  } else {
    print_help();
    return EXIT_FAILURE;
  }
  /* external tile configuration only makes sense on SPR/AMX-class hardware */
  const char *env_arch = getenv("LIBXSMM_TARGET");
  /* guard against NULL: without it, an unset LIBXSMM_TARGET made
     "NULL == libxsmm_stristr(NULL, ...)" spuriously true */
  const int is_env_SPR = (env_arch != NULL) && (
      env_arch == libxsmm_stristr(env_arch, "spr") ||
      env_arch == libxsmm_stristr(env_arch, "amx"));
  int arch_cpuid = libxsmm_cpuid();
  if ((!is_env_SPR && arch_cpuid < LIBXSMM_X86_AVX512_SPR)
      && (l_tc_config)) {
    printf("Warning: external tile configuration will be ignored\n");
    l_tc_config = 0;
  }
  /* normalize batch-reduce parameters */
  l_br = (l_br < 1) ? 1 : l_br;
  l_br = (l_br_type == 0) ? 1 : l_br;
  l_br_unroll = (l_br_type == 0) ? 0 : l_br_unroll;
  /* check alpha */
  if ( LIBXSMM_NEQ(l_alpha, 1.0) ) {
    fprintf(stderr, "JIT: alpha needs to be 1.0!\n");
    exit(EXIT_FAILURE);
  }
  /* check beta */
  if ( LIBXSMM_NEQ(l_beta, 0.0) && LIBXSMM_NEQ(l_beta, 1.0) ) {
    fprintf(stderr, "JIT: beta needs to be 0.0 or 1.0!\n");
    exit(EXIT_FAILURE);
  }
  /* setting static GEMM parameters */
  l_gemm_def.alpha = l_alpha;
  l_gemm_def.beta = l_beta;
  l_gemm_def.trans_a = l_trans_a;
  l_gemm_def.trans_b = l_trans_b;
  l_gemm_def.vnni_a = 0;
  l_gemm_def.vnni_b = 0;
  l_gemm_def.vnni_c = 0;
  l_gemm_def.unsigned_a = 0;
  l_gemm_def.unsigned_b = 0;
  l_gemm_def.unsigned_c = 0;
  l_gemm_def.aligned_a = l_aligned_a;
  l_gemm_def.aligned_c = l_aligned_c;
  l_gemm_def.prefetch = l_prefetch;
  l_gemm_def.br_type = l_br_type;
  l_gemm_def.br_count = l_br;
  l_gemm_def.br_unroll = l_br_unroll;
  l_gemm_def.tc_config = l_tc_config;
  l_gemm_def.scf = 0.0;
  /* setting precision in GEMM struct; low-precision modes force VNNI-A and
     no transposes as required by the respective kernels */
  if ( (strcmp(l_precision, "DP") == 0) ) {
    l_gemm_def.in_type = LIBXSMM_DATATYPE_F64;
    l_gemm_def.out_type = LIBXSMM_DATATYPE_F64;
    l_gemm_def.comp_type = LIBXSMM_DATATYPE_F64;
  } else if ( (strcmp(l_precision, "SP") == 0) ) {
    l_gemm_def.in_type = LIBXSMM_DATATYPE_F32;
    l_gemm_def.out_type = LIBXSMM_DATATYPE_F32;
    l_gemm_def.comp_type = LIBXSMM_DATATYPE_F32;
  } else if ( (strcmp(l_precision, "I16I32") == 0) ) {
    l_gemm_def.in_type = LIBXSMM_DATATYPE_I16;
    l_gemm_def.out_type = LIBXSMM_DATATYPE_I32;
    l_gemm_def.comp_type = LIBXSMM_DATATYPE_I32;
    l_gemm_def.vnni_a = 1;
    l_gemm_def.trans_a = 0;
    l_gemm_def.trans_b = 0;
  } else if (strcmp(l_precision, "USI8I32") == 0) {
    l_gemm_def.in_type = LIBXSMM_DATATYPE_I8;
    l_gemm_def.out_type = LIBXSMM_DATATYPE_I32;
    l_gemm_def.comp_type = LIBXSMM_DATATYPE_I32;
    l_gemm_def.vnni_a = 1;
    l_gemm_def.trans_a = 0;
    l_gemm_def.trans_b = 0;
    l_gemm_def.unsigned_a = 1;
  } else if (strcmp(l_precision, "SUI8I32") == 0) {
    l_gemm_def.in_type = LIBXSMM_DATATYPE_I8;
    l_gemm_def.out_type = LIBXSMM_DATATYPE_I32;
    l_gemm_def.comp_type = LIBXSMM_DATATYPE_I32;
    l_gemm_def.vnni_a = 1;
    l_gemm_def.trans_a = 0;
    l_gemm_def.trans_b = 0;
    l_gemm_def.unsigned_b = 1;
  } else if (strcmp(l_precision, "SUI8UI8") == 0) {
    l_gemm_def.in_type = LIBXSMM_DATATYPE_I8;
    l_gemm_def.out_type = LIBXSMM_DATATYPE_I32;
    l_gemm_def.comp_type = LIBXSMM_DATATYPE_I32;
    l_gemm_def.vnni_a = 1;
    l_gemm_def.trans_a = 0;
    l_gemm_def.trans_b = 0;
    l_gemm_def.unsigned_b = 1;
    l_gemm_def.unsigned_c = 1;
    l_gemm_def.scf = 1.0f;
  } else if (strcmp(l_precision, "BF16F32") == 0) {
    l_gemm_def.in_type = LIBXSMM_DATATYPE_BF16;
    l_gemm_def.out_type = LIBXSMM_DATATYPE_F32;
    l_gemm_def.comp_type = LIBXSMM_DATATYPE_F32;
    l_gemm_def.vnni_a = 1;
    l_gemm_def.trans_a = 0;
    l_gemm_def.trans_b = 0;
  } else if (strcmp(l_precision, "BF16") == 0) {
    l_gemm_def.in_type = LIBXSMM_DATATYPE_BF16;
    l_gemm_def.out_type = LIBXSMM_DATATYPE_BF16;
    l_gemm_def.comp_type = LIBXSMM_DATATYPE_F32;
    l_gemm_def.vnni_a = 1;
    l_gemm_def.trans_a = 0;
    l_gemm_def.trans_b = 0;
  } else if (strcmp(l_precision, "BF16F32_FLAT") == 0) {
    l_gemm_def.in_type = LIBXSMM_DATATYPE_BF16;
    l_gemm_def.out_type = LIBXSMM_DATATYPE_F32;
    l_gemm_def.comp_type = LIBXSMM_DATATYPE_F32;
  } else if (strcmp(l_precision, "BF16_FLAT") == 0) {
    l_gemm_def.in_type = LIBXSMM_DATATYPE_BF16;
    l_gemm_def.out_type = LIBXSMM_DATATYPE_BF16;
    l_gemm_def.comp_type = LIBXSMM_DATATYPE_F32;
  } else {
    fprintf(stderr, "Unsupported precision %s!\n", l_precision);
    exit(EXIT_FAILURE);
  }
  if ( l_file_input != 0 ) {
    l_file_handle = fopen( l_file_name, "r" );
    /* previously unchecked: a missing file handed NULL to fgets/fclose */
    if ( NULL == l_file_handle ) {
      fprintf(stderr, "could not open input file %s!\n", l_file_name);
      exit(EXIT_FAILURE);
    }
  } else {
    if ( l_trans_b == 0 ) {
      printf("------------------------------------------------\n");
      printf("RUNNING (%ix%i) X (%ix%i) = (%ix%i), %s, BR=%i\n", l_m, l_k, l_k, l_n, l_m, l_n, l_precision, l_br);
      printf("------------------------------------------------\n");
    } else {
      printf("------------------------------------------------\n");
      printf("RUNNING (%ix%i) X (%ix%i)^T = (%ix%i), %s, BR=%i\n", l_m, l_k, l_k, l_n, l_m, l_n, l_precision, l_br);
      printf("------------------------------------------------\n");
    }
  }
  /* read the number of threads */
#if defined(_OPENMP)
  #pragma omp parallel
  {
    #pragma omp master
    {
      l_n_threads = omp_get_num_threads();
    }
  }
#endif
  unsigned int l_keep_going = 0;
  do {
    double error = 0.0;
    if ( l_file_input != 0 ) {
      char l_line[512];
      if ( fgets( l_line, 512, l_file_handle) == NULL ) {
        l_keep_going = 0;
        break;
      } else {
        l_keep_going = 1;
      }
      if ( 6 != sscanf( l_line, "%i %i %i %i %i %i", &l_m, &l_n, &l_k, &l_lda, &l_ldb, &l_ldc ) ) exit(EXIT_FAILURE);
    }
    l_gemm_def.m = l_m;
    l_gemm_def.n = l_n;
    l_gemm_def.k = l_k;
    l_gemm_def.lda = l_lda;
    l_gemm_def.ldb = l_ldb;
    l_gemm_def.ldc = l_ldc;
    l_runtime_libxsmm = 0;
    /* each thread benchmarks its own private matrices; runtimes are summed
       and averaged over the thread count below */
#if defined(_OPENMP)
    #pragma omp parallel reduction(+:l_runtime_libxsmm)
#endif
    {
      char *l_a, *l_b, *l_c, *l_c_perf, *l_c_gold;
      l_a = (char*)libxsmm_aligned_malloc((size_t)l_lda * (size_t)l_k * (size_t)l_br * LIBXSMM_TYPESIZE(l_gemm_def.in_type), 64);
      if (l_gemm_def.trans_b == 0) {
        l_b = (char*)libxsmm_aligned_malloc((size_t)l_ldb * (size_t)l_n * (size_t)l_br * LIBXSMM_TYPESIZE(l_gemm_def.in_type), 64);
      } else {
        l_b = (char*)libxsmm_aligned_malloc((size_t)l_ldb * (size_t)l_k * (size_t)l_br * LIBXSMM_TYPESIZE(l_gemm_def.in_type), 64);
      }
      l_c = (char*)libxsmm_aligned_malloc((size_t)l_ldc * (size_t)l_n * LIBXSMM_TYPESIZE(l_gemm_def.out_type), 64);
      l_c_perf = (char*)libxsmm_aligned_malloc((size_t)l_ldc * (size_t)l_n * LIBXSMM_TYPESIZE(l_gemm_def.out_type), 64);
      l_c_gold = (char*)libxsmm_aligned_malloc((size_t)l_ldc * (size_t)l_n * LIBXSMM_TYPESIZE(l_gemm_def.out_type), 64);
      init_random_matrix( l_gemm_def.in_type, l_a, l_br, l_lda, l_k );
      if (l_gemm_def.trans_b == 0) {
        init_random_matrix( l_gemm_def.in_type, l_b, l_br, l_ldb, l_n );
      } else {
        init_random_matrix( l_gemm_def.in_type, l_b, l_br, l_ldb, l_k );
      }
      /* beta==0 kernels must overwrite C; fill with garbage to catch
         kernels that accidentally read it */
      if ( l_beta == 0 ) {
        init_garbage_matrix( l_gemm_def.out_type, l_c, 1, l_ldc, l_n );
        init_garbage_matrix( l_gemm_def.out_type, l_c_perf, 1, l_ldc, l_n );
        init_garbage_matrix( l_gemm_def.out_type, l_c_gold, 1, l_ldc, l_n );
      } else {
        init_zero_matrix( l_gemm_def.out_type, l_c, 1, l_ldc, l_n );
        init_zero_matrix( l_gemm_def.out_type, l_c_perf, 1, l_ldc, l_n );
        init_zero_matrix( l_gemm_def.out_type, l_c_gold, 1, l_ldc, l_n );
      }
      /* run gold solution (master thread only; only its buffers are checked) */
      #pragma omp master
      {
        ref_matmul( &l_gemm_def, l_a, l_b, l_c_gold );
      }
      /* run LIBXSMM solution */
      l_runtime_libxsmm = jit_matmul( &l_gemm_def, l_a, l_b, l_c, l_c_perf, l_reps, l_file_input );
      /* run compare */
      #pragma omp master
      {
        error = check_matrix( l_gemm_def.out_type, l_c_gold, l_c, l_ldc, l_m, l_n );
      }
      libxsmm_free(l_a);
      libxsmm_free(l_b);
      libxsmm_free(l_c);
      libxsmm_free(l_c_perf);
      libxsmm_free(l_c_gold);
    }
    l_runtime_libxsmm /= (double)l_n_threads;
    if ( l_file_input == 0 ) {
      printf("%fs for libxsmm\n", l_runtime_libxsmm);
      printf("%f GFLOPS for libxsmm\n", ((double)((double)l_reps * (double)l_m * (double)l_n * (double)l_k * (double)l_br * (double)l_n_threads) * 2.0) / (l_runtime_libxsmm * 1.0e9));
      printf("max. error: %f\n", error);
    } else {
      if ( l_run_check == 1 ) {
        printf("%i %i %i %i %i %i %i %i %i %s %f %f\n", l_m, l_n, l_k, l_lda, l_ldb, l_ldc, l_br, l_br_type, l_br_unroll, l_precision, ((double)((double)l_reps * (double)l_m * (double)l_n * (double)l_k * (double)l_br * (double)l_n_threads) * 2.0) / (l_runtime_libxsmm * 1.0e9), error );
      } else {
        printf("%i %i %i %i %i %i %i %i %i %s %f\n", l_m, l_n, l_k, l_lda, l_ldb, l_ldc, l_br, l_br_type, l_br_unroll, l_precision, ((double)((double)l_reps * (double)l_m * (double)l_n * (double)l_k * (double)l_br * (double)l_n_threads) * 2.0) / (l_runtime_libxsmm * 1.0e9) );
      }
    }
    if ( (l_total_max_error < error) && (l_run_check == 1) ) {
      l_total_max_error = error;
    }
  } while ( l_keep_going );
  if ( l_file_input != 0 ) {
    fclose( l_file_handle );
  } else {
    printf("------------------------------------------------\n");
  }
  /* Print total max error */
  printf("\n\n Total Max Error %f\n\n", l_total_max_error );
  /* BRGEMM accumulates over br_count products, so it earns a looser bound */
  if ( l_total_max_error >= 0.00005 && l_br_type == 0) {
    return EXIT_FAILURE;
  } else if ( l_total_max_error >= 0.0005 && l_br_type > 0) {
    return EXIT_FAILURE;
  } else {
    return EXIT_SUCCESS;
  }
}
|
zuker_traco3.h | void zuker_traco3()
{
int c0,c1,c2,c3,c4,c5,c6,c7,c8,c9,c11,c10,c12,c13,c14,c15;
// tstile
if(1==1)
for( c0 = 0; c0 <= floord(N - 2, 8); c0 += 1)
#pragma omp parallel for schedule(dynamic, 1) shared(c0) private(c1,c2,c3,c4,c5,c6,c7,c8,c9,c11,c10,c12,c13,c14,c15)
for( c1 = (c0 + 1) / 2; c1 <= min(c0, (N - 1) / 16); c1 += 1)
for( c3 = 16 * c0 - 16 * c1 + 1; c3 <= min(min(N - 1, 16 * c1 + 15), 16 * c0 - 16 * c1 + 16); c3 += 1)
for( c4 = max(0, -c1 + (N + 1) / 16 - 1); c4 <= min((N - 1) / 16, -c1 + (N + c3 - 1) / 16); c4 += 1)
for( c6 = max(max(-N + 16 * c1 + 1, -N + c3 + 1), -16 * c4 - 14); c6 <= min(min(0, -N + 16 * c1 + 16), c3 - 16 * c4); c6 += 1) {
if (c3 >= 2 && 2 * c3 >= 16 * c4 + c6 + 3) {
if (c3 >= 4 && c3 >= 16 * c4 + c6 + 1) {
if (16 * c4 + c6 + 1 == c3)
V[(-c3+16*c4+1)][(16*c4+1)] = MIN(W[(-c3+16*c4+1)+1][(-c3+16*c4+2)] + W[(-c3+16*c4+2)+1][(16*c4+1)-1], V[(-c3+16*c4+1)][(16*c4+1)]);
for( c10 = max(-c3 + 16 * c4 + 3, -c6 + 1); c10 <= min(16 * c4 + 14, c3 - c6 - 2); c10 += 1) {
for( c12 = max(16 * c4, c10 + 1); c12 <= min(min(16 * c4 + 15, c3 - c6 - 1), c3 + c10 - 3); c12 += 1)
V[(-c6)][(c3-c6)] = MIN(V[c10][c12] + EFL[(-c6)][(c3-c6)], V[(-c6)][(c3-c6)]);
if (c10 >= 16 * c4)
W[(-c6)][(c3-c6)] += MIN ( MIN(W[(-c6)][c10], W[c10+1][(c3-c6)]), W[(-c6)][(c3-c6)]);
if (16 * c4 + c6 + 15 >= c3)
V[(-c6)][(c3-c6)] = MIN(W[(-c6)+1][c10] + W[c10+1][(c3-c6)-1], V[(-c6)][(c3-c6)]);
}
if (16 * c4 + c6 + 15 >= c3) {
W[(-c6)][(c3-c6)] += MIN ( MIN(W[(-c6)][(c3-c6-1)], W[(c3-c6-1)+1][(c3-c6)]), W[(-c6)][(c3-c6)]);
} else {
W[(-c6)][(c3-c6)] += MIN ( MIN(W[(-c6)][(16*c4+15)], W[(16*c4+15)+1][(c3-c6)]), W[(-c6)][(c3-c6)]);
}
} else if (c1 == c0 && c3 <= 3) {
if (c3 == 3 && 16 * c4 + c6 >= 2)
V[(-c6)][(-c6+3)] = MIN(W[(-c6)+1][(-c6+1)] + W[(-c6+1)+1][(-c6+3)-1], V[(-c6)][(-c6+3)]);
for( c10 = max(16 * c4, -c6 + 1); c10 <= min(16 * c4 + 15, c3 - c6 - 1); c10 += 1) {
W[(-c6)][(c3-c6)] += MIN ( MIN(W[(-c6)][c10], W[c10+1][(c3-c6)]), W[(-c6)][(c3-c6)]);
if (c3 == 3 && 16 * c4 + c6 >= -12 && c6 + c10 == 1)
V[(-c6)][(-c6+3)] = MIN(W[(-c6)+1][(-c6+1)] + W[(-c6+1)+1][(-c6+3)-1], V[(-c6)][(-c6+3)]);
}
} else {
for( c10 = -c3 + 16 * c4 + 1; c10 < 16 * c4 - 1; c10 += 1)
V[(-c3+16*c4)][16*c4] = MIN(W[(-c3+16*c4)+1][c10] + W[c10+1][16*c4-1], V[(-c3+16*c4)][16*c4]);
}
}
if (16 * c4 + c6 + 15 >= c3) {
V[(-c6)][(c3-c6)] = MIN( MIN (V[(-c6)+1][(c3-c6)-1], EHF[(-c6)][(c3-c6)]), V[(-c6)][(c3-c6)]);
W[(-c6)][(c3-c6)] = MIN( MIN ( MIN ( W[(-c6)+1][(c3-c6)], W[(-c6)][(c3-c6)-1]), V[(-c6)][(c3-c6)]), W[(-c6)][(c3-c6)]);
}
}
if(1==0)
for( c0 = 0; c0 <= floord(N - 2, 8); c0 += 1)
#pragma omp parallel for schedule(dynamic, 1)
for( c1 = (c0 + 1) / 2; c1 <= min(c0, (N - 1) / 16); c1 += 1)
for( c3 = 16 * c0 - 16 * c1 + 1; c3 <= min(min(N - 1, 16 * c1 + 15), 16 * c0 - 16 * c1 + 16); c3 += 1)
{
if (c3 >= 3 && (c3 % 16) + N >= 16 * c1 + c3 + 31)
{
for( c6 = max(-N + 16 * c1 + 1, -N + c3 + 1); c6 <= -N + 16 * c1 + 16; c6 += 1)
for( c10 = -c6 + 1; c10 < c3 - c6; c10 += 1)
W[(-c6)][(c3-c6)] += MIN ( MIN(W[(-c6)][c10], W[c10+1][(c3-c6)]), W[(-c6)][(c3-c6)]);
}
else if (N >= 16 * c0 + 30 && c1 == c0 && c3 == 2)
for( c6 = max(-N + 3, -N + 16 * c0 + 1); c6 <= -N + 16 * c0 + 16; c6 += 1)
W[(-c6)][(-c6+2)] += MIN ( MIN(W[(-c6)][(-c6+1)], W[(-c6+1)+1][(-c6+2)]), W[(-c6)][(-c6+2)]);
if (c3 >= 3)
{
for( c4 = max(0, -c1 + (N + 1) / 16 - 1); c4 < -c1 + (N + c3) / 16 - 1; c4 += 1)
{
if (c3 >= 16 * c4 && 16 * c4 + 15 >= c3)
for( c6 = max(-N + 16 * c1 + 1, -N + c3 + 1); c6 < -16 * c4 - 14; c6 += 1)
for( c10 = -c6 + 1; c10 < c3 - c6; c10 += 1)
W[(-c6)][(c3-c6)] += MIN ( MIN(W[(-c6)][c10], W[c10+1][(c3-c6)]), W[(-c6)][(c3-c6)]);
for( c6 = max(max(-N + 16 * c1 + 1, -N + c3 + 1), -16 * c4 - 14); c6 <= min(0, -N + 16 * c1 + 16); c6 += 1)
{
if (c3 >= 16 * c4 && 16 * c4 + 15 >= c3)
for( c10 = -c6 + 1; c10 < 16 * c4; c10 += 1)
W[(-c6)][(c3-c6)] += MIN ( MIN(W[(-c6)][c10], W[c10+1][(c3-c6)]), W[(-c6)][(c3-c6)]);
for( c10 = max(16 * c4, -c6 + 1); c10 <= min(16 * c4 + 15, c3 - c6 - 2); c10 += 1)
{
for( c12 = c10 + 1; c12 < min(c3 - c6, c3 + c10 - 2); c12 += 1)
V[(-c6)][(c3-c6)] = MIN(V[c10][c12] + EFL[(-c6)][(c3-c6)], V[(-c6)][(c3-c6)]);
if (c3 >= 16 * c4 && 16 * c4 + 15 >= c3)
W[(-c6)][(c3-c6)] += MIN ( MIN(W[(-c6)][c10], W[c10+1][(c3-c6)]), W[(-c6)][(c3-c6)]);
V[(-c6)][(c3-c6)] = MIN(W[(-c6)+1][c10] + W[c10+1][(c3-c6)-1], V[(-c6)][(c3-c6)]);
}
if (N >= 16 * c1 + 17 && 16 * c1 + 32 >= N && 16 * c1 + 16 * c4 + 32 == N + c3 && N + c6 == 16 * c1 + 16)
W[(N-16*c1-16)][(N-16*c1+c3-16)] += MIN ( MIN(W[(N-16*c1-16)][(N-16*c1+c3-17)], W[(N-16*c1+c3-17)+1][(N-16*c1+c3-16)]), W[(N-16*c1-16)][(N-16*c1+c3-16)]);
if (c3 >= 16 * c4 && 16 * c4 + 15 >= c3)
for( c10 = 16 * c4 + 16; c10 < c3 - c6; c10 += 1)
W[(-c6)][(c3-c6)] += MIN ( MIN(W[(-c6)][c10], W[c10+1][(c3-c6)]), W[(-c6)][(c3-c6)]);
}
}
if ((c3 >= 16 && 16 * c1 + 31 >= N + c3) || (N + c3 >= 16 * c1 + 32 && 16 * c1 + 15 >= ((15 * N + 15 * c3 + 15) % 16) + N))
{
int c4 = 16 * c1 + 30 >= N + c3 ? 0 : N - c1 + c3 - (15 * N + 15 * c3 + 15) / 16 - 1;
for( c6 = max(-N + 16 * c1 + 1, -N + c3 + 1); c6 <= 0; c6 += 1)
for( c10 = max(16 * c4, -c6 + 1); c10 <= min(16 * c4 + 15, c3 - c6 - 2); c10 += 1)
{
for( c12 = c10 + 1; c12 < min(c3 - c6, c3 + c10 - 2); c12 += 1)
V[(-c6)][(c3-c6)] = MIN(V[c10][c12] + EFL[(-c6)][(c3-c6)], V[(-c6)][(c3-c6)]);
V[(-c6)][(c3-c6)] = MIN(W[(-c6)+1][c10] + W[c10+1][(c3-c6)-1], V[(-c6)][(c3-c6)]);
}
}
}
for( c4 = max(c3 / 16, -c1 + (N + c3) / 16 - 1); c4 <= min((N - 1) / 16, -c1 + (N + c3 - 1) / 16); c4 += 1)
{
if (16 * c4 >= c3 + 1)
{
for( c6 = max(max(-N + 16 * c1 + 1, -N + c3 + 1), -16 * c4 - 14); c6 < c3 - 16 * c4 - 15; c6 += 1)
for( c10 = max(16 * c4, -c6 + 1); c10 <= min(16 * c4 + 15, c3 - c6 - 2); c10 += 1)
{
for( c12 = c10 + 1; c12 < min(c3 - c6, c3 + c10 - 2); c12 += 1)
V[(-c6)][(c3-c6)] = MIN(V[c10][c12] + EFL[(-c6)][(c3-c6)], V[(-c6)][(c3-c6)]);
V[(-c6)][(c3-c6)] = MIN(W[(-c6)+1][c10] + W[c10+1][(c3-c6)-1], V[(-c6)][(c3-c6)]);
}
}
else
for( c6 = max(-N + 16 * c1 + 1, -N + c3 + 1); c6 < c3 - 16 * c4 - 15; c6 += 1)
for( c10 = -c6 + 1; c10 < c3 - c6; c10 += 1)
{
if (c10 >= 16 * c4 && 16 * c4 + 15 >= c10)
for( c12 = c10 + 1; c12 < min(c3 - c6, c3 + c10 - 2); c12 += 1)
V[(-c6)][(c3-c6)] = MIN(V[c10][c12] + EFL[(-c6)][(c3-c6)], V[(-c6)][(c3-c6)]);
W[(-c6)][(c3-c6)] += MIN ( MIN(W[(-c6)][c10], W[c10+1][(c3-c6)]), W[(-c6)][(c3-c6)]);
if (c3 >= c6 + c10 + 2 && c10 >= 16 * c4 && 16 * c4 + 15 >= c10)
V[(-c6)][(c3-c6)] = MIN(W[(-c6)+1][c10] + W[c10+1][(c3-c6)-1], V[(-c6)][(c3-c6)]);
}
for( c6 = max(max(-N + 16 * c1 + 1, -N + c3 + 1), c3 - 16 * c4 - 15); c6 <= min(min(0, -N + 16 * c1 + 16), c3 - 16 * c4); c6 += 1)
{
if (c3 >= 3 && c3 >= 16 * c4 + c6 + 2)
{
if (c3 >= 16 * c4)
for( c10 = -c6 + 1; c10 < 16 * c4; c10 += 1)
W[(-c6)][(c3-c6)] += MIN ( MIN(W[(-c6)][c10], W[c10+1][(c3-c6)]), W[(-c6)][(c3-c6)]);
for( c10 = max(16 * c4, -c6 + 1); c10 < c3 - c6 - 1; c10 += 1)
{
for( c12 = c10 + 1; c12 < min(c3 - c6, c3 + c10 - 2); c12 += 1)
V[(-c6)][(c3-c6)] = MIN(V[c10][c12] + EFL[(-c6)][(c3-c6)], V[(-c6)][(c3-c6)]);
if (c3 >= 16 * c4)
W[(-c6)][(c3-c6)] += MIN ( MIN(W[(-c6)][c10], W[c10+1][(c3-c6)]), W[(-c6)][(c3-c6)]);
V[(-c6)][(c3-c6)] = MIN(W[(-c6)+1][c10] + W[c10+1][(c3-c6)-1], V[(-c6)][(c3-c6)]);
}
if (c3 >= 16 * c4)
W[(-c6)][(c3-c6)] += MIN ( MIN(W[(-c6)][(c3-c6-1)], W[(c3-c6-1)+1][(c3-c6)]), W[(-c6)][(c3-c6)]);
}
else if (c0 + 15 * c4 + 1 == c1 + c3 && 16 * c1 + c3 + 15 * c6 >= 16 * c0 + 1)
{
for( c10 = -c6 + 1; c10 < c3 - c6; c10 += 1)
W[(-c6)][(c3-c6)] += MIN ( MIN(W[(-c6)][c10], W[c10+1][(c3-c6)]), W[(-c6)][(c3-c6)]);
}
else if (c1 == c0 && c3 == 2 && c4 == 0)
W[(-c6)][(-c6+2)] += MIN ( MIN(W[(-c6)][(-c6+1)], W[(-c6+1)+1][(-c6+2)]), W[(-c6)][(-c6+2)]);
V[(-c6)][(c3-c6)] = MIN( MIN (V[(-c6)+1][(c3-c6)-1], EHF[(-c6)][(c3-c6)]), V[(-c6)][(c3-c6)]);
W[(-c6)][(c3-c6)] = MIN( MIN ( MIN ( W[(-c6)+1][(c3-c6)], W[(-c6)][(c3-c6)-1]), V[(-c6)][(c3-c6)]), W[(-c6)][(c3-c6)]);
}
}
}
if(1==0)
for( c0 = 1; c0 < N + floord(N - 2, 16); c0 += 1)
#pragma omp parallel for schedule(dynamic, 1)
for( c1 = c0 - (c0 + 16) / 17 + 1; c1 <= min(N - 1, c0); c1 += 1)
for( c3 = 16 * c0 - 16 * c1 + 1; c3 <= min(c1, 16 * c0 - 16 * c1 + 16); c3 += 1)
{
if (c3 >= 3 && (c3 % 16) + N >= c1 + c3 + 16)
for( c10 = N - c1; c10 < N - c1 + c3 - 1; c10 += 1)
W[(N-c1-1)][(N-c1+c3-1)] += MIN ( MIN(W[(N-c1-1)][c10], W[c10+1][(N-c1+c3-1)]), W[(N-c1-1)][(N-c1+c3-1)]);
if (c3 >= 3)
for( c4 = (N - c1) / 16; c4 <= (N - c1 + c3 - 3) / 16; c4 += 1)
{
if (c3 >= 16 * c4 && 16 * c4 + 15 >= c3)
for( c10 = N - c1; c10 < 16 * c4; c10 += 1)
W[(N-c1-1)][(N-c1+c3-1)] += MIN ( MIN(W[(N-c1-1)][c10], W[c10+1][(N-c1+c3-1)]), W[(N-c1-1)][(N-c1+c3-1)]);
for( c10 = max(N - c1, 16 * c4); c10 <= min(N - c1 + c3 - 3, 16 * c4 + 15); c10 += 1)
{
for( c12 = c10 + 1; c12 < min(N - c1 + c3 - 1, c3 + c10 - 2); c12 += 1)
V[(N-c1-1)][(N-c1+c3-1)] = MIN(V[c10][c12] + EFL[(N-c1-1)][(N-c1+c3-1)], V[(N-c1-1)][(N-c1+c3-1)]);
if (c3 >= 16 * c4 && 16 * c4 + 15 >= c3)
W[(N-c1-1)][(N-c1+c3-1)] += MIN ( MIN(W[(N-c1-1)][c10], W[c10+1][(N-c1+c3-1)]), W[(N-c1-1)][(N-c1+c3-1)]);
V[(N-c1-1)][(N-c1+c3-1)] = MIN(W[(N-c1-1)+1][c10] + W[c10+1][(N-c1+c3-1)-1], V[(N-c1-1)][(N-c1+c3-1)]);
}
if (c3 >= 16 * c4 && 16 * c4 + 15 >= c3 && c1 + 16 * c4 + 17 >= N + c3)
W[(N-c1-1)][(N-c1+c3-1)] += MIN ( MIN(W[(N-c1-1)][(N-c1+c3-2)], W[(N-c1+c3-2)+1][(N-c1+c3-1)]), W[(N-c1-1)][(N-c1+c3-1)]);
if (c3 >= 16 * c4 && 16 * c4 + 15 >= c3)
for( c10 = 16 * c4 + 16; c10 < N - c1 + c3 - 1; c10 += 1)
W[(N-c1-1)][(N-c1+c3-1)] += MIN ( MIN(W[(N-c1-1)][c10], W[c10+1][(N-c1+c3-1)]), W[(N-c1-1)][(N-c1+c3-1)]);
}
if (31 * c1 + c3 + 14 >= 15 * N + 16 * c0 && (c0 - c1 - c3 + 1) % 15 == 0)
for( c10 = N - c1; c10 < N - c1 + c3 - 1; c10 += 1)
W[(N-c1-1)][(N-c1+c3-1)] += MIN ( MIN(W[(N-c1-1)][c10], W[c10+1][(N-c1+c3-1)]), W[(N-c1-1)][(N-c1+c3-1)]);
if (c1 == c0 && c3 == 2)
W[(N-c0-1)][(N-c0+1)] += MIN ( MIN(W[(N-c0-1)][(N-c0)], W[(N-c0)+1][(N-c0+1)]), W[(N-c0-1)][(N-c0+1)]);
V[(N-c1-1)][(N-c1+c3-1)] = MIN( MIN (V[(N-c1-1)+1][(N-c1+c3-1)-1], EHF[(N-c1-1)][(N-c1+c3-1)]), V[(N-c1-1)][(N-c1+c3-1)]);
W[(N-c1-1)][(N-c1+c3-1)] = MIN( MIN ( MIN ( W[(N-c1-1)+1][(N-c1+c3-1)], W[(N-c1-1)][(N-c1+c3-1)-1]), V[(N-c1-1)][(N-c1+c3-1)]), W[(N-c1-1)][(N-c1+c3-1)]);
}
if(1==0)
for( c0 = 0; c0 <= floord(N - 2, 16); c0 += 1)
#pragma omp parallel for
for( c1 = (c0 + 1) / 2; c1 <= min(c0, (N - 1) / 32); c1 += 1)
for( c3 = 32 * c0 - 32 * c1 + 1; c3 <= min(min(N - 1, 32 * c1 + 31), 32 * c0 - 32 * c1 + 32); c3 += 1)
{
if (c3 >= 3 && (c3 % 32) + N >= 32 * c1 + c3 + 63)
{
for( c6 = max(-N + 32 * c1 + 1, -N + c3 + 1); c6 <= -N + 32 * c1 + 32; c6 += 1)
for( c10 = -c6 + 1; c10 < c3 - c6; c10 += 1)
W[(-c6)][(c3-c6)] += MIN ( MIN(W[(-c6)][c10], W[c10+1][(c3-c6)]), W[(-c6)][(c3-c6)]);
}
else if (N >= 32 * c0 + 62 && c1 == c0 && c3 == 2)
{
for( c6 = max(-N + 3, -N + 32 * c0 + 1); c6 <= -N + 32 * c0 + 32; c6 += 1)
W[(-c6)][(-c6+2)] += MIN ( MIN(W[(-c6)][(-c6+1)], W[(-c6+1)+1][(-c6+2)]), W[(-c6)][(-c6+2)]);
}
if (c3 >= 3)
{
for( c4 = max(0, -c1 + (N + 1) / 32 - 1); c4 < -c1 + (N + c3) / 32 - 1; c4 += 1)
{
if (c3 >= 32 * c4 + 32)
{
for( c6 = max(max(-N + 32 * c1 + 1, -N + c3 + 1), -32 * c4 - 30); c6 <= min(0, -N + 32 * c1 + 32); c6 += 1)
for( c10 = max(32 * c4, -c6 + 1); c10 <= min(32 * c4 + 31, c3 - c6 - 2); c10 += 1)
{
for( c12 = c10 + 1; c12 < min(c3 - c6, c3 + c10 - 2); c12 += 1)
V[(-c6)][(c3-c6)] = MIN(V[c10][c12] + EFL[(-c6)][(c3-c6)], V[(-c6)][(c3-c6)]);
V[(-c6)][(c3-c6)] = MIN(W[(-c6)+1][c10] + W[c10+1][(c3-c6)-1], V[(-c6)][(c3-c6)]);
}
}
else if (32 * c4 >= c3 + 1)
{
for( c6 = max(max(-N + 32 * c1 + 1, -N + c3 + 1), -32 * c4 - 30); c6 <= -N + 32 * c1 + 32; c6 += 1)
for( c10 = max(32 * c4, -c6 + 1); c10 <= min(32 * c4 + 31, c3 - c6 - 2); c10 += 1)
{
for( c12 = c10 + 1; c12 < min(c3 - c6, c3 + c10 - 2); c12 += 1)
V[(-c6)][(c3-c6)] = MIN(V[c10][c12] + EFL[(-c6)][(c3-c6)], V[(-c6)][(c3-c6)]);
V[(-c6)][(c3-c6)] = MIN(W[(-c6)+1][c10] + W[c10+1][(c3-c6)-1], V[(-c6)][(c3-c6)]);
}
}
else
{
for( c6 = max(-N + 32 * c1 + 1, -N + c3 + 1); c6 <= -N + 32 * c1 + 32; c6 += 1)
for( c10 = -c6 + 1; c10 < c3 - c6; c10 += 1)
{
if (c10 >= 32 * c4 && 32 * c4 + 31 >= c10)
for( c12 = c10 + 1; c12 < min(c3 - c6, c3 + c10 - 2); c12 += 1)
V[(-c6)][(c3-c6)] = MIN(V[c10][c12] + EFL[(-c6)][(c3-c6)], V[(-c6)][(c3-c6)]);
W[(-c6)][(c3-c6)] += MIN ( MIN(W[(-c6)][c10], W[c10+1][(c3-c6)]), W[(-c6)][(c3-c6)]);
if (c3 >= c6 + c10 + 2 && c10 >= 32 * c4 && 32 * c4 + 31 >= c10)
V[(-c6)][(c3-c6)] = MIN(W[(-c6)+1][c10] + W[c10+1][(c3-c6)-1], V[(-c6)][(c3-c6)]);
}
}
}
if ((c3 >= 32 && 32 * c1 + 63 >= N + c3) || (N + c3 >= 32 * c1 + 64 && 32 * c1 + 31 >= ((31 * N + 31 * c3 + 31) % 32) + N))
{
int c4 = 32 * c1 + 62 >= N + c3 ? 0 : N - c1 + c3 - (31 * N + 31 * c3 + 31) / 32 - 1;
for( c6 = max(-N + 32 * c1 + 1, -N + c3 + 1); c6 <= 0; c6 += 1)
for( c10 = max(32 * c4, -c6 + 1); c10 <= min(32 * c4 + 31, c3 - c6 - 2); c10 += 1)
{
for( c12 = c10 + 1; c12 < min(c3 - c6, c3 + c10 - 2); c12 += 1)
V[(-c6)][(c3-c6)] = MIN(V[c10][c12] + EFL[(-c6)][(c3-c6)], V[(-c6)][(c3-c6)]);
V[(-c6)][(c3-c6)] = MIN(W[(-c6)+1][c10] + W[c10+1][(c3-c6)-1], V[(-c6)][(c3-c6)]);
}
}
}
for( c4 = max(c3 / 32, -c1 + (N + c3) / 32 - 1); c4 <= min((N - 1) / 32, -c1 + (N + c3 - 1) / 32); c4 += 1)
{
if (c1 == c0 && c3 >= 3 && c4 == 0)
{
for( c6 = max(-N + 32 * c0 + 1, -N + c3 + 1); c6 < -30; c6 += 1)
for( c10 = -c6 + 1; c10 < c3 - c6; c10 += 1)
W[(-c6)][(c3-c6)] += MIN ( MIN(W[(-c6)][c10], W[c10+1][(c3-c6)]), W[(-c6)][(c3-c6)]);
}
else if (c1 == c0 && c3 == 2 && c4 == 0)
{
for( c6 = max(-N + 3, -N + 32 * c0 + 1); c6 < -29; c6 += 1)
W[(-c6)][(-c6+2)] += MIN ( MIN(W[(-c6)][(-c6+1)], W[(-c6+1)+1][(-c6+2)]), W[(-c6)][(-c6+2)]);
}
if (c3 >= 3)
for( c6 = max(max(-N + 32 * c1 + 1, -N + c3 + 1), -32 * c4 - 30); c6 < c3 - 32 * c4 - 31; c6 += 1)
{
if (32 * c4 >= c3 + 1)
{
for( c10 = max(32 * c4, -c6 + 1); c10 <= min(32 * c4 + 31, c3 - c6 - 2); c10 += 1)
{
for( c12 = c10 + 1; c12 < min(c3 - c6, c3 + c10 - 2); c12 += 1)
V[(-c6)][(c3-c6)] = MIN(V[c10][c12] + EFL[(-c6)][(c3-c6)], V[(-c6)][(c3-c6)]);
V[(-c6)][(c3-c6)] = MIN(W[(-c6)+1][c10] + W[c10+1][(c3-c6)-1], V[(-c6)][(c3-c6)]);
}
}
else
{
for( c10 = -c6 + 1; c10 < c3 - c6; c10 += 1)
{
if (c10 >= 32 * c4 && 32 * c4 + 31 >= c10)
for( c12 = c10 + 1; c12 < min(c3 - c6, c3 + c10 - 2); c12 += 1)
V[(-c6)][(c3-c6)] = MIN(V[c10][c12] + EFL[(-c6)][(c3-c6)], V[(-c6)][(c3-c6)]);
W[(-c6)][(c3-c6)] += MIN ( MIN(W[(-c6)][c10], W[c10+1][(c3-c6)]), W[(-c6)][(c3-c6)]);
if (c3 >= c6 + c10 + 2 && c10 >= 32 * c4 && 32 * c4 + 31 >= c10)
V[(-c6)][(c3-c6)] = MIN(W[(-c6)+1][c10] + W[c10+1][(c3-c6)-1], V[(-c6)][(c3-c6)]);
}
}
}
for( c6 = max(max(-N + 32 * c1 + 1, -N + c3 + 1), c3 - 32 * c4 - 31); c6 <= min(min(0, -N + 32 * c1 + 32), c3 - 32 * c4); c6 += 1)
{
if (32 * c4 >= c3 + 1)
{
for( c10 = max(32 * c4, -c6 + 1); c10 < c3 - c6 - 1; c10 += 1)
{
for( c12 = c10 + 1; c12 < min(c3 - c6, c3 + c10 - 2); c12 += 1)
V[(-c6)][(c3-c6)] = MIN(V[c10][c12] + EFL[(-c6)][(c3-c6)], V[(-c6)][(c3-c6)]);
V[(-c6)][(c3-c6)] = MIN(W[(-c6)+1][c10] + W[c10+1][(c3-c6)-1], V[(-c6)][(c3-c6)]);
}
}
else
{
for( c10 = -c6 + 1; c10 < c3 - c6; c10 += 1)
{
if (c10 >= 32 * c4)
for( c12 = c10 + 1; c12 < min(c3 - c6, c3 + c10 - 2); c12 += 1)
V[(-c6)][(c3-c6)] = MIN(V[c10][c12] + EFL[(-c6)][(c3-c6)], V[(-c6)][(c3-c6)]);
W[(-c6)][(c3-c6)] += MIN ( MIN(W[(-c6)][c10], W[c10+1][(c3-c6)]), W[(-c6)][(c3-c6)]);
if (c3 >= c6 + c10 + 2 && c10 >= 32 * c4)
V[(-c6)][(c3-c6)] = MIN(W[(-c6)+1][c10] + W[c10+1][(c3-c6)-1], V[(-c6)][(c3-c6)]);
}
}
V[(-c6)][(c3-c6)] = MIN( MIN (V[(-c6)+1][(c3-c6)-1], EHF[(-c6)][(c3-c6)]), V[(-c6)][(c3-c6)]);
W[(-c6)][(c3-c6)] = MIN( MIN ( MIN ( W[(-c6)+1][(c3-c6)], W[(-c6)][(c3-c6)-1]), V[(-c6)][(c3-c6)]), W[(-c6)][(c3-c6)]);
}
}
}
if(1==0)
for( c0 = 0; c0 <= floord(N - 2, 16); c0 += 1)
#pragma omp parallel for schedule(dynamic, 1)
for( c1 = (c0 + 1) / 2; c1 <= min(c0, (N - 1) / 32); c1 += 1)
for( c3 = 32 * c0 - 32 * c1 + 1; c3 <= min(min(N - 1, 32 * c1 + 31), 32 * c0 - 32 * c1 + 32); c3 += 1)
{
if (c3 >= 3)
{
if ((c3 % 32) + N >= 32 * c1 + c3 + 63)
for( c6 = max(-N + 32 * c1 + 1, -N + c3 + 1); c6 <= -N + 32 * c1 + 32; c6 += 1)
for( c10 = -c6 + 1; c10 < c3 - c6; c10 += 1)
W[(-c6)][(c3-c6)] += MIN ( MIN(W[(-c6)][c10], W[c10+1][(c3-c6)]), W[(-c6)][(c3-c6)]);
for( c4 = max(0, -c1 + (N + 1) / 32 - 1); c4 <= min((N - 3) / 32, -c1 + (N + c3 - 3) / 32); c4 += 1)
{
if (c3 >= 32 * c4 && 32 * c4 + 31 >= c3)
for( c6 = max(-N + 32 * c1 + 1, -N + c3 + 1); c6 < -32 * c4 - 30; c6 += 1)
for( c10 = -c6 + 1; c10 < c3 - c6; c10 += 1)
W[(-c6)][(c3-c6)] += MIN ( MIN(W[(-c6)][c10], W[c10+1][(c3-c6)]), W[(-c6)][(c3-c6)]);
for( c6 = max(max(-N + 32 * c1 + 1, -N + c3 + 1), -32 * c4 - 30); c6 <= min(min(0, -N + 32 * c1 + 32), c3 - 32 * c4); c6 += 1)
{
if (c3 >= 32 * c4 + c6 + 2)
{
if (c3 >= 32 * c4 && 32 * c4 + 31 >= c3)
for( c10 = -c6 + 1; c10 < 32 * c4; c10 += 1)
W[(-c6)][(c3-c6)] += MIN ( MIN(W[(-c6)][c10], W[c10+1][(c3-c6)]), W[(-c6)][(c3-c6)]);
for( c10 = max(32 * c4, -c6 + 1); c10 <= min(32 * c4 + 31, c3 - c6 - 2); c10 += 1)
{
for( c12 = c10 + 1; c12 < min(c3 - c6, c3 + c10 - 2); c12 += 1)
V[(-c6)][(c3-c6)] = MIN(V[c10][c12] + EFL[(-c6)][(c3-c6)], V[(-c6)][(c3-c6)]);
if (c3 >= 32 * c4 && 32 * c4 + 31 >= c3)
W[(-c6)][(c3-c6)] += MIN ( MIN(W[(-c6)][c10], W[c10+1][(c3-c6)]), W[(-c6)][(c3-c6)]);
V[(-c6)][(c3-c6)] = MIN(W[(-c6)+1][c10] + W[c10+1][(c3-c6)-1], V[(-c6)][(c3-c6)]);
}
if (c3 >= 32 * c4 && 32 * c4 + 31 >= c3 && 32 * c4 + c6 + 32 >= c3)
W[(-c6)][(c3-c6)] += MIN ( MIN(W[(-c6)][(c3-c6-1)], W[(c3-c6-1)+1][(c3-c6)]), W[(-c6)][(c3-c6)]);
if (c3 >= 32 * c4 && 32 * c4 + 31 >= c3)
for( c10 = 32 * c4 + 32; c10 < c3 - c6; c10 += 1)
W[(-c6)][(c3-c6)] += MIN ( MIN(W[(-c6)][c10], W[c10+1][(c3-c6)]), W[(-c6)][(c3-c6)]);
}
else if (c0 + 31 * c4 + 1 == c1 + c3)
{
for( c10 = -c6 + 1; c10 < c3 - c6; c10 += 1)
W[(-c6)][(c3-c6)] += MIN ( MIN(W[(-c6)][c10], W[c10+1][(c3-c6)]), W[(-c6)][(c3-c6)]);
}
if (32 * c4 + c6 + 31 >= c3)
{
V[(-c6)][(c3-c6)] = MIN( MIN (V[(-c6)+1][(c3-c6)-1], EHF[(-c6)][(c3-c6)]), V[(-c6)][(c3-c6)]);
W[(-c6)][(c3-c6)] = MIN( MIN ( MIN ( W[(-c6)+1][(c3-c6)], W[(-c6)][(c3-c6)-1]), V[(-c6)][(c3-c6)]), W[(-c6)][(c3-c6)]);
}
}
}
if (2 * c1 == c0 && ((N - 3) % 32) + c3 >= 16 * c0 + 32 && (N - 3) % 32 >= 30 && N + 28 >= ((N - 3) % 32) + c3)
for( c6 = -N + c3 + 1; c6 <= min(-N + 16 * c0 + 32, ((N - 1) % 32) - N + c3 + 1); c6 += 1)
{
V[(-c6)][(c3-c6)] = MIN( MIN (V[(-c6)+1][(c3-c6)-1], EHF[(-c6)][(c3-c6)]), V[(-c6)][(c3-c6)]);
W[(-c6)][(c3-c6)] = MIN( MIN ( MIN ( W[(-c6)+1][(c3-c6)], W[(-c6)][(c3-c6)-1]), V[(-c6)][(c3-c6)]), W[(-c6)][(c3-c6)]);
}
if ((N + c3 - 3) % 32 >= 30 && N + 28 >= ((N + c3 - 3) % 32) + 32 * c1)
{
for( c6 = max(-N + 32 * c1 + 1, -N + c3 + 1); c6 <= ((N + c3 - 1) % 32) - N + 32 * c1 + 1; c6 += 1)
{
V[(-c6)][(c3-c6)] = MIN( MIN (V[(-c6)+1][(c3-c6)-1], EHF[(-c6)][(c3-c6)]), V[(-c6)][(c3-c6)]);
W[(-c6)][(c3-c6)] = MIN( MIN ( MIN ( W[(-c6)+1][(c3-c6)], W[(-c6)][(c3-c6)-1]), V[(-c6)][(c3-c6)]), W[(-c6)][(c3-c6)]);
}
}
else if (16 * c0 + 2 == N && 32 * c1 + 2 == N && c3 + 1 == N)
{
for( c10 = 1; c10 < N - 1; c10 += 1)
W[0][(N-1)] += MIN ( MIN(W[0][c10], W[c10+1][(N-1)]), W[0][(N-1)]);
V[0][(N-1)] = MIN( MIN (V[0+1][(N-1)-1], EHF[0][(N-1)]), V[0][(N-1)]);
W[0][(N-1)] = MIN( MIN ( MIN ( W[0+1][(N-1)], W[0][(N-1)-1]), V[0][(N-1)]), W[0][(N-1)]);
}
else if (1024 * c1 + c3 + 30 >= 31 * N + 32 * c0 && (c0 - c1 - c3 + 1) % 31 == 0)
{
for( c6 = -N + 32 * c1 + 1; c6 <= 0; c6 += 1)
{
for( c10 = -c6 + 1; c10 < c3 - c6; c10 += 1)
W[(-c6)][(c3-c6)] += MIN ( MIN(W[(-c6)][c10], W[c10+1][(c3-c6)]), W[(-c6)][(c3-c6)]);
V[(-c6)][(c3-c6)] = MIN( MIN (V[(-c6)+1][(c3-c6)-1], EHF[(-c6)][(c3-c6)]), V[(-c6)][(c3-c6)]);
W[(-c6)][(c3-c6)] = MIN( MIN ( MIN ( W[(-c6)+1][(c3-c6)], W[(-c6)][(c3-c6)-1]), V[(-c6)][(c3-c6)]), W[(-c6)][(c3-c6)]);
}
}
}
else
{
if (c3 == 1)
{
for( c6 = max(max(-30, -N + 2), -N + 32 * c0 + 1); c6 <= min(0, -N + 32 * c0 + 32); c6 += 1)
{
V[(-c6)][(-c6+1)] = MIN( MIN (V[(-c6)+1][(-c6+1)-1], EHF[(-c6)][(-c6+1)]), V[(-c6)][(-c6+1)]);
W[(-c6)][(-c6+1)] = MIN( MIN ( MIN ( W[(-c6)+1][(-c6+1)], W[(-c6)][(-c6+1)-1]), V[(-c6)][(-c6+1)]), W[(-c6)][(-c6+1)]);
}
}
else
{
for( c6 = max(-N + 3, -N + 32 * c0 + 1); c6 <= min(0, -N + 32 * c0 + 32); c6 += 1)
{
W[(-c6)][(-c6+2)] += MIN ( MIN(W[(-c6)][(-c6+1)], W[(-c6+1)+1][(-c6+2)]), W[(-c6)][(-c6+2)]);
if (c6 >= -29)
{
V[(-c6)][(-c6+2)] = MIN( MIN (V[(-c6)+1][(-c6+2)-1], EHF[(-c6)][(-c6+2)]), V[(-c6)][(-c6+2)]);
W[(-c6)][(-c6+2)] = MIN( MIN ( MIN ( W[(-c6)+1][(-c6+2)], W[(-c6)][(-c6+2)-1]), V[(-c6)][(-c6+2)]), W[(-c6)][(-c6+2)]);
}
}
}
for( c4 = max(1, -c0 + (N + c3) / 32 - 1); c4 <= min((N - 1) / 32, -c0 + (N + c3 - 1) / 32); c4 += 1)
for( c6 = max(max(-N + 32 * c0 + 1, -N + c3 + 1), c3 - 32 * c4 - 31); c6 <= min(-N + 32 * c0 + 32, c3 - 32 * c4); c6 += 1)
{
V[(-c6)][(c3-c6)] = MIN( MIN (V[(-c6)+1][(c3-c6)-1], EHF[(-c6)][(c3-c6)]), V[(-c6)][(c3-c6)]);
W[(-c6)][(c3-c6)] = MIN( MIN ( MIN ( W[(-c6)+1][(c3-c6)], W[(-c6)][(c3-c6)-1]), V[(-c6)][(c3-c6)]), W[(-c6)][(c3-c6)]);
}
}
}
}
|
valid.mob6.src.h | #pragma once
#include "ukr.h"
#include "omp.h"
#include "transpose.h"
#include "gen_ukr_A6B2gemm_1_256_14_14_256_3_3.h"
#include "gen_ukr_A4B2gemm_1_256_14_14_256_3_3.h"
// Auto-generated ("push button") driver for one convolution layer whose
// dimensions match the included gen_ukr_* headers (1 x 256 x 14 x 14 input,
// 256 filters of 3x3).  A: input activations, B: packed-filter scratch buffer
// (filled here from oriB), C: output, oriB: original filter layout.
// NOTE(review): appears intended to run inside an enclosing OpenMP parallel
// region (uses omp_get_thread_num() and a bare #pragma omp barrier) — confirm
// with the caller.
void testrun(float* A ,float*B, float*C, float*oriB ){
int tid = omp_get_thread_num();
// Fixed layer constants; uNf/uNc/uNw/uNh and the T* tile sizes come from the
// included generated headers.  Nx/Ny/Nh are unused locals kept by the generator.
int Nx = 14;
int Ny = 14;
int Nh = 3;
// Input-row offsets (in elements) for the 6 output pixels the 6x2v scatter
// micro-kernel produces per call; temporarily patched below at row boundaries.
long long Astrides[6] = {0,2,4,6,8,10};
int b1 = 0;
// Pack the filters: transpose oriB into B in 16-wide interleaved panels,
// one 8x8 AVX transpose at a time, with work split across threads by tid.
for (int fpck = (tid%1)*16; fpck < uNf; fpck+=1*16){
for(int cwh = (tid/1)*8; cwh < uNc*uNw*uNh/8*8; cwh+=8*1){
transpose8x8_avx(oriB+ (fpck+0)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 0, uNc*uNw*uNh, 16);
transpose8x8_avx(oriB+ (fpck+8)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 8, uNc*uNw*uNh, 16);
}
}
#pragma omp barrier// begin push button generated block
// Generated tiled loop nest over channels (c*), filters (f*) and output
// pixels (xy*).  Most outer levels are degenerate (single trip); the real
// stepping happens in the f2/xy2/c1/xy1/f1 loops (16 filters x 6 pixels
// per micro-kernel call).
for(int c5=0;c5<256+0;c5+=256)
{
for(int f5=0;f5<256+0;f5+=256)
{
for(int xy5=0;xy5<196+0;xy5+=196)
{
for(int c4=c5;c4<min(256, 256+c5);c4+=256)
{
for(int xy4=xy5;xy4<min(196, 196+xy5);xy4+=196)
{
for(int f4=f5;f4<min(256, 256+f5);f4+=256)
{
for(int c3=c4;c3<min(256, 256+c4);c3+=Tc1)
{
for(int f3=f4;f3<min(256, 256+f4);f3+=Tf2)
{
for(int xy3=xy4;xy3<min(196, 196+xy4);xy3+=Txy3)
{
for(int xy2=xy3;xy2<min(196, Txy3+xy3);xy2+=6)
{
for(int f2=f3;f2<min(256, Tf2+f3);f2+=16)
{
for(int c2=c3;c2<min(256, Tc1+c3);c2+=Tc1)
{
for(int c1=c2;c1<min(256, Tc1+c2);c1+=Tc1)
{
for(int xy1=xy2;xy1<min(196, 6+xy2);xy1+=6)
{
for(int f1=f2;f1<min(256, 16+f2);f1+=16)
{
// Decompose the linear pixel/filter indices and compute flat offsets into
// A (stride-2 input rows), packed B and C for this micro-kernel tile.
int ctile=min(Tc1, 256-c1);
int x1=xy1/14;
int y1=xy1%14/1;
int c1_1=c1/1;
int c1_2=c1%1/1;
int kf1_1=f1/16;
int kf1_2=f1%16/1;
int of1_1=f1/1;
int of1_2=f1%1/1;
int offsetA=0+b1*230400+c1_1*900+2*x1*30+2*y1*1+c1_2*1;
int offsetB=0+kf1_1*36864+c1*144+0*48+0*16+kf1_2*1;
int offsetC=0+b1*50176+of1_1*196+x1*14+y1*1+of1_2*1;
if(14-y1>=6){
// Common case: all 6 pixels lie in the current output row.
cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
}
else if(14*14-xy1>=6){
// Row-crossing case: bump the strides for the pixels that wrap into the
// next row, run the kernel, then restore the stride table.
for(int sti=14-y1;sti<6;sti+=1)
{
Astrides[sti]+=32;
}
cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
for(int sti=14-y1;sti<6;sti+=1)
{
Astrides[sti]-=32;
}
}
else{
// Tail of the image: fewer than 6 pixels remain, use the 4-pixel kernel.
cnn_ukr_float_scatter_4x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
// end push button generated block
} |
GB_unaryop__minv_uint8_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint8_uint8
// op(A') function: GB_tran__minv_uint8_uint8
// C type: uint8_t
// A type: uint8_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 8)
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 8) ;
// casting
#define GB_CASTING(z, x) \
uint8_t z = (uint8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// GB_unop__minv_uint8_uint8: apply the MINV unary operator elementwise,
// Cx [p] = GB_IMINV_UNSIGNED (Ax [p], 8) for p = 0..anz-1, via the
// GB_CAST_OP macro defined above, using nthreads OpenMP threads.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE),
// GrB_SUCCESS otherwise.  Auto-generated; do not edit.
GrB_Info GB_unop__minv_uint8_uint8
(
uint8_t *restrict Cx,
const uint8_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// Cx [p] = minv (cast (Ax [p]))
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// GB_tran__minv_uint8_uint8: C = minv (A') — transpose A, typecast, and
// apply the MINV unary operator.  The actual loops live in the included
// template GB_unaryop_transpose.c, specialized here by the GB_* macros
// defined above (phase 2 of 2).  Auto-generated; do not edit.
GrB_Info GB_tran__minv_uint8_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__pair_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__pair_uint32
// A.*B function (eWiseMult): GB_AemultB__pair_uint32
// A*D function (colscale): GB_AxD__pair_uint32
// D*A function (rowscale): GB_DxB__pair_uint32
// C+=B function (dense accum): GB_Cdense_accumB__pair_uint32
// C+=b function (dense accum): GB_Cdense_accumb__pair_uint32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__pair_uint32
// C=scalar+B (none)
// C=scalar+B' (none)
// C=A+scalar (none)
// C=A'+scalar (none)
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = 1
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
;
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = 1 ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PAIR || GxB_NO_UINT32 || GxB_NO_PAIR_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// GB_Cdense_ewise3_noaccum__pair_uint32: C = A+B where all three matrices
// are dense; for the PAIR operator every entry is simply set to 1 (see the
// GB_BINOP macro above).  Loops come from the included template.
// Auto-generated; do not edit.
GrB_Info GB_Cdense_ewise3_noaccum__pair_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// GB_Cdense_accumB__pair_uint32: C += B, accumulating a sparse matrix B into
// a dense matrix C with the PAIR operator.  The slice arrays partition B's
// entries across ntasks tasks; the loops live in the included template.
// Auto-generated; do not edit.
GrB_Info GB_Cdense_accumB__pair_uint32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// GB_Cdense_accumb__pair_uint32: C += b, accumulating a scalar b into a
// dense matrix C with the PAIR operator.  Auto-generated; do not edit.
// NOTE(review): the second "return (GrB_SUCCESS)" below is unreachable
// (the block above always returns first) — harmless generator artifact.
GrB_Info GB_Cdense_accumb__pair_uint32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// GB_AxD__pair_uint32: C = A*D, column scale by a diagonal matrix D, using
// the PAIR operator.  C has the same pattern as A; only C->x is written
// here, via the included colscale template.  Auto-generated; do not edit.
GrB_Info GB_AxD__pair_uint32
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// GB_DxB__pair_uint32: C = D*B, row scale by a diagonal matrix D, using the
// PAIR operator.  Only C->x is written, via the included rowscale template.
// Auto-generated; do not edit.
GrB_Info GB_DxB__pair_uint32
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// GB_AaddB__pair_uint32: eWiseAdd, C = A+B or C<M> = A+B with the PAIR
// operator.  The C_to_* maps relate C's vectors to those of M, A and B, and
// TaskList partitions the work; the loops live in the included template.
// Auto-generated; do not edit.
GrB_Info GB_AaddB__pair_uint32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// GB_AemultB__pair_uint32: eWiseMult, C = A.*B or C<M> = A.*B with the PAIR
// operator (every result entry is 1; see GB_BINOP above).  Work partitioning
// mirrors GB_AaddB; the loops live in the included template.
// Auto-generated; do not edit.
GrB_Info GB_AemultB__pair_uint32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info (none)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
fig7.11-nested-parallel.c | /*
DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
Copyright 2009 Sun Microsystems, Inc. All rights reserved.
The contents of this file are subject to the terms of the BSD License("BSD")(the "License").
You can obtain a copy of the License at: http://www.opensparc.net/pubs/t1/licenses/BSD+_License.txt
The BSD License
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistribution of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistribution in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Sun Microsystems, Inc. or the names of
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
This software is provided "AS IS," without a warranty of any kind. ALL
EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY
IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR
NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN MICROSYSTEMS, INC. ("SUN") AND
ITS LICENSORS SHALL NOT BE LIABLE FOR ANY DAMAGES SUFFERED BY LICENSEE AS A
RESULT OF USING, MODIFYING OR DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES.
IN NO EVENT WILL SUN OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT
OR DATA, OR FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR
PUNITIVE DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF SUN HAS
BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
You acknowledge that this software is not designed, licensed or intended for
use in the design, construction, operation or maintenance of any nuclear facility.
*/
#include <stdlib.h>
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#define TRUE 1
#define FALSE 0
#endif
/*
 * Demonstrates a nested OpenMP parallel region: the inner
 * "#pragma omp parallel for" sits inside an outer parallel region.
 * Fills a[i] = i+1 and b[i][j] = a[i], then prints b.
 *
 * Fix: the original leaked every row b[i] — free(b) only releases the
 * array of row pointers, so each row must be freed first.
 */
int main()
{
  int n = 4;
  int *a, **b;

#ifdef _OPENMP
  /* Disable dynamic thread adjustment so the requested count is honored. */
  (void) omp_set_dynamic(FALSE);
  if (omp_get_dynamic()) {printf("Warning: dynamic adjustment of threads has been set\n");}
  (void) omp_set_num_threads(4);
#endif

  if ( (a=(int *)malloc(n*sizeof(int))) == NULL ) {
    perror("array a"); exit(-1);
  }
  if ( (b=(int **)malloc(n*sizeof(int *))) == NULL ) {
    perror("array b"); exit(-1);
  }
  else {
    for (int i=0; i<n; i++)
      if ( (b[i]=(int *)malloc(n*sizeof(int))) == NULL )
        {perror("array b"); exit(-1);}
  }

#pragma omp parallel shared(n,a,b)
  {
#pragma omp for
    for (int i=0; i<n; i++)
    {
      a[i] = i + 1;
#pragma omp parallel for /*-- Okay - This is a parallel region --*/
      for (int j=0; j<n; j++)
        b[i][j] = a[i];
    }
  } /*-- End of parallel region --*/

  for (int i=0; i<n; i++)
  {
    for (int j=0; j<n; j++)
      printf("b[%d][%d] = %d ",i,j,b[i][j]);
    printf("\n");
  }

  /* Release each row before the row-pointer array (fixes memory leak). */
  for (int i=0; i<n; i++)
    free(b[i]);
  free(a);
  free(b);
  return 0;
}
|
callback.h | #ifndef _BSD_SOURCE
#define _BSD_SOURCE
#endif
#define _DEFAULT_SOURCE
#include <stdio.h>
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>
#include <omp.h>
#include <ompt.h>
#include "ompt-signal.h"
// Used to detect architecture
#include "../../src/kmp_platform.h"
static const char* ompt_thread_t_values[] = {
NULL,
"ompt_thread_initial",
"ompt_thread_worker",
"ompt_thread_other"
};
static const char* ompt_task_status_t_values[] = {
NULL,
"ompt_task_complete",
"ompt_task_yield",
"ompt_task_cancel",
"ompt_task_others"
};
static const char* ompt_cancel_flag_t_values[] = {
"ompt_cancel_parallel",
"ompt_cancel_sections",
"ompt_cancel_loop",
"ompt_cancel_taskgroup",
"ompt_cancel_activated",
"ompt_cancel_detected",
"ompt_cancel_discarded_task"
};
// Render the ompt task-type bit flags in `type` as a human-readable string
// into `buffer`.  The first group of flags (initial/implicit/explicit/target)
// is written without a separator; the modifier flags are appended with '|'.
//
// Fix: NUL-terminate the buffer up front.  Previously, if `type` contained
// none of the known flags, the buffer was left uninitialized and the caller
// (print_ids) printed it with %s — undefined behavior.
static void format_task_type(int type, char *buffer) {
  char *progress = buffer;
  *progress = '\0'; // ensure a valid (empty) string even if no flag matches
  if (type & ompt_task_initial)
    progress += sprintf(progress, "ompt_task_initial");
  if (type & ompt_task_implicit)
    progress += sprintf(progress, "ompt_task_implicit");
  if (type & ompt_task_explicit)
    progress += sprintf(progress, "ompt_task_explicit");
  if (type & ompt_task_target)
    progress += sprintf(progress, "ompt_task_target");
  if (type & ompt_task_undeferred)
    progress += sprintf(progress, "|ompt_task_undeferred");
  if (type & ompt_task_untied)
    progress += sprintf(progress, "|ompt_task_untied");
  if (type & ompt_task_final)
    progress += sprintf(progress, "|ompt_task_final");
  if (type & ompt_task_mergeable)
    progress += sprintf(progress, "|ompt_task_mergeable");
  if (type & ompt_task_merged)
    progress += sprintf(progress, "|ompt_task_merged");
}
static ompt_set_callback_t ompt_set_callback;
static ompt_get_callback_t ompt_get_callback;
static ompt_get_state_t ompt_get_state;
static ompt_get_task_info_t ompt_get_task_info;
static ompt_get_thread_data_t ompt_get_thread_data;
static ompt_get_parallel_info_t ompt_get_parallel_info;
static ompt_get_unique_id_t ompt_get_unique_id;
static ompt_get_num_procs_t ompt_get_num_procs;
static ompt_get_num_places_t ompt_get_num_places;
static ompt_get_place_proc_ids_t ompt_get_place_proc_ids;
static ompt_get_place_num_t ompt_get_place_num;
static ompt_get_partition_place_nums_t ompt_get_partition_place_nums;
static ompt_get_proc_id_t ompt_get_proc_id;
static ompt_enumerate_states_t ompt_enumerate_states;
static ompt_enumerate_mutex_impls_t ompt_enumerate_mutex_impls;
// Query the OMPT task at the given ancestry `level` (0 = current task) and
// print its parallel id, task id, frame pointers, formatted task type and
// thread number.  Prints nothing when the runtime reports no frame.
// NOTE(review): `buffer` is only guaranteed to be initialized when
// format_task_type writes at least one flag name — verify task_type is
// always non-empty here.
static void print_ids(int level)
{
int task_type, thread_num;
omp_frame_t *frame;
ompt_data_t *task_parallel_data;
ompt_data_t *task_data;
// exists_task is 0 when no task exists at this level; the pointer outputs
// are then not meaningful, hence the ?: guards in the printf below.
int exists_task = ompt_get_task_info(level, &task_type, &task_data, &frame,
&task_parallel_data, &thread_num);
char buffer[2048];
format_task_type(task_type, buffer);
if (frame)
printf("%" PRIu64 ": task level %d: parallel_id=%" PRIu64
", task_id=%" PRIu64 ", exit_frame=%p, reenter_frame=%p, "
"task_type=%s=%d, thread_num=%d\n",
ompt_get_thread_data()->value, level,
exists_task ? task_parallel_data->value : 0,
exists_task ? task_data->value : 0, frame->exit_frame,
frame->enter_frame, buffer, task_type, thread_num);
}
// Print the frame address at the given call depth, prefixed with the current
// thread id, so tests can compare it against the frames reported by OMPT.
#define get_frame_address(level) __builtin_frame_address(level)
#define print_frame(level)                                                     \
  printf("%" PRIu64 ": __builtin_frame_address(%d)=%p\n",                      \
         ompt_get_thread_data()->value, level, get_frame_address(level))
// clang (version 5.0 and above) adds an intermediate function call with debug flag (-g)
#if defined(TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN)
#if defined(DEBUG) && defined(__clang__) && __clang_major__ >= 5
#define print_frame_from_outlined_fn(level) print_frame(level+1)
#else
#define print_frame_from_outlined_fn(level) print_frame(level)
#endif
#if defined(__clang__) && __clang_major__ >= 5
#warning "Clang 5.0 and later add an additional wrapper for outlined functions when compiling with debug information."
#warning "Please define -DDEBUG iff you manually pass in -g to make the tests succeed!"
#endif
#endif
// This macro helps to define a label at the current position that can be used
// to get the current address in the code.
//
// For print_current_address():
// To reliably determine the offset between the address of the label and the
// actual return address, we insert a NOP instruction as a jump target as the
// compiler would otherwise insert an instruction that we can't control. The
// instruction length is target dependent and is explained below.
//
// (The empty block between "#pragma omp ..." and the __asm__ statement is a
// workaround for a bug in the Intel Compiler.)
// The label address itself is taken with the GNU &&label extension below.
#define define_ompt_label(id) \
  {} \
  __asm__("nop"); \
ompt_label_##id:
// This macro helps to get the address of a label that is inserted by the above
// macro define_ompt_label(). The address is obtained with a GNU extension
// (&&label) that has been tested with gcc, clang and icc.
#define get_ompt_label_address(id) (&& ompt_label_##id)
// This macro prints the exact address that a previously called runtime function
// returns to.
#define print_current_address(id) \
  define_ompt_label(id) \
  print_possible_return_addresses(get_ompt_label_address(id))
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// On X86 the NOP instruction is 1 byte long. In addition, the compiler inserts
// a MOV instruction for non-void runtime functions which is 3 bytes long.
#define print_possible_return_addresses(addr) \
  printf("%" PRIu64 ": current_address=%p or %p for non-void functions\n", \
         ompt_get_thread_data()->value, ((char *)addr) - 1, ((char *)addr) - 4)
#elif KMP_ARCH_PPC64
// On Power the NOP instruction is 4 bytes long. In addition, the compiler
// inserts an LD instruction which accounts for another 4 bytes. In contrast to
// X86 this instruction is always there, even for void runtime functions.
#define print_possible_return_addresses(addr) \
  printf("%" PRIu64 ": current_address=%p\n", ompt_get_thread_data()->value, \
         ((char *)addr) - 8)
#elif KMP_ARCH_AARCH64
// On AArch64 the NOP instruction is 4 bytes long, can be followed by inserted
// store instruction (another 4 bytes long).
#define print_possible_return_addresses(addr) \
  printf("%" PRIu64 ": current_address=%p or %p\n", ompt_get_thread_data()->value, \
         ((char *)addr) - 4, ((char *)addr) - 8)
#else
#error Unsupported target architecture, cannot determine address offset!
#endif
// This macro performs a somewhat similar job to print_current_address(), except
// that it discards a certain number of nibbles from the address and only prints
// the most significant bits / nibbles. This can be used for cases where the
// return address can only be approximated.
//
// To account for overflows (ie the most significant bits / nibbles have just
// changed as we are a few bytes above the relevant power of two) the addresses
// of the "current" and of the "previous block" are printed.
#define print_fuzzy_address(id) \
  define_ompt_label(id) \
  print_fuzzy_address_blocks(get_ompt_label_address(id))
// If you change this define you need to adapt all capture patterns in the tests
// to include or discard the new number of nibbles!
#define FUZZY_ADDRESS_DISCARD_NIBBLES 2
#define FUZZY_ADDRESS_DISCARD_BYTES (1 << ((FUZZY_ADDRESS_DISCARD_NIBBLES) * 4))
// Print the adjacent address blocks as well, so test patterns still match when
// the label happens to sit near a block boundary.
#define print_fuzzy_address_blocks(addr)                                       \
  printf("%" PRIu64 ": fuzzy_address=0x%" PRIx64 " or 0x%" PRIx64              \
         " or 0x%" PRIx64 " or 0x%" PRIx64 " (%p)\n",                          \
         ompt_get_thread_data()->value,                                        \
         ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES - 1,                   \
         ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES,                       \
         ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES + 1,                   \
         ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES + 2, addr)
// Report that the current thread is about to wait for a mutex of the given
// kind.  Kinds outside the five traced here are silently ignored.
static void
on_ompt_callback_mutex_acquire(
  ompt_mutex_t kind,
  unsigned int hint,
  unsigned int impl,
  omp_wait_id_t wait_id,
  const void *codeptr_ra)
{
  if (kind == ompt_mutex_lock)
    printf("%" PRIu64 ": ompt_event_wait_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
  else if (kind == ompt_mutex_nest_lock)
    printf("%" PRIu64 ": ompt_event_wait_nest_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
  else if (kind == ompt_mutex_critical)
    printf("%" PRIu64 ": ompt_event_wait_critical: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
  else if (kind == ompt_mutex_atomic)
    printf("%" PRIu64 ": ompt_event_wait_atomic: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
  else if (kind == ompt_mutex_ordered)
    printf("%" PRIu64 ": ompt_event_wait_ordered: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
}
// Report that the current thread acquired a mutex of the given kind.  A
// nested lock routes here only for the first (outermost) acquisition.
static void
on_ompt_callback_mutex_acquired(
  ompt_mutex_t kind,
  omp_wait_id_t wait_id,
  const void *codeptr_ra)
{
  if (kind == ompt_mutex_lock)
    printf("%" PRIu64 ": ompt_event_acquired_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
  else if (kind == ompt_mutex_nest_lock)
    printf("%" PRIu64 ": ompt_event_acquired_nest_lock_first: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
  else if (kind == ompt_mutex_critical)
    printf("%" PRIu64 ": ompt_event_acquired_critical: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
  else if (kind == ompt_mutex_atomic)
    printf("%" PRIu64 ": ompt_event_acquired_atomic: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
  else if (kind == ompt_mutex_ordered)
    printf("%" PRIu64 ": ompt_event_acquired_ordered: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
}
// Report that the current thread released a mutex of the given kind.  A
// nested lock routes here only for the final (outermost) release.
static void
on_ompt_callback_mutex_released(
  ompt_mutex_t kind,
  omp_wait_id_t wait_id,
  const void *codeptr_ra)
{
  if (kind == ompt_mutex_lock)
    printf("%" PRIu64 ": ompt_event_release_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
  else if (kind == ompt_mutex_nest_lock)
    printf("%" PRIu64 ": ompt_event_release_nest_lock_last: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
  else if (kind == ompt_mutex_critical)
    printf("%" PRIu64 ": ompt_event_release_critical: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
  else if (kind == ompt_mutex_atomic)
    printf("%" PRIu64 ": ompt_event_release_atomic: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
  else if (kind == ompt_mutex_ordered)
    printf("%" PRIu64 ": ompt_event_release_ordered: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
}
// Inner nesting events of a nested lock: "begin" means the nesting count was
// raised on a lock the thread already owns, "end" means it was lowered
// without fully releasing the lock.
static void
on_ompt_callback_nest_lock(
  ompt_scope_endpoint_t endpoint,
  omp_wait_id_t wait_id,
  const void *codeptr_ra)
{
  if (endpoint == ompt_scope_begin)
    printf("%" PRIu64 ": ompt_event_acquired_nest_lock_next: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
  else if (endpoint == ompt_scope_end)
    printf("%" PRIu64 ": ompt_event_release_nest_lock_prev: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
}
// Trace begin/end of synchronization regions (barriers, taskwait, taskgroup).
// Reduction regions are deliberately not reported.  On the "end" endpoint
// parallel_data may already be NULL (the region can outlive its parallel
// region), hence the guarded access.
static void
on_ompt_callback_sync_region(
  ompt_sync_region_t kind,
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      switch(kind)
      {
        case ompt_sync_region_barrier:
        case ompt_sync_region_barrier_implicit:
        case ompt_sync_region_barrier_explicit:
        case ompt_sync_region_barrier_implementation:
          printf("%" PRIu64 ": ompt_event_barrier_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
          // additionally dump the task hierarchy at barrier entry
          print_ids(0);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ": ompt_event_taskwait_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ": ompt_event_taskgroup_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_reduction:
          break;
      }
      break;
    case ompt_scope_end:
      switch(kind)
      {
        case ompt_sync_region_barrier:
        case ompt_sync_region_barrier_implicit:
        case ompt_sync_region_barrier_explicit:
        case ompt_sync_region_barrier_implementation:
          printf("%" PRIu64 ": ompt_event_barrier_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ": ompt_event_taskwait_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ": ompt_event_taskgroup_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_reduction:
          break;
      }
      break;
  }
}
// Same structure as on_ompt_callback_sync_region, but reports the time the
// thread actually spends waiting inside the region ("wait_*" event names).
static void
on_ompt_callback_sync_region_wait(
  ompt_sync_region_t kind,
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      switch(kind)
      {
        case ompt_sync_region_barrier:
        case ompt_sync_region_barrier_implicit:
        case ompt_sync_region_barrier_explicit:
        case ompt_sync_region_barrier_implementation:
          printf("%" PRIu64 ": ompt_event_wait_barrier_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ": ompt_event_wait_taskwait_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ": ompt_event_wait_taskgroup_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_reduction:
          break;
      }
      break;
    case ompt_scope_end:
      switch(kind)
      {
        case ompt_sync_region_barrier:
        case ompt_sync_region_barrier_implicit:
        case ompt_sync_region_barrier_explicit:
        case ompt_sync_region_barrier_implementation:
          // parallel_data may be NULL at region end; fall back to 0
          printf("%" PRIu64 ": ompt_event_wait_barrier_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ": ompt_event_wait_taskwait_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ": ompt_event_wait_taskgroup_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_reduction:
          break;
      }
      break;
  }
}
// Flush events carry the flushing thread's data directly as a parameter, so
// no ompt_get_thread_data() call is needed here.
static void
on_ompt_callback_flush(
  ompt_data_t *thread_data,
  const void *codeptr_ra)
{
  printf("%" PRIu64 ": ompt_event_flush: codeptr_ra=%p\n", thread_data->value, codeptr_ra);
}
// Decode and print a cancellation event.  `flags` combines one construct bit
// (parallel/sections/loop/taskgroup) with one action bit
// (activated/detected/discarded_task); both halves are translated through
// ompt_cancel_flag_t_values for the trace line.
static void
on_ompt_callback_cancel(
  ompt_data_t *task_data,
  int flags,
  const void *codeptr_ra)
{
  // Initialize to empty strings: if the runtime ever passes a flag word that
  // matches none of the tested bits, the printf below would otherwise read an
  // uninitialized pointer (undefined behavior).
  const char* first_flag_value = "";
  const char* second_flag_value = "";
  if(flags & ompt_cancel_parallel)
    first_flag_value = ompt_cancel_flag_t_values[0];
  else if(flags & ompt_cancel_sections)
    first_flag_value = ompt_cancel_flag_t_values[1];
  else if(flags & ompt_cancel_loop)
    first_flag_value = ompt_cancel_flag_t_values[2];
  else if(flags & ompt_cancel_taskgroup)
    first_flag_value = ompt_cancel_flag_t_values[3];
  if(flags & ompt_cancel_activated)
    second_flag_value = ompt_cancel_flag_t_values[4];
  else if(flags & ompt_cancel_detected)
    second_flag_value = ompt_cancel_flag_t_values[5];
  else if(flags & ompt_cancel_discarded_task)
    second_flag_value = ompt_cancel_flag_t_values[6];
  printf("%" PRIu64 ": ompt_event_cancel: task_data=%" PRIu64 ", flags=%s|%s=%" PRIu32 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, task_data->value, first_flag_value, second_flag_value, flags, codeptr_ra);
}
// Implicit-task events: on begin, assign a fresh unique id to the task (and
// check the runtime handed us a zero-initialized ompt_data_t); on end,
// parallel_data may already be NULL, so guard the access.
static void
on_ompt_callback_implicit_task(
    ompt_scope_endpoint_t endpoint,
    ompt_data_t *parallel_data,
    ompt_data_t *task_data,
    unsigned int team_size,
    unsigned int thread_num)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      if(task_data->ptr)
        printf("%s\n", "0: task_data initially not null");
      task_data->value = ompt_get_unique_id();
      printf("%" PRIu64 ": ompt_event_implicit_task_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", team_size=%" PRIu32 ", thread_num=%" PRIu32 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, team_size, thread_num);
      break;
    case ompt_scope_end:
      printf("%" PRIu64 ": ompt_event_implicit_task_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", team_size=%" PRIu32 ", thread_num=%" PRIu32 "\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, team_size, thread_num);
      break;
  }
}
// Lock-initialization events for simple and nested locks; other mutex kinds
// (critical, atomic, ordered) are never initialized explicitly and ignored.
static void
on_ompt_callback_lock_init(
  ompt_mutex_t kind,
  unsigned int hint,
  unsigned int impl,
  omp_wait_id_t wait_id,
  const void *codeptr_ra)
{
  if (kind == ompt_mutex_lock)
    printf("%" PRIu64 ": ompt_event_init_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
  else if (kind == ompt_mutex_nest_lock)
    printf("%" PRIu64 ": ompt_event_init_nest_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
}
// Lock-destruction events for simple and nested locks; all other mutex kinds
// are ignored.
static void
on_ompt_callback_lock_destroy(
  ompt_mutex_t kind,
  omp_wait_id_t wait_id,
  const void *codeptr_ra)
{
  if (kind == ompt_mutex_lock)
    printf("%" PRIu64 ": ompt_event_destroy_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
  else if (kind == ompt_mutex_nest_lock)
    printf("%" PRIu64 ": ompt_event_destroy_nest_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
}
// Trace begin/end of worksharing constructs (loop, sections, single,
// distribute, taskloop).  Workshare regions are acknowledged but not yet
// reported (see the "//impl" markers below).
static void
on_ompt_callback_work(
  ompt_work_t wstype,
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  uint64_t count,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      switch(wstype)
      {
        case ompt_work_loop:
          printf("%" PRIu64 ": ompt_event_loop_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_sections:
          printf("%" PRIu64 ": ompt_event_sections_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_single_executor:
          printf("%" PRIu64 ": ompt_event_single_in_block_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_single_other:
          printf("%" PRIu64 ": ompt_event_single_others_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_workshare:
          //impl — not reported yet
          break;
        case ompt_work_distribute:
          printf("%" PRIu64 ": ompt_event_distribute_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_taskloop:
          //impl
          printf("%" PRIu64 ": ompt_event_taskloop_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
      }
      break;
    case ompt_scope_end:
      switch(wstype)
      {
        case ompt_work_loop:
          printf("%" PRIu64 ": ompt_event_loop_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_sections:
          printf("%" PRIu64 ": ompt_event_sections_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_single_executor:
          printf("%" PRIu64 ": ompt_event_single_in_block_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_single_other:
          printf("%" PRIu64 ": ompt_event_single_others_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_workshare:
          //impl — not reported yet
          break;
        case ompt_work_distribute:
          printf("%" PRIu64 ": ompt_event_distribute_end: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_taskloop:
          //impl
          printf("%" PRIu64 ": ompt_event_taskloop_end: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
      }
      break;
  }
}
// Trace entry and exit of `master` regions; endpoints other than begin/end
// are not expected and produce no output.
static void
on_ompt_callback_master(
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  const void *codeptr_ra)
{
  if (endpoint == ompt_scope_begin)
    printf("%" PRIu64 ": ompt_event_master_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
  else if (endpoint == ompt_scope_end)
    printf("%" PRIu64 ": ompt_event_master_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
}
// Parallel-region begin: assign a fresh unique id to the region (the runtime
// must hand us a zero-initialized ompt_data_t) and print the encountering
// task's id and frame pointers.
static void on_ompt_callback_parallel_begin(
    ompt_data_t *encountering_task_data,
    const omp_frame_t *encountering_task_frame, ompt_data_t *parallel_data,
    uint32_t requested_team_size, int flag, const void *codeptr_ra) {
  if(parallel_data->ptr)
    printf("0: parallel_data initially not null\n");
  parallel_data->value = ompt_get_unique_id();
  printf("%" PRIu64 ": ompt_event_parallel_begin: parent_task_id=%" PRIu64
         ", parent_task_frame.exit=%p, parent_task_frame.reenter=%p, "
         "parallel_id=%" PRIu64 ", requested_team_size=%" PRIu32
         ", codeptr_ra=%p, invoker=%d\n",
         ompt_get_thread_data()->value, encountering_task_data->value,
         encountering_task_frame->exit_frame,
         encountering_task_frame->enter_frame, parallel_data->value,
         requested_team_size, codeptr_ra, flag);
}
// Parallel-region end: print the region id, the encountering task id and the
// invoker flag recorded at region begin.
static void on_ompt_callback_parallel_end(ompt_data_t *parallel_data,
                                          ompt_data_t *encountering_task_data,
                                          int flag, const void *codeptr_ra) {
  printf("%" PRIu64 ": ompt_event_parallel_end: parallel_id=%" PRIu64
         ", task_id=%" PRIu64 ", invoker=%d, codeptr_ra=%p\n",
         ompt_get_thread_data()->value, parallel_data->value,
         encountering_task_data->value, flag, codeptr_ra);
}
// Task-creation events: assign a fresh unique id to the new task and print
// its type, parent frames and dependence flag.  For the initial task this
// callback also initializes the id of the implicit parallel region, since no
// parallel_begin callback ever fires for it.
static void
on_ompt_callback_task_create(
    ompt_data_t *encountering_task_data,
    const omp_frame_t *encountering_task_frame,
    ompt_data_t* new_task_data,
    int type,
    int has_dependences,
    const void *codeptr_ra)
{
  if(new_task_data->ptr)
    printf("0: new_task_data initially not null\n");
  new_task_data->value = ompt_get_unique_id();
  char buffer[2048];
  format_task_type(type, buffer);
  //there is no parallel_begin callback for implicit parallel region
  //thus it is initialized in initial task
  if(type & ompt_task_initial)
  {
    ompt_data_t *parallel_data;
    ompt_get_parallel_info(0, &parallel_data, NULL);
    if(parallel_data->ptr)
      printf("%s\n", "0: parallel_data initially not null");
    parallel_data->value = ompt_get_unique_id();
  }
  // encountering_task_data/frame may be NULL (e.g. for the initial task)
  printf("%" PRIu64 ": ompt_event_task_create: parent_task_id=%" PRIu64 ", parent_task_frame.exit=%p, parent_task_frame.reenter=%p, new_task_id=%" PRIu64 ", codeptr_ra=%p, task_type=%s=%d, has_dependences=%s\n", ompt_get_thread_data()->value, encountering_task_data ? encountering_task_data->value : 0, encountering_task_frame ? encountering_task_frame->exit_frame : NULL, encountering_task_frame ? encountering_task_frame->enter_frame : NULL, new_task_data->value, codeptr_ra, buffer, type, has_dependences ? "yes" : "no");
}
// Task-switch events: print the outgoing and incoming task ids; when the
// outgoing task completed, additionally emit the task_end line the tests
// match on.
static void
on_ompt_callback_task_schedule(
    ompt_data_t *first_task_data,
    ompt_task_status_t prior_task_status,
    ompt_data_t *second_task_data)
{
  printf("%" PRIu64 ": ompt_event_task_schedule: first_task_id=%" PRIu64 ", second_task_id=%" PRIu64 ", prior_task_status=%s=%d\n", ompt_get_thread_data()->value, first_task_data->value, second_task_data->value, ompt_task_status_t_values[prior_task_status], prior_task_status);
  if(prior_task_status == ompt_task_complete)
  {
    printf("%" PRIu64 ": ompt_event_task_end: task_id=%" PRIu64 "\n", ompt_get_thread_data()->value, first_task_data->value);
  }
}
// Print the dependence list attached to a task at creation time.
static void
on_ompt_callback_task_dependences(
    ompt_data_t *task_data,
    const ompt_task_dependence_t *deps,
    int ndeps)
{
  printf("%" PRIu64 ": ompt_event_task_dependences: task_id=%" PRIu64 ", deps=%p, ndeps=%d\n", ompt_get_thread_data()->value, task_data->value, (void *)deps, ndeps);
}
// Print a single resolved dependence edge between two tasks.
static void
on_ompt_callback_task_dependence(
    ompt_data_t *first_task_data,
    ompt_data_t *second_task_data)
{
  printf("%" PRIu64 ": ompt_event_task_dependence_pair: first_task_id=%" PRIu64 ", second_task_id=%" PRIu64 "\n", ompt_get_thread_data()->value, first_task_data->value, second_task_data->value);
}
// Thread start: assign a fresh unique id to the thread and print its type
// (initial/worker/other as decoded by ompt_thread_t_values).
static void
on_ompt_callback_thread_begin(
  ompt_thread_t thread_type,
  ompt_data_t *thread_data)
{
  if(thread_data->ptr)
    printf("%s\n", "0: thread_data initially not null");
  thread_data->value = ompt_get_unique_id();
  printf("%" PRIu64 ": ompt_event_thread_begin: thread_type=%s=%d, thread_id=%" PRIu64 "\n", ompt_get_thread_data()->value, ompt_thread_t_values[thread_type], thread_type, thread_data->value);
}
// Thread shutdown: print the id assigned in on_ompt_callback_thread_begin.
static void
on_ompt_callback_thread_end(
  ompt_data_t *thread_data)
{
  printf("%" PRIu64 ": ompt_event_thread_end: thread_id=%" PRIu64 "\n", ompt_get_thread_data()->value, thread_data->value);
}
// omp_control_tool() hook: print the command/modifier plus the current task's
// frame pointers.  Returns 0 to signal success to the runtime.
static int
on_ompt_callback_control_tool(
  uint64_t command,
  uint64_t modifier,
  void *arg,
  const void *codeptr_ra)
{
  omp_frame_t* omptTaskFrame;
  // only the frame of the current task (level 0) is of interest here
  ompt_get_task_info(0, NULL, (ompt_data_t**) NULL, &omptTaskFrame, NULL, NULL);
  printf("%" PRIu64 ": ompt_event_control_tool: command=%" PRIu64 ", modifier=%" PRIu64 ", arg=%p, codeptr_ra=%p, current_task_frame.exit=%p, current_task_frame.reenter=%p \n", ompt_get_thread_data()->value, command, modifier, arg, codeptr_ra, omptTaskFrame->exit_frame, omptTaskFrame->enter_frame);
  return 0; //success
}
// Register `on_<name>` for callback `name`, casting through the given
// signature type; warn when the runtime refuses (ompt_set_never).
// register_callback() is the shorthand for callbacks whose signature type is
// simply `<name>_t`.
#define register_callback_t(name, type)                                        \
do{                                                                            \
  type f_##name = &on_##name;                                                  \
  if (ompt_set_callback(name, (ompt_callback_t)f_##name) ==                    \
      ompt_set_never)                                                          \
    printf("0: Could not register callback '" #name "'\n");                    \
}while(0)
#define register_callback(name) register_callback_t(name, name##_t)
// Tool initializer invoked by the runtime: resolve every entry point through
// `lookup`, then register all callbacks exercised by the tests.  Returning a
// non-zero value keeps the tool active.
int ompt_initialize(
  ompt_function_lookup_t lookup,
  ompt_data_t *tool_data)
{
  // resolve runtime entry points into the file-scope function pointers above
  ompt_set_callback = (ompt_set_callback_t) lookup("ompt_set_callback");
  ompt_get_callback = (ompt_get_callback_t) lookup("ompt_get_callback");
  ompt_get_state = (ompt_get_state_t) lookup("ompt_get_state");
  ompt_get_task_info = (ompt_get_task_info_t) lookup("ompt_get_task_info");
  ompt_get_thread_data = (ompt_get_thread_data_t) lookup("ompt_get_thread_data");
  ompt_get_parallel_info = (ompt_get_parallel_info_t) lookup("ompt_get_parallel_info");
  ompt_get_unique_id = (ompt_get_unique_id_t) lookup("ompt_get_unique_id");
  ompt_get_num_procs = (ompt_get_num_procs_t) lookup("ompt_get_num_procs");
  ompt_get_num_places = (ompt_get_num_places_t) lookup("ompt_get_num_places");
  ompt_get_place_proc_ids = (ompt_get_place_proc_ids_t) lookup("ompt_get_place_proc_ids");
  ompt_get_place_num = (ompt_get_place_num_t) lookup("ompt_get_place_num");
  ompt_get_partition_place_nums = (ompt_get_partition_place_nums_t) lookup("ompt_get_partition_place_nums");
  ompt_get_proc_id = (ompt_get_proc_id_t) lookup("ompt_get_proc_id");
  ompt_enumerate_states = (ompt_enumerate_states_t) lookup("ompt_enumerate_states");
  ompt_enumerate_mutex_impls = (ompt_enumerate_mutex_impls_t) lookup("ompt_enumerate_mutex_impls");
  // callbacks whose handler signature differs from `<name>_t` use the
  // explicit register_callback_t form
  register_callback(ompt_callback_mutex_acquire);
  register_callback_t(ompt_callback_mutex_acquired, ompt_callback_mutex_t);
  register_callback_t(ompt_callback_mutex_released, ompt_callback_mutex_t);
  register_callback(ompt_callback_nest_lock);
  register_callback(ompt_callback_sync_region);
  register_callback_t(ompt_callback_sync_region_wait, ompt_callback_sync_region_t);
  register_callback(ompt_callback_control_tool);
  register_callback(ompt_callback_flush);
  register_callback(ompt_callback_cancel);
  register_callback(ompt_callback_implicit_task);
  register_callback_t(ompt_callback_lock_init, ompt_callback_mutex_acquire_t);
  register_callback_t(ompt_callback_lock_destroy, ompt_callback_mutex_t);
  register_callback(ompt_callback_work);
  register_callback(ompt_callback_master);
  register_callback(ompt_callback_parallel_begin);
  register_callback(ompt_callback_parallel_end);
  register_callback(ompt_callback_task_create);
  register_callback(ompt_callback_task_schedule);
  register_callback(ompt_callback_task_dependences);
  register_callback(ompt_callback_task_dependence);
  register_callback(ompt_callback_thread_begin);
  register_callback(ompt_callback_thread_end);
  printf("0: NULL_POINTER=%p\n", (void*)NULL);
  return 1; //success
}
// Tool finalizer: emit the shutdown marker the tests expect.
void ompt_finalize(ompt_data_t *tool_data)
{
  printf("0: ompt_event_runtime_shutdown\n");
}
// OMPT tool entry point found by the runtime at startup.  Returning a
// non-NULL result (initializer, finalizer, tool data) activates the tool.
ompt_start_tool_result_t* ompt_start_tool(
  unsigned int omp_version,
  const char *runtime_version)
{
  static ompt_start_tool_result_t ompt_start_tool_result = {&ompt_initialize,&ompt_finalize, 0};
  return &ompt_start_tool_result;
}
|
trmv_x_csc_u_lo.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
// Computes y := alpha * (L*x + x) + beta * y where L is the strictly-lower
// triangle of the square CSC matrix A and the "+ x" term is the implied unit
// diagonal ("u" = unit diagonal, "lo" = lower triangle).  Columns are split
// across threads by nnz; each thread scatters into a private dense vector of
// length m, and the per-thread partials are reduced into y afterwards.
static alphasparse_status_t
trmv_csc_u_lo_omp(const ALPHA_Number alpha,
                  const ALPHA_SPMAT_CSC *A,
                  const ALPHA_Number *x,
                  const ALPHA_Number beta,
                  ALPHA_Number *y)
{
    const ALPHA_INT m = A->rows;
    const ALPHA_INT n = A->cols;
    // triangular multiply requires a square matrix
    if(m != n) return ALPHA_SPARSE_STATUS_INVALID_VALUE;
    const ALPHA_INT thread_num = alpha_get_thread_num();
    ALPHA_INT partition[thread_num + 1];
    // split columns so every thread receives a similar nonzero count
    balanced_partition_row_by_nnz(A->cols_end, n, thread_num, partition);
    // NOTE(review): malloc results (tmp and each tmp[tid]) are not checked —
    // TODO add allocation-failure handling
    ALPHA_Number** tmp = (ALPHA_Number**)malloc(sizeof(ALPHA_Number*) * thread_num);
#ifdef _OPENMP
#pragma omp parallel num_threads(thread_num)
#endif
    {
        const ALPHA_INT tid = alpha_get_thread_id();
        const ALPHA_INT local_n_s = partition[tid];
        const ALPHA_INT local_n_e = partition[tid + 1];
        // per-thread dense accumulator over all m rows, zero-initialized
        tmp[tid] = (ALPHA_Number*)malloc(sizeof(ALPHA_Number) * m);
        for(ALPHA_INT j = 0; j < m; ++j) {
            alpha_setzero(tmp[tid][j]);
        }
        for(ALPHA_INT i = local_n_s; i < local_n_e; ++i)
        {
            const ALPHA_Number x_r = x[i];
            register ALPHA_Number tmp_t;
            alpha_setzero(tmp_t);
            ALPHA_INT cs = A->cols_start[i];
            ALPHA_INT ce = A->cols_end[i];
            // 4-way unrolled walk over column i; the row_0..row_3 > i cascade
            // assumes row indices are sorted ascending within each column —
            // TODO confirm against the CSC construction code
            for(; cs < ce-3; cs += 4)
            {
                const ALPHA_INT row_0 = A->row_indx[cs];
                const ALPHA_INT row_1 = A->row_indx[cs+1];
                const ALPHA_INT row_2 = A->row_indx[cs+2];
                const ALPHA_INT row_3 = A->row_indx[cs+3];
                // only entries strictly below the diagonal (row > i)
                // contribute; the diagonal itself is an implied 1
                if(row_0 > i)
                {
                    alpha_mul(tmp_t, A->values[cs], x_r);
                    alpha_madde(tmp[tid][row_0], alpha, tmp_t);
                    alpha_mul(tmp_t, A->values[cs+1], x_r);
                    alpha_madde(tmp[tid][row_1], alpha, tmp_t);
                    alpha_mul(tmp_t, A->values[cs+2], x_r);
                    alpha_madde(tmp[tid][row_2], alpha, tmp_t);
                    alpha_mul(tmp_t, A->values[cs+3], x_r);
                    alpha_madde(tmp[tid][row_3], alpha, tmp_t);
                }else if (row_1 > i){
                    alpha_mul(tmp_t, A->values[cs+1], x_r);
                    alpha_madde(tmp[tid][row_1], alpha, tmp_t);
                    alpha_mul(tmp_t, A->values[cs+2], x_r);
                    alpha_madde(tmp[tid][row_2], alpha, tmp_t);
                    alpha_mul(tmp_t, A->values[cs+3], x_r);
                    alpha_madde(tmp[tid][row_3], alpha, tmp_t);
                }else if (row_2 > i){
                    alpha_mul(tmp_t, A->values[cs+2], x_r);
                    alpha_madde(tmp[tid][row_2], alpha, tmp_t);
                    alpha_mul(tmp_t, A->values[cs+3], x_r);
                    alpha_madde(tmp[tid][row_3], alpha, tmp_t);
                }else if (row_3 > i){
                    alpha_mul(tmp_t, A->values[cs+3], x_r);
                    alpha_madde(tmp[tid][row_3], alpha, tmp_t);
                }
            }
            // remainder loop for the last (ce - cs) % 4 entries
            for (;cs < ce;++cs)
            {
                const ALPHA_INT row = A->row_indx[cs];
                if (row > i){
                    alpha_mul(tmp_t, A->values[cs], x_r);
                    alpha_madde(tmp[tid][row], alpha, tmp_t);
                }
            }
        }
    }
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    // reduce the per-thread partials, then add the unit-diagonal term
    // alpha*x[i] and the scaled original beta*y[i]
    for(ALPHA_INT i = 0; i < m; ++i)
    {
        ALPHA_Number tmp_y;
        alpha_setzero(tmp_y);
        for(ALPHA_INT j = 0; j < thread_num; ++j)
        {
            alpha_add(tmp_y, tmp_y, tmp[j][i]);
        }
        alpha_madde(tmp_y, alpha, x[i]);
        alpha_madde(tmp_y, y[i], beta);
        y[i] = tmp_y;
    }
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    // release the per-thread scratch vectors
    for(ALPHA_INT i = 0; i < thread_num; ++i)
    {
        free(tmp[i]);
    }
    free(tmp);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
// Public dispatch entry point: forward directly to the OpenMP implementation.
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
      const ALPHA_SPMAT_CSC *A,
      const ALPHA_Number *x,
      const ALPHA_Number beta,
      ALPHA_Number *y)
{
    return trmv_csc_u_lo_omp(alpha, A, x, beta, y);
}
|
analyze.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% AAA N N AAA L Y Y ZZZZZ EEEEE %
% A A NN N A A L Y Y ZZ E %
% AAAAA N N N AAAAA L Y ZZZ EEE %
% A A N NN A A L Y ZZ E %
% A A N N A A LLLLL Y ZZZZZ EEEEE %
% %
% Analyze An Image %
% %
% Software Design %
% Bill Corbis %
% December 1998 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
*/
/*
Include declarations.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <assert.h>
#include <math.h>
#include "magick/MagickCore.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% a n a l y z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% analyzeImage() computes the brightness and saturation mean, standard
% deviation, kurtosis and skewness and stores these values as attributes
% of the image.
%
% The format of the analyzeImage method is:
%
%      size_t analyzeImage(Image **images,const int argc,
%        const char **argv,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the address of a structure of type Image.
%
% o argc: Specifies a pointer to an integer describing the number of
% elements in the argument vector.
%
% o argv: Specifies a pointer to a text array containing the command line
% arguments.
%
% o exception: return any errors or warnings in this structure.
%
*/
ModuleExport size_t analyzeImage(Image **images,const int argc,
  const char **argv,ExceptionInfo *exception)
{
  /*
    Computes the brightness and saturation mean, standard deviation,
    kurtosis and skewness for each image in the list and stores them as
    "filter:brightness:*" and "filter:saturation:*" image properties.

    Fix vs. previous revision: the OpenMP row loop accumulated into the
    shared sums (brightness_sum_x, ..., area) and shared HSB temporaries
    without any reduction, which is a data race producing nondeterministic
    statistics.  The sums are now combined with a reduction clause and the
    per-pixel HSB temporaries are thread-local.
  */
  char
    text[MaxTextExtent];

  double
    area,
    brightness_mean,
    brightness_standard_deviation,
    brightness_kurtosis,
    brightness_skewness,
    brightness_sum_x,
    brightness_sum_x2,
    brightness_sum_x3,
    brightness_sum_x4,
    saturation_mean,
    saturation_standard_deviation,
    saturation_kurtosis,
    saturation_skewness,
    saturation_sum_x,
    saturation_sum_x2,
    saturation_sum_x3,
    saturation_sum_x4;

  Image
    *image;

  assert(images != (Image **) NULL);
  assert(*images != (Image *) NULL);
  assert((*images)->signature == MagickCoreSignature);
  (void) argc;
  (void) argv;
  image=(*images);
  for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
  {
    CacheView
      *image_view;

    ssize_t
      y;

    MagickBooleanType
      status;

    brightness_sum_x=0.0;
    brightness_sum_x2=0.0;
    brightness_sum_x3=0.0;
    brightness_sum_x4=0.0;
    brightness_mean=0.0;
    brightness_standard_deviation=0.0;
    brightness_kurtosis=0.0;
    brightness_skewness=0.0;
    saturation_sum_x=0.0;
    saturation_sum_x2=0.0;
    saturation_sum_x3=0.0;
    saturation_sum_x4=0.0;
    saturation_mean=0.0;
    saturation_standard_deviation=0.0;
    saturation_kurtosis=0.0;
    saturation_skewness=0.0;
    area=0.0;
    status=MagickTrue;
    image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    reduction(+:area,brightness_sum_x,brightness_sum_x2,brightness_sum_x3, \
      brightness_sum_x4,saturation_sum_x,saturation_sum_x2,saturation_sum_x3, \
      saturation_sum_x4) \
    magick_number_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      double
        brightness,   /* per-thread HSB temporaries: must not be shared */
        hue,
        saturation;

      register const PixelPacket
        *p;

      register ssize_t
        x;

      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const PixelPacket *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        ConvertRGBToHSB(GetPixelRed(p),GetPixelGreen(p),GetPixelBlue(p),
          &hue,&saturation,&brightness);
        /* ConvertRGBToHSB yields [0,1]; scale to quantum range. */
        brightness*=QuantumRange;
        brightness_sum_x+=brightness;
        brightness_sum_x2+=brightness*brightness;
        brightness_sum_x3+=brightness*brightness*brightness;
        brightness_sum_x4+=brightness*brightness*brightness*brightness;
        saturation*=QuantumRange;
        saturation_sum_x+=saturation;
        saturation_sum_x2+=saturation*saturation;
        saturation_sum_x3+=saturation*saturation*saturation;
        saturation_sum_x4+=saturation*saturation*saturation*saturation;
        area++;
        p++;
      }
    }
    image_view=DestroyCacheView(image_view);
    if (area <= 0.0)
      break;  /* no pixels sampled (or pixel fetch failed on row 0) */
    /* mean = E[x] */
    brightness_mean=brightness_sum_x/area;
    (void) FormatLocaleString(text,MaxTextExtent,"%g",brightness_mean);
    (void) SetImageProperty(image,"filter:brightness:mean",text);
    /* sigma = sqrt(E[x^2]-E[x]^2) */
    brightness_standard_deviation=sqrt(brightness_sum_x2/area-(brightness_sum_x/
      area*brightness_sum_x/area));
    (void) FormatLocaleString(text,MaxTextExtent,"%g",
      brightness_standard_deviation);
    (void) SetImageProperty(image,"filter:brightness:standard-deviation",text);
    /* excess kurtosis = (E[x^4]-4mE[x^3]+6m^2E[x^2]-3m^4)/sigma^4 - 3 */
    if (brightness_standard_deviation != 0)
      brightness_kurtosis=(brightness_sum_x4/area-4.0*brightness_mean*
        brightness_sum_x3/area+6.0*brightness_mean*brightness_mean*
        brightness_sum_x2/area-3.0*brightness_mean*brightness_mean*
        brightness_mean*brightness_mean)/(brightness_standard_deviation*
        brightness_standard_deviation*brightness_standard_deviation*
        brightness_standard_deviation)-3.0;
    (void) FormatLocaleString(text,MaxTextExtent,"%g",brightness_kurtosis);
    (void) SetImageProperty(image,"filter:brightness:kurtosis",text);
    /* skewness = (E[x^3]-3mE[x^2]+2m^3)/sigma^3 */
    if (brightness_standard_deviation != 0)
      brightness_skewness=(brightness_sum_x3/area-3.0*brightness_mean*
        brightness_sum_x2/area+2.0*brightness_mean*brightness_mean*
        brightness_mean)/(brightness_standard_deviation*
        brightness_standard_deviation*brightness_standard_deviation);
    (void) FormatLocaleString(text,MaxTextExtent,"%g",brightness_skewness);
    (void) SetImageProperty(image,"filter:brightness:skewness",text);
    saturation_mean=saturation_sum_x/area;
    (void) FormatLocaleString(text,MaxTextExtent,"%g",saturation_mean);
    (void) SetImageProperty(image,"filter:saturation:mean",text);
    saturation_standard_deviation=sqrt(saturation_sum_x2/area-(saturation_sum_x/
      area*saturation_sum_x/area));
    (void) FormatLocaleString(text,MaxTextExtent,"%g",
      saturation_standard_deviation);
    (void) SetImageProperty(image,"filter:saturation:standard-deviation",text);
    if (saturation_standard_deviation != 0)
      saturation_kurtosis=(saturation_sum_x4/area-4.0*saturation_mean*
        saturation_sum_x3/area+6.0*saturation_mean*saturation_mean*
        saturation_sum_x2/area-3.0*saturation_mean*saturation_mean*
        saturation_mean*saturation_mean)/(saturation_standard_deviation*
        saturation_standard_deviation*saturation_standard_deviation*
        saturation_standard_deviation)-3.0;
    (void) FormatLocaleString(text,MaxTextExtent,"%g",saturation_kurtosis);
    (void) SetImageProperty(image,"filter:saturation:kurtosis",text);
    if (saturation_standard_deviation != 0)
      saturation_skewness=(saturation_sum_x3/area-3.0*saturation_mean*
        saturation_sum_x2/area+2.0*saturation_mean*saturation_mean*
        saturation_mean)/(saturation_standard_deviation*
        saturation_standard_deviation*saturation_standard_deviation);
    (void) FormatLocaleString(text,MaxTextExtent,"%g",saturation_skewness);
    (void) SetImageProperty(image,"filter:saturation:skewness",text);
  }
  return(MagickImageFilterSignature);
}
|
bugz-50967-c.c | #include <stdio.h>
#include <omp.h>
// This smoke test demonstrates two problems.
// The first is compile fail when the subtype has a smaller size than 4 bytes
// That is both char and short fail. We need to generate compile fail
// for amdgcn when atomic type is char or short OR use a temp 4 byte
// value, which may not be atomic.
//
// The 2nd problem is a runtime fail. num_threads(64) has no control over
// actual number of threads when the default thread limit is 256.
// Set SUBTYPE to anything equal or greater size than int
// for atomic update not to cause fail in llc.
#define SUBTYPE char
#define REALTYPE int
// Smoke test body: 64 threads atomically increment a SUBTYPE-typed (here:
// char) accumulator inside a target region, then the result is copied into
// an int and checked against 64.  The file-header comments describe the two
// known failure modes this intentionally exercises (sub-int atomics and
// thread_limit vs num_threads); do not "fix" the test by changing SUBTYPE.
int f() {
REALTYPE b = 0;
#pragma omp target map(tofrom: b)
{
#pragma omp teams distribute // thread_limit(64)
// add clause thread_limit(64) above to circumvent the problem
// not getting num_threads 64 in parallel below.
// Without thread_limit clause, this incorrectly reports 256
for(int i = 0; i < 1; ++i) {
SUBTYPE a = 0;
#pragma omp parallel num_threads(64)
{
// Each of the (expected) 64 threads adds 1; the sum must be atomic.
#pragma omp atomic update
a += 1;
}
// Widen back to REALTYPE so the check below is not affected by char range.
b = (REALTYPE) (a);
}
}
if (b == 64 ) return 0;
printf("ERROR: expecting 64 got %d\n",b);
return 1;
}
/* Exit status: 0 when the atomic-update smoke test passes, 1 otherwise. */
int main() { return f(); }
|
heisenberg_hamiltonian_mpi.h | /*****************************************************************************
*
* Rokko: Integrated Interface for libraries of eigenvalue decomposition
*
* Copyright (C) 2012-2019 Rokko Developers https://github.com/t-sakashita/rokko
*
* Distributed under the Boost Software License, Version 1.0. (See accompanying
* file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
*
*****************************************************************************/
#ifndef ROKKO_UTILITY_HEISENBERG_HAMILTONIAN_MPI_H
#define ROKKO_UTILITY_HEISENBERG_HAMILTONIAN_MPI_H
#include <stdio.h>
#include "mpi.h"
/*
 * Distributed matrix-vector product w = H v for the spin-1/2 Heisenberg
 * Hamiltonian on L sites, MPI-parallelized over the top p = log2(nproc)
 * spin bits (each rank owns N = 2^(L-p) amplitudes).
 *
 *   comm            communicator (size must be a power of two)
 *   L               number of spins
 *   lattice_size    number of bonds
 *   lattice_first/
 *   lattice_second  endpoints of each bond
 *   v               input vector (length N per rank)
 *   w               output vector (length N per rank), overwritten
 *   buffer          scratch of length N for halo exchange
 *
 * Fix vs. previous revision: the bond loop used `size_t l` compared against
 * the `int` parameter `lattice_size` (signed/unsigned comparison; a negative
 * size would wrap to a near-infinite loop).  The index is now an int.
 */
void multiply(const MPI_Comm comm, int L, int lattice_size, int lattice_first[], int lattice_second[], const double* v, double* w, double* buffer) {
  int myrank, nproc;
  MPI_Status status;
  MPI_Comm_size(comm, &nproc);
  MPI_Comm_rank(comm, &myrank);

  /* p = floor(log2(nproc)); reject non-power-of-two communicators. */
  int n = nproc;
  int p = -1;
  do {
    n /= 2;
    ++p;
  } while (n > 0);
  if (nproc != (1 << p)) {
    if ( myrank == 0 ) {
      printf("This program can be run only for powers of 2\n");
    }
    MPI_Abort(comm, 1);
  }

  int N = 1 << (L-p);  /* local amplitudes per rank */
  for(int k=0; k<N; ++k) {
    w[k] = 0.;
  }

  for (int l = 0; l < lattice_size; ++l) {
    int i = lattice_first[l];
    int j = lattice_second[l];
    if (i < (L-p)) {
      if (j < (L-p)) {
        /* Both bond spins are local: pure on-rank update. */
        int m1 = 1 << i;
        int m2 = 1 << j;
        int m3 = m1 + m2;
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for (int k=0; k<N; ++k) {
          if ((k & m3) == m1) { // when (bit i == 1, bit j == 0) or (bit i == 0, bit j == 1)
            w[k] += 0.5 * v[k^m3] - 0.25 * v[k];
          } else if ((k & m3) == m2) {
            w[k] += 0.5 * v[k^m3] - 0.25 * v[k];
          } else {
            w[k] += 0.25 * v[k];
          }
        }
      } else {
        /* Spin i local, spin j owned by partner rank (myrank ^ m). */
        int m = 1 << (j-(L-p));
        MPI_Sendrecv(v, N, MPI_DOUBLE,
                     myrank ^ m, 0,
                     buffer, N, MPI_DOUBLE,
                     myrank ^ m, 0,
                     comm, &status);
        int m1 = 1 << i;
        if ((myrank & m) == m) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
          for (int k=0; k<N; ++k) {
            if ((k & m1) == m1) {
              w[k] += 0.25 * v[k];
            } else {
              w[k] += 0.5 * buffer[k^m1] - 0.25 * v[k];
            }
          }
        } else {
#ifdef _OPENMP
#pragma omp parallel for
#endif
          for (int k=0; k<N; ++k) {
            if ((k & m1) == m1) {
              w[k] += 0.5 * buffer[k^m1] - 0.25 * v[k];
            } else {
              w[k] += 0.25 * v[k];
            }
          }
        }
      }
    } else {
      if (j < (L-p)) {
        /* Mirror of the previous case with i remote and j local. */
        int m = 1 << (i-(L-p));
        MPI_Sendrecv(v, N, MPI_DOUBLE,
                     myrank ^ m, 0,
                     buffer, N, MPI_DOUBLE,
                     myrank ^ m, 0,
                     comm, &status);
        int m1 = 1 << j;
        if ((myrank & m) == m) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
          for (int k=0; k<N; ++k) {
            if ((k & m1) == m1) {
              w[k] += 0.25 * v[k];
            } else {
              w[k] += 0.5 * buffer[k^m1] - 0.25 * v[k];
            }
          }
        } else {
#ifdef _OPENMP
#pragma omp parallel for
#endif
          for (int k=0; k<N; ++k) {
            if ((k & m1) == m1) {
              w[k] += 0.5 * buffer[k^m1] - 0.25 * v[k];
            } else {
              w[k] += 0.25 * v[k];
            }
          }
        }
      } else {
        /* Both bond spins live in the rank bits. */
        int m = (1 << (i-(L-p))) + (1 << (j-(L-p)));
        if (((myrank & m) != m) && ((myrank & m) != 0)) {
          /* Rank bits differ on the bond: spin-flip term couples ranks. */
          MPI_Sendrecv(v, N, MPI_DOUBLE,
                       myrank ^ m, 0,
                       buffer, N, MPI_DOUBLE,
                       myrank ^ m, 0,
                       comm, &status);
#ifdef _OPENMP
#pragma omp parallel for
#endif
          for (int k=0; k<N; ++k) {
            w[k] += 0.5 * buffer[k] - 0.25 * v[k];
          }
        } else {
          /* Rank bits equal on the bond: diagonal contribution only. */
#ifdef _OPENMP
#pragma omp parallel for
#endif
          for (int k=0; k<N; ++k) {
            w[k] += 0.25 * v[k];
          }
        }
      }
    }
  }
}
#endif // ROKKO_UTILITY_HEISENBERG_HAMILTONIAN_MPI_H
|
Efficient_RANSAC.h | // Copyright (c) 2015 INRIA Sophia-Antipolis (France).
// All rights reserved.
//
// This file is part of CGAL (www.cgal.org).
//
// $URL$
// $Id$
// SPDX-License-Identifier: GPL-3.0-or-later OR LicenseRef-Commercial
//
//
// Author(s) : Sven Oesau, Yannick Verdie, Clément Jamin, Pierre Alliez
//
#ifndef CGAL_SHAPE_DETECTION_EFFICIENT_RANSAC_H
#define CGAL_SHAPE_DETECTION_EFFICIENT_RANSAC_H
#include <CGAL/license/Shape_detection.h>
#include <CGAL/Random.h>
#include <CGAL/Shape_detection/Efficient_RANSAC/Octree.h>
#include <CGAL/Shape_detection/Efficient_RANSAC/Shape_base.h>
#include <CGAL/Shape_detection/Efficient_RANSAC/Plane.h>
// for octree ------------------------------
#include <boost/iterator/filter_iterator.hpp>
#include <CGAL/bounding_box.h>
#include <CGAL/Iterator_range.h>
//----------
#include <vector>
#include <cmath>
#include <limits>
#include <fstream>
#include <sstream>
#include <functional>
// boost --------------
#include <CGAL/boost/iterator/counting_iterator.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/make_shared.hpp>
//---------------------
namespace CGAL {
namespace Shape_detection {
/*!
\ingroup PkgShapeDetectionRANSAC
\brief Shape detection algorithm based on the RANSAC method.
Given a point set in 3D space with unoriented normals, sampled on surfaces,
this class enables to detect subsets of connected points lying on the surface of primitive shapes.
Each input point is assigned to either none or at most one detected primitive
shape. The implementation follows \cgalCite{schnabel2007efficient}.
\tparam Traits must be a model of `EfficientRANSACTraits`.
*/
template <class Traits>
class Efficient_RANSAC {
public:
/// \cond SKIP_IN_MANUAL
// Predicate selecting input points not yet assigned to a detected shape
// (shape index == -1).  Used with boost::filter_iterator.
struct Filter_unassigned_points {
// NOTE(review): `m_shape_index` is declared before `dummy`, so the default
// constructor binds a reference to a member constructed later.  Taking the
// reference is fine; `dummy` just must not be read here (it is not).
Filter_unassigned_points() : m_shape_index(dummy) {}
Filter_unassigned_points(const std::vector<int> &shapeIndex)
: m_shape_index(shapeIndex) {}
// True when point index `x` is unassigned; out-of-range indices count as
// unassigned so the filter iterator can keep advancing past the end.
bool operator()(std::size_t x) {
if (x < m_shape_index.size())
return m_shape_index[x] == -1;
else return true; // to prevent infinite incrementing
}
const std::vector<int>& m_shape_index; // per-point shape index (-1 = none)
std::vector<int> dummy; // empty fallback for the default constructor
};
typedef boost::filter_iterator<Filter_unassigned_points,
boost::counting_iterator<std::size_t, boost::use_default, std::ptrdiff_t> > Point_index_iterator;
///< iterator for indices of points.
/// \endcond
/// \name Types
/// @{
/// \cond SKIP_IN_MANUAL
typedef typename Traits::Input_range::iterator Input_iterator;
typedef typename Traits::FT FT; ///< number type.
typedef typename Traits::Point_3 Point; ///< point type.
typedef typename Traits::Vector_3 Vector; ///< vector type.
/// \endcond
typedef typename Traits::Input_range Input_range;
///< Model of the concept `Range` with random access iterators, providing input points and normals
/// through the following two property maps.
typedef typename Traits::Point_map Point_map;
///< Property map to access the location of an input point.
typedef typename Traits::Normal_map Normal_map;
///< Property map to access the unoriented normal of an input point.
typedef Shape_base<Traits> Shape; ///< Shape type.
typedef Plane<Traits> Plane_shape; ///< %Plane shape type.
#ifdef DOXYGEN_RUNNING
typedef unspecified_type Shape_range;
///< `Iterator_range` with a bidirectional constant iterator type with value type `boost::shared_ptr<Shape>`.
typedef unspecified_type Plane_range;
///< `Iterator_range` with a bidirectional constant iterator type with value type `boost::shared_ptr<Plane_shape>`.
#else
struct Shape_range : public Iterator_range<
typename std::vector<boost::shared_ptr<Shape> >::const_iterator> {
typedef Iterator_range<
typename std::vector<boost::shared_ptr<Shape> >::const_iterator> Base;
Shape_range(boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > >
extracted_shapes) : Base(make_range(extracted_shapes->begin(),
extracted_shapes->end())), m_extracted_shapes(extracted_shapes) {}
private:
boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > >
m_extracted_shapes; // keeps a reference to the shape vector
};
struct Plane_range : public Iterator_range<
typename std::vector<boost::shared_ptr<Plane_shape> >::const_iterator> {
typedef Iterator_range<
typename std::vector<boost::shared_ptr<Plane_shape> >::const_iterator> Base;
Plane_range(boost::shared_ptr<std::vector<boost::shared_ptr<Plane_shape> > >
extracted_shapes) : Base(make_range(extracted_shapes->begin(),
extracted_shapes->end())), m_extracted_shapes(extracted_shapes) {}
private:
boost::shared_ptr<std::vector<boost::shared_ptr<Plane_shape> > >
m_extracted_shapes; // keeps a reference to the shape vector
};
#endif
#ifdef DOXYGEN_RUNNING
typedef unspecified_type Point_index_range;
///< `Iterator_range` with a bidirectional iterator with value type `std::size_t`
/// as indices into the input data that has not been assigned to a shape.
/// As this range class has no `size()` method, the method
/// `Efficient_RANSAC::number_of_unassigned_points()` is provided.
#else
typedef Iterator_range<Point_index_iterator>
Point_index_range;
#endif
/// @}
/// \name Parameters
/// @{
/*!
Parameters for the shape detection algorithm. They are explained in detail
in Section \ref Shape_detection_RANSACParameters of the User Manual.
*/
struct Parameters {
// Sentinels (-1, size_t max) mean "derive from the input at detect() time":
// epsilon / cluster_epsilon from the bounding-box diagonal, min_points from
// the point count.
Parameters()
: probability((FT) 0.01)
, min_points((std::numeric_limits<std::size_t>::max)())
, epsilon(-1)
, normal_threshold((FT) 0.9)
, cluster_epsilon(-1)
{}
/*!
Probability to control search endurance.
%Default value is 0.01.
A lower probability provides a higher reliability and determinism at the cost
of longer running time due to a higher search endurance.
It must belong to the interval [0, 1].
*/
FT probability;
/*!
Minimum number of points in a shape.
%Default value is 1% of total number of input points.
It must belong to the interval [0, +inf).
*/
std::size_t min_points;
/*!
Maximum acceptable Euclidean distance between a point and a shape.
%Default value is 1% of the bounding box diagonal.
It must belong to the interval [0, +inf).
*/
FT epsilon;
/*!
Threshold on the dot product between the estimated
shape's normal and the point's normal, that is the cosine of the
maximum accepted angle (cos(25°) = 0.9).
%Default value is 0.9 (around 25 degrees).
It must belong to the interval [0, 1].
*/
FT normal_threshold;
/*!
Maximum acceptable Euclidean distance between points, which are assumed to be neighbors.
%Default value is 1% of the bounding box diagonal.
It must belong to the interval [0, +inf).
*/
FT cluster_epsilon;
};
/// @}
private:
typedef internal::Octree<internal::DirectPointAccessor<Traits> >
Direct_octree;
typedef internal::Octree<internal::IndexedPointAccessor<Traits> >
Indexed_octree;
//--------------------------------------------typedef
// Creates a function pointer for instancing shape instances.
// Type-erased factory: each registered shape type is stored as a
// `Shape *(*)()` pointer to an instantiation of this function.
template <class ShapeT>
static Shape *factory() {
return new ShapeT; // caller owns the returned shape
}
public:
/// \name Initialization
/// @{
/*!
Constructs an empty shape detection object.
*/
// All pointers start null and all counters at zero; m_valid_iterators stays
// false until set_input() has been called.
Efficient_RANSAC(Traits t = Traits())
: m_traits(t)
, m_direct_octrees(nullptr)
, m_global_octree(nullptr)
, m_num_subsets(0)
, m_num_available_points(0)
, m_num_total_points(0)
, m_valid_iterators(false)
{}
/*!
Releases all memory allocated by this instance including shapes.
*/
~Efficient_RANSAC() {
clear();
}
/*!
Retrieves the traits class.
*/
const Traits&
traits() const
{
return m_traits;
}
/*!
Retrieves the point property map.
*/
const Point_map& point_map() const { return m_point_pmap; }
/*!
Retrieves the normal property map.
*/
// NOTE(review): name is `normal()` while the point accessor is
// `point_map()`; kept as-is for API compatibility.
const Normal_map& normal() const { return m_normal_pmap; }
// Begin iterator of the input range passed to set_input().
Input_iterator input_iterator_first() const
{
return m_input_iterator_first;
}
// Past-the-end iterator of the input range passed to set_input().
Input_iterator input_iterator_beyond() const
{
return m_input_iterator_beyond;
}
/*!
Sets the input data. The range must stay valid
until the detection has been performed and the access to the
results is no longer required. The data in the input is reordered by the methods
`detect()` and `preprocess()`. This function first calls `clear()`.
*/
void set_input(
Input_range& input_range,
///< Range of input data.
Point_map point_map = Point_map(),
///< Property map to access the position of an input point.
Normal_map normal_map = Normal_map()
///< Property map to access the normal of an input point.
) {
m_point_pmap = point_map;
m_normal_pmap = normal_map;
m_input_iterator_first = input_range.begin();
m_input_iterator_beyond = input_range.end();
// clear() is a no-op unless m_valid_iterators was already true, i.e. it
// only tears down state left over from a previous set_input().
clear();
m_extracted_shapes =
boost::make_shared<std::vector<boost::shared_ptr<Shape> > >();
m_num_available_points = m_num_total_points = std::distance(
m_input_iterator_first, m_input_iterator_beyond);
// From here on detect()/preprocess() may reorder the input range.
m_valid_iterators = true;
}
/*!
Registers the shape type `ShapeType` in the detection engine that must inherit from `Shape_base`.
For example, for registering a plane as detectable shape, you should call
`ransac.add_shape_factory< Shape_detection::Plane<Traits> >();`. Note
that if your call is within a template, you should add the `template`
keyword just before `add_shape_factory`:
`ransac.template add_shape_factory< Shape_detection::Plane<Traits> >();`.
*/
// Registers Shape_type for detection by storing its factory function;
// detect() instantiates one candidate per registered factory per sample.
template <class Shape_type>
void add_shape_factory() {
m_shape_factories.push_back(factory<Shape_type>);
}
/*!
Constructs internal data structures required for the shape detection.
These structures only depend on the input data, i.e. the points and
normal vectors. This method is called by `detect()`, if it was not called
before by the user.
*/
// Builds the search structures used by detect(): a hierarchy of
// progressively halved random subsets (each with its own octree) plus one
// global octree over all points.  Reorders the input range in place.
// Returns false when there is no input.
bool preprocess() {
if (m_num_total_points == 0)
return false;
// Generation of subsets
// Number of subsets grows with log2 of the point count, clamped to >= 2.
m_num_subsets = (std::size_t)(std::max<std::ptrdiff_t>)((std::ptrdiff_t)
std::floor(std::log(double(m_num_total_points))/std::log(2.))-9, 2);
// SUBSET GENERATION ->
// approach with increasing subset sizes -> replace with octree later on
Input_iterator last = m_input_iterator_beyond - 1;
std::size_t remainingPoints = m_num_total_points;
m_available_octree_sizes.resize(m_num_subsets);
m_direct_octrees = new Direct_octree *[m_num_subsets];
// Largest subset index first; each subset takes half the remaining points
// (one random point out of every consecutive pair), subset 0 keeps the rest.
for (int s = int(m_num_subsets) - 1;s >= 0;--s) {
std::size_t subsetSize = remainingPoints;
std::vector<std::size_t> indices(subsetSize);
if (s) {
subsetSize >>= 1;
for (std::size_t i = 0;i<subsetSize;i++) {
std::size_t index = get_default_random()(2);
index = index + (i<<1);
index = (index >= remainingPoints) ? remainingPoints - 1 : index;
indices[i] = index;
}
// move points to the end of the point vector
std::size_t j = subsetSize;
do {
j--;
typename std::iterator_traits<Input_iterator>::value_type
tmp = (*last);
*last = m_input_iterator_first[indices[std::size_t(j)]];
m_input_iterator_first[indices[std::size_t(j)]] = tmp;
last--;
} while (j > 0);
// Subset s owns the tail [last+1, last+subsetSize]; the offset
// (remainingPoints - subsetSize) maps global indices to this subset.
m_direct_octrees[s] = new Direct_octree(
m_traits, last + 1,
last + subsetSize + 1,
m_point_pmap, m_normal_pmap,
remainingPoints - subsetSize);
}
else
m_direct_octrees[0] = new Direct_octree(
m_traits, m_input_iterator_first,
m_input_iterator_first + (subsetSize),
m_point_pmap, m_normal_pmap,
0);
m_available_octree_sizes[s] = subsetSize;
m_direct_octrees[s]->createTree(m_options.cluster_epsilon);
remainingPoints -= subsetSize;
}
// Global octree indexes every input point; used for sampling and scoring.
m_global_octree = new Indexed_octree(
m_traits, m_input_iterator_first, m_input_iterator_beyond,
m_point_pmap, m_normal_pmap);
m_global_octree->createTree(m_options.cluster_epsilon);
return true;
}
/// @}
/// \name Memory Management
/// @{
/*!
Removes all shape types registered for detection.
*/
// Unregisters every shape type previously added via add_shape_factory().
void clear_shape_factories() {
m_shape_factories.clear();
}
/*!
Frees memory allocated for the internal search structures but keeps the detected shapes.
It invalidates the range retrieved using `unassigned_points()`.
*/
// Frees the global octree and the per-subset octrees while keeping any
// detected shapes.  Invalidates ranges from unassigned_points().
void clear_octrees() {
// Nothing to release before set_input() has been called.
if (!m_valid_iterators)
return;
delete m_global_octree;      // delete on nullptr is a no-op
m_global_octree = nullptr;
if (m_direct_octrees != nullptr) {
std::size_t idx = 0;
while (idx < m_num_subsets) {
delete m_direct_octrees[idx];
++idx;
}
delete [] m_direct_octrees;
m_direct_octrees = nullptr;
}
m_num_subsets = 0;
}
/*!
Calls `clear_octrees()` and removes all detected shapes.
All internal structures are cleaned, including formerly detected shapes.
Thus iterators and ranges retrieved through `shapes()`, `planes()` and `indices_of_unassigned_points()`
are invalidated.
*/
// Full reset: drops the shape index, detected shapes, octrees and shape
// factories; restores the available-point count to the total.
void clear() {
// If there is no data yet, there are no data structures.
if (!m_valid_iterators)
return;
// swap-with-temporary actually releases the vector's capacity
std::vector<int>().swap(m_shape_index);
m_extracted_shapes =
boost::make_shared<std::vector<boost::shared_ptr<Shape> > >();
m_num_available_points = m_num_total_points;
clear_octrees();
clear_shape_factories();
}
/// @}
/// \name Detection
/// @{
/*!
Performs the shape detection. Shape types considered during the detection
are those registered using `add_shape_factory()`.
\param options parameters for shape detection
\param callback can be omitted if the algorithm should be run
without any callback. It is called regularly when the algorithm
is running: the current advancement (between 0.0 and 1.0) is
passed as parameter. If it returns `true`, then the algorithm
continues its execution normally; if it returns `false`, the
algorithm is stopped. Note that this interruption may leave the
class in an invalid state.
\return `true` if shape types have been registered and
input data has been set. Otherwise, `false` is returned.
*/
// Runs the RANSAC shape detection loop over the registered shape types.
// Returns false when no shape types / no input are set, preprocessing
// fails, or the callback aborts; true otherwise.
//
// Fix vs. previous revision: `best_candidate->m_score` was dereferenced
// *before* the `if (!best_candidate)` null check; the guard now comes first.
bool detect(const Parameters &options = Parameters(),
            const std::function<bool(double)>& callback
              = std::function<bool(double)>())
{
  m_options = options;

  // No shape types for detection or no points provided, exit
  if (m_shape_factories.size() == 0 ||
      (m_input_iterator_beyond - m_input_iterator_first) == 0)
    return false;

  if (m_num_subsets == 0 || m_global_octree == 0) {
    if (!preprocess())
      return false;
  }

  if (callback && !callback(0.))
    return false;

  // Reset data structures possibly used by former search
  m_extracted_shapes =
    boost::make_shared<std::vector<boost::shared_ptr<Shape> > >();
  m_num_available_points = m_num_total_points;

  for (std::size_t i = 0;i<m_num_subsets;i++) {
    m_available_octree_sizes[i] = m_direct_octrees[i]->size();
  }

  // Use bounding box diagonal as reference for default values
  Bbox_3 bbox = m_global_octree->boundingBox();
  FT bbox_diagonal = (FT) CGAL::sqrt(
      (bbox.xmax() - bbox.xmin()) * (bbox.xmax() - bbox.xmin())
    + (bbox.ymax() - bbox.ymin()) * (bbox.ymax() - bbox.ymin())
    + (bbox.zmax() - bbox.zmin()) * (bbox.zmax() - bbox.zmin()));

  // Epsilon or cluster_epsilon have been set by the user?
  // If not, derive from bounding box diagonal
  m_options.epsilon = (m_options.epsilon < 0)
    ? bbox_diagonal * (FT) 0.01 : m_options.epsilon;
  m_options.cluster_epsilon = (m_options.cluster_epsilon < 0)
    ? bbox_diagonal * (FT) 0.01 : m_options.cluster_epsilon;

  // Minimum number of points has been set?
  m_options.min_points =
    (m_options.min_points == (std::numeric_limits<std::size_t>::max)()) ?
    (std::size_t)((FT)0.01 * m_num_available_points) :
    m_options.min_points;
  m_options.min_points = (m_options.min_points < 10) ? 10 : m_options.min_points;

  // Initializing the shape index
  m_shape_index.assign(m_num_available_points, -1);

  if (m_options.min_points > m_num_available_points)
    return true;

  // List of all randomly drawn candidates
  // with the minimum number of points
  std::vector<Shape *> candidates;

  // Identifying minimum number of samples
  m_required_samples = 0;
  for (std::size_t i = 0;i<m_shape_factories.size();i++) {
    Shape *tmp = (Shape *) m_shape_factories[i]();
    m_required_samples = (std::max<std::size_t>)(m_required_samples, tmp->minimum_sample_size());
    delete tmp;
  }

  std::size_t first_sample; // first sample for RANSAC

  FT best_expected = 0;

  // number of points that have been assigned to a shape
  std::size_t num_invalid = 0;

  std::size_t generated_candidates = 0;
  std::size_t failed_candidates = 0;
  std::size_t limit_failed_candidates = (std::max)(std::size_t(10000),
                                                   std::size_t(m_input_iterator_beyond
                                                               - m_input_iterator_first)
                                                   / std::size_t(100));

  bool force_exit = false;
  bool keep_searching = true;

  do { // main loop
    best_expected = 0;

    if (keep_searching)
      do { // candidate-generation loop
        // Search (remaining_points / min_points) shapes (max 200 per iteration, min 1)
        std::size_t search_number
          = (std::min)(std::size_t(200),
                       (std::max)(std::size_t((m_num_available_points - num_invalid) / double(m_options.min_points)),
                                  std::size_t(1)));
        for (std::size_t nb = 0; nb < search_number; ++ nb)
        {
          // Generate candidates
          //1. pick a point p1 randomly among available points
          std::set<std::size_t> indices;
          bool done = false;
          do {
            do
              first_sample = get_default_random()(
                static_cast<unsigned int>(m_num_available_points));
            while (m_shape_index[first_sample] != -1);

            done = m_global_octree->drawSamplesFromCellContainingPoint(
              get(m_point_pmap,
                  *(m_input_iterator_first + first_sample)),
              select_random_octree_level(),
              indices,
              m_shape_index,
              m_required_samples);

            if (callback && !callback(num_invalid / double(m_num_total_points)))
              return false;
          } while (m_shape_index[first_sample] != -1 || !done);

          generated_candidates++;

          //add candidate for each type of primitives
          for(typename std::vector<Shape *(*)()>::iterator it =
              m_shape_factories.begin(); it != m_shape_factories.end(); it++) {
            if (callback && !callback(num_invalid / double(m_num_total_points)))
              return false;
            Shape *p = (Shape *) (*it)();
            //compute the primitive and says if the candidate is valid
            p->compute(indices,
                       m_input_iterator_first,
                       m_traits,
                       m_point_pmap,
                       m_normal_pmap,
                       m_options.epsilon,
                       m_options.normal_threshold);

            if (p->is_valid()) {
              improve_bound(p, m_num_available_points - num_invalid, 1, 500);

              //evaluate the candidate
              if(p->max_bound() >= m_options.min_points && p->score() > 0) {
                if (best_expected < p->expected_value())
                  best_expected = p->expected_value();
                candidates.push_back(p);
              }
              else {
                failed_candidates++;
                delete p;
              }
            }
            else {
              failed_candidates++;
              delete p;
            }
          }
        }

        if (failed_candidates >= limit_failed_candidates)
        {
          force_exit = true;
        }

        keep_searching = (stop_probability(m_options.min_points,
                                           m_num_available_points - num_invalid,
                                           generated_candidates, m_global_octree->maxLevel())
                          > m_options.probability);
      } while( !force_exit
               && stop_probability((std::size_t) best_expected,
                                   m_num_available_points - num_invalid,
                                   generated_candidates,
                                   m_global_octree->maxLevel())
                  > m_options.probability
               && keep_searching);
    // end of generate candidate

    if (force_exit) {
      break;
    }

    if (candidates.empty())
      continue;

    // Now get the best candidate in the current set of all candidates
    // Note that the function sorts the candidates:
    //  the best candidate is always the last element of the vector
    Shape *best_candidate =
      get_best_candidate(candidates, m_num_available_points - num_invalid);

    if (callback && !callback(num_invalid / double(m_num_total_points)))
      return false;

    // Guard against a null best candidate *before* dereferencing it
    // (previously m_score was read first — a null-pointer dereference).
    if (!best_candidate)
      continue;

    // If search is done and the best candidate is too small, we are done.
    if (!keep_searching && best_candidate->m_score < m_options.min_points)
      break;

    best_candidate->m_indices.clear();

    best_candidate->m_score =
      m_global_octree->score(best_candidate,
                             m_shape_index,
                             FT(3) * m_options.epsilon,
                             m_options.normal_threshold);

    best_expected = static_cast<FT>(best_candidate->m_score);

    best_candidate->connected_component(best_candidate->m_indices,
                                        m_options.cluster_epsilon);

    if (callback && !callback(num_invalid / double(m_num_total_points)))
      return false;

    // check score against min_points and clear out candidates if too low
    if (best_candidate->indices_of_assigned_points().size() <
        m_options.min_points)
    {
      if (!(best_candidate->indices_of_assigned_points().empty()))
        for (std::size_t i = 0;i < candidates.size() - 1;i++) {
          if (best_candidate->is_same(candidates[i])) {
            delete candidates[i];
            candidates[i] = nullptr;
          }
        }
      candidates.back() = nullptr;
      delete best_candidate;
      best_candidate = nullptr;

      if (callback && !callback(num_invalid / double(m_num_total_points)))
        return false;

      // Trimming candidates list: compact surviving (non-null) candidates
      // to the front of the vector, then shrink.
      std::size_t empty = 0, occupied = 0;
      while (empty < candidates.size()) {
        while (empty < candidates.size() && candidates[empty]) empty++;

        if (empty >= candidates.size())
          break;

        if (occupied < empty)
          occupied = empty + 1;

        while (occupied < candidates.size() && !candidates[occupied])
          occupied++;

        if (occupied >= candidates.size())
          break;

        candidates[empty] = candidates[occupied];
        candidates[occupied] = nullptr;
        empty++;
        occupied++;
      }

      candidates.resize(empty);

      if (callback && !callback(num_invalid / double(m_num_total_points)))
        return false;
    }
    else
      if (stop_probability((std::size_t) best_candidate->expected_value(),
                           (m_num_available_points - num_invalid),
                           generated_candidates,
                           m_global_octree->maxLevel())
          <= m_options.probability) {
        // Remove candidate from list
        candidates.back() = nullptr;

        //1. add best candidate to final result.
        m_extracted_shapes->push_back(
          boost::shared_ptr<Shape>(best_candidate));

        if (callback && !callback(num_invalid / double(m_num_total_points)))
          return false;

        //2. remove the points
        const std::vector<std::size_t> &indices_points_best_candidate =
          best_candidate->indices_of_assigned_points();

        // update generated candidates to reflect removal of points
        generated_candidates = std::size_t(std::pow (1.f - (indices_points_best_candidate.size() /
                                                            float(m_num_available_points - num_invalid)), 3.f)
                                           * generated_candidates);

        //2.3 Remove the points from the subtrees
        for (std::size_t i = 0;i<indices_points_best_candidate.size();i++) {
          m_shape_index[indices_points_best_candidate.at(i)] =
            int(m_extracted_shapes->size()) - 1;

          num_invalid++;

          for (std::size_t j = 0;j<m_num_subsets;j++) {
            if (m_direct_octrees[j] && m_direct_octrees[j]->m_root) {
              std::size_t offset = m_direct_octrees[j]->offset();
              if (offset <= indices_points_best_candidate.at(i) &&
                  (indices_points_best_candidate.at(i) - offset)
                  < m_direct_octrees[j]->size()) {
                m_available_octree_sizes[j]--;
              }
            }
          }
        }

        failed_candidates = 0;
        best_expected = 0;

        if (callback && !callback(num_invalid / double(m_num_total_points)))
          return false;

        std::vector<std::size_t> subset_sizes(m_num_subsets);
        subset_sizes[0] = m_available_octree_sizes[0];
        for (std::size_t i = 1;i<m_num_subsets;i++) {
          subset_sizes[i] = subset_sizes[i-1] + m_available_octree_sizes[i];
        }

        //3. Remove points from candidates common with extracted primitive
        //#pragma omp parallel for
        best_expected = 0;
        for (std::size_t i=0;i< candidates.size()-1;i++) {
          if (candidates[i]) {
            candidates[i]->update_points(m_shape_index);
            candidates[i]->compute_bound(
              subset_sizes[candidates[i]->m_nb_subset_used - 1],
              m_num_available_points - num_invalid);

            if (candidates[i]->max_bound() < m_options.min_points) {
              delete candidates[i];
              candidates[i] = nullptr;
            }
            else {
              best_expected = (candidates[i]->expected_value() > best_expected) ?
                candidates[i]->expected_value() : best_expected;
            }
          }
        }

        if (callback && !callback(num_invalid / double(m_num_total_points)))
          return false;

        // Compact the candidate list (two-pointer sweep from both ends).
        std::size_t start = 0, end = candidates.size() - 1;
        while (start < end) {
          while (candidates[start] && start < end) start++;
          while (!candidates[end] && start < end) end--;

          if (!candidates[start] && candidates[end] && start < end) {
            candidates[start] = candidates[end];
            candidates[end] = nullptr;
            start++;
            end--;
          }
        }

        if (candidates[end]) end++;

        candidates.resize(end);
      }
      else if (!keep_searching)
        ++ generated_candidates;

    if (callback && !callback(num_invalid / double(m_num_total_points)))
      return false;

    keep_searching = (stop_probability(m_options.min_points,
                                       m_num_available_points - num_invalid,
                                       generated_candidates,
                                       m_global_octree->maxLevel())
                      > m_options.probability);
  }
  while((keep_searching
         && FT(m_num_available_points - num_invalid) >= m_options.min_points)
        || best_expected >= m_options.min_points);

  // Clean up remaining candidates.
  for (std::size_t i = 0;i<candidates.size();i++)
    delete candidates[i];
  candidates.resize(0);

  m_num_available_points -= num_invalid;

  return true;
}
/// @}
/// \name Access
/// @{
/*!
Returns an `Iterator_range` with a bidirectional iterator with value type
`boost::shared_ptr<Shape>` over the detected shapes in the order of detection.
Depending on the chosen probability
for the detection, the shapes are ordered with decreasing size.
*/
Shape_range shapes() const {
  // Wrap the shared vector of extracted shapes in an iterator range.
  Shape_range detected_shapes(m_extracted_shapes);
  return detected_shapes;
}
/*!
Returns an `Iterator_range` with a bidirectional iterator with
value type `boost::shared_ptr<Plane_shape>` over only the
detected planes in the order of detection. Depending on the
chosen probability for the detection, the planes are ordered
with decreasing size.
*/
Plane_range planes() const {
  // Collect only the extracted shapes that are planes, preserving the
  // order of detection.
  boost::shared_ptr<std::vector<boost::shared_ptr<Plane_shape> > > extracted_planes
    = boost::make_shared<std::vector<boost::shared_ptr<Plane_shape> > >();
  const std::size_t nb_shapes = m_extracted_shapes->size();
  for (std::size_t idx = 0; idx < nb_shapes; ++idx)
  {
    boost::shared_ptr<Plane_shape> as_plane
      = boost::dynamic_pointer_cast<Plane_shape>((*m_extracted_shapes)[idx]);
    // Ignore all shapes other than plane
    if (!as_plane)
      continue;
    extracted_planes->push_back(as_plane);
  }
  return Plane_range(extracted_planes);
}
/*!
Number of points not assigned to a shape.
*/
std::size_t number_of_unassigned_points() const {
  // m_num_available_points is decremented (by num_invalid) each time points
  // are assigned to an extracted shape, so it equals the unassigned count.
  return m_num_available_points;
}
/*!
Returns an `Iterator_range` with a bidirectional iterator with value type `std::size_t`
as indices into the input data that has not been assigned to a shape.
*/
Point_index_range indices_of_unassigned_points() {
  // Iterate over every point index in [0, size) and keep only those the
  // Filter_unassigned_points predicate accepts as unassigned.
  typedef boost::counting_iterator<std::size_t, boost::use_default,
                                   std::ptrdiff_t> Counter;
  Filter_unassigned_points unassigned_filter(m_shape_index);
  Point_index_iterator first =
    boost::make_filter_iterator<Filter_unassigned_points>(
      unassigned_filter,
      Counter(0),
      Counter(m_shape_index.size()));
  return make_range(first, Point_index_iterator(first.end()));
}
/// @}
private:
int select_random_octree_level() {
  // Draw a random level in [0, maxLevel] (assumes get_default_random()(n)
  // yields values in [0, n)).
  const unsigned int num_levels =
    static_cast<unsigned int>(m_global_octree->maxLevel() + 1);
  return (int) get_default_random()(num_levels);
}
// Returns the most promising candidate: after sorting by upper score bound,
// repeatedly refines (improve_bound) every candidate whose score interval
// overlaps the current best until the back of `candidates` is separated from
// the rest. Candidates are only reordered/refined here, never deleted.
Shape* get_best_candidate(std::vector<Shape* >& candidates,
                          const std::size_t num_available_points) {
  if (candidates.size() == 1)
    return candidates.back();
  int index_worse_candidate = 0;
  bool improved = true;
  while (index_worse_candidate < (int)candidates.size() - 1 && improved) {
    improved = false;
    typename Shape::Compare_by_max_bound comp;
    // Sort the still-ambiguous tail so the best candidate ends up at back().
    std::sort(candidates.begin() + index_worse_candidate,
              candidates.end(),
              comp);
    //refine the best one
    improve_bound(candidates.back(),
                  num_available_points, m_num_subsets,
                  m_options.min_points);
    int position_stop;
    //Take all those intersecting the best one, check for equal ones
    for (position_stop = int(candidates.size()) - 1;
         position_stop > index_worse_candidate;
         position_stop--) {
      if (candidates.back()->min_bound() >
          candidates.at(position_stop)->max_bound())
        break;//the intervals do not overlap anymore
      if (candidates.at(position_stop)->max_bound()
          <= m_options.min_points)
        break; //the following candidate doesn't have enough points!
      //if we reach this point, there is an overlap
      // between best one and position_stop
      //so request refining bound on position_stop
      improved |= improve_bound(candidates.at(position_stop),
                                num_available_points,
                                m_num_subsets,
                                m_options.min_points);
      //test again after refined
      if (candidates.back()->min_bound() >
          candidates.at(position_stop)->max_bound())
        break;//the intervals do not overlap anymore
    }
    // Everything before position_stop is already decided; next round only
    // sorts/refines from there on.
    index_worse_candidate = position_stop;
  }
  return candidates.back();
}
// Tightens the score bounds of `candidate` by scoring it against additional
// point subsets (direct octrees) until at least `min_points` new points have
// been sampled or all subsets are used. Returns false when no further subset
// may be added (already at max_subset or all m_num_subsets consumed).
bool improve_bound(Shape *candidate,
                   std::size_t num_available_points,
                   std::size_t max_subset,
                   std::size_t min_points) {
  if (candidate->m_nb_subset_used >= max_subset)
    return false;
  if (candidate->m_nb_subset_used >= m_num_subsets)
    return false;
  // NOTE(review): this clamp is unreachable -- the early return above
  // already guarantees m_nb_subset_used < m_num_subsets here.
  candidate->m_nb_subset_used =
    (candidate->m_nb_subset_used >= m_num_subsets) ?
    m_num_subsets - 1 : candidate->m_nb_subset_used;
  //what it does is add another subset and recompute lower and upper bound
  //the next subset to include is provided by m_nb_subset_used
  // Count the points already evaluated in previously used subsets.
  std::size_t num_points_evaluated = 0;
  for (std::size_t i=0;i<candidate->m_nb_subset_used;i++)
    num_points_evaluated += m_available_octree_sizes[i];
  // need score of new subset as well as sum of
  // the score of the previous considered subset
  std::size_t new_score = 0;
  std::size_t new_sampled_points = 0;
  do {
    new_score = m_direct_octrees[candidate->m_nb_subset_used]->score(
      candidate,
      m_shape_index,
      m_options.epsilon,
      m_options.normal_threshold);
    candidate->m_score += new_score;
    num_points_evaluated +=
      m_available_octree_sizes[candidate->m_nb_subset_used];
    new_sampled_points +=
      m_available_octree_sizes[candidate->m_nb_subset_used];
    candidate->m_nb_subset_used++;
  } while (new_sampled_points < min_points &&
           candidate->m_nb_subset_used < m_num_subsets);
  // NOTE(review): the score accumulated above is immediately overwritten
  // with the actual number of assigned indices -- confirm the += is intended.
  candidate->m_score = candidate->m_indices.size();
  candidate->compute_bound(num_points_evaluated, num_available_points);
  return true;
}
// Probability of having missed a candidate of size `largest_candidate`
// after drawing `num_candidates` candidates, clamped to 1.
inline FT stop_probability(std::size_t largest_candidate, std::size_t num_pts, std::size_t num_candidates, std::size_t octree_depth) const {
  const FT sample_space = FT(num_pts) * FT(octree_depth + 1)
                          * FT(1 << (m_required_samples - 1));
  const FT miss_once = FT(1) - FT(largest_candidate) / sample_space;
  const FT miss_always = std::pow(miss_once, int(num_candidates));
  return (std::min<FT>)(miss_always, FT(1));
}
private:
  // Detection parameters (epsilon, normal_threshold, min_points, probability, ...).
  Parameters m_options;
  // Traits class.
  Traits m_traits;
  // Octrees built on input data for quick shape evaluation and
  // sample selection within an octree cell.
  Direct_octree **m_direct_octrees;
  Indexed_octree *m_global_octree;
  // Number of still-available points per direct octree (subset);
  // decremented as points get assigned to extracted shapes.
  std::vector<std::size_t> m_available_octree_sizes;
  std::size_t m_num_subsets;
  // maps index into points to assigned extracted primitive
  // (presumably a negative value marks "unassigned" -- see
  // Filter_unassigned_points; TODO confirm)
  std::vector<int> m_shape_index;
  // Points not yet assigned to any extracted shape.
  std::size_t m_num_available_points;
  std::size_t m_num_total_points;
  // Used in stop_probability: 2^(m_required_samples - 1) scales the sample space.
  std::size_t m_required_samples;
  //give the index of the subset of point i
  std::vector<int> m_index_subsets;
  // Shapes extracted so far, in order of detection (shared with Shape_range).
  boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > > m_extracted_shapes;
  // Factory functions -- presumably instantiate the registered shape types.
  std::vector<Shape *(*)()> m_shape_factories;
  // iterators of input data
  bool m_valid_iterators;
  Input_iterator m_input_iterator_first, m_input_iterator_beyond;
  Point_map m_point_pmap;
  Normal_map m_normal_pmap;
};
}
}
#endif // CGAL_SHAPE_DETECTION_EFFICIENT_RANSAC_H
|
FastTree-2.1.10.c | /*
* FastTree -- inferring approximately-maximum-likelihood trees for large
* multiple sequence alignments.
*
* Morgan N. Price
* http://www.microbesonline.org/fasttree/
*
* Thanks to Jim Hester of the Cleveland Clinic Foundation for
* providing the first parallel (OpenMP) code, Siavash Mirarab of
* UT Austin for implementing the WAG option, Samuel Shepard
* at the CDC for suggesting and helping with the -quote option, and
* Aaron Darling (University of Technology, Sydney) for numerical changes
* for wide alignments of closely-related sequences.
*
* Copyright (C) 2008-2015 The Regents of the University of California
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* or visit http://www.gnu.org/copyleft/gpl.html
*
* Disclaimer
*
* NEITHER THE UNITED STATES NOR THE UNITED STATES DEPARTMENT OF ENERGY,
* NOR ANY OF THEIR EMPLOYEES, MAKES ANY WARRANTY, EXPRESS OR IMPLIED,
* OR ASSUMES ANY LEGAL LIABILITY OR RESPONSIBILITY FOR THE ACCURACY,
* COMPLETENESS, OR USEFULNESS OF ANY INFORMATION, APPARATUS, PRODUCT,
* OR PROCESS DISCLOSED, OR REPRESENTS THAT ITS USE WOULD NOT INFRINGE
* PRIVATELY OWNED RIGHTS.
*/
/*
* To compile FastTree, do:
* gcc -Wall -O3 -finline-functions -funroll-loops -o FastTree -lm FastTree.c
* Use -DNO_SSE to turn off use of SSE3 instructions
* (should not be necessary because compiler should not set __SSE__ if
* not available, and modern mallocs should return 16-byte-aligned values)
* Use -DOPENMP -fopenmp to use multiple threads (note, old versions of gcc
* may not support -fopenmp)
* Use -DTRACK_MEMORY if you want detailed reports of memory usage,
* but results are not correct above 4GB because mallinfo stores int values.
* It also makes FastTree run significantly slower.
*
* To get usage guidance, do:
* FastTree -help
*
* FastTree uses profiles instead of a distance matrix, and computes
* support values for each split from the profiles of the 4 nodes
* around the split. It stores a profile for each node and an average
* profile over all active nodes (the "out-profile" for computing the
* total sum of distance to other nodes). The neighbor joining phase
* requires O(N*L*a) space, where N is the number of sequences, L is
* the alignment width, and a is the alphabet size. The top-hits
* heuristic requires an additional O(N sqrt(N)) memory. After
* neighbor-joining, FastTree improves the topology with
* nearest-neighbor interchanges (NNIs) and subtree-prune-regraft
* moves (SPRs), which does not have a significant additional memory
* requirement. (We need only store "up-profiles" on the path from our
* current traversal point to the root.) These take O(NLa) time per
* round, and with default settings, O(N log(N) L a) time total.
* FastTree further improves the topology with maximum-likelihood
* NNIs, using similar data structures and complexity, but with a
* higher constant factor, and now the "profiles" are actually
* posterior distributions for that subtree. Finally, FastTree
* resamples the site likelihoods around each NNI and uses
* the Shimodaira Hasegawa test to estimate the reliability of each split.
*
* Overview of the neighbor-joining phase:
*
* Although FastTree uses a log correction on profile distances to
* account for multiple substitutions when doing NNIs and SPRs, the
* operations on the profiles themselves involve "additive" distances
* -- either %different (for nucleotide) or by using an amino acid
* similarity matrix (for proteins). If we are using %different as
* our distance matrix then
*
* Profile_distance(A,B) = 1 - sum over characters of freq(A)*freq(B)
*
* and we can average this value over positions. Positions with gaps
* are weighted by %ungapped(A) * %ungapped(B).
*
* If we are using an amino acid dissimilarity matrix D(i,j) then at
* each position
*
* Profile_distance(A,B) = sum(i,j) freq(A==i) * freq(B==j) * D(i,j)
* = sum(k) Ak * Bk * Lambda(k)
*
* where k iterates over 20 eigenvectors, Lambda(k) is the eigenvalue,
* and if A==i, then Ak is the kth column of the inverse of the
* eigenvector matrix.
*
* The exhaustive approach (-slow) takes O(N**3*L*a) time, but
* this can be reduced to as little as O(N**(3/2)*log(N)*L*a) time
* by using heuristics.
*
* It uses a combination of three heuristics: a visible set similar to
* that of fast neighbor-joining (Elias & Lagergren 2005), a local hill-climbing
* search for a better join (as in relaxed neighbor-joining, Evans et
* al. 2006), and a top-hit list to reduce the search space (see
* below).
*
* The "visible" set stores, for each node, the best join for that
* node, as identified at some point in the past
*
* If top-hits are not being used, then the neighbor-joining phase can
* be summarized as:
*
* Compute the out-profile by averaging the leaves
* Compute the out-distance of each leaf quickly, using the out-profile
* Compute the visible set (or approximate it using top-hits, see below)
* Until we're down to 3 active nodes:
* Find the best join in the visible set
* (This involves recomputing the neighbor-joining criterion,
* as out-distances and #active nodes may have changed)
* Follow a chain of best hits (again recomputing the criterion)
* until we find a locally best join, as in relaxed neighbor joining
* Create a profile of the parent node, either using simple averages (default)
* or using weighted joining as in BIONJ (if -bionj was specified)
* Update the out-profile and the out-distances
* Update the visible set:
* find the best join for the new joined node
* replace hits to the joined children with hits to the parent
* if we stumble across a join for the new node that is better
* than the corresponding entry in the visible set, "reset"
* that entry.
*
* For each iteration, this method does
* O(N) work to find the best hit in the visible set
* O(L*N*a*log(N)) work to do the local search, where log(N)
* is a pessimistic estimate of the number of iterations. In
* practice, we average <1 iteration for 2,000 sequences.
* With -fastest, this step is omitted.
* O(N*a) work to compute the joined profile and update the out-profile
* O(L*N*a) work to update the out-distances
* O(L*N*a) work to compare the joined profile to the other nodes
* (to find the new entry in the visible set)
*
* and there are N-3 iterations, so it takes O(N**2 * L * log(N) * a) time.
*
* The profile distances give exactly the same result as matrix
* distances in neighbor-joining or BIONJ would if there are no gaps
* in the alignment. If there are gaps, then it is an
* approximation. To get the same result we also store a "diameter"
* for each node (diameter is 0 for leaves).
*
* In the simpler case (NJ rather than BIONJ), when we join A and B to
* give a new node AB,
*
* Profile(AB) = (A+B)/2
* Profile_distance(AB,C) = (Profile_distance(A,C)+Profile_distance(B,C))/2
* because the formulas above are linear
*
* And according to the neighbor-joining rule,
* d(AB,C) = (d(A,C)+d(B,C)-d(A,B))/2
*
* and we can achieve the same value by writing
* diameter(AB) = pd(A,B)/2
* diameter(leaf) = 0
* d(A,B) = pd(A,B) - diameter(A) - diameter(B)
*
* because
* d(AB,C) = (d(A,C)+d(B,C)-d(A,B))/2
* = (pd(A,C)-diam(A)-diam(C)+pd(B,C)-diam(B)-diam(C)-d(A,B)+diam(A)+diam(B))/2
* = (pd(A,C)+pd(B,C))/2 - diam(C) - pd(A,B)
* = pd(AB,C) - diam(AB) - diam(C)
*
* If we are using BIONJ, with weight lambda for the join:
* Profile(AB) = lambda*A + (1-lambda)*B
* then a similar argument gives
* diam(AB) = lambda*diam(A) + (1-lambda)*diam(B) + lambda*d(A,AB) + (1-lambda)*d(B,AB),
*
* where, as in neighbor joining,
* d(A,AB) = d(A,B) + (total out_distance(A) - total out_distance(B))/(n-2)
*
* A similar recursion formula works for the "variance" matrix of BIONJ,
* var(AB,C) = lambda*var(A,C) + (1-lambda)*var(B,C) - lambda*(1-lambda)*var(A,B)
* is equivalent to
* var(A,B) = pv(A,B) - vd(A) - vd(B), where
* pv(A,B) = pd(A,B)
* vd(A) = 0 for leaves
* vd(AB) = lambda*vd(A) + (1-lambda)*vd(B) + lambda*(1-lambda)*var(A,B)
*
* The top-hit heuristic to reduce the work below O(N**2*L) stores a top-hit
* list of size m=sqrt(N) for each active node.
*
* The list can be initialized for all the leaves in sub (N**2 * L) time as follows:
* Pick a "seed" sequence and compare it to all others
* Store the top m hits of the seed as its top-hit list
* Take "close" hits of the seed (within the top m, and see the "close" parameter),
* and assume that their top m hits lie within the top 2*m hits of the seed.
* So, compare them to the seed's neighbors (if they do not already
* have a top hit list) and set their top hits.
*
* This method does O(N*L) work for each seed, or O(N**(3/2)*L) work total.
*
* To avoid doing O(N*L) work at each iteration, we need to avoid
* updating the visible set and the out-distances. So, we use "stale"
* out-distances, and when searching the visible set for the best hit,
* we only inspect the top m=sqrt(N) entries. We then update those
* out-distances (up to 2*m*L*a work) and then find the best hit.
*
* To avoid searching the entire visible set, FastTree keeps
* and updates a list of the top sqrt(N) entries in the visible set.
* This costs O(sqrt(N)) time per join to find the best entry and to
* update, or (N sqrt(N)) time overall.
*
* Similarly, when doing the local hill-climbing, we avoid O(N*L) work
* by only considering the top-hits for the current node. So this adds
* O(m*a*log(N)) work per iteration.
*
* When we join two nodes, we compute profiles and update the
* out-profile as before. We need to compute the best hits of the node
* -- we merge the lists for the children and select the best up-to-m
* hits. If the top hit list contains a stale node we replace it with
* its parent. If we still have <m/2 entries, we do a "refresh".
*
* In a "refresh", similar to the fast top-hit computation above, we
* compare the "seed", in this case the new joined node, to all other
* nodes. We compare its close neighbors (the top m hits) to all
* neighbors (the top 2*m hits) and update the top-hit lists of all
* neighbors (by merging to give a list of 3*m entries and then
* selecting the best m entries).
*
* Finally, during these processes we update the visible sets for
* other nodes with better hits if we find them, and we set the
* visible entry for the new joined node to the best entry in its
* top-hit list. (And whenever we update a visible entry, we
* do O(sqrt(N)) work to update the top-visible list.)
* These updates are not common so they do not alter the
* O(N sqrt(N) log(N) L a) total running time for the joining phase.
*
* Second-level top hits
*
* With -fastest or with -2nd, FastTree uses an additional "2nd-level" top hits
* heuristic to reduce the running time for the top-hits phase to
* O(N**1.25 L) and for the neighbor-joining phase to O(N**1.25 L a).
* This also reduces the memory usage for the top-hits lists to
* O(N**1.25), which is important for alignments with a million
* sequences. The key idea is to store just q = sqrt(m) top hits for
* most sequences.
*
* Given the neighbors of A -- either for a seed or for a neighbor
* from the top-hits heuristic, if B is within the top q hits of A, we
* set top-hits(B) from the top 3*q top-hits of A. And, we record that
* A is the "source" of the hits for B, so if we run low on hits for
* B, instead of doing a full refresh, we can do top-hits(B) :=
* top-hits(B) union top-hits(active_ancestor(A)).
* During a refresh, these "2nd-level" top hits are updated just as
* normal, but the source is maintained and only q entries are stored,
* until we near the end of the neighbor joining phase (until the
* root has 2*m children or less).
*
* Parallel execution with OpenMP
*
* If you compile FastTree with OpenMP support, it will take
* advantage of multiple CPUs on one machine. It will parallelize:
*
* The top hits phase
* Comparing one node to many others during the NJ phase (the simplest kind of join)
* The refresh phase
* Optimizing likelihoods for 3 alternate topologies during ML NNIs and ML supports
* (only 3 threads can be used)
*
* This accounts for most of the O(N L a) or slower steps except for
* minimum-evolution NNIs (which are fast anyway), minimum-evolution SPRs,
* selecting per-site rates, and optimizing branch lengths outside of ML NNIs.
*
* Parallelizing the top hits phase may lead to a slight change in the tree,
* as some top hits are computed from different (and potentially less optimal source).
* This means that results on repeated runs may not be 100% identical.
* However, this should not have any significant effect on tree quality
* after the NNIs and SPRs.
*
* The OpenMP code also turns off the star-topology test during ML
* NNIs, which may lead to slight improvements in likelihood.
*/
#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include <stdlib.h>
#include <sys/time.h>
#include <ctype.h>
#include <unistd.h>
#ifdef TRACK_MEMORY
/* malloc.h apparently doesn't exist on MacOS */
#include <malloc.h>
#endif
/* Compile with -DOPENMP to turn on multithreading */
#ifdef OPENMP
#include <omp.h>
#endif
/* By default, tries to compile with SSE instructions for greater speed.
But if compiled with -DUSE_DOUBLE, uses double precision instead of single-precision
floating point (2x memory required), does not use SSE, and allows much shorter
branch lengths.
*/
#ifdef __SSE__
#if !defined(NO_SSE) && !defined(USE_DOUBLE)
#define USE_SSE3
#endif
#endif
#ifdef USE_DOUBLE
#define SSE_STRING "Double precision (No SSE3)"
typedef double numeric_t;
#define ScanNumericSpec "%lf"
#else
typedef float numeric_t;
#define ScanNumericSpec "%f"
#endif
#ifdef USE_SSE3
#define SSE_STRING "SSE3"
#define ALIGNED __attribute__((aligned(16)))
/* True if pointer X is 16-byte aligned (required for SSE loads/stores).
   Fixed to use the macro argument: the original expansion hard-coded the
   identifier `new` and ignored X, so it only compiled where a variable
   named `new` happened to be in scope. */
#define IS_ALIGNED(X) ((((unsigned long) (X)) & 15L) == 0L)
#include <xmmintrin.h>
#else
#define ALIGNED
#define IS_ALIGNED(X) 1
#ifndef USE_DOUBLE
#define SSE_STRING "No SSE3"
#endif
#endif /* USE_SSE3 */
#define FT_VERSION "2.1.10"
/* Short usage text shown by -help / on bad arguments.
   NOTE(review): print with fputs or "%s" -- never as a printf format string. */
char *usage =
  "  FastTree protein_alignment > tree\n"
  "  FastTree < protein_alignment > tree\n"
  "  FastTree -out tree protein_alignment\n"
  "  FastTree -nt nucleotide_alignment > tree\n"
  "  FastTree -nt -gtr < nucleotide_alignment > tree\n"
  "  FastTree < nucleotide_alignment > tree\n"
  "FastTree accepts alignments in fasta or phylip interleaved formats\n"
  "\n"
  "Common options (must be before the alignment file):\n"
  "  -quiet to suppress reporting information\n"
  "  -nopr to suppress progress indicator\n"
  "  -log logfile -- save intermediate trees, settings, and model details\n"
  "  -fastest -- speed up the neighbor joining phase & reduce memory usage\n"
  "        (recommended for >50,000 sequences)\n"
  "  -n <number> to analyze multiple alignments (phylip format only)\n"
  "        (use for global bootstrap, with seqboot and CompareToBootstrap.pl)\n"
  "  -nosupport to not compute support values\n"
  "  -intree newick_file to set the starting tree(s)\n"
  "  -intree1 newick_file to use this starting tree for all the alignments\n"
  "        (for faster global bootstrap on huge alignments)\n"
  "  -pseudo to use pseudocounts (recommended for highly gapped sequences)\n"
  "  -gtr -- generalized time-reversible model (nucleotide alignments only)\n"
  "  -lg -- Le-Gascuel 2008 model (amino acid alignments only)\n"
  "  -wag -- Whelan-And-Goldman 2001 model (amino acid alignments only)\n"
  "  -quote -- allow spaces and other restricted characters (but not ' ) in\n"
  "           sequence names and quote names in the output tree (fasta input only;\n"
  "           FastTree will not be able to read these trees back in)\n"
  "  -noml to turn off maximum-likelihood\n"
  "  -nome to turn off minimum-evolution NNIs and SPRs\n"
  "        (recommended if running additional ML NNIs with -intree)\n"
  "  -nome -mllen with -intree to optimize branch lengths for a fixed topology\n"
  "  -cat # to specify the number of rate categories of sites (default 20)\n"
  "      or -nocat to use constant rates\n"
  "  -gamma -- after optimizing the tree under the CAT approximation,\n"
  "      rescale the lengths to optimize the Gamma20 likelihood\n"
  "  -constraints constraintAlignment to constrain the topology search\n"
  "       constraintAlignment should have 1s or 0s to indicates splits\n"
  "  -expert -- see more options\n"
  "For more information, see http://www.microbesonline.org/fasttree/\n";
/* Extended usage text shown by -expert.
   NOTE(review): contains a literal '%' ("%different"); print with fputs or
   "%s", never as a printf format string.
   Fix: removed the stray file-scope ';' that followed the terminated
   initializer -- an empty declaration is a constraint violation in ISO C
   (accepted only as a compiler extension). */
char *expertUsage =
  "FastTree [-nt] [-n 100] [-quote] [-pseudo | -pseudo 1.0]\n"
  "           [-boot 1000 | -nosupport]\n"
  "           [-intree starting_trees_file | -intree1 starting_tree_file]\n"
  "           [-quiet | -nopr]\n"
  "           [-nni 10] [-spr 2] [-noml | -mllen | -mlnni 10]\n"
  "           [-mlacc 2] [-cat 20 | -nocat] [-gamma]\n"
  "           [-slow | -fastest] [-2nd | -no2nd] [-slownni] [-seed 1253] \n"
  "           [-top | -notop] [-topm 1.0 [-close 0.75] [-refresh 0.8]]\n"
  "           [-matrix Matrix | -nomatrix] [-nj | -bionj]\n"
  "           [-lg] [-wag] [-nt] [-gtr] [-gtrrates ac ag at cg ct gt] [-gtrfreq A C G T]\n"
  "           [ -constraints constraintAlignment [ -constraintWeight 100.0 ] ]\n"
  "           [-log logfile]\n"
  "         [ alignment_file ]\n"
  "        [ -out output_newick_file | > newick_tree]\n"
  "\n"
  "or\n"
  "\n"
  "FastTree [-nt] [-matrix Matrix | -nomatrix] [-rawdist] -makematrix [alignment]\n"
  "    [-n 100] > phylip_distance_matrix\n"
  "\n"
  "  FastTree supports fasta or phylip interleaved alignments\n"
  "  By default FastTree expects protein alignments,  use -nt for nucleotides\n"
  "  FastTree reads standard input if no alignment file is given\n"
  "\n"
  "Input/output options:\n"
  "  -n -- read in multiple alignments in. This only\n"
  "    works with phylip interleaved format. For example, you can\n"
  "    use it with the output from phylip's seqboot. If you use -n, FastTree\n"
  "    will write 1 tree per line to standard output.\n"
  "  -intree newickfile -- read the starting tree in from newickfile.\n"
  "     Any branch lengths in the starting trees are ignored.\n"
  "    -intree with -n will read a separate starting tree for each alignment.\n"
  "  -intree1 newickfile -- read the same starting tree for each alignment\n"
  "  -quiet -- do not write to standard error during normal operation (no progress\n"
  "     indicator, no options summary, no likelihood values, etc.)\n"
  "  -nopr -- do not write the progress indicator to stderr\n"
  "  -log logfile -- save intermediate trees so you can extract\n"
  "    the trees and restart long-running jobs if they crash\n"
  "    -log also reports the per-site rates (1 means slowest category)\n"
  "  -quote -- quote sequence names in the output and allow spaces, commas,\n"
  "    parentheses, and colons in them but not ' characters (fasta files only)\n"
  "\n"
  "Distances:\n"
  "  Default: For protein sequences, log-corrected distances and an\n"
  "     amino acid dissimilarity matrix derived from BLOSUM45\n"
  "  or for nucleotide sequences, Jukes-Cantor distances\n"
  "  To specify a different matrix, use -matrix FilePrefix or -nomatrix\n"
  "  Use -rawdist to turn the log-correction off\n"
  "  or to use %different instead of Jukes-Cantor\n"
  "\n"
  "  -pseudo [weight] -- Use pseudocounts to estimate distances between\n"
  "      sequences with little or no overlap. (Off by default.) Recommended\n"
  "      if analyzing the alignment has sequences with little or no overlap.\n"
  "      If the weight is not specified, it is 1.0\n"
  "\n"
  "Topology refinement:\n"
  "  By default, FastTree tries to improve the tree with up to 4*log2(N)\n"
  "  rounds of minimum-evolution nearest-neighbor interchanges (NNI),\n"
  "  where N is the number of unique sequences, 2 rounds of\n"
  "  subtree-prune-regraft (SPR) moves (also min. evo.), and\n"
  "  up to 2*log(N) rounds of maximum-likelihood NNIs.\n"
  "  Use -nni to set the number of rounds of min. evo. NNIs,\n"
  "  and -spr to set the rounds of SPRs.\n"
  "  Use -noml to turn off both min-evo NNIs and SPRs (useful if refining\n"
  "       an approximately maximum-likelihood tree with further NNIs)\n"
  "  Use -sprlength set the maximum length of a SPR move (default 10)\n"
  "  Use -mlnni to set the number of rounds of maximum-likelihood NNIs\n"
  "  Use -mlacc 2 or -mlacc 3 to always optimize all 5 branches at each NNI,\n"
  "      and to optimize all 5 branches in 2 or 3 rounds\n"
  "  Use -mllen to optimize branch lengths without ML NNIs\n"
  "  Use -mllen -nome with -intree to optimize branch lengths on a fixed topology\n"
  "  Use -slownni to turn off heuristics to avoid constant subtrees (affects both\n"
  "       ML and ME NNIs)\n"
  "\n"
  "Maximum likelihood model options:\n"
  "  -lg -- Le-Gascuel 2008 model instead of (default) Jones-Taylor-Thorton 1992 model (a.a. only)\n"
  "  -wag -- Whelan-And-Goldman 2001 model instead of (default) Jones-Taylor-Thorton 1992 model (a.a. only)\n"
  "  -gtr -- generalized time-reversible instead of (default) Jukes-Cantor (nt only)\n"
  "  -cat # -- specify the number of rate categories of sites (default 20)\n"
  "  -nocat -- no CAT model (just 1 category)\n"
  "  -gamma -- after the final round of optimizing branch lengths with the CAT model,\n"
  "      report the likelihood under the discrete gamma model with the same\n"
  "      number of categories. FastTree uses the same branch lengths but\n"
  "      optimizes the gamma shape parameter and the scale of the lengths.\n"
  "      The final tree will have rescaled lengths. Used with -log, this\n"
  "      also generates per-site likelihoods for use with CONSEL, see\n"
  "      GammaLogToPaup.pl and documentation on the FastTree web site.\n"
  "\n"
  "Support value options:\n"
  "  By default, FastTree computes local support values by resampling the site\n"
  "  likelihoods 1,000 times and the Shimodaira Hasegawa test. If you specify -nome,\n"
  "  it will compute minimum-evolution bootstrap supports instead\n"
  "  In either case, the support values are proportions ranging from 0 to 1\n"
  "\n"
  "  Use -nosupport to turn off support values or -boot 100 to use just 100 resamples\n"
  "  Use -seed to initialize the random number generator\n"
  "\n"
  "Searching for the best join:\n"
  "  By default, FastTree combines the 'visible set' of fast neighbor-joining with\n"
  "      local hill-climbing as in relaxed neighbor-joining\n"
  "  -slow -- exhaustive search (like NJ or BIONJ, but different gap handling)\n"
  "      -slow takes half an hour instead of 8 seconds for 1,250 proteins\n"
  "  -fastest -- search the visible set (the top hit for each node) only\n"
  "      Unlike the original fast neighbor-joining, -fastest updates visible(C)\n"
  "      after joining A and B if join(AB,C) is better than join(C,visible(C))\n"
  "      -fastest also updates out-distances in a very lazy way,\n"
  "      -fastest sets -2nd on as well, use -fastest -no2nd to avoid this\n"
  "\n"
  "Top-hit heuristics:\n"
  "  By default, FastTree uses a top-hit list to speed up search\n"
  "  Use -notop (or -slow) to turn this feature off\n"
  "         and compare all leaves to each other,\n"
  "         and all new joined nodes to each other\n"
  "  -topm 1.0 -- set the top-hit list size to parameter*sqrt(N)\n"
  "         FastTree estimates the top m hits of a leaf from the\n"
  "         top 2*m hits of a 'close' neighbor, where close is\n"
  "         defined as d(seed,close) < 0.75 * d(seed, hit of rank 2*m),\n"
  "         and updates the top-hits as joins proceed\n"
  "  -close 0.75 -- modify the close heuristic, lower is more conservative\n"
  "  -refresh 0.8 -- compare a joined node to all other nodes if its\n"
  "         top-hit list is less than 80% of the desired length,\n"
  "         or if the age of the top-hit list is log2(m) or greater\n"
  "   -2nd or -no2nd to turn 2nd-level top hits heuristic on or off\n"
  "      This reduces memory usage and running time but may lead to\n"
  "      marginal reductions in tree quality.\n"
  "      (By default, -fastest turns on -2nd.)\n"
  "\n"
  "Join options:\n"
  "  -nj: regular (unweighted) neighbor-joining (default)\n"
  "  -bionj: weighted joins as in BIONJ\n"
  "      FastTree will also weight joins during NNIs\n"
  "\n"
  "Constrained topology search options:\n"
  "  -constraints alignmentfile -- an alignment with values of 0, 1, and -\n"
  "       Not all sequences need be present. A column of 0s and 1s defines a\n"
  "       constrained split. Some constraints may be violated\n"
  "       (see 'violating constraints:' in standard error).\n"
  "  -constraintWeight -- how strongly to weight the constraints. A value of 1\n"
  "       means a penalty of 1 in tree length for violating a constraint\n"
  "       Default: 100.0\n"
  "\n"
  "For more information, see http://www.microbesonline.org/fasttree/\n"
  "   or the comments in the source code\n";
#define MAXCODES 20
#define NOCODE 127
/* Note -- sequence lines longer than BUFFER_SIZE are
allowed, but FASTA header lines must be within this limit */
#define BUFFER_SIZE 5000
#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
#define MAX(X,Y) ((X) > (Y) ? (X) : (Y))
/* An alignment as read from the input (see ReadAlignment): parallel arrays
   of sequence names and sequence strings, all nPos characters long */
typedef struct {
  int nPos;     /* number of positions (columns) in the alignment */
  int nSeq;     /* number of sequences currently stored */
  char **names; /* names[i] is the header/name of sequence i */
  char **seqs;  /* seqs[i] is the character string of sequence i */
  int nSaved; /* actual allocated size of names and seqs */
} alignment_t;
/* For each position in a profile, we have a weight (% non-gapped) and a
   frequency vector. (If using a matrix, the frequency vector is in eigenspace).
   We also store codes for simple profile positions (all gaps or only 1 value)
   If weight[pos] > 0 && codes[pos] == NOCODE then we store the vector
   vectors is stored as consecutive sets of nCodes values, so the vector for
   the ith nonconstant position starts at &vectors[nCodes*i]
   To speed up comparison of outprofile to a sequence or other simple profile, we also
   (for outprofiles) store codeDist[iPos*nCodes+k] = dist(k,profile[iPos])
   For constraints, we store a vector of nOn and nOff
   If not using constraints, those will be NULL
*/
typedef struct {
  /* alignment profile */
  numeric_t *weights;   /* weights[pos] = fraction of non-gapped characters at pos */
  unsigned char *codes; /* codes[pos] = the single character code, or NOCODE */
  numeric_t *vectors;   /* NULL if no non-constant positions, e.g. for leaves */
  int nVectors;         /* number of nCodes-long vectors actually stored */
  numeric_t *codeDist;  /* Optional -- distance to each code at each position */
  /* constraint profile */
  int *nOn;             /* per-constraint count of '1' characters (NULL if no constraints) */
  int *nOff;            /* per-constraint count of '0' characters (NULL if no constraints) */
} profile_t;
/* A visible node is a pair of nodes i, j such that j is the best hit of i,
   using the neighbor-joining criterion, at the time the comparison was made,
   or approximately so since then.
   Note that variance = dist because in BIONJ, constant factors of variance do not matter,
   and because we weight ungapped sequences higher naturally when averaging profiles,
   so we do not take this into account in the computation of "lambda" for BIONJ.
   For the top-hit list heuristic, if the top hit list becomes "too short",
   we store invalid entries with i=j=-1 and dist/criterion very high.
*/
typedef struct {
  int i, j;             /* the pair of nodes compared; i=j=-1 marks an invalid entry */
  numeric_t weight;     /* Total product of weights (maximum value is nPos)
                           This is needed for weighted joins and for pseudocounts,
                           but not in most other places.
                           For example, it is not maintained by the top hits code */
  numeric_t dist;       /* The uncorrected distance (includes diameter correction) */
  numeric_t criterion;  /* changes when we update the out-profile or change nActive */
} besthit_t;
/* The children of a tree node; the root has 3 children (see NJ_t.root),
   other internal nodes have fewer, so child[] holds up to 3 entries */
typedef struct {
  int nChild;   /* number of valid entries in child[] */
  int child[3]; /* node indices of the children */
} children_t;
/* An eigen-decomposed scoring matrix, so that profile similarity
   computations run in O(alphabet) per position instead of O(alphabet^2) */
typedef struct {
  /* Distances between amino acids */
  numeric_t distances[MAXCODES][MAXCODES];
  /* Inverse of the eigenvalue matrix, for rotating a frequency vector
     into eigenspace so that profile similarity computations are
     O(alphabet) not O(alphabet*alphabet) time.
  */
  numeric_t eigeninv[MAXCODES][MAXCODES];
  numeric_t eigenval[MAXCODES]; /* eigenvalues */
  /* eigentot=eigeninv times the all-1s frequency vector
     useful for normalizing rotated frequency vectors
  */
  numeric_t eigentot[MAXCODES];
  /* codeFreq is the transpose of the eigeninv matrix, i.e. codeFreq[A] is
     the rotated frequency vector for code A */
  numeric_t codeFreq[MAXCODES][MAXCODES];
  numeric_t gapFreq[MAXCODES]; /* set by SetupDistanceMatrix, like codeFreq and eigentot */
} distance_matrix_t;
/* A transition matrix gives the instantaneous rate of change of frequencies
df/dt = M . f
which is solved by
f(t) = exp(M) . f(0)
and which is not a symmetric matrix because of
non-uniform stationary frequencies stat, so that
M stat = 0
M(i,j) is instantaneous rate of j -> i, not of i -> j
S = diag(sqrt(stat)) is a correction so that
M' = S**-1 M S is symmetric
Let W L W**-1 = M' be an eigendecomposition of M'
Because M' is symmetric, W can be a rotation, and W**-1 = t(W)
Set V = S*W
M = V L V**-1 is an eigendecomposition of M
Note V**-1 = W**-1 S**-1 = t(W) S**-1
Evolution by time t is given by
exp(M*t) = V exp(L*t) V**-1
P(A & B | t) = B . exp(M*t) . (A * stat)
note this is *not* the same as P(A->B | t)
and we can reduce some of the computations from O(a**2) to O(a) time,
where a is the alphabet size, by storing frequency vectors as
t(V) . f = t(W) . t(S) . f
Then
P(f0 & f1 | t) = f1 . exp(M*t) . f0 * (f0 . stat) = sum(r0j * r1j * exp(l_j*t))
where r0 and r1 are the transformed vectors
Posterior distribution of P given children f0 and f1 is given by
P(i | f0, f1, t0, t1) = stat * P(i->f0 | t0) * P(i->f1 | t1)
= P(i & f0 | t0) * P(i & f1 | t1) / stat
~ (V . exp(t0*L) . r0) * (V . exp(t1*L) . r1) / stat
   When normalizing this posterior distribution (to sum to 1), divide by stat,
   and transform by t(V) -- this is the "profile" of internal nodes
To eliminate the O(N**2) step of transforming by t(V), if the posterior
distribution of an amino acid is near 1 then we can approximate it by
P(i) ~= (i==A) * w + nearP(i) * (1-w), where
w is fit so that P(i==A) is correct
nearP = Posterior(i | i, i, 0.1, 0.1) [0.1 is an arbitrary choice]
and we confirm that the approximation works well before we use it.
Given this parameter w we can set
rotated_posterior = rotation(w * (i==A)/stat + (1-w) * nearP/stat)
= codeFreq(A) * w/stat(A) + nearFreq(A) * (1-w)
*/
/* An eigen-decomposed substitution model; see the long comment above for
   the derivation of the rotated ("frequency vector") representation */
typedef struct {
  numeric_t stat[MAXCODES];    /* The stationary distribution */
  numeric_t statinv[MAXCODES]; /* 1/stat */
  /* the eigenmatrix, with the eigenvectors as columns and rotations of individual
     characters as rows. Also includes a NOCODE entry for gaps */
  numeric_t codeFreq[NOCODE+1][MAXCODES];
  numeric_t eigeninv[MAXCODES][MAXCODES];  /* Inverse of eigenmatrix */
  numeric_t eigeninvT[MAXCODES][MAXCODES]; /* transpose of eigeninv */
  numeric_t eigenval[MAXCODES];            /* Eigenvalues */
  /* These are for approximate posteriors (off by default) */
  numeric_t nearP[MAXCODES][MAXCODES];    /* nearP[i][j] = P(parent=j | both children are i, both lengths are 0.1) */
  numeric_t nearFreq[MAXCODES][MAXCODES]; /* rotation of nearP/stat */
} transition_matrix_t;
/* Per-site rate categories for maximum likelihood
   (defaults to a single category of rate 1.0 -- see NJ_t below) */
typedef struct {
  int nRateCategories;   /* number of rate categories */
  numeric_t *rates;      /* 1 per rate category */
  unsigned int *ratecat; /* 1 category per position */
} rates_t;
/* The central data structure: the input alignment and model, per-node
   profiles and bookkeeping, and the inferred tree itself */
typedef struct {
  /* The input */
  int nSeq; /* number of (unique) input sequences */
  int nPos; /* number of alignment positions */
  char **seqs;   /* the alignment sequences array (not reallocated) */
  distance_matrix_t *distance_matrix; /* a pointer (not reallocated), or NULL if using %identity distance */
  transition_matrix_t *transmat; /* a pointer (is allocated), or NULL for Jukes-Cantor */
  /* Topological constraints are represented for each sequence as binary characters
     with values of '0', '1', or '-' (for missing data)
     Sequences that have no constraint may have a NULL string
  */
  int nConstraints;
  char **constraintSeqs;

  /* The profile data structures */
  int maxnode;  /* The next index to allocate */
  int maxnodes; /* Space allocated in data structures below */
  profile_t **profiles; /* Profiles of leaves and intermediate nodes */
  numeric_t *diameter;    /* To correct for distance "up" from children (if any) */
  numeric_t *varDiameter; /* To correct variances for distance "up" */
  numeric_t *selfdist;    /* Saved for use in some formulas */
  numeric_t *selfweight;  /* Saved for use in some formulas */

  /* Average profile of all active nodes, the "outprofile"
   * If all inputs are ungapped, this has weight 1 (not nSequences) at each position
   * The frequencies all sum to one (or that is implied by the eigen-representation)
   */
  profile_t *outprofile;
  double totdiam; /* total diameter of active nodes (used with the out-profile) */

  /* We sometimes use stale out-distances, so we remember what nActive was */
  numeric_t *outDistances; /* Sum of distances to other active (parent==-1) nodes */
  int *nOutDistActive;     /* What nActive was when this outDistance was computed */

  /* the inferred tree */
  int root;    /* index of the root. Unlike other internal nodes, it has 3 children */
  int *parent; /* -1 or index of parent */
  children_t *child;
  numeric_t *branchlength; /* Distance to parent */
  numeric_t *support;      /* 1 for high-confidence nodes */

  /* auxiliary data for maximum likelihood (defaults to 1 category of rate=1.0) */
  rates_t rates;
} NJ_t;
/* Uniquify sequences in an alignment -- map from indices
   in the alignment to unique indices in a NJ_t
*/
typedef struct {
  int nSeq;         /* number of sequences in the original alignment */
  int nUnique;      /* number of distinct sequences */
  int *uniqueFirst; /* iUnique -> iAln (first alignment index with that sequence) */
  int *alnNext;     /* iAln -> next alignment index with the same sequence, or -1 */
  int *alnToUniq;   /* iAln -> iUnique, or -1 if another was the exemplar */
  char **uniqueSeq; /* indexed by iUniq -- points to strings allocated elsewhere */
} uniquify_t;
/* Describes which switch to do: for a quartet (A,B),(C,D) the three
   topologies are AB|CD (no change), AC|BD (swap B with C), and
   AD|BC (swap B with D) -- see MLQuartetNNI below */
typedef enum {ABvsCD,ACvsBD,ADvsBC} nni_t;
/* A list of these describes a chain of NNI moves in a rooted tree,
   making up, in total, an SPR move
*/
typedef struct {
  int nodes[2];       /* the two nodes involved in this step */
  double deltaLength; /* change in tree length for this step (lower is better) */
} spr_step_t;
/* Keep track of hits for the top-hits heuristic without wasting memory
   j = -1 means empty
   If j is an inactive node, this may be replaced by that node's parent (and dist recomputed)
*/
typedef struct {
  int j;          /* the hit node, or -1 for an empty slot */
  numeric_t dist; /* distance to node j */
} hit_t;
/* The top-hits list of a single node (one of these per node -- see top_hits_t) */
typedef struct {
  int nHits;     /* the allocated and desired size; some of them may be empty */
  hit_t *hits;   /* the hits themselves */
  int hitSource; /* where to refresh hits from if a 2nd-level top-hit list, or -1 */
  int age;       /* number of joins since a refresh */
} top_hits_list_t;
/* All state for the top-hits heuristic: per-node hit lists, the visible
   set, and the top-visible subset used to pick the next join */
typedef struct {
  int m; /* size of a full top hits list, usually sqrt(N) */
  int q; /* size of a 2nd-level top hits, usually sqrt(m) */
  int maxnodes; /* number of per-node entries allocated below */
  top_hits_list_t *top_hits_lists; /* one per node */
  hit_t *visible; /* the "visible" (very best) hit for each node */

  /* The top-visible set is a subset, usually of size m, of the visible set --
     it is the set of joins to select from
     Each entry is either a node whose visible set entry has a good (low) criterion,
     or -1 for empty, or is an obsolete node (which is effectively the same).
     Whenever we update the visible set, should also call UpdateTopVisible()
     which ensures that none of the topvisible set are stale (that is, they
     all point to an active node).
  */
  int nTopVisible; /* nTopVisible = m * topvisibleMult */
  int *topvisible; /* node indices, or -1 for empty entries */
  int topvisibleAge; /* joins since the top-visible list was recomputed */

#ifdef OPENMP
  /* 1 lock to read or write any top hits list, no thread grabs more than one */
  omp_lock_t *locks;
#endif
} top_hits_t;
/* Global variables */
/* Options (most are set from the command line; see the usage string above) */
int verbose = 1;
int showProgress = 1;
int slow = 0;    /* set by -slow: compare all nodes to each other instead of using top hits */
int fastest = 0; /* set by -fastest: lazier updates of visible set and out-distances */
bool useTopHits2nd = false; /* use the second-level top hits heuristic? */
int bionj = 0;   /* 1 for weighted joins as in BIONJ (-bionj), 0 for plain neighbor-joining */
double tophitsMult = 1.0; /* 0 means compare nodes to all other nodes */
double tophitsClose = -1.0; /* Parameter for how close is close; also used as a coverage req. */
double topvisibleMult = 1.5; /* nTopVisible = m * topvisibleMult; 1 or 2 did not make much difference
                                in either running time or accuracy so I chose a compromise. */
double tophitsRefresh = 0.8; /* Refresh if fraction of top-hit-length drops to this */
double tophits2Mult = 1.0; /* Second-level top heuristic -- only with -fastest */
int tophits2Safety = 3; /* Safety factor for second level of top-hits heuristic */
double tophits2Refresh = 0.6; /* Refresh 2nd-level top hits if drops down to this fraction of length */
double staleOutLimit = 0.01; /* nActive changes by at most this amount before we recompute
                                an out-distance. (Only applies if using the top-hits heuristic) */
double fResetOutProfile = 0.02; /* Recompute out profile from scratch if nActive has changed
                                   by more than this proportion, and */
int nResetOutProfile = 200; /* nActive has also changed more than this amount */
int nCodes=20; /* 20 if protein, 4 if nucleotide */
bool useMatrix=true; /* If false, use %different as the uncorrected distance */
bool logdist = true; /* If true, do a log-correction (scoredist-like or Jukes-Cantor)
                        but only during NNIs and support values, not during neighbor-joining */
double pseudoWeight = 0.0; /* The weight of pseudocounts to avoid artificial long branches when
                              nearby sequences in the tree have little or no overlap
                              (off by default). The prior distance is based on
                              all overlapping positions among the quartet or triplet under
                              consideration. The log correction takes place after the
                              pseudocount is used. */
double constraintWeight = 100.0;/* Cost of violation of a topological constraint in evolutionary distance
                                   or likelihood */
double MEMinDelta = 1.0e-4; /* Changes of less than this in tree-length are discounted for
                               purposes of identifying fixed subtrees */
bool fastNNI = true; /* if true, use the faster NNI heuristics */
bool gammaLogLk = false; /* compute gamma likelihood without reoptimizing branch lengths? */

/* Maximum likelihood options and constants */
/* These are used to rescale likelihood values and avoid taking a logarithm at each position */
const double LkUnderflow = 1.0e-4;
const double LkUnderflowInv = 1.0e4;
const double LogLkUnderflow = 9.21034037197618; /* -log(LkUnderflowInv) */
const double Log2 = 0.693147180559945;
/* These are used to limit the optimization of branch lengths.
   Also very short branch lengths can create numerical problems.
   In version 2.1.7, the minimum branch lengths (MLMinBranchLength and MLMinRelBranchLength)
   were increased to prevent numerical problems in rare cases.
   In version 2.1.8, to provide useful branch lengths for genome-wide alignments,
   the minimum branch lengths were dramatically decreased if USE_DOUBLE is defined.
*/
#ifndef USE_DOUBLE
const double MLMinBranchLengthTolerance = 1.0e-4; /* absolute tolerance for optimizing branch lengths */
const double MLFTolBranchLength = 0.001; /* fractional tolerance for optimizing branch lengths */
const double MLMinBranchLength = 5.0e-4; /* minimum value for branch length */
const double MLMinRelBranchLength = 2.5e-4; /* minimum of rate * length */
const double fPostTotalTolerance = 1.0e-10; /* posterior vector must sum to at least this before rescaling */
#else
const double MLMinBranchLengthTolerance = 1.0e-9;
const double MLFTolBranchLength = 0.001;
const double MLMinBranchLength = 5.0e-9;
const double MLMinRelBranchLength = 2.5e-9;
const double fPostTotalTolerance = 1.0e-20;
#endif
int mlAccuracy = 1; /* Rounds of optimization of branch lengths; 1 means do 2nd round only if close */
double closeLogLkLimit = 5.0; /* If partial optimization of an NNI looks like it would decrease the log likelihood
                                 by this much or more then do not optimize it further */
double treeLogLkDelta = 0.1; /* Give up if tree log-lk changes by less than this; NNIs that change
                                likelihood by less than this also are considered unimportant
                                by some heuristics */
bool exactML = true; /* Exact or approximate posterior distributions for a.a.s */
double approxMLminf = 0.95; /* Only try to approximate posterior distributions if max. value is at least this high */
double approxMLminratio = 2/3.0;/* Ratio of approximated/true posterior values must be at least this high */
double approxMLnearT = 0.2; /* 2nd component of near-constant posterior distribution uses this time scale */
const int nDefaultRateCats = 20; /* default number of rate categories for ML site rates */

/* Performance and memory usage */
long profileOps = 0; /* Full profile-based distance operations */
long outprofileOps = 0; /* How many of profileOps are comparisons to outprofile */
long seqOps = 0; /* Faster leaf-based distance operations */
long profileAvgOps = 0; /* Number of profile-average steps */
long nHillBetter = 0; /* Number of hill-climbing steps */
long nCloseUsed = 0; /* Number of "close" neighbors we avoid full search for */
long nClose2Used = 0; /* Number of "close" neighbors we use 2nd-level top hits for */
long nRefreshTopHits = 0; /* Number of full-blown searches (interior nodes) */
long nVisibleUpdate = 0; /* Number of updates of the visible set */
long nNNI = 0; /* Number of NNI changes performed */
long nSPR = 0; /* Number of SPR changes performed */
long nML_NNI = 0; /* Number of max-lik. NNI changes performed */
long nSuboptimalSplits = 0; /* # of splits that are rejected given final tree (during bootstrap) */
long nSuboptimalConstrained = 0; /* Bad splits that are due to constraints */
long nConstraintViolations = 0; /* Number of constraint violations */
long nProfileFreqAlloc = 0; /* # of profile frequency vectors allocated */
long nProfileFreqAvoid = 0; /* # of times a frequency-vector allocation was avoided */
long szAllAlloc = 0; /* NOTE(review): presumably cumulative bytes allocated -- confirm against mymalloc */
long mymallocUsed = 0; /* useful allocations by mymalloc */
long maxmallocHeap = 0; /* Maximum of mi.arena+mi.hblkhd from mallinfo (actual mem usage) */
long nLkCompute = 0; /* # of likelihood computations for pairs of probability vectors */
long nPosteriorCompute = 0; /* # of computations of posterior probabilities */
long nAAPosteriorExact = 0; /* # of times compute exact AA posterior */
long nAAPosteriorRough = 0; /* # of times use rough approximation */
long nStarTests = 0; /* # of times we use star test to avoid testing an NNI */

/* Protein character set */
unsigned char *codesStringAA = (unsigned char*) "ARNDCQEGHILKMFPSTWYV";
unsigned char *codesStringNT = (unsigned char*) "ACGT";
unsigned char *codesString = NULL; /* points to codesStringAA or codesStringNT once the alphabet is chosen (see nCodes) */
distance_matrix_t *ReadDistanceMatrix(char *prefix);
void SetupDistanceMatrix(/*IN/OUT*/distance_matrix_t *); /* set eigentot, codeFreq, gapFreq */
void ReadMatrix(char *filename, /*OUT*/numeric_t codes[MAXCODES][MAXCODES], bool check_codes);
void ReadVector(char *filename, /*OUT*/numeric_t codes[MAXCODES]);
alignment_t *ReadAlignment(/*READ*/FILE *fp, bool bQuote); /* Returns a list of strings (exits on failure) */
alignment_t *FreeAlignment(alignment_t *); /* returns NULL */
void FreeAlignmentSeqs(/*IN/OUT*/alignment_t *);
/* Takes as input the transpose of the matrix V, with i -> j
This routine takes care of setting the diagonals
*/
transition_matrix_t *CreateTransitionMatrix(/*IN*/double matrix[MAXCODES][MAXCODES],
/*IN*/double stat[MAXCODES]);
transition_matrix_t *CreateGTR(double *gtrrates/*ac,ag,at,cg,ct,gt*/, double *gtrfreq/*ACGT*/);
/* For converting profiles from 1 rotation to another, or converts NULL to NULL */
distance_matrix_t *TransMatToDistanceMat(transition_matrix_t *transmat);
/* Allocates memory, initializes leaf profiles */
NJ_t *InitNJ(char **sequences, int nSeqs, int nPos,
/*IN OPTIONAL*/char **constraintSeqs, int nConstraints,
/*IN OPTIONAL*/distance_matrix_t *,
/*IN OPTIONAL*/transition_matrix_t *);
NJ_t *FreeNJ(NJ_t *NJ); /* returns NULL */
void FastNJ(/*IN/OUT*/NJ_t *NJ); /* Does the joins */
void ReliabilityNJ(/*IN/OUT*/NJ_t *NJ, int nBootstrap); /* Estimates the reliability of the joins */
/* nni_stats_t is meaningless for leaves and root, so all of those entries
   will just be high (for age) or 0 (for delta)
*/
typedef struct {
  int age;        /* number of rounds since this node was modified by an NNI */
  int subtreeAge; /* number of rounds since self or descendant had a significant improvement */
  double delta;   /* improvement in score for this node (or 0 if no change) */
  double support; /* improvement of score for self over better of alternatives */
} nni_stats_t;
/* One round of nearest-neighbor interchanges according to the
minimum-evolution or approximate maximum-likelihood criterion.
If doing maximum likelihood then this modifies the branch lengths.
age is the # of rounds since a node was NNId
Returns the # of topological changes performed
*/
int NNI(/*IN/OUT*/NJ_t *NJ, int iRound, int nRounds, bool useML,
/*IN/OUT*/nni_stats_t *stats,
/*OUT*/double *maxDeltaCriterion);
nni_stats_t *InitNNIStats(NJ_t *NJ);
nni_stats_t *FreeNNIStats(nni_stats_t *, NJ_t *NJ); /* returns NULL */
/* One round of subtree-prune-regraft moves (minimum evolution) */
void SPR(/*IN/OUT*/NJ_t *NJ, int maxSPRLength, int iRound, int nRounds);
/* Recomputes all branch lengths by minimum evolution criterion*/
void UpdateBranchLengths(/*IN/OUT*/NJ_t *NJ);
/* Recomputes all branch lengths and, optionally, internal profiles */
double TreeLength(/*IN/OUT*/NJ_t *NJ, bool recomputeProfiles);
/* Split-quality statistics for the final tree, filled in by
   TestSplitsMinEvo and TestSplitsML (below) */
typedef struct {
  int nBadSplits;
  int nConstraintViolations;
  int nBadBoth;
  int nSplits;
  /* How much length would be reduced or likelihood would be increased by the
     best NNI we find (the worst "miss") */
  double dWorstDeltaUnconstrained;
  double dWorstDeltaConstrained;
} SplitCount_t;
void TestSplitsMinEvo(NJ_t *NJ, /*OUT*/SplitCount_t *splitcount);
/* Sets SH-like support values if nBootstrap>0 */
void TestSplitsML(/*IN/OUT*/NJ_t *NJ, /*OUT*/SplitCount_t *splitcount, int nBootstrap);
/* Pick columns for resampling, stored as returned_vector[iBoot*nPos + j] */
int *ResampleColumns(int nPos, int nBootstrap);
/* Use out-profile and NJ->totdiam to recompute out-distance for node iNode
Only does this computation if the out-distance is "stale" (nOutDistActive[iNode] != nActive)
Note "IN/UPDATE" for NJ always means that we may update out-distances but otherwise
make no changes.
*/
void SetOutDistance(/*IN/UPDATE*/NJ_t *NJ, int iNode, int nActive);
/* Always sets join->criterion; may update NJ->outDistance and NJ->nOutDistActive,
assumes join's weight and distance are already set,
and that the constraint penalty (if any) is included in the distance
*/
void SetCriterion(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/OUT*/besthit_t *join);
/* Computes weight and distance (which includes the constraint penalty)
and then sets the criterion (maybe update out-distances)
*/
void SetDistCriterion(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/OUT*/besthit_t *join);
/* If join->i or join->j are inactive nodes, replaces them with their active ancestors.
After doing this, if i == j, or either is -1, sets weight to 0 and dist and criterion to 1e20
and returns false (not a valid join)
Otherwise, if i or j changed, recomputes the distance and criterion.
Note that if i and j are unchanged then the criterion could be stale
If bUpdateDist is false, and i or j change, then it just sets dist to a negative number
*/
bool UpdateBestHit(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/OUT*/besthit_t *join,
bool bUpdateDist);
/* This recomputes the criterion, or returns false if the visible node
is no longer active.
*/
bool GetVisible(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/OUT*/top_hits_t *tophits,
int iNode, /*OUT*/besthit_t *visible);
int ActiveAncestor(/*IN*/NJ_t *NJ, int node);
/* Compute the constraint penalty for a join. This is added to the "distance"
by SetCriterion */
int JoinConstraintPenalty(/*IN*/NJ_t *NJ, int node1, int node2);
int JoinConstraintPenaltyPiece(NJ_t *NJ, int node1, int node2, int iConstraint);
/* Helper function for computing the number of constraints violated by
a split, represented as counts of on and off on each side */
int SplitConstraintPenalty(int nOn1, int nOff1, int nOn2, int nOff2);
/* Reports the (min. evo.) support for the (1,2) vs. (3,4) split
col[iBoot*nPos+j] is column j for bootstrap iBoot
*/
double SplitSupport(profile_t *p1, profile_t *p2, profile_t *p3, profile_t *p4,
/*OPTIONAL*/distance_matrix_t *dmat,
int nPos,
int nBootstrap,
int *col);
/* Returns SH-like support given resampling spec. (in col) and site likelihoods
   for the three quartets
   (fixed typos: "likelihods" and the prototype-only parameter name "nBoostrap";
   C prototype parameter names are documentation only, so this cannot affect callers
   or the definition) */
double SHSupport(int nPos, int nBootstrap, int *col, double loglk[3], double *site_likelihoods[3]);
profile_t *SeqToProfile(/*IN/OUT*/NJ_t *NJ,
char *seq, int nPos,
/*OPTIONAL*/char *constraintSeqs, int nConstraints,
int iNode,
unsigned long counts[256]);
/* ProfileDist and SeqDist only set the dist and weight fields
If using an outprofile, use the second argument of ProfileDist
for better performance.
These produce uncorrected distances.
*/
void ProfileDist(profile_t *profile1, profile_t *profile2, int nPos,
/*OPTIONAL*/distance_matrix_t *distance_matrix,
/*OUT*/besthit_t *hit);
void SeqDist(unsigned char *codes1, unsigned char *codes2, int nPos,
/*OPTIONAL*/distance_matrix_t *distance_matrix,
/*OUT*/besthit_t *hit);
/* Computes all pairs of profile distances, applies pseudocounts
if pseudoWeight > 0, and applies log-correction if logdist is true.
The lower index is compared to the higher index, e.g. for profiles
A,B,C,D the comparison will be as in quartet_pair_t
*/
typedef enum {qAB,qAC,qAD,qBC,qBD,qCD} quartet_pair_t;
void CorrectedPairDistances(profile_t **profiles, int nProfiles,
/*OPTIONAL*/distance_matrix_t *distance_matrix,
int nPos,
/*OUT*/double *distances);
/* output is indexed by nni_t
To ensure good behavior while evaluating a subtree-prune-regraft move as a series
of nearest-neighbor interchanges, this uses a distance-ish model of constraints,
as given by PairConstraintDistance(), rather than
counting the number of violated splits (which is what FastTree does
during neighbor-joining).
Thus, penalty values may well be >0 even if no constraints are violated, but the
relative scores for the three NNIs will be correct.
*/
void QuartetConstraintPenalties(profile_t *profiles[4], int nConstraints, /*OUT*/double d[3]);
double PairConstraintDistance(int nOn1, int nOff1, int nOn2, int nOff2);
/* the split is consistent with the constraint if any of the profiles have no data
or if three of the profiles have the same uniform value (all on or all off)
or if AB|CD = 00|11 or 11|00 (all uniform)
*/
bool SplitViolatesConstraint(profile_t *profiles[4], int iConstraint);
/* If false, no values were set because this constraint was not relevant.
output is for the 3 splits
*/
bool QuartetConstraintPenaltiesPiece(profile_t *profiles[4], int iConstraint, /*OUT*/double penalty[3]);
/* Apply Jukes-Cantor or scoredist-like log(1-d) transform
to correct the distance for multiple substitutions.
*/
double LogCorrect(double distance);
/* AverageProfile is used to do a weighted combination of nodes
when doing a join. If weight is negative, then the value is ignored and the profiles
are averaged. The weight is *not* adjusted for the gap content of the nodes.
Also, the weight does not affect the representation of the constraints
*/
profile_t *AverageProfile(profile_t *profile1, profile_t *profile2,
int nPos, int nConstraints,
distance_matrix_t *distance_matrix,
double weight1);
/* PosteriorProfile() is like AverageProfile() but it computes posterior probabilities
rather than an average
*/
profile_t *PosteriorProfile(profile_t *profile1, profile_t *profile2,
double len1, double len2,
/*OPTIONAL*/transition_matrix_t *transmat,
rates_t *rates,
int nPos, int nConstraints);
/* Set a node's profile from its children.
Deletes the previous profile if it exists
Use -1.0 for a balanced join
Fails unless the node has two children (e.g., no leaves or root)
*/
void SetProfile(/*IN/OUT*/NJ_t *NJ, int node, double weight1);
/* OutProfile does an unweighted combination of nodes to create the
out-profile. It always sets code to NOCODE so that UpdateOutProfile
can work.
*/
profile_t *OutProfile(profile_t **profiles, int nProfiles,
int nPos, int nConstraints,
distance_matrix_t *distance_matrix);
void UpdateOutProfile(/*UPDATE*/profile_t *out, profile_t *old1, profile_t *old2,
profile_t *new, int nActiveOld,
int nPos, int nConstraints,
distance_matrix_t *distance_matrix);
profile_t *NewProfile(int nPos, int nConstraints); /* returned has no vectors */
profile_t *FreeProfile(profile_t *profile, int nPos, int nConstraints); /* returns NULL */
void AllocRateCategories(/*IN/OUT*/rates_t *rates, int nRateCategories, int nPos);
/* f1 can be NULL if code1 != NOCODE, and similarly for f2
Or, if (say) weight1 was 0, then can have code1==NOCODE *and* f1==NULL
In that case, returns an arbitrary large number.
*/
double ProfileDistPiece(unsigned int code1, unsigned int code2,
numeric_t *f1, numeric_t *f2,
/*OPTIONAL*/distance_matrix_t *dmat,
/*OPTIONAL*/numeric_t *codeDist2);
/* Adds (or subtracts, if weight is negative) fIn/codeIn from fOut
fOut is assumed to exist (as from an outprofile)
do not call unless weight of input profile > 0
*/
void AddToFreq(/*IN/OUT*/numeric_t *fOut, double weight,
unsigned int codeIn, /*OPTIONAL*/numeric_t *fIn,
/*OPTIONAL*/distance_matrix_t *dmat);
/* Divide the vector (of length nCodes) by a constant
so that the total (unrotated) frequency is 1.0 */
void NormalizeFreq(/*IN/OUT*/numeric_t *freq, distance_matrix_t *distance_matrix);
/* Allocate, if necessary, and recompute the codeDist*/
void SetCodeDist(/*IN/OUT*/profile_t *profile, int nPos, distance_matrix_t *dmat);
/* The allhits list contains the distances of the node to all other active nodes
This is useful for the "reset" improvement to the visible set
Note that the following routines do not handle the tophits heuristic
and assume that out-distances are up to date.
*/
void SetBestHit(int node, NJ_t *NJ, int nActive,
/*OUT*/besthit_t *bestjoin,
/*OUT OPTIONAL*/besthit_t *allhits);
void ExhaustiveNJSearch(NJ_t *NJ, int nActive, /*OUT*/besthit_t *bestjoin);
/* Searches the visible set */
void FastNJSearch(NJ_t *NJ, int nActive, /*UPDATE*/besthit_t *visible, /*OUT*/besthit_t *bestjoin);
/* Subroutines for handling the tophits heuristic */
top_hits_t *InitTopHits(NJ_t *NJ, int m);
top_hits_t *FreeTopHits(top_hits_t *tophits); /* returns NULL */
/* Before we do any joins -- sets tophits and visible
NJ may be modified by setting out-distances
*/
void SetAllLeafTopHits(/*IN/UPDATE*/NJ_t *NJ, /*IN/OUT*/top_hits_t *tophits);
/* Find the best join to do. */
void TopHitNJSearch(/*IN/UPDATE*/NJ_t *NJ,
int nActive,
/*IN/OUT*/top_hits_t *tophits,
/*OUT*/besthit_t *bestjoin);
/* Returns the best hit within top hits
NJ may be modified because it updates out-distances if they are too stale
Does *not* update visible set
*/
void GetBestFromTopHits(int iNode, /*IN/UPDATE*/NJ_t *NJ, int nActive,
/*IN*/top_hits_t *tophits,
/*OUT*/besthit_t *bestjoin);
/* visible set is modifiable so that we can reset it more globally when we do
a "refresh", but we also set the visible set for newnode and do any
"reset" updates too. And, we update many outdistances.
*/
void TopHitJoin(int newnode,
/*IN/UPDATE*/NJ_t *NJ, int nActive,
/*IN/OUT*/top_hits_t *tophits);
/* Sort the input besthits by criterion
and save the best nOut hits as a new array in top_hits_lists
Does not update criterion or out-distances
Ignores (silently removes) hit to self
Saved list may be shorter than requested if there are insufficient entries
*/
void SortSaveBestHits(int iNode, /*IN/SORT*/besthit_t *besthits,
int nIn, int nOut,
/*IN/OUT*/top_hits_t *tophits);
/* Given candidate hits from one node, "transfer" them to another node:
Stores them in a new place in the same order
searches up to active nodes if hits involve non-active nodes
If update flag is set, it also recomputes distance and criterion
(and ensures that out-distances are updated); otherwise
it sets dist to -1e20 and criterion to 1e20
*/
void TransferBestHits(/*IN/UPDATE*/NJ_t *NJ, int nActive,
int iNode,
/*IN*/besthit_t *oldhits,
int nOldHits,
/*OUT*/besthit_t *newhits,
bool updateDistance);
/* Create best hit objects from 1 or more hits. Do not update out-distances or set criteria */
void HitsToBestHits(/*IN*/hit_t *hits, int nHits, int iNode, /*OUT*/besthit_t *newhits);
besthit_t HitToBestHit(int i, hit_t hit);
/* Given a set of besthit entries,
look for improvements to the visible set of the j entries.
Updates out-distances as it goes.
Also replaces stale nodes with this node, because a join is usually
how this happens (i.e. it does not need to walk up to ancestors).
Note this calls UpdateTopVisible() on any change
*/
void UpdateVisible(/*IN/UPDATE*/NJ_t *NJ, int nActive,
/*IN*/besthit_t *tophitsNode,
int nTopHits,
/*IN/OUT*/top_hits_t *tophits);
/* Update the top-visible list to perhaps include this hit (O(sqrt(N)) time) */
void UpdateTopVisible(/*IN*/NJ_t * NJ, int nActive,
int iNode, /*IN*/hit_t *hit,
/*IN/OUT*/top_hits_t *tophits);
/* Recompute the top-visible subset of the visible set */
void ResetTopVisible(/*IN/UPDATE*/NJ_t *NJ,
int nActive,
/*IN/OUT*/top_hits_t *tophits);
/* Make a shorter list with only unique entries.
Replaces any "dead" hits to nodes that have parents with their active ancestors
and ignores any that become dead.
Updates all criteria.
Combined gets sorted by i & j
The returned list is allocated to nCombined even though only *nUniqueOut entries are filled
*/
besthit_t *UniqueBestHits(/*IN/UPDATE*/NJ_t *NJ, int nActive,
/*IN/SORT*/besthit_t *combined, int nCombined,
/*OUT*/int *nUniqueOut);
nni_t ChooseNNI(profile_t *profiles[4],
/*OPTIONAL*/distance_matrix_t *dmat,
int nPos, int nConstraints,
/*OUT*/double criteria[3]); /* The three internal branch lengths or log likelihoods*/
/* length[] is ordered as described by quartet_length_t, but after we do the swap
of B with C (to give AC|BD) or B with D (to get AD|BC), if that is the returned choice
bFast means do not consider NNIs if AB|CD is noticeably better than the star topology
(as implemented by MLQuartetOptimize).
If there are constraints, then the constraint penalty is included in criteria[]
*/
nni_t MLQuartetNNI(profile_t *profiles[4],
/*OPTIONAL*/transition_matrix_t *transmat, rates_t *rates,
int nPos, int nConstraints,
/*OUT*/double criteria[3], /* The three potential quartet log-likelihoods */
/*IN/OUT*/numeric_t length[5],
bool bFast);
void OptimizeAllBranchLengths(/*IN/OUT*/NJ_t *NJ);
double TreeLogLk(/*IN*/NJ_t *NJ, /*OPTIONAL OUT*/double *site_loglk);
double MLQuartetLogLk(profile_t *pA, profile_t *pB, profile_t *pC, profile_t *pD,
int nPos, /*OPTIONAL*/transition_matrix_t *transmat, rates_t *rates,
/*IN*/double branch_lengths[5],
/*OPTIONAL OUT*/double *site_likelihoods);
/* Given a topology and branch lengths, estimate rates & recompute profiles */
void SetMLRates(/*IN/OUT*/NJ_t *NJ, int nRateCategories);
/* Returns a set of nRateCategories potential rates; the caller must free it */
numeric_t *MLSiteRates(int nRateCategories);
/* returns site_loglk so that
site_loglk[nPos*iRate + j] is the log likelihood of site j with rate iRate
The caller must free it.
*/
double *MLSiteLikelihoodsByRate(/*IN*/NJ_t *NJ, /*IN*/numeric_t *rates, int nRateCategories);
/* State for rescaling the tree under the gamma model: holds the per-rate
   site log likelihoods and the current overall scaling, so GammaLogLk /
   RescaleGammaLogLk can optimize the scale without recomputing profiles */
typedef struct {
double mult; /* multiplier for the rates / divisor for the tree-length */
double alpha; /* gamma shape parameter (presumably, cf. PGamma) -- TODO confirm */
int nPos; /* number of alignment positions */
int nRateCats; /* number of rate categories */
numeric_t *rates; /* per-category rates; assumed length nRateCats -- confirm */
double *site_loglk; /* per-rate site log likelihoods; presumably indexed as
                       site_loglk[nPos*iRate + j], as produced by
                       MLSiteLikelihoodsByRate -- confirm against caller */
} siteratelk_t;
double GammaLogLk(/*IN*/siteratelk_t *s, /*OPTIONAL OUT*/double *gamma_loglk_sites);
/* Input site_loglk must be for each rate. Note that FastTree does not reoptimize
the branch lengths under the Gamma model -- it optimizes the overall scale.
Reports the gamma log likelihood (and logs site likelihoods if fpLog is set),
and reports the rescaling value.
*/
double RescaleGammaLogLk(int nPos, int nRateCats,
/*IN*/numeric_t *rates, /*IN*/double *site_loglk,
/*OPTIONAL*/FILE *fpLog);
/* P(value<=x) for the gamma distribution with shape parameter alpha and scale 1/alpha */
double PGamma(double x, double alpha);
/* Given a topology and branch lengths, optimize GTR rates and quickly reoptimize branch lengths
If gtrfreq is NULL, then empirical frequencies are used
*/
void SetMLGtr(/*IN/OUT*/NJ_t *NJ, /*OPTIONAL IN*/double *gtrfreq, /*OPTIONAL WRITE*/FILE *fpLog);
/* P(A & B | len) = P(B | A, len) * P(A)
If site_likelihoods is present, multiplies those values by the site likelihood at each point
(Note it does not handle underflow)
*/
double PairLogLk(/*IN*/profile_t *p1, /*IN*/profile_t *p2, double length,
int nPos, /*OPTIONAL*/transition_matrix_t *transmat, rates_t *rates,
/*OPTIONAL IN/OUT*/double *site_likelihoods);
/* Branch lengths for 4-taxon tree ((A,B),C,D); I means internal.
   These are the indices into the length[5] / branch_lengths[5] arrays
   used by MLQuartetNNI, MLQuartetLogLk, and MLQuartetOptimize. */
typedef enum {LEN_A,LEN_B,LEN_C,LEN_D,LEN_I} quartet_length_t;
/* Context for one-dimensional optimization of a single branch length;
   passed as the opaque data argument to PairNegLogLk */
typedef struct {
int nPos; /* number of alignment positions */
transition_matrix_t *transmat; /* OPTIONAL (may be NULL) */
rates_t *rates; /* rate categories for the likelihood computation */
int nEval; /* number of likelihood evaluations */
/* The pair to optimize */
profile_t *pair1;
profile_t *pair2;
} quartet_opt_t;
double PairNegLogLk(double x, void *data); /* data must be a quartet_opt_t */
/* Context for optimizing one GTR rate parameter; passed as the opaque
   data argument to GTRNegLogLk (which sets rates[iRate] from x) */
typedef struct {
NJ_t *NJ; /* the tree whose likelihood is evaluated */
double freq[4]; /* nucleotide frequencies; presumably ordered A C G T as in -gtrfreq -- confirm */
double rates[6]; /* GTR rates; presumably ordered ac ag at cg ct gt as in -gtrrates -- confirm */
int iRate; /* which rate to set x from */
FILE *fpLog; /* OPTIONAL WRITE */
} gtr_opt_t;
/* Returns -log_likelihood for the tree with the given rates
data must be a gtr_opt_t and x is used to set rate iRate
Does not recompute profiles -- assumes that the caller will
*/
double GTRNegLogLk(double x, void *data);
/* Returns the resulting log likelihood. Optionally returns whether other
topologies should be abandoned, based on the difference between AB|CD and
the "star topology" (AB|CD with a branch length of MLMinBranchLength) exceeding
closeLogLkLimit.
If bStarTest is passed in, it only optimizes the internal branch if
the star test is true. Otherwise, it optimizes all 5 branch lengths
in turn.
*/
double MLQuartetOptimize(profile_t *pA, profile_t *pB, profile_t *pC, profile_t *pD,
int nPos, /*OPTIONAL*/transition_matrix_t *transmat, rates_t *rates,
/*IN/OUT*/double branch_lengths[5],
/*OPTIONAL OUT*/bool *pStarTest,
/*OPTIONAL OUT*/double *site_likelihoods);
/* Returns the resulting log likelihood */
double MLPairOptimize(profile_t *pA, profile_t *pB,
int nPos, /*OPTIONAL*/transition_matrix_t *transmat, rates_t *rates,
/*IN/OUT*/double *branch_length);
/* Returns the number of steps considered, with the actual steps in steps[]
Modifies the tree by this chain of NNIs
*/
int FindSPRSteps(/*IN/OUT*/NJ_t *NJ,
int node,
int parent, /* sibling or parent of node to NNI to start the chain */
/*IN/OUT*/profile_t **upProfiles,
/*OUT*/spr_step_t *steps,
int maxSteps,
bool bFirstAC);
/* Undo a single NNI */
void UnwindSPRStep(/*IN/OUT*/NJ_t *NJ,
/*IN*/spr_step_t *step,
/*IN/OUT*/profile_t **upProfiles);
/* Update the profile of node and its ancestor, and delete nearby out-profiles */
void UpdateForNNI(/*IN/OUT*/NJ_t *NJ, int node, /*IN/OUT*/profile_t **upProfiles, bool useML);
/* Sets NJ->parent[newchild] and replaces oldchild with newchild
in the list of children of parent
*/
void ReplaceChild(/*IN/OUT*/NJ_t *NJ, int parent, int oldchild, int newchild);
int CompareHitsByCriterion(const void *c1, const void *c2);
int CompareHitsByIJ(const void *c1, const void *c2);
int NGaps(NJ_t *NJ, int node); /* only handles leaf sequences */
/* node is the parent of AB, sibling of C
node cannot be root or a leaf
If node is the child of root, then D is the other sibling of node,
and the 4th profile is D's profile.
Otherwise, D is the parent of node, and we use its upprofile
Call this with profiles=NULL to get the nodes, without fetching or
computing profiles
*/
void SetupABCD(NJ_t *NJ, int node,
/* the 4 profiles for ABCD; the last one is an upprofile */
/*OPTIONAL OUT*/profile_t *profiles[4],
/*OPTIONAL IN/OUT*/profile_t **upProfiles,
/*OUT*/int nodeABCD[4],
bool useML);
int Sibling(NJ_t *NJ, int node); /* At root, no unique sibling so returns -1 */
void RootSiblings(NJ_t *NJ, int node, /*OUT*/int sibs[2]);
/* JC probability of nucleotide not changing, for each rate category */
double *PSameVector(double length, rates_t *rates);
/* JC probability of nucleotide not changing, for each rate category */
double *PDiffVector(double *pSame, rates_t *rates);
/* expeigen[iRate*nCodes + j] = exp(length * rate iRate * eigenvalue j) */
numeric_t *ExpEigenRates(double length, transition_matrix_t *transmat, rates_t *rates);
/* Print a progress report if more than 0.1 second has gone by since the progress report */
/* Format should include 0-4 %d references and no newlines */
void ProgressReport(char *format, int iArg1, int iArg2, int iArg3, int iArg4);
void LogTree(char *format, int round, /*OPTIONAL WRITE*/FILE *fp, NJ_t *NJ, char **names, uniquify_t *unique, bool bQuote);
void LogMLRates(/*OPTIONAL WRITE*/FILE *fpLog, NJ_t *NJ);
void *mymalloc(size_t sz); /* Prints "Out of memory" and exits on failure */
void *myfree(void *, size_t sz); /* Always returns NULL */
/* One-dimensional minimization using brent's function, with
a fractional and an absolute tolerance */
double onedimenmin(double xmin, double xguess, double xmax, double (*f)(double,void*), void *data,
double ftol, double atol,
/*OUT*/double *fx, /*OUT*/double *f2x);
double brent(double ax, double bx, double cx, double (*f)(double, void *), void *data,
double ftol, double atol,
double *foptx, double *f2optx, double fax, double fbx, double fcx);
/* Vector operations, either using SSE3 or not
Code assumes that vectors are a multiple of 4 in size
*/
void vector_multiply(/*IN*/numeric_t *f1, /*IN*/numeric_t *f2, int n, /*OUT*/numeric_t *fOut);
numeric_t vector_multiply_sum(/*IN*/numeric_t *f1, /*IN*/numeric_t *f2, int n);
void vector_add_mult(/*IN/OUT*/numeric_t *f, /*IN*/numeric_t *add, numeric_t weight, int n);
/* multiply the transpose of a matrix by a vector */
void matrixt_by_vector4(/*IN*/numeric_t mat[4][MAXCODES], /*IN*/numeric_t vec[4], /*OUT*/numeric_t out[4]);
/* sum(f1*fBy)*sum(f2*fBy) */
numeric_t vector_dot_product_rot(/*IN*/numeric_t *f1, /*IN*/numeric_t *f2, /*IN*/numeric_t* fBy, int n);
/* sum(f1*f2*f3) */
numeric_t vector_multiply3_sum(/*IN*/numeric_t *f1, /*IN*/numeric_t *f2, /*IN*/numeric_t* f3, int n);
numeric_t vector_sum(/*IN*/numeric_t *f1, int n);
void vector_multiply_by(/*IN/OUT*/numeric_t *f, /*IN*/numeric_t fBy, int n);
double clockDiff(/*IN*/struct timeval *clock_start);
int timeval_subtract (/*OUT*/struct timeval *result, /*IN*/struct timeval *x, /*IN*/struct timeval *y);
char *OpenMPString(void);
void ran_start(long seed);
double knuth_rand(); /* Random number between 0 and 1 */
void tred2 (double *a, const int n, const int np, double *d, double *e);
double pythag(double a, double b);
void tqli(double *d, double *e, int n, int np, double *z);
/* Like mymalloc; duplicates the input (returns NULL if given NULL) */
void *mymemdup(void *data, size_t sz);
void *myrealloc(void *data, size_t szOld, size_t szNew, bool bCopy);
double pnorm(double z); /* Probability(value <=z) */
/* Hashtable functions */
/* Hashtable functions */
/* One open-addressing bucket; string == NULL marks an empty bucket
   (probing stops at a NULL string, see hashstrings_t below) */
typedef struct
{
char *string; /* the key; NULL if this bucket is unused */
int nCount; /* number of times this entry was seen */
int first; /* index of first entry with this value */
} hashbucket_t;
typedef struct {
int nBuckets;
/* hashvalue -> bucket. Or look in bucket + 1, +2, etc., till you hit a NULL string */
hashbucket_t *buckets;
} hashstrings_t;
/* An iterator is a bucket index into hashstrings_t; treat as opaque and
   use FindMatch/GetHashString/HashCount/HashFirst to access it */
typedef int hashiterator_t;
hashstrings_t *MakeHashtable(char **strings, int nStrings);
hashstrings_t *FreeHashtable(hashstrings_t* hash); /*returns NULL*/
hashiterator_t FindMatch(hashstrings_t *hash, char *string);
/* Return NULL if we have run out of values */
char *GetHashString(hashstrings_t *hash, hashiterator_t hi);
int HashCount(hashstrings_t *hash, hashiterator_t hi);
int HashFirst(hashstrings_t *hash, hashiterator_t hi);
void PrintNJ(/*WRITE*/FILE *, NJ_t *NJ, char **names, uniquify_t *unique, bool bShowSupport, bool bQuoteNames);
/* Print topology using node indices as node names */
void PrintNJInternal(/*WRITE*/FILE *, NJ_t *NJ, bool useLen);
uniquify_t *UniquifyAln(/*IN*/alignment_t *aln);
uniquify_t *FreeUniquify(uniquify_t *); /* returns NULL */
/* Convert a constraint alignment to a list of sequences. The returned array is indexed
by iUnique and points to values in the input alignment
*/
char **AlnToConstraints(alignment_t *constraints, uniquify_t *unique, hashstrings_t *hashnames);
/* ReadTree ignores non-unique leaves after the first instance.
At the end, it prunes the tree to ignore empty children and it
unroots the tree if necessary.
*/
void ReadTree(/*IN/OUT*/NJ_t *NJ,
/*IN*/uniquify_t *unique,
/*IN*/hashstrings_t *hashnames,
/*READ*/FILE *fpInTree);
char *ReadTreeToken(/*READ*/FILE *fp); /* returns a static array, or NULL on EOF */
void ReadTreeAddChild(int parent, int child, /*IN/OUT*/int *parents, /*IN/OUT*/children_t *children);
/* Do not add the leaf if we already set this unique-set to another parent */
void ReadTreeMaybeAddLeaf(int parent, char *name,
hashstrings_t *hashnames, uniquify_t *unique,
/*IN/OUT*/int *parents, /*IN/OUT*/children_t *children);
void ReadTreeRemove(/*IN/OUT*/int *parents, /*IN/OUT*/children_t *children, int node);
/* Routines to support tree traversal and prevent visiting a node >1 time
(esp. if topology changes).
*/
typedef bool *traversal_t;
traversal_t InitTraversal(NJ_t*);
void SkipTraversalInto(int node, /*IN/OUT*/traversal_t traversal);
traversal_t FreeTraversal(traversal_t, NJ_t*); /*returns NULL*/
/* returns new node, or -1 if nothing left to do. Use root for the first call.
Will return every node and then root.
Uses postorder tree traversal (depth-first search going down to leaves first)
Keeps track of which nodes are visited, so even after an NNI that swaps a
visited child with an unvisited uncle, the next call will visit the
was-uncle-now-child. (However, after SPR moves, there is no such guarantee.)
If pUp is not NULL, then, if going "back up" through a previously visited node
(presumably due to an NNI), then it will return the node another time,
with *pUp = true.
*/
int TraversePostorder(int lastnode, NJ_t *NJ, /*IN/OUT*/traversal_t,
/*OUT OPTIONAL*/bool *pUp);
/* Routines to support storing up-profiles during tree traversal
Eventually these should be smart enough to do weighted joins and
to minimize memory usage
*/
profile_t **UpProfiles(NJ_t *NJ);
profile_t *GetUpProfile(/*IN/OUT*/profile_t **upProfiles, NJ_t *NJ, int node, bool useML);
profile_t *DeleteUpProfile(/*IN/OUT*/profile_t **upProfiles, NJ_t *NJ, int node); /* returns NULL */
profile_t **FreeUpProfiles(profile_t **upProfiles, NJ_t *NJ); /* returns NULL */
/* Recomputes the profile for a node, presumably to reflect topology changes
If bionj is set, does a weighted join -- which requires using upProfiles
If useML is set, computes the posterior probability instead of averaging
*/
void RecomputeProfile(/*IN/OUT*/NJ_t *NJ, /*IN/OUT*/profile_t **upProfiles, int node, bool useML);
/* Recompute profiles going up from the leaves, using the provided distance matrix
and unweighted joins
*/
void RecomputeProfiles(/*IN/OUT*/NJ_t *NJ, /*OPTIONAL*/distance_matrix_t *dmat);
void RecomputeMLProfiles(/*IN/OUT*/NJ_t *NJ);
/* If bionj is set, computes the weight to be given to A when computing the
profile for the ancestor of A and B. C and D are the other profiles in the quartet
If bionj is not set, returns -1 (which means unweighted in AverageProfile).
(A and B are the first two profiles in the array)
*/
double QuartetWeight(profile_t *profiles[4], distance_matrix_t *dmat, int nPos);
/* Returns a list of nodes, starting with node and ending with root */
int *PathToRoot(NJ_t *NJ, int node, /*OUT*/int *depth);
int *FreePath(int *path, NJ_t *NJ); /* returns NULL */
/* The default amino acid distance matrix, derived from the BLOSUM45 similarity matrix */
distance_matrix_t matrixBLOSUM45;
/* The default amino acid transition matrix (Jones Taylor Thorton 1992) */
double matrixJTT92[MAXCODES][MAXCODES];
double statJTT92[MAXCODES];
/* The Le-Gascuel 2008 amino acid transition matrix */
double matrixLG08[MAXCODES][MAXCODES];
double statLG08[MAXCODES];
/* The WAG amino acid transition matrix (Whelan-And-Goldman 2001) */
double matrixWAG01[MAXCODES][MAXCODES];
double statWAG01[MAXCODES];
int main(int argc, char **argv) {
int nAlign = 1; /* number of alignments to read */
int iArg;
char *matrixPrefix = NULL;
distance_matrix_t *distance_matrix = NULL;
bool make_matrix = false;
char *constraintsFile = NULL;
char *intreeFile = NULL;
bool intree1 = false; /* the same starting tree each round */
int nni = -1; /* number of rounds of NNI, defaults to 4*log2(n) */
int spr = 2; /* number of rounds of SPR */
int maxSPRLength = 10; /* maximum distance to move a node */
int MLnni = -1; /* number of rounds of ML NNI, defaults to 2*log2(n) */
bool MLlen = false; /* optimize branch lengths; no topology changes */
int nBootstrap = 1000; /* If set, number of replicates of local bootstrap to do */
int nRateCats = nDefaultRateCats;
char *logfile = NULL;
bool bUseGtr = false;
bool bUseLg = false;
bool bUseWag = false;
bool bUseGtrRates = false;
double gtrrates[6] = {1,1,1,1,1,1};
bool bUseGtrFreq = false;
double gtrfreq[4] = {0.25,0.25,0.25,0.25};
bool bQuote = false;
FILE *fpOut = stdout;
if (isatty(STDIN_FILENO) && argc == 1) {
fprintf(stderr,"Usage for FastTree version %s %s%s:\n%s",
FT_VERSION, SSE_STRING, OpenMPString(), usage);
#if (defined _WIN32 || defined WIN32 || defined WIN64 || defined _WIN64)
fprintf(stderr, "Windows users: Please remember to run this inside a command shell\n");
fprintf(stderr,"Hit return to continue\n");
fgetc(stdin);
#endif
exit(0);
}
for (iArg = 1; iArg < argc; iArg++) {
if (strcmp(argv[iArg],"-makematrix") == 0) {
make_matrix = true;
} else if (strcmp(argv[iArg],"-logdist") == 0) {
fprintf(stderr, "Warning: logdist is now on by default and obsolete\n");
} else if (strcmp(argv[iArg],"-rawdist") == 0) {
logdist = false;
} else if (strcmp(argv[iArg],"-verbose") == 0 && iArg < argc-1) {
verbose = atoi(argv[++iArg]);
} else if (strcmp(argv[iArg],"-quiet") == 0) {
verbose = 0;
showProgress = 0;
} else if (strcmp(argv[iArg],"-nopr") == 0) {
showProgress = 0;
} else if (strcmp(argv[iArg],"-slow") == 0) {
slow = 1;
} else if (strcmp(argv[iArg],"-fastest") == 0) {
fastest = 1;
tophitsRefresh = 0.5;
useTopHits2nd = true;
} else if (strcmp(argv[iArg],"-2nd") == 0) {
useTopHits2nd = true;
} else if (strcmp(argv[iArg],"-no2nd") == 0) {
useTopHits2nd = false;
} else if (strcmp(argv[iArg],"-slownni") == 0) {
fastNNI = false;
} else if (strcmp(argv[iArg], "-matrix") == 0 && iArg < argc-1) {
iArg++;
matrixPrefix = argv[iArg];
} else if (strcmp(argv[iArg], "-nomatrix") == 0) {
useMatrix = false;
} else if (strcmp(argv[iArg], "-n") == 0 && iArg < argc-1) {
iArg++;
nAlign = atoi(argv[iArg]);
if (nAlign < 1) {
fprintf(stderr, "-n argument for #input alignments must be > 0 not %s\n", argv[iArg]);
exit(1);
}
} else if (strcmp(argv[iArg], "-quote") == 0) {
bQuote = true;
} else if (strcmp(argv[iArg], "-nt") == 0) {
nCodes = 4;
} else if (strcmp(argv[iArg], "-intree") == 0 && iArg < argc-1) {
iArg++;
intreeFile = argv[iArg];
} else if (strcmp(argv[iArg], "-intree1") == 0 && iArg < argc-1) {
iArg++;
intreeFile = argv[iArg];
intree1 = true;
} else if (strcmp(argv[iArg], "-nj") == 0) {
bionj = 0;
} else if (strcmp(argv[iArg], "-bionj") == 0) {
bionj = 1;
} else if (strcmp(argv[iArg], "-boot") == 0 && iArg < argc-1) {
iArg++;
nBootstrap = atoi(argv[iArg]);
} else if (strcmp(argv[iArg], "-noboot") == 0 || strcmp(argv[iArg], "-nosupport") == 0) {
nBootstrap = 0;
} else if (strcmp(argv[iArg], "-seed") == 0 && iArg < argc-1) {
iArg++;
long seed = atol(argv[iArg]);
ran_start(seed);
} else if (strcmp(argv[iArg],"-top") == 0) {
if(tophitsMult < 0.01)
tophitsMult = 1.0;
} else if (strcmp(argv[iArg],"-notop") == 0) {
tophitsMult = 0.0;
} else if (strcmp(argv[iArg], "-topm") == 0 && iArg < argc-1) {
iArg++;
tophitsMult = atof(argv[iArg]);
} else if (strcmp(argv[iArg], "-close") == 0 && iArg < argc-1) {
iArg++;
tophitsClose = atof(argv[iArg]);
if (tophitsMult <= 0) {
fprintf(stderr, "Cannot use -close unless -top is set above 0\n");
exit(1);
}
if (tophitsClose <= 0 || tophitsClose >= 1) {
fprintf(stderr, "-close argument must be between 0 and 1\n");
exit(1);
}
} else if (strcmp(argv[iArg], "-refresh") == 0 && iArg < argc-1) {
iArg++;
tophitsRefresh = atof(argv[iArg]);
if (tophitsMult <= 0) {
fprintf(stderr, "Cannot use -refresh unless -top is set above 0\n");
exit(1);
}
if (tophitsRefresh <= 0 || tophitsRefresh >= 1) {
fprintf(stderr, "-refresh argument must be between 0 and 1\n");
exit(1);
}
} else if (strcmp(argv[iArg],"-nni") == 0 && iArg < argc-1) {
iArg++;
nni = atoi(argv[iArg]);
if (nni == 0)
spr = 0;
} else if (strcmp(argv[iArg],"-spr") == 0 && iArg < argc-1) {
iArg++;
spr = atoi(argv[iArg]);
} else if (strcmp(argv[iArg],"-sprlength") == 0 && iArg < argc-1) {
iArg++;
maxSPRLength = atoi(argv[iArg]);
} else if (strcmp(argv[iArg],"-mlnni") == 0 && iArg < argc-1) {
iArg++;
MLnni = atoi(argv[iArg]);
} else if (strcmp(argv[iArg],"-noml") == 0) {
MLnni = 0;
} else if (strcmp(argv[iArg],"-mllen") == 0) {
MLnni = 0;
MLlen = true;
} else if (strcmp(argv[iArg],"-nome") == 0) {
spr = 0;
nni = 0;
} else if (strcmp(argv[iArg],"-help") == 0) {
fprintf(stderr,"FastTree %s %s%s:\n%s", FT_VERSION, SSE_STRING, OpenMPString(), usage);
exit(0);
} else if (strcmp(argv[iArg],"-expert") == 0) {
fprintf(stderr, "Detailed usage for FastTree %s %s%s:\n%s",
FT_VERSION, SSE_STRING, OpenMPString(), expertUsage);
exit(0);
} else if (strcmp(argv[iArg],"-pseudo") == 0) {
if (iArg < argc-1 && isdigit(argv[iArg+1][0])) {
iArg++;
pseudoWeight = atof(argv[iArg]);
if (pseudoWeight < 0.0) {
fprintf(stderr,"Illegal argument to -pseudo: %s\n", argv[iArg]);
exit(1);
}
} else {
pseudoWeight = 1.0;
}
} else if (strcmp(argv[iArg],"-constraints") == 0 && iArg < argc-1) {
iArg++;
constraintsFile = argv[iArg];
} else if (strcmp(argv[iArg],"-constraintWeight") == 0 && iArg < argc-1) {
iArg++;
constraintWeight = atof(argv[iArg]);
if (constraintWeight <= 0.0) {
fprintf(stderr, "Illegal argument to -constraintWeight (must be greater than zero): %s\n", argv[iArg]);
exit(1);
}
} else if (strcmp(argv[iArg],"-mlacc") == 0 && iArg < argc-1) {
iArg++;
mlAccuracy = atoi(argv[iArg]);
if (mlAccuracy < 1) {
fprintf(stderr, "Illlegal -mlacc argument: %s\n", argv[iArg]);
exit(1);
}
} else if (strcmp(argv[iArg],"-exactml") == 0 || strcmp(argv[iArg],"-mlexact") == 0) {
fprintf(stderr,"-exactml is not required -- exact posteriors is the default now\n");
} else if (strcmp(argv[iArg],"-approxml") == 0 || strcmp(argv[iArg],"-mlapprox") == 0) {
exactML = false;
} else if (strcmp(argv[iArg],"-cat") == 0 && iArg < argc-1) {
iArg++;
nRateCats = atoi(argv[iArg]);
if (nRateCats < 1) {
fprintf(stderr, "Illlegal argument to -ncat (must be greater than zero): %s\n", argv[iArg]);
exit(1);
}
} else if (strcmp(argv[iArg],"-nocat") == 0) {
nRateCats = 1;
} else if (strcmp(argv[iArg], "-lg") == 0) {
bUseLg = true;
} else if (strcmp(argv[iArg], "-wag") == 0) {
bUseWag = true;
} else if (strcmp(argv[iArg], "-gtr") == 0) {
bUseGtr = true;
} else if (strcmp(argv[iArg], "-gtrrates") == 0 && iArg < argc-6) {
bUseGtr = true;
bUseGtrRates = true;
int i;
for (i = 0; i < 6; i++) {
gtrrates[i] = atof(argv[++iArg]);
if (gtrrates[i] < 1e-5) {
fprintf(stderr, "Illegal or too small value of GTR rate: %s\n", argv[iArg]);
exit(1);
}
}
} else if (strcmp(argv[iArg],"-gtrfreq") == 0 && iArg < argc-4) {
bUseGtr = true;
bUseGtrFreq = true;
int i;
double sum = 0;
for (i = 0; i < 4; i++) {
gtrfreq[i] = atof(argv[++iArg]);
sum += gtrfreq[i];
if (gtrfreq[i] < 1e-5) {
fprintf(stderr, "Illegal or too small value of GTR frequency: %s\n", argv[iArg]);
exit(1);
}
}
if (fabs(1.0-sum) > 0.01) {
fprintf(stderr, "-gtrfreq values do not sum to 1\n");
exit(1);
}
for (i = 0; i < 4; i++)
gtrfreq[i] /= sum;
} else if (strcmp(argv[iArg],"-log") == 0 && iArg < argc-1) {
iArg++;
logfile = argv[iArg];
} else if (strcmp(argv[iArg],"-gamma") == 0) {
gammaLogLk = true;
} else if (strcmp(argv[iArg],"-out") == 0 && iArg < argc-1) {
iArg++;
fpOut = fopen(argv[iArg],"w");
if(fpOut==NULL) {
fprintf(stderr,"Cannot write to %s\n",argv[iArg]);
exit(1);
}
} else if (argv[iArg][0] == '-') {
fprintf(stderr, "Unknown or incorrect use of option %s\n%s", argv[iArg], usage);
exit(1);
} else
break;
}
if(iArg < argc-1) {
fprintf(stderr, "%s", usage);
exit(1);
}
codesString = nCodes == 20 ? codesStringAA : codesStringNT;
if (nCodes == 4 && matrixPrefix == NULL)
useMatrix = false; /* no default nucleotide matrix */
char *fileName = iArg == (argc-1) ? argv[argc-1] : NULL;
if (slow && fastest) {
fprintf(stderr,"Cannot be both slow and fastest\n");
exit(1);
}
if (slow && tophitsMult > 0) {
tophitsMult = 0.0;
}
FILE *fpLog = NULL;
if (logfile != NULL) {
fpLog = fopen(logfile, "w");
if (fpLog == NULL) {
fprintf(stderr, "Cannot write to: %s\n", logfile);
exit(1);
}
fprintf(fpLog, "Command:");
int i;
for (i=0; i < argc; i++)
fprintf(fpLog, " %s", argv[i]);
fprintf(fpLog,"\n");
fflush(fpLog);
}
int i;
FILE *fps[2] = {NULL,NULL};
int nFPs = 0;
if (verbose)
fps[nFPs++] = stderr;
if (fpLog != NULL)
fps[nFPs++] = fpLog;
if (!make_matrix) { /* Report settings */
char tophitString[100] = "no";
char tophitsCloseStr[100] = "default";
if(tophitsClose > 0) sprintf(tophitsCloseStr,"%.2f",tophitsClose);
if(tophitsMult>0) sprintf(tophitString,"%.2f*sqrtN close=%s refresh=%.2f",
tophitsMult, tophitsCloseStr, tophitsRefresh);
char supportString[100] = "none";
if (nBootstrap>0) {
if (MLnni != 0 || MLlen)
sprintf(supportString, "SH-like %d", nBootstrap);
else
sprintf(supportString,"Local boot %d",nBootstrap);
}
char nniString[100] = "(no NNI)";
if (nni > 0)
sprintf(nniString, "+NNI (%d rounds)", nni);
if (nni == -1)
strcpy(nniString, "+NNI");
char sprString[100] = "(no SPR)";
if (spr > 0)
sprintf(sprString, "+SPR (%d rounds range %d)", spr, maxSPRLength);
char mlnniString[100] = "(no ML-NNI)";
if(MLnni > 0)
sprintf(mlnniString, "+ML-NNI (%d rounds)", MLnni);
else if (MLnni == -1)
sprintf(mlnniString, "+ML-NNI");
else if (MLlen)
sprintf(mlnniString, "+ML branch lengths");
if ((MLlen || MLnni != 0) && !exactML)
strcat(mlnniString, " approx");
if (MLnni != 0)
sprintf(mlnniString+strlen(mlnniString), " opt-each=%d",mlAccuracy);
for (i = 0; i < nFPs; i++) {
FILE *fp = fps[i];
fprintf(fp,"FastTree Version %s %s%s\nAlignment: %s",
FT_VERSION, SSE_STRING, OpenMPString(), fileName != NULL ? fileName : "standard input");
if (nAlign>1)
fprintf(fp, " (%d alignments)", nAlign);
fprintf(fp,"\n%s distances: %s Joins: %s Support: %s\n",
nCodes == 20 ? "Amino acid" : "Nucleotide",
matrixPrefix ? matrixPrefix : (useMatrix? "BLOSUM45"
: (nCodes==4 && logdist ? "Jukes-Cantor" : "%different")),
bionj ? "weighted" : "balanced" ,
supportString);
if (intreeFile == NULL)
fprintf(fp, "Search: %s%s %s %s %s\nTopHits: %s\n",
slow?"Exhaustive (slow)" : (fastest ? "Fastest" : "Normal"),
useTopHits2nd ? "+2nd" : "",
nniString, sprString, mlnniString,
tophitString);
else
fprintf(fp, "Start at tree from %s %s %s\n", intreeFile, nniString, sprString);
if (MLnni != 0 || MLlen) {
fprintf(fp, "ML Model: %s,",
(nCodes == 4) ?
(bUseGtr ? "Generalized Time-Reversible" : "Jukes-Cantor") :
(bUseLg ? "Le-Gascuel 2008" : (bUseWag ? "Whelan-And-Goldman" : "Jones-Taylor-Thorton"))
);
if (nRateCats == 1)
fprintf(fp, " No rate variation across sites");
else
fprintf(fp, " CAT approximation with %d rate categories", nRateCats);
fprintf(fp, "\n");
if (nCodes == 4 && bUseGtrRates)
fprintf(fp, "GTR rates(ac ag at cg ct gt) %.4f %.4f %.4f %.4f %.4f %.4f\n",
gtrrates[0],gtrrates[1],gtrrates[2],gtrrates[3],gtrrates[4],gtrrates[5]);
if (nCodes == 4 && bUseGtrFreq)
fprintf(fp, "GTR frequencies(A C G T) %.4f %.4f %.4f %.4f\n",
gtrfreq[0],gtrfreq[1],gtrfreq[2],gtrfreq[3]);
}
if (constraintsFile != NULL)
fprintf(fp, "Constraints: %s Weight: %.3f\n", constraintsFile, constraintWeight);
if (pseudoWeight > 0)
fprintf(fp, "Pseudocount weight for comparing sequences with little overlap: %.3lf\n",pseudoWeight);
fflush(fp);
}
}
if (matrixPrefix != NULL) {
if (!useMatrix) {
fprintf(stderr,"Cannot use both -matrix and -nomatrix arguments!");
exit(1);
}
distance_matrix = ReadDistanceMatrix(matrixPrefix);
} else if (useMatrix) { /* use default matrix */
assert(nCodes==20);
distance_matrix = &matrixBLOSUM45;
SetupDistanceMatrix(distance_matrix);
} else {
distance_matrix = NULL;
}
int iAln;
FILE *fpIn = fileName != NULL ? fopen(fileName, "r") : stdin;
if (fpIn == NULL) {
fprintf(stderr, "Cannot read %s\n", fileName);
exit(1);
}
FILE *fpConstraints = NULL;
if (constraintsFile != NULL) {
fpConstraints = fopen(constraintsFile, "r");
if (fpConstraints == NULL) {
fprintf(stderr, "Cannot read %s\n", constraintsFile);
exit(1);
}
}
FILE *fpInTree = NULL;
if (intreeFile != NULL) {
fpInTree = fopen(intreeFile,"r");
if (fpInTree == NULL) {
fprintf(stderr, "Cannot read %s\n", intreeFile);
exit(1);
}
}
for(iAln = 0; iAln < nAlign; iAln++) {
alignment_t *aln = ReadAlignment(fpIn, bQuote);
if (aln->nSeq < 1) {
fprintf(stderr, "No alignment sequences\n");
exit(1);
}
if (fpLog) {
fprintf(fpLog, "Read %d sequences, %d positions\n", aln->nSeq, aln->nPos);
fflush(fpLog);
}
struct timeval clock_start;
gettimeofday(&clock_start,NULL);
ProgressReport("Read alignment",0,0,0,0);
/* Check that all names in alignment are unique */
hashstrings_t *hashnames = MakeHashtable(aln->names, aln->nSeq);
int i;
for (i=0; i<aln->nSeq; i++) {
hashiterator_t hi = FindMatch(hashnames,aln->names[i]);
if (HashCount(hashnames,hi) != 1) {
fprintf(stderr,"Non-unique name '%s' in the alignment\n",aln->names[i]);
exit(1);
}
}
/* Make a list of unique sequences -- note some lists are bigger than required */
ProgressReport("Hashed the names",0,0,0,0);
if (make_matrix) {
NJ_t *NJ = InitNJ(aln->seqs, aln->nSeq, aln->nPos,
/*constraintSeqs*/NULL, /*nConstraints*/0,
distance_matrix, /*transmat*/NULL);
printf(" %d\n",aln->nSeq);
int i,j;
for(i = 0; i < NJ->nSeq; i++) {
printf("%s",aln->names[i]);
for (j = 0; j < NJ->nSeq; j++) {
besthit_t hit;
SeqDist(NJ->profiles[i]->codes,NJ->profiles[j]->codes,NJ->nPos,NJ->distance_matrix,/*OUT*/&hit);
if (logdist)
hit.dist = LogCorrect(hit.dist);
/* Make sure -0 prints as 0 */
printf(" %f", hit.dist <= 0.0 ? 0.0 : hit.dist);
}
printf("\n");
}
} else {
/* reset counters*/
profileOps = 0;
outprofileOps = 0;
seqOps = 0;
profileAvgOps = 0;
nHillBetter = 0;
nCloseUsed = 0;
nClose2Used = 0;
nRefreshTopHits = 0;
nVisibleUpdate = 0;
nNNI = 0;
nML_NNI = 0;
nProfileFreqAlloc = 0;
nProfileFreqAvoid = 0;
szAllAlloc = 0;
mymallocUsed = 0;
maxmallocHeap = 0;
nLkCompute = 0;
nPosteriorCompute = 0;
nAAPosteriorExact = 0;
nAAPosteriorRough = 0;
nStarTests = 0;
uniquify_t *unique = UniquifyAln(aln);
ProgressReport("Identified unique sequences",0,0,0,0);
/* read constraints */
alignment_t *constraints = NULL;
char **uniqConstraints = NULL;
if (constraintsFile != NULL) {
constraints = ReadAlignment(fpConstraints, bQuote);
if (constraints->nSeq < 4) {
fprintf(stderr, "Warning: constraints file with less than 4 sequences ignored:\nalignment #%d in %s\n",
iAln+1, constraintsFile);
constraints = FreeAlignment(constraints);
} else {
uniqConstraints = AlnToConstraints(constraints, unique, hashnames);
ProgressReport("Read the constraints",0,0,0,0);
}
} /* end load constraints */
transition_matrix_t *transmat = NULL;
if (nCodes == 20) {
transmat = bUseLg? CreateTransitionMatrix(matrixLG08,statLG08) :
(bUseWag? CreateTransitionMatrix(matrixWAG01,statWAG01) :
CreateTransitionMatrix(matrixJTT92,statJTT92));
} else if (nCodes == 4 && bUseGtr && (bUseGtrRates || bUseGtrFreq)) {
transmat = CreateGTR(gtrrates,gtrfreq);
}
NJ_t *NJ = InitNJ(unique->uniqueSeq, unique->nUnique, aln->nPos,
uniqConstraints,
uniqConstraints != NULL ? constraints->nPos : 0, /* nConstraints */
distance_matrix,
transmat);
if (verbose>2) fprintf(stderr, "read %s seqs %d (%d unique) positions %d nameLast %s seqLast %s\n",
fileName ? fileName : "standard input",
aln->nSeq, unique->nUnique, aln->nPos, aln->names[aln->nSeq-1], aln->seqs[aln->nSeq-1]);
FreeAlignmentSeqs(/*IN/OUT*/aln); /*no longer needed*/
if (fpInTree != NULL) {
if (intree1)
fseek(fpInTree, 0L, SEEK_SET);
ReadTree(/*IN/OUT*/NJ, /*IN*/unique, /*IN*/hashnames, /*READ*/fpInTree);
if (verbose > 2)
fprintf(stderr, "Read tree from %s\n", intreeFile);
if (verbose > 2)
PrintNJ(stderr, NJ, aln->names, unique, /*support*/false, bQuote);
} else {
FastNJ(NJ);
}
LogTree("NJ", 0, fpLog, NJ, aln->names, unique, bQuote);
/* profile-frequencies for the "up-profiles" in ReliabilityNJ take only diameter(Tree)*L*a
space not N*L*a space, because we can free them as we go.
And up-profile by their nature tend to be complicated.
So save the profile-frequency memory allocation counters now to exclude later results.
*/
#ifdef TRACK_MEMORY
long svProfileFreqAlloc = nProfileFreqAlloc;
long svProfileFreqAvoid = nProfileFreqAvoid;
#endif
int nniToDo = nni == -1 ? (int)(0.5 + 4.0 * log(NJ->nSeq)/log(2)) : nni;
int sprRemaining = spr;
int MLnniToDo = (MLnni != -1) ? MLnni : (int)(0.5 + 2.0*log(NJ->nSeq)/log(2));
if(verbose>0) {
if (fpInTree == NULL)
fprintf(stderr, "Initial topology in %.2f seconds\n", clockDiff(&clock_start));
if (spr > 0 || nniToDo > 0 || MLnniToDo > 0)
fprintf(stderr,"Refining topology: %d rounds ME-NNIs, %d rounds ME-SPRs, %d rounds ML-NNIs\n", nniToDo, spr, MLnniToDo);
}
if (nniToDo>0) {
int i;
bool bConverged = false;
nni_stats_t *nni_stats = InitNNIStats(NJ);
for (i=0; i < nniToDo; i++) {
double maxDelta;
if (!bConverged) {
int nChange = NNI(/*IN/OUT*/NJ, i, nniToDo, /*use ml*/false, /*IN/OUT*/nni_stats, /*OUT*/&maxDelta);
LogTree("ME_NNI%d",i+1, fpLog, NJ, aln->names, unique, bQuote);
if (nChange == 0) {
bConverged = true;
if (verbose>1)
fprintf(stderr, "Min_evolution NNIs converged at round %d -- skipping some rounds\n", i+1);
if (fpLog)
fprintf(fpLog, "Min_evolution NNIs converged at round %d -- skipping some rounds\n", i+1);
}
}
/* Interleave SPRs with NNIs (typically 1/3rd NNI, SPR, 1/3rd NNI, SPR, 1/3rd NNI */
if (sprRemaining > 0 && (nniToDo/(spr+1) > 0 && ((i+1) % (nniToDo/(spr+1))) == 0)) {
SPR(/*IN/OUT*/NJ, maxSPRLength, spr-sprRemaining, spr);
LogTree("ME_SPR%d",spr-sprRemaining+1, fpLog, NJ, aln->names, unique, bQuote);
sprRemaining--;
/* Restart the NNIs -- set all ages to 0, etc. */
bConverged = false;
nni_stats = FreeNNIStats(nni_stats, NJ);
nni_stats = InitNNIStats(NJ);
}
}
nni_stats = FreeNNIStats(nni_stats, NJ);
}
while(sprRemaining > 0) { /* do any remaining SPR rounds */
SPR(/*IN/OUT*/NJ, maxSPRLength, spr-sprRemaining, spr);
LogTree("ME_SPR%d",spr-sprRemaining+1, fpLog, NJ, aln->names, unique, bQuote);
sprRemaining--;
}
/* In minimum-evolution mode, update branch lengths, even if no NNIs or SPRs,
so that they are log-corrected, do not include penalties from constraints,
and avoid errors due to approximation of out-distances.
If doing maximum-likelihood NNIs, then we'll also use these
to get estimates of starting distances for quartets, etc.
*/
UpdateBranchLengths(/*IN/OUT*/NJ);
LogTree("ME_Lengths",0, fpLog, NJ, aln->names, unique, bQuote);
double total_len = 0;
int iNode;
for (iNode = 0; iNode < NJ->maxnode; iNode++)
total_len += fabs(NJ->branchlength[iNode]);
if (verbose>0) {
fprintf(stderr, "Total branch-length %.3f after %.2f sec\n",
total_len, clockDiff(&clock_start));
fflush(stderr);
}
if (fpLog) {
fprintf(fpLog, "Total branch-length %.3f after %.2f sec\n",
total_len, clockDiff(&clock_start));
fflush(stderr);
}
#ifdef TRACK_MEMORY
if (verbose>1) {
struct mallinfo mi = mallinfo();
fprintf(stderr, "Memory @ end of ME phase: %.2f MB (%.1f byte/pos) useful %.2f expected %.2f\n",
(mi.arena+mi.hblkhd)/1.0e6, (mi.arena+mi.hblkhd)/(double)(NJ->nSeq*(double)NJ->nPos),
mi.uordblks/1.0e6, mymallocUsed/1e6);
}
#endif
SplitCount_t splitcount = {0,0,0,0,0.0,0.0};
if (MLnniToDo > 0 || MLlen) {
bool warn_len = total_len/NJ->maxnode < 0.001 && MLMinBranchLengthTolerance > 1.0/aln->nPos;
bool warn = warn_len || (total_len/NJ->maxnode < 0.001 && aln->nPos >= 10000);
if (warn)
fprintf(stderr, "\nWARNING! This alignment consists of closely-related and very-long sequences.\n");
if (warn_len)
fprintf(stderr,
"This version of FastTree may not report reasonable branch lengths!\n"
#ifdef USE_DOUBLE
"Consider changing MLMinBranchLengthTolerance.\n"
#else
"Consider recompiling FastTree with -DUSE_DOUBLE.\n"
#endif
"For more information, visit\n"
"http://www.microbesonline.org/fasttree/#BranchLen\n\n");
if (warn)
fprintf(stderr, "WARNING! FastTree (or other standard maximum-likelihood tools)\n"
"may not be appropriate for aligments of very closely-related sequences\n"
"like this one, as FastTree does not account for recombination or gene conversion\n\n");
/* Do maximum-likelihood computations */
/* Convert profiles to use the transition matrix */
distance_matrix_t *tmatAsDist = TransMatToDistanceMat(/*OPTIONAL*/NJ->transmat);
RecomputeProfiles(NJ, /*OPTIONAL*/tmatAsDist);
tmatAsDist = myfree(tmatAsDist, sizeof(distance_matrix_t));
double lastloglk = -1e20;
nni_stats_t *nni_stats = InitNNIStats(NJ);
bool resetGtr = nCodes == 4 && bUseGtr && !bUseGtrRates;
if (MLlen) {
int iRound;
int maxRound = (int)(0.5 + log(NJ->nSeq)/log(2));
double dLastLogLk = -1e20;
for (iRound = 1; iRound <= maxRound; iRound++) {
int node;
numeric_t *oldlength = (numeric_t*)mymalloc(sizeof(numeric_t)*NJ->maxnodes);
for (node = 0; node < NJ->maxnode; node++)
oldlength[node] = NJ->branchlength[node];
OptimizeAllBranchLengths(/*IN/OUT*/NJ);
LogTree("ML_Lengths",iRound, fpLog, NJ, aln->names, unique, bQuote);
double dMaxChange = 0; /* biggest change in branch length */
for (node = 0; node < NJ->maxnode; node++) {
double d = fabs(oldlength[node] - NJ->branchlength[node]);
if (dMaxChange < d)
dMaxChange = d;
}
oldlength = myfree(oldlength, sizeof(numeric_t)*NJ->maxnodes);
double loglk = TreeLogLk(NJ, /*site_likelihoods*/NULL);
bool bConverged = iRound > 1 && (dMaxChange < 0.001 || loglk < (dLastLogLk+treeLogLkDelta));
if (verbose)
fprintf(stderr, "%d rounds ML lengths: LogLk %s= %.3lf Max-change %.4lf%s Time %.2f\n",
iRound,
exactML || nCodes != 20 ? "" : "~",
loglk,
dMaxChange,
bConverged ? " (converged)" : "",
clockDiff(&clock_start));
if (fpLog)
fprintf(fpLog, "TreeLogLk\tLength%d\t%.4lf\tMaxChange\t%.4lf\n",
iRound, loglk, dMaxChange);
if (iRound == 1) {
if (resetGtr)
SetMLGtr(/*IN/OUT*/NJ, bUseGtrFreq ? gtrfreq : NULL, fpLog);
SetMLRates(/*IN/OUT*/NJ, nRateCats);
LogMLRates(fpLog, NJ);
}
if (bConverged)
break;
}
}
if (MLnniToDo > 0) {
/* This may help us converge faster, and is fast */
OptimizeAllBranchLengths(/*IN/OUT*/NJ);
LogTree("ML_Lengths%d",1, fpLog, NJ, aln->names, unique, bQuote);
}
int iMLnni;
double maxDelta;
bool bConverged = false;
for (iMLnni = 0; iMLnni < MLnniToDo; iMLnni++) {
int changes = NNI(/*IN/OUT*/NJ, iMLnni, MLnniToDo, /*use ml*/true, /*IN/OUT*/nni_stats, /*OUT*/&maxDelta);
LogTree("ML_NNI%d",iMLnni+1, fpLog, NJ, aln->names, unique, bQuote);
double loglk = TreeLogLk(NJ, /*site_likelihoods*/NULL);
bool bConvergedHere = (iMLnni > 0) && ((loglk < lastloglk + treeLogLkDelta) || maxDelta < treeLogLkDelta);
if (verbose)
fprintf(stderr, "ML-NNI round %d: LogLk %s= %.3f NNIs %d max delta %.2f Time %.2f%s\n",
iMLnni+1,
exactML || nCodes != 20 ? "" : "~",
loglk, changes, maxDelta, clockDiff(&clock_start),
bConverged ? " (final)" : "");
if (fpLog)
fprintf(fpLog, "TreeLogLk\tML_NNI%d\t%.4lf\tMaxChange\t%.4lf\n", iMLnni+1, loglk, maxDelta);
if (bConverged)
break; /* we did our extra round */
if (bConvergedHere)
bConverged = true;
if (bConverged || iMLnni == MLnniToDo-2) {
/* last round uses high-accuracy seettings -- reset NNI stats to tone down heuristics */
nni_stats = FreeNNIStats(nni_stats, NJ);
nni_stats = InitNNIStats(NJ);
if (verbose)
fprintf(stderr, "Turning off heuristics for final round of ML NNIs%s\n",
bConvergedHere? " (converged)" : "");
if (fpLog)
fprintf(fpLog, "Turning off heuristics for final round of ML NNIs%s\n",
bConvergedHere? " (converged)" : "");
}
lastloglk = loglk;
if (iMLnni == 0 && NJ->rates.nRateCategories == 1) {
if (resetGtr)
SetMLGtr(/*IN/OUT*/NJ, bUseGtrFreq ? gtrfreq : NULL, fpLog);
SetMLRates(/*IN/OUT*/NJ, nRateCats);
LogMLRates(fpLog, NJ);
}
}
nni_stats = FreeNNIStats(nni_stats, NJ);
/* This does not take long and improves the results */
if (MLnniToDo > 0) {
OptimizeAllBranchLengths(/*IN/OUT*/NJ);
LogTree("ML_Lengths%d",2, fpLog, NJ, aln->names, unique, bQuote);
if (verbose || fpLog) {
double loglk = TreeLogLk(NJ, /*site_likelihoods*/NULL);
if (verbose)
fprintf(stderr, "Optimize all lengths: LogLk %s= %.3f Time %.2f\n",
exactML || nCodes != 20 ? "" : "~",
loglk,
clockDiff(&clock_start));
if (fpLog) {
fprintf(fpLog, "TreeLogLk\tML_Lengths%d\t%.4f\n", 2, loglk);
fflush(fpLog);
}
}
}
/* Count bad splits and compute SH-like supports if desired */
if ((MLnniToDo > 0 && !fastest) || nBootstrap > 0)
TestSplitsML(NJ, /*OUT*/&splitcount, nBootstrap);
/* Compute gamma-based likelihood? */
if (gammaLogLk && nRateCats > 1) {
numeric_t *rates = MLSiteRates(nRateCats);
double *site_loglk = MLSiteLikelihoodsByRate(NJ, rates, nRateCats);
double scale = RescaleGammaLogLk(NJ->nPos, nRateCats, rates, /*IN*/site_loglk, /*OPTIONAL*/fpLog);
rates = myfree(rates, sizeof(numeric_t) * nRateCats);
site_loglk = myfree(site_loglk, sizeof(double) * nRateCats * NJ->nPos);
for (i = 0; i < NJ->maxnodes; i++)
NJ->branchlength[i] *= scale;
}
} else {
/* Minimum evolution supports */
TestSplitsMinEvo(NJ, /*OUT*/&splitcount);
if (nBootstrap > 0)
ReliabilityNJ(NJ, nBootstrap);
}
for (i = 0; i < nFPs; i++) {
FILE *fp = fps[i];
fprintf(fp, "Total time: %.2f seconds Unique: %d/%d Bad splits: %d/%d",
clockDiff(&clock_start),
NJ->nSeq, aln->nSeq,
splitcount.nBadSplits, splitcount.nSplits);
if (splitcount.dWorstDeltaUnconstrained > 0)
fprintf(fp, " Worst %sdelta-%s %.3f",
uniqConstraints != NULL ? "unconstrained " : "",
(MLnniToDo > 0 || MLlen) ? "LogLk" : "Len",
splitcount.dWorstDeltaUnconstrained);
fprintf(fp,"\n");
if (NJ->nSeq > 3 && NJ->nConstraints > 0) {
fprintf(fp, "Violating constraints: %d both bad: %d",
splitcount.nConstraintViolations, splitcount.nBadBoth);
if (splitcount.dWorstDeltaConstrained > 0)
fprintf(fp, " Worst delta-%s due to constraints: %.3f",
(MLnniToDo > 0 || MLlen) ? "LogLk" : "Len",
splitcount.dWorstDeltaConstrained);
fprintf(fp,"\n");
}
if (verbose > 1 || fp == fpLog) {
double dN2 = NJ->nSeq*(double)NJ->nSeq;
fprintf(fp, "Dist/N**2: by-profile %.3f (out %.3f) by-leaf %.3f avg-prof %.3f\n",
profileOps/dN2, outprofileOps/dN2, seqOps/dN2, profileAvgOps/dN2);
if (nCloseUsed>0 || nClose2Used > 0 || nRefreshTopHits>0)
fprintf(fp, "Top hits: close neighbors %ld/%d 2nd-level %ld refreshes %ld",
nCloseUsed, NJ->nSeq, nClose2Used, nRefreshTopHits);
if(!slow) fprintf(fp, " Hill-climb: %ld Update-best: %ld\n", nHillBetter, nVisibleUpdate);
if (nniToDo > 0 || spr > 0 || MLnniToDo > 0)
fprintf(fp, "NNI: %ld SPR: %ld ML-NNI: %ld\n", nNNI, nSPR, nML_NNI);
if (MLnniToDo > 0) {
fprintf(fp, "Max-lk operations: lk %ld posterior %ld", nLkCompute, nPosteriorCompute);
if (nAAPosteriorExact > 0 || nAAPosteriorRough > 0)
fprintf(fp, " approximate-posteriors %.2f%%",
(100.0*nAAPosteriorRough)/(double)(nAAPosteriorExact+nAAPosteriorRough));
if (mlAccuracy < 2)
fprintf(fp, " star-only %ld", nStarTests);
fprintf(fp, "\n");
}
}
#ifdef TRACK_MEMORY
fprintf(fp, "Memory: %.2f MB (%.1f byte/pos) ",
maxmallocHeap/1.0e6, maxmallocHeap/(double)(aln->nSeq*(double)aln->nPos));
/* Only report numbers from before we do reliability estimates */
fprintf(fp, "profile-freq-alloc %ld avoided %.2f%%\n",
svProfileFreqAlloc,
svProfileFreqAvoid > 0 ?
100.0*svProfileFreqAvoid/(double)(svProfileFreqAlloc+svProfileFreqAvoid)
: 0);
#endif
fflush(fp);
}
PrintNJ(fpOut, NJ, aln->names, unique, /*support*/nBootstrap > 0, bQuote);
fflush(fpOut);
if (fpLog) {
fprintf(fpLog,"TreeCompleted\n");
fflush(fpLog);
}
FreeNJ(NJ);
if (uniqConstraints != NULL)
uniqConstraints = myfree(uniqConstraints, sizeof(char*) * unique->nUnique);
constraints = FreeAlignment(constraints);
unique = FreeUniquify(unique);
} /* end build tree */
hashnames = FreeHashtable(hashnames);
aln = FreeAlignment(aln);
} /* end loop over alignments */
if (fpLog != NULL)
fclose(fpLog);
if (fpOut != stdout) fclose(fpOut);
exit(0);
}
/* Emit a throttled progress line to stderr.
   format must consume exactly four int arguments (i1..i4). No output unless
   the global showProgress is set; a line is printed when >0.1s (or >1s) has
   passed since the last one, or always when verbose > 1. On an interactive
   terminal the line is overwritten in place with '\r'. */
void ProgressReport(char *format, int i1, int i2, int i3, int i4) {
  /* Persistent timing state across calls */
  static bool time_set = false;
  static struct timeval time_last;
  static struct timeval time_begin;

  if (!showProgress)
    return;

  struct timeval time_now;
  gettimeofday(&time_now, NULL);
  if (!time_set) {
    time_begin = time_last = time_now;
    time_set = true;
  }

  struct timeval elapsed;
  timeval_subtract(&elapsed, &time_now, &time_last);
  bool bDue = elapsed.tv_sec > 1 || elapsed.tv_usec > 100*1000 || verbose > 1;
  if (!bDue)
    return;

  /* Report time since the very first call, not since the last report */
  timeval_subtract(&elapsed, &time_now, &time_begin);
  fprintf(stderr, "%7i.%2.2i seconds: ", (int)elapsed.tv_sec, (int)(elapsed.tv_usec/10000));
  fprintf(stderr, format, i1, i2, i3, i4);
  if (verbose > 1 || !isatty(STDERR_FILENO))
    fprintf(stderr, "\n");
  else
    fprintf(stderr, "   \r"); /* overwrite this line on the next report */
  fflush(stderr);
  time_last = time_now;
}
/* Append the ML rate-category model to the log: the category count, each
   category's rate, and the (1-based) category assigned to every alignment
   position. No-op when fpLog is NULL. */
void LogMLRates(/*OPTIONAL WRITE*/FILE *fpLog, NJ_t *NJ) {
  if (fpLog == NULL)
    return;
  rates_t *r = &NJ->rates;
  fprintf(fpLog, "NCategories\t%d\nRates", r->nRateCategories);
  assert(r->nRateCategories > 0);
  int iCat;
  for (iCat = 0; iCat < r->nRateCategories; iCat++)
    fprintf(fpLog, " %f", r->rates[iCat]);
  fprintf(fpLog, "\nSiteCategories");
  int iSite;
  for (iSite = 0; iSite < NJ->nPos; iSite++) {
    /* ratecat is 0-based internally; log 1-based category numbers */
    int iCatOfSite = r->ratecat[iSite];
    fprintf(fpLog, " %d", iCatOfSite + 1);
  }
  fprintf(fpLog, "\n");
  fflush(fpLog);
}
/* Write one tagged tree to the log: the tag is produced by applying format
   (which may contain one %d consuming i), then a tab, then the Newick tree.
   No-op when fpLog is NULL. */
void LogTree(char *format, int i, /*OPTIONAL WRITE*/FILE *fpLog, NJ_t *NJ, char **names, uniquify_t *unique, bool bQuote) {
  if (fpLog == NULL)
    return;
  fprintf(fpLog, format, i);
  fputs("\t", fpLog);
  PrintNJ(fpLog, NJ, names, unique, /*support*/false, bQuote);
  fflush(fpLog);
}
/* Build and initialize an NJ_t for nSeq aligned sequences of length nPos.
   - Stores (does not copy) the sequences, constraintSeqs, distance_matrix,
     and transmat pointers.
   - Converts every sequence to a profile, tallying character counts so it
     can warn about unknown characters and about alignments that look like
     the wrong alphabet (nucleotide vs. protein, based on nCodes).
   - Allocates all per-node arrays sized for maxnodes = 2*nSeq nodes
     (leaves plus internal nodes created by joins), computes the
     out-profile over the leaves, and the initial out-distances.
   - Allocates a single rate category via AllocRateCategories.
   distance_matrix and transmat may be NULL.
   Returns the new NJ_t; caller frees it with FreeNJ(). */
NJ_t *InitNJ(char **sequences, int nSeq, int nPos,
	     /*OPTIONAL*/char **constraintSeqs, int nConstraints,
	     /*OPTIONAL*/distance_matrix_t *distance_matrix,
	     /*OPTIONAL*/transition_matrix_t *transmat) {
  int iNode;

  NJ_t *NJ = (NJ_t*)mymalloc(sizeof(NJ_t));
  NJ->root = -1; 		/* set at end of FastNJ() */
  NJ->maxnode = NJ->nSeq = nSeq;
  NJ->nPos = nPos;
  NJ->maxnodes = 2*nSeq;	/* leaves + internal nodes from joins */
  NJ->seqs = sequences;
  NJ->distance_matrix = distance_matrix;
  NJ->transmat = transmat;
  NJ->nConstraints = nConstraints;
  NJ->constraintSeqs = constraintSeqs;

  NJ->profiles = (profile_t **)mymalloc(sizeof(profile_t*) * NJ->maxnodes);

  /* Tally how often each byte value occurs across all sequences, for the
     unknown-character and alphabet-mismatch warnings below */
  unsigned long counts[256];
  int i;
  for (i = 0; i < 256; i++)
    counts[i] = 0;
  for (iNode = 0; iNode < NJ->nSeq; iNode++) {
    NJ->profiles[iNode] = SeqToProfile(NJ, NJ->seqs[iNode], nPos,
				       constraintSeqs != NULL ? constraintSeqs[iNode] : NULL,
				       nConstraints,
				       iNode,
				       /*IN/OUT*/counts);
  }
  unsigned long totCount = 0;
  for (i = 0; i < 256; i++)
    totCount += counts[i];

  /* warnings about unknown characters (gap characters '.' and '-' are OK;
     so is any character in codesString, case-insensitively) */
  for (i = 0; i < 256; i++) {
    if (counts[i] == 0 || i == '.' || i == '-')
      continue;
    unsigned char *codesP;
    bool bMatched = false;
    for (codesP = codesString; *codesP != '\0'; codesP++) {
      if (*codesP == i || tolower(*codesP) == i) {
	bMatched = true;
	break;
      }
    }
    if (!bMatched)
      fprintf(stderr, "Ignored unknown character %c (seen %lu times)\n", i, counts[i]);
  }

  /* warnings about the counts: fraction of non-gap characters that are
     A/C/G/T/U/N (either case) -- low for nucleotides or high for proteins
     suggests the wrong alphabet was selected */
  double fACGTUN = (counts['A'] + counts['C'] + counts['G'] + counts['T'] + counts['U'] + counts['N']
		    + counts['a'] + counts['c'] + counts['g'] + counts['t'] + counts['u'] + counts['n'])
    / (double)(totCount - counts['-'] - counts['.']);
  if (nCodes == 4 && fACGTUN < 0.9)
    fprintf(stderr, "WARNING! ONLY %.1f%% NUCLEOTIDE CHARACTERS -- IS THIS REALLY A NUCLEOTIDE ALIGNMENT?\n",
	    100.0 * fACGTUN);
  else if (nCodes == 20 && fACGTUN >= 0.9)
    fprintf(stderr, "WARNING! %.1f%% NUCLEOTIDE CHARACTERS -- IS THIS REALLY A PROTEIN ALIGNMENT?\n",
	    100.0 * fACGTUN);

  if(verbose>10) fprintf(stderr,"Made sequence profiles\n");
  for (iNode = NJ->nSeq; iNode < NJ->maxnodes; iNode++)
    NJ->profiles[iNode] = NULL; /* not yet exists */

  NJ->outprofile = OutProfile(NJ->profiles, NJ->nSeq,
			      NJ->nPos, NJ->nConstraints,
			      NJ->distance_matrix);
  if(verbose>10) fprintf(stderr,"Made out-profile\n");

  /* Per-node arrays, zero/sentinel-initialized */
  NJ->totdiam = 0.0;

  NJ->diameter = (numeric_t *)mymalloc(sizeof(numeric_t)*NJ->maxnodes);
  for (iNode = 0; iNode < NJ->maxnodes; iNode++) NJ->diameter[iNode] = 0;

  NJ->varDiameter = (numeric_t *)mymalloc(sizeof(numeric_t)*NJ->maxnodes);
  for (iNode = 0; iNode < NJ->maxnodes; iNode++) NJ->varDiameter[iNode] = 0;

  NJ->selfdist = (numeric_t *)mymalloc(sizeof(numeric_t)*NJ->maxnodes);
  for (iNode = 0; iNode < NJ->maxnodes; iNode++) NJ->selfdist[iNode] = 0;

  /* A leaf's self-weight is its number of non-gap positions */
  NJ->selfweight = (numeric_t *)mymalloc(sizeof(numeric_t)*NJ->maxnodes);
  for (iNode = 0; iNode < NJ->nSeq; iNode++)
    NJ->selfweight[iNode] = NJ->nPos - NGaps(NJ,iNode);

  NJ->outDistances = (numeric_t *)mymalloc(sizeof(numeric_t)*NJ->maxnodes);
  NJ->nOutDistActive = (int *)mymalloc(sizeof(int)*NJ->maxnodes);
  for (iNode = 0; iNode < NJ->maxnodes; iNode++)
    NJ->nOutDistActive[iNode] = NJ->nSeq * 10; /* unreasonably high value */

  /* parent must be NULL while computing initial out-distances */
  NJ->parent = NULL;		/* so SetOutDistance ignores it */
  for (iNode = 0; iNode < NJ->nSeq; iNode++)
    SetOutDistance(/*IN/UPDATE*/NJ, iNode, /*nActive*/NJ->nSeq);

  if (verbose>2) {
    for (iNode = 0; iNode < 4 && iNode < NJ->nSeq; iNode++)
      fprintf(stderr, "Node %d outdist %f\n", iNode, NJ->outDistances[iNode]);
  }

  NJ->parent = (int *)mymalloc(sizeof(int)*NJ->maxnodes);
  for (iNode = 0; iNode < NJ->maxnodes; iNode++) NJ->parent[iNode] = -1; /* -1 = active/no parent */

  NJ->branchlength = (numeric_t *)mymalloc(sizeof(numeric_t)*NJ->maxnodes); /* distance to parent */
  for (iNode = 0; iNode < NJ->maxnodes; iNode++) NJ->branchlength[iNode] = 0;

  NJ->support = (numeric_t *)mymalloc(sizeof(numeric_t)*NJ->maxnodes);
  for (iNode = 0; iNode < NJ->maxnodes; iNode++) NJ->support[iNode] = -1.0; /* -1 = not computed */

  NJ->child = (children_t*)mymalloc(sizeof(children_t)*NJ->maxnodes);
  for (iNode= 0; iNode < NJ->maxnode; iNode++) NJ->child[iNode].nChild = 0;

  /* Start with a single rate category covering all positions */
  NJ->rates.nRateCategories = 0;
  NJ->rates.rates = NULL;
  NJ->rates.ratecat = NULL;
  AllocRateCategories(&NJ->rates, 1, NJ->nPos);
  return(NJ);
}
/* Free everything owned by an NJ_t (profiles, out-profile, all per-node
   arrays, the transition matrix, and the rate categories), then the NJ_t
   itself. Mirrors the allocations in InitNJ. Safe on NULL.
   Returns NULL (the myfree convention), for assignment back to the caller's
   pointer. */
NJ_t *FreeNJ(NJ_t *NJ) {
  if (NJ==NULL)
    return(NJ);

  int i;
  /* Only nodes up to maxnode ever had profiles created */
  for (i=0; i < NJ->maxnode; i++)
    NJ->profiles[i] = FreeProfile(NJ->profiles[i], NJ->nPos, NJ->nConstraints);
  NJ->profiles = myfree(NJ->profiles, sizeof(profile_t*) * NJ->maxnodes);
  NJ->outprofile = FreeProfile(NJ->outprofile, NJ->nPos, NJ->nConstraints);
  NJ->diameter = myfree(NJ->diameter, sizeof(numeric_t)*NJ->maxnodes);
  NJ->varDiameter = myfree(NJ->varDiameter, sizeof(numeric_t)*NJ->maxnodes);
  NJ->selfdist = myfree(NJ->selfdist, sizeof(numeric_t)*NJ->maxnodes);
  NJ->selfweight = myfree(NJ->selfweight, sizeof(numeric_t)*NJ->maxnodes);
  NJ->outDistances = myfree(NJ->outDistances, sizeof(numeric_t)*NJ->maxnodes);
  NJ->nOutDistActive = myfree(NJ->nOutDistActive, sizeof(int)*NJ->maxnodes);
  NJ->parent = myfree(NJ->parent, sizeof(int)*NJ->maxnodes);
  NJ->branchlength = myfree(NJ->branchlength, sizeof(numeric_t)*NJ->maxnodes);
  NJ->support = myfree(NJ->support, sizeof(numeric_t)*NJ->maxnodes);
  NJ->child = myfree(NJ->child, sizeof(children_t)*NJ->maxnodes);
  NJ->transmat = myfree(NJ->transmat, sizeof(transition_matrix_t));
  /* nRateCategories == 0 means deallocate-only */
  AllocRateCategories(&NJ->rates, 0, NJ->nPos);
  return(myfree(NJ, sizeof(NJ_t)));
}
/* Allocate or reallocate the rate-category arrays, assigning every position
   to category 0 and giving every category a rate of 1.0.
   Passing nRateCategories = 0 just deallocates (InitNJ seeds the struct
   with NULL pointers, so the frees below are safe on first use). */
void AllocRateCategories(/*IN/OUT*/rates_t *rates, int nRateCategories, int nPos) {
  assert(nRateCategories >= 0);

  /* Drop any previous arrays before (re)allocating */
  rates->rates = myfree(rates->rates, sizeof(numeric_t) * rates->nRateCategories);
  rates->ratecat = myfree(rates->ratecat, sizeof(unsigned int) * nPos);
  rates->nRateCategories = nRateCategories;
  if (nRateCategories == 0)
    return;

  int j;
  rates->rates = (numeric_t*)mymalloc(sizeof(numeric_t) * nRateCategories);
  for (j = 0; j < nRateCategories; j++)
    rates->rates[j] = 1.0;
  rates->ratecat = (unsigned int *)mymalloc(sizeof(unsigned int) * nPos);
  for (j = 0; j < nPos; j++)
    rates->ratecat[j] = 0;
}
/* Build the initial (BIONJ-style) neighbor-joining topology in NJ.
   Handles the trivial 1- and 2-leaf cases directly; otherwise iterates
   nSeq-3 joins using one of three search strategies (exhaustive when the
   global 'slow' is set, top-hits lists when tophitsMult yields a usable
   list size m, or a per-node "visible set" of best hits), then joins the
   final 3 active nodes under a trifurcating root.
   Sets NJ->root, parent/child links, branch lengths, diameters, and keeps
   NJ->outprofile / out-distances up to date as nodes are joined. */
void FastNJ(NJ_t *NJ) {
  int iNode;

  assert(NJ->nSeq >= 1);
  /* Trivial topologies: with fewer than 3 leaves, all leaves hang directly
     off the root */
  if (NJ->nSeq < 3) {
    NJ->root = NJ->maxnode++;
    NJ->child[NJ->root].nChild = NJ->nSeq;
    for (iNode = 0; iNode < NJ->nSeq; iNode++) {
      NJ->parent[iNode] = NJ->root;
      NJ->child[NJ->root].child[iNode] = iNode;
    }
    if (NJ->nSeq == 1) {
      NJ->branchlength[0] = 0;
    } else {
      assert (NJ->nSeq == 2);
      besthit_t hit;
      SeqDist(NJ->profiles[0]->codes,NJ->profiles[1]->codes,NJ->nPos,NJ->distance_matrix,/*OUT*/&hit);
      /* split the pairwise distance evenly between the two leaves */
      NJ->branchlength[0] = hit.dist/2.0;
      NJ->branchlength[1] = hit.dist/2.0;
    }
    return;
  }
  /* else 3 or more sequences */

  /* The visible set stores the best hit of each node (unless using top hits, in which case
     it is handled by the top hits routines) */
  besthit_t *visible = NULL;	/* Not used if doing top hits */
  besthit_t *besthitNew = NULL;	/* All hits of new node -- not used if doing top-hits */

  /* The top-hits lists, with the key parameter m = length of each top-hit list */
  top_hits_t *tophits = NULL;
  int m = 0;			/* maximum length of a top-hits list */
  if (tophitsMult > 0) {
    m = (int)(0.5 + tophitsMult*sqrt(NJ->nSeq));
    if(m<4 || 2*m >= NJ->nSeq) {
      /* top-hits only pays off when lists are much shorter than nSeq */
      m=0;
      if(verbose>1) fprintf(stderr,"Too few leaves, turning off top-hits\n");
    } else {
      if(verbose>2) fprintf(stderr,"Top-hit-list size = %d of %d\n", m, NJ->nSeq);
    }
  }
  assert(!(slow && m>0)); /* slow (exhaustive) mode is incompatible with top-hits */

  /* Initialize top-hits or visible set */
  if (m>0) {
    tophits = InitTopHits(NJ, m);
    SetAllLeafTopHits(/*IN/UPDATE*/NJ, /*OUT*/tophits);
    ResetTopVisible(/*IN/UPDATE*/NJ, /*nActive*/NJ->nSeq, /*IN/OUT*/tophits);
  } else if (!slow) {
    visible = (besthit_t*)mymalloc(sizeof(besthit_t)*NJ->maxnodes);
    besthitNew = (besthit_t*)mymalloc(sizeof(besthit_t)*NJ->maxnodes);
    for (iNode = 0; iNode < NJ->nSeq; iNode++)
      SetBestHit(iNode, NJ, /*nActive*/NJ->nSeq, /*OUT*/&visible[iNode], /*OUT IGNORED*/NULL);
  }

  /* Iterate over joins: each pass joins the best pair, creating one new
     internal node, until only 3 active nodes remain */
  int nActiveOutProfileReset = NJ->nSeq; /* nActive at the last out-profile recomputation */
  int nActive;
  for (nActive = NJ->nSeq; nActive > 3; nActive--) {
    int nJoinsDone = NJ->nSeq - nActive;
    if (nJoinsDone > 0 && (nJoinsDone % 100) == 0)
      ProgressReport("Joined %6d of %6d", nJoinsDone, NJ->nSeq-3, 0, 0);
    
    besthit_t join; 		/* the join to do */
    if (slow) {
      ExhaustiveNJSearch(NJ,nActive,/*OUT*/&join);
    } else if (m>0) {
      TopHitNJSearch(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/tophits, /*OUT*/&join);
    } else {
      FastNJSearch(NJ, nActive, /*IN/OUT*/visible, /*OUT*/&join);
    }
    
    if (verbose>2) {
      /* Diagnostics: report if this join violates any topological constraint */
      double penalty = constraintWeight
	* (double)JoinConstraintPenalty(NJ, join.i, join.j);
      if (penalty > 0.001) {
	fprintf(stderr, "Constraint violation during neighbor-joining %d %d into %d penalty %.3f\n",
		join.i, join.j, NJ->maxnode, penalty);
	int iC;
	for (iC = 0; iC < NJ->nConstraints; iC++) {
	  int local = JoinConstraintPenaltyPiece(NJ, join.i, join.j, iC);
	  if (local > 0)
	    fprintf(stderr, "Constraint %d piece %d %d/%d %d/%d %d/%d\n", iC, local,
		    NJ->profiles[join.i]->nOn[iC],
		    NJ->profiles[join.i]->nOff[iC],
		    NJ->profiles[join.j]->nOn[iC],
		    NJ->profiles[join.j]->nOff[iC],
		    NJ->outprofile->nOn[iC] - NJ->profiles[join.i]->nOn[iC] - NJ->profiles[join.j]->nOn[iC],
		    NJ->outprofile->nOff[iC] - NJ->profiles[join.i]->nOff[iC] - NJ->profiles[join.j]->nOff[iC]);
	}
      }
    }

    /* because of the stale out-distance heuristic, make sure that these are up-to-date */
    SetOutDistance(NJ, join.i, nActive);
    SetOutDistance(NJ, join.j, nActive);
    /* Make sure weight is set and criterion is up to date */
    SetDistCriterion(NJ, nActive, /*IN/OUT*/&join);
    assert(NJ->nOutDistActive[join.i] == nActive);
    assert(NJ->nOutDistActive[join.j] == nActive);

    /* Create the new internal node for this join; children stored in
       ascending index order */
    int newnode = NJ->maxnode++;
    NJ->parent[join.i] = newnode;
    NJ->parent[join.j] = newnode;
    NJ->child[newnode].nChild = 2;
    NJ->child[newnode].child[0] = join.i < join.j ? join.i : join.j;
    NJ->child[newnode].child[1] = join.i > join.j ? join.i : join.j;

    /* Standard NJ branch lengths: split dist(i,j) by the out-distance
       difference, normalized by (nActive-2) */
    double rawIJ = join.dist + NJ->diameter[join.i] + NJ->diameter[join.j];
    double distIJ = join.dist;

    double deltaDist = (NJ->outDistances[join.i]-NJ->outDistances[join.j])/(double)(nActive-2);
    NJ->branchlength[join.i] = (distIJ + deltaDist)/2;
    NJ->branchlength[join.j] = (distIJ - deltaDist)/2;

    double bionjWeight = 0.5;	/* IJ = bionjWeight*I + (1-bionjWeight)*J */
    double varIJ = rawIJ - NJ->varDiameter[join.i] - NJ->varDiameter[join.j];

    if (bionj && join.weight > 0.01 && varIJ > 0.001) {
      /* Set bionjWeight according to the BIONJ formula, where
	 the variance matrix is approximated by
	 Vij = ProfileVar(i,j) - varDiameter(i) - varDiameter(j)
	 ProfileVar(i,j) = distance(i,j) = top(i,j)/weight(i,j)
	 (The node's distance diameter does not affect the variances.)

	 The BIONJ formula is equation 9 from Gascuel 1997:
	 bionjWeight = 1/2 + sum(k!=i,j) (Vjk - Vik) / ((nActive-2)*Vij)
	 sum(k!=i,j) (Vjk - Vik) = sum(k!=i,j) Vik - varDiameter(j) + varDiameter(i)
	 = sum(k!=i,j) ProfileVar(j,k) - sum(k!=i,j) ProfileVar(i,k) + (nActive-2)*(varDiameter(i)-varDiameter(j))

	 sum(k!=i,j) ProfileVar(i,k)
	 ~= (sum(k!=i,j) distance(i,k) * weight(i,k))/(mean(k!=i,j) weight(i,k))
	 ~= (N-2) * top(i, Out-i-j) / weight(i, Out-i-j)

	 weight(i, Out-i-j) = N*weight(i,Out) - weight(i,i) - weight(i,j)
	 top(i, Out-i-j) = N*top(i,Out) - top(i,i) - top(i,j)
      */
      besthit_t outI;
      besthit_t outJ;
      ProfileDist(NJ->profiles[join.i],NJ->outprofile,NJ->nPos,NJ->distance_matrix,/*OUT*/&outI);
      ProfileDist(NJ->profiles[join.j],NJ->outprofile,NJ->nPos,NJ->distance_matrix,/*OUT*/&outJ);
      outprofileOps += 2;

      double varIWeight = (nActive * outI.weight - NJ->selfweight[join.i] - join.weight);
      double varJWeight = (nActive * outJ.weight - NJ->selfweight[join.j] - join.weight);

      double varITop = outI.dist * outI.weight * nActive
	- NJ->selfdist[join.i] * NJ->selfweight[join.i] - rawIJ * join.weight;
      double varJTop = outJ.dist * outJ.weight * nActive
	- NJ->selfdist[join.j] * NJ->selfweight[join.j] - rawIJ * join.weight;

      double deltaProfileVarOut = (nActive-2) * (varJTop/varJWeight - varITop/varIWeight);
      double deltaVarDiam = (nActive-2)*(NJ->varDiameter[join.i] - NJ->varDiameter[join.j]);
      if (varJWeight > 0.01 && varIWeight > 0.01)
	bionjWeight = 0.5 + (deltaProfileVarOut+deltaVarDiam)/(2*(nActive-2)*varIJ);
      /* clamp the weight into [0,1] */
      if(bionjWeight<0) bionjWeight=0;
      if(bionjWeight>1) bionjWeight=1;
      if (verbose>2) fprintf(stderr,"dVarO %f dVarDiam %f varIJ %f from dist %f weight %f (pos %d) bionjWeight %f %f\n",
			     deltaProfileVarOut, deltaVarDiam,
			     varIJ, join.dist, join.weight, NJ->nPos,
			     bionjWeight, 1-bionjWeight);
      if (verbose>3 && (newnode%5) == 0) {
	/* Compare weight estimated from outprofiles from weight made by summing over other nodes */
	double deltaProfileVarTot = 0;
	for (iNode = 0; iNode < newnode; iNode++) {
	  if (NJ->parent[iNode] < 0) { /* excludes join.i, join.j */
	    besthit_t di, dj;
	    ProfileDist(NJ->profiles[join.i],NJ->profiles[iNode],NJ->nPos,NJ->distance_matrix,/*OUT*/&di);
	    ProfileDist(NJ->profiles[join.j],NJ->profiles[iNode],NJ->nPos,NJ->distance_matrix,/*OUT*/&dj);
	    deltaProfileVarTot += dj.dist - di.dist;
	  }
	}
	double lambdaTot = 0.5 + (deltaProfileVarTot+deltaVarDiam)/(2*(nActive-2)*varIJ);
	if (lambdaTot < 0) lambdaTot = 0;
	if (lambdaTot > 1) lambdaTot = 1;
	if (fabs(bionjWeight-lambdaTot) > 0.01 || verbose > 4)
	  fprintf(stderr, "deltaProfileVar actual %.6f estimated %.6f lambda actual %.3f estimated %.3f\n",
		  deltaProfileVarTot,deltaProfileVarOut,lambdaTot,bionjWeight);
      }
    }
    if (verbose > 2) fprintf(stderr, "Join\t%d\t%d\t%.6f\tlambda\t%.6f\tselfw\t%.3f\t%.3f\tnew\t%d\n",
			     join.i < join.j ? join.i : join.j,
			     join.i < join.j ? join.j : join.i,
			     join.criterion, bionjWeight,
			     NJ->selfweight[join.i < join.j ? join.i : join.j],
			     NJ->selfweight[join.i < join.j ? join.j : join.i],
			     newnode);
    
    /* New node inherits a BIONJ-weighted mix of its children's diameters,
       variance diameters, and profiles */
    NJ->diameter[newnode] = bionjWeight * (NJ->branchlength[join.i] + NJ->diameter[join.i])
      + (1-bionjWeight) * (NJ->branchlength[join.j] + NJ->diameter[join.j]);
    NJ->varDiameter[newnode] = bionjWeight * NJ->varDiameter[join.i]
      + (1-bionjWeight) * NJ->varDiameter[join.j]
      + bionjWeight * (1-bionjWeight) * varIJ;

    NJ->profiles[newnode] = AverageProfile(NJ->profiles[join.i],NJ->profiles[join.j],
					   NJ->nPos, NJ->nConstraints,
					   NJ->distance_matrix,
					   bionj ? bionjWeight : /*noweight*/-1.0);

    /* Update out-distances and total diameters */
    int changedActiveOutProfile = nActiveOutProfileReset - (nActive-1);
    if (changedActiveOutProfile >= nResetOutProfile
	&& changedActiveOutProfile >= fResetOutProfile * nActiveOutProfileReset) {
      /* Recompute the outprofile from scratch to avoid roundoff error */
      profile_t **activeProfiles = (profile_t**)mymalloc(sizeof(profile_t*)*(nActive-1));
      int nSaved = 0;
      NJ->totdiam = 0;
      for (iNode=0;iNode<NJ->maxnode;iNode++) {
	if (NJ->parent[iNode]<0) {
	  assert(nSaved < nActive-1);
	  activeProfiles[nSaved++] = NJ->profiles[iNode];
	  NJ->totdiam += NJ->diameter[iNode];
	}
      }
      assert(nSaved==nActive-1);
      FreeProfile(NJ->outprofile, NJ->nPos, NJ->nConstraints);
      if(verbose>2) fprintf(stderr,"Recomputing outprofile %d %d\n",nActiveOutProfileReset,nActive-1);
      NJ->outprofile = OutProfile(activeProfiles, nSaved,
				  NJ->nPos, NJ->nConstraints,
				  NJ->distance_matrix);
      activeProfiles = myfree(activeProfiles, sizeof(profile_t*)*(nActive-1));
      nActiveOutProfileReset = nActive-1;
    } else {
      /* Cheap incremental update: swap the two joined profiles for the new one */
      UpdateOutProfile(/*OUT*/NJ->outprofile,
		       NJ->profiles[join.i], NJ->profiles[join.j], NJ->profiles[newnode],
		       nActive,
		       NJ->nPos, NJ->nConstraints,
		       NJ->distance_matrix);
      NJ->totdiam += NJ->diameter[newnode] - NJ->diameter[join.i] - NJ->diameter[join.j];
    }

    /* Store self-dist for use in other computations */
    besthit_t selfdist;
    ProfileDist(NJ->profiles[newnode],NJ->profiles[newnode],NJ->nPos,NJ->distance_matrix,/*OUT*/&selfdist);
    NJ->selfdist[newnode] = selfdist.dist;
    NJ->selfweight[newnode] = selfdist.weight;

    /* Find the best hit of the joined node IJ */
    if (m>0) {
      TopHitJoin(newnode, /*IN/UPDATE*/NJ, nActive-1, /*IN/OUT*/tophits);
    } else {
      /* Not using top-hits, so we update all out-distances */
      for (iNode = 0; iNode < NJ->maxnode; iNode++) {
	if (NJ->parent[iNode] < 0) {
	  /* True nActive is now nActive-1 */
	  SetOutDistance(/*IN/UPDATE*/NJ, iNode, nActive-1);
	}
      }
    
      if(visible != NULL) {
	SetBestHit(newnode, NJ, nActive-1, /*OUT*/&visible[newnode], /*OUT OPTIONAL*/besthitNew);
	if (verbose>2)
	  fprintf(stderr,"Visible %d %d %f %f\n",
		  visible[newnode].i, visible[newnode].j,
		  visible[newnode].dist, visible[newnode].criterion);
	if (besthitNew != NULL) {
	  /* Use distances to new node to update visible set entries that are non-optimal */
	  for (iNode = 0; iNode < NJ->maxnode; iNode++) {
	    if (NJ->parent[iNode] >= 0 || iNode == newnode)
	      continue;
	    int iOldVisible = visible[iNode].j;
	    assert(iOldVisible>=0);
	    assert(visible[iNode].i == iNode);

	    /* Update the criterion; use nActive-1 because haven't decremented nActive yet */
	    if (NJ->parent[iOldVisible] < 0)
	      SetCriterion(/*IN/OUT*/NJ, nActive-1, &visible[iNode]);
	    
	    /* Replace the visible entry if its target was joined away, or if
	       the new node is a strictly better hit */
	    if (NJ->parent[iOldVisible] >= 0
		|| besthitNew[iNode].criterion < visible[iNode].criterion) {
	      if(verbose>3) fprintf(stderr,"Visible %d reset from %d to %d (%f vs. %f)\n",
				    iNode, iOldVisible, 
				    newnode, visible[iNode].criterion, besthitNew[iNode].criterion);
	      if(NJ->parent[iOldVisible] < 0) nVisibleUpdate++;
	      visible[iNode].j = newnode;
	      visible[iNode].dist = besthitNew[iNode].dist;
	      visible[iNode].criterion = besthitNew[iNode].criterion;
	    }
	  } /* end loop over all nodes */
	} /* end if recording all hits of new node */
      } /* end if keeping a visible set */
    } /* end else (m==0) */
  } /* end loop over nActive */

#ifdef TRACK_MEMORY
  if (verbose>1) {
    struct mallinfo mi = mallinfo();
    fprintf(stderr, "Memory @ end of FastNJ(): %.2f MB (%.1f byte/pos) useful %.2f expected %.2f\n",
	    (mi.arena+mi.hblkhd)/1.0e6, (mi.arena+mi.hblkhd)/(double)(NJ->nSeq*(double)NJ->nPos),
	    mi.uordblks/1.0e6, mymallocUsed/1e6);
  }
#endif

  /* We no longer need the tophits, visible set, etc. */
  if (visible != NULL) visible = myfree(visible,sizeof(besthit_t)*NJ->maxnodes);
  if (besthitNew != NULL) besthitNew = myfree(besthitNew,sizeof(besthit_t)*NJ->maxnodes);
  tophits = FreeTopHits(tophits);

  /* Add a root for the 3 remaining nodes */
  int top[3];
  int nTop = 0;
  for (iNode = 0; iNode < NJ->maxnode; iNode++) {
    if (NJ->parent[iNode] < 0) {
      assert(nTop <= 2);
      top[nTop++] = iNode;
    }
  }
  assert(nTop==3);
  
  NJ->root = NJ->maxnode++;
  NJ->child[NJ->root].nChild = 3;
  for (nTop = 0; nTop < 3; nTop++) {
    NJ->parent[top[nTop]] = NJ->root;
    NJ->child[NJ->root].child[nTop] = top[nTop];
  }

  /* Three-point formula: branch lengths of the root's children from the
     three pairwise (diameter-corrected) distances */
  besthit_t dist01, dist02, dist12;
  ProfileDist(NJ->profiles[top[0]], NJ->profiles[top[1]], NJ->nPos, NJ->distance_matrix, /*OUT*/&dist01);
  ProfileDist(NJ->profiles[top[0]], NJ->profiles[top[2]], NJ->nPos, NJ->distance_matrix, /*OUT*/&dist02);
  ProfileDist(NJ->profiles[top[1]], NJ->profiles[top[2]], NJ->nPos, NJ->distance_matrix, /*OUT*/&dist12);

  double d01 = dist01.dist - NJ->diameter[top[0]] - NJ->diameter[top[1]];
  double d02 = dist02.dist - NJ->diameter[top[0]] - NJ->diameter[top[2]];
  double d12 = dist12.dist - NJ->diameter[top[1]] - NJ->diameter[top[2]];
  NJ->branchlength[top[0]] = (d01 + d02 - d12)/2;
  NJ->branchlength[top[1]] = (d01 + d12 - d02)/2;
  NJ->branchlength[top[2]] = (d02 + d12 - d01)/2;

  /* Check how accurate the outprofile is */
  if (verbose>2) {
    profile_t *p[3] = {NJ->profiles[top[0]], NJ->profiles[top[1]], NJ->profiles[top[2]]};
    profile_t *out = OutProfile(p, 3, NJ->nPos, NJ->nConstraints, NJ->distance_matrix);
    int i;
    double freqerror = 0;
    double weighterror = 0;
    for (i=0;i<NJ->nPos;i++) {
      weighterror += fabs(out->weights[i] - NJ->outprofile->weights[i]);
      int k;
      for(k=0;k<nCodes;k++)
	freqerror += fabs(out->vectors[nCodes*i+k] - NJ->outprofile->vectors[nCodes*i+k]);
    }
    fprintf(stderr,"Roundoff error in outprofile@end: WeightError %f FreqError %f\n", weighterror, freqerror);
    FreeProfile(out, NJ->nPos, NJ->nConstraints);
  }
  return;
}
/* Exhaustive O(nActive^2) join search: evaluate the criterion for every
   pair of active (parentless) nodes and return the pair that minimizes it
   in *join. Asserts that at least one valid pair was found. */
void ExhaustiveNJSearch(NJ_t *NJ, int nActive, /*OUT*/besthit_t *join) {
  join->i = -1;
  join->j = -1;
  join->weight = 0;
  join->dist = 1e20;
  join->criterion = 1e20;
  double bestCriterion = 1e20;

  int i;
  for (i = 0; i < NJ->maxnode - 1; i++) {
    if (NJ->parent[i] >= 0)
      continue; /* node i already joined */
    int j;
    for (j = i + 1; j < NJ->maxnode; j++) {
      if (NJ->parent[j] >= 0)
	continue; /* node j already joined */
      besthit_t candidate;
      candidate.i = i;
      candidate.j = j;
      SetDistCriterion(NJ, nActive, /*IN/OUT*/&candidate);
      if (candidate.criterion < bestCriterion) {
	bestCriterion = candidate.criterion;
	*join = candidate;
      }
    }
  }
  assert (join->i >= 0 && join->j >= 0);
}
/* Pick the best join from the cached best-hit list, then (unless -fastest)
   hill-climb: recompute the best hit of each endpoint of the candidate join
   until neither endpoint finds a better partner.
   besthits[iNode] holds a previously computed best partner for iNode; the
   criterion is recomputed here because out-distances change as joins happen. */
void FastNJSearch(NJ_t *NJ, int nActive, /*IN/OUT*/besthit_t *besthits, /*OUT*/besthit_t *join) {
  join->i = -1;
  join->j = -1;
  join->dist = 1e20;
  join->weight = 0;
  join->criterion = 1e20;
  int iNode;
  for (iNode = 0; iNode < NJ->maxnode; iNode++) {
    int jNode = besthits[iNode].j;
    if (NJ->parent[iNode] < 0 && NJ->parent[jNode] < 0) { /* both i and j still active */
      /* recompute criterion to reflect the current out-distances */
      SetCriterion(NJ, nActive, /*IN/OUT*/&besthits[iNode]);
      if (besthits[iNode].criterion < join->criterion)
        *join = besthits[iNode];
    }
  }
  if(!fastest) {
    /* Local hill-climbing: repeatedly replace the candidate join with the
       fresh best hit of one of its endpoints until it stops changing */
    int changed;
    do {
      changed = 0;
      assert(join->i >= 0 && join->j >= 0);
      SetBestHit(join->i, NJ, nActive, /*OUT*/&besthits[join->i], /*OUT IGNORED*/NULL);
      if (besthits[join->i].j != join->j) {
        changed = 1;
        if (verbose>2)
          fprintf(stderr,"BetterI\t%d\t%d\t%d\t%d\t%f\t%f\n",
                  join->i,join->j,besthits[join->i].i,besthits[join->i].j,
                  join->criterion,besthits[join->i].criterion);
      }
      /* Save the best hit either way, because the out-distance has probably changed
         since we started the computation. */
      join->j = besthits[join->i].j;
      join->weight = besthits[join->i].weight;
      join->dist = besthits[join->i].dist;
      join->criterion = besthits[join->i].criterion;
      SetBestHit(join->j, NJ, nActive, /*OUT*/&besthits[join->j], /*OUT IGNORE*/NULL);
      if (besthits[join->j].j != join->i) {
        changed = 1;
        if (verbose>2)
          fprintf(stderr,"BetterJ\t%d\t%d\t%d\t%d\t%f\t%f\n",
                  join->i,join->j,besthits[join->j].i,besthits[join->j].j,
                  join->criterion,besthits[join->j].criterion);
        /* only adopt j's partner when it differs; otherwise join is already mutual */
        join->i = besthits[join->j].j;
        join->weight = besthits[join->j].weight;
        join->dist = besthits[join->j].dist;
        join->criterion = besthits[join->j].criterion;
      }
      if(changed) nHillBetter++; /* global counter of successful hill-climb steps */
    } while(changed);
  }
}
/* A token is one of ():;, or an alphanumeric string without whitespace
   Any whitespace between tokens is ignored
   Returns a pointer to a static buffer (NOT reentrant; the token is
   overwritten by the next call), or NULL at end of file.
   Tokens longer than BUFFER_SIZE-1 are a fatal error. */
char *ReadTreeToken(FILE *fp) {
  static char buf[BUFFER_SIZE]; /* token storage shared across calls */
  int len = 0;
  int c;
  for (c = fgetc(fp); c != EOF; c = fgetc(fp)) {
    if (c == '(' || c == ')' || c == ':' || c == ';' || c == ',') {
      /* standalone token */
      if (len == 0) {
        buf[len++] = c;
        buf[len] = '\0';
        return(buf);
      } else {
        /* punctuation terminates the current word; push it back for next call */
        ungetc(c, fp);
        buf[len] = '\0';
        return(buf);
      }
    } else if (isspace(c)) {
      if (len > 0) {
        buf[len] = '\0';
        return(buf);
      }
      /* else ignore whitespace at beginning of token */
    } else {
      /* not whitespace or standalone token */
      buf[len++] = c;
      if (len >= BUFFER_SIZE) {
        buf[BUFFER_SIZE-1] = '\0';
        fprintf(stderr, "Token too long in tree file, token begins with\n%s\n", buf);
        exit(1);
      }
    }
  }
  if (len > 0) {
    /* return the token we have so far */
    buf[len] = '\0';
    return(buf);
  }
  /* else */
  return(NULL);
}
/* Report a fatal parse error for the input tree and terminate the program.
   token may be NULL, which is reported as end of file. */
void ReadTreeError(char *err, char *token) {
  const char *shown = (token == NULL) ? "(End of file)" : token;
  fprintf(stderr, "Tree parse error: unexpected token '%s' -- %s\n", shown, err);
  exit(1);
}
/* Attach child under parent: set the child's parent pointer and append the
   child to the parent's child list. The child must not already have a parent
   and the parent must have fewer than 3 children. */
void ReadTreeAddChild(int parent, int child, /*IN/OUT*/int *parents, /*IN/OUT*/children_t *children) {
  assert(parent >= 0);
  assert(child >= 0);
  assert(parents[child] < 0);
  children_t *pc = &children[parent];
  assert(pc->nChild < 3);
  parents[child] = parent;
  pc->child[pc->nChild] = child;
  pc->nChild++;
}
/* Look up a leaf name from the tree file, map it to its unique-sequence index,
   and attach it to parent -- unless that unique sequence was already placed
   (duplicate sequences appear only once in the NJ structure), in which case
   the leaf is silently skipped. A name that is absent from the alignment
   (or ambiguous in the hash) is a fatal parse error. */
void ReadTreeMaybeAddLeaf(int parent, char *name,
                          hashstrings_t *hashnames, uniquify_t *unique,
                          /*IN/OUT*/int *parents, /*IN/OUT*/children_t *children) {
  hashiterator_t hi = FindMatch(hashnames,name);
  if (HashCount(hashnames,hi) != 1)
    ReadTreeError("not recognized as a sequence name", name);
  int iSeqNonunique = HashFirst(hashnames,hi);
  assert(iSeqNonunique >= 0 && iSeqNonunique < unique->nSeq);
  int iSeqUnique = unique->alnToUniq[iSeqNonunique];
  assert(iSeqUnique >= 0 && iSeqUnique < unique->nUnique);
  /* Either record this leaves' parent (if it is -1) or ignore this leaf (if already seen) */
  if (parents[iSeqUnique] < 0) {
    ReadTreeAddChild(parent, iSeqUnique, /*IN/OUT*/parents, /*IN/OUT*/children);
    if(verbose > 5)
      fprintf(stderr, "Found leaf uniq%d name %s child of %d\n", iSeqUnique, name, parent);
  } else {
    if (verbose > 5)
      fprintf(stderr, "Skipped redundant leaf uniq%d name %s\n", iSeqUnique, name);
  }
}
/* Splice node out of the tree: detach it from its parent's child list and
   re-attach the node's own children (if any) directly to that parent.
   Used while simplifying the parsed input tree (removing internal nodes
   with too few children). The node itself is left with parent -1 and no
   children; the parent must end with at most 3 children. */
void ReadTreeRemove(/*IN/OUT*/int *parents, /*IN/OUT*/children_t *children, int node) {
  if(verbose > 5)
    fprintf(stderr,"Removing node %d parent %d\n", node, parents[node]);
  assert(parents[node] >= 0);
  int parent = parents[node];
  parents[node] = -1;
  children_t *pc = &children[parent];
  /* find node's slot in the parent's child list */
  int oldn;
  for (oldn = 0; oldn < pc->nChild; oldn++) {
    if (pc->child[oldn] == node)
      break;
  }
  assert(oldn < pc->nChild);
  /* move successor nodes back in child list and shorten list */
  int i;
  for (i = oldn; i < pc->nChild-1; i++)
    pc->child[i] = pc->child[i+1];
  pc->nChild--;
  /* add its children to parent's child list */
  children_t *nc = &children[node];
  if (nc->nChild > 0) {
    assert(nc->nChild<=2);
    assert(pc->nChild < 3);
    assert(pc->nChild + nc->nChild <= 3);
    int j;
    for (j = 0; j < nc->nChild; j++) {
      if(verbose > 5)
        fprintf(stderr,"Repointing parent %d to child %d\n", parent, nc->child[j]);
      pc->child[pc->nChild++] = nc->child[j];
      parents[nc->child[j]] = parent;
    }
    nc->nChild = 0;
  }
}
/* Parse a Newick-style starting tree (-intree) into the NJ structure.
   Phases:
     1. Parse the file into a scratch parent[]/children[] forest, ignoring
        branch lengths and numeric internal labels, collapsing duplicate
        sequences onto their unique representative.
     2. Verify every unique sequence appears in the tree.
     3. Simplify: repeatedly remove internal nodes with < 2 children, and
        re-root if the root is left with a single child.
     4. If the root has only 2 children, merge one binary child into the
        root so the root has 3 children (unrooted convention).
     5. Map the surviving scratch nodes onto NJ node indices and copy the
        parent/child relations into NJ; recompute internal profiles.
   Fatal parse errors exit via ReadTreeError. */
void ReadTree(/*IN/OUT*/NJ_t *NJ,
              /*IN*/uniquify_t *unique,
              /*IN*/hashstrings_t *hashnames,
              /*READ*/FILE *fpInTree) {
  assert(NJ->nSeq == unique->nUnique);
  /* First, do a preliminary parse of the tree to with non-unique leaves ignored
     We need to store this separately from NJ because it may have too many internal nodes
     (matching sequences show up once in the NJ but could be in multiple places in the tree)
     Will use iUnique as the index of nodes, as in the NJ structure
  */
  int maxnodes = unique->nSeq*2;
  int maxnode = unique->nSeq; /* nodes < nSeq are leaves; >= nSeq are internal */
  int *parent = (int*)mymalloc(sizeof(int)*maxnodes);
  children_t *children = (children_t *)mymalloc(sizeof(children_t)*maxnodes);
  int root = maxnode++;
  int i;
  for (i = 0; i < maxnodes; i++) {
    parent[i] = -1;
    children[i].nChild = 0;
  }
  /* The stack is the current path to the root, with the root at the first (top) position */
  int stack_size = 1;
  int *stack = (int*)mymalloc(sizeof(int)*maxnodes);
  stack[0] = root;
  /* nDown counts pending '(' (descents); nUp counts pending ')' (ascents).
     New nodes/pops are deferred until we know how many in a row there are. */
  int nDown = 0;
  int nUp = 0;
  char *token;
  token = ReadTreeToken(fpInTree);
  if (token == NULL || *token != '(')
    ReadTreeError("No '(' at start", token);
  /* nDown is still 0 because we have created the root */
  while ((token = ReadTreeToken(fpInTree)) != NULL) {
    if (nDown > 0) {		/* In a stream of parentheses */
      if (*token == '(')
        nDown++;
      else if (*token == ',' || *token == ';' || *token == ':' || *token == ')')
        ReadTreeError("while reading parentheses", token);
      else {
        /* Add intermediate nodes if nDown was > 1 (for nDown=1, the only new node is the leaf) */
        while (nDown-- > 0) {
          int new = maxnode++;
          assert(new < maxnodes);
          ReadTreeAddChild(stack[stack_size-1], new, /*IN/OUT*/parent, /*IN/OUT*/children);
          if(verbose > 5)
            fprintf(stderr, "Added internal child %d of %d, stack size increase to %d\n",
                    new, stack[stack_size-1],stack_size+1);
          stack[stack_size++] = new;
          assert(stack_size < maxnodes);
        }
        ReadTreeMaybeAddLeaf(stack[stack_size-1], token,
                             hashnames, unique,
                             /*IN/OUT*/parent, /*IN/OUT*/children);
      }
    } else if (nUp > 0) {
      if (*token == ';') {	/* end the tree? */
        if (nUp != stack_size)
          ReadTreeError("unbalanced parentheses", token);
        else
          break;
      } else if (*token == ')')
        nUp++;
      else if (*token == '(')
        ReadTreeError("unexpected '(' after ')'", token);
      else if (*token == ':') {
        token = ReadTreeToken(fpInTree);
        /* Read the branch length and ignore it */
        if (token == NULL || (*token != '-' && !isdigit(*token)))
          ReadTreeError("not recognized as a branch length", token);
      } else if (*token == ',') {
        /* Go back up the stack the correct #times */
        while (nUp-- > 0) {
          stack_size--;
          if(verbose > 5)
            fprintf(stderr, "Up to nUp=%d stack size %d at %d\n",
                    nUp, stack_size, stack[stack_size-1]);
          if (stack_size <= 0)
            ReadTreeError("too many ')'", token);
        }
        nUp = 0;
      } else if (*token == '-' || isdigit(*token))
        ; 			/* ignore bootstrap value */
      else
        fprintf(stderr, "Warning while parsing tree: non-numeric label %s for internal node\n",
                token);
    } else if (*token == '(') {
      nDown = 1;
    } else if (*token == ')') {
      nUp = 1;
    } else if (*token == ':') {
      token = ReadTreeToken(fpInTree);
      if (token == NULL || (*token != '-' && !isdigit(*token)))
        ReadTreeError("not recognized as a branch length", token);
    } else if (*token == ',') {
      ;				/* do nothing */
    } else if (*token == ';')
      ReadTreeError("unexpected token", token);
    else
      ReadTreeMaybeAddLeaf(stack[stack_size-1], token,
                           hashnames, unique,
                           /*IN/OUT*/parent, /*IN/OUT*/children);
  }
  /* Verify that all sequences were seen */
  for (i = 0; i < unique->nUnique; i++) {
    if (parent[i] < 0) {
      fprintf(stderr, "Alignment sequence %d (unique %d) absent from input tree\n"
              "The starting tree (the argument to -intree) must include all sequences in the alignment!\n",
              unique->uniqueFirst[i], i);
      exit(1);
    }
  }
  /* Simplify the tree -- remove all internal nodes with < 2 children
     Keep trying until no nodes get removed
  */
  int nRemoved;
  do {
    nRemoved = 0;
    /* Here stack is the list of nodes we haven't visited yet while doing
       a tree traversal */
    stack_size = 1;
    stack[0] = root;
    while (stack_size > 0) {
      int node = stack[--stack_size];
      if (node >= unique->nUnique) { /* internal node */
        if (children[node].nChild <= 1) {
          if (node != root) {
            ReadTreeRemove(/*IN/OUT*/parent,/*IN/OUT*/children,node);
            nRemoved++;
          } else if (node == root && children[node].nChild == 1) {
            /* root with a single child: promote that child to be the root */
            int newroot = children[node].child[0];
            parent[newroot] = -1;
            children[root].nChild = 0;
            nRemoved++;
            if(verbose > 5)
              fprintf(stderr,"Changed root from %d to %d\n",root,newroot);
            root = newroot;
            stack[stack_size++] = newroot;
          }
        } else {
          int j;
          for (j = 0; j < children[node].nChild; j++) {
            assert(stack_size < maxnodes);
            stack[stack_size++] = children[node].child[j];
            if(verbose > 5)
              fprintf(stderr,"Added %d to stack\n", stack[stack_size-1]);
          }
        }
      }
    }
  } while (nRemoved > 0);
  /* Simplify the root node to 3 children if it has 2 */
  if (children[root].nChild == 2) {
    for (i = 0; i < 2; i++) {
      int child = children[root].child[i];
      assert(child >= 0 && child < maxnodes);
      if (children[child].nChild == 2) {
        ReadTreeRemove(parent,children,child); /* replace root -> child -> A,B with root->A,B */
        break;
      }
    }
  }
  for (i = 0; i < maxnodes; i++)
    if(verbose > 5)
      fprintf(stderr,"Simplfied node %d has parent %d nchild %d\n",
              i, parent[i], children[i].nChild);
  /* Map the remaining internal nodes to NJ nodes */
  int *map = (int*)mymalloc(sizeof(int)*maxnodes);
  for (i = 0; i < unique->nUnique; i++)
    map[i] = i; /* leaves keep their indices */
  for (i = unique->nUnique; i < maxnodes; i++)
    map[i] = -1;
  stack_size = 1;
  stack[0] = root;
  while (stack_size > 0) {
    int node = stack[--stack_size];
    if (node >= unique->nUnique) { /* internal node */
      assert(node == root || children[node].nChild > 1);
      map[node] = NJ->maxnode++;
      for (i = 0; i < children[node].nChild; i++) {
        assert(stack_size < maxnodes);
        stack[stack_size++] = children[node].child[i];
      }
    }
  }
  for (i = 0; i < maxnodes; i++)
    if(verbose > 5)
      fprintf(stderr,"Map %d to %d (parent %d nchild %d)\n",
              i, map[i], parent[i], children[i].nChild);
  /* Set NJ->parent, NJ->children, NJ->root */
  NJ->root = map[root];
  int node;
  for (node = 0; node < maxnodes; node++) {
    int njnode = map[node];
    if (njnode >= 0) {
      NJ->child[njnode].nChild = children[node].nChild;
      for (i = 0; i < children[node].nChild; i++) {
        assert(children[node].child[i] >= 0 && children[node].child[i] < maxnodes);
        NJ->child[njnode].child[i] = map[children[node].child[i]];
      }
      if (parent[node] >= 0)
        NJ->parent[njnode] = map[parent[node]];
    }
  }
  /* Make sure that parent/child relationships match */
  for (i = 0; i < NJ->maxnode; i++) {
    children_t *c = &NJ->child[i];
    int j;
    for (j = 0; j < c->nChild;j++)
      assert(c->child[j] >= 0 && c->child[j] < NJ->maxnode && NJ->parent[c->child[j]] == i);
  }
  assert(NJ->parent[NJ->root] < 0);
  map = myfree(map,sizeof(int)*maxnodes);
  stack = myfree(stack,sizeof(int)*maxnodes);
  children = myfree(children,sizeof(children_t)*maxnodes);
  parent = myfree(parent,sizeof(int)*maxnodes);
  /* Compute profiles as balanced -- the NNI stage will recompute these
     profiles anyway
  */
  traversal_t traversal = InitTraversal(NJ);
  node = NJ->root;
  while((node = TraversePostorder(node, NJ, /*IN/OUT*/traversal, /*pUp*/NULL)) >= 0) {
    if (node >= NJ->nSeq && node != NJ->root)
      SetProfile(/*IN/OUT*/NJ, node, /*noweight*/-1.0);
  }
  traversal = FreeTraversal(traversal,NJ);
}
/* Print topology using node indices as node names
   Iterative depth-first print of the tree in Newick form, using the node
   index as the label; branch lengths are included when useLen is true.
   Trees with fewer than 4 sequences are skipped entirely. */
void PrintNJInternal(FILE *fp, NJ_t *NJ, bool useLen) {
  if (NJ->nSeq < 4) {
    return;
  }
  /* stack entry: node to visit; end=1 means we are closing its subtree */
  typedef struct { int node; int end; } stack_t;
  stack_t *stack = (stack_t *)mymalloc(sizeof(stack_t)*NJ->maxnodes);
  int stackSize = 1;
  stack[0].node = NJ->root;
  stack[0].end = 0;
  while(stackSize>0) {
    stack_t *last = &stack[stackSize-1];
    stackSize--;
    /* Save last, as we are about to overwrite it */
    int node = last->node;
    int end = last->end;
    if (node < NJ->nSeq) {
      /* leaf: print a comma unless it is its parent's first child */
      if (NJ->child[NJ->parent[node]].child[0] != node) fputs(",",fp);
      fprintf(fp, "%d", node);
      if (useLen)
        fprintf(fp, ":%.4f", NJ->branchlength[node]);
    } else if (end) {
      /* closing an internal node's subtree */
      fprintf(fp, ")%d", node);
      if (useLen)
        fprintf(fp, ":%.4f", NJ->branchlength[node]);
    } else {
      /* opening an internal node */
      if (node != NJ->root && NJ->child[NJ->parent[node]].child[0] != node) fprintf(fp, ",");
      fprintf(fp, "(");
      stackSize++;
      stack[stackSize-1].node = node;
      stack[stackSize-1].end = 1;
      children_t *c = &NJ->child[node];
      /* put children on in reverse order because we use the last one first */
      int i;
      for (i = c->nChild-1; i >=0; i--) {
        stackSize++;
        stack[stackSize-1].node = c->child[i];
        stack[stackSize-1].end = 0;
      }
    }
  }
  fprintf(fp, ";\n");
  stack = myfree(stack, sizeof(stack_t)*NJ->maxnodes);
}
/* Print the tree in Newick format with sequence names as leaf labels.
   Duplicate sequences (collapsed during uniquify) are expanded into a
   zero-branch-length subtree of all their names. bShowSupport adds support
   values on internal nodes; bQuote wraps names in single quotes. */
void PrintNJ(FILE *fp, NJ_t *NJ, char **names, uniquify_t *unique, bool bShowSupport, bool bQuote) {
  /* And print the tree: depth first search
   * The stack contains
   * list of remaining children with their depth
   * parent node, with a flag of -1 so I know to print right-paren
   */
  if (NJ->nSeq==1 && unique->alnNext[unique->uniqueFirst[0]] >= 0) {
    /* Special case -- otherwise we end up with double parens */
    int first = unique->uniqueFirst[0];
    assert(first >= 0 && first < unique->nSeq);
    fprintf(fp, bQuote ? "('%s':0.0" : "(%s:0.0", names[first]);
    int iName = unique->alnNext[first];
    while (iName >= 0) {
      assert(iName < unique->nSeq);
      fprintf(fp, bQuote ? ",'%s':0.0" : ",%s:0.0", names[iName]);
      iName = unique->alnNext[iName];
    }
    fprintf(fp,");\n");
    return;
  }
  /* stack entry: node to visit; end=1 means we are closing its subtree */
  typedef struct { int node; int end; } stack_t;
  stack_t *stack = (stack_t *)mymalloc(sizeof(stack_t)*NJ->maxnodes);
  int stackSize = 1;
  stack[0].node = NJ->root;
  stack[0].end = 0;
  while(stackSize>0) {
    stack_t *last = &stack[stackSize-1];
    stackSize--;
    /* Save last, as we are about to overwrite it */
    int node = last->node;
    int end = last->end;
    if (node < NJ->nSeq) {
      /* leaf: comma separator unless first child of its parent */
      if (NJ->child[NJ->parent[node]].child[0] != node) fputs(",",fp);
      int first = unique->uniqueFirst[node];
      assert(first >= 0 && first < unique->nSeq);
      /* Print the name, or the subtree of duplicate names */
      if (unique->alnNext[first] == -1) {
        fprintf(fp, bQuote ? "'%s'" : "%s", names[first]);
      } else {
        /* duplicates: emit (name1:0.0,name2:0.0,...) */
        fprintf(fp, bQuote ? "('%s':0.0" : "(%s:0.0", names[first]);
        int iName = unique->alnNext[first];
        while (iName >= 0) {
          assert(iName < unique->nSeq);
          fprintf(fp, bQuote ? ",'%s':0.0" : ",%s:0.0", names[iName]);
          iName = unique->alnNext[iName];
        }
        fprintf(fp,")");
      }
      /* Print the branch length */
#ifdef USE_DOUBLE
#define FP_FORMAT "%.9f"
#else
#define FP_FORMAT "%.5f"
#endif
      fprintf(fp, ":" FP_FORMAT, NJ->branchlength[node]);
    } else if (end) {
      /* closing an internal node: root gets no length/support */
      if (node == NJ->root)
        fprintf(fp, ")");
      else if (bShowSupport)
        fprintf(fp, ")%.3f:" FP_FORMAT, NJ->support[node], NJ->branchlength[node]);
      else
        fprintf(fp, "):" FP_FORMAT, NJ->branchlength[node]);
    } else {
      /* opening an internal node */
      if (node != NJ->root && NJ->child[NJ->parent[node]].child[0] != node) fprintf(fp, ",");
      fprintf(fp, "(");
      stackSize++;
      stack[stackSize-1].node = node;
      stack[stackSize-1].end = 1;
      children_t *c = &NJ->child[node];
      /* put children on in reverse order because we use the last one first */
      int i;
      for (i = c->nChild-1; i >=0; i--) {
        stackSize++;
        stack[stackSize-1].node = c->child[i];
        stack[stackSize-1].end = 0;
      }
    }
  }
  fprintf(fp, ";\n");
  stack = myfree(stack, sizeof(stack_t)*NJ->maxnodes);
}
/* Read an alignment in FASTA or interleaved PHYLIP format from fp.
   Format is auto-detected: a leading '>' means FASTA, otherwise the header
   line must be "nSeq nPos" (PHYLIP). Exits with a message on any parse
   error. Post-processing: '.' is replaced by '-' (with a warning), and for
   nucleotide data (nCodes==4) 'U'->'T' and 'N'->'X'.
   Returns a freshly allocated alignment_t owning names and seqs;
   bQuote relaxes which characters end a FASTA name (-quote option). */
alignment_t *ReadAlignment(/*IN*/FILE *fp, bool bQuote) {
  /* bQuote supports the -quote option */
  int nSeq = 0;
  int nPos = 0;
  char **names = NULL;
  char **seqs = NULL;
  char buf[BUFFER_SIZE] = "";
  if (fgets(buf,sizeof(buf),fp) == NULL) {
    fprintf(stderr, "Error reading header line\n");
    exit(1);
  }
  int nSaved = 100; /* current capacity of names[]/seqs[] in FASTA mode */
  if (buf[0] == '>') {
    /* FASTA, truncate names at any of these */
    char *nameStop = bQuote ? "'\t\r\n" : "(),: \t\r\n";
    char *seqSkip = " \t\r\n";  /* skip these characters in the sequence */
    seqs = (char**)mymalloc(sizeof(char*) * nSaved);
    names = (char**)mymalloc(sizeof(char*) * nSaved);
    do {
      /* loop over lines */
      if (buf[0] == '>') {
        /* truncate the name */
        char *p, *q;
        for (p = buf+1; *p != '\0'; p++) {
          for (q = nameStop; *q != '\0'; q++) {
            if (*p == *q) {
              *p = '\0';
              break;
            }
          }
          if (*p == '\0') break;
        }
        /* allocate space for another sequence */
        nSeq++;
        if (nSeq > nSaved) {
          /* grow both arrays geometrically */
          int nNewSaved = nSaved*2;
          seqs = myrealloc(seqs,sizeof(char*)*nSaved,sizeof(char*)*nNewSaved, /*copy*/false);
          names = myrealloc(names,sizeof(char*)*nSaved,sizeof(char*)*nNewSaved, /*copy*/false);
          nSaved = nNewSaved;
        }
        names[nSeq-1] = (char*)mymemdup(buf+1,strlen(buf)); /* strlen(buf) = strlen(buf+1)+1, includes NUL */
        seqs[nSeq-1] = NULL;
      } else {
        /* count non-space characters and append to sequence */
        int nKeep = 0;
        char *p, *q;
        for (p=buf; *p != '\0'; p++) {
          for (q=seqSkip; *q != '\0'; q++) {
            if (*p == *q)
              break;
          }
          if (*p != *q)
            nKeep++;
        }
        int nOld = (seqs[nSeq-1] == NULL) ? 0 : strlen(seqs[nSeq-1]);
        seqs[nSeq-1] = (char*)myrealloc(seqs[nSeq-1], nOld, nOld+nKeep+1, /*copy*/false);
        if (nOld+nKeep > nPos)
          nPos = nOld + nKeep; /* track the longest sequence seen so far */
        char *out = seqs[nSeq-1] + nOld;
        /* second pass: copy the kept characters */
        for (p=buf; *p != '\0'; p++) {
          for (q=seqSkip; *q != '\0'; q++) {
            if (*p == *q)
              break;
          }
          if (*p != *q) {
            *out = *p;
            out++;
          }
        }
        assert(out-seqs[nSeq-1] == nKeep + nOld);
        *out = '\0';
      }
    } while(fgets(buf,sizeof(buf),fp) != NULL);
    if (seqs[nSeq-1] == NULL) {
      fprintf(stderr, "No sequence data for last entry %s\n",names[nSeq-1]);
      exit(1);
    }
    /* shrink arrays to their final size */
    names = myrealloc(names,sizeof(char*)*nSaved,sizeof(char*)*nSeq, /*copy*/false);
    seqs = myrealloc(seqs,sizeof(char*)*nSaved,sizeof(char*)*nSeq, /*copy*/false);
  } else {
    /* PHYLIP interleaved-like format
       Allow arbitrary length names, require spaces between names and sequences
       Allow multiple alignments, either separated by a single empty line (e.g. seqboot output)
       or not.
    */
    if (buf[0] == '\n' || buf[0] == '\r') {
      /* tolerate a leading blank line before the header */
      if (fgets(buf,sizeof(buf),fp) == NULL) {
        fprintf(stderr, "Empty header line followed by EOF\n");
        exit(1);
      }
    }
    if (sscanf(buf, "%d%d", &nSeq, &nPos) != 2
        || nSeq < 1 || nPos < 1) {
      fprintf(stderr, "Error parsing header line:%s\n", buf);
      exit(1);
    }
    names = (char **)mymalloc(sizeof(char*) * nSeq);
    seqs = (char **)mymalloc(sizeof(char*) * nSeq);
    nSaved = nSeq;
    int i;
    for (i = 0; i < nSeq; i++) {
      names[i] = NULL;
      seqs[i] = (char *)mymalloc(nPos+1);	/* null-terminate */
      seqs[i][0] = '\0';
    }
    int iSeq = 0; /* which sequence the next data line belongs to */
    while(fgets(buf,sizeof(buf),fp)) {
      if ((buf[0] == '\n' || buf[0] == '\r') && (iSeq == nSeq || iSeq == 0)) {
        /* blank line between interleaved blocks: restart the block */
        iSeq = 0;
      } else {
        int j = 0; /* character just past end of name */
        if (buf[0] == ' ') {
          /* continuation line: name must already be known */
          if (names[iSeq] == NULL) {
            fprintf(stderr, "No name in phylip line %s", buf);
            exit(1);
          }
        } else {
          while (buf[j] != '\n' && buf[j] != '\0' && buf[j] != ' ')
            j++;
          if (buf[j] != ' ' || j == 0) {
            fprintf(stderr, "No sequence in phylip line %s", buf);
            exit(1);
          }
          if (iSeq >= nSeq) {
            fprintf(stderr, "No empty line between sequence blocks (is the sequence count wrong?)\n");
            exit(1);
          }
          if (names[iSeq] == NULL) {
            /* save the name */
            names[iSeq] = (char *)mymalloc(j+1);
            int k;
            for (k = 0; k < j; k++) names[iSeq][k] = buf[k];
            names[iSeq][j] = '\0';
          } else {
            /* check the name */
            int k;
            int match = 1;
            for (k = 0; k < j; k++) {
              if (names[iSeq][k] != buf[k]) {
                match = 0;
                break;
              }
            }
            if (!match || names[iSeq][j] != '\0') {
              fprintf(stderr, "Wrong name in phylip line %s\nExpected %s\n", buf, names[iSeq]);
              exit(1);
            }
          }
        }
        /* append the sequence characters (spaces ignored, uppercased) */
        int seqlen = strlen(seqs[iSeq]);
        for (; buf[j] != '\n' && buf[j] != '\0'; j++) {
          if (buf[j] != ' ') {
            if (seqlen >= nPos) {
              fprintf(stderr, "Too many characters (expected %d) for sequence named %s\nSo far have:\n%s\n",
                      nPos, names[iSeq], seqs[iSeq]);
              exit(1);
            }
            seqs[iSeq][seqlen++] = toupper(buf[j]);
          }
        }
        seqs[iSeq][seqlen] = '\0'; /* null-terminate */
        if(verbose>10) fprintf(stderr,"Read iSeq %d name %s seqsofar %s\n", iSeq, names[iSeq], seqs[iSeq]);
        iSeq++;
        if (iSeq == nSeq && strlen(seqs[0]) == nPos)
          break; /* finished alignment */
      }	/* end else non-empty phylip line */
    }
    if (iSeq != nSeq && iSeq != 0) {
      fprintf(stderr, "Wrong number of sequences: expected %d\n", nSeq);
      exit(1);
    }
  }
  /* Check lengths of sequences */
  int i;
  for (i = 0; i < nSeq; i++) {
    int seqlen = strlen(seqs[i]);
    if (seqlen != nPos) {
      fprintf(stderr, "Wrong number of characters for %s: expected %d but have %d instead.\n"
              "This sequence may be truncated, or another sequence may be too long.\n",
              names[i], nPos, seqlen);
      exit(1);
    }
  }
  /* Replace "." with "-" and warn if we find any */
  /* If nucleotide sequences, replace U with T and N with X */
  bool findDot = false;
  for (i = 0; i < nSeq; i++) {
    char *p;
    for (p = seqs[i]; *p != '\0'; p++) {
      if (*p == '.') {
        findDot = true;
        *p = '-';
      }
      if (nCodes == 4 && *p == 'U')
        *p = 'T';
      if (nCodes == 4 && *p == 'N')
        *p = 'X';
    }
  }
  if (findDot)
    fprintf(stderr, "Warning! Found \".\" character(s). These are treated as gaps\n");
  if (ferror(fp)) {
    fprintf(stderr, "Error reading input file\n");
    exit(1);
  }
  alignment_t *align = (alignment_t*)mymalloc(sizeof(alignment_t));
  align->nSeq = nSeq;
  align->nPos = nPos;
  align->names = names;
  align->seqs = seqs;
  align->nSaved = nSaved;
  return(align);
}
/* Release only the sequence strings of an alignment, leaving the names
   and the alignment_t itself intact. */
void FreeAlignmentSeqs(/*IN/OUT*/alignment_t *aln) {
  assert(aln != NULL);
  int iSeq;
  for (iSeq = 0; iSeq < aln->nSeq; iSeq++) {
    aln->seqs[iSeq] = myfree(aln->seqs[iSeq], aln->nPos+1);
  }
}
/* Free an alignment and everything it owns (names, sequences, arrays, and
   the struct itself). Safe to call with NULL; always returns NULL so the
   caller can clear its pointer in one statement. */
alignment_t *FreeAlignment(alignment_t *aln) {
  if (aln == NULL)
    return(NULL);
  int iSeq;
  for (iSeq = 0; iSeq < aln->nSeq; iSeq++) {
    aln->names[iSeq] = myfree(aln->names[iSeq], strlen(aln->names[iSeq])+1);
    aln->seqs[iSeq] = myfree(aln->seqs[iSeq], aln->nPos+1);
  }
  aln->names = myfree(aln->names, sizeof(char*)*aln->nSaved);
  aln->seqs = myfree(aln->seqs, sizeof(char*)*aln->nSaved);
  myfree(aln, sizeof(alignment_t));
  return(NULL);
}
/* Map constraint-alignment sequences onto unique-sequence space.
   Returns an array of unique->nUnique pointers; entry i is the constraint
   string for unique sequence i, or NULL if no constraint applies.
   The returned pointers alias constraints->seqs (no copies are made);
   only the outer array is newly allocated. Exits if a constraint name is
   not found in the main alignment. If several duplicate sequences carry
   conflicting constraints, the first one wins and a warning is printed. */
char **AlnToConstraints(alignment_t *constraints, uniquify_t *unique, hashstrings_t *hashnames) {
  /* look up constraints as names and map to unique-space */
  char ** uniqConstraints = (char**)mymalloc(sizeof(char*) * unique->nUnique);
  int i;
  for (i = 0; i < unique->nUnique; i++)
    uniqConstraints[i] = NULL;
  for (i = 0; i < constraints->nSeq; i++) {
    char *name = constraints->names[i];
    char *constraintSeq = constraints->seqs[i];
    hashiterator_t hi = FindMatch(hashnames,name);
    if (HashCount(hashnames,hi) != 1) {
      fprintf(stderr, "Sequence %s from constraints file is not in the alignment\n", name);
      exit(1);
    }
    int iSeqNonunique = HashFirst(hashnames,hi);
    assert(iSeqNonunique >= 0 && iSeqNonunique < unique->nSeq);
    int iSeqUnique = unique->alnToUniq[iSeqNonunique];
    assert(iSeqUnique >= 0 && iSeqUnique < unique->nUnique);
    if (uniqConstraints[iSeqUnique] != NULL) {
      /* Already set a constraint for this group of sequences!
         Warn that we are ignoring this one unless the constraints match */
      if (strcmp(uniqConstraints[iSeqUnique],constraintSeq) != 0) {
        fprintf(stderr,
                "Warning: ignoring constraints for %s:\n%s\n"
                "Another sequence has the same sequence but different constraints\n",
                name, constraintSeq);
      }
    } else {
      uniqConstraints[iSeqUnique] = constraintSeq;
    }
  }
  return(uniqConstraints);
}
/* Convert a sequence string into a profile_t.
   Each position gets the code for its character (via a lazily built
   character->code table from codesString, case-insensitive) with weight 1;
   gaps ('-') and unrecognized characters become NOCODE with weight 0.
   counts[] is a 256-entry histogram updated with the raw characters seen.
   If constraints are in use, nOn/nOff are set from constraintSeq
   ('1' = on, '0' = off, '-' = unconstrained); any other constraint
   character is replaced IN PLACE with '-' after a warning (this mutates
   the caller's constraintSeq -- see comment below).
   iNode is used only for warning messages. Caller owns the returned profile. */
profile_t *SeqToProfile(/*IN/OUT*/NJ_t *NJ,
                        char *seq, int nPos,
                        /*OPTIONAL*/char *constraintSeq, int nConstraints,
                        int iNode,
                        unsigned long counts[256]) {
  /* one-time character->code lookup table, shared across calls */
  static unsigned char charToCode[256];
  static int codeSet = 0;
  int c, i;
  if (!codeSet) {
    for (c = 0; c < 256; c++) {
      charToCode[c] = nCodes; /* nCodes marks "unknown character" */
    }
    for (i = 0; codesString[i]; i++) {
      charToCode[codesString[i]] = i;
      charToCode[tolower(codesString[i])] = i;
    }
    charToCode['-'] = NOCODE;
    codeSet=1;
  }
  assert(strlen(seq) == nPos);
  profile_t *profile = NewProfile(nPos,nConstraints);
  for (i = 0; i < nPos; i++) {
    unsigned int character = (unsigned int) seq[i];
    counts[character]++;
    c = charToCode[character];
    if(verbose>10 && i < 2) fprintf(stderr,"pos %d char %c code %d\n", i, seq[i], c);
    /* treat unknowns as gaps */
    if (c == nCodes || c == NOCODE) {
      profile->codes[i] = NOCODE;
      profile->weights[i] = 0.0;
    } else {
      profile->codes[i] = c;
      profile->weights[i] = 1.0;
    }
  }
  if (nConstraints > 0) {
    for (i = 0; i < nConstraints; i++) {
      profile->nOn[i] = 0;
      profile->nOff[i] = 0;
    }
    bool bWarn = false;
    if (constraintSeq != NULL) {
      assert(strlen(constraintSeq) == nConstraints);
      for (i = 0; i < nConstraints; i++) {
        if (constraintSeq[i] == '1') {
          profile->nOn[i] = 1;
        } else if (constraintSeq[i] == '0') {
          profile->nOff[i] = 1;
        } else if (constraintSeq[i] != '-') {
          if (!bWarn) {
            fprintf(stderr, "Constraint characters in unique sequence %d replaced with gap:", iNode+1);
            bWarn = true;
          }
          fprintf(stderr, " %c%d", constraintSeq[i], i+1);
          /* For the benefit of ConstraintSequencePenalty -- this is a bit of a hack, as
             this modifies the value read from the alignment
          */
          constraintSeq[i] = '-';
        }
      }
      if (bWarn)
        fprintf(stderr, "\n");
    }
  }
  return profile;
}
/* Uncorrected distance between two code sequences.
   Only positions where both sequences have a real code (not NOCODE) are
   compared. Without a distance matrix, dist is the fraction of differing
   positions; with one, it is the mean matrix entry over compared positions.
   hit->weight is the number of compared positions; if none, dist = 1.0.
   Increments the global seqOps counter. */
void SeqDist(unsigned char *codes1, unsigned char *codes2, int nPos,
             distance_matrix_t *dmat,
             /*OUT*/besthit_t *hit) {
  double top = 0;		/* summed over positions */
  int nUse = 0;
  int i;
  if (dmat==NULL) {
    /* identity distance: count mismatches */
    int nDiff = 0;
    for (i = 0; i < nPos; i++) {
      if (codes1[i] != NOCODE && codes2[i] != NOCODE) {
        nUse++;
        if (codes1[i] != codes2[i]) nDiff++;
      }
    }
    top = (double)nDiff;
  } else {
    /* matrix-weighted distance */
    for (i = 0; i < nPos; i++) {
      if (codes1[i] != NOCODE && codes2[i] != NOCODE) {
        nUse++;
        top += dmat->distances[(unsigned int)codes1[i]][(unsigned int)codes2[i]];
      }
    }
  }
  hit->weight = (double)nUse;
  hit->dist = nUse > 0 ? top/(double)nUse : 1.0;
  seqOps++;
}
/* Compute all pairwise distances among 2..4 profiles into distances[],
   ordered (0,1),(0,2),...,(i,j) with i<j -- nProfiles*(nProfiles-1)/2 values.
   If pseudoWeight > 0, each distance is shrunk toward a prior (the
   weight-averaged distance over all pairs, or 3.0 if total weight is tiny).
   If logdist is set, each distance is then log-corrected via LogCorrect. */
void CorrectedPairDistances(profile_t **profiles, int nProfiles,
                            /*OPTIONAL*/distance_matrix_t *distance_matrix,
                            int nPos,
                            /*OUT*/double *distances) {
  assert(distances != NULL);
  assert(profiles != NULL);
  assert(nProfiles>1 && nProfiles <= 4);
  besthit_t hit[6]; /* up to C(4,2) = 6 pairs */
  int iHit,i,j;
  for (iHit=0, i=0; i < nProfiles; i++) {
    for (j=i+1; j < nProfiles; j++, iHit++) {
      ProfileDist(profiles[i],profiles[j],nPos,distance_matrix,/*OUT*/&hit[iHit]);
      distances[iHit] = hit[iHit].dist;
    }
  }
  if (pseudoWeight > 0) {
    /* Estimate the prior distance */
    double dTop = 0;
    double dBottom = 0;
    for (iHit=0; iHit < (nProfiles*(nProfiles-1))/2; iHit++) {
      dTop += hit[iHit].dist * hit[iHit].weight;
      dBottom += hit[iHit].weight;
    }
    double prior = (dBottom > 0.01) ? dTop/dBottom : 3.0;
    for (iHit=0; iHit < (nProfiles*(nProfiles-1))/2; iHit++)
      distances[iHit] = (distances[iHit] * hit[iHit].weight + prior * pseudoWeight)
        / (hit[iHit].weight + pseudoWeight);
  }
  if (logdist) {
    for (iHit=0; iHit < (nProfiles*(nProfiles-1))/2; iHit++)
      distances[iHit] = LogCorrect(distances[iHit]);
  }
}
/* During the neighbor-joining phase, a join only violates our constraints if
   node1, node2, and other are all represented in the constraint
   and if one of the 3 is split and the other two do not agree
 */
/* Total constraint penalty for joining node1 and node2: the sum of the
   per-constraint penalties from JoinConstraintPenaltyPiece.
   Returns 0 if there are no constraints. */
int JoinConstraintPenalty(/*IN*/NJ_t *NJ, int node1, int node2) {
  if (NJ->nConstraints == 0)
    return(0); /* fixed: was return(0.0), a double literal from an int function */
  int penalty = 0;
  int iC;
  for (iC = 0; iC < NJ->nConstraints; iC++)
    penalty += JoinConstraintPenaltyPiece(NJ, node1, node2, iC);
  return(penalty);
}
/* Penalty contribution of constraint iC for joining node1 with node2.
   The "out" side is the rest of the tree: the out-profile counts minus the
   two nodes being joined. A penalty applies only when all three parties
   (node1, node2, out) are represented in the constraint, exactly one of
   the three is split, and exactly one of the others is fully "on". */
int JoinConstraintPenaltyPiece(NJ_t *NJ, int node1, int node2, int iC) {
  profile_t *pOut = NJ->outprofile;
  profile_t *p1 = NJ->profiles[node1];
  profile_t *p2 = NJ->profiles[node2];
  int nOn1 = p1->nOn[iC];
  int nOff1 = p1->nOff[iC];
  int nOn2 = p2->nOn[iC];
  int nOff2 = p2->nOff[iC];
  int nOnOut = pOut->nOn[iC] - nOn1 - nOn2;
  int nOffOut = pOut->nOff[iC] - nOff1 - nOff2;
  if ((nOn1+nOff1) > 0 && (nOn2+nOff2) > 0 && (nOnOut+nOffOut) > 0) {
    /* code is -1 for split, 0 for off, 1 for on */
    int code1 = (nOn1 > 0 && nOff1 > 0) ? -1 : (nOn1 > 0 ? 1 : 0);
    int code2 = (nOn2 > 0 && nOff2 > 0) ? -1 : (nOn2 > 0 ? 1 : 0);
    /* fixed: parenthesis was misplaced as (nOnOut > 0 && nOffOut) > 0,
       which treated a (theoretically) negative nOffOut as "on" */
    int code3 = (nOnOut > 0 && nOffOut > 0) ? -1 : (nOnOut > 0 ? 1 : 0);
    int nSplit = (code1 == -1 ? 1 : 0) + (code2 == -1 ? 1 : 0) + (code3 == -1 ? 1 : 0);
    int nOn = (code1 == 1 ? 1 : 0) + (code2 == 1 ? 1 : 0) + (code3 == 1 ? 1 : 0);
    if (nSplit == 1 && nOn == 1)
      return(SplitConstraintPenalty(nOn1+nOn2, nOff1+nOff2, nOnOut, nOffOut));
  }
  /* else */
  return(0);
}
/* Sum constraint penalties over all constraints for the three possible
   topologies of a quartet of profiles (A,B,C,D): penalty[ABvsCD],
   penalty[ACvsBD], penalty[ADvsBC]. All three are 0 if nConstraints == 0
   or no constraint distinguishes the topologies. */
void QuartetConstraintPenalties(profile_t *profiles[4], int nConstraints, /*OUT*/double penalty[3]) {
  int i;
  for (i=0; i < 3; i++)
    penalty[i] = 0.0;
  if(nConstraints == 0)
    return;
  int iC;
  for (iC = 0; iC < nConstraints; iC++) {
    double part[3];
    /* piece returns false when this constraint is uninformative for the quartet */
    if (QuartetConstraintPenaltiesPiece(profiles, iC, /*OUT*/part)) {
      for (i=0;i<3;i++)
        penalty[i] += part[i];
      if (verbose>2
          && (fabs(part[ABvsCD]-part[ACvsBD]) > 0.001 || fabs(part[ABvsCD]-part[ADvsBC]) > 0.001))
        fprintf(stderr, "Constraint Penalties at %d: ABvsCD %.3f ACvsBD %.3f ADvsBC %.3f %d/%d %d/%d %d/%d %d/%d\n",
                iC, part[ABvsCD], part[ACvsBD], part[ADvsBC],
                profiles[0]->nOn[iC], profiles[0]->nOff[iC],
                profiles[1]->nOn[iC], profiles[1]->nOff[iC],
                profiles[2]->nOn[iC], profiles[2]->nOff[iC],
                profiles[3]->nOn[iC], profiles[3]->nOff[iC]);
    }
  }
  if (verbose>2)
    fprintf(stderr, "Total Constraint Penalties: ABvsCD %.3f ACvsBD %.3f ADvsBC %.3f\n",
            penalty[ABvsCD], penalty[ACvsBD], penalty[ADvsBC]);
}
/* Expected disagreement between two constrained groups.
   fracOn1/fracOn2 are the fractions of "on" members on each side; the
   probability the two sides take opposite states is
   f1*(1-f2) + (1-f1)*f2 = f1 + f2 - 2*f1*f2.
   Callers must ensure nOn+nOff > 0 on both sides (else division by zero). */
double PairConstraintDistance(int nOn1, int nOff1, int nOn2, int nOff2) {
  double fracOn1 = nOn1/(double)(nOn1+nOff1);
  double fracOn2 = nOn2/(double)(nOn2+nOff2);
  return(fracOn1 + fracOn2 - 2.0 * fracOn1 * fracOn2);
}
/* Per-constraint penalty for the three quartet topologies.
   Returns false (constraint uninformative) if any of the four profiles has
   no constrained members, or if at least 3 of the 4 agree on one state.
   Otherwise fills piece[ABvsCD/ACvsBD/ADvsBC] with constraintWeight times
   the summed pair disagreement for each pairing and returns true. */
bool QuartetConstraintPenaltiesPiece(profile_t *profiles[4], int iC, /*OUT*/double piece[3]) {
  int nOn[4];
  int nOff[4];
  int i;
  int nSplit = 0;  /* profiles with both on and off members */
  int nPlus = 0;   /* profiles entirely on */
  int nMinus = 0;  /* profiles entirely off */
  for (i=0; i < 4; i++) {
    nOn[i] = profiles[i]->nOn[iC];
    nOff[i] = profiles[i]->nOff[iC];
    if (nOn[i] + nOff[i] == 0)
      return(false);		/* ignore */
    else if (nOn[i] > 0 && nOff[i] > 0)
      nSplit++;
    else if (nOn[i] > 0)
      nPlus++;
    else
      nMinus++;
  }
  /* If just one of them is split or on the other side and the others all agree, also ignore */
  if (nPlus >= 3 || nMinus >= 3)
    return(false);
  piece[ABvsCD] = constraintWeight
    * (PairConstraintDistance(nOn[0],nOff[0],nOn[1],nOff[1])
       + PairConstraintDistance(nOn[2],nOff[2],nOn[3],nOff[3]));
  piece[ACvsBD] = constraintWeight
    * (PairConstraintDistance(nOn[0],nOff[0],nOn[2],nOff[2])
       + PairConstraintDistance(nOn[1],nOff[1],nOn[3],nOff[3]));
  piece[ADvsBC] = constraintWeight
    * (PairConstraintDistance(nOn[0],nOff[0],nOn[3],nOff[3])
       + PairConstraintDistance(nOn[2],nOff[2],nOn[1],nOff[1]));
  return(true);
}
/* Minimum number of constrained leaves that need to be moved
   to satisfy the constraint (or 0 if constraint is satisfied)
   Defining it this way should ensure that SPR moves that break
   constraints get a penalty.
   Side 1 has nOn1 on / nOff1 off members, side 2 has nOn2 / nOff2;
   we pick the cheaper of the two ways to make the sides pure. */
int SplitConstraintPenalty(int nOn1, int nOff1, int nOn2, int nOff2) {
  if (nOn1 + nOff2 < nOn2 + nOff1) {
    /* cheaper to make side 1 all-off and side 2 all-on */
    return nOn1 < nOff2 ? nOn1 : nOff2;
  }
  /* otherwise make side 1 all-on and side 2 all-off */
  return nOn2 < nOff1 ? nOn2 : nOff1;
}
/* Does the split (profiles[0],profiles[1]) | (profiles[2],profiles[3])
   violate constraint iConstraint?
   Each profile is classified as on (1), off (0), or split (-1); a profile
   with no constrained members makes the constraint inapplicable (false).
   No violation if 3+ profiles share a state, or if the 2-vs-2 states agree
   with the split; otherwise the split violates the constraint. */
bool SplitViolatesConstraint(profile_t *profiles[4], int iConstraint) {
  int i;
  int codes[4]; /* 0 for off, 1 for on, -1 for split (quit if not constrained at all) */
  for (i = 0; i < 4; i++) {
    if (profiles[i]->nOn[iConstraint] + profiles[i]->nOff[iConstraint] == 0)
      return(false);
    else if (profiles[i]->nOn[iConstraint] > 0 && profiles[i]->nOff[iConstraint] == 0)
      codes[i] = 1;
    else if (profiles[i]->nOn[iConstraint] == 0 && profiles[i]->nOff[iConstraint] > 0)
      codes[i] = 0;
    else
      codes[i] = -1;
  }
  int n0 = 0;
  int n1 = 0;
  for (i = 0; i < 4; i++) {
    if (codes[i] == 0)
      n0++;
    else if (codes[i] == 1)
      n1++;
  }
  /* 3 on one side means no violation, even if other is code -1
     otherwise must have code != -1 and agreement on the split
  */
  if (n0 >= 3 || n1 >= 3)
    return(false);
  if (n0==2 && n1==2 && codes[0] == codes[1] && codes[2] == codes[3])
    return(false);
  return(true);
}
/* Converts a raw fractional distance into a log-corrected evolutionary
   distance, capped at maxscore. Uses the Jukes-Cantor formula for
   nucleotides without a matrix, else a scoredist-like correction. */
double LogCorrect(double dist) {
  const double maxscore = 3.0;
  double corrected;
  if (nCodes == 4 && !useMatrix) {
    /* Jukes-Cantor: the log argument goes nonpositive near 3/4, so saturate */
    corrected = (dist < 0.74) ? -0.75 * log(1.0 - dist * 4.0/3.0) : maxscore;
  } else {
    /* scoredist-like */
    corrected = (dist < 0.99) ? -1.3 * log(1.0 - dist) : maxscore;
  }
  return corrected < maxscore ? corrected : maxscore;
}
/* A helper function -- f1 and f2 can be NULL if the corresponding code != NOCODE.
   Returns the distance contribution of one alignment position, comparing
   a character code and/or a frequency vector on each side. Returns 10.0
   when a side has neither a code nor a stored vector (a large sentinel).
   With a distance matrix, vector-vs-vector comparisons are the
   eigenvalue-weighted dot product of the rotated vectors; without one,
   they are 1 - dot(f1,f2). codeDist2, if given, is a precomputed table
   of code-vs-profile2 distances (see SetCodeDist).
*/
double ProfileDistPiece(unsigned int code1, unsigned int code2,
                        numeric_t *f1, numeric_t *f2,
                        /*OPTIONAL*/distance_matrix_t *dmat,
                        /*OPTIONAL*/numeric_t *codeDist2) {
  if (dmat) {
    if (code1 != NOCODE && code2 != NOCODE) { /* code1 vs code2 */
      return(dmat->distances[code1][code2]);
    } else if (codeDist2 != NULL && code1 != NOCODE) { /* code1 vs. codeDist2 */
      return(codeDist2[code1]);
    } else { /* f1 vs f2 */
      /* Fall back to the rotated single-character frequency when a side
         has a code but no stored vector */
      if (f1 == NULL) {
        if(code1 == NOCODE) return(10.0);
        f1 = &dmat->codeFreq[code1][0];
      }
      if (f2 == NULL) {
        if(code2 == NOCODE) return(10.0);
        f2 = &dmat->codeFreq[code2][0];
      }
      return(vector_multiply3_sum(f1,f2,dmat->eigenval,nCodes));
    }
  } else {
    /* no matrix */
    if (code1 != NOCODE) {
      if (code2 != NOCODE) {
        return(code1 == code2 ? 0.0 : 1.0); /* code1 vs code2 */
      } else {
        if(f2 == NULL) return(10.0);
        return(1.0 - f2[code1]); /* code1 vs. f2 */
      }
    } else {
      if (code2 != NOCODE) {
        if(f1 == NULL) return(10.0);
        return(1.0 - f1[code2]); /* f1 vs code2 */
      } else { /* f1 vs. f2 */
        if (f1 == NULL || f2 == NULL) return(10.0);
        /* 1 - probability that the two positions hold the same character */
        double piece = 1.0;
        int k;
        for (k = 0; k < nCodes; k++) {
          piece -= f1[k] * f2[k];
        }
        return(piece);
      }
    }
  }
  /* All branches above return; unreachable */
  assert(0);
}
/* E.g. GET_FREQ(profile,iPos,iVector)
   Gets the next element of the vectors (and updates iVector), or
   returns NULL if we didn't store a vector.
   NOTE: the macro increments IVECTOR as a side effect, so it must be
   invoked exactly once per position, unconditionally, to keep the
   running vector index in sync with the stored vectors (callers assert
   iVector == nVectors after the position loop).
*/
#define GET_FREQ(P,I,IVECTOR) \
(P->weights[I] > 0 && P->codes[I] == NOCODE ? &P->vectors[nCodes*(IVECTOR++)] : NULL)
/* Computes the weighted average per-position distance between two
   profiles, writing the distance and total weight into hit.
   Positions where either profile has zero weight contribute nothing;
   if no position contributes, dist defaults to 1 and weight to 0.01. */
void ProfileDist(profile_t *profile1, profile_t *profile2, int nPos,
                 /*OPTIONAL*/distance_matrix_t *dmat,
                 /*OUT*/besthit_t *hit) {
  double top = 0;
  double denom = 0;
  int iFreq1 = 0;
  int iFreq2 = 0;
  int i = 0;
  for (i = 0; i < nPos; i++) {
    /* GET_FREQ must run every iteration to keep iFreq1/iFreq2 in sync */
    numeric_t *f1 = GET_FREQ(profile1,i,/*IN/OUT*/iFreq1);
    numeric_t *f2 = GET_FREQ(profile2,i,/*IN/OUT*/iFreq2);
    if (profile1->weights[i] > 0 && profile2->weights[i] > 0) {
      double weight = profile1->weights[i] * profile2->weights[i];
      denom += weight;
      /* Use profile2's precomputed code-vs-profile distances when available */
      double piece = ProfileDistPiece(profile1->codes[i],profile2->codes[i],f1,f2,dmat,
                                      profile2->codeDist ? &profile2->codeDist[i*nCodes] : NULL);
      top += weight * piece;
    }
  }
  assert(iFreq1 == profile1->nVectors);
  assert(iFreq2 == profile2->nVectors);
  hit->weight = denom > 0 ? denom : 0.01; /* 0.01 is an arbitrarily low value of weight (normally >>1) */
  hit->dist = denom > 0 ? top/denom : 1;
  profileOps++;
}
/* Adds weight * (character or frequency vector) into fOut.
   This should not be called with an update weight of 0: in that case
   codeIn may be NOCODE with fIn == NULL, and the asserts below fire. */
void AddToFreq(/*IN/OUT*/numeric_t *fOut,
               double weight,
               unsigned int codeIn, /*OPTIONAL*/numeric_t *fIn,
               /*OPTIONAL*/distance_matrix_t *dmat) {
  assert(fOut != NULL);
  if (fIn != NULL) {
    /* full frequency vector available */
    vector_add_mult(fOut, fIn, weight, nCodes);
    return;
  }
  assert(codeIn != NOCODE);
  if (dmat) {
    /* single character, rotated representation */
    vector_add_mult(fOut, dmat->codeFreq[codeIn], weight, nCodes);
  } else {
    /* single character, plain counts */
    fOut[codeIn] += weight;
  }
}
/* Recomputes the profile of internal node `node` from its two children,
   freeing any existing profile first. weight1 is the weight of the
   first child, or negative for a plain average (see AverageProfile). */
void SetProfile(/*IN/OUT*/NJ_t *NJ, int node, double weight1) {
  children_t *c = &NJ->child[node];
  assert(c->nChild == 2);
  profile_t *pLeft = NJ->profiles[c->child[0]];
  profile_t *pRight = NJ->profiles[c->child[1]];
  assert(pLeft != NULL);
  assert(pRight != NULL);
  if (NJ->profiles[node] != NULL)
    FreeProfile(NJ->profiles[node], NJ->nPos, NJ->nConstraints);
  NJ->profiles[node] = AverageProfile(pLeft, pRight,
                                      NJ->nPos, NJ->nConstraints,
                                      NJ->distance_matrix,
                                      weight1);
}
/* Computes the weighted average of two profiles.
   bionjWeight is the weight of the first sequence (between 0 and 1),
   or -1 to do the average (weight 0.5).
   A position keeps a simple code (no stored vector) when one child is
   absent or both children agree on a single character; otherwise a
   frequency vector is accumulated and normalized. Constraint counts
   are summed from the children. Caller owns the returned profile. */
profile_t *AverageProfile(profile_t *profile1, profile_t *profile2,
                          int nPos, int nConstraints,
                          distance_matrix_t *dmat,
                          double bionjWeight) {
  int i;
  if (bionjWeight < 0) {
    bionjWeight = 0.5;
  }
  /* First, set codes and weights and see how big vectors will be */
  profile_t *out = NewProfile(nPos, nConstraints);
  for (i = 0; i < nPos; i++) {
    out->weights[i] = bionjWeight * profile1->weights[i]
      + (1-bionjWeight) * profile2->weights[i];
    out->codes[i] = NOCODE;
    if (out->weights[i] > 0) {
      /* keep a simple code if the other child is absent or agrees */
      if (profile1->weights[i] > 0 && profile1->codes[i] != NOCODE
          && (profile2->weights[i] <= 0 || profile1->codes[i] == profile2->codes[i])) {
        out->codes[i] = profile1->codes[i];
      } else if (profile1->weights[i] <= 0
                 && profile2->weights[i] > 0
                 && profile2->codes[i] != NOCODE) {
        out->codes[i] = profile2->codes[i];
      }
      if (out->codes[i] == NOCODE) out->nVectors++;
    }
  }
  /* Allocate and set the vectors */
  out->vectors = (numeric_t*)mymalloc(sizeof(numeric_t)*nCodes*out->nVectors);
  for (i = 0; i < nCodes * out->nVectors; i++) out->vectors[i] = 0;
  nProfileFreqAlloc += out->nVectors;
  nProfileFreqAvoid += nPos - out->nVectors;
  int iFreqOut = 0;
  int iFreq1 = 0;
  int iFreq2 = 0;
  for (i=0; i < nPos; i++) {
    /* GET_FREQ has a ++ side effect: call once per position, unconditionally */
    numeric_t *f = GET_FREQ(out,i,/*IN/OUT*/iFreqOut);
    numeric_t *f1 = GET_FREQ(profile1,i,/*IN/OUT*/iFreq1);
    numeric_t *f2 = GET_FREQ(profile2,i,/*IN/OUT*/iFreq2);
    if (f != NULL) {
      if (profile1->weights[i] > 0)
        AddToFreq(/*IN/OUT*/f, profile1->weights[i] * bionjWeight,
                  profile1->codes[i], f1, dmat);
      if (profile2->weights[i] > 0)
        AddToFreq(/*IN/OUT*/f, profile2->weights[i] * (1.0-bionjWeight),
                  profile2->codes[i], f2, dmat);
      NormalizeFreq(/*IN/OUT*/f, dmat);
    } /* end if computing f */
    if (verbose > 10 && i < 5) {
      fprintf(stderr,"Average profiles: pos %d in-w1 %f in-w2 %f bionjWeight %f to weight %f code %d\n",
              i, profile1->weights[i], profile2->weights[i], bionjWeight,
              out->weights[i], out->codes[i]);
      if (f!= NULL) {
        int k;
        for (k = 0; k < nCodes; k++)
          fprintf(stderr, "\t%c:%f", codesString[k], f ? f[k] : -1.0);
        fprintf(stderr,"\n");
      }
    }
  } /* end loop over positions */
  /* verify the vector bookkeeping stayed in sync */
  assert(iFreq1 == profile1->nVectors);
  assert(iFreq2 == profile2->nVectors);
  assert(iFreqOut == out->nVectors);
  /* compute total constraints */
  for (i = 0; i < nConstraints; i++) {
    out->nOn[i] = profile1->nOn[i] + profile2->nOn[i];
    out->nOff[i] = profile1->nOff[i] + profile2->nOff[i];
  }
  profileAvgOps++;
  return(out);
}
/* Make the (unrotated) frequencies sum to 1.
   Simply dividing by total_weight is not ideal because of roundoff
   error, so the total is computed from the vector itself instead. */
void NormalizeFreq(/*IN/OUT*/numeric_t *freq, distance_matrix_t *dmat) {
  int k;
  double total_freq = 0;
  if (dmat != NULL) {
    /* The total frequency is dot_product(true_frequencies, 1);
       rotate the all-ones vector by eigeninv (stored in eigentot) */
    total_freq = vector_multiply_sum(freq, dmat->eigentot, nCodes);
  } else {
    for (k = 0; k < nCodes; k++)
      total_freq += freq[k];
  }
  if (total_freq > fPostTotalTolerance) {
    numeric_t scale = 1.0/total_freq;
    vector_multiply_by(/*IN/OUT*/freq, scale, nCodes);
    return;
  }
  /* Nearly-zero total -- e.g. a mostly-gap position weighted down
     repeatedly; reset to arbitrary but legal values */
  if (dmat == NULL) {
    for (k = 0; k < nCodes; k++)
      freq[k] = 1.0/nCodes;
  } else {
    for (k = 0; k < nCodes; k++)
      freq[k] = dmat->codeFreq[0][k];
  }
}
/* OutProfile() computes the out-profile: the average of all nProfiles
   profiles. Every position stores a full frequency vector (code is
   always NOCODE and weight is forced positive), and codeDist is
   precomputed when a distance matrix is in use. Caller owns the result. */
profile_t *OutProfile(profile_t **profiles, int nProfiles,
                      int nPos, int nConstraints,
                      distance_matrix_t *dmat) {
  int i; /* position */
  int in; /* profile */
  profile_t *out = NewProfile(nPos, nConstraints);
  double inweight = 1.0/(double)nProfiles; /* The maximal output weight is 1.0 */
  /* First, set weights -- code is always NOCODE, prevent weight=0 */
  for (i = 0; i < nPos; i++) {
    out->weights[i] = 0;
    for (in = 0; in < nProfiles; in++)
      out->weights[i] += profiles[in]->weights[i] * inweight;
    if (out->weights[i] <= 0) out->weights[i] = 1e-20; /* always store a vector */
    out->nVectors++;
    out->codes[i] = NOCODE; /* outprofile is normally complicated */
  }
  /* Initialize the frequencies to 0 */
  out->vectors = (numeric_t*)mymalloc(sizeof(numeric_t)*nCodes*out->nVectors);
  for (i = 0; i < nCodes*out->nVectors; i++)
    out->vectors[i] = 0;
  /* Add up the weights, going through each sequence in turn */
  for (in = 0; in < nProfiles; in++) {
    int iFreqOut = 0;
    int iFreqIn = 0;
    for (i = 0; i < nPos; i++) {
      /* GET_FREQ advances the vector indices as a side effect */
      numeric_t *fIn = GET_FREQ(profiles[in],i,/*IN/OUT*/iFreqIn);
      numeric_t *fOut = GET_FREQ(out,i,/*IN/OUT*/iFreqOut);
      if (profiles[in]->weights[i] > 0)
        AddToFreq(/*IN/OUT*/fOut, profiles[in]->weights[i],
                  profiles[in]->codes[i], fIn, dmat);
    }
    assert(iFreqOut == out->nVectors);
    assert(iFreqIn == profiles[in]->nVectors);
  }
  /* And normalize the frequencies to sum to 1 */
  int iFreqOut = 0;
  for (i = 0; i < nPos; i++) {
    numeric_t *fOut = GET_FREQ(out,i,/*IN/OUT*/iFreqOut);
    if (fOut)
      NormalizeFreq(/*IN/OUT*/fOut, dmat);
  }
  assert(iFreqOut == out->nVectors);
  if (verbose > 10) fprintf(stderr,"Average %d profiles\n", nProfiles);
  if(dmat)
    SetCodeDist(/*IN/OUT*/out, nPos, dmat);
  /* Compute constraints */
  for (i = 0; i < nConstraints; i++) {
    out->nOn[i] = 0;
    out->nOff[i] = 0;
    for (in = 0; in < nProfiles; in++) {
      out->nOn[i] += profiles[in]->nOn[i];
      out->nOff[i] += profiles[in]->nOff[i];
    }
  }
  return(out);
}
/* Incrementally updates the out-profile after a join: subtracts the two
   old profiles, adds the newly joined one, and rescales from nActiveOld
   to nActiveOld-1 active nodes. Each position is then renormalized and
   codeDist is refreshed. Constraint totals are adjusted the same way. */
void UpdateOutProfile(/*IN/OUT*/profile_t *out, profile_t *old1, profile_t *old2,
                      profile_t *new, int nActiveOld,
                      int nPos, int nConstraints,
                      distance_matrix_t *dmat) {
  int i, k;
  int iFreqOut = 0;
  int iFreq1 = 0;
  int iFreq2 = 0;
  int iFreqNew = 0;
  assert(nActiveOld > 0);
  for (i = 0; i < nPos; i++) {
    /* GET_FREQ increments the index as a side effect -- once per position */
    numeric_t *fOut = GET_FREQ(out,i,/*IN/OUT*/iFreqOut);
    numeric_t *fOld1 = GET_FREQ(old1,i,/*IN/OUT*/iFreq1);
    numeric_t *fOld2 = GET_FREQ(old2,i,/*IN/OUT*/iFreq2);
    numeric_t *fNew = GET_FREQ(new,i,/*IN/OUT*/iFreqNew);
    assert(out->codes[i] == NOCODE && fOut != NULL); /* No no-vector optimization for outprofiles */
    if (verbose > 3 && i < 3) {
      fprintf(stderr,"Updating out-profile position %d weight %f (mult %f)\n",
              i, out->weights[i], out->weights[i]*nActiveOld);
    }
    /* Undo the averaging, apply the delta, and re-average over one fewer node */
    double originalMult = out->weights[i]*nActiveOld;
    double newMult = originalMult + new->weights[i] - old1->weights[i] - old2->weights[i];
    out->weights[i] = newMult/(nActiveOld-1);
    if (out->weights[i] <= 0) out->weights[i] = 1e-20; /* always use the vector */
    /* scale the stored (normalized) vector back up before add/subtract */
    for (k = 0; k < nCodes; k++) fOut[k] *= originalMult;
    if (old1->weights[i] > 0)
      AddToFreq(/*IN/OUT*/fOut, -old1->weights[i], old1->codes[i], fOld1, dmat);
    if (old2->weights[i] > 0)
      AddToFreq(/*IN/OUT*/fOut, -old2->weights[i], old2->codes[i], fOld2, dmat);
    if (new->weights[i] > 0)
      AddToFreq(/*IN/OUT*/fOut, new->weights[i], new->codes[i], fNew, dmat);
    /* And renormalize */
    NormalizeFreq(/*IN/OUT*/fOut, dmat);
    if (verbose > 2 && i < 3) {
      fprintf(stderr,"Updated out-profile position %d weight %f (mult %f)",
              i, out->weights[i], out->weights[i]*nActiveOld);
      if(out->weights[i] > 0)
        for (k=0;k<nCodes;k++)
          fprintf(stderr, " %c:%f", dmat?'?':codesString[k], fOut[k]);
      fprintf(stderr,"\n");
    }
  }
  assert(iFreqOut == out->nVectors);
  assert(iFreq1 == old1->nVectors);
  assert(iFreq2 == old2->nVectors);
  assert(iFreqNew == new->nVectors);
  if(dmat)
    SetCodeDist(/*IN/OUT*/out,nPos,dmat);
  /* update constraints -- note in practice this should be a no-op */
  for (i = 0; i < nConstraints; i++) {
    out->nOn[i] += new->nOn[i] - old1->nOn[i] - old2->nOn[i];
    out->nOff[i] += new->nOff[i] - old1->nOff[i] - old2->nOff[i];
  }
}
/* Precomputes, for every position i and character k, the distance piece
   between the profile's entry at i and character k, caching the result
   in profile->codeDist (allocated on first use). ProfileDistPiece
   consults this table when comparing a single code against this
   profile. */
void SetCodeDist(/*IN/OUT*/profile_t *profile, int nPos,
                 distance_matrix_t *dmat) {
  if (profile->codeDist == NULL)
    profile->codeDist = (numeric_t*)mymalloc(sizeof(numeric_t)*nPos*nCodes);
  int i;
  int iFreq = 0;
  for (i = 0; i < nPos; i++) {
    /* GET_FREQ advances iFreq as a side effect -- once per position */
    numeric_t *f = GET_FREQ(profile,i,/*IN/OUT*/iFreq);
    int k;
    for (k = 0; k < nCodes; k++)
      profile->codeDist[i*nCodes+k] = ProfileDistPiece(/*code1*/profile->codes[i], /*code2*/k,
                                                       /*f1*/f, /*f2*/NULL,
                                                       dmat, NULL);
  }
  assert(iFreq==profile->nVectors);
}
/* Finds the best join partner for `node` among all nodes without a
   parent, writing it to bestjoin. If allhits is non-NULL, also records
   the hit against every node index (joined/illegal slots get sentinel
   values). Self-distances are computed (the top-hit heuristic expects
   self among its top hits) but never returned as bestjoin. */
void SetBestHit(int node, NJ_t *NJ, int nActive,
                /*OUT*/besthit_t *bestjoin, /*OUT OPTIONAL*/besthit_t *allhits) {
  assert(NJ->parent[node] < 0);
  bestjoin->i = node;
  bestjoin->j = -1;
  bestjoin->dist = 1e20;
  bestjoin->criterion = 1e20;
  int j;
  besthit_t tmp;
  /* NOTE(review): under OpenMP, when allhits == NULL all threads share
     this single tmp, and bestjoin is read and updated inside the
     parallel loop without synchronization -- looks like a potential
     data race; confirm how callers invoke this (serially, or always
     with allhits) before relying on parallel execution here. */
#ifdef OPENMP
  /* Note -- if we are already in a parallel region, this will be ignored */
#pragma omp parallel for schedule(dynamic, 50)
#endif
  for (j = 0; j < NJ->maxnode; j++) {
    besthit_t *sv = allhits != NULL ? &allhits[j] : &tmp;
    sv->i = node;
    sv->j = j;
    if (NJ->parent[j] >= 0) {
      sv->i = -1; /* illegal/empty join */
      sv->weight = 0.0;
      sv->criterion = sv->dist = 1e20;
      continue;
    }
    /* Note that we compute self-distances (allow j==node) because the top-hit heuristic
       expects self to be within its top hits, but we exclude those from the bestjoin
       that we return...
    */
    SetDistCriterion(NJ, nActive, /*IN/OUT*/sv);
    if (sv->criterion < bestjoin->criterion && node != j)
      *bestjoin = *sv;
  }
  if (verbose>5) {
    fprintf(stderr, "SetBestHit %d %d %f %f\n", bestjoin->i, bestjoin->j, bestjoin->dist, bestjoin->criterion);
  }
}
/* Reads an nCodes x nCodes tab-delimited matrix of numbers from
   filename into codes. The first line is a header, validated against
   codesString when checkCodes is true; each data line starts with a row
   label that is ignored. Exits with a message on any error. */
void ReadMatrix(char *filename, /*OUT*/numeric_t codes[MAXCODES][MAXCODES], bool checkCodes) {
  char buf[BUFFER_SIZE] = "";
  FILE *fp = fopen(filename, "r");
  if (fp == NULL) {
    fprintf(stderr, "Cannot read %s\n",filename);
    exit(1);
  }
  if (fgets(buf,sizeof(buf),fp) == NULL) {
    fprintf(stderr, "Error reading header line for %s:\n%s\n", filename, buf);
    exit(1);
  }
  if (checkCodes) {
    int i;
    int iBufPos;
    /* header must be code, tab, code, tab, ... in codesString order */
    for (iBufPos=0,i=0;i<nCodes;i++,iBufPos++) {
      if(buf[iBufPos] != codesString[i]) {
        fprintf(stderr,"Header line\n%s\nin file %s does not have expected code %c # %d in %s\n",
                buf, filename, codesString[i], i, codesString);
        exit(1);
      }
      iBufPos++;
      if(buf[iBufPos] != '\n' && buf[iBufPos] != '\r' && buf[iBufPos] != '\0' && buf[iBufPos] != '\t') {
        fprintf(stderr, "Header line in %s should be tab-delimited\n", filename);
        exit(1);
      }
      if (buf[iBufPos] == '\0' && i < nCodes-1) {
        fprintf(stderr, "Header line in %s ends prematurely\n",filename);
        exit(1);
      }
    } /* end loop over codes */
    /* Should be at end, but allow \n because of potential DOS \r\n */
    if(buf[iBufPos] != '\0' && buf[iBufPos] != '\n' && buf[iBufPos] != '\r') {
      fprintf(stderr, "Header line in %s has too many entries\n", filename);
      exit(1);
    }
  }
  int iLine;
  for (iLine = 0; iLine < nCodes; iLine++) {
    buf[0] = '\0';
    if (fgets(buf,sizeof(buf),fp) == NULL) {
      fprintf(stderr, "Cannot read line %d from file %s\n", iLine+2, filename);
      exit(1);
    }
    char *field = strtok(buf,"\t\r\n");
    /* BUGFIX: an empty line made the first strtok return NULL, and the
       next strtok(NULL,...) call was then invalid -- fail explicitly */
    if (field == NULL) {
      fprintf(stderr, "Cannot read line %d from file %s\n", iLine+2, filename);
      exit(1);
    }
    field = strtok(NULL, "\t"); /* ignore first column (the row label) */
    int iColumn;
    for (iColumn = 0; iColumn < nCodes && field != NULL; iColumn++, field = strtok(NULL,"\t")) {
      if(sscanf(field,ScanNumericSpec,&codes[iLine][iColumn]) != 1) {
        fprintf(stderr,"Cannot parse field %s in file %s\n", field, filename);
        exit(1);
      }
    }
    /* BUGFIX: a short row previously left codes[iLine][iColumn..] uninitialized */
    if (iColumn < nCodes) {
      fprintf(stderr, "Not enough entries on line %d of file %s\n", iLine+2, filename);
      exit(1);
    }
  }
  /* BUGFIX: fp was previously leaked (never fclosed) */
  if (fclose(fp) != 0) {
    fprintf(stderr, "Error reading %s\n", filename);
    exit(1);
  }
}
/* Reads nCodes numbers from filename into codes, exiting with an error
   message on any open, parse, or close failure. */
void ReadVector(char *filename, /*OUT*/numeric_t codes[MAXCODES]) {
  FILE *fp = fopen(filename,"r");
  if (fp == NULL) {
    fprintf(stderr, "Cannot read %s\n",filename);
    exit(1);
  }
  int iCode;
  for (iCode = 0; iCode < nCodes; iCode++) {
    if (fscanf(fp,ScanNumericSpec,&codes[iCode]) != 1) {
      fprintf(stderr,"Cannot read %d entry of %s\n",iCode+1,filename);
      exit(1);
    }
  }
  if (fclose(fp) != 0) {
    fprintf(stderr, "Error reading %s\n",filename);
    exit(1);
  }
}
/* Loads a distance matrix from three files named <prefix>.distances,
   <prefix>.inverses, and <prefix>.eigenvalues, then validates it and
   precomputes derived tables via SetupDistanceMatrix. Caller owns the
   returned structure. */
distance_matrix_t *ReadDistanceMatrix(char *prefix) {
  char buffer[BUFFER_SIZE];
  distance_matrix_t *dmat = (distance_matrix_t*)mymalloc(sizeof(distance_matrix_t));
  /* 20 leaves room for the longest suffix (".eigenvalues") plus NUL */
  if(strlen(prefix) > BUFFER_SIZE-20) {
    fprintf(stderr,"Filename %s too long\n", prefix);
    exit(1);
  }
  snprintf(buffer, sizeof(buffer), "%s.distances", prefix);
  ReadMatrix(buffer, /*OUT*/dmat->distances, /*checkCodes*/true);
  snprintf(buffer, sizeof(buffer), "%s.inverses", prefix);
  ReadMatrix(buffer, /*OUT*/dmat->eigeninv, /*checkCodes*/false);
  snprintf(buffer, sizeof(buffer), "%s.eigenvalues", prefix);
  ReadVector(buffer, /*OUT*/dmat->eigenval);
  if(verbose>1) fprintf(stderr, "Read distance matrix from %s\n",prefix);
  SetupDistanceMatrix(/*IN/OUT*/dmat);
  return(dmat);
}
/* Verifies the distance matrix (symmetry, and agreement between the
   matrix and its eigen-decomposition), then precomputes the derived
   tables eigentot, codeFreq, and gapFreq. Exits on validation failure. */
void SetupDistanceMatrix(/*IN/OUT*/distance_matrix_t *dmat) {
  int i, j, k;
  /* Symmetry and eigen-representation checks */
  for (i = 0; i < nCodes; i++) {
    for (j = 0; j < nCodes; j++) {
      if(fabs(dmat->distances[i][j]-dmat->distances[j][i]) > 1e-6) {
        fprintf(stderr,"Distance matrix not symmetric for %d,%d: %f vs %f\n",
                i+1,j+1,
                dmat->distances[i][j],
                dmat->distances[j][i]);
        exit(1);
      }
      double total = 0.0;
      for (k = 0; k < nCodes; k++)
        total += dmat->eigenval[k] * dmat->eigeninv[k][i] * dmat->eigeninv[k][j];
      if(fabs(total - dmat->distances[i][j]) > 1e-6) {
        fprintf(stderr,"Distance matrix entry %d,%d should be %f but eigen-representation gives %f\n",
                i+1,j+1,dmat->distances[i][j],total);
        exit(1);
      }
    }
  }
  /* eigentot[k] = sum over j of eigeninv[k][j] (the rotated all-ones vector) */
  for (k = 0; k < nCodes; k++) {
    double rowSum = 0.0;
    for (j = 0; j < nCodes; j++)
      rowSum += dmat->eigeninv[k][j];
    dmat->eigentot[k] = rowSum;
  }
  /* codeFreq[code] is the rotated representation of that single character */
  int code;
  for(code = 0; code < nCodes; code++) {
    for (k = 0; k < nCodes; k++)
      dmat->codeFreq[code][k] = dmat->eigeninv[k][code];
  }
  /* gapFreq: average of codeFreq across characters, per component */
  for(code = 0; code < nCodes; code++) {
    double gapFreq = 0.0;
    for (k = 0; k < nCodes; k++)
      gapFreq += dmat->codeFreq[k][code];
    dmat->gapFreq[code] = gapFreq / nCodes;
  }
  if(verbose>10) fprintf(stderr, "Made codeFreq\n");
}
/* Chooses the best of the three topologies around an internal edge,
   given the four subtree profiles A,B,C,D. Fills criteria[] (indexed as
   nni_t) with corrected pairwise distance plus constraint penalty, and
   returns the topology with the lowest criterion. */
nni_t ChooseNNI(profile_t *profiles[4],
                /*OPTIONAL*/distance_matrix_t *dmat,
                int nPos, int nConstraints,
                /*OUT*/double criteria[3]) {
  double d[6]; /* the six pairwise corrected distances (qAB..qBC) */
  CorrectedPairDistances(profiles, 4, dmat, nPos, /*OUT*/d);
  double penalty[3]; /* indexed as nni_t */
  QuartetConstraintPenalties(profiles, nConstraints, /*OUT*/penalty);
  criteria[ABvsCD] = d[qAB] + d[qCD] + penalty[ABvsCD];
  criteria[ACvsBD] = d[qAC] + d[qBD] + penalty[ACvsBD];
  criteria[ADvsBC] = d[qAD] + d[qBC] + penalty[ADvsBC];
  nni_t choice = ABvsCD;
  /* strict < against the current topology but <= against the other
     alternative, so exact ties keep the existing topology */
  if (criteria[ACvsBD] < criteria[ABvsCD] && criteria[ACvsBD] <= criteria[ADvsBC]) {
    choice = ACvsBD;
  } else if (criteria[ADvsBC] < criteria[ABvsCD] && criteria[ADvsBC] <= criteria[ACvsBD]) {
    choice = ADvsBC;
  }
  if (verbose > 1 && penalty[choice] > penalty[ABvsCD] + 1e-6) {
    /* Report the individual constraints that the chosen rearrangement worsens */
    fprintf(stderr, "Worsen constraint: from %.3f to %.3f distance %.3f to %.3f: ",
            penalty[ABvsCD], penalty[choice],
            criteria[ABvsCD], choice == ACvsBD ? criteria[ACvsBD] : criteria[ADvsBC]);
    int iC;
    for (iC = 0; iC < nConstraints; iC++) {
      double ppart[3];
      if (QuartetConstraintPenaltiesPiece(profiles, iC, /*OUT*/ppart)) {
        double old_penalty = ppart[ABvsCD];
        double new_penalty = ppart[choice];
        if (new_penalty > old_penalty + 1e-6)
          fprintf(stderr, " %d (%d/%d %d/%d %d/%d %d/%d)", iC,
                  profiles[0]->nOn[iC], profiles[0]->nOff[iC],
                  profiles[1]->nOn[iC], profiles[1]->nOff[iC],
                  profiles[2]->nOn[iC], profiles[2]->nOff[iC],
                  profiles[3]->nOn[iC], profiles[3]->nOff[iC]);
      }
    }
    fprintf(stderr,"\n");
  }
  if (verbose > 3)
    fprintf(stderr, "NNI scores ABvsCD %.5f ACvsBD %.5f ADvsBC %.5f choice %s\n",
            criteria[ABvsCD], criteria[ACvsBD], criteria[ADvsBC],
            choice == ABvsCD ? "AB|CD" : (choice == ACvsBD ? "AC|BD" : "AD|BC"));
  return(choice);
}
/* Computes the posterior distribution over ancestral states for the
   parent of profiles p1 and p2, given branch lengths len1/len2 (each
   clamped to MLMinBranchLength), an optional transition matrix
   (Jukes-Cantor when NULL), and per-position rate categories. The
   vector storage is allocated for the worst case (one vector per
   position) and shrunk at the end to the positions that actually
   needed one. Constraint counts are summed from the children. Caller
   owns the returned profile. */
profile_t *PosteriorProfile(profile_t *p1, profile_t *p2,
                            double len1, double len2,
                            /*OPTIONAL*/transition_matrix_t *transmat,
                            rates_t *rates,
                            int nPos, int nConstraints) {
  if (len1 < MLMinBranchLength)
    len1 = MLMinBranchLength;
  if (len2 < MLMinBranchLength)
    len2 = MLMinBranchLength;
  int i,j,k;
  profile_t *out = NewProfile(nPos, nConstraints);
  for (i = 0; i < nPos; i++) {
    out->codes[i] = NOCODE;
    out->weights[i] = 1.0;
  }
  /* worst-case allocation: a vector at every position (trimmed below) */
  out->nVectors = nPos;
  out->vectors = (numeric_t*)mymalloc(sizeof(numeric_t)*nCodes*out->nVectors);
  for (i = 0; i < nCodes * out->nVectors; i++) out->vectors[i] = 0;
  int iFreqOut = 0;
  int iFreq1 = 0;
  int iFreq2 = 0;
  numeric_t *expeigenRates1 = NULL, *expeigenRates2 = NULL;
  if (transmat != NULL) {
    expeigenRates1 = ExpEigenRates(len1, transmat, rates);
    expeigenRates2 = ExpEigenRates(len2, transmat, rates);
  }
  if (transmat == NULL) { /* Jukes-Cantor */
    assert(nCodes == 4);
    double *PSame1 = PSameVector(len1, rates);
    double *PDiff1 = PDiffVector(PSame1, rates);
    double *PSame2 = PSameVector(len2, rates);
    double *PDiff2 = PDiffVector(PSame2, rates);
    numeric_t mix1[4], mix2[4];
    for (i=0; i < nPos; i++) {
      int iRate = rates->ratecat[i];
      double w1 = p1->weights[i];
      double w2 = p2->weights[i];
      int code1 = p1->codes[i];
      int code2 = p2->codes[i];
      /* GET_FREQ increments the indices as a side effect -- once per position */
      numeric_t *f1 = GET_FREQ(p1,i,/*IN/OUT*/iFreq1);
      numeric_t *f2 = GET_FREQ(p2,i,/*IN/OUT*/iFreq2);
      /* First try to store a simple profile */
      if (f1 == NULL && f2 == NULL) {
        if (code1 == NOCODE && code2 == NOCODE) {
          /* gap vs gap: output a gap */
          out->codes[i] = NOCODE;
          out->weights[i] = 0.0;
          continue;
        } else if (code1 == NOCODE) {
          /* Posterior(parent | character & gap, len1, len2) = Posterior(parent | character, len1)
             = PSame() for matching characters and 1-PSame() for the rest
             = (pSame - pDiff) * character + (1-(pSame-pDiff)) * gap
          */
          out->codes[i] = code2;
          out->weights[i] = w2 * (PSame2[iRate] - PDiff2[iRate]);
          continue;
        } else if (code2 == NOCODE) {
          out->codes[i] = code1;
          out->weights[i] = w1 * (PSame1[iRate] - PDiff1[iRate]);
          continue;
        } else if (code1 == code2) {
          out->codes[i] = code1;
          double f12code = (w1*PSame1[iRate] + (1-w1)*0.25) * (w2*PSame2[iRate] + (1-w2)*0.25);
          double f12other = (w1*PDiff1[iRate] + (1-w1)*0.25) * (w2*PDiff2[iRate] + (1-w2)*0.25);
          /* posterior probability of code1/code2 after scaling */
          double pcode = f12code/(f12code+3*f12other);
          /* Now f = w * (code ? 1 : 0) + (1-w) * 0.25, so to get pcode we need
             fcode = 1/4 + w1*3/4 or w = (f-1/4)*4/3
          */
          out->weights[i] = (pcode - 0.25) * 4.0/3.0;
          /* This can be zero because of numerical problems, I think */
          if (out->weights[i] < 1e-6) {
            if (verbose > 1)
              fprintf(stderr, "Replaced weight %f with %f from w1 %f w2 %f PSame %f %f f12code %f f12other %f\n",
                      out->weights[i], 1e-6,
                      w1, w2,
                      PSame1[iRate], PSame2[iRate],
                      f12code, f12other);
            out->weights[i] = 1e-6;
          }
          continue;
        }
      }
      /* if we did not compute a simple profile, then do the full computation and
         store the full vector
      */
      if (f1 == NULL) {
        /* expand a code (or all-gap) into a 4-vector mixed with the gap distribution */
        for (j = 0; j < 4; j++)
          mix1[j] = (1-w1)*0.25;
        if(code1 != NOCODE)
          mix1[code1] += w1;
        f1 = mix1;
      }
      if (f2 == NULL) {
        for (j = 0; j < 4; j++)
          mix2[j] = (1-w2)*0.25;
        if(code2 != NOCODE)
          mix2[code2] += w2;
        f2 = mix2;
      }
      out->codes[i] = NOCODE;
      out->weights[i] = 1.0;
      numeric_t *f = GET_FREQ(out,i,/*IN/OUT*/iFreqOut);
      double lkAB = 0;
      for (j = 0; j < 4; j++) {
        f[j] = (f1[j] * PSame1[iRate] + (1.0-f1[j]) * PDiff1[iRate])
          * (f2[j] * PSame2[iRate] + (1.0-f2[j]) * PDiff2[iRate]);
        lkAB += f[j];
      }
      /* normalize the posterior to sum to 1 */
      double lkABInv = 1.0/lkAB;
      for (j = 0; j < 4; j++)
        f[j] *= lkABInv;
    }
    PSame1 = myfree(PSame1, sizeof(double) * rates->nRateCategories);
    PSame2 = myfree(PSame2, sizeof(double) * rates->nRateCategories);
    PDiff1 = myfree(PDiff1, sizeof(double) * rates->nRateCategories);
    PDiff2 = myfree(PDiff2, sizeof(double) * rates->nRateCategories);
  } else if (nCodes == 4) { /* matrix model on nucleotides */
    numeric_t *fGap = &transmat->codeFreq[NOCODE][0];
    numeric_t f1mix[4], f2mix[4];
    for (i=0; i < nPos; i++) {
      if (p1->codes[i] == NOCODE && p2->codes[i] == NOCODE
          && p1->weights[i] == 0 && p2->weights[i] == 0) {
        /* aligning gap with gap -- just output a gap
           out->codes[i] is already set to NOCODE so need not set that */
        out->weights[i] = 0;
        continue;
      }
      int iRate = rates->ratecat[i];
      numeric_t *expeigen1 = &expeigenRates1[iRate*4];
      numeric_t *expeigen2 = &expeigenRates2[iRate*4];
      numeric_t *f1 = GET_FREQ(p1,i,/*IN/OUT*/iFreq1);
      numeric_t *f2 = GET_FREQ(p2,i,/*IN/OUT*/iFreq2);
      numeric_t *fOut = GET_FREQ(out,i,/*IN/OUT*/iFreqOut);
      assert(fOut != NULL);
      if (f1 == NULL) {
        f1 = &transmat->codeFreq[p1->codes[i]][0]; /* codeFreq includes an entry for NOCODE */
        double w = p1->weights[i];
        if (w > 0.0 && w < 1.0) {
          /* mix the character with the gap distribution by weight */
          for (j = 0; j < 4; j++)
            f1mix[j] = w * f1[j] + (1.0-w) * fGap[j];
          f1 = f1mix;
        }
      }
      if (f2 == NULL) {
        f2 = &transmat->codeFreq[p2->codes[i]][0];
        double w = p2->weights[i];
        if (w > 0.0 && w < 1.0) {
          for (j = 0; j < 4; j++)
            f2mix[j] = w * f2[j] + (1.0-w) * fGap[j];
          f2 = f2mix;
        }
      }
      numeric_t fMult1[4] ALIGNED; /* rotated1 * expeigen1 */
      numeric_t fMult2[4] ALIGNED; /* rotated2 * expeigen2 */
#if 0 /* SSE3 is slower */
      vector_multiply(f1, expeigen1, 4, /*OUT*/fMult1);
      vector_multiply(f2, expeigen2, 4, /*OUT*/fMult2);
#else
      for (j = 0; j < 4; j++) {
        fMult1[j] = f1[j]*expeigen1[j];
        fMult2[j] = f2[j]*expeigen2[j];
      }
#endif
      numeric_t fPost[4] ALIGNED; /* in unrotated space */
      for (j = 0; j < 4; j++) {
#if 0 /* SSE3 is slower */
        fPost[j] = vector_dot_product_rot(fMult1, fMult2, &transmat->codeFreq[j][0], 4)
          * transmat->statinv[j]; */
#else
        double out1 = 0;
        double out2 = 0;
        for (k = 0; k < 4; k++) {
          out1 += fMult1[k] * transmat->codeFreq[j][k];
          out2 += fMult2[k] * transmat->codeFreq[j][k];
        }
        fPost[j] = out1*out2*transmat->statinv[j];
#endif
      }
      double fPostTot = 0;
      for (j = 0; j < 4; j++)
        fPostTot += fPost[j];
      assert(fPostTot > fPostTotalTolerance);
      double fPostInv = 1.0/fPostTot;
#if 0 /* SSE3 is slower */
      vector_multiply_by(fPost, fPostInv, 4);
#else
      for (j = 0; j < 4; j++)
        fPost[j] *= fPostInv;
#endif
      /* and finally, divide by stat again & rotate to give the new frequencies */
      matrixt_by_vector4(transmat->eigeninvT, fPost, /*OUT*/fOut);
    } /* end loop over position i */
  } else if (nCodes == 20) { /* matrix model on amino acids */
    numeric_t *fGap = &transmat->codeFreq[NOCODE][0];
    numeric_t f1mix[20] ALIGNED;
    numeric_t f2mix[20] ALIGNED;
    for (i=0; i < nPos; i++) {
      if (p1->codes[i] == NOCODE && p2->codes[i] == NOCODE
          && p1->weights[i] == 0 && p2->weights[i] == 0) {
        /* aligning gap with gap -- just output a gap
           out->codes[i] is already set to NOCODE so need not set that */
        out->weights[i] = 0;
        continue;
      }
      int iRate = rates->ratecat[i];
      numeric_t *expeigen1 = &expeigenRates1[iRate*20];
      numeric_t *expeigen2 = &expeigenRates2[iRate*20];
      numeric_t *f1 = GET_FREQ(p1,i,/*IN/OUT*/iFreq1);
      numeric_t *f2 = GET_FREQ(p2,i,/*IN/OUT*/iFreq2);
      numeric_t *fOut = GET_FREQ(out,i,/*IN/OUT*/iFreqOut);
      assert(fOut != NULL);
      if (f1 == NULL) {
        f1 = &transmat->codeFreq[p1->codes[i]][0]; /* codeFreq includes an entry for NOCODE */
        double w = p1->weights[i];
        if (w > 0.0 && w < 1.0) {
          /* mix the character with the gap distribution by weight */
          for (j = 0; j < 20; j++)
            f1mix[j] = w * f1[j] + (1.0-w) * fGap[j];
          f1 = f1mix;
        }
      }
      if (f2 == NULL) {
        f2 = &transmat->codeFreq[p2->codes[i]][0];
        double w = p2->weights[i];
        if (w > 0.0 && w < 1.0) {
          for (j = 0; j < 20; j++)
            f2mix[j] = w * f2[j] + (1.0-w) * fGap[j];
          f2 = f2mix;
        }
      }
      numeric_t fMult1[20] ALIGNED; /* rotated1 * expeigen1 */
      numeric_t fMult2[20] ALIGNED; /* rotated2 * expeigen2 */
      vector_multiply(f1, expeigen1, 20, /*OUT*/fMult1);
      vector_multiply(f2, expeigen2, 20, /*OUT*/fMult2);
      numeric_t fPost[20] ALIGNED; /* in unrotated space */
      for (j = 0; j < 20; j++) {
        numeric_t value = vector_dot_product_rot(fMult1, fMult2, &transmat->codeFreq[j][0], 20)
          * transmat->statinv[j];
        /* Added this logic try to avoid rare numerical problems */
        fPost[j] = value >= 0 ? value : 0;
      }
      double fPostTot = vector_sum(fPost, 20);
      assert(fPostTot > fPostTotalTolerance);
      double fPostInv = 1.0/fPostTot;
      vector_multiply_by(/*IN/OUT*/fPost, fPostInv, 20);
      int ch = -1; /* the dominant character, if any */
      if (!exactML) {
        for (j = 0; j < 20; j++) {
          if (fPost[j] >= approxMLminf) {
            ch = j;
            break;
          }
        }
      }
      /* now, see if we can use the approximation
         fPost ~= (1 or 0) * w + nearP * (1-w)
         to avoid rotating */
      double w = 0;
      if (ch >= 0) {
        w = (fPost[ch] - transmat->nearP[ch][ch]) / (1.0 - transmat->nearP[ch][ch]);
        for (j = 0; j < 20; j++) {
          if (j != ch) {
            double fRough = (1.0-w) * transmat->nearP[ch][j];
            if (fRough < fPost[j] * approxMLminratio) {
              ch = -1; /* give up on the approximation */
              break;
            }
          }
        }
      }
      if (ch >= 0) {
        nAAPosteriorRough++;
        double wInvStat = w * transmat->statinv[ch];
        for (j = 0; j < 20; j++)
          fOut[j] = wInvStat * transmat->codeFreq[ch][j] + (1.0-w) * transmat->nearFreq[ch][j];
      } else {
        /* and finally, divide by stat again & rotate to give the new frequencies */
        nAAPosteriorExact++;
        for (j = 0; j < 20; j++)
          fOut[j] = vector_multiply_sum(fPost, &transmat->eigeninv[j][0], 20);
      }
    } /* end loop over position i */
  } else {
    assert(0); /* illegal nCodes */
  }
  if (transmat != NULL) {
    expeigenRates1 = myfree(expeigenRates1, sizeof(numeric_t) * rates->nRateCategories * nCodes);
    expeigenRates2 = myfree(expeigenRates2, sizeof(numeric_t) * rates->nRateCategories * nCodes);
  }
  /* Reallocate out->vectors to be the right size */
  out->nVectors = iFreqOut;
  if (out->nVectors == 0)
    out->vectors = (numeric_t*)myfree(out->vectors, sizeof(numeric_t)*nCodes*nPos);
  else
    out->vectors = (numeric_t*)myrealloc(out->vectors,
                                         /*OLDSIZE*/sizeof(numeric_t)*nCodes*nPos,
                                         /*NEWSIZE*/sizeof(numeric_t)*nCodes*out->nVectors,
                                         /*copy*/true); /* try to save space */
  nProfileFreqAlloc += out->nVectors;
  nProfileFreqAvoid += nPos - out->nVectors;
  /* compute total constraints */
  for (i = 0; i < nConstraints; i++) {
    out->nOn[i] = p1->nOn[i] + p2->nOn[i];
    out->nOff[i] = p1->nOff[i] + p2->nOff[i];
  }
  nPosteriorCompute++;
  return(out);
}
/* P(no change) under Jukes-Cantor for each rate category at the given
   branch length: 1/4 + 3/4 * exp(-4/3 * |length * rate|).
   Returns a newly allocated array of nRateCategories entries; caller
   frees with myfree. */
double *PSameVector(double length, rates_t *rates) {
  int nCat = rates->nRateCategories;
  double *pSame = mymalloc(sizeof(double) * nCat);
  int iRate;
  for (iRate = 0; iRate < nCat; iRate++) {
    double relLen = fabs(length * rates->rates[iRate]);
    pSame[iRate] = 0.25 + 0.75 * exp((-4.0/3.0) * relLen);
  }
  return pSame;
}
/* P(change to one specific other nucleotide) = (1 - pSame)/3 for each
   rate category. Returns a newly allocated array of nRateCategories
   entries; caller frees with myfree. */
double *PDiffVector(double *pSame, rates_t *rates) {
  int nCat = rates->nRateCategories;
  double *pDiff = mymalloc(sizeof(double) * nCat);
  int iRate;
  for (iRate = 0; iRate < nCat; iRate++)
    pDiff[iRate] = (1.0 - pSame[iRate]) / 3.0;
  return pDiff;
}
/* exp(eigenvalue * branch length * rate) for every (rate category,
   eigenvalue) pair, laid out with one row of nCodes entries per rate
   category. Returns a newly allocated array; caller frees with myfree. */
numeric_t *ExpEigenRates(double length, transition_matrix_t *transmat, rates_t *rates) {
  numeric_t *expeigen = mymalloc(sizeof(numeric_t) * nCodes * rates->nRateCategories);
  int iRate;
  for (iRate = 0; iRate < rates->nRateCategories; iRate++) {
    double relLen = length * rates->rates[iRate];
    /* very short branch lengths lead to numerical problems so prevent them */
    if (relLen < MLMinRelBranchLength)
      relLen = MLMinRelBranchLength;
    int j;
    for (j = 0; j < nCodes; j++)
      expeigen[iRate*nCodes + j] = exp(relLen * transmat->eigenval[j]);
  }
  return expeigen;
}
double PairLogLk(profile_t *pA, profile_t *pB, double length, int nPos,
/*OPTIONAL*/transition_matrix_t *transmat,
rates_t *rates,
/*OPTIONAL IN/OUT*/double *site_likelihoods) {
double lk = 1.0;
double loglk = 0.0; /* stores underflow of lk during the loop over positions */
int i,j;
assert(rates != NULL && rates->nRateCategories > 0);
numeric_t *expeigenRates = NULL;
if (transmat != NULL)
expeigenRates = ExpEigenRates(length, transmat, rates);
if (transmat == NULL) { /* Jukes-Cantor */
assert (nCodes == 4);
double *pSame = PSameVector(length, rates);
double *pDiff = PDiffVector(pSame, rates);
int iFreqA = 0;
int iFreqB = 0;
for (i = 0; i < nPos; i++) {
int iRate = rates->ratecat[i];
double wA = pA->weights[i];
double wB = pB->weights[i];
int codeA = pA->codes[i];
int codeB = pB->codes[i];
numeric_t *fA = GET_FREQ(pA,i,/*IN/OUT*/iFreqA);
numeric_t *fB = GET_FREQ(pB,i,/*IN/OUT*/iFreqB);
double lkAB = 0;
if (fA == NULL && fB == NULL) {
if (codeA == NOCODE) { /* A is all gaps */
/* gap to gap is sum(j) 0.25 * (0.25 * pSame + 0.75 * pDiff) = sum(i) 0.25*0.25 = 0.25
gap to any character gives the same result
*/
lkAB = 0.25;
} else if (codeB == NOCODE) { /* B is all gaps */
lkAB = 0.25;
} else if (codeA == codeB) { /* A and B match */
lkAB = pSame[iRate] * wA*wB + 0.25 * (1-wA*wB);
} else { /* codeA != codeB */
lkAB = pDiff[iRate] * wA*wB + 0.25 * (1-wA*wB);
}
} else if (fA == NULL) {
/* Compare codeA to profile of B */
if (codeA == NOCODE)
lkAB = 0.25;
else
lkAB = wA * (pDiff[iRate] + fB[codeA] * (pSame[iRate]-pDiff[iRate])) + (1.0-wA) * 0.25;
/* because lkAB = wA * P(codeA->B) + (1-wA) * 0.25
P(codeA -> B) = sum(j) P(B==j) * (j==codeA ? pSame : pDiff)
= sum(j) P(B==j) * pDiff +
= pDiff + P(B==codeA) * (pSame-pDiff)
*/
} else if (fB == NULL) { /* Compare codeB to profile of A */
if (codeB == NOCODE)
lkAB = 0.25;
else
lkAB = wB * (pDiff[iRate] + fA[codeB] * (pSame[iRate]-pDiff[iRate])) + (1.0-wB) * 0.25;
} else { /* both are full profiles */
for (j = 0; j < 4; j++)
lkAB += fB[j] * (fA[j] * pSame[iRate] + (1-fA[j])* pDiff[iRate]); /* P(A|B) */
}
assert(lkAB > 0);
lk *= lkAB;
while (lk < LkUnderflow) {
lk *= LkUnderflowInv;
loglk -= LogLkUnderflow;
}
if (site_likelihoods != NULL)
site_likelihoods[i] *= lkAB;
}
pSame = myfree(pSame, sizeof(double) * rates->nRateCategories);
pDiff = myfree(pDiff, sizeof(double) * rates->nRateCategories);
} else if (nCodes == 4) { /* matrix model on nucleotides */
int iFreqA = 0;
int iFreqB = 0;
numeric_t fAmix[4], fBmix[4];
numeric_t *fGap = &transmat->codeFreq[NOCODE][0];
for (i = 0; i < nPos; i++) {
int iRate = rates->ratecat[i];
numeric_t *expeigen = &expeigenRates[iRate*4];
double wA = pA->weights[i];
double wB = pB->weights[i];
if (wA == 0 && wB == 0 && pA->codes[i] == NOCODE && pB->codes[i] == NOCODE) {
/* Likelihood of A vs B is 1, so nothing changes
Do not need to advance iFreqA or iFreqB */
continue;
}
numeric_t *fA = GET_FREQ(pA,i,/*IN/OUT*/iFreqA);
numeric_t *fB = GET_FREQ(pB,i,/*IN/OUT*/iFreqB);
if (fA == NULL)
fA = &transmat->codeFreq[pA->codes[i]][0];
if (wA > 0.0 && wA < 1.0) {
for (j = 0; j < 4; j++)
fAmix[j] = wA*fA[j] + (1.0-wA)*fGap[j];
fA = fAmix;
}
if (fB == NULL)
fB = &transmat->codeFreq[pB->codes[i]][0];
if (wB > 0.0 && wB < 1.0) {
for (j = 0; j < 4; j++)
fBmix[j] = wB*fB[j] + (1.0-wB)*fGap[j];
fB = fBmix;
}
/* SSE3 instructions do not speed this step up:
numeric_t lkAB = vector_multiply3_sum(expeigen, fA, fB); */
// dsp this is where check for <=0 was added in 2.1.1.LG
double lkAB = 0;
for (j = 0; j < 4; j++)
lkAB += expeigen[j]*fA[j]*fB[j];
assert(lkAB > 0);
if (site_likelihoods != NULL)
site_likelihoods[i] *= lkAB;
lk *= lkAB;
while (lk < LkUnderflow) {
lk *= LkUnderflowInv;
loglk -= LogLkUnderflow;
}
while (lk > LkUnderflowInv) {
lk *= LkUnderflow;
loglk += LogLkUnderflow;
}
}
} else if (nCodes == 20) { /* matrix model on amino acids */
int iFreqA = 0;
int iFreqB = 0;
numeric_t fAmix[20], fBmix[20];
numeric_t *fGap = &transmat->codeFreq[NOCODE][0];
for (i = 0; i < nPos; i++) {
int iRate = rates->ratecat[i];
numeric_t *expeigen = &expeigenRates[iRate*20];
double wA = pA->weights[i];
double wB = pB->weights[i];
if (wA == 0 && wB == 0 && pA->codes[i] == NOCODE && pB->codes[i] == NOCODE) {
/* Likelihood of A vs B is 1, so nothing changes
Do not need to advance iFreqA or iFreqB */
continue;
}
numeric_t *fA = GET_FREQ(pA,i,/*IN/OUT*/iFreqA);
numeric_t *fB = GET_FREQ(pB,i,/*IN/OUT*/iFreqB);
if (fA == NULL)
fA = &transmat->codeFreq[pA->codes[i]][0];
if (wA > 0.0 && wA < 1.0) {
for (j = 0; j < 20; j++)
fAmix[j] = wA*fA[j] + (1.0-wA)*fGap[j];
fA = fAmix;
}
if (fB == NULL)
fB = &transmat->codeFreq[pB->codes[i]][0];
if (wB > 0.0 && wB < 1.0) {
for (j = 0; j < 20; j++)
fBmix[j] = wB*fB[j] + (1.0-wB)*fGap[j];
fB = fBmix;
}
numeric_t lkAB = vector_multiply3_sum(expeigen, fA, fB, 20);
if (!(lkAB > 0)) {
/* If this happens, it indicates a numerical problem that needs to be addressed elsewhere,
so report all the details */
fprintf(stderr, "# FastTree.c::PairLogLk -- numerical problem!\n");
fprintf(stderr, "# This block is intended for loading into R\n");
fprintf(stderr, "lkAB = %.8g\n", lkAB);
fprintf(stderr, "Branch_length= %.8g\nalignment_position=%d\nnCodes=%d\nrate_category=%d\nrate=%.8g\n",
length, i, nCodes, iRate, rates->rates[iRate]);
fprintf(stderr, "wA=%.8g\nwB=%.8g\n", wA, wB);
fprintf(stderr, "codeA = %d\ncodeB = %d\n", pA->codes[i], pB->codes[i]);
fprintf(stderr, "fA = c(");
for (j = 0; j < nCodes; j++) fprintf(stderr, "%s %.8g", j==0?"":",", fA[j]);
fprintf(stderr,")\n");
fprintf(stderr, "fB = c(");
for (j = 0; j < nCodes; j++) fprintf(stderr, "%s %.8g", j==0?"":",", fB[j]);
fprintf(stderr,")\n");
fprintf(stderr, "stat = c(");
for (j = 0; j < nCodes; j++) fprintf(stderr, "%s %.8g", j==0?"":",", transmat->stat[j]);
fprintf(stderr,")\n");
fprintf(stderr, "eigenval = c(");
for (j = 0; j < nCodes; j++) fprintf(stderr, "%s %.8g", j==0?"":",", transmat->eigenval[j]);
fprintf(stderr,")\n");
fprintf(stderr, "expeigen = c(");
for (j = 0; j < nCodes; j++) fprintf(stderr, "%s %.8g", j==0?"":",", expeigen[j]);
fprintf(stderr,")\n");
int k;
fprintf(stderr, "codeFreq = c(");
for (j = 0; j < nCodes; j++) for(k = 0; k < nCodes; k++) fprintf(stderr, "%s %.8g", j==0 && k==0?"":",",
transmat->codeFreq[j][k]);
fprintf(stderr,")\n");
fprintf(stderr, "eigeninv = c(");
for (j = 0; j < nCodes; j++) for(k = 0; k < nCodes; k++) fprintf(stderr, "%s %.8g", j==0 && k==0?"":",",
transmat->eigeninv[j][k]);
fprintf(stderr,")\n");
fprintf(stderr, "# Transform into matrices and compute un-rotated vectors for profiles A and B\n");
fprintf(stderr, "codeFreq = matrix(codeFreq,nrow=20);\n");
fprintf(stderr, "eigeninv = matrix(eigeninv,nrow=20);\n");
fputs("unrotA = stat * (eigeninv %*% fA)\n", stderr);
fputs("unrotB = stat * (eigeninv %*% fB)\n", stderr);
fprintf(stderr,"# End of R block\n");
}
assert(lkAB > 0);
if (site_likelihoods != NULL)
site_likelihoods[i] *= lkAB;
lk *= lkAB;
while (lk < LkUnderflow) {
lk *= LkUnderflowInv;
loglk -= LogLkUnderflow;
}
while (lk > LkUnderflowInv) {
lk *= LkUnderflow;
loglk += LogLkUnderflow;
}
}
} else {
assert(0); /* illegal nCodes */
}
if (transmat != NULL)
expeigenRates = myfree(expeigenRates, sizeof(numeric_t) * rates->nRateCategories * 20);
loglk += log(lk);
nLkCompute++;
return(loglk);
}
/* Log-likelihood of the quartet ((A,B),(C,D)) under the given branch lengths.
   Approximated as P(B|A) * P(D|C) * P(AB|CD), where AB and CD are the
   posterior profiles of the two cherries (see the comment inside).
   branch_lengths holds the five lengths indexed as elsewhere in this file
   (A, B, C, D, internal).
   If site_likelihoods is non-NULL it is reset to 1.0 at every position and
   then accumulates the per-site likelihoods of the three pairwise terms. */
double MLQuartetLogLk(profile_t *pA, profile_t *pB, profile_t *pC, profile_t *pD,
		      int nPos, /*OPTIONAL*/transition_matrix_t *transmat, rates_t *rates,
		      /*IN*/double branch_lengths[5],
		      /*OPTIONAL OUT*/double *site_likelihoods) {
  profile_t *pAB = PosteriorProfile(pA, pB,
				    branch_lengths[0], branch_lengths[1],
				    transmat, rates, nPos, /*nConstraints*/0);
  profile_t *pCD = PosteriorProfile(pC, pD,
				    branch_lengths[2], branch_lengths[3],
				    transmat, rates, nPos, /*nConstraints*/0);
  if (site_likelihoods != NULL) {
    int iPos;
    for (iPos = 0; iPos < nPos; iPos++)
      site_likelihoods[iPos] = 1.0;
  }
  /* Roughly, P(A,B,C,D) = P(A) P(B|A) P(D|C) P(AB | CD) */
  double loglk = PairLogLk(pA, pB, branch_lengths[0]+branch_lengths[1],
			   nPos, transmat, rates, /*OPTIONAL IN/OUT*/site_likelihoods);
  loglk += PairLogLk(pC, pD, branch_lengths[2]+branch_lengths[3],
		     nPos, transmat, rates, /*OPTIONAL IN/OUT*/site_likelihoods);
  loglk += PairLogLk(pAB, pCD, branch_lengths[4],
		     nPos, transmat, rates, /*OPTIONAL IN/OUT*/site_likelihoods);
  pAB = FreeProfile(pAB, nPos, /*nConstraints*/0);
  pCD = FreeProfile(pCD, nPos, /*nConstraints*/0);
  return(loglk);
}
/* Objective for one-dimensional branch-length minimization: the negated
   pairwise log-likelihood of qo->pair1 vs. qo->pair2 at branch length x.
   data must point to a quartet_opt_t with both pair profiles set; its
   nEval counter is incremented on every call. */
double PairNegLogLk(double x, void *data) {
  quartet_opt_t *opt = (quartet_opt_t *)data;
  assert(opt != NULL);
  assert(opt->pair1 != NULL && opt->pair2 != NULL);
  opt->nEval++;
  double pairLogLk = PairLogLk(opt->pair1, opt->pair2, x,
			       opt->nPos, opt->transmat, opt->rates,
			       /*site_lk*/NULL);
  assert(pairLogLk < 1e100);
  if (verbose > 5)
    fprintf(stderr, "PairLogLk(%.4f) = %.4f\n", x, pairLogLk);
  return -pairLogLk;
}
/* Optimize the five branch lengths of the quartet ((A,B),(C,D)) by repeated
   one-dimensional minimization of the pairwise negative log-likelihood, one
   branch at a time: the internal branch first, then the branches to A, B, C,
   and D in turn. branch_lengths holds the starting guesses (clamped up to
   MLMinBranchLength) and is updated in place with the optimized lengths.
   If pStarTest is non-NULL, a "star test" may return early right after the
   internal-branch optimization when collapsing that branch loses almost no
   likelihood; site_likelihoods must be NULL in that mode (asserted below).
   If site_likelihoods is non-NULL, it is filled with per-site likelihoods of
   the final quartet. Returns the quartet log-likelihood at the optimum. */
double MLQuartetOptimize(profile_t *pA, profile_t *pB, profile_t *pC, profile_t *pD,
			 int nPos, /*OPTIONAL*/transition_matrix_t *transmat, rates_t *rates,
			 /*IN/OUT*/double branch_lengths[5],
			 /*OPTIONAL OUT*/bool *pStarTest,
			 /*OPTIONAL OUT*/double *site_likelihoods) {
  int j;
  double start_length[5];
  /* Remember the starting lengths (for the verbose report at the end) and
     clamp the working lengths to the minimum allowed */
  for (j = 0; j < 5; j++) {
    start_length[j] = branch_lengths[j];
    if (branch_lengths[j] < MLMinBranchLength)
      branch_lengths[j] = MLMinBranchLength;
  }
  quartet_opt_t qopt = { nPos, transmat, rates, /*nEval*/0,
			 /*pair1*/NULL, /*pair2*/NULL };
  double f2x, negloglk;
  if (pStarTest != NULL)
    *pStarTest = false;
  /* First optimize internal branch, then branch to A, B, C, D, in turn
     May use star test to quit after internal branch
  */
  profile_t *pAB = PosteriorProfile(pA, pB,
				    branch_lengths[LEN_A], branch_lengths[LEN_B],
				    transmat, rates, nPos, /*nConstraints*/0);
  profile_t *pCD = PosteriorProfile(pC, pD,
				    branch_lengths[LEN_C], branch_lengths[LEN_D],
				    transmat, rates, nPos, /*nConstraints*/0);
  qopt.pair1 = pAB;
  qopt.pair2 = pCD;
  /* Optimize the internal branch, comparing the AB and CD cherries */
  branch_lengths[LEN_I] = onedimenmin(/*xmin*/MLMinBranchLength,
				      /*xguess*/branch_lengths[LEN_I],
				      /*xmax*/6.0,
				      PairNegLogLk,
				      /*data*/&qopt,
				      /*ftol*/MLFTolBranchLength,
				      /*atol*/MLMinBranchLengthTolerance,
				      /*OUT*/&negloglk,
				      /*OUT*/&f2x);
  if (pStarTest != NULL) {
    assert(site_likelihoods == NULL);
    /* Likelihood if the internal branch is collapsed to the minimum length */
    double loglkStar = -PairNegLogLk(MLMinBranchLength, &qopt);
    if (loglkStar < -negloglk - closeLogLkLimit) {
      /* Star-like quartet: stop early; the returned value adds back the
	 within-cherry terms P(B|A) and P(D|C) */
      *pStarTest = true;
      double off = PairLogLk(pA, pB,
			     branch_lengths[LEN_A] + branch_lengths[LEN_B],
			     qopt.nPos, qopt.transmat, qopt.rates, /*site_lk*/NULL)
	+ PairLogLk(pC, pD,
		    branch_lengths[LEN_C] + branch_lengths[LEN_D],
		    qopt.nPos, qopt.transmat, qopt.rates, /*site_lk*/NULL);
      pAB = FreeProfile(pAB, nPos, /*nConstraints*/0);
      pCD = FreeProfile(pCD, nPos, /*nConstraints*/0);
      return (-negloglk + off);
    }
  }
  pAB = FreeProfile(pAB, nPos, /*nConstraints*/0);
  /* Optimize the branch to A against the posterior of (B,(C,D)) */
  profile_t *pBCD = PosteriorProfile(pB, pCD,
				     branch_lengths[LEN_B], branch_lengths[LEN_I],
				     transmat, rates, nPos, /*nConstraints*/0);
  qopt.pair1 = pA;
  qopt.pair2 = pBCD;
  branch_lengths[LEN_A] = onedimenmin(/*xmin*/MLMinBranchLength,
				      /*xguess*/branch_lengths[LEN_A],
				      /*xmax*/6.0,
				      PairNegLogLk,
				      /*data*/&qopt,
				      /*ftol*/MLFTolBranchLength,
				      /*atol*/MLMinBranchLengthTolerance,
				      /*OUT*/&negloglk,
				      /*OUT*/&f2x);
  pBCD = FreeProfile(pBCD, nPos, /*nConstraints*/0);
  /* Optimize the branch to B against the posterior of (A,(C,D)) */
  profile_t *pACD = PosteriorProfile(pA, pCD,
				     branch_lengths[LEN_A], branch_lengths[LEN_I],
				     transmat, rates, nPos, /*nConstraints*/0);
  qopt.pair1 = pB;
  qopt.pair2 = pACD;
  branch_lengths[LEN_B] = onedimenmin(/*xmin*/MLMinBranchLength,
				      /*xguess*/branch_lengths[LEN_B],
				      /*xmax*/6.0,
				      PairNegLogLk,
				      /*data*/&qopt,
				      /*ftol*/MLFTolBranchLength,
				      /*atol*/MLMinBranchLengthTolerance,
				      /*OUT*/&negloglk,
				      /*OUT*/&f2x);
  pACD = FreeProfile(pACD, nPos, /*nConstraints*/0);
  pCD = FreeProfile(pCD, nPos, /*nConstraints*/0);
  /* Rebuild pAB with the updated A and B lengths before optimizing C and D */
  pAB = PosteriorProfile(pA, pB,
			 branch_lengths[LEN_A], branch_lengths[LEN_B],
			 transmat, rates, nPos, /*nConstraints*/0);
  /* Optimize the branch to C against the posterior of ((A,B),D) */
  profile_t *pABD = PosteriorProfile(pAB, pD,
				     branch_lengths[LEN_I], branch_lengths[LEN_D],
				     transmat, rates, nPos, /*nConstraints*/0);
  qopt.pair1 = pC;
  qopt.pair2 = pABD;
  branch_lengths[LEN_C] = onedimenmin(/*xmin*/MLMinBranchLength,
				      /*xguess*/branch_lengths[LEN_C],
				      /*xmax*/6.0,
				      PairNegLogLk,
				      /*data*/&qopt,
				      /*ftol*/MLFTolBranchLength,
				      /*atol*/MLMinBranchLengthTolerance,
				      /*OUT*/&negloglk,
				      /*OUT*/&f2x);
  pABD = FreeProfile(pABD, nPos, /*nConstraints*/0);
  /* Optimize the branch to D against the posterior of ((A,B),C) */
  profile_t *pABC = PosteriorProfile(pAB, pC,
				     branch_lengths[LEN_I], branch_lengths[LEN_C],
				     transmat, rates, nPos, /*nConstraints*/0);
  qopt.pair1 = pD;
  qopt.pair2 = pABC;
  branch_lengths[LEN_D] = onedimenmin(/*xmin*/MLMinBranchLength,
				      /*xguess*/branch_lengths[LEN_D],
				      /*xmax*/6.0,
				      PairNegLogLk,
				      /*data*/&qopt,
				      /*ftol*/MLFTolBranchLength,
				      /*atol*/MLMinBranchLengthTolerance,
				      /*OUT*/&negloglk,
				      /*OUT*/&f2x);
  /* Compute the total quartet likelihood
     PairLogLk(ABC,D) + PairLogLk(AB,C) + PairLogLk(A,B)
  */
  double loglkABCvsD = -negloglk;
  if (site_likelihoods) {
    for (j = 0; j < nPos; j++)
      site_likelihoods[j] = 1.0;
    /* Re-run the last pairing only to populate site_likelihoods; its
       log-likelihood was already captured in loglkABCvsD */
    PairLogLk(pABC, pD, branch_lengths[LEN_D],
	      qopt.nPos, qopt.transmat, qopt.rates, /*IN/OUT*/site_likelihoods);
  }
  double quartetloglk = loglkABCvsD
    + PairLogLk(pAB, pC, branch_lengths[LEN_I] + branch_lengths[LEN_C],
		qopt.nPos, qopt.transmat, qopt.rates,
		/*IN/OUT*/site_likelihoods)
    + PairLogLk(pA, pB, branch_lengths[LEN_A] + branch_lengths[LEN_B],
		qopt.nPos, qopt.transmat, qopt.rates,
		/*IN/OUT*/site_likelihoods);
  pABC = FreeProfile(pABC, nPos, /*nConstraints*/0);
  pAB = FreeProfile(pAB, nPos, /*nConstraints*/0);
  if (verbose > 3) {
    double loglkStart = MLQuartetLogLk(pA, pB, pC, pD, nPos, transmat, rates, start_length, /*site_lk*/NULL);
    fprintf(stderr, "Optimize loglk from %.5f to %.5f eval %d lengths from\n"
	    "   %.5f %.5f %.5f %.5f %.5f to\n"
	    "   %.5f %.5f %.5f %.5f %.5f\n",
	    loglkStart, quartetloglk, qopt.nEval,
	    start_length[0], start_length[1], start_length[2], start_length[3], start_length[4],
	    branch_lengths[0], branch_lengths[1], branch_lengths[2], branch_lengths[3], branch_lengths[4]);
  }
  return(quartetloglk);
}
/* Choose the best of the three quartet topologies (AB|CD, AC|BD, AD|BC) by
   maximum likelihood, optimizing branch lengths for each candidate with
   MLQuartetOptimize and subtracting constraint penalties.
   criteria receives the three penalized log-likelihoods; len holds the five
   branch lengths on entry and is overwritten with the winner's lengths.
   bFast enables the star-topology early exit (serial builds only; it is
   forced off under OpenMP, and also whenever AB|CD is not the
   constraint-preferred topology). Returns the winning topology. */
nni_t MLQuartetNNI(profile_t *profiles[4],
		   /*OPTIONAL*/transition_matrix_t *transmat,
		   rates_t *rates,
		   int nPos, int nConstraints,
		   /*OUT*/double criteria[3], /* The three potential quartet log-likelihoods */
		   /*IN/OUT*/numeric_t len[5],
		   bool bFast)
{
  int i;
  double lenABvsCD[5] = {len[LEN_A], len[LEN_B], len[LEN_C], len[LEN_D], len[LEN_I]};
  double lenACvsBD[5] = {len[LEN_A], len[LEN_C], len[LEN_B], len[LEN_D], len[LEN_I]};   /* Swap B & C */
  double lenADvsBC[5] = {len[LEN_A], len[LEN_D], len[LEN_C], len[LEN_B], len[LEN_I]};   /* Swap B & D */
  bool bConsiderAC = true;
  bool bConsiderAD = true;
  int iRound;
  int nRounds = mlAccuracy < 2 ? 2 : mlAccuracy;
  double penalty[3];
  QuartetConstraintPenalties(profiles, nConstraints, /*OUT*/penalty);
  /* Only allow the fast star test when AB|CD is at least as constraint-
     compatible as the alternatives */
  if (penalty[ABvsCD] > penalty[ACvsBD] || penalty[ABvsCD] > penalty[ADvsBC])
    bFast = false;
#ifdef OPENMP
  bFast = false; /* turn off star topology test */
#endif
  for (iRound = 0; iRound < nRounds; iRound++) {
    bool bStarTest = false;
    {
#ifdef OPENMP
      #pragma omp parallel
      #pragma omp sections
#endif
      {
#ifdef OPENMP
	#pragma omp section
#endif
	{
	  criteria[ABvsCD] = MLQuartetOptimize(profiles[0], profiles[1], profiles[2], profiles[3],
					       nPos, transmat, rates,
					       /*IN/OUT*/lenABvsCD,
					       bFast ? &bStarTest : NULL,
					       /*site_likelihoods*/NULL)
	    - penalty[ABvsCD];	/* subtract penalty b/c we are trying to maximize log lk */
	}
#ifdef OPENMP
	#pragma omp section
#else
	/* Serial builds only: if AB|CD looks star-like, skip the
	   alternative topologies entirely */
	if (bStarTest) {
	  nStarTests++;
	  criteria[ACvsBD] = -1e20;
	  criteria[ADvsBC] = -1e20;
	  len[LEN_I] = lenABvsCD[LEN_I];
	  return(ABvsCD);
	}
#endif
	{
	  if (bConsiderAC)
	    criteria[ACvsBD] = MLQuartetOptimize(profiles[0], profiles[2], profiles[1], profiles[3],
						 nPos, transmat, rates,
						 /*IN/OUT*/lenACvsBD, NULL, /*site_likelihoods*/NULL)
	      - penalty[ACvsBD];
	}
#ifdef OPENMP
	#pragma omp section
#endif
	{
	  if (bConsiderAD)
	    criteria[ADvsBC] = MLQuartetOptimize(profiles[0], profiles[3], profiles[2], profiles[1],
						 nPos, transmat, rates,
						 /*IN/OUT*/lenADvsBC, NULL, /*site_likelihoods*/NULL)
	      - penalty[ADvsBC];
	}
      }
    } /* end parallel sections */
    if (mlAccuracy < 2) {
      /* If clearly worse then ABvsCD, or have short internal branch length and worse, then
         give up */
      if (criteria[ACvsBD] < criteria[ABvsCD] - closeLogLkLimit
	  || (lenACvsBD[LEN_I] <= 2.0*MLMinBranchLength && criteria[ACvsBD] < criteria[ABvsCD]))
	bConsiderAC = false;
      if (criteria[ADvsBC] < criteria[ABvsCD] - closeLogLkLimit
	  || (lenADvsBC[LEN_I] <= 2.0*MLMinBranchLength && criteria[ADvsBC] < criteria[ABvsCD]))
	bConsiderAD = false;
      if (!bConsiderAC && !bConsiderAD)
	break;
      /* If clearly better than either alternative, then give up
         (Comparison is probably biased in favor of ABvsCD anyway) */
      if (criteria[ACvsBD] > criteria[ABvsCD] + closeLogLkLimit
	  && criteria[ACvsBD] > criteria[ADvsBC] + closeLogLkLimit)
	break;
      if (criteria[ADvsBC] > criteria[ABvsCD] + closeLogLkLimit
	  && criteria[ADvsBC] > criteria[ACvsBD] + closeLogLkLimit)
	break;
    }
  } /* end loop over rounds */
  if (verbose > 2) {
    fprintf(stderr, "Optimized quartet for %d rounds: ABvsCD %.5f ACvsBD %.5f ADvsBC %.5f\n",
	    iRound, criteria[ABvsCD], criteria[ACvsBD], criteria[ADvsBC]);
  }
  /* Pick the topology with the best penalized log-likelihood; ties favor AB|CD */
  if (criteria[ACvsBD] > criteria[ABvsCD] && criteria[ACvsBD] > criteria[ADvsBC]) {
    for (i = 0; i < 5; i++) len[i] = lenACvsBD[i];
    return(ACvsBD);
  } else if (criteria[ADvsBC] > criteria[ABvsCD] && criteria[ADvsBC] > criteria[ACvsBD]) {
    for (i = 0; i < 5; i++) len[i] = lenADvsBC[i];
    return(ADvsBC);
  } else {
    for (i = 0; i < 5; i++) len[i] = lenABvsCD[i];
    return(ABvsCD);
  }
}
/* Total branch length of the tree. If recomputeProfiles is set, first
   rebuilds the profiles of internal (non-root, non-leaf) nodes via a
   postorder traversal; always recomputes all branch lengths with
   UpdateBranchLengths() before summing them. */
double TreeLength(/*IN/OUT*/NJ_t *NJ, bool recomputeProfiles) {
  if (recomputeProfiles) {
    traversal_t trav = InitTraversal(NJ);
    int node = NJ->root;
    while ((node = TraversePostorder(node, NJ, /*IN/OUT*/trav, /*pUp*/NULL)) >= 0) {
      /* nothing to do for leaves or the root */
      if (node >= NJ->nSeq && node != NJ->root)
	SetProfile(/*IN/OUT*/NJ, node, /*noweight*/-1.0);
    }
    trav = FreeTraversal(trav, NJ);
  }
  UpdateBranchLengths(/*IN/OUT*/NJ);
  double sum = 0;
  int i;
  for (i = 0; i < NJ->maxnode; i++)
    sum += NJ->branchlength[i];
  return sum;
}
/* Log-likelihood of the entire tree, accumulated over a postorder traversal
   by pairing each internal node's first two children (and, at a trifurcating
   root, pairing their posterior against the third child).
   If site_loglk is non-NULL it receives per-position log-likelihoods;
   a working per-site likelihood vector is rescaled against underflow.
   For Jukes-Cantor (nCodes==4, no transition matrix) a per-position
   log(4) prior correction is applied at the end (see the long comment). */
double TreeLogLk(/*IN*/NJ_t *NJ, /*OPTIONAL OUT*/double *site_loglk) {
  int i;
  if (NJ->nSeq < 2)
    return(0.0);
  double loglk = 0.0;
  double *site_likelihood = NULL;
  if (site_loglk != NULL) {
    site_likelihood = mymalloc(sizeof(double)*NJ->nPos);
    for (i = 0; i < NJ->nPos; i++) {
      site_likelihood[i] = 1.0;
      site_loglk[i] = 0.0;
    }
  }
  traversal_t traversal = InitTraversal(NJ);
  int node = NJ->root;
  while((node = TraversePostorder(node, NJ, /*IN/OUT*/traversal, /*pUp*/NULL)) >= 0) {
    int nChild = NJ->child[node].nChild;
    if (nChild == 0)	/* leaf */
      continue;
    assert(nChild >= 2);
    int *children = NJ->child[node].child;
    /* Likelihood of the first two children joined across their branches */
    double loglkchild = PairLogLk(NJ->profiles[children[0]], NJ->profiles[children[1]],
				  NJ->branchlength[children[0]]+NJ->branchlength[children[1]],
				  NJ->nPos, NJ->transmat, &NJ->rates, /*IN/OUT*/site_likelihood);
    loglk += loglkchild;
    if (site_likelihood != NULL) {
      /* prevent underflows */
      for (i = 0; i < NJ->nPos; i++) {
	while(site_likelihood[i] < LkUnderflow) {
	  site_likelihood[i] *= LkUnderflowInv;
	  site_loglk[i] -= LogLkUnderflow;
	}
      }
    }
    if (verbose > 2)
      fprintf(stderr, "At %d: LogLk(%d:%.4f,%d:%.4f) = %.3f\n",
	      node,
	      children[0], NJ->branchlength[children[0]],
	      children[1], NJ->branchlength[children[1]],
	      loglkchild);
    if (NJ->child[node].nChild == 3) {
      assert(node == NJ->root);
      /* Infer the common parent of the 1st two to define the third... */
      profile_t *pAB = PosteriorProfile(NJ->profiles[children[0]],
					NJ->profiles[children[1]],
					NJ->branchlength[children[0]],
					NJ->branchlength[children[1]],
					NJ->transmat, &NJ->rates,
					NJ->nPos, /*nConstraints*/0);
      double loglkup = PairLogLk(pAB, NJ->profiles[children[2]],
				 NJ->branchlength[children[2]],
				 NJ->nPos, NJ->transmat, &NJ->rates,
				 /*IN/OUT*/site_likelihood);
      loglk += loglkup;
      if (verbose > 2)
	fprintf(stderr, "At root %d: LogLk((%d/%d),%d:%.3f) = %.3f\n",
		node, children[0], children[1], children[2],
		NJ->branchlength[children[2]],
		loglkup);
      pAB = FreeProfile(pAB, NJ->nPos, NJ->nConstraints);
    }
  }
  traversal = FreeTraversal(traversal,NJ);
  if (site_likelihood != NULL) {
    /* Fold the remaining (rescaled) per-site likelihoods into the output */
    for (i = 0; i < NJ->nPos; i++) {
      site_loglk[i] += log(site_likelihood[i]);
    }
    site_likelihood = myfree(site_likelihood, sizeof(double)*NJ->nPos);
  }
  /* For Jukes-Cantor, with a tree of size 4, if the children of the root are
     (A,B), C, and D, then
     P(ABCD) = P(A) P(B|A) P(C|AB) P(D|ABC)
     Above we compute P(B|A) P(C|AB) P(D|ABC) -- note P(B|A) is at the child of root
     and P(C|AB) P(D|ABC) is at root.
     Similarly if the children of the root are C, D, and (A,B), then
     P(ABCD) = P(C|D) P(A|B) P(AB|CD) P(D), and above we compute that except for P(D)
     So we need to multiply by P(A) = 0.25, so we pay log(4) at each position
     (if ungapped). Each gapped position in any sequence reduces the payment by log(4)
     For JTT or GTR, we are computing P(A & B) and the posterior profiles are scaled to take
     the prior into account, so we do not need any correction.
     codeFreq[NOCODE] is scaled x higher so that P(-) = 1 not P(-)=1/nCodes, so gaps
     do not need to be corrected either.
  */
  if (nCodes == 4 && NJ->transmat == NULL) {
    int nGaps = 0;
    double logNCodes = log((double)nCodes);
    for (i = 0; i < NJ->nPos; i++) {
      int nGapsThisPos = 0;
      for (node = 0; node < NJ->nSeq; node++) {
	unsigned char *codes = NJ->profiles[node]->codes;
	if (codes[i] == NOCODE)
	  nGapsThisPos++;
      }
      nGaps += nGapsThisPos;
      if (site_loglk != NULL) {
	site_loglk[i] += nGapsThisPos * logNCodes;
	/* NOTE(review): this inner test repeats the enclosing condition and
	   is always true here */
	if (nCodes == 4 && NJ->transmat == NULL)
	  site_loglk[i] -= logNCodes;
      }
    }
    loglk -= NJ->nPos * logNCodes;
    loglk += nGaps * logNCodes;	/* do not pay for gaps -- only Jukes-Cantor */
  }
  return(loglk);
}
/* Fit a GTR model to the tree. Nucleotide frequencies come from freq_in if
   given, otherwise from alignment counts with pseudocounts of 1. The six
   exchange rates are optimized coordinate-wise over nRounds passes by
   one-dimensional minimization of the tree's negative log-likelihood
   (GTRNegLogLk), then normalized so the last rate is 1. Installs the fitted
   transition matrix in NJ, recomputes ML profiles, and re-optimizes all
   branch lengths. Progress is logged to fpLog if non-NULL. */
void SetMLGtr(/*IN/OUT*/NJ_t *NJ, /*OPTIONAL IN*/double *freq_in, /*OPTIONAL WRITE*/FILE *fpLog) {
  int i;
  assert(nCodes==4);
  gtr_opt_t gtr;
  gtr.NJ = NJ;
  gtr.fpLog = fpLog;
  if (freq_in != NULL) {
    for (i=0; i<4; i++)
      gtr.freq[i]=freq_in[i];
  } else {
    /* n[] and sum were int in FastTree 2.1.9 and earlier -- this
       caused gtr analyses to fail on analyses with >2e9 positions */
    long n[4] = {1,1,1,1}; /* pseudocounts */
    for (i=0; i<NJ->nSeq; i++) {
      unsigned char *codes = NJ->profiles[i]->codes;
      int iPos;
      for (iPos=0; iPos<NJ->nPos; iPos++)
	if (codes[iPos] < 4)
	  n[codes[iPos]]++;
    }
    long sum = n[0]+n[1]+n[2]+n[3];
    for (i=0; i<4; i++)
      gtr.freq[i] = n[i]/(double)sum;
  }
  for (i=0; i<6; i++)
    gtr.rates[i] = 1.0;
  int nRounds = mlAccuracy < 2 ? 2 : mlAccuracy;
  for (i = 0; i < nRounds; i++) {
    for (gtr.iRate = 0; gtr.iRate < 6; gtr.iRate++) {
      /* Fix: the step total was hard-coded to 12, which understated the
	 total when mlAccuracy > 2 makes nRounds exceed 2 */
      ProgressReport("Optimizing GTR model, step %d of %d", i*6+gtr.iRate+1, nRounds*6, 0, 0);
      double negloglk, f2x;
      gtr.rates[gtr.iRate] = onedimenmin(/*xmin*/0.05,
					 /*xguess*/gtr.rates[gtr.iRate],
					 /*xmax*/20.0,
					 GTRNegLogLk,
					 /*data*/&gtr,
					 /*ftol*/0.001,
					 /*atol*/0.0001,
					 /*OUT*/&negloglk,
					 /*OUT*/&f2x);
    }
  }
  /* normalize gtr so last rate is 1 -- specifying that rate separately is useful for optimization only */
  for (i = 0; i < 5; i++)
    gtr.rates[i] /= gtr.rates[5];
  gtr.rates[5] = 1.0;
  if (verbose) {
    fprintf(stderr, "GTR Frequencies: %.4f %.4f %.4f %.4f\n", gtr.freq[0], gtr.freq[1], gtr.freq[2], gtr.freq[3]);
    fprintf(stderr, "GTR rates(ac ag at cg ct gt) %.4f %.4f %.4f %.4f %.4f %.4f\n",
	    gtr.rates[0],gtr.rates[1],gtr.rates[2],gtr.rates[3],gtr.rates[4],gtr.rates[5]);
  }
  if (fpLog != NULL) {
    fprintf(fpLog, "GTRFreq\t%.4f\t%.4f\t%.4f\t%.4f\n", gtr.freq[0], gtr.freq[1], gtr.freq[2], gtr.freq[3]);
    fprintf(fpLog, "GTRRates\t%.4f\t%.4f\t%.4f\t%.4f\t%.4f\t%.4f\n",
	    gtr.rates[0],gtr.rates[1],gtr.rates[2],gtr.rates[3],gtr.rates[4],gtr.rates[5]);
  }
  myfree(NJ->transmat, sizeof(transition_matrix_t));
  NJ->transmat = CreateGTR(gtr.rates, gtr.freq);
  RecomputeMLProfiles(/*IN/OUT*/NJ);
  OptimizeAllBranchLengths(/*IN/OUT*/NJ);
}
/* Objective for GTR rate optimization: the negated tree log-likelihood with
   rate gtr->iRate replaced by x. Builds and installs a trial GTR transition
   matrix, recomputes ML profiles, evaluates the tree, then frees the trial
   matrix and restores the previous one. The profiles are NOT restored; the
   caller is expected to recompute them. */
double GTRNegLogLk(double x, void *data) {
  gtr_opt_t *gtr = (gtr_opt_t*)data;
  assert(nCodes == 4);
  assert(gtr->NJ != NULL);
  assert(gtr->iRate >= 0 && gtr->iRate < 6);
  assert(x > 0);
  transition_matrix_t *savedMat = gtr->NJ->transmat;
  double trialRates[6];
  int iRate;
  for (iRate = 0; iRate < 6; iRate++)
    trialRates[iRate] = gtr->rates[iRate];
  trialRates[gtr->iRate] = x;
  FILE *fpLog = gtr->fpLog;
  if (fpLog)
    fprintf(fpLog, "GTR_Opt\tfreq %.5f %.5f %.5f %.5f rates %.5f %.5f %.5f %.5f %.5f %.5f\n",
	    gtr->freq[0], gtr->freq[1], gtr->freq[2], gtr->freq[3],
	    trialRates[0], trialRates[1], trialRates[2], trialRates[3], trialRates[4], trialRates[5]);
  gtr->NJ->transmat = CreateGTR(trialRates, gtr->freq);
  RecomputeMLProfiles(/*IN/OUT*/gtr->NJ);
  double loglk = TreeLogLk(gtr->NJ, /*site_loglk*/NULL);
  myfree(gtr->NJ->transmat, sizeof(transition_matrix_t));
  gtr->NJ->transmat = savedMat;
  /* Do not recompute profiles -- assume the caller will do that */
  if (verbose > 2)
    fprintf(stderr, "GTR LogLk(%.5f %.5f %.5f %.5f %.5f %.5f) = %f\n",
	    trialRates[0], trialRates[1], trialRates[2], trialRates[3], trialRates[4], trialRates[5], loglk);
  if (fpLog)
    fprintf(fpLog, "GTR_Opt\tGTR LogLk(%.5f %.5f %.5f %.5f %.5f %.5f) = %f\n",
	    trialRates[0], trialRates[1], trialRates[2], trialRates[3], trialRates[4], trialRates[5], loglk);
  return -loglk;
}
/* Candidate per-site rates: evenly spaced on a log scale from
   1/nRateCategories to nRateCategories.
   Caller must free the resulting vector of nRateCategories rates. */
numeric_t *MLSiteRates(int nRateCategories) {
  numeric_t *rates = mymalloc(sizeof(numeric_t)*nRateCategories);
  if (nRateCategories == 1) {
    /* Fix: the spacing formula below divides by (nRateCategories-1), which
       would be 0/0 = NaN for a single category; one category means rate 1 */
    rates[0] = 1.0;
    return(rates);
  }
  /* Even spacing from 1/nRate to nRate */
  double logNCat = log((double)nRateCategories);
  double logMinRate = -logNCat;
  double logMaxRate = logNCat;
  double logd = (logMaxRate-logMinRate)/(double)(nRateCategories-1);
  int i;
  for (i = 0; i < nRateCategories; i++)
    rates[i] = exp(logMinRate + logd*(double)i);
  return(rates);
}
/* Per-site log-likelihoods of the tree under each candidate rate.
   Returns a newly allocated array of NJ->nPos * nRateCategories doubles,
   laid out as [iRate*nPos + iPos]; the caller must free it.
   The tree's own rate vector and ML profiles are restored before return. */
double *MLSiteLikelihoodsByRate(/*IN*/NJ_t *NJ, /*IN*/numeric_t *rates, int nRateCategories) {
  double *site_loglk = mymalloc(sizeof(double)*NJ->nPos*nRateCategories);
  /* Stash the tree's rate vector; a scratch vector replaces it below */
  assert(NJ->rates.nRateCategories > 0);
  numeric_t *savedRates = NJ->rates.rates;
  NJ->rates.rates = mymalloc(sizeof(numeric_t) * NJ->rates.nRateCategories);
  int iRate;
  for (iRate = 0; iRate < nRateCategories; iRate++) {
    /* Force every category to the trial rate and re-evaluate the tree */
    int iCat;
    for (iCat = 0; iCat < NJ->rates.nRateCategories; iCat++)
      NJ->rates.rates[iCat] = rates[iRate];
    RecomputeMLProfiles(/*IN/OUT*/NJ);
    double loglk = TreeLogLk(NJ, /*OUT*/&site_loglk[NJ->nPos*iRate]);
    ProgressReport("Site likelihoods with rate category %d of %d", iRate+1, nRateCategories, 0, 0);
    if (verbose > 2) {
      int iPos;
      fprintf(stderr, "Rate %.3f Loglk %.3f SiteLogLk", rates[iRate], loglk);
      for (iPos = 0; iPos < NJ->nPos; iPos++)
	fprintf(stderr,"\t%.3f", site_loglk[NJ->nPos*iRate + iPos]);
      fprintf(stderr,"\n");
    }
  }
  /* Put back the original rates and rebuild the matching profiles */
  myfree(NJ->rates.rates, sizeof(numeric_t) * NJ->rates.nRateCategories);
  NJ->rates.rates = savedRates;
  RecomputeMLProfiles(/*IN/OUT*/NJ);
  return(site_loglk);
}
/* Assign each alignment position to its best rate category (CAT
   approximation). Candidate rates come from MLSiteRates; per-site
   log-likelihoods per rate come from MLSiteLikelihoodsByRate; each site gets
   the rate maximizing log-likelihood plus a Gamma(shape 3, scale 1/3) prior.
   The chosen rates are then normalized to average 1 across positions, stored
   in NJ->rates (which takes ownership of the rates vector), and the ML
   profiles are recomputed. nRateCategories == 1 is a no-op beyond resetting
   to a single unit-rate category. */
void SetMLRates(/*IN/OUT*/NJ_t *NJ, int nRateCategories) {
  assert(nRateCategories > 0);
  AllocRateCategories(/*IN/OUT*/&NJ->rates, 1, NJ->nPos); /* set to 1 category of rate 1 */
  if (nRateCategories == 1) {
    RecomputeMLProfiles(/*IN/OUT*/NJ);
    return;
  }
  numeric_t *rates = MLSiteRates(nRateCategories);
  double *site_loglk = MLSiteLikelihoodsByRate(/*IN*/NJ, /*IN*/rates, nRateCategories);
  /* Select best rate for each site, correcting for the prior
     For a prior, use a gamma distribution with shape parameter 3, scale 1/3, so
     Prior(rate) ~ rate**2 * exp(-3*rate)
     log Prior(rate) = C + 2 * log(rate) - 3 * rate
  */
  double sumRates = 0;
  int iPos;
  int iRate;
  for (iPos = 0; iPos < NJ->nPos; iPos++) {
    int iBest = -1;
    double dBest = -1e20;
    for (iRate = 0; iRate < nRateCategories; iRate++) {
      double site_loglk_with_prior = site_loglk[NJ->nPos*iRate + iPos]
	+ 2.0 * log(rates[iRate]) - 3.0 * rates[iRate];
      if (site_loglk_with_prior > dBest) {
	iBest = iRate;
	dBest = site_loglk_with_prior;
      }
    }
    if (verbose > 2)
      fprintf(stderr, "Selected rate category %d rate %.3f for position %d\n",
	      iBest, rates[iBest], iPos+1);
    NJ->rates.ratecat[iPos] = iBest;
    sumRates += rates[iBest];
  }
  site_loglk = myfree(site_loglk, sizeof(double)*NJ->nPos*nRateCategories);
  /* Force the rates to average to 1 */
  double avgRate = sumRates/NJ->nPos;
  for (iRate = 0; iRate < nRateCategories; iRate++)
    rates[iRate] /= avgRate;
  /* Save the rates (NJ->rates takes ownership of the rates vector) */
  NJ->rates.rates = myfree(NJ->rates.rates, sizeof(numeric_t) * NJ->rates.nRateCategories);
  NJ->rates.rates = rates;
  NJ->rates.nRateCategories = nRateCategories;
  /* Update profiles based on rates */
  RecomputeMLProfiles(/*IN/OUT*/NJ);
  if (verbose) {
    fprintf(stderr, "Switched to using %d rate categories (CAT approximation)\n", nRateCategories);
    fprintf(stderr, "Rate categories were divided by %.3f so that average rate = 1.0\n", avgRate);
    fprintf(stderr, "CAT-based log-likelihoods may not be comparable across runs\n");
    if (!gammaLogLk)
      fprintf(stderr, "Use -gamma for approximate but comparable Gamma(20) log-likelihoods\n");
  }
}
/* Gamma-distributed-rates log-likelihood over discrete rate categories.
   Each category's prior weight is the Gamma(alpha) CDF mass between the
   midpoints of adjacent (scaled) rates; per-site likelihoods are combined
   relative to the best category's log-likelihood to avoid underflow on
   large trees. If gamma_loglk_sites is non-NULL, the per-site
   log-likelihoods are written there as well. */
double GammaLogLk(/*IN*/siteratelk_t *s, /*OPTIONAL OUT*/double *gamma_loglk_sites) {
  int iRate, iPos;
  double *catWeight = mymalloc(sizeof(double) * s->nRateCats);
  for (iRate = 0; iRate < s->nRateCats; iRate++) {
    /* Probability mass between the midpoints of neighboring rates;
       the first/last categories absorb the tails */
    double pLo = (iRate == 0) ? 0.0 :
      PGamma(s->mult * (s->rates[iRate-1] + s->rates[iRate])/2.0, s->alpha);
    double pHi = (iRate == s->nRateCats-1) ? 1.0 :
      PGamma(s->mult * (s->rates[iRate]+s->rates[iRate+1])/2.0, s->alpha);
    catWeight[iRate] = pHi - pLo;
  }
  double total = 0.0;
  for (iPos = 0; iPos < s->nPos; iPos++) {
    /* Scale by the best category's log-likelihood to avoid underflow */
    double best = -1e20;
    for (iRate = 0; iRate < s->nRateCats; iRate++) {
      double siteRateLogLk = s->site_loglk[s->nPos*iRate + iPos];
      if (siteRateLogLk > best)
	best = siteRateLogLk;
    }
    double scaled = 0;	/* likelihood divided by exp(best) */
    for (iRate = 0; iRate < s->nRateCats; iRate++) {
      double lk = exp(s->site_loglk[s->nPos*iRate + iPos] - best);
      scaled += lk * catWeight[iRate];
    }
    double siteLogLk = best + log(scaled);
    total += siteLogLk;
    if (gamma_loglk_sites != NULL)
      gamma_loglk_sites[iPos] = siteLogLk;
  }
  catWeight = myfree(catWeight, sizeof(double)*s->nRateCats);
  return(total);
}
/* One-dimensional objective for onedimenmin: negated Gamma log-likelihood
   as a function of the shape parameter alpha (stored into the siteratelk_t). */
double OptAlpha(double alpha, void *data) {
  siteratelk_t *srl = (siteratelk_t *)data;
  srl->alpha = alpha;
  double loglk = GammaLogLk(srl, NULL);
  return -loglk;
}
/* One-dimensional objective for onedimenmin: negated Gamma log-likelihood
   as a function of the rate multiplier (stored into the siteratelk_t). */
double OptMult(double mult, void *data) {
  siteratelk_t *srl = (siteratelk_t *)data;
  srl->mult = mult;
  double loglk = GammaLogLk(srl, NULL);
  return -loglk;
}
/* Fit the Gamma shape parameter (alpha) and a global rate multiplier by
   alternating one-dimensional minimizations of the negated Gamma
   log-likelihood, up to 10 rounds or until the improvement per round drops
   below 0.001. Input site_loglk must hold per-site log-likelihoods for each
   rate (layout [iRate*nPos + iPos]). Reports the fit to stderr and,
   optionally, per-site detail to fpLog. Returns 1/mult, the factor by which
   branch lengths should be rescaled. */
double RescaleGammaLogLk(int nPos, int nRateCats, /*IN*/numeric_t *rates, /*IN*/double *site_loglk,
			 /*OPTIONAL*/FILE *fpLog) {
  siteratelk_t s = { /*mult*/1.0, /*alpha*/1.0, nPos, nRateCats, rates, site_loglk };
  double fx, f2x;
  int i;
  fx = -GammaLogLk(&s, NULL);
  if (verbose>2)
    fprintf(stderr, "Optimizing alpha, starting at loglk %.3f\n", -fx);
  for (i = 0; i < 10; i++) {
    ProgressReport("Optimizing alpha round %d", i+1, 0, 0, 0);
    double start = fx;
    /* Alternate: optimize alpha holding mult fixed, then mult holding alpha fixed */
    s.alpha = onedimenmin(0.01, s.alpha, 10.0, OptAlpha, &s, 0.001, 0.001, &fx, &f2x);
    if (verbose>2)
      fprintf(stderr, "Optimize alpha round %d to %.3f lk %.3f\n", i+1, s.alpha, -fx);
    s.mult = onedimenmin(0.01, s.mult, 10.0, OptMult, &s, 0.001, 0.001, &fx, &f2x);
    if (verbose>2)
      fprintf(stderr, "Optimize mult round %d to %.3f lk %.3f\n", i+1, s.mult, -fx);
    /* fx is the negated log-likelihood: converged once a full round
       improves it by less than 0.001 */
    if (fx > start - 0.001) {
      if (verbose>2)
	fprintf(stderr, "Optimizing alpha & mult converged\n");
      break;
    }
  }
  double *gamma_loglk_sites = mymalloc(sizeof(double) * nPos);
  double gammaLogLk = GammaLogLk(&s, /*OUT*/gamma_loglk_sites);
  if (verbose > 0)
    fprintf(stderr, "Gamma(%d) LogLk = %.3f alpha = %.3f rescaling lengths by %.3f\n",
	    nRateCats, gammaLogLk, s.alpha, 1/s.mult);
  if (fpLog) {
    int iPos;
    int iRate;
    fprintf(fpLog, "Gamma%dLogLk\t%.3f\tApproximate\tAlpha\t%.3f\tRescale\t%.3f\n",
	    nRateCats, gammaLogLk, s.alpha, 1/s.mult);
    fprintf(fpLog, "Gamma%d\tSite\tLogLk", nRateCats);
    for (iRate = 0; iRate < nRateCats; iRate++)
      fprintf(fpLog, "\tr=%.3f", rates[iRate]/s.mult);
    fprintf(fpLog,"\n");
    for (iPos = 0; iPos < nPos; iPos++) {
      fprintf(fpLog, "Gamma%d\t%d\t%.3f", nRateCats, iPos, gamma_loglk_sites[iPos]);
      for (iRate = 0; iRate < nRateCats; iRate++)
	fprintf(fpLog, "\t%.3f", site_loglk[nPos*iRate + iPos]);
      fprintf(fpLog,"\n");
    }
  }
  gamma_loglk_sites = myfree(gamma_loglk_sites, sizeof(double) * nPos);
  return(1.0/s.mult);
}
/* Optimize a single branch length joining two profiles by one-dimensional
   minimization of the negated pairwise log-likelihood.
   *branch_length is the starting guess on entry and the optimized length on
   exit. Returns the log-likelihood at the optimum. */
double MLPairOptimize(profile_t *pA, profile_t *pB,
		      int nPos, /*OPTIONAL*/transition_matrix_t *transmat, rates_t *rates,
		      /*IN/OUT*/double *branch_length) {
  quartet_opt_t qopt;
  qopt.nPos = nPos;
  qopt.transmat = transmat;
  qopt.rates = rates;
  qopt.nEval = 0;
  qopt.pair1 = pA;
  qopt.pair2 = pB;
  double negloglk = 0, f2x = 0;
  *branch_length = onedimenmin(/*xmin*/MLMinBranchLength,
			       /*xguess*/*branch_length,
			       /*xmax*/6.0,
			       PairNegLogLk,
			       /*data*/&qopt,
			       /*ftol*/MLFTolBranchLength,
			       /*atol*/MLMinBranchLengthTolerance,
			       /*OUT*/&negloglk,
			       /*OUT*/&f2x);
  return -negloglk;		/* the log likelihood */
}
/* Optimize every branch length in the tree by maximum likelihood.
   For a 2-sequence tree, optimizes the single pairwise distance and splits
   it evenly. Otherwise traverses in postorder; at each internal node it
   optimizes the three surrounding branches (to the two children and toward
   the parent, the latter represented by an "up-profile"), iterating twice,
   and then recomputes the node's profile for use further up the tree. */
void OptimizeAllBranchLengths(/*IN/OUT*/NJ_t *NJ) {
  if (NJ->nSeq < 2)
    return;
  if (NJ->nSeq == 2) {
    /* Only one (implicit) branch: optimize its total length and split it */
    int parent = NJ->root;
    assert(NJ->child[parent].nChild==2);
    int nodes[2] = { NJ->child[parent].child[0], NJ->child[parent].child[1] };
    double length = 1.0;
    (void)MLPairOptimize(NJ->profiles[nodes[0]], NJ->profiles[nodes[1]],
			 NJ->nPos, NJ->transmat, &NJ->rates, /*IN/OUT*/&length);
    NJ->branchlength[nodes[0]] = length/2.0;
    NJ->branchlength[nodes[1]] = length/2.0;
    return;
  };
  traversal_t traversal = InitTraversal(NJ);
  profile_t **upProfiles = UpProfiles(NJ);
  int node = NJ->root;
  int iDone = 0;
  while((node = TraversePostorder(node, NJ, /*IN/OUT*/traversal, /*pUp*/NULL)) >= 0) {
    int nChild = NJ->child[node].nChild;
    if (nChild > 0) {
      if ((iDone % 100) == 0)
	ProgressReport("ML Lengths %d of %d splits", iDone+1, NJ->maxnode - NJ->nSeq, 0, 0);
      iDone++;
      /* optimize the branch lengths between self, parent, and children,
         with two iterations
      */
      assert(nChild == 2 || nChild == 3);
      /* At the (trifurcating) root the third slot is the third child;
	 elsewhere it stands for the branch toward the parent */
      int nodes[3] = { NJ->child[node].child[0],
		       NJ->child[node].child[1],
		       nChild == 3 ? NJ->child[node].child[2] : node };
      profile_t *profiles[3] = { NJ->profiles[nodes[0]],
				 NJ->profiles[nodes[1]],
				 nChild == 3 ? NJ->profiles[nodes[2]]
				 : GetUpProfile(/*IN/OUT*/upProfiles, NJ, node, /*useML*/true) };
      int iter;
      for (iter = 0; iter < 2; iter++) {
	int i;
	for (i = 0; i < 3; i++) {
	  /* Optimize branch i against the posterior of the other two */
	  profile_t *pA = profiles[i];
	  int b1 = (i+1) % 3;
	  int b2 = (i+2) % 3;
	  profile_t *pB = PosteriorProfile(profiles[b1], profiles[b2],
					   NJ->branchlength[nodes[b1]],
					   NJ->branchlength[nodes[b2]],
					   NJ->transmat, &NJ->rates, NJ->nPos, /*nConstraints*/0);
	  double len = NJ->branchlength[nodes[i]];
	  if (len < MLMinBranchLength)
	    len = MLMinBranchLength;
	  (void)MLPairOptimize(pA, pB, NJ->nPos, NJ->transmat, &NJ->rates, /*IN/OUT*/&len);
	  NJ->branchlength[nodes[i]] = len;
	  pB = FreeProfile(pB, NJ->nPos, /*nConstraints*/0);
	  if (verbose>3)
	    fprintf(stderr, "Optimize length for %d to %.3f\n",
		    nodes[i], NJ->branchlength[nodes[i]]);
	}
      }
      if (node != NJ->root) {
	/* Ancestors see the updated lengths through a refreshed profile */
	RecomputeProfile(/*IN/OUT*/NJ, /*IN/OUT*/upProfiles, node, /*useML*/true);
	DeleteUpProfile(upProfiles, NJ, node);
      }
    }
  }
  traversal = FreeTraversal(traversal,NJ);
  upProfiles = FreeUpProfiles(upProfiles,NJ);
}
/* Rebuild every internal node's profile as the ML (posterior) profile of its
   two children.  Postorder traversal guarantees both child profiles are
   up-to-date before the parent's profile is recomputed.  Leaves and the
   trifurcating root (nChild != 2) are left untouched. */
void RecomputeMLProfiles(/*IN/OUT*/NJ_t *NJ) {
  traversal_t trav = InitTraversal(NJ);
  int iNode = NJ->root;
  while ((iNode = TraversePostorder(iNode, NJ, /*IN/OUT*/trav, /*pUp*/NULL)) >= 0) {
    if (NJ->child[iNode].nChild != 2)
      continue;			/* nothing to recompute for leaves or the root */
    int *kids = NJ->child[iNode].child;
    NJ->profiles[iNode] = FreeProfile(NJ->profiles[iNode], NJ->nPos, NJ->nConstraints);
    NJ->profiles[iNode] = PosteriorProfile(NJ->profiles[kids[0]], NJ->profiles[kids[1]],
					   NJ->branchlength[kids[0]], NJ->branchlength[kids[1]],
					   NJ->transmat, &NJ->rates, NJ->nPos, NJ->nConstraints);
  }
  trav = FreeTraversal(trav, NJ);
}
/* Rebuild every internal node's profile as the (unweighted) average of its
   two children's profiles, in postorder so children are current before the
   parent.  dmat may be NULL; it is passed straight through to AverageProfile. */
void RecomputeProfiles(/*IN/OUT*/NJ_t *NJ, /*OPTIONAL*/distance_matrix_t *dmat) {
  traversal_t trav = InitTraversal(NJ);
  int iNode = NJ->root;
  while ((iNode = TraversePostorder(iNode, NJ, /*IN/OUT*/trav, /*pUp*/NULL)) >= 0) {
    if (NJ->child[iNode].nChild != 2)
      continue;			/* leaves and the trifurcating root keep their profiles */
    int *kids = NJ->child[iNode].child;
    NJ->profiles[iNode] = FreeProfile(NJ->profiles[iNode], NJ->nPos, NJ->nConstraints);
    NJ->profiles[iNode] = AverageProfile(NJ->profiles[kids[0]], NJ->profiles[kids[1]],
					 NJ->nPos, NJ->nConstraints,
					 dmat, /*unweighted*/-1.0);
  }
  trav = FreeTraversal(trav, NJ);
}
/* One round of nearest-neighbor interchanges over the whole tree.
   Returns the number of NNIs performed this round and sets *dMaxDelta to the
   largest improvement seen.  See the long comment below for the algorithm. */
int NNI(/*IN/OUT*/NJ_t *NJ, int iRound, int nRounds, bool useML,
	/*IN/OUT*/nni_stats_t *stats,
	/*OUT*/double *dMaxDelta) {
  /* For each non-root node N, with children A,B, sibling C, and uncle D,
     we compare the current topology AB|CD to the alternate topologies
     AC|BD and AD|BC, by using the 4 relevant profiles.

     If useML is true, it uses quartet maximum likelihood, and it
     updates branch lengths as it goes.

     If useML is false, it uses the minimum-evolution criterion with
     log-corrected distances on profiles.  (If logdist is false, then
     the log correction is not done.) If useML is false, then NNI()
     does NOT modify the branch lengths.

     Regardless of whether it changes the topology, it recomputes the
     profile for the node, using the pairwise distances and BIONJ-like
     weightings (if bionj is set). The parent's profile has changed,
     but recomputing it is not necessary because we will visit it
     before we need it (we use postorder, so we may visit the sibling
     and its children before we visit the parent, but we never
     consider an ancestor's profile, so that is OK). When we change
     the parent's profile, this alters the uncle's up-profile, so we
     remove that.  Finally, if the topology has changed, we remove the
     up-profiles of the nodes.

     If we do an NNI during post-order traversal, the result is a bit
     tricky. E.g. if we are at node N, and have visited its children A
     and B but not its uncle C, and we do an NNI that swaps B & C,
     then the post-order traversal will visit C, and its children, but
     then on the way back up, it will skip N, as it has already
     visited it.  So, the profile of N will not be recomputed: any
     changes beneath C will not be reflected in the profile of N, and
     the profile of N will be slightly stale. This will be corrected
     on the next round of NNIs.
  */
  /* The "significant improvement" threshold, in log-likelihood units for ML
     or in branch-length units for minimum evolution. */
  double supportThreshold = useML ? treeLogLkDelta : MEMinDelta;
  int i;
  *dMaxDelta = 0.0;
  int nNNIThisRound = 0;

  if (NJ->nSeq <= 3)
    return(0);			/* nothing to do */
  if (verbose > 2) {
    fprintf(stderr, "Beginning round %d of NNIs with ml? %d\n", iRound, useML?1:0);
    PrintNJInternal(/*WRITE*/stderr, NJ, /*useLen*/useML && iRound > 0 ? 1 : 0);
  }
  /* For each node the upProfile or NULL */
  profile_t **upProfiles = UpProfiles(NJ);
  traversal_t traversal = InitTraversal(NJ);

  /* Identify nodes we can skip traversing into: a node whose split (and all
     four surrounding nodes' splits) has been stable and well-supported for
     at least two rounds is unlikely to change now. */
  int node;
  if (fastNNI) {
    for (node = 0; node < NJ->maxnode; node++) {
      if (node != NJ->root
	  && node >= NJ->nSeq
	  && stats[node].age >= 2
	  && stats[node].subtreeAge >= 2
	  && stats[node].support > supportThreshold) {
	int nodeABCD[4];
	SetupABCD(NJ, node, NULL, NULL, /*OUT*/nodeABCD, useML);
	for (i = 0; i < 4; i++)
	  if (stats[nodeABCD[i]].age == 0 && stats[nodeABCD[i]].support > supportThreshold)
	    break;
	if (i == 4) {
	  SkipTraversalInto(node, /*IN/OUT*/traversal);
	  if (verbose > 2)
	    fprintf(stderr, "Skipping subtree at %d: child %d %d parent %d age %d subtreeAge %d support %.3f\n",
		    node, nodeABCD[0], nodeABCD[1], NJ->parent[node],
		    stats[node].age, stats[node].subtreeAge, stats[node].support);
	}
      }
    }
  }

  int iDone = 0;
  bool bUp;
  node = NJ->root;
  while((node = TraversePostorder(node, NJ, /*IN/OUT*/traversal, &bUp)) >= 0) {
    if (node < NJ->nSeq || node == NJ->root)
      continue; /* nothing to do for leaves or root */
    if (bUp) {
      /* Second visit on the way back up: just refresh this node's profile
	 and drop now-stale up-profiles around it. */
      if(verbose > 2)
	fprintf(stderr, "Going up back to node %d\n", node);
      /* No longer needed */
      for (i = 0; i < NJ->child[node].nChild; i++)
	DeleteUpProfile(upProfiles, NJ, NJ->child[node].child[i]);
      DeleteUpProfile(upProfiles, NJ, node);
      RecomputeProfile(/*IN/OUT*/NJ, /*IN/OUT*/upProfiles, node, useML);
      continue;
    }
    if ((iDone % 100) == 0) {
      /* NOTE(review): these sprintf calls are not bounds-checked against
	 buf[100]; the fixed format pieces fit today, but snprintf would be
	 safer if the message ever grows. */
      char buf[100];
      sprintf(buf, "%s NNI round %%d of %%d, %%d of %%d splits", useML ? "ML" : "ME");
      if (iDone > 0)
	sprintf(buf+strlen(buf), ", %d changes", nNNIThisRound);
      if (nNNIThisRound > 0)
	sprintf(buf+strlen(buf), " (max delta %.3f)", *dMaxDelta);
      ProgressReport(buf, iRound+1, nRounds, iDone+1, NJ->maxnode - NJ->nSeq);
    }
    iDone++;

    profile_t *profiles[4];
    int nodeABCD[4];
    /* Note -- during the first round of ML NNIs, we use the min-evo-based branch lengths,
       which may be suboptimal */
    SetupABCD(NJ, node, /*OUT*/profiles, /*IN/OUT*/upProfiles, /*OUT*/nodeABCD, useML);

    /* Given our 4 profiles, consider doing a swap */
    int nodeA = nodeABCD[0];
    int nodeB = nodeABCD[1];
    int nodeC = nodeABCD[2];
    int nodeD = nodeABCD[3];

    nni_t choice = ABvsCD;

    if (verbose > 2)
      fprintf(stderr,"Considering NNI around %d: Swap A=%d B=%d C=%d D=up(%d) or parent %d\n",
	      node, nodeA, nodeB, nodeC, nodeD, NJ->parent[node]);
    if (verbose > 3 && useML) {
      double len[5] = { NJ->branchlength[nodeA], NJ->branchlength[nodeB], NJ->branchlength[nodeC], NJ->branchlength[nodeD],
			NJ->branchlength[node] };
      for (i=0; i < 5; i++)
	if (len[i] < MLMinBranchLength)
	  len[i] = MLMinBranchLength;
      fprintf(stderr, "Starting quartet likelihood %.3f len %.3f %.3f %.3f %.3f %.3f\n",
	      MLQuartetLogLk(profiles[0],profiles[1],profiles[2],profiles[3],NJ->nPos,NJ->transmat,&NJ->rates,len, /*site_lk*/NULL),
	      len[0], len[1], len[2], len[3], len[4]);
    }

    numeric_t newlength[5];
    double criteria[3];
    if (useML) {
      for (i = 0; i < 4; i++)
	newlength[i] = NJ->branchlength[nodeABCD[i]];
      newlength[4] = NJ->branchlength[node];
      bool bFast = mlAccuracy < 2 && stats[node].age > 0;
      choice = MLQuartetNNI(profiles, NJ->transmat, &NJ->rates, NJ->nPos, NJ->nConstraints,
			    /*OUT*/criteria, /*IN/OUT*/newlength, bFast);
    } else {
      choice = ChooseNNI(profiles, NJ->distance_matrix, NJ->nPos, NJ->nConstraints,
			 /*OUT*/criteria);
      /* invert criteria so that higher is better, as in ML case, to simplify code below */
      for (i = 0; i < 3; i++)
	criteria[i] = -criteria[i];
    }

    if (choice == ACvsBD) {
      /* swap B and C */
      ReplaceChild(/*IN/OUT*/NJ, node, nodeB, nodeC);
      ReplaceChild(/*IN/OUT*/NJ, NJ->parent[node], nodeC, nodeB);
    } else if (choice == ADvsBC) {
      /* swap A and C */
      ReplaceChild(/*IN/OUT*/NJ, node, nodeA, nodeC);
      ReplaceChild(/*IN/OUT*/NJ, NJ->parent[node], nodeC, nodeA);
    }

    if (useML) {
      /* update branch length for the internal branch, and of any
	 branches that lead to leaves, b/c those will not are not
	 the internal branch for NNI and would not otherwise be set.
      */
      if (choice == ADvsBC) {
	/* For ADvsBC, MLQuartetNNI swaps B with D, but we swap A with C */
	double length2[5] = { newlength[LEN_C], newlength[LEN_D],
			      newlength[LEN_A], newlength[LEN_B],
			      newlength[LEN_I] };
	int i;
	for (i = 0; i < 5; i++) newlength[i] = length2[i];
	/* and swap A and C */
	double tmp = newlength[LEN_A];
	newlength[LEN_A] = newlength[LEN_C];
	newlength[LEN_C] = tmp;
      } else if (choice == ACvsBD) {
	/* swap B and C */
	double tmp = newlength[LEN_B];
	newlength[LEN_B] = newlength[LEN_C];
	newlength[LEN_C] = tmp;
      }

      NJ->branchlength[node] = newlength[LEN_I];
      NJ->branchlength[nodeA] = newlength[LEN_A];
      NJ->branchlength[nodeB] = newlength[LEN_B];
      NJ->branchlength[nodeC] = newlength[LEN_C];
      NJ->branchlength[nodeD] = newlength[LEN_D];
    }

    /* NOTE(review): the second `verbose > 2` below is redundant (the whole
       condition already requires verbose>2); it reads as if `verbose > 3`
       was intended for logging non-swaps -- confirm before changing. */
    if (verbose>2 && (choice != ABvsCD || verbose > 2))
      fprintf(stderr,"NNI around %d: Swap A=%d B=%d C=%d D=out(C) -- choose %s %s %.4f\n",
	      node, nodeA, nodeB, nodeC,
	      choice == ACvsBD ? "AC|BD" : (choice == ABvsCD ? "AB|CD" : "AD|BC"),
	      useML ? "delta-loglk" : "-deltaLen",
	      criteria[choice] - criteria[ABvsCD]);
    if(verbose >= 3 && slow && useML)
      fprintf(stderr, "Old tree lk -- %.4f\n", TreeLogLk(NJ, /*site_likelihoods*/NULL));

    /* update stats, *dMaxDelta, etc. */
    if (choice == ABvsCD) {
      stats[node].age++;
    } else {
      if (useML)
	nML_NNI++;
      else
	nNNI++;
      nNNIThisRound++;
      stats[node].age = 0;
      stats[nodeA].age = 0;
      stats[nodeB].age = 0;
      stats[nodeC].age = 0;
      stats[nodeD].age = 0;
    }
    stats[node].delta = criteria[choice] - criteria[ABvsCD]; /* 0 if ABvsCD */
    if (stats[node].delta > *dMaxDelta)
      *dMaxDelta = stats[node].delta;

    /* support is improvement of score for self over better of alternatives */
    stats[node].support = 1e20;
    for (i = 0; i < 3; i++)
      if (choice != i && criteria[choice]-criteria[i] < stats[node].support)
	stats[node].support = criteria[choice]-criteria[i];

    /* subtreeAge is the number of rounds since self or descendent had a significant improvement */
    if (stats[node].delta > supportThreshold)
      stats[node].subtreeAge = 0;
    else {
      stats[node].subtreeAge++;
      for (i = 0; i < 2; i++) {
	int child = NJ->child[node].child[i];
	if (stats[node].subtreeAge > stats[child].subtreeAge)
	  stats[node].subtreeAge = stats[child].subtreeAge;
      }
    }

    /* update profiles and free up unneeded up-profiles */
    if (choice == ABvsCD) {
      /* No longer needed */
      DeleteUpProfile(upProfiles, NJ, nodeA);
      DeleteUpProfile(upProfiles, NJ, nodeB);
      DeleteUpProfile(upProfiles, NJ, nodeC);
      RecomputeProfile(/*IN/OUT*/NJ, /*IN/OUT*/upProfiles, node, useML);
      if(slow && useML)
	UpdateForNNI(NJ, node, upProfiles, useML);
    } else {
      UpdateForNNI(NJ, node, upProfiles, useML);
    }
    if(verbose > 2 && slow && useML) {
      /* Note we recomputed profiles back up to root already if slow */
      PrintNJInternal(/*WRITE*/stderr, NJ, /*useLen*/true);
      fprintf(stderr, "New tree lk -- %.4f\n", TreeLogLk(NJ, /*site_likelihoods*/NULL));
    }
  } /* end postorder traversal */
  traversal = FreeTraversal(traversal,NJ);
  if (verbose>=2) {
    int nUp = 0;
    for (i = 0; i < NJ->maxnodes; i++)
      if (upProfiles[i] != NULL)
	nUp++;
    fprintf(stderr, "N up profiles at end of NNI: %d\n", nUp);
  }
  upProfiles = FreeUpProfiles(upProfiles,NJ);
  return(nNNIThisRound);
}
/* Allocate and initialize the per-node NNI statistics used by NNI()/fastNNI.
   Leaves and the root never participate in an NNI, so they get a huge
   age/subtreeAge to keep them from ever looking "recently changed". */
nni_stats_t *InitNNIStats(NJ_t *NJ) {
  const int LargeAge = 1000000;
  nni_stats_t *stats = mymalloc(sizeof(nni_stats_t)*NJ->maxnode);
  int iNode;
  for (iNode = 0; iNode < NJ->maxnode; iNode++) {
    nni_stats_t *s = &stats[iNode];
    s->delta = 0;
    s->support = 0;
    /* true for nodes that can never be the center of an NNI */
    bool bNeverNNI = (iNode == NJ->root || iNode < NJ->nSeq);
    s->age = bNeverNNI ? LargeAge : 0;
    s->subtreeAge = bNeverNNI ? LargeAge : 0;
  }
  return(stats);
}
/* Release the array allocated by InitNNIStats; returns NULL for the caller
   to store back (stats = FreeNNIStats(stats, NJ)). */
nni_stats_t *FreeNNIStats(nni_stats_t *stats, NJ_t *NJ) {
  size_t nBytes = sizeof(nni_stats_t) * NJ->maxnode;
  return(myfree(stats, nBytes));
}
/* Build (and actually perform) a chain of up to maxSteps NNIs that moves
   nodeMove through the tree, starting with an NNI around nodeAround.
   bFirstAC picks the direction of the first swap; subsequent swaps take the
   better of the two alternatives by the min-evo criterion.  Each performed
   swap is recorded in steps[] (with its change in tree length) so the caller
   can unwind any suffix with UnwindSPRStep.  Returns the number of steps
   actually taken. */
int FindSPRSteps(/*IN/OUT*/NJ_t *NJ, 
		 int nodeMove,	 /* the node to move multiple times */
		 int nodeAround, /* sibling or parent of node to NNI to start the chain */
		 /*IN/OUT*/profile_t **upProfiles,
		 /*OUT*/spr_step_t *steps,
		 int maxSteps,
		 bool bFirstAC) {
  int iStep;
  for (iStep = 0; iStep < maxSteps; iStep++) {
    if (NJ->child[nodeAround].nChild != 2)
      break;			/* no further to go */

    /* Consider the NNIs around nodeAround */
    profile_t *profiles[4];
    int nodeABCD[4];
    SetupABCD(NJ, nodeAround, /*OUT*/profiles, /*IN/OUT*/upProfiles, /*OUT*/nodeABCD, /*useML*/false);
    double criteria[3];
    (void) ChooseNNI(profiles, NJ->distance_matrix, NJ->nPos, NJ->nConstraints,
		     /*OUT*/criteria);

    /* Do & save the swap.  deltaLength is relative to the current AB|CD
       topology (negative = improvement); nodes[] records the two nodes
       whose parents were exchanged, so the swap can be undone later. */
    spr_step_t *step = &steps[iStep];
    if (iStep == 0 ? bFirstAC : criteria[ACvsBD] < criteria[ADvsBC]) {
      /* swap B & C to put AC together */
      step->deltaLength = criteria[ACvsBD] - criteria[ABvsCD];
      step->nodes[0] = nodeABCD[1];
      step->nodes[1] = nodeABCD[2];
    } else {
      /* swap AC to put AD together */
      step->deltaLength = criteria[ADvsBC] - criteria[ABvsCD];
      step->nodes[0] = nodeABCD[0];
      step->nodes[1] = nodeABCD[2];
    }

    if (verbose>3) {
      fprintf(stderr, "SPR chain step %d for %d around %d swap %d %d deltaLen %.5f\n",
	      iStep+1, nodeAround, nodeMove, step->nodes[0], step->nodes[1], step->deltaLength);
      if (verbose>4)
	PrintNJInternal(stderr, NJ, /*useLen*/false);
    }
    ReplaceChild(/*IN/OUT*/NJ, nodeAround, step->nodes[0], step->nodes[1]);
    ReplaceChild(/*IN/OUT*/NJ, NJ->parent[nodeAround], step->nodes[1], step->nodes[0]);
    UpdateForNNI(/*IN/OUT*/NJ, nodeAround, /*IN/OUT*/upProfiles, /*useML*/false);

    /* set the new nodeAround -- either parent(nodeMove) or sibling(nodeMove) --
       so that it different from current nodeAround
     */
    int newAround[2] = { NJ->parent[nodeMove], Sibling(NJ, nodeMove) };
    if (NJ->parent[nodeMove] == NJ->root)
      RootSiblings(NJ, nodeMove, /*OUT*/newAround);
    assert(newAround[0] == nodeAround || newAround[1] == nodeAround);
    assert(newAround[0] != newAround[1]);
    /* keep moving: pivot next around whichever neighbor we did NOT just use */
    nodeAround = newAround[newAround[0] == nodeAround ? 1 : 0];
  }
  return(iStep);
}
/* Undo one recorded SPR step by swapping the two saved nodes back between
   their current parents, then refresh profiles around the lower (younger)
   of the two parents. */
void UnwindSPRStep(/*IN/OUT*/NJ_t *NJ,
		   /*IN*/spr_step_t *step,
		   /*IN/OUT*/profile_t **upProfiles) {
  int parents[2];
  int i;
  for (i = 0; i < 2; i++) {
    assert(step->nodes[i] >= 0 && step->nodes[i] < NJ->maxnodes);
    parents[i] = NJ->parent[step->nodes[i]];
    assert(parents[i] >= 0);
  }
  assert(parents[0] != parents[1]);
  ReplaceChild(/*IN/OUT*/NJ, parents[0], step->nodes[0], step->nodes[1]);
  ReplaceChild(/*IN/OUT*/NJ, parents[1], step->nodes[1], step->nodes[0]);
  /* The two parents are adjacent (one is the other's parent); update around
     the one that is lower in the tree, as UpdateForNNI expects the NNI node. */
  int iYounger = 0;
  if (NJ->parent[parents[0]] == parents[1]) {
    iYounger = 0;
  } else {
    assert(NJ->parent[parents[1]] == parents[0]);
    iYounger = 1;
  }
  UpdateForNNI(/*IN/OUT*/NJ, parents[iYounger], /*IN/OUT*/upProfiles, /*useML*/false);
}
/* Update the profile of node and its ancestor, and delete nearby out-profiles.
   In slow mode this recomputes every profile from node up to the root and
   clears all up-profiles (before and after); in fast mode it only refreshes
   node and its parent and invalidates the up-profiles that an NNI at node
   could have made stale (self, children, parent/siblings, uncle). */
void UpdateForNNI(/*IN/OUT*/NJ_t *NJ, int node, /*IN/OUT*/profile_t **upProfiles,
		  bool useML) {
  int i;
  if (slow) {
    /* exhaustive update */
    for (i = 0; i < NJ->maxnodes; i++)
      DeleteUpProfile(upProfiles, NJ, i);

    /* update profiles back to root */
    int ancestor;
    for (ancestor = node; ancestor >= 0; ancestor = NJ->parent[ancestor])
      RecomputeProfile(/*IN/OUT*/NJ, upProfiles, ancestor, useML);

    /* remove any up-profiles made while doing that*/
    for (i = 0; i < NJ->maxnodes; i++)
      DeleteUpProfile(upProfiles, NJ, i);
  } else {
    /* if fast, only update around self
       note that upProfile(parent) is still OK after an NNI, but
       up-profiles of uncles may not be
    */
    DeleteUpProfile(upProfiles, NJ, node);
    for (i = 0; i < NJ->child[node].nChild; i++)
      DeleteUpProfile(upProfiles, NJ, NJ->child[node].child[i]);
    assert(node != NJ->root);
    int parent = NJ->parent[node];
    int neighbors[2] = { parent, Sibling(NJ, node) };
    if (parent == NJ->root)
      RootSiblings(NJ, node, /*OUT*/neighbors);
    DeleteUpProfile(upProfiles, NJ, neighbors[0]);
    DeleteUpProfile(upProfiles, NJ, neighbors[1]);
    int uncle = Sibling(NJ, parent);
    if (uncle >= 0)
      DeleteUpProfile(upProfiles, NJ, uncle);
    RecomputeProfile(/*IN/OUT*/NJ, upProfiles, node, useML);
    RecomputeProfile(/*IN/OUT*/NJ, upProfiles, parent, useML);
  }
}
/* One round of subtree-prune-and-regraft moves, implemented as chains of
   NNIs of length up to maxSPRLength.  For each node, chains are tried from
   both pivot neighbors and with both initial swap directions; the best
   prefix of each chain (most negative cumulative delta) is kept and the
   rest unwound. */
void SPR(/*IN/OUT*/NJ_t *NJ, int maxSPRLength, int iRound, int nRounds) {
  /* Given a non-root node N with children A,B, sibling C, and uncle D,
     we can try to move A by doing three types of moves (4 choices):
     "down" -- swap A with a child of B (if B is not a leaf) [2 choices]
     "over" -- swap B with C
     "up" -- swap A with D
     We follow down moves with down moves, over moves with down moves, and
     up moves with either up or over moves. (Other choices are just backing
     up and hence useless.)

     As with NNIs, we keep track of up-profiles as we go. However, some of the regular
     profiles may also become "stale" so it is a bit trickier.

     We store the traversal before we do SPRs to avoid any possible infinite loop
  */
  double last_tot_len = 0.0;
  if (NJ->nSeq <= 3 || maxSPRLength < 1)
    return;
  if (slow)
    last_tot_len = TreeLength(NJ, /*recomputeLengths*/true);
  /* Snapshot the postorder node list up front (see comment above). */
  int *nodeList = mymalloc(sizeof(int) * NJ->maxnodes);
  int nodeListLen = 0;
  traversal_t traversal = InitTraversal(NJ);
  int node = NJ->root;
  while((node = TraversePostorder(node, NJ, /*IN/OUT*/traversal, /*pUp*/NULL)) >= 0) {
    nodeList[nodeListLen++] = node;
  }
  assert(nodeListLen == NJ->maxnode);
  traversal = FreeTraversal(traversal,NJ);

  profile_t **upProfiles = UpProfiles(NJ);
  spr_step_t *steps = mymalloc(sizeof(spr_step_t) * maxSPRLength); /* current chain of SPRs */

  int i;
  for (i = 0; i < nodeListLen; i++) {
    node = nodeList[i];
    if ((i % 100) == 0)
      ProgressReport("SPR round %3d of %3d, %d of %d nodes",
		     iRound+1, nRounds, i+1, nodeListLen);
    if (node == NJ->root)
      continue; /* nothing to do for root */
    /* The nodes to NNI around */
    int nodeAround[2] = { NJ->parent[node], Sibling(NJ, node) };
    if (NJ->parent[node] == NJ->root) {
      /* NNI around both siblings instead */
      RootSiblings(NJ, node, /*OUT*/nodeAround);
    }
    bool bChanged = false;
    int iAround;
    for (iAround = 0; iAround < 2 && bChanged == false; iAround++) {
      int ACFirst;
      for (ACFirst = 0; ACFirst < 2 && bChanged == false; ACFirst++) {
	if(verbose > 3)
	  PrintNJInternal(stderr, NJ, /*useLen*/false);
	int chainLength = FindSPRSteps(/*IN/OUT*/NJ, node, nodeAround[iAround],
				       upProfiles, /*OUT*/steps, maxSPRLength, (bool)ACFirst);
	/* Find the chain prefix with the best (most negative) total delta;
	   iCBest == -1 means no prefix improves on doing nothing. */
	double dMinDelta = 0.0;
	int iCBest = -1;
	double dTotDelta = 0.0;
	int iC;
	for (iC = 0; iC < chainLength; iC++) {
	  dTotDelta += steps[iC].deltaLength;
	  if (dTotDelta < dMinDelta) {
	    dMinDelta = dTotDelta;
	    iCBest = iC;
	  }
	}
	if (verbose>3) {
	  fprintf(stderr, "SPR %s %d around %d chainLength %d of %d deltaLength %.5f swaps:",
		  iCBest >= 0 ? "move" : "abandoned",
		  node,nodeAround[iAround],iCBest+1,chainLength,dMinDelta);
	  for (iC = 0; iC < chainLength; iC++)
	    fprintf(stderr, " (%d,%d)%.4f", steps[iC].nodes[0], steps[iC].nodes[1], steps[iC].deltaLength);
	  fprintf(stderr,"\n");
	}
	/* Unwind everything past the best prefix. */
	for (iC = chainLength - 1; iC > iCBest; iC--)
	  UnwindSPRStep(/*IN/OUT*/NJ, /*IN*/&steps[iC], /*IN/OUT*/upProfiles);
	if(verbose > 3)
	  PrintNJInternal(stderr, NJ, /*useLen*/false);
	/* In slow mode, verify the tree really got shorter; keep rewinding
	   steps until it does (or the whole chain is undone). */
	while (slow && iCBest >= 0) {
	  double expected_tot_len = last_tot_len + dMinDelta;
	  double new_tot_len = TreeLength(NJ, /*recompute*/true);
	  if (verbose > 2)
	    fprintf(stderr, "Total branch-length is now %.4f was %.4f expected %.4f\n",
		    new_tot_len, last_tot_len, expected_tot_len);
	  if (new_tot_len < last_tot_len) {
	    last_tot_len = new_tot_len;
	    break;		/* no rewinding necessary */
	  }
	  if (verbose > 2)
	    fprintf(stderr, "Rewinding SPR to %d\n",iCBest);
	  UnwindSPRStep(/*IN/OUT*/NJ, /*IN*/&steps[iCBest], /*IN/OUT*/upProfiles);
	  dMinDelta -= steps[iCBest].deltaLength;
	  iCBest--;
	}
	if (iCBest >= 0)
	  bChanged = true;
      }	/* loop over which step to take at 1st NNI */
    } /* loop over which node to pivot around */

    if (bChanged) {
      nSPR++;		/* the SPR move is OK */
      /* make sure all the profiles are OK */
      int j;
      for (j = 0; j < NJ->maxnodes; j++)
	DeleteUpProfile(upProfiles, NJ, j);
      int ancestor;
      for (ancestor = NJ->parent[node]; ancestor >= 0; ancestor = NJ->parent[ancestor])
	RecomputeProfile(/*IN/OUT*/NJ, upProfiles, ancestor, /*useML*/false);
    }
  } /* end loop over subtrees to prune & regraft */
  steps = myfree(steps, sizeof(spr_step_t) * maxSPRLength);
  upProfiles = FreeUpProfiles(upProfiles,NJ);
  nodeList = myfree(nodeList, sizeof(int) * NJ->maxnodes);
}
/* Recompute the profile of one internal (non-root) node from its two
   children.  With useML it builds a posterior profile using the child branch
   lengths; otherwise it averages the child profiles, using a BIONJ-style
   weight (computed from the surrounding quartet) when bionj is on.
   Leaves and the root are silently skipped. */
void RecomputeProfile(/*IN/OUT*/NJ_t *NJ, /*IN/OUT*/profile_t **upProfiles, int node,
		      bool useML) {
  if (node < NJ->nSeq || node == NJ->root)
    return;			/* no profile to compute */
  assert(NJ->child[node].nChild==2);

  profile_t *profiles[4];
  double weight = 0.5;
  if (useML || !bionj) {
    /* even weighting; only the two child profiles are needed */
    profiles[0] = NJ->profiles[NJ->child[node].child[0]];
    profiles[1] = NJ->profiles[NJ->child[node].child[1]];
  } else {
    /* BIONJ weighting needs the full quartet around this node */
    int nodeABCD[4];
    SetupABCD(NJ, node, /*OUT*/profiles, /*IN/OUT*/upProfiles, /*OUT*/nodeABCD, useML);
    weight = QuartetWeight(profiles, NJ->distance_matrix, NJ->nPos);
  }
  if (verbose>3) {
    if (useML) {
      fprintf(stderr, "Recompute %d from %d %d lengths %.4f %.4f\n",
	      node,
	      NJ->child[node].child[0],
	      NJ->child[node].child[1],
	      NJ->branchlength[NJ->child[node].child[0]],
	      NJ->branchlength[NJ->child[node].child[1]]);
    } else {
      fprintf(stderr, "Recompute %d from %d %d weight %.3f\n",
	      node, NJ->child[node].child[0], NJ->child[node].child[1], weight);
    }
  }
  NJ->profiles[node] = FreeProfile(NJ->profiles[node], NJ->nPos, NJ->nConstraints);
  if (useML) {
    NJ->profiles[node] = PosteriorProfile(profiles[0], profiles[1],
					  NJ->branchlength[NJ->child[node].child[0]],
					  NJ->branchlength[NJ->child[node].child[1]],
					  NJ->transmat, &NJ->rates, NJ->nPos, NJ->nConstraints);
  } else {
    NJ->profiles[node] = AverageProfile(profiles[0], profiles[1],
					NJ->nPos, NJ->nConstraints,
					NJ->distance_matrix, weight);
  }
}
/* The BIONJ-like formula for the weight of A when building a profile for AB is
1/2 + (avgD(B,CD) - avgD(A,CD))/(2*d(A,B))
*/
/* BIONJ-style weight of profile A when averaging A and B into one profile;
   returns -1.0 to request even weighting (bionj off, or A and B nearly
   identical).  The result is clamped to [0,1]. */
double QuartetWeight(profile_t *profiles[4], distance_matrix_t *dmat, int nPos) {
  if (!bionj)
    return(-1.0);		/* even weighting */
  double dist[6];
  CorrectedPairDistances(profiles, 4, dmat, nPos, /*OUT*/dist);
  if (dist[qAB] < 0.01)
    return(-1.0);		/* too close -- fall back to even weighting */
  /* 1/2 + (avgD(B,CD) - avgD(A,CD)) / (2*d(A,B)); the averages over C,D
     turn the divisor into 4*d(A,B) */
  double w = 0.5 + ((dist[qBC] + dist[qBD]) - (dist[qAC] + dist[qAD])) / (4 * dist[qAB]);
  if (w < 0)
    return(0);
  if (w > 1)
    return(1);
  return(w);
}
/* Swap newchild in for oldchild in parent's child list, and point
   newchild's parent entry at parent.  Aborts (assert) if oldchild is not
   actually a child of parent. */
void ReplaceChild(/*IN/OUT*/NJ_t *NJ, int parent, int oldchild, int newchild) {
  NJ->parent[newchild] = parent;
  int j;
  for (j = 0; j < NJ->child[parent].nChild; j++) {
    if (NJ->child[parent].child[j] != oldchild)
      continue;
    NJ->child[parent].child[j] = newchild;
    return;
  }
  assert(0);			/* oldchild was not a child of parent */
}
/* Recomputes all branch lengths

   For internal branches such as (A,B) vs. (C,D), uses the formula

   length(AB|CD) = (d(A,C)+d(A,D)+d(B,C)+d(B,D))/4 - d(A,B)/2 - d(C,D)/2

   (where all distances are profile distances - diameters).

   For external branches (e.g. to leaves) A vs. (B,C), use the formula

   length(A|BC) = (d(A,B)+d(A,C)-d(B,C))/2
*/
void UpdateBranchLengths(/*IN/OUT*/NJ_t *NJ) {
  if (NJ->nSeq < 2)
    return;
  else if (NJ->nSeq == 2) {
    /* Trivial tree: split the (log-corrected) pairwise distance evenly. */
    int root = NJ->root;
    int nodeA = NJ->child[root].child[0];
    int nodeB = NJ->child[root].child[1];
    besthit_t h;
    ProfileDist(NJ->profiles[nodeA],NJ->profiles[nodeB],
		NJ->nPos, NJ->distance_matrix, /*OUT*/&h);
    if (logdist)
      h.dist = LogCorrect(h.dist);
    NJ->branchlength[nodeA] = h.dist/2.0;
    NJ->branchlength[nodeB] = h.dist/2.0;
    return;
  }

  profile_t **upProfiles = UpProfiles(NJ);
  traversal_t traversal = InitTraversal(NJ);
  int node = NJ->root;

  while((node = TraversePostorder(node, NJ, /*IN/OUT*/traversal, /*pUp*/NULL)) >= 0) {
    /* reset branch length of node (distance to its parent) */
    if (node == NJ->root)
      continue; /* no branch length to set */
    if (node < NJ->nSeq) { /* a leaf */
      profile_t *profileA = NJ->profiles[node];
      profile_t *profileB = NULL;
      profile_t *profileC = NULL;

      int sib = Sibling(NJ,node);
      if (sib == -1) { /* at root, have 2 siblings */
	int sibs[2];
	RootSiblings(NJ, node, /*OUT*/sibs);
	profileB = NJ->profiles[sibs[0]];
	profileC = NJ->profiles[sibs[1]];
      } else {
	profileB = NJ->profiles[sib];
	profileC = GetUpProfile(/*IN/OUT*/upProfiles, NJ, NJ->parent[node], /*useML*/false);
      }
      profile_t *profiles[3] = {profileA,profileB,profileC};
      double d[3]; /*AB,AC,BC*/
      CorrectedPairDistances(profiles, 3, NJ->distance_matrix, NJ->nPos, /*OUT*/d);
      /* d(A,BC) = (dAB+dAC-dBC)/2 */
      NJ->branchlength[node] = (d[0]+d[1]-d[2])/2.0;
    } else {
      /* internal branch: use the quartet around this node */
      profile_t *profiles[4];
      int nodeABCD[4];
      SetupABCD(NJ, node, /*OUT*/profiles, /*IN/OUT*/upProfiles, /*OUT*/nodeABCD, /*useML*/false);
      double d[6];
      CorrectedPairDistances(profiles, 4, NJ->distance_matrix, NJ->nPos, /*OUT*/d);
      NJ->branchlength[node] = (d[qAC]+d[qAD]+d[qBC]+d[qBD])/4.0 - (d[qAB]+d[qCD])/2.0;
      
      /* no longer needed */
      DeleteUpProfile(upProfiles, NJ, nodeABCD[0]);
      DeleteUpProfile(upProfiles, NJ, nodeABCD[1]);
    }
  }
  traversal = FreeTraversal(traversal,NJ);
  upProfiles = FreeUpProfiles(upProfiles,NJ);
}
/* Pick columns for resampling, stored as returned_vector[iBoot*nPos + j].
   Each of the nBootstrap replicates draws nPos column indices uniformly at
   random (with replacement) from [0, nPos).  Caller owns the returned array
   (nPos*nBootstrap ints) and frees it with myfree. */
int *ResampleColumns(int nPos, int nBootstrap) {
  long lPos = nPos; /* to prevent overflow on very long alignments when multiplying nPos * nBootstrap */
  int *col = (int*)mymalloc(sizeof(int)*lPos*(size_t)nBootstrap);
  int i;
  for (i = 0; i < nBootstrap; i++) {
    int j;
    for (j = 0; j < nPos; j++) {
      /* knuth_rand() is expected to return a value in [0,1); the clamps
	 below are defensive against a generator that can return exactly
	 1.0 (or a stray out-of-range value). */
      int pos = (int)(knuth_rand() * nPos);
      if (pos<0)
	pos = 0;
      else if (pos >= nPos)	/* was pos == nPos; >= also catches pos > nPos */
	pos = nPos-1;
      col[i*lPos + j] = pos;	/* i*lPos is long arithmetic -- no int overflow */
    }
  }
  if (verbose > 5) {
    /* dump the first few replicates for debugging */
    for (i=0; i < 3 && i < nBootstrap; i++) {
      fprintf(stderr,"Boot%d",i);
      int j;
      for (j = 0; j < nPos; j++) {
	fprintf(stderr,"\t%d",col[i*lPos+j]);
      }
      fprintf(stderr,"\n");
    }
  }
  return(col);
}
/* Compute local-bootstrap support values for every internal split and store
   them in NJ->support[node]. */
void ReliabilityNJ(/*IN/OUT*/NJ_t *NJ, int nBootstrap) {
  /* For each non-root node N, with children A,B, parent P, sibling C, and grandparent G,
     we test the reliability of the split (A,B) versus rest by comparing the profiles
     of A, B, C, and the "up-profile" of P.

     Each node's upProfile is the average of its sibling's (down)-profile + its parent's up-profile
     (If node's parent is the root, then there are two siblings and we don't need an up-profile)

     To save memory, we do depth-first-search down from the root, and we only keep
     up-profiles for nodes in the active path.
  */
  if (NJ->nSeq <= 3 || nBootstrap <= 0)
    return;			/* nothing to do */
  int *col = ResampleColumns(NJ->nPos, nBootstrap);

  profile_t **upProfiles = UpProfiles(NJ);
  traversal_t traversal = InitTraversal(NJ);
  int node = NJ->root;
  int iNodesDone = 0;
  while((node = TraversePostorder(node, NJ, /*IN/OUT*/traversal, /*pUp*/NULL)) >= 0) {
    if (node < NJ->nSeq || node == NJ->root)
      continue; /* nothing to do for leaves or root */

    if(iNodesDone > 0 && (iNodesDone % 100) == 0)
      ProgressReport("Local bootstrap for %6d of %6d internal splits", iNodesDone, NJ->nSeq-3, 0, 0);
    iNodesDone++;

    profile_t *profiles[4];
    int nodeABCD[4];
    SetupABCD(NJ, node, /*OUT*/profiles, /*IN/OUT*/upProfiles, /*OUT*/nodeABCD, /*useML*/false);

    NJ->support[node] = SplitSupport(profiles[0], profiles[1], profiles[2], profiles[3],
				     NJ->distance_matrix,
				     NJ->nPos,
				     nBootstrap,
				     col);

    /* no longer needed */
    DeleteUpProfile(upProfiles, NJ, nodeABCD[0]);
    DeleteUpProfile(upProfiles, NJ, nodeABCD[1]);
    DeleteUpProfile(upProfiles, NJ, nodeABCD[2]);
  }
  traversal = FreeTraversal(traversal,NJ);
  upProfiles = FreeUpProfiles(upProfiles,NJ);
  col = myfree(col, sizeof(int)*((size_t)NJ->nPos)*nBootstrap);
}
profile_t *NewProfile(int nPos, int nConstraints) {
profile_t *profile = (profile_t *)mymalloc(sizeof(profile_t));
profile->weights = mymalloc(sizeof(numeric_t)*nPos);
profile->codes = mymalloc(sizeof(unsigned char)*nPos);
profile->vectors = NULL;
profile->nVectors = 0;
profile->codeDist = NULL;
if (nConstraints == 0) {
profile->nOn = NULL;
profile->nOff = NULL;
} else {
profile->nOn = mymalloc(sizeof(int)*nConstraints);
profile->nOff = mymalloc(sizeof(int)*nConstraints);
}
return(profile);
}
/* Free a profile and all of its owned arrays.  Safe to call on NULL.
   Returns NULL so callers can write p = FreeProfile(p, ...). */
profile_t *FreeProfile(profile_t *profile, int nPos, int nConstraints) {
  if (profile == NULL)
    return(NULL);
  /* the sizes passed to myfree mirror the allocations in NewProfile and
     the lazy vectors/codeDist allocations */
  myfree(profile->weights, nPos);
  myfree(profile->codes, nPos);
  myfree(profile->vectors, sizeof(numeric_t)*nCodes*profile->nVectors);
  myfree(profile->codeDist, sizeof(numeric_t)*nCodes*nPos);
  if (nConstraints > 0) {
    myfree(profile->nOff, sizeof(int)*nConstraints);
    myfree(profile->nOn, sizeof(int)*nConstraints);
  }
  return(myfree(profile, sizeof(profile_t)));
}
/* Identify the quartet around node: A,B are node's children, C is its
   sibling (or the first root sibling), and D is its parent (or the second
   root sibling).  Fills nodeABCD always; fills profiles[] only when the
   profiles argument is non-NULL, in which case profiles[3] is the parent's
   up-profile (or, at the root, the second sibling's ordinary profile). */
void SetupABCD(NJ_t *NJ, int node,
	       /* the 4 profiles; the last one is an up-profile (or, at the
		  root, the second root-sibling's profile) */
	       /*OPTIONAL OUT*/profile_t *profiles[4], 
	       /*OPTIONAL IN/OUT*/profile_t **upProfiles,
	       /*OUT*/int nodeABCD[4],
	       bool useML) {
  int parent = NJ->parent[node];
  assert(parent >= 0);
  assert(NJ->child[node].nChild == 2);
  nodeABCD[0] = NJ->child[node].child[0]; /*A*/
  nodeABCD[1] = NJ->child[node].child[1]; /*B*/

  profile_t *profile4 = NULL;
  if (parent == NJ->root) {
    /* trifurcating root: C and D are node's two root siblings */
    int sibs[2];
    RootSiblings(NJ, node, /*OUT*/sibs);
    nodeABCD[2] = sibs[0];
    nodeABCD[3] = sibs[1];
    if (profiles == NULL)
      return;
    profile4 = NJ->profiles[sibs[1]];
  } else {
    nodeABCD[2] = Sibling(NJ,node);
    assert(nodeABCD[2] >= 0);
    nodeABCD[3] = parent;
    if (profiles == NULL)
      return;
    profile4 = GetUpProfile(upProfiles,NJ,parent,useML);
  }
  /* when profiles are requested, upProfiles must be supplied even in the
     root case (it is simply unused there) */
  assert(upProfiles != NULL);
  int i;
  for (i = 0; i < 3; i++)
    profiles[i] = NJ->profiles[nodeABCD[i]];
  profiles[3] = profile4;
}
/* Return node's lone sibling, or -1 when node has no parent or its parent
   is the trifurcating root (where node has two siblings -- use RootSiblings). */
int Sibling(NJ_t *NJ, int node) {
  int parent = NJ->parent[node];
  if (parent < 0 || parent == NJ->root)
    return(-1);
  int j;
  for (j = 0; j < NJ->child[parent].nChild; j++) {
    int candidate = NJ->child[parent].child[j];
    if (candidate != node)
      return(candidate);
  }
  assert(0);			/* node is not among its parent's children */
  return(-1);
}
/* Fill sibs[] with the two siblings of node, which must be a direct child
   of the trifurcating root. */
void RootSiblings(NJ_t *NJ, int node, /*OUT*/int sibs[2]) {
  assert(NJ->parent[node] == NJ->root);
  assert(NJ->child[NJ->root].nChild == 3);
  int nFound = 0;
  int j;
  for (j = 0; j < NJ->child[NJ->root].nChild; j++) {
    int child = NJ->child[NJ->root].child[j];
    if (child != node)
      sibs[nFound++] = child;
  }
  assert(nFound == 2);
}
/* Test every internal split under maximum likelihood: compare AB|CD to the
   two NNI alternatives, tally bad splits and constraint violations into
   splitcount, and (when nBootstrap > 0) store SH-like support values in
   NJ->support[].  The three topologies are evaluated in parallel OpenMP
   sections when OPENMP is defined. */
void TestSplitsML(/*IN/OUT*/NJ_t *NJ, /*OUT*/SplitCount_t *splitcount, int nBootstrap) {
  const double tolerance = 1e-6;
  splitcount->nBadSplits = 0;
  splitcount->nConstraintViolations = 0;
  splitcount->nBadBoth = 0;
  splitcount->nSplits = 0;
  splitcount->dWorstDeltaUnconstrained = 0;
  splitcount->dWorstDeltaConstrained = 0;

  profile_t **upProfiles = UpProfiles(NJ);
  traversal_t traversal = InitTraversal(NJ);
  int node = NJ->root;

  int *col = nBootstrap > 0 ? ResampleColumns(NJ->nPos, nBootstrap) : NULL;
  /* per-topology per-site likelihoods, reused across nodes */
  double *site_likelihoods[3];
  int choice;
  /* NOTE: `choice` doubles as a plain loop index here and below, and as the
     selected nni_t inside the main loop */
  for (choice = 0; choice < 3; choice++)
    site_likelihoods[choice] = mymalloc(sizeof(double)*NJ->nPos);

  int iNodesDone = 0;
  while((node = TraversePostorder(node, NJ, /*IN/OUT*/traversal, /*pUp*/NULL)) >= 0) {
    if (node < NJ->nSeq || node == NJ->root)
      continue; /* nothing to do for leaves or root */

    if(iNodesDone > 0 && (iNodesDone % 100) == 0)
      ProgressReport("ML split tests for %6d of %6d internal splits", iNodesDone, NJ->nSeq-3, 0, 0);
    iNodesDone++;

    profile_t *profiles[4];
    int nodeABCD[4];
    SetupABCD(NJ, node, /*OUT*/profiles, /*IN/OUT*/upProfiles, /*OUT*/nodeABCD, /*useML*/true);
    double loglk[3];
    double len[5];
    int i;
    for (i = 0; i < 4; i++)
      len[i] = NJ->branchlength[nodeABCD[i]];
    len[4] = NJ->branchlength[node];
    double lenABvsCD[5] = {len[LEN_A], len[LEN_B], len[LEN_C], len[LEN_D], len[LEN_I]};
    double lenACvsBD[5] = {len[LEN_A], len[LEN_C], len[LEN_B], len[LEN_D], len[LEN_I]};   /* Swap B & C */
    double lenADvsBC[5] = {len[LEN_A], len[LEN_D], len[LEN_C], len[LEN_B], len[LEN_I]};   /* Swap B & D */

    {
#ifdef OPENMP
      #pragma omp parallel
      #pragma omp sections
#endif
      {
#ifdef OPENMP
        #pragma omp section
#endif
	{
	  /* Lengths are already optimized for ABvsCD */
	  loglk[ABvsCD] = MLQuartetLogLk(profiles[0], profiles[1], profiles[2], profiles[3],
					 NJ->nPos, NJ->transmat, &NJ->rates, /*IN/OUT*/lenABvsCD,
					 /*OUT*/site_likelihoods[ABvsCD]);
	}

#ifdef OPENMP
        #pragma omp section
#endif
	{
	  loglk[ACvsBD] = MLQuartetOptimize(profiles[0], profiles[2], profiles[1], profiles[3],
					    NJ->nPos, NJ->transmat, &NJ->rates, /*IN/OUT*/lenACvsBD, /*pStarTest*/NULL,
					    /*OUT*/site_likelihoods[ACvsBD]);
	}

#ifdef OPENMP
        #pragma omp section
#endif
	{
	  loglk[ADvsBC] = MLQuartetOptimize(profiles[0], profiles[3], profiles[2], profiles[1],
					    NJ->nPos, NJ->transmat, &NJ->rates, /*IN/OUT*/lenADvsBC, /*pStarTest*/NULL,
					    /*OUT*/site_likelihoods[ADvsBC]);
	}
      }
    }

    /* do a second pass on the better alternative if it is close */
    if (loglk[ACvsBD] > loglk[ADvsBC]) {
      if (mlAccuracy > 1 || loglk[ACvsBD] > loglk[ABvsCD] - closeLogLkLimit) {
	loglk[ACvsBD] = MLQuartetOptimize(profiles[0], profiles[2], profiles[1], profiles[3],
					  NJ->nPos, NJ->transmat, &NJ->rates, /*IN/OUT*/lenACvsBD, /*pStarTest*/NULL,
					  /*OUT*/site_likelihoods[ACvsBD]);
      }
    } else {
      if (mlAccuracy > 1 || loglk[ADvsBC] > loglk[ABvsCD] - closeLogLkLimit) {
	loglk[ADvsBC] = MLQuartetOptimize(profiles[0], profiles[3], profiles[2], profiles[1],
					  NJ->nPos, NJ->transmat, &NJ->rates, /*IN/OUT*/lenADvsBC, /*pStarTest*/NULL,
					  /*OUT*/site_likelihoods[ADvsBC]);
      }
    }

    /* from here on, `choice` is the winning topology */
    if (loglk[ABvsCD] >= loglk[ACvsBD] && loglk[ABvsCD] >= loglk[ADvsBC])
      choice = ABvsCD;
    else if (loglk[ACvsBD] >= loglk[ABvsCD] && loglk[ACvsBD] >= loglk[ADvsBC])
      choice = ACvsBD;
    else
      choice = ADvsBC;
    bool badSplit = loglk[choice] > loglk[ABvsCD] + treeLogLkDelta; /* ignore small changes in likelihood */

    /* constraint penalties, indexed by nni_t (lower is better) */
    double p[3];
    QuartetConstraintPenalties(profiles, NJ->nConstraints, /*OUT*/p);
    bool bBadConstr = p[ABvsCD] > p[ACvsBD] + tolerance || p[ABvsCD] > p[ADvsBC] + tolerance;
    bool violateConstraint = false;
    int iC;
    for (iC=0; iC < NJ->nConstraints; iC++) {
      if (SplitViolatesConstraint(profiles, iC)) {
	violateConstraint = true;
	break;
      }
    }
    splitcount->nSplits++;
    if (violateConstraint)
      splitcount->nConstraintViolations++;
    if (badSplit)
      splitcount->nBadSplits++;
    if (badSplit && bBadConstr)
      splitcount->nBadBoth++;
    if (badSplit) {
      double delta = loglk[choice] - loglk[ABvsCD];
      /* If ABvsCD is favored over the more likely NNI by constraints,
	 then this is probably a bad split because of the constraint */
      if (p[choice] > p[ABvsCD] + tolerance)
	splitcount->dWorstDeltaConstrained = MAX(delta, splitcount->dWorstDeltaConstrained);
      else
	splitcount->dWorstDeltaUnconstrained = MAX(delta, splitcount->dWorstDeltaUnconstrained);
    }
      
    if (nBootstrap>0)
      NJ->support[node] = badSplit ? 0.0 : SHSupport(NJ->nPos, nBootstrap, col, loglk, site_likelihoods);

    /* No longer needed */
    DeleteUpProfile(upProfiles, NJ, nodeABCD[0]);
    DeleteUpProfile(upProfiles, NJ, nodeABCD[1]);
    DeleteUpProfile(upProfiles, NJ, nodeABCD[2]);
  }
  traversal = FreeTraversal(traversal,NJ);
  upProfiles = FreeUpProfiles(upProfiles,NJ);
  if (nBootstrap>0)
    col = myfree(col, sizeof(int)*((size_t)NJ->nPos)*nBootstrap);
  for (choice = 0; choice < 3; choice++)
    site_likelihoods[choice] = myfree(site_likelihoods[choice], sizeof(double)*NJ->nPos);
}
/* Minimum-evolution split test: for every internal split A,B|C,D in the tree,
   compare the distance-based score of the current topology (AB|CD) against the
   two nearest-neighbor-interchange alternatives (AC|BD and AD|BC), and tally
   bad splits and constraint violations into *splitcount.
   No tree edits are made; this is reporting only. */
void TestSplitsMinEvo(NJ_t *NJ, /*OUT*/SplitCount_t *splitcount) {
const double tolerance = 1e-6;
/* reset all output counters before the traversal */
splitcount->nBadSplits = 0;
splitcount->nConstraintViolations = 0;
splitcount->nBadBoth = 0;
splitcount->nSplits = 0;
splitcount->dWorstDeltaUnconstrained = 0.0;
splitcount->dWorstDeltaConstrained = 0.0;
profile_t **upProfiles = UpProfiles(NJ);
traversal_t traversal = InitTraversal(NJ);
int node = NJ->root;
while((node = TraversePostorder(node, NJ, /*IN/OUT*/traversal, /*pUp*/NULL)) >= 0) {
if (node < NJ->nSeq || node == NJ->root)
continue; /* nothing to do for leaves or root */
/* profiles[0..3] = A,B,C,D around this node's split; D is the "up" profile */
profile_t *profiles[4];
int nodeABCD[4];
SetupABCD(NJ, node, /*OUT*/profiles, /*IN/OUT*/upProfiles, /*OUT*/nodeABCD, /*useML*/false);
if (verbose>2)
fprintf(stderr,"Testing Split around %d: A=%d B=%d C=%d D=up(%d) or node parent %d\n",
node, nodeABCD[0], nodeABCD[1], nodeABCD[2], nodeABCD[3], NJ->parent[node]);
double d[6]; /* distances, perhaps log-corrected distances, no constraint penalties */
CorrectedPairDistances(profiles, 4, NJ->distance_matrix, NJ->nPos, /*OUT*/d);
/* alignment-based scores for each split (lower is better) */
double sABvsCD = d[qAB] + d[qCD];
double sACvsBD = d[qAC] + d[qBD];
double sADvsBC = d[qAD] + d[qBC];
/* constraint penalties, indexed by nni_t (lower is better) */
double p[3];
QuartetConstraintPenalties(profiles, NJ->nConstraints, /*OUT*/p);
/* count how many individual constraints this split violates (for reporting) */
int nConstraintsViolated = 0;
int iC;
for (iC=0; iC < NJ->nConstraints; iC++) {
if (SplitViolatesConstraint(profiles, iC)) {
nConstraintsViolated++;
if (verbose > 2) {
double penalty[3] = {0.0,0.0,0.0};
(void)QuartetConstraintPenaltiesPiece(profiles, iC, /*OUT*/penalty);
fprintf(stderr, "Violate constraint %d at %d (children %d %d) penalties %.3f %.3f %.3f %d/%d %d/%d %d/%d %d/%d\n",
iC, node, NJ->child[node].child[0], NJ->child[node].child[1],
penalty[ABvsCD], penalty[ACvsBD], penalty[ADvsBC],
profiles[0]->nOn[iC], profiles[0]->nOff[iC],
profiles[1]->nOn[iC], profiles[1]->nOff[iC],
profiles[2]->nOn[iC], profiles[2]->nOff[iC],
profiles[3]->nOn[iC], profiles[3]->nOff[iC]);
}
}
}
/* a split is "bad" if some NNI alternative scores better than AB|CD */
double delta = sABvsCD - MIN(sACvsBD,sADvsBC);
bool bBadDist = delta > tolerance;
bool bBadConstr = p[ABvsCD] > p[ACvsBD] + tolerance || p[ABvsCD] > p[ADvsBC] + tolerance;
splitcount->nSplits++;
if (bBadDist) {
nni_t choice = sACvsBD < sADvsBC ? ACvsBD : ADvsBC;
/* If ABvsCD is favored over the shorter NNI by constraints,
then this is probably a bad split because of the constraint */
if (p[choice] > p[ABvsCD] + tolerance)
splitcount->dWorstDeltaConstrained = MAX(delta, splitcount->dWorstDeltaConstrained);
else
splitcount->dWorstDeltaUnconstrained = MAX(delta, splitcount->dWorstDeltaUnconstrained);
}
if (nConstraintsViolated > 0)
splitcount->nConstraintViolations++; /* count splits with any violations, not #constraints in a split */
if (bBadDist)
splitcount->nBadSplits++;
if (bBadDist && bBadConstr)
splitcount->nBadBoth++;
if (bBadConstr && verbose > 2) {
/* Which NNI would be better */
double dist_advantage = 0;
double constraint_penalty = 0;
if (p[ACvsBD] < p[ADvsBC]) {
dist_advantage = sACvsBD - sABvsCD;
constraint_penalty = p[ABvsCD] - p[ACvsBD];
} else {
dist_advantage = sADvsBC - sABvsCD;
constraint_penalty = p[ABvsCD] - p[ADvsBC];
}
fprintf(stderr, "Violate constraints %d distance_advantage %.3f constraint_penalty %.3f (children %d %d):",
node, dist_advantage, constraint_penalty,
NJ->child[node].child[0], NJ->child[node].child[1]);
/* list the constraints with a penalty, meaning that ABCD all have non-zero
values and that AB|CD worse than others */
for (iC = 0; iC < NJ->nConstraints; iC++) {
double ppart[6];
if (QuartetConstraintPenaltiesPiece(profiles, iC, /*OUT*/ppart)) {
if (ppart[qAB] + ppart[qCD] > ppart[qAD] + ppart[qBC] + tolerance
|| ppart[qAB] + ppart[qCD] > ppart[qAC] + ppart[qBD] + tolerance)
fprintf(stderr, " %d (%d/%d %d/%d %d/%d %d/%d)", iC,
profiles[0]->nOn[iC], profiles[0]->nOff[iC],
profiles[1]->nOn[iC], profiles[1]->nOff[iC],
profiles[2]->nOn[iC], profiles[2]->nOff[iC],
profiles[3]->nOn[iC], profiles[3]->nOff[iC]);
}
}
fprintf(stderr, "\n");
}
/* no longer needed */
DeleteUpProfile(upProfiles, NJ, nodeABCD[0]);
DeleteUpProfile(upProfiles, NJ, nodeABCD[1]);
}
traversal = FreeTraversal(traversal,NJ);
upProfiles = FreeUpProfiles(upProfiles,NJ);
}
/* Computes support for (A,B),(C,D) compared to that for (A,C),(B,D) and (A,D),(B,C).
   Returns the fraction of the nBootstrap column resamplings (col holds
   nBootstrap blocks of nPos resampled column indices) in which AB|CD remains
   strictly better than both alternatives.
   Per-site distance pieces are weighted by the product of the two profiles'
   positional weights, so resampled distances are ratios of resampled sums. */
double SplitSupport(profile_t *pA, profile_t *pB, profile_t *pC, profile_t *pD,
/*OPTIONAL*/distance_matrix_t *dmat,
int nPos,
int nBootstrap,
int *col) {
int i,j;
long lPos = nPos; /* to avoid overflow when multiplying */
/* Note distpieces are weighted */
double *distpieces[6];
double *weights[6];
for (j = 0; j < 6; j++) {
distpieces[j] = (double*)mymalloc(sizeof(double)*nPos);
weights[j] = (double*)mymalloc(sizeof(double)*nPos);
}
/* cursors into each profile's packed frequency vectors (advanced by GET_FREQ) */
int iFreqA = 0;
int iFreqB = 0;
int iFreqC = 0;
int iFreqD = 0;
for (i = 0; i < nPos; i++) {
numeric_t *fA = GET_FREQ(pA, i, /*IN/OUT*/iFreqA);
numeric_t *fB = GET_FREQ(pB, i, /*IN/OUT*/iFreqB);
numeric_t *fC = GET_FREQ(pC, i, /*IN/OUT*/iFreqC);
numeric_t *fD = GET_FREQ(pD, i, /*IN/OUT*/iFreqD);
/* pairwise weight at this site = product of the two positional weights */
weights[qAB][i] = pA->weights[i] * pB->weights[i];
weights[qAC][i] = pA->weights[i] * pC->weights[i];
weights[qAD][i] = pA->weights[i] * pD->weights[i];
weights[qBC][i] = pB->weights[i] * pC->weights[i];
weights[qBD][i] = pB->weights[i] * pD->weights[i];
weights[qCD][i] = pC->weights[i] * pD->weights[i];
distpieces[qAB][i] = weights[qAB][i] * ProfileDistPiece(pA->codes[i], pB->codes[i], fA, fB, dmat, NULL);
distpieces[qAC][i] = weights[qAC][i] * ProfileDistPiece(pA->codes[i], pC->codes[i], fA, fC, dmat, NULL);
distpieces[qAD][i] = weights[qAD][i] * ProfileDistPiece(pA->codes[i], pD->codes[i], fA, fD, dmat, NULL);
distpieces[qBC][i] = weights[qBC][i] * ProfileDistPiece(pB->codes[i], pC->codes[i], fB, fC, dmat, NULL);
distpieces[qBD][i] = weights[qBD][i] * ProfileDistPiece(pB->codes[i], pD->codes[i], fB, fD, dmat, NULL);
distpieces[qCD][i] = weights[qCD][i] * ProfileDistPiece(pC->codes[i], pD->codes[i], fC, fD, dmat, NULL);
}
/* all frequency vectors must have been consumed exactly */
assert(iFreqA == pA->nVectors);
assert(iFreqB == pB->nVectors);
assert(iFreqC == pC->nVectors);
assert(iFreqD == pD->nVectors);
/* full-alignment distances: weighted averages of the per-site pieces */
double totpieces[6];
double totweights[6];
double dists[6];
for (j = 0; j < 6; j++) {
totpieces[j] = 0.0;
totweights[j] = 0.0;
for (i = 0; i < nPos; i++) {
totpieces[j] += distpieces[j][i];
totweights[j] += weights[j][i];
}
dists[j] = totweights[j] > 0.01 ? totpieces[j]/totweights[j] : 3.0; /* 3.0 = "far" fallback */
if (logdist)
dists[j] = LogCorrect(dists[j]);
}
/* Support1 = Support(AB|CD over AC|BD) = d(A,C)+d(B,D)-d(A,B)-d(C,D)
Support2 = Support(AB|CD over AD|BC) = d(A,D)+d(B,C)-d(A,B)-d(C,D)
*/
double support1 = dists[qAC] + dists[qBD] - dists[qAB] - dists[qCD];
double support2 = dists[qAD] + dists[qBC] - dists[qAB] - dists[qCD];
if (support1 < 0 || support2 < 0) {
nSuboptimalSplits++; /* Another split seems superior */
}
assert(nBootstrap > 0);
int nSupport = 0;
int iBoot;
for (iBoot=0;iBoot<nBootstrap;iBoot++) {
/* colw = this replicate's nPos resampled column indices */
int *colw = &col[lPos*iBoot];
for (j = 0; j < 6; j++) {
double totp = 0;
double totw = 0;
double *d = distpieces[j];
double *w = weights[j];
for (i=0; i<nPos; i++) {
int c = colw[i];
totp += d[c];
totw += w[c];
}
dists[j] = totw > 0.01 ? totp/totw : 3.0;
if (logdist)
dists[j] = LogCorrect(dists[j]);
}
support1 = dists[qAC] + dists[qBD] - dists[qAB] - dists[qCD];
support2 = dists[qAD] + dists[qBC] - dists[qAB] - dists[qCD];
if (support1 > 0 && support2 > 0)
nSupport++;
} /* end loop over bootstrap replicates */
for (j = 0; j < 6; j++) {
distpieces[j] = myfree(distpieces[j], sizeof(double)*nPos);
weights[j] = myfree(weights[j], sizeof(double)*nPos);
}
return( nSupport/(double)nBootstrap );
}
/* Shimodaira-Hasegawa-like support from column resampling.
   loglk[0..2] are the total log-likelihoods of the three topologies
   (index 0 is the current one); site_likelihoods[t][j] is topology t's
   likelihood at site j; col holds nBootstrap blocks of nPos resampled
   column indices. Returns the fraction of replicates whose best-vs-rest
   log-likelihood gap falls below the observed gap of topology 0. */
double SHSupport(int nPos, int nBootstrap, int *col, double loglk[3], double *site_likelihoods[3]) {
  long lPos = nPos; /* widen to avoid overflow in iBoot*lPos */
  assert(nBootstrap>0);
  /* observed gap between topology 0 and its closest rival */
  double gap1 = loglk[0]-loglk[1];
  double gap2 = loglk[0]-loglk[2];
  double delta = (gap2 < gap1) ? gap2 : gap1;
  /* per-site log-likelihoods for each topology */
  double *siteloglk[3];
  int t, s;
  for (t = 0; t < 3; t++) {
    siteloglk[t] = mymalloc(sizeof(double)*nPos);
    for (s = 0; s < nPos; s++)
      siteloglk[t][s] = log(site_likelihoods[t][s]);
  }
  int nSupport = 0;
  int iBoot;
  for (iBoot = 0; iBoot < nBootstrap; iBoot++) {
    /* center each topology at its own total, so resampled[t] is the
       deviation of the resampled log-likelihood from the original */
    double resampled[3];
    for (t = 0; t < 3; t++)
      resampled[t] = -loglk[t];
    for (s = 0; s < nPos; s++) {
      int pos = col[iBoot*lPos+s];
      for (t = 0; t < 3; t++)
        resampled[t] += siteloglk[t][pos];
    }
    /* which topology wins this replicate? */
    int iBest = 0;
    for (t = 1; t < 3; t++)
      if (resampled[t] > resampled[iBest])
        iBest = t;
    /* the winner's smaller margin over the other two */
    double margin1 = resampled[iBest] - resampled[(iBest+1)%3];
    double margin2 = resampled[iBest] - resampled[(iBest+2)%3];
    double resampleDelta = (margin1 < margin2) ? margin1 : margin2;
    if (resampleDelta < delta)
      nSupport++;
  }
  for (t = 0; t < 3; t++)
    siteloglk[t] = myfree(siteloglk[t], sizeof(double)*nPos);
  return(nSupport/(double)nBootstrap);
}
/* Fills in hit's distance (including diameter correction and the weighted
   constraint-violation penalty) and then its join criterion.
   Leaf-leaf pairs use the exact sequence distance; pairs involving an
   internal node use the profile-profile distance. */
void SetDistCriterion(/*IN/OUT*/NJ_t *NJ, int nActive, /*IN/OUT*/besthit_t *hit) {
  bool bLeafPair = hit->i < NJ->nSeq && hit->j < NJ->nSeq;
  if (!bLeafPair) {
    /* profile distance, corrected by subtracting both nodes' diameters */
    ProfileDist(NJ->profiles[hit->i],
                NJ->profiles[hit->j],
                NJ->nPos, NJ->distance_matrix, /*OUT*/hit);
    hit->dist -= (NJ->diameter[hit->i] + NJ->diameter[hit->j]);
  } else {
    SeqDist(NJ->profiles[hit->i]->codes,
            NJ->profiles[hit->j]->codes,
            NJ->nPos, NJ->distance_matrix, /*OUT*/hit);
  }
  /* penalize joins that would violate topological constraints */
  hit->dist += constraintWeight
    * (double)JoinConstraintPenalty(NJ, hit->i, hit->j);
  SetCriterion(NJ,nActive,/*IN/OUT*/hit);
}
/* Computes the neighbor-joining criterion dist - (out(i)+out(j))/(nActive-2)
   for the candidate join, refreshing out-distances first if they are too
   stale, and rescaling ones computed at a different number of active nodes.
   No-op for empty entries or nodes that have already been joined. */
void SetCriterion(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/OUT*/besthit_t *join) {
  /* skip invalid or already-joined entries */
  if (join->i < 0 || join->j < 0)
    return;
  if (NJ->parent[join->i] >= 0 || NJ->parent[join->j] >= 0)
    return;
  assert(NJ->nOutDistActive[join->i] >= nActive);
  assert(NJ->nOutDistActive[join->j] >= nActive);
  /* with top-hits, tolerate out-distances that are a little stale */
  int nStaleAllowed = tophitsMult > 0 ? (int)(nActive*staleOutLimit) : 0;
  if (NJ->nOutDistActive[join->i] - nActive > nStaleAllowed)
    SetOutDistance(NJ, join->i, nActive);
  if (NJ->nOutDistActive[join->j] - nActive > nStaleAllowed)
    SetOutDistance(NJ, join->j, nActive);
  /* rescale out-distances computed when more nodes were active */
  double dOutI = NJ->outDistances[join->i];
  if (NJ->nOutDistActive[join->i] != nActive)
    dOutI *= (nActive-1)/(double)(NJ->nOutDistActive[join->i]-1);
  double dOutJ = NJ->outDistances[join->j];
  if (NJ->nOutDistActive[join->j] != nActive)
    dOutJ *= (nActive-1)/(double)(NJ->nOutDistActive[join->j]-1);
  join->criterion = join->dist - (dOutI+dOutJ)/(double)(nActive-2);
  if (verbose > 2 && nActive <= 5) {
    fprintf(stderr, "Set Criterion to join %d %d with nActive=%d dist+penalty %.3f criterion %.3f\n",
            join->i, join->j, nActive, join->dist, join->criterion);
  }
}
/* Recomputes NJ->outDistances[iNode] (the total distance from iNode to all
   other active nodes) from the out-profile, using the weighted-average
   derivation in the long comment below, and records the nActive it was
   computed at in NJ->nOutDistActive[iNode]. No-op if already current. */
void SetOutDistance(NJ_t *NJ, int iNode, int nActive) {
if (NJ->nOutDistActive[iNode] == nActive)
return;
/* May be called by InitNJ before we have parents */
assert(iNode>=0 && (NJ->parent == NULL || NJ->parent[iNode]<0));
besthit_t dist;
ProfileDist(NJ->profiles[iNode], NJ->outprofile, NJ->nPos, NJ->distance_matrix, &dist);
outprofileOps++;
/* out(A) = sum(X!=A) d(A,X)
= sum(X!=A) (profiledist(A,X) - diam(A) - diam(X))
= sum(X!=A) profiledist(A,X) - (N-1)*diam(A) - (totdiam - diam(A))
in the absence of gaps:
profiledist(A,out) = mean profiledist(A, all active nodes)
sum(X!=A) profiledist(A,X) = N * profiledist(A,out) - profiledist(A,A)
With gaps, we need to take the weights of the comparisons into account, where
w(Ai) is the weight of position i in profile A:
w(A,B) = sum_i w(Ai) * w(Bi)
d(A,B) = sum_i w(Ai) * w(Bi) * d(Ai,Bi) / w(A,B)
sum(X!=A) profiledist(A,X) ~= (N-1) * profiledist(A, Out w/o A)
profiledist(A, Out w/o A) = sum_X!=A sum_i d(Ai,Xi) * w(Ai) * w(Bi) / ( sum_X!=A sum_i w(Ai) * w(Bi) )
d(A, Out) = sum_A sum_i d(Ai,Xi) * w(Ai) * w(Bi) / ( sum_X sum_i w(Ai) * w(Bi) )
and so we get
profiledist(A,out w/o A) = (top of d(A,Out) - top of d(A,A)) / (weight of d(A,Out) - weight of d(A,A))
top = dist * weight
with another correction of nActive because the weight of the out-profile is the average
weight not the total weight.
*/
double top = (nActive-1)
* (dist.dist * dist.weight * nActive - NJ->selfweight[iNode] * NJ->selfdist[iNode]);
double bottom = (dist.weight * nActive - NJ->selfweight[iNode]);
double pdistOutWithoutA = top/bottom;
/* if the denominator is tiny (nearly all gaps), fall back to the "far" value 3.0 */
NJ->outDistances[iNode] = bottom > 0.01 ?
pdistOutWithoutA - NJ->diameter[iNode] * (nActive-1) - (NJ->totdiam - NJ->diameter[iNode])
: 3.0;
NJ->nOutDistActive[iNode] = nActive;
if(verbose>3 && iNode < 5)
fprintf(stderr,"NewOutDist for %d %f from dist %f selfd %f diam %f totdiam %f newActive %d\n",
iNode, NJ->outDistances[iNode], dist.dist, NJ->selfdist[iNode], NJ->diameter[iNode],
NJ->totdiam, nActive);
if (verbose>6 && (iNode % 10) == 0) {
/* Compute the actual out-distance and compare */
double total = 0.0;
double total_pd = 0.0;
int j;
for (j=0;j<NJ->maxnode;j++) {
if (j!=iNode && (NJ->parent==NULL || NJ->parent[j]<0)) {
besthit_t bh;
ProfileDist(NJ->profiles[iNode], NJ->profiles[j], NJ->nPos, NJ->distance_matrix, /*OUT*/&bh);
total_pd += bh.dist;
total += bh.dist - (NJ->diameter[iNode] + NJ->diameter[j]);
}
}
fprintf(stderr,"OutDist for Node %d %f truth %f profiled %f truth %f pd_err %f\n",
iNode, NJ->outDistances[iNode], total, pdistOutWithoutA, total_pd,fabs(pdistOutWithoutA-total_pd));
}
}
/* Releases a top_hits_t and everything it owns (per-node hit lists, the
   visible set, the topvisible index, and any OpenMP locks); returns NULL
   so callers can write `tophits = FreeTopHits(tophits)`. Safe on NULL. */
top_hits_t *FreeTopHits(top_hits_t *tophits) {
  if (tophits == NULL)
    return(NULL);
  int iNode;
  for (iNode = 0; iNode < tophits->maxnodes; iNode++) {
    top_hits_list_t *l = &tophits->top_hits_lists[iNode];
    if (l->hits != NULL)
      l->hits = myfree(l->hits, sizeof(hit_t) * l->nHits);
  }
  tophits->top_hits_lists = myfree(tophits->top_hits_lists, sizeof(top_hits_list_t) * tophits->maxnodes);
  /* BUGFIX: visible is allocated in InitTopHits as sizeof(hit_t) * maxnodes
     (an array of hit_t, not of pointers); the old code passed
     sizeof(hit_t*) here, which corrupts myfree's allocation accounting. */
  tophits->visible = myfree(tophits->visible, sizeof(hit_t) * tophits->maxnodes);
  tophits->topvisible = myfree(tophits->topvisible, sizeof(int) * tophits->nTopVisible);
#ifdef OPENMP
  for (iNode = 0; iNode < tophits->maxnodes; iNode++)
    omp_destroy_lock(&tophits->locks[iNode]);
  tophits->locks = myfree(tophits->locks, sizeof(omp_lock_t) * tophits->maxnodes);
#endif
  return(myfree(tophits, sizeof(top_hits_t)));
}
/* Allocates and initializes the top-hits bookkeeping for up to NJ->maxnodes
   nodes. m is the top-hit list length; q is the shorter 2nd-level list
   length (0 disables 2nd-level lists). Per-node hit lists start empty;
   each visible entry starts as the sentinel {j=-1, dist=1e20}.
   Free with FreeTopHits(). */
top_hits_t *InitTopHits(NJ_t *NJ, int m) {
int iNode;
assert(m > 0);
tophits_t *dummy; /* (never used) */
top_hits_t *tophits = mymalloc(sizeof(top_hits_t));
tophits->m = m;
/* 2nd-level list size ~ tophits2Mult * sqrt(m); 0 turns the feature off */
tophits->q = (int)(0.5 + tophits2Mult * sqrt(tophits->m));
if (!useTopHits2nd || tophits->q >= tophits->m)
tophits->q = 0;
tophits->maxnodes = NJ->maxnodes;
tophits->top_hits_lists = mymalloc(sizeof(top_hits_list_t) * tophits->maxnodes);
/* note: one hit_t per node (FreeTopHits must free with this same size) */
tophits->visible = mymalloc(sizeof(hit_t) * tophits->maxnodes);
tophits->nTopVisible = (int)(0.5 + topvisibleMult*m);
tophits->topvisible = mymalloc(sizeof(int) * tophits->nTopVisible);
#ifdef OPENMP
tophits->locks = mymalloc(sizeof(omp_lock_t) * tophits->maxnodes);
for (iNode = 0; iNode < tophits->maxnodes; iNode++)
omp_init_lock(&tophits->locks[iNode]);
#endif
int i;
for (i = 0; i < tophits->nTopVisible; i++)
tophits->topvisible[i] = -1; /* empty */
tophits->topvisibleAge = 0;
for (iNode = 0; iNode < tophits->maxnodes; iNode++) {
top_hits_list_t *l = &tophits->top_hits_lists[iNode];
l->nHits = 0;
l->hits = NULL;
l->hitSource = -1;
l->age = 0;
hit_t *v = &tophits->visible[iNode];
v->j = -1;
v->dist = 1e20; /* sentinel: no visible hit yet */
}
return(tophits);
}
/* Comparator for sorting seed candidates in SetAllLeafTopHits.
   qsort comparators take no context argument, so the NJ_t and the
   per-sequence gap counts are passed through these two globals; they are
   set just before the qsort call and cleared right after. */
NJ_t *CompareSeedNJ = NULL;
int *CompareSeedGaps = NULL;
int CompareSeeds(const void *c1, const void *c2) {
  int seed1 = *(int *)c1;
  int seed2 = *(int *)c2;
  /* primary key: fewer gaps is better */
  int gapdiff = CompareSeedGaps[seed1] - CompareSeedGaps[seed2];
  if (gapdiff != 0)
    return(gapdiff);
  /* secondary key: smaller out-distance (closer to more nodes) is better */
  double dOut1 = CompareSeedNJ->outDistances[seed1];
  double dOut2 = CompareSeedNJ->outDistances[seed2];
  if (dOut1 < dOut2)
    return(-1);
  if (dOut1 > dOut2)
    return(1);
  return(0);
}
/* Builds the initial top-hit list for every leaf, using the seed heuristic:
   compute exhaustive hits only for sorted "seed" leaves, then transfer
   (and optionally re-transfer at a 2nd level) those hits to close neighbors
   instead of recomputing them. Finishes with a symmetry-checking pass that
   inserts i into j's list when it should be there. */
void SetAllLeafTopHits(/*IN/UPDATE*/NJ_t *NJ, /*IN/OUT*/top_hits_t *tophits) {
/* "close" = relative distance threshold for treating a hit as a close
   neighbor of the seed; derived from nSeq unless set via tophitsClose */
double close = tophitsClose;
if (close < 0) {
if (fastest && NJ->nSeq >= 50000) {
close = 0.99;
} else {
double logN = log((double)NJ->nSeq)/log(2.0);
close = logN/(logN+2.0);
}
}
/* Sort the potential seeds, by a combination of nGaps and NJ->outDistances
We don't store nGaps so we need to compute that
*/
int *nGaps = (int*)mymalloc(sizeof(int)*NJ->nSeq);
int iNode;
for(iNode=0; iNode<NJ->nSeq; iNode++) {
/* gap count inferred from selfweight (rounded) */
nGaps[iNode] = (int)(0.5 + NJ->nPos - NJ->selfweight[iNode]);
}
int *seeds = (int*)mymalloc(sizeof(int)*NJ->nSeq);
for (iNode=0; iNode<NJ->nSeq; iNode++) seeds[iNode] = iNode;
/* CompareSeeds reads these globals; set for the qsort, then clear */
CompareSeedNJ = NJ;
CompareSeedGaps = nGaps;
qsort(/*IN/OUT*/seeds, NJ->nSeq, sizeof(int), CompareSeeds);
CompareSeedNJ = NULL;
CompareSeedGaps = NULL;
/* For each seed, save its top 2*m hits and then look for close neighbors */
assert(2 * tophits->m <= NJ->nSeq);
int iSeed;
/* NOTE(review): nHasTopHits is incremented inside the parallel loop without
   atomics; it appears to be used only for progress reporting — confirm */
int nHasTopHits = 0;
#ifdef OPENMP
#pragma omp parallel for schedule(dynamic, 50)
#endif
for(iSeed=0; iSeed < NJ->nSeq; iSeed++) {
int seed = seeds[iSeed];
if (iSeed > 0 && (iSeed % 100) == 0) {
#ifdef OPENMP
#pragma omp critical
#endif
ProgressReport("Top hits for %6d of %6d seqs (at seed %6d)",
nHasTopHits, NJ->nSeq,
iSeed, 0);
}
/* already filled in by a transfer from an earlier seed */
if (tophits->top_hits_lists[seed].nHits > 0) {
if(verbose>2) fprintf(stderr, "Skipping seed %d\n", seed);
continue;
}
besthit_t *besthitsSeed = (besthit_t*)mymalloc(sizeof(besthit_t)*NJ->nSeq);
besthit_t *besthitsNeighbor = (besthit_t*)mymalloc(sizeof(besthit_t) * 2 * tophits->m);
besthit_t bestjoin;
if(verbose>2) fprintf(stderr,"Trying seed %d\n", seed);
/* exhaustive scan: distance from seed to every other leaf */
SetBestHit(seed, NJ, /*nActive*/NJ->nSeq, /*OUT*/&bestjoin, /*OUT*/besthitsSeed);
/* sort & save top hits of self. besthitsSeed is now sorted. */
SortSaveBestHits(seed, /*IN/SORT*/besthitsSeed, /*IN-SIZE*/NJ->nSeq,
/*OUT-SIZE*/tophits->m, /*IN/OUT*/tophits);
nHasTopHits++;
/* find "close" neighbors and compute their top hits */
double neardist = besthitsSeed[2 * tophits->m - 1].dist * close;
/* must have at least average weight, rem higher is better
and allow a bit more than average, e.g. if we are looking for within 30% away,
20% more gaps than usual seems OK
Alternatively, have a coverage requirement in case neighbor is short
If fastest, consider the top q/2 hits to be close neighbors, regardless
*/
double nearweight = 0;
int iClose;
for (iClose = 0; iClose < 2 * tophits->m; iClose++)
nearweight += besthitsSeed[iClose].weight;
nearweight = nearweight/(2.0 * tophits->m); /* average */
nearweight *= (1.0-2.0*neardist/3.0);
double nearcover = 1.0 - neardist/2.0;
if(verbose>2) fprintf(stderr,"Distance limit for close neighbors %f weight %f ungapped %d\n",
neardist, nearweight, NJ->nPos-nGaps[seed]);
for (iClose = 0; iClose < tophits->m; iClose++) {
besthit_t *closehit = &besthitsSeed[iClose];
int closeNode = closehit->j;
if (tophits->top_hits_lists[closeNode].nHits > 0)
continue;
/* If within close-distance, or identical, use as close neighbor */
/* NOTE(review): this bool `close` shadows the outer double `close`
   (the threshold) for the rest of this loop body */
bool close = closehit->dist <= neardist
&& (closehit->weight >= nearweight
|| closehit->weight >= (NJ->nPos-nGaps[closeNode])*nearcover);
bool identical = closehit->dist < 1e-6
&& fabs(closehit->weight - (NJ->nPos - nGaps[seed])) < 1e-5
&& fabs(closehit->weight - (NJ->nPos - nGaps[closeNode])) < 1e-5;
if (useTopHits2nd && iClose < tophits->q && (close || identical)) {
/* 2nd-level transfer: give closeNode a short (q-sized) list */
nHasTopHits++;
nClose2Used++;
int nUse = MIN(tophits->q * tophits2Safety, 2 * tophits->m);
besthit_t *besthitsClose = mymalloc(sizeof(besthit_t) * nUse);
TransferBestHits(NJ, /*nActive*/NJ->nSeq,
closeNode,
/*IN*/besthitsSeed, /*SIZE*/nUse,
/*OUT*/besthitsClose,
/*updateDistance*/true);
SortSaveBestHits(closeNode, /*IN/SORT*/besthitsClose,
/*IN-SIZE*/nUse, /*OUT-SIZE*/tophits->q,
/*IN/OUT*/tophits);
tophits->top_hits_lists[closeNode].hitSource = seed;
besthitsClose = myfree(besthitsClose, sizeof(besthit_t) * nUse);
} else if (close || identical || (fastest && iClose < (tophits->q+1)/2)) {
/* 1st-level transfer: give closeNode a full m-sized list */
nHasTopHits++;
nCloseUsed++;
if(verbose>2) fprintf(stderr, "Near neighbor %d (rank %d weight %f ungapped %d %d)\n",
closeNode, iClose, besthitsSeed[iClose].weight,
NJ->nPos-nGaps[seed],
NJ->nPos-nGaps[closeNode]);
/* compute top 2*m hits */
TransferBestHits(NJ, /*nActive*/NJ->nSeq,
closeNode,
/*IN*/besthitsSeed, /*SIZE*/2 * tophits->m,
/*OUT*/besthitsNeighbor,
/*updateDistance*/true);
SortSaveBestHits(closeNode, /*IN/SORT*/besthitsNeighbor,
/*IN-SIZE*/2 * tophits->m, /*OUT-SIZE*/tophits->m,
/*IN/OUT*/tophits);
/* And then try for a second level of transfer. We assume we
are in a good area, because of the 1st
level of transfer, and in a small neighborhood, because q is
small (32 for 1 million sequences), so we do not make any close checks.
*/
int iClose2;
for (iClose2 = 0; iClose2 < tophits->q && iClose2 < 2 * tophits->m; iClose2++) {
int closeNode2 = besthitsNeighbor[iClose2].j;
assert(closeNode2 >= 0);
if (tophits->top_hits_lists[closeNode2].hits == NULL) {
nClose2Used++;
nHasTopHits++;
int nUse = MIN(tophits->q * tophits2Safety, 2 * tophits->m);
besthit_t *besthitsClose2 = mymalloc(sizeof(besthit_t) * nUse);
TransferBestHits(NJ, /*nActive*/NJ->nSeq,
closeNode2,
/*IN*/besthitsNeighbor, /*SIZE*/nUse,
/*OUT*/besthitsClose2,
/*updateDistance*/true);
SortSaveBestHits(closeNode2, /*IN/SORT*/besthitsClose2,
/*IN-SIZE*/nUse, /*OUT-SIZE*/tophits->q,
/*IN/OUT*/tophits);
tophits->top_hits_lists[closeNode2].hitSource = closeNode;
besthitsClose2 = myfree(besthitsClose2, sizeof(besthit_t) * nUse);
} /* end if should do 2nd-level transfer */
}
}
} /* end loop over close candidates */
besthitsSeed = myfree(besthitsSeed, sizeof(besthit_t)*NJ->nSeq);
besthitsNeighbor = myfree(besthitsNeighbor, sizeof(besthit_t) * 2 * tophits->m);
} /* end loop over seeds */
/* every leaf must now have a non-empty list; seed the visible set from
   each list's best entry */
for (iNode=0; iNode<NJ->nSeq; iNode++) {
top_hits_list_t *l = &tophits->top_hits_lists[iNode];
assert(l->hits != NULL);
assert(l->hits[0].j >= 0);
assert(l->hits[0].j < NJ->nSeq);
assert(l->hits[0].j != iNode);
tophits->visible[iNode] = l->hits[0];
}
if (verbose >= 2) fprintf(stderr, "#Close neighbors among leaves: 1st-level %ld 2nd-level %ld seeds %ld\n",
nCloseUsed, nClose2Used, NJ->nSeq-nCloseUsed-nClose2Used);
nGaps = myfree(nGaps, sizeof(int)*NJ->nSeq);
seeds = myfree(seeds, sizeof(int)*NJ->nSeq);
/* Now add a "checking phase" where we ensure that the q or 2*sqrt(m) hits
of i are represented in j (if they should be)
*/
long lReplace = 0;
int nCheck = tophits->q > 0 ? tophits->q : (int)(0.5 + 2.0*sqrt(tophits->m));
for (iNode = 0; iNode < NJ->nSeq; iNode++) {
if ((iNode % 100) == 0)
ProgressReport("Checking top hits for %6d of %6d seqs",
iNode+1, NJ->nSeq, 0, 0);
top_hits_list_t *lNode = &tophits->top_hits_lists[iNode];
int iHit;
for (iHit = 0; iHit < nCheck && iHit < lNode->nHits; iHit++) {
besthit_t bh = HitToBestHit(iNode, lNode->hits[iHit]);
SetCriterion(NJ, /*nActive*/NJ->nSeq, /*IN/OUT*/&bh);
top_hits_list_t *lTarget = &tophits->top_hits_lists[bh.j];
/* If this criterion is worse than the nCheck-1 entry of the target,
then skip the check.
This logic is based on assuming that the list is sorted,
which is true initially but may not be true later.
Still, is a good heuristic.
*/
assert(nCheck > 0);
assert(nCheck <= lTarget->nHits);
besthit_t bhCheck = HitToBestHit(bh.j, lTarget->hits[nCheck-1]);
SetCriterion(NJ, /*nActive*/NJ->nSeq, /*IN/OUT*/&bhCheck);
if (bhCheck.criterion < bh.criterion)
continue; /* no check needed */
/* Check if this is present in the top-hit list */
int iHit2;
bool bFound = false;
for (iHit2 = 0; iHit2 < lTarget->nHits && !bFound; iHit2++)
if (lTarget->hits[iHit2].j == iNode)
bFound = true;
if (!bFound) {
/* Find the hit with the worst criterion and replace it with this one */
int iWorst = -1;
double dWorstCriterion = -1e20;
for (iHit2 = 0; iHit2 < lTarget->nHits; iHit2++) {
besthit_t bh2 = HitToBestHit(bh.j, lTarget->hits[iHit2]);
SetCriterion(NJ, /*nActive*/NJ->nSeq, /*IN/OUT*/&bh2);
if (bh2.criterion > dWorstCriterion) {
iWorst = iHit2;
dWorstCriterion = bh2.criterion;
}
}
if (dWorstCriterion > bh.criterion) {
assert(iWorst >= 0);
lTarget->hits[iWorst].j = iNode;
lTarget->hits[iWorst].dist = bh.dist;
lReplace++;
/* and perhaps update visible */
besthit_t v;
bool bSuccess = GetVisible(NJ, /*nActive*/NJ->nSeq, tophits, bh.j, /*OUT*/&v);
assert(bSuccess);
if (bh.criterion < v.criterion)
tophits->visible[bh.j] = lTarget->hits[iWorst];
}
}
}
}
if (verbose >= 2)
fprintf(stderr, "Replaced %ld top hit entries\n", lReplace);
}
/* Scans iNode's top-hit list and returns, in *bestjoin, the entry with the
   lowest criterion among hits whose endpoints are still active (stale hits
   are remapped to active ancestors). Updates out-distances (unless -fastest)
   but does not reset or update the visible set. */
void GetBestFromTopHits(int iNode,
			/*IN/UPDATE*/NJ_t *NJ,
			int nActive,
			/*IN*/top_hits_t *tophits,
			/*OUT*/besthit_t *bestjoin) {
  assert(iNode >= 0);
  assert(NJ->parent[iNode] < 0);
  top_hits_list_t *l = &tophits->top_hits_lists[iNode];
  assert(l->nHits > 0);
  assert(l->hits != NULL);
  if(!fastest)
    SetOutDistance(NJ, iNode, nActive); /* ensure out-distances are not stale */
  /* sentinel: no join found yet */
  bestjoin->i = -1;
  bestjoin->j = -1;
  bestjoin->dist = 1e20;
  bestjoin->criterion = 1e20;
  int iHit;
  for (iHit = 0; iHit < l->nHits; iHit++) {
    besthit_t candidate = HitToBestHit(iNode, l->hits[iHit]);
    /* skip hits whose endpoints were joined away or merged together */
    if (!UpdateBestHit(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/&candidate, /*update dist*/true))
      continue;
    SetCriterion(/*IN/OUT*/NJ, nActive, /*IN/OUT*/&candidate); /* make sure criterion is correct */
    if (candidate.criterion < bestjoin->criterion)
      *bestjoin = candidate;
  }
  assert(bestjoin->j >= 0); /* a hit was found */
  assert(bestjoin->i == iNode);
}
/* Follows parent links upward until reaching a node with no parent (the
   active node that absorbed iNode's lineage). A negative iNode is returned
   unchanged. */
int ActiveAncestor(/*IN*/NJ_t *NJ, int iNode) {
  if (iNode < 0)
    return(iNode);
  int ancestor = iNode;
  while (NJ->parent[ancestor] >= 0)
    ancestor = NJ->parent[ancestor];
  return(ancestor);
}
/* Remaps a (possibly stale) hit's endpoints to their active ancestors.
   Returns false — and invalidates the hit — if either endpoint is gone or
   both collapsed into the same node. If the endpoints changed, either
   recomputes dist & criterion (bUpdateDist) or marks them stale. */
bool UpdateBestHit(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/OUT*/besthit_t *hit,
		   bool bUpdateDist) {
  int newI = ActiveAncestor(/*IN*/NJ, hit->i);
  int newJ = ActiveAncestor(/*IN*/NJ, hit->j);
  if (newI < 0 || newJ < 0 || newI == newJ) {
    /* endpoint vanished or endpoints merged: invalidate the entry */
    hit->i = -1;
    hit->j = -1;
    hit->weight = 0;
    hit->dist = 1e20;
    hit->criterion = 1e20;
    return(false);
  }
  if (newI == hit->i && newJ == hit->j)
    return(true); /* nothing moved; cached values remain valid */
  hit->i = newI;
  hit->j = newJ;
  if (bUpdateDist) {
    SetDistCriterion(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/hit);
  } else {
    /* caller will recompute later; mark values as stale */
    hit->dist = -1e20;
    hit->criterion = 1e20;
  }
  return(true);
}
/* Fetches iNode's visible (best-known) hit as a besthit_t with a current
   criterion. Returns false if iNode or its visible partner is no longer
   an active node. */
bool GetVisible(/*IN/UPDATE*/NJ_t *NJ, int nActive,
		/*IN/OUT*/top_hits_t *tophits,
		int iNode, /*OUT*/besthit_t *visible) {
  /* the node itself must still be active */
  if (iNode < 0 || NJ->parent[iNode] >= 0)
    return(false);
  hit_t *entry = &tophits->visible[iNode];
  /* so must its visible partner */
  if (entry->j < 0 || NJ->parent[entry->j] >= 0)
    return(false);
  *visible = HitToBestHit(iNode, *entry);
  SetCriterion(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/visible);
  return(true);
}
/* Remaps every hit in combined[] to active ancestors, sorts by (i,j),
   and returns a freshly allocated list with invalid entries and duplicates
   removed (*nUniqueOut = its length). Distances/criteria of the surviving
   hits are then recomputed, in parallel under OpenMP.
   Caller owns the returned list (allocated for nCombined entries) and must
   myfree it; combined[] is reordered in place. */
besthit_t *UniqueBestHits(/*IN/UPDATE*/NJ_t *NJ, int nActive,
/*IN/SORT*/besthit_t *combined, int nCombined,
/*OUT*/int *nUniqueOut) {
int iHit;
/* remap endpoints only; dists are recomputed below, so skip them here */
for (iHit = 0; iHit < nCombined; iHit++) {
besthit_t *hit = &combined[iHit];
UpdateBestHit(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/hit, /*update*/false);
}
/* sort so duplicates become adjacent */
qsort(/*IN/OUT*/combined, nCombined, sizeof(besthit_t), CompareHitsByIJ);
besthit_t *uniqueList = (besthit_t*)mymalloc(sizeof(besthit_t)*nCombined);
int nUnique = 0;
int iSavedLast = -1;
/* First build the new list */
for (iHit = 0; iHit < nCombined; iHit++) {
besthit_t *hit = &combined[iHit];
if (hit->i < 0 || hit->j < 0)
continue; /* invalidated by UpdateBestHit */
if (iSavedLast >= 0) {
/* toss out duplicates */
besthit_t *saved = &combined[iSavedLast];
if (saved->i == hit->i && saved->j == hit->j)
continue;
}
assert(nUnique < nCombined);
assert(hit->j >= 0 && NJ->parent[hit->j] < 0);
uniqueList[nUnique++] = *hit;
iSavedLast = iHit;
}
*nUniqueOut = nUnique;
/* Then do any updates to the criterion or the distances in parallel */
#ifdef OPENMP
#pragma omp parallel for schedule(dynamic, 50)
#endif
for (iHit = 0; iHit < nUnique; iHit++) {
besthit_t *hit = &uniqueList[iHit];
if (hit->dist < 0.0)
SetDistCriterion(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/hit); /* dist marked stale */
else
SetCriterion(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/hit);
}
return(uniqueList);
}
/*
Create a top hit list for the new node, either
from children (if there are enough best hits left) or by a "refresh"
Also set visible set for newnode
Also update visible set for other nodes if we stumble across a "better" hit
*/
void TopHitJoin(int newnode,
/*IN/UPDATE*/NJ_t *NJ,
int nActive,
/*IN/OUT*/top_hits_t *tophits) {
long startProfileOps = profileOps;
long startOutProfileOps = outprofileOps;
assert(NJ->child[newnode].nChild == 2);
top_hits_list_t *lNew = &tophits->top_hits_lists[newnode];
assert(lNew->hits == NULL);
/* Copy the hits */
int i;
top_hits_list_t *lChild[2];
for (i = 0; i< 2; i++) {
lChild[i] = &tophits->top_hits_lists[NJ->child[newnode].child[i]];
assert(lChild[i]->hits != NULL && lChild[i]->nHits > 0);
}
int nCombined = lChild[0]->nHits + lChild[1]->nHits;
besthit_t *combinedList = (besthit_t*)mymalloc(sizeof(besthit_t)*nCombined);
HitsToBestHits(lChild[0]->hits, lChild[0]->nHits, NJ->child[newnode].child[0],
/*OUT*/combinedList);
HitsToBestHits(lChild[1]->hits, lChild[1]->nHits, NJ->child[newnode].child[1],
/*OUT*/combinedList + lChild[0]->nHits);
int nUnique;
/* UniqueBestHits() replaces children (used in the calls to HitsToBestHits)
with active ancestors, so all distances & criteria will be recomputed */
besthit_t *uniqueList = UniqueBestHits(/*IN/UPDATE*/NJ, nActive,
/*IN/SORT*/combinedList,
nCombined,
/*OUT*/&nUnique);
int nUniqueAlloc = nCombined;
combinedList = myfree(combinedList, sizeof(besthit_t)*nCombined);
/* Forget the top-hit lists of the joined nodes */
for (i = 0; i < 2; i++) {
lChild[i]->hits = myfree(lChild[i]->hits, sizeof(hit_t) * lChild[i]->nHits);
lChild[i]->nHits = 0;
}
/* Use the average age, rounded up, by 1 Versions 2.0 and earlier
used the maximum age, which leads to more refreshes without
improving the accuracy of the NJ phase. Intuitively, if one of
them was just refreshed then another refresh is unlikely to help.
*/
lNew->age = (lChild[0]->age+lChild[1]->age+1)/2 + 1;
/* If top hit ages always match (perfectly balanced), then a
limit of log2(m) would mean a refresh after
m joins, which is about what we want.
*/
int tophitAgeLimit = MAX(1, (int)(0.5 + log((double)tophits->m)/log(2.0)));
/* Either use the merged list as candidate top hits, or
move from 2nd level to 1st level, or do a refresh
UniqueBestHits eliminates hits to self, so if nUnique==nActive-1,
we've already done the exhaustive search.
Either way, we set tophits, visible(newnode), update visible of its top hits,
and modify topvisible: if we do a refresh, then we reset it, otherwise we update
*/
bool bSecondLevel = lChild[0]->hitSource >= 0 && lChild[1]->hitSource >= 0;
bool bUseUnique = nUnique==nActive-1
|| (lNew->age <= tophitAgeLimit
&& nUnique >= (bSecondLevel ? (int)(0.5 + tophits2Refresh * tophits->q)
: (int)(0.5 + tophits->m * tophitsRefresh) ));
if (bUseUnique && verbose > 2)
fprintf(stderr,"Top hits for %d from combined %d nActive=%d tophitsage %d %s\n",
newnode,nUnique,nActive,lNew->age,
bSecondLevel ? "2ndlevel" : "1stlevel");
if (!bUseUnique
&& bSecondLevel
&& lNew->age <= tophitAgeLimit) {
int source = ActiveAncestor(NJ, lChild[0]->hitSource);
if (source == newnode)
source = ActiveAncestor(NJ, lChild[1]->hitSource);
/* In parallel mode, it is possible that we would select a node as the
hit-source and then over-write that top hit with a short list.
So we need this sanity check.
*/
if (source != newnode
&& source >= 0
&& tophits->top_hits_lists[source].hitSource < 0) {
/* switch from 2nd-level to 1st-level top hits -- compute top hits list
of node from what we have so far plus the active source plus its top hits */
top_hits_list_t *lSource = &tophits->top_hits_lists[source];
assert(lSource->hitSource < 0);
assert(lSource->nHits > 0);
int nMerge = 1 + lSource->nHits + nUnique;
besthit_t *mergeList = mymalloc(sizeof(besthit_t) * nMerge);
memcpy(/*to*/mergeList, /*from*/uniqueList, nUnique * sizeof(besthit_t));
int iMerge = nUnique;
mergeList[iMerge].i = newnode;
mergeList[iMerge].j = source;
SetDistCriterion(NJ, nActive, /*IN/OUT*/&mergeList[iMerge]);
iMerge++;
HitsToBestHits(lSource->hits, lSource->nHits, newnode, /*OUT*/mergeList+iMerge);
for (i = 0; i < lSource->nHits; i++) {
SetDistCriterion(NJ, nActive, /*IN/OUT*/&mergeList[iMerge]);
iMerge++;
}
assert(iMerge == nMerge);
uniqueList = myfree(uniqueList, nUniqueAlloc * sizeof(besthit_t));
uniqueList = UniqueBestHits(/*IN/UPDATE*/NJ, nActive,
/*IN/SORT*/mergeList,
nMerge,
/*OUT*/&nUnique);
nUniqueAlloc = nMerge;
mergeList = myfree(mergeList, sizeof(besthit_t)*nMerge);
assert(nUnique > 0);
bUseUnique = nUnique >= (int)(0.5 + tophits->m * tophitsRefresh);
bSecondLevel = false;
if (bUseUnique && verbose > 2)
fprintf(stderr, "Top hits for %d from children and source %d's %d hits, nUnique %d\n",
newnode, source, lSource->nHits, nUnique);
}
}
if (bUseUnique) {
if (bSecondLevel) {
/* pick arbitrarily */
lNew->hitSource = lChild[0]->hitSource;
}
int nSave = MIN(nUnique, bSecondLevel ? tophits->q : tophits->m);
assert(nSave>0);
if (verbose > 2)
fprintf(stderr, "Combined %d ops so far %ld\n", nUnique, profileOps - startProfileOps);
SortSaveBestHits(newnode, /*IN/SORT*/uniqueList, /*nIn*/nUnique,
/*nOut*/nSave, /*IN/OUT*/tophits);
assert(lNew->hits != NULL); /* set by sort/save */
tophits->visible[newnode] = lNew->hits[0];
UpdateTopVisible(/*IN*/NJ, nActive, newnode, &tophits->visible[newnode],
/*IN/OUT*/tophits);
UpdateVisible(/*IN/UPDATE*/NJ, nActive, /*IN*/uniqueList, nSave, /*IN/OUT*/tophits);
} else {
/* need to refresh: set top hits for node and for its top hits */
if(verbose > 2) fprintf(stderr,"Top hits for %d by refresh (%d unique age %d) nActive=%d\n",
newnode,nUnique,lNew->age,nActive);
nRefreshTopHits++;
lNew->age = 0;
int iNode;
/* ensure all out-distances are up to date ahead of time
to avoid any data overwriting issues.
*/
#ifdef OPENMP
#pragma omp parallel for schedule(dynamic, 50)
#endif
for (iNode = 0; iNode < NJ->maxnode; iNode++) {
if (NJ->parent[iNode] < 0) {
if (fastest) {
besthit_t bh;
bh.i = iNode;
bh.j = iNode;
bh.dist = 0;
SetCriterion(/*IN/UPDATE*/NJ, nActive, &bh);
} else {
SetOutDistance(/*IN/UDPATE*/NJ, iNode, nActive);
}
}
}
/* exhaustively get the best 2*m hits for newnode, set visible, and save the top m */
besthit_t *allhits = (besthit_t*)mymalloc(sizeof(besthit_t)*NJ->maxnode);
assert(2 * tophits->m <= NJ->maxnode);
besthit_t bh;
SetBestHit(newnode, NJ, nActive, /*OUT*/&bh, /*OUT*/allhits);
qsort(/*IN/OUT*/allhits, NJ->maxnode, sizeof(besthit_t), CompareHitsByCriterion);
SortSaveBestHits(newnode, /*IN/SORT*/allhits, /*nIn*/NJ->maxnode,
/*nOut*/tophits->m, /*IN/OUT*/tophits);
/* Do not need to call UpdateVisible because we set visible below */
/* And use the top 2*m entries to expand other best-hit lists, but only for top m */
int iHit;
#ifdef OPENMP
#pragma omp parallel for schedule(dynamic, 50)
#endif
for (iHit=0; iHit < tophits->m; iHit++) {
if (allhits[iHit].i < 0) continue;
int iNode = allhits[iHit].j;
assert(iNode>=0);
if (NJ->parent[iNode] >= 0) continue;
top_hits_list_t *l = &tophits->top_hits_lists[iNode];
int nHitsOld = l->nHits;
assert(nHitsOld <= tophits->m);
l->age = 0;
/* Merge: old hits into 0->nHitsOld and hits from iNode above that */
besthit_t *bothList = (besthit_t*)mymalloc(sizeof(besthit_t) * 3 * tophits->m);
HitsToBestHits(/*IN*/l->hits, nHitsOld, iNode, /*OUT*/bothList); /* does not compute criterion */
for (i = 0; i < nHitsOld; i++)
SetCriterion(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/&bothList[i]);
if (nActive <= 2 * tophits->m)
l->hitSource = -1; /* abandon the 2nd-level top-hits heuristic */
int nNewHits = l->hitSource >= 0 ? tophits->q : tophits->m;
assert(nNewHits > 0);
TransferBestHits(/*IN/UPDATE*/NJ, nActive, iNode,
/*IN*/allhits, /*nOldHits*/2 * nNewHits,
/*OUT*/&bothList[nHitsOld],
/*updateDist*/false); /* rely on UniqueBestHits to update dist and/or criterion */
int nUnique2;
besthit_t *uniqueList2 = UniqueBestHits(/*IN/UPDATE*/NJ, nActive,
/*IN/SORT*/bothList, nHitsOld + 2 * nNewHits,
/*OUT*/&nUnique2);
assert(nUnique2 > 0);
bothList = myfree(bothList,3 * tophits->m * sizeof(besthit_t));
/* Note this will overwrite l, but we saved nHitsOld */
SortSaveBestHits(iNode, /*IN/SORT*/uniqueList2, /*nIn*/nUnique2,
/*nOut*/nNewHits, /*IN/OUT*/tophits);
/* will update topvisible below */
tophits->visible[iNode] = tophits->top_hits_lists[iNode].hits[0];
uniqueList2 = myfree(uniqueList2, (nHitsOld + 2 * tophits->m) * sizeof(besthit_t));
}
ResetTopVisible(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/tophits); /* outside of the parallel phase */
allhits = myfree(allhits,sizeof(besthit_t)*NJ->maxnode);
}
uniqueList = myfree(uniqueList, nUniqueAlloc * sizeof(besthit_t));
if (verbose > 2) {
fprintf(stderr, "New top-hit list for %d profile-ops %ld (out-ops %ld): source %d age %d members ",
newnode,
profileOps - startProfileOps,
outprofileOps - startOutProfileOps,
lNew->hitSource, lNew->age);
int i;
for (i = 0; i < lNew->nHits; i++)
fprintf(stderr, " %d", lNew->hits[i].j);
fprintf(stderr,"\n");
}
}
/* Scan a node's top-hit list and, for every hit i->j that is better than
   j's current visible entry (or where j has no usable visible entry),
   install i as j's visible partner and propagate the improvement into the
   global top-visible list.  Entries with i < 0 are empty placeholders. */
void UpdateVisible(/*IN/UPDATE*/NJ_t *NJ, int nActive,
                   /*IN*/besthit_t *tophitsNode,
                   int nTopHits,
                   /*IN/OUT*/top_hits_t *tophits) {
  int iHit;

  for(iHit = 0; iHit < nTopHits; iHit++) {
    besthit_t *hit = &tophitsNode[iHit];
    if (hit->i < 0) continue;	/* possible empty entries */
    assert(NJ->parent[hit->i] < 0);
    assert(hit->j >= 0 && NJ->parent[hit->j] < 0);
    besthit_t visible;
    bool bSuccess = GetVisible(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/tophits, hit->j, /*OUT*/&visible);
    if (!bSuccess || hit->criterion < visible.criterion) {
      /* either j had no usable visible entry, or i->j is strictly better */
      if (bSuccess)
        nVisibleUpdate++;	/* count genuine improvements only */
      hit_t *v = &tophits->visible[hit->j];
      v->j = hit->i;
      v->dist = hit->dist;
      UpdateTopVisible(NJ, nActive, hit->j, v, /*IN/OUT*/tophits);
      if(verbose>5) fprintf(stderr,"NewVisible %d %d %f\n",
                            hit->j,v->j,v->dist);
    }
  } /* end loop over hits */
}
/* Update the top-visible list to perhaps include visible[iNode] */
/* Tries, in order: (1) find iIn already present or an empty/stale slot;
   (2) if the list is full, find the worst current entry; (3) replace that
   worst entry if the new hit's criterion is strictly better.
   Note the second scan also stops early (via !bIn) if it discovers an
   empty slot or the reverse pairing along the way. */
void UpdateTopVisible(/*IN*/NJ_t * NJ, int nActive,
                      int iIn, /*IN*/hit_t *hit,
                      /*IN/OUT*/top_hits_t *tophits) {
  assert(tophits != NULL);
  bool bIn = false;		/* placed in the list */
  int i;

  /* First, if the list is not full, put it in somewhere */
  for (i = 0; i < tophits->nTopVisible && !bIn; i++) {
    int iNode = tophits->topvisible[i];
    if (iNode == iIn) {
      /* this node is already in the top hit list */
      bIn = true;
    } else if (iNode < 0 || NJ->parent[iNode] >= 0) {
      /* found an empty spot (unset, or occupied by a joined/stale node) */
      bIn = true;
      tophits->topvisible[i] = iIn;
    }
  }

  int iPosWorst = -1;
  double dCriterionWorst = -1e20;
  if (!bIn) {
    /* Search for the worst hit */
    for (i = 0; i < tophits->nTopVisible && !bIn; i++) {
      int iNode = tophits->topvisible[i];
      assert(iNode >= 0 && NJ->parent[iNode] < 0 && iNode != iIn);
      besthit_t visible;
      if (!GetVisible(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/tophits, iNode, /*OUT*/&visible)) {
	/* found an empty spot */
	tophits->topvisible[i] = iIn;
	bIn = true;
      } else if (visible.i == hit->j && visible.j == iIn) {
	/* the reverse hit is already in the top hit list */
	bIn = true;
      } else if (visible.criterion >= dCriterionWorst) {
	iPosWorst = i;
	dCriterionWorst = visible.criterion;
      }
    }
  }

  if (!bIn && iPosWorst >= 0) {
    /* list is full: replace the worst entry only if the new hit is better */
    besthit_t visible = HitToBestHit(iIn, *hit);
    SetCriterion(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/&visible);
    if (visible.criterion < dCriterionWorst) {
      if (verbose > 2) {
	int iOld = tophits->topvisible[iPosWorst];
	fprintf(stderr, "TopVisible replace %d=>%d with %d=>%d\n",
		iOld, tophits->visible[iOld].j, visible.i, visible.j);
      }
      tophits->topvisible[iPosWorst] = iIn;
    }
  }

  if (verbose > 2) {
    /* debug dump of the surviving (active) top-visible entries */
    fprintf(stderr, "Updated TopVisible: ");
    for (i = 0; i < tophits->nTopVisible; i++) {
      int iNode = tophits->topvisible[i];
      if (iNode >= 0 && NJ->parent[iNode] < 0) {
	besthit_t bh = HitToBestHit(iNode, tophits->visible[iNode]);
	SetDistCriterion(NJ, nActive, &bh);
	fprintf(stderr, " %d=>%d:%.4f", bh.i, bh.j, bh.criterion);
      }
    }
    fprintf(stderr,"\n");
  }
}
/* Recompute the topvisible list */
/* Collects every active node's visible entry, sorts by criterion, and
   keeps the best nTopVisible of them, avoiding storing both i->j and j->i.
   Resets topvisibleAge to 0.
   BUG FIX: inTopVisible was allocated with a bare malloc() but released
   with myfree(), which decrements the mymallocUsed accounting counter and
   assumes mymalloc() counted the block; the mismatch corrupted the memory
   accounting and also skipped the out-of-memory check. */
void ResetTopVisible(/*IN/UPDATE*/NJ_t *NJ,
                     int nActive,
                     /*IN/OUT*/top_hits_t *tophits) {
  besthit_t *visibleSorted = mymalloc(sizeof(besthit_t)*nActive);
  int nVisible = 0;		/* #entries in visibleSorted */
  int iNode;
  for (iNode = 0; iNode < NJ->maxnode; iNode++) {
    /* skip joins involving stale nodes */
    if (NJ->parent[iNode] >= 0)
      continue;
    besthit_t v;
    if (GetVisible(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/tophits, iNode, /*OUT*/&v)) {
      assert(nVisible < nActive);
      visibleSorted[nVisible++] = v;
    }
  }
  assert(nVisible > 0);
  qsort(/*IN/OUT*/visibleSorted,nVisible,sizeof(besthit_t),CompareHitsByCriterion);

  /* Only keep the top m items, and try to avoid duplicating i->j with j->i
     Note that visible(i) -> j does not necessarily imply visible(j) -> i,
     so we store what the pairing was (or -1 for not used yet)
   */
  int *inTopVisible = (int*)mymalloc(sizeof(int) * NJ->maxnodes);
  int i;
  for (i = 0; i < NJ->maxnodes; i++)
    inTopVisible[i] = -1;

  if (verbose > 2)
    fprintf(stderr, "top-hit search: nActive %d nVisible %d considering up to %d items\n",
	    nActive, nVisible, tophits->m);

  /* save the sorted indices in topvisible */
  int iSave = 0;
  for (i = 0; i < nVisible && iSave < tophits->nTopVisible; i++) {
    besthit_t *v = &visibleSorted[i];
    if (inTopVisible[v->i] != v->j) { /* not seen already */
      tophits->topvisible[iSave++] = v->i;
      inTopVisible[v->i] = v->j;
      inTopVisible[v->j] = v->i;
    }
  }
  while(iSave < tophits->nTopVisible)
    tophits->topvisible[iSave++] = -1;	/* pad unused slots */
  myfree(visibleSorted, sizeof(besthit_t)*nActive);
  myfree(inTopVisible, sizeof(int) * NJ->maxnodes);
  tophits->topvisibleAge = 0;
  if (verbose > 2) {
    fprintf(stderr, "Reset TopVisible: ");
    for (i = 0; i < tophits->nTopVisible; i++) {
      int iNode = tophits->topvisible[i];
      if (iNode < 0)
	break;
      fprintf(stderr, " %d=>%d", iNode, tophits->visible[iNode].j);
    }
    fprintf(stderr,"\n");
  }
}
/*
Find best hit to do in O(N*log(N) + m*L*log(N)) time, by
copying and sorting the visible list
updating out-distances for the top (up to m) candidates
selecting the best hit
if !fastest then
local hill-climbing for a better join,
using best-hit lists only, and updating
all out-distances in every best-hit list
*/
/* Select the next join from the top-visible candidate list.
   If the list is too stale (age) or too sparse (few live candidates), it
   is rebuilt via ResetTopVisible and the search recurses once.
   Unless -fastest, the chosen join is then refined by hill-climbing over
   the top-hit lists of its endpoints until no better neighbor is found. */
void TopHitNJSearch(/*IN/UPDATE*/NJ_t *NJ, int nActive,
                    /*IN/OUT*/top_hits_t *tophits,
                    /*OUT*/besthit_t *join) {
  /* first, do we have at least m/2 candidates in topvisible?
     And remember the best one */
  int nCandidate = 0;
  int iNodeBestCandidate = -1;
  double dBestCriterion = 1e20;

  int i;
  for (i = 0; i < tophits->nTopVisible; i++) {
    int iNode = tophits->topvisible[i];
    /* NOTE(review): topvisible may hold -1 padding; presumably GetVisible
       rejects such entries -- confirm against its definition */
    besthit_t visible;
    if (GetVisible(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/tophits, iNode, /*OUT*/&visible)) {
      nCandidate++;
      if (iNodeBestCandidate < 0 || visible.criterion < dBestCriterion) {
	iNodeBestCandidate = iNode;
	dBestCriterion = visible.criterion;
      }
    }
  }

  tophits->topvisibleAge++;
  /* Note we may have only nActive/2 joins b/c we try to store them once */
  if (2 * tophits->topvisibleAge > tophits->m
      || (3*nCandidate < tophits->nTopVisible && 3*nCandidate < nActive)) {
    /* recompute top visible */
    if (verbose > 2)
      fprintf(stderr, "Resetting the top-visible list at nActive=%d\n",nActive);

    /* If age is low, then our visible set is becoming too sparse, because we have
       recently recomputed the top visible subset. This is very rare but can happen
       with -fastest. A quick-and-dirty solution is to walk up
       the parents to get additional entries in top hit lists. To ensure that the
       visible set becomes full, pick an arbitrary node if walking up terminates at self.
    */
    if (tophits->topvisibleAge <= 2) {
      if (verbose > 2)
	fprintf(stderr, "Expanding visible set by walking up to active nodes at nActive=%d\n", nActive);
      int iNode;
      for (iNode = 0; iNode < NJ->maxnode; iNode++) {
	if (NJ->parent[iNode] >= 0)
	  continue;
	hit_t *v = &tophits->visible[iNode];
	int newj = ActiveAncestor(NJ, v->j);
	if (newj >= 0 && newj != v->j) {
	  if (newj == iNode) {
	    /* pick arbitrarily: first active node other than iNode */
	    newj = 0;
	    while (NJ->parent[newj] >= 0 || newj == iNode)
	      newj++;
	  }
	  assert(newj >= 0 && newj < NJ->maxnodes
		 && newj != iNode
		 && NJ->parent[newj] < 0);

	  /* Set v to point to newj */
	  besthit_t bh = { iNode, newj, -1e20, -1e20, -1e20 };
	  SetDistCriterion(NJ, nActive, /*IN/OUT*/&bh);
	  v->j = newj;
	  v->dist = bh.dist;
	}
      }
    }
    ResetTopVisible(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/tophits);
    /* and recurse to try again */
    TopHitNJSearch(NJ, nActive, tophits, join);
    return;
  }
  if (verbose > 2)
    fprintf(stderr, "Top-visible list size %d (nActive %d m %d)\n",
	    nCandidate, nActive, tophits->m);
  assert(iNodeBestCandidate >= 0 && NJ->parent[iNodeBestCandidate] < 0);
  bool bSuccess = GetVisible(NJ, nActive, tophits, iNodeBestCandidate, /*OUT*/join);
  assert(bSuccess);
  assert(join->i >= 0 && NJ->parent[join->i] < 0);
  assert(join->j >= 0 && NJ->parent[join->j] < 0);

  if(fastest)
    return;		/* no hill-climbing in -fastest mode */

  /* Hill-climb: repeatedly replace the join by a better hit from either
     endpoint's top-hit list, until stable */
  int changed;
  do {
    changed = 0;

    besthit_t bestI;
    GetBestFromTopHits(join->i, NJ, nActive, tophits, /*OUT*/&bestI);
    assert(bestI.i == join->i);
    if (bestI.j != join->j && bestI.criterion < join->criterion) {
      changed = 1;
      if (verbose>2)
	fprintf(stderr,"BetterI\t%d\t%d\t%d\t%d\t%f\t%f\n",
		join->i,join->j,bestI.i,bestI.j,
		join->criterion,bestI.criterion);
      *join = bestI;
    }

    besthit_t bestJ;
    GetBestFromTopHits(join->j, NJ, nActive, tophits, /*OUT*/&bestJ);
    assert(bestJ.i == join->j);
    if (bestJ.j != join->i && bestJ.criterion < join->criterion) {
      changed = 1;
      if (verbose>2)
	fprintf(stderr,"BetterJ\t%d\t%d\t%d\t%d\t%f\t%f\n",
		join->i,join->j,bestJ.i,bestJ.j,
		join->criterion,bestJ.criterion);
      *join = bestJ;
    }
    if(changed) nHillBetter++;
  } while(changed);
}
/* Count how many alignment positions of leaf iNode's profile are gaps
   (code NOCODE).  Only valid for leaves (iNode < nSeq). */
int NGaps(/*IN*/NJ_t *NJ, int iNode) {
  int pos, count = 0;
  assert(iNode < NJ->nSeq);
  for (pos = 0; pos < NJ->nPos; pos++)
    count += (NJ->profiles[iNode]->codes[pos] == NOCODE) ? 1 : 0;
  return count;
}
/* qsort comparator: order besthit_t entries by ascending criterion. */
int CompareHitsByCriterion(const void *c1, const void *c2) {
  const besthit_t *a = (const besthit_t*)c1;
  const besthit_t *b = (const besthit_t*)c2;
  if (a->criterion > b->criterion) return 1;
  if (b->criterion > a->criterion) return -1;
  return 0;
}
/* qsort comparator: order besthit_t entries by (i, j) lexicographically. */
int CompareHitsByIJ(const void *c1, const void *c2) {
  const besthit_t *a = (const besthit_t*)c1;
  const besthit_t *b = (const besthit_t*)c2;
  int d = a->i - b->i;
  if (d == 0)
    d = a->j - b->j;
  return d;
}
/* Sort besthits by criterion and store the best nOut distinct partners as
   iNode's top-hit list, replacing any previous list.  Self-hits, repeats
   of the immediately preceding j, and j < 0 entries are skipped.
   Under OpenMP the list replacement is guarded by the per-node lock. */
void SortSaveBestHits(int iNode, /*IN/SORT*/besthit_t *besthits,
                      int nIn, int nOut,
                      /*IN/OUT*/top_hits_t *tophits) {
  assert(nIn > 0);
  assert(nOut > 0);
  top_hits_list_t *l = &tophits->top_hits_lists[iNode];
  /* */

  qsort(/*IN/OUT*/besthits,nIn,sizeof(besthit_t),CompareHitsByCriterion);

  /* First count how many we will save
     Not sure if removing duplicates is actually necessary.
   */
  int nSave = 0;
  int jLast = -1;
  int iBest;
  for (iBest = 0; iBest < nIn && nSave < nOut; iBest++) {
    if (besthits[iBest].i < 0)
      continue;	/* empty entry */
    assert(besthits[iBest].i == iNode);
    int j = besthits[iBest].j;
    if (j != iNode && j != jLast && j >= 0) {
      nSave++;
      jLast = j;
    }
  }
  assert(nSave > 0);

#ifdef OPENMP
  omp_set_lock(&tophits->locks[iNode]);
#endif
  /* discard any previous list before installing the new one */
  if (l->hits != NULL) {
    l->hits = myfree(l->hits, l->nHits * sizeof(hit_t));
    l->nHits = 0;
  }
  l->hits = mymalloc(sizeof(hit_t) * nSave);
  l->nHits = nSave;

  /* Second pass mirrors the counting pass above.
     NOTE(review): this pass does not explicitly skip i < 0 entries the way
     the first pass does; it presumably relies on the same j filtering --
     confirm that empty entries cannot carry a valid-looking j. */
  int iSave = 0;
  jLast = -1;
  for (iBest = 0; iBest < nIn && iSave < nSave; iBest++) {
    int j = besthits[iBest].j;
    if (j != iNode && j != jLast && j >= 0) {
      l->hits[iSave].j = j;
      l->hits[iSave].dist = besthits[iBest].dist;
      iSave++;
      jLast = j;
    }
  }
#ifdef OPENMP
  omp_unset_lock(&tophits->locks[iNode]);
#endif
  assert(iSave == nSave);
}
/* Copy nOldHits hits into newhits, re-anchored at iNode: each partner j is
   replaced by its current active ancestor.  Hits that vanish or collapse
   onto iNode are marked unusable (weight 0, sentinel dist/criterion).
   When updateDistances is false, stale values are replaced by sentinels
   for the caller to recompute later. */
void TransferBestHits(/*IN/UPDATE*/NJ_t *NJ,
                      int nActive,
                      int iNode,
                      /*IN*/besthit_t *oldhits,
                      int nOldHits,
                      /*OUT*/besthit_t *newhits,
                      bool updateDistances) {
  int iBest;
  assert(iNode >= 0);
  assert(NJ->parent[iNode] < 0);

  for (iBest = 0; iBest < nOldHits; iBest++) {
    const besthit_t *src = &oldhits[iBest];
    besthit_t *dst = &newhits[iBest];
    dst->i = iNode;
    dst->j = ActiveAncestor(/*IN*/NJ, src->j);
    dst->dist = src->dist;	/* may be overwritten below */
    dst->weight = src->weight;
    dst->criterion = src->criterion;

    if (dst->j < 0 || dst->j == iNode) {
      /* partner vanished or points back at iNode -- mark unusable */
      dst->weight = 0;
      dst->dist = -1e20;
      dst->criterion = 1e20;
    } else if (dst->i != src->i || dst->j != src->j) {
      /* endpoints changed, so the stored distance is stale */
      if (updateDistances)
        SetDistCriterion(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/dst);
      else {
        dst->dist = -1e20;
        dst->criterion = 1e20;
      }
    } else if (updateDistances) {
      /* same pair: distance is still valid, refresh the criterion only */
      SetCriterion(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/dst);
    } else {
      dst->criterion = 1e20;	/* leave dist alone */
    }
  }
}
/* Expand an array of compact hit_t records into besthit_t records anchored
   at iNode.  criterion/weight are set to sentinels; callers recompute them
   on demand. */
void HitsToBestHits(/*IN*/hit_t *hits, int nHits, int iNode, /*OUT*/besthit_t *newhits) {
  int k;
  for (k = 0; k < nHits; k++) {
    besthit_t *out = &newhits[k];
    out->i = iNode;
    out->j = hits[k].j;
    out->dist = hits[k].dist;
    out->criterion = 1e20;	/* sentinel: not yet computed */
    out->weight = -1;		/* not the true value -- computed when needed */
  }
}
/* Build a besthit_t for the pair (i, hit.j) from a compact hit record.
   criterion and weight are sentinels until recomputed. */
besthit_t HitToBestHit(int i, hit_t hit) {
  besthit_t out;
  out.i = i;
  out.j = hit.j;
  out.dist = hit.dist;
  out.weight = -1;		/* sentinel */
  out.criterion = 1e20;		/* sentinel */
  return out;
}
/* Describe the OpenMP configuration for banner output: ", OpenMP (N threads)"
   when compiled with OpenMP, otherwise the empty string. */
char *OpenMPString(void) {
#ifdef OPENMP
  static char description[100];
  snprintf(description, sizeof description, ", OpenMP (%d threads)", omp_get_max_threads());
  return description;
#else
  return "";
#endif
}
/* Algorithm 26.2.17 from Abromowitz and Stegun, Handbook of Mathematical Functions
Absolute accuracy of only about 1e-7, which is enough for us
*/
double pnorm(double x)
{
double b1 = 0.319381530;
double b2 = -0.356563782;
double b3 = 1.781477937;
double b4 = -1.821255978;
double b5 = 1.330274429;
double p = 0.2316419;
double c = 0.39894228;
if(x >= 0.0) {
double t = 1.0 / ( 1.0 + p * x );
return (1.0 - c * exp( -x * x / 2.0 ) * t *
( t *( t * ( t * ( t * b5 + b4 ) + b3 ) + b2 ) + b1 ));
}
/*else*/
double t = 1.0 / ( 1.0 - p * x );
return ( c * exp( -x * x / 2.0 ) * t *
( t *( t * ( t * ( t * b5 + b4 ) + b3 ) + b2 ) + b1 ));
}
/* Allocate sz bytes, aborting the program on failure.  Maintains the
   global totals szAllAlloc / mymallocUsed (and the heap high-water mark
   when TRACK_MEMORY is defined).  Returns NULL for sz == 0. */
void *mymalloc(size_t sz) {
  void *p;
  if (sz == 0)
    return NULL;
  p = malloc(sz);
  if (p == NULL) {
    fprintf(stderr, "Out of memory\n");
    exit(1);
  }
  szAllAlloc += sz;
  mymallocUsed += sz;
#ifdef TRACK_MEMORY
  {
    struct mallinfo mi = mallinfo();
    if (mi.arena + mi.hblkhd > maxmallocHeap)
      maxmallocHeap = mi.arena + mi.hblkhd;
  }
#endif
  /* gcc malloc should always return 16-byte-aligned values... */
  assert(IS_ALIGNED(p));
  return p;
}
/* Allocate (via mymalloc, so counted and OOM-checked) a copy of the sz
   bytes at data.  NULL in -> NULL out. */
void *mymemdup(void *data, size_t sz) {
  if (data == NULL)
    return NULL;
  void *copy = mymalloc(sz);
  memcpy(/*to*/copy, /*from*/data, sz);
  return copy;
}
/* Resize a block from szOld to szNew bytes, keeping the accounting totals
   consistent.  If bCopy, allocate fresh storage and copy (helps reduce
   fragmentation in practice); otherwise use realloc.  Aborts on failure.
   BUG FIX: the bCopy path used mymemdup(data, szNew), which reads szNew
   bytes out of a buffer that only holds szOld bytes -- an out-of-bounds
   heap read whenever the block grows.  Now copies only the bytes that
   actually exist (min of the two sizes). */
void *myrealloc(void *data, size_t szOld, size_t szNew, bool bCopy) {
  if (data == NULL && szOld == 0)
    return(mymalloc(szNew));
  if (data == NULL || szOld == 0 || szNew == 0) {
    fprintf(stderr,"Empty myrealloc\n");
    exit(1);
  }
  if (szOld == szNew)
    return(data);
  void *new = NULL;
  if (bCopy) {
    /* Try to reduce memory fragmentation by allocating anew and copying
       Seems to help in practice */
    new = mymalloc(szNew);
    memcpy(/*to*/new, /*from*/data, szOld < szNew ? szOld : szNew);
    myfree(data, szOld);
  } else {
    new = realloc(data,szNew);
    if (new == NULL) {
      fprintf(stderr, "Out of memory\n");
      exit(1);
    }
    assert(IS_ALIGNED(new));
    szAllAlloc += (szNew-szOld);
    mymallocUsed += (szNew-szOld);
#ifdef TRACK_MEMORY
    struct mallinfo mi = mallinfo();
    if (mi.arena+mi.hblkhd > maxmallocHeap)
      maxmallocHeap = mi.arena+mi.hblkhd;
#endif
  }
  return(new);
}
/* Free p (which was allocated with sz bytes) and maintain the global
   mymallocUsed counter.  Always returns NULL so callers can write
   p = myfree(p, sz) to clear the pointer in one step. */
void *myfree(void *p, size_t sz) {
  if (p != NULL) {
    free(p);
    mymallocUsed -= sz;
  }
  return NULL;
}
/******************************************************************************/
/* Minimization of a 1-dimensional function by Brent's method (Numerical Recipes)
* Borrowed from Tree-Puzzle 5.1 util.c under GPL
* Modified by M.N.P to pass in the accessory data for the optimization function,
* to use 2x bounds around the starting guess and expand them if necessary,
* and to use both a fractional and an absolute tolerance
*/
#define ITMAX 100
#define CGOLD 0.3819660
#define TINY 1.0e-20
#define ZEPS 1.0e-10
#define SHFT(a,b,c,d) (a)=(b);(b)=(c);(c)=(d);
#define SIGN(a,b) ((b) >= 0.0 ? fabs(a) : -fabs(a))
/* Brents method in one dimension */
/* Minimize f over the bracketing triple (ax, bx, cx) with precomputed
   values fax=f(ax), fbx=f(bx), fcx=f(cx); data is passed through to f.
   Converges when the interval is within the fractional tolerance ftol of
   x, or its width is below the absolute tolerance atol.
   On return: result is the abscissa of the minimum, *foptx = f(minimum),
   and *f2optx = an estimate of the second derivative there, from the
   parabola through the three best points (x, w, v). */
double brent(double ax, double bx, double cx, double (*f)(double, void *), void *data,
	     double ftol, double atol,
	     double *foptx, double *f2optx, double fax, double fbx, double fcx)
{
  int iter;
  /* x: best point so far; w: second best; v: previous w; u: trial point;
     a,b: current bracketing interval; d: step; e: step before last */
  double a,b,d=0,etemp,fu,fv,fw,fx,p,q,r,tol1,tol2,u,v,w,x,xm;
  double xw,wv,vx;
  double e=0.0;

  a=(ax < cx ? ax : cx);
  b=(ax > cx ? ax : cx);
  x=bx;
  fx=fbx;
  if (fax < fcx) {
    w=ax;
    fw=fax;
    v=cx;
    fv=fcx;
  } else {
    w=cx;
    fw=fcx;
    v=ax;
    fv=fax;
  }

  for (iter=1;iter<=ITMAX;iter++) {
    xm=0.5*(a+b);
    tol1=ftol*fabs(x);
    tol2=2.0*(tol1+ZEPS);
    /* converged: interval small relative to x, or absolutely small */
    if (fabs(x-xm) <= (tol2-0.5*(b-a))
	|| fabs(a-b) < atol) {
      *foptx = fx;
      xw = x-w;
      wv = w-v;
      vx = v-x;
      /* curvature of the parabola through (v,fv), (x,fx), (w,fw) */
      *f2optx = 2.0*(fv*xw + fx*wv + fw*vx)/
	(v*v*xw + x*x*wv + w*w*vx);
      return x;
    }

    if (fabs(e) > tol1) {
      /* try a parabolic-interpolation step through x, w, v */
      r=(x-w)*(fx-fv);
      q=(x-v)*(fx-fw);
      p=(x-v)*q-(x-w)*r;
      q=2.0*(q-r);
      if (q > 0.0) p = -p;
      q=fabs(q);
      etemp=e;
      e=d;
      /* reject the parabolic step if it is too large or leaves [a,b];
	 fall back to a golden-section step */
      if (fabs(p) >= fabs(0.5*q*etemp) || p <= q*(a-x) || p >= q*(b-x))
	d=CGOLD*(e=(x >= xm ? a-x : b-x));
      else {
	d=p/q;
	u=x+d;
	if (u-a < tol2 || b-u < tol2)
	  d=SIGN(tol1,xm-x);
      }
    } else {
      /* golden-section step into the larger of the two segments */
      d=CGOLD*(e=(x >= xm ? a-x : b-x));
    }

    /* evaluate at the trial point, but never closer than tol1 to x */
    u=(fabs(d) >= tol1 ? x+d : x+SIGN(tol1,d));
    fu=(*f)(u,data);
    if (fu <= fx) {
      /* u is the new best point; shift x->w->v and shrink the bracket */
      if (u >= x) a=x; else b=x;
      SHFT(v,w,x,u)
      SHFT(fv,fw,fx,fu)
    } else {
      /* u is worse than x but still tightens the bracket */
      if (u < x) a=u; else b=u;
      if (fu <= fw || w == x) {
	v=w;
	w=u;
	fv=fw;
	fw=fu;
      } else if (fu <= fv || v == x || v == w) {
	v=u;
	fv=fu;
      }
    }
  }

  /* ITMAX exceeded: return the best point found so far */
  *foptx = fx;
  xw = x-w;
  wv = w-v;
  vx = v-x;
  *f2optx = 2.0*(fv*xw + fx*wv + fw*vx)/
    (v*v*xw + x*x*wv + w*w*vx);
  return x;
} /* brent */
#undef ITMAX
#undef CGOLD
#undef ZEPS
#undef SHFT
#undef SIGN
/* one-dimensional minimization - as input a lower and an upper limit and a trial
   value for the minimum is needed: xmin < xguess < xmax
   the function and a fractional tolerance has to be specified
   onedimenmin returns the optimal x value and the value of the function
   and its second derivative at this point
   Note: the number and order of (*f) evaluations matters when f is
   expensive; this routine makes at least three evaluations to bracket the
   minimum, possibly more to widen the bracket, before calling brent. */
double onedimenmin(double xmin, double xguess, double xmax, double (*f)(double,void*), void *data,
		   double ftol, double atol,
		   /*OUT*/double *fx, /*OUT*/double *f2x)
{
  double optx, ax, bx, cx, fa, fb, fc;

  /* first attempt to bracketize minimum: pick ax < bx < cx around xguess,
     clipped to [xmin, xmax] */
  if (xguess == xmin) {
    ax = xmin;
    bx = 2.0*xguess;
    cx = 10.0*xguess;
  } else if (xguess <= 2.0 * xmin) {
    ax = xmin;
    bx = xguess;
    cx = 5.0*xguess;
  } else {
    ax = 0.5*xguess;
    bx = xguess;
    cx = 2.0*xguess;
  }
  if (cx > xmax)
    cx = xmax;
  if (bx >= cx)
    bx = 0.5*(ax+cx);
  if (verbose > 4)
    fprintf(stderr, "onedimenmin lo %.4f guess %.4f hi %.4f range %.4f %.4f\n",
	    ax, bx, cx, xmin, xmax);
  /* ideally this range includes the true minimum, i.e.,
     fb < fa and fb < fc
     if not, we gradually expand the boundaries until it does,
     or we near the boundary of the allowed range and use that
  */
  fa = (*f)(ax,data);
  fb = (*f)(bx,data);
  fc = (*f)(cx,data);
  while(fa < fb && ax > xmin) {
    /* f still decreasing to the left: move ax halfway toward xmin */
    ax = (ax+xmin)/2.0;
    if (ax < 2.0*xmin)	/* give up on shrinking the region */
      ax = xmin;
    fa = (*f)(ax,data);
  }
  while(fc < fb && cx < xmax) {
    /* f still decreasing to the right: move cx halfway toward xmax */
    cx = (cx+xmax)/2.0;
    if (cx > xmax * 0.95)
      cx = xmax;
    fc = (*f)(cx,data);
  }
  optx = brent(ax, bx, cx, f, data, ftol, atol, fx, f2x, fa, fb, fc);

  if (verbose > 4)
    fprintf(stderr, "onedimenmin reaches optimum f(%.4f) = %.4f f2x %.4f\n", optx, *fx, *f2x);
  return optx; /* return optimal x */
} /* onedimenmin */
/* Numerical code for the gamma distribution is modified from the PhyML 3 code
(GNU public license) of Stephane Guindon
*/
double LnGamma (double alpha)
{
/* returns ln(gamma(alpha)) for alpha>0, accurate to 10 decimal places.
Stirling's formula is used for the central polynomial part of the procedure.
Pike MC & Hill ID (1966) Algorithm 291: Logarithm of the gamma function.
Communications of the Association for Computing Machinery, 9:684
*/
double x=alpha, f=0, z;
if (x<7) {
f=1; z=x-1;
while (++z<7) f*=z;
x=z; f=-(double)log(f);
}
z = 1/(x*x);
return f + (x-0.5)*(double)log(x) - x + .918938533204673
+ (((-.000595238095238*z+.000793650793651)*z-.002777777777778)*z
+.083333333333333)/x;
}
double IncompleteGamma(double x, double alpha, double ln_gamma_alpha)
{
  /* returns the incomplete gamma ratio I(x,alpha) where x is the upper
     limit of the integration and alpha is the shape parameter.
     returns (-1) if in error
     ln_gamma_alpha = ln(Gamma(alpha)), is almost redundant.
     (1) series expansion if (alpha>x || x<=1)
     (2) continued fraction otherwise
     RATNEST FORTRAN by
     Bhattacharjee GP (1970) The incomplete gamma integral. Applied Statistics,
     19: 285-287 (AS32)
     The goto-labelled structure is a direct translation of the FORTRAN
     original; labels: l20 series loop, l30/l32 continued fraction,
     l34/l35 update & rescale, l42 complement, l50 return. */
  int i;
  double p=alpha, g=ln_gamma_alpha;
  double accurate=1e-8, overflow=1e30;
  double factor, gin=0, rn=0, a=0,b=0,an=0,dif=0, term=0, pn[6];

  if (x==0) return (0);
  if (x<0 || p<=0) return (-1);

  factor=(double)exp(p*(double)log(x)-x-g);	/* x^p e^-x / Gamma(p) */
  if (x>1 && x>=p) goto l30;
  /* (1) series expansion */
  gin=1;  term=1;  rn=p;
 l20:
  rn++;
  term*=x/rn;   gin+=term;
  if (term > accurate) goto l20;
  gin*=factor/p;
  goto l50;
 l30:
  /* (2) continued fraction; pn[] holds successive convergents */
  a=1-p;   b=a+x+1;  term=0;
  pn[0]=1;  pn[1]=x;  pn[2]=x+1;  pn[3]=x*b;
  gin=pn[2]/pn[3];
 l32:
  a++;  b+=2;  term++;   an=a*term;
  for (i=0; i<2; i++) pn[i+4]=b*pn[i+2]-an*pn[i];
  if (pn[5] == 0) goto l35;
  rn=pn[4]/pn[5];   dif=fabs(gin-rn);
  if (dif>accurate) goto l34;
  if (dif<=accurate*rn) goto l42;	/* converged */
 l34:
  gin=rn;
 l35:
  for (i=0; i<4; i++) pn[i]=pn[i+2];
  if (fabs(pn[4]) < overflow) goto l32;
  /* rescale the convergents to avoid overflow */
  for (i=0; i<4; i++) pn[i]/=overflow;
  goto l32;
 l42:
  gin=1-factor*gin;	/* continued fraction gives the upper tail */
 l50:
  return (gin);
}
/* Cumulative distribution P(X <= x) for a gamma distribution with shape
   alpha and mean 1 (i.e. scale = 1/alpha). */
double PGamma(double x, double alpha)
{
  double lnGammaAlpha = LnGamma(alpha);
  return IncompleteGamma(x * alpha, alpha, lnGammaAlpha);
}
/* helper function to subtract timval structures */
/* Subtract the `struct timeval' values X and Y,
storing the result in RESULT.
Return 1 if the difference is negative, otherwise 0. */
/* Subtract the `struct timeval' values X and Y, storing X - Y in RESULT.
   Returns 1 if the difference is negative, otherwise 0.
   Note: *y is normalized (modified) in the process. */
int timeval_subtract (struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Normalize y so that x->tv_usec - y->tv_usec lands in a sane range. */
  if (x->tv_usec < y->tv_usec) {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec += carry;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }

  /* With y normalized, the component-wise difference is the answer
     and tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  return x->tv_sec < y->tv_sec;
}
/* Seconds of wall-clock time elapsed since *clock_start. */
double clockDiff(/*IN*/struct timeval *clock_start) {
  struct timeval now, delta;
  gettimeofday(/*OUT*/&now, NULL);
  timeval_subtract(/*OUT*/&delta, /*IN*/&now, /*IN*/clock_start);
  return delta.tv_sec + 1e-6 * delta.tv_usec;
}
/* The random number generator is taken from D E Knuth
http://www-cs-faculty.stanford.edu/~knuth/taocp.html
*/
/* This program by D E Knuth is in the public domain and freely copyable.
* It is explained in Seminumerical Algorithms, 3rd edition, Section 3.6
* (or in the errata to the 2nd edition --- see
* http://www-cs-faculty.stanford.edu/~knuth/taocp.html
* in the changes to Volume 2 on pages 171 and following). */
/* N.B. The MODIFICATIONS introduced in the 9th printing (2002) are
included here; there's no backwards compatibility with the original. */
/* This version also adopts Brendan McKay's suggestion to
accommodate naive users who forget to call ran_start(seed). */
/* If you find any bugs, please report them immediately to
* taocp@cs.stanford.edu
* (and you will be rewarded if the bug is genuine). Thanks! */
/************ see the book for explanations and caveats! *******************/
/************ in particular, you need two's complement arithmetic **********/
#define KK 100 /* the long lag */
#define LL 37 /* the short lag */
#define MM (1L<<30) /* the modulus */
#define mod_diff(x,y) (((x)-(y))&(MM-1)) /* subtraction mod MM */
long ran_x[KK]; /* the generator state */
/* Knuth's lagged-Fibonacci generator: fill aa[0..n-1] with new random
   numbers mod MM and advance the state ran_x.  n must be >= KK.
   Verbatim from TAOCP vol 2 (public domain) -- do not modify. */
#ifdef __STDC__
void ran_array(long aa[],int n)
#else
void ran_array(aa,n)    /* put n new random numbers in aa */
  long *aa;   /* destination */
  int n;      /* array length (must be at least KK) */
#endif
{
  register int i,j;
  for (j=0;j<KK;j++) aa[j]=ran_x[j];
  for (;j<n;j++) aa[j]=mod_diff(aa[j-KK],aa[j-LL]);
  /* refresh the state from the tail of the output stream */
  for (i=0;i<LL;i++,j++) ran_x[i]=mod_diff(aa[j-KK],aa[j-LL]);
  for (;i<KK;i++,j++) ran_x[i]=mod_diff(aa[j-KK],ran_x[i-LL]);
}
/* the following routines are from exercise 3.6--15 */
/* after calling ran_start, get new randoms by, e.g., "x=ran_arr_next()" */
#define QUALITY 1009 /* recommended quality level for high-res use */
long ran_arr_buf[QUALITY];
long ran_arr_dummy=-1, ran_arr_started=-1;
long *ran_arr_ptr=&ran_arr_dummy; /* the next random number, or -1 */
#define TT 70 /* guaranteed separation between streams */
#define is_odd(x) ((x)&1) /* units bit of x */
/* Initialize the generator state ran_x from seed (any long selects a
   distinct stream; streams are separated by at least TT).  Verbatim from
   Knuth, TAOCP vol 2, exercise 3.6--15 (public domain) -- do not modify. */
#ifdef __STDC__
void ran_start(long seed)
#else
void ran_start(seed)    /* do this before using ran_array */
  long seed;            /* selector for different streams */
#endif
{
  register int t,j;
  long x[KK+KK-1];              /* the preparation buffer */
  register long ss=(seed+2)&(MM-2);

  for (j=0;j<KK;j++) {
    x[j]=ss;                      /* bootstrap the buffer */
    ss<<=1; if (ss>=MM) ss-=MM-2; /* cyclic shift 29 bits */
  }
  x[1]++;              /* make x[1] (and only x[1]) odd */
  for (ss=seed&(MM-1),t=TT-1; t; ) {
    for (j=KK-1;j>0;j--) x[j+j]=x[j], x[j+j-1]=0; /* "square" */
    for (j=KK+KK-2;j>=KK;j--)
      x[j-(KK-LL)]=mod_diff(x[j-(KK-LL)],x[j]),
      x[j-KK]=mod_diff(x[j-KK],x[j]);
    if (is_odd(ss)) {              /* "multiply by z" */
      for (j=KK;j>0;j--)  x[j]=x[j-1];
      x[0]=x[KK];            /* shift the buffer cyclically */
      x[LL]=mod_diff(x[LL],x[KK]);
    }
    if (ss) ss>>=1; else t--;
  }
  for (j=0;j<LL;j++) ran_x[j+KK-LL]=x[j];
  for (;j<KK;j++) ran_x[j-LL]=x[j];
  for (j=0;j<10;j++) ran_array(x,KK+KK-1); /* warm things up */
  ran_arr_ptr=&ran_arr_started;
}
#define ran_arr_next() (*ran_arr_ptr>=0? *ran_arr_ptr++: ran_arr_cycle())
/* Refill the QUALITY-sized buffer behind ran_arr_next() and return the
   first fresh value.  Self-initializes with a default seed if the caller
   forgot to call ran_start().  Part of Knuth's public-domain code. */
long ran_arr_cycle()
{
  if (ran_arr_ptr==&ran_arr_dummy)
    ran_start(314159L); /* the user forgot to initialize */
  ran_array(ran_arr_buf,QUALITY);
  ran_arr_buf[KK]=-1;	/* sentinel forces a refill after KK draws */
  ran_arr_ptr=ran_arr_buf+1;
  return ran_arr_buf[0];
}
/* end of code from Knuth */
/* Uniform pseudo-random double in [0, 1): a 30-bit Knuth random integer
   scaled by 2^-30. */
double knuth_rand() {
  const double twoToMinus30 = 9.31322574615479e-10;
  return twoToMinus30 * ran_arr_next();
}
hashstrings_t *MakeHashtable(char **strings, int nStrings) {
hashstrings_t *hash = (hashstrings_t*)mymalloc(sizeof(hashstrings_t));
hash->nBuckets = 8*nStrings;
hash->buckets = (hashbucket_t*)mymalloc(sizeof(hashbucket_t) * hash->nBuckets);
int i;
for (i=0; i < hash->nBuckets; i++) {
hash->buckets[i].string = NULL;
hash->buckets[i].nCount = 0;
hash->buckets[i].first = -1;
}
for (i=0; i < nStrings; i++) {
hashiterator_t hi = FindMatch(hash, strings[i]);
if (hash->buckets[hi].string == NULL) {
/* save a unique entry */
assert(hash->buckets[hi].nCount == 0);
hash->buckets[hi].string = strings[i];
hash->buckets[hi].nCount = 1;
hash->buckets[hi].first = i;
} else {
/* record a duplicate entry */
assert(hash->buckets[hi].string != NULL);
assert(strcmp(hash->buckets[hi].string, strings[i]) == 0);
assert(hash->buckets[hi].first >= 0);
hash->buckets[hi].nCount++;
}
}
return(hash);
}
/* Release a hash table (the strings themselves are not owned and are not
   freed).  Always returns NULL for the p = FreeHashtable(p) idiom. */
hashstrings_t *FreeHashtable(hashstrings_t* hash) {
  if (hash == NULL)
    return NULL;
  myfree(hash->buckets, sizeof(hashbucket_t) * hash->nBuckets);
  myfree(hash, sizeof(hashstrings_t));
  return NULL;
}
#define MAXADLER 65521
/* Locate the bucket for string: either the bucket already holding an
   equal string, or the first empty bucket along the probe chain.
   Hash is an Adler-32-style checksum; collisions are resolved by linear
   probing with wrap-around. */
hashiterator_t FindMatch(hashstrings_t *hash, char *string) {
  unsigned int sumA = 1;
  unsigned int sumB = 0;
  const char *p;
  for (p = string; *p != '\0'; p++) {
    sumA += (unsigned int)*p;
    sumB += sumA;
  }
  sumA %= MAXADLER;
  sumB %= MAXADLER;

  hashiterator_t hi = (sumB*65536+sumA) % hash->nBuckets;
  for (;;) {
    if (hash->buckets[hi].string == NULL
	|| strcmp(hash->buckets[hi].string, string) == 0)
      break;
    if (++hi >= hash->nBuckets)
      hi = 0;	/* wrap around */
  }
  return hi;
}
/* The string stored at bucket hi, or NULL if the bucket is empty. */
char *GetHashString(hashstrings_t *hash, hashiterator_t hi) {
  return hash->buckets[hi].string;
}
int HashCount(hashstrings_t *hash, hashiterator_t hi) {
  /* How many input strings hashed into bucket hi (0 if empty). */
  hashbucket_t *bucket = &hash->buckets[hi];
  return(bucket->nCount);
}
int HashFirst(hashstrings_t *hash, hashiterator_t hi) {
  /* Index of the first input string stored in bucket hi (-1 if empty). */
  hashbucket_t *bucket = &hash->buckets[hi];
  return(bucket->first);
}
uniquify_t *UniquifyAln(alignment_t *aln) {
  /* Collapse identical sequences in the alignment into unique representatives.
     Returns a uniquify_t with:
       uniqueSeq[iUnique]   -- the representative sequence (aliases aln->seqs; not copied)
       uniqueFirst[iUnique] -- index in aln of its first occurrence
       alnNext[i]           -- next duplicate of sequence i in aln, or -1
       alnToUniq[i]         -- unique index for alignment entry i
     Caller frees with FreeUniquify. */
  int nUniqueSeq = 0;
  char **uniqueSeq = (char**)mymalloc(aln->nSeq * sizeof(char*)); /* iUnique -> seq */
  int *uniqueFirst = (int*)mymalloc(aln->nSeq * sizeof(int)); /* iUnique -> iFirst in aln */
  int *alnNext = (int*)mymalloc(aln->nSeq * sizeof(int)); /* i in aln -> next, or -1 */
  int *alnToUniq = (int*)mymalloc(aln->nSeq * sizeof(int)); /* i in aln -> iUnique; many -> -1 */
  int i;
  for (i = 0; i < aln->nSeq; i++) {
    uniqueSeq[i] = NULL;
    uniqueFirst[i] = -1;
    alnNext[i] = -1;
    alnToUniq[i] = -1;
  }
  hashstrings_t *hashseqs = MakeHashtable(aln->seqs, aln->nSeq);
  for (i=0; i<aln->nSeq; i++) {
    hashiterator_t hi = FindMatch(hashseqs,aln->seqs[i]);
    int first = HashFirst(hashseqs,hi);
    if (first == i) {
      /* first occurrence -- defines a new unique sequence */
      uniqueSeq[nUniqueSeq] = aln->seqs[i];
      uniqueFirst[nUniqueSeq] = i;
      alnToUniq[i] = nUniqueSeq;
      nUniqueSeq++;
    } else {
      /* duplicate -- append i to the chain of copies of `first`.
	 NOTE(review): this walk is O(chain length) per duplicate, so an input
	 with many identical sequences is quadratic in the duplicate count. */
      int last = first;
      while (alnNext[last] != -1)
	last = alnNext[last];
      assert(last>=0);
      alnNext[last] = i;
      assert(alnToUniq[last] >= 0 && alnToUniq[last] < nUniqueSeq);
      alnToUniq[i] = alnToUniq[last];
    }
  }
  assert(nUniqueSeq>0);
  hashseqs = FreeHashtable(hashseqs);
  uniquify_t *uniquify = (uniquify_t*)mymalloc(sizeof(uniquify_t));
  uniquify->nSeq = aln->nSeq;
  uniquify->nUnique = nUniqueSeq;
  uniquify->uniqueFirst = uniqueFirst;
  uniquify->alnNext = alnNext;
  uniquify->alnToUniq = alnToUniq;
  uniquify->uniqueSeq = uniqueSeq;
  return(uniquify);
}
uniquify_t *FreeUniquify(uniquify_t *unique) {
  /* Free all arrays owned by the uniquify structure, then the structure itself.
     Note uniqueSeq entries alias the alignment and are NOT freed here.
     Always returns NULL for convenient reassignment. */
  if (unique == NULL)
    return(NULL);
  myfree(unique->uniqueFirst, sizeof(int)*unique->nSeq);
  myfree(unique->alnNext, sizeof(int)*unique->nSeq);
  myfree(unique->alnToUniq, sizeof(int)*unique->nSeq);
  myfree(unique->uniqueSeq, sizeof(char*)*unique->nSeq);
  myfree(unique, sizeof(uniquify_t));
  return(NULL);
}
traversal_t InitTraversal(NJ_t *NJ) {
  /* Allocate one visited-flag per node, all initially false.
     Free with FreeTraversal. */
  traversal_t traversal = (bool*)mymalloc(sizeof(bool)*NJ->maxnodes);
  int node;
  for (node = 0; node < NJ->maxnodes; node++)
    traversal[node] = false;
  return(traversal);
}
void SkipTraversalInto(int node, /*IN/OUT*/traversal_t traversal) {
  /* Mark node as already visited, so TraversePostorder will neither return it
     nor descend into its subtree. */
  traversal[node] = true;
}
int TraversePostorder(int node, NJ_t *NJ, /*IN/OUT*/traversal_t traversal,
		      /*OPTIONAL OUT*/bool *pUp) {
  /* Return the next unvisited node in post-order, starting the search at node,
     marking it visited; returns -1 once the whole tree below/around the root
     is exhausted. If pUp is non-NULL, *pUp is set true when the walk moved up
     into an already-visited node (which happens after a topology change). */
  if (pUp)
    *pUp = false;
  while(1) {
    assert(node >= 0);

    /* move to a child if possible */
    bool found = false;
    int iChild;
    for (iChild=0; iChild < NJ->child[node].nChild; iChild++) {
      int child = NJ->child[node].child[iChild];
      if (!traversal[child]) {
	node = child;
	found = true;
	break;
      }
    }
    if (found)
      continue; /* keep moving down */
    if (!traversal[node]) {
      /* all children done and node itself not yet emitted -- emit it */
      traversal[node] = true;
      return(node);
    }
    /* If we've already done this node, need to move up */
    if (node == NJ->root)
      return(-1); /* nowhere to go -- done traversing */
    node = NJ->parent[node];
    /* If we go up to someplace that was already marked as visited, this is due
       to a change in topology, so return it marked as "up" */
    if (pUp && traversal[node]) {
      *pUp = true;
      return(node);
    }
  }
}
traversal_t FreeTraversal(traversal_t traversal, NJ_t *NJ) {
  /* Release the visited-flag array; returns NULL for reassignment. */
  myfree(traversal, NJ->maxnodes * sizeof(bool));
  return(NULL);
}
profile_t **UpProfiles(NJ_t *NJ) {
  /* Allocate the per-node cache of up-profiles, all empty to start.
     Free with FreeUpProfiles. */
  profile_t **upProfiles = (profile_t**)mymalloc(sizeof(profile_t*)*NJ->maxnodes);
  int node;
  for (node = 0; node < NJ->maxnodes; node++)
    upProfiles[node] = NULL;
  return(upProfiles);
}
profile_t *GetUpProfile(/*IN/OUT*/profile_t **upProfiles, NJ_t *NJ, int outnode, bool useML) {
  /* Return the cached up-profile of outnode (the profile of the rest of the
     tree as seen from outnode), computing and caching it -- and the up-profiles
     of all ancestors on the way down from the root -- if necessary.
     ML mode combines C and D with PosteriorProfile; otherwise a weighted
     AverageProfile is used. The returned pointer is owned by the cache. */
  assert(outnode != NJ->root && outnode >= NJ->nSeq); /* not for root or leaves */
  if (upProfiles[outnode] != NULL)
    return(upProfiles[outnode]);

  int depth;
  int *pathToRoot = PathToRoot(NJ, outnode, /*OUT*/&depth);
  int i;
  /* depth-1 is root; walk from just below the root down to outnode,
     filling in any missing up-profiles along the way */
  for (i = depth-2; i>=0; i--) {
    int node = pathToRoot[i];
    if (upProfiles[node] == NULL) {
      /* Note -- SetupABCD may call GetUpProfile, but it should do it farther
	 up in the path to the root
      */
      profile_t *profiles[4];
      int nodeABCD[4];
      SetupABCD(NJ, node, /*OUT*/profiles, /*IN/OUT*/upProfiles, /*OUT*/nodeABCD, useML);
      if (useML) {
	/* If node is a child of root, then the 4th profile is of the 2nd root-sibling of node
	   Otherwise, the 4th profile is the up-profile of the parent of node, and that
	   is the branch-length we need
	 */
	double lenC = NJ->branchlength[nodeABCD[2]];
	double lenD = NJ->branchlength[nodeABCD[3]];
	if (verbose > 3) {
	  fprintf(stderr, "Computing UpProfile for node %d with lenC %.4f lenD %.4f pair-loglk %.3f\n",
		  node, lenC, lenD,
		  PairLogLk(profiles[2],profiles[3],lenC+lenD,NJ->nPos,NJ->transmat,&NJ->rates, /*site_lk*/NULL));
	  PrintNJInternal(stderr, NJ, /*useLen*/true);
	}
	upProfiles[node] = PosteriorProfile(/*C*/profiles[2], /*D*/profiles[3],
					    lenC, lenD,
					    NJ->transmat, &NJ->rates, NJ->nPos, NJ->nConstraints);
      } else {
	profile_t *profilesCDAB[4] = { profiles[2], profiles[3], profiles[0], profiles[1] };
	double weight = QuartetWeight(profilesCDAB, NJ->distance_matrix, NJ->nPos);
	if (verbose>3)
	  fprintf(stderr, "Compute upprofile of %d from %d and parents (vs. children %d %d) with weight %.3f\n",
		  node, nodeABCD[2], nodeABCD[0], nodeABCD[1], weight);
	upProfiles[node] = AverageProfile(profiles[2], profiles[3],
					  NJ->nPos, NJ->nConstraints,
					  NJ->distance_matrix,
					  weight);
      }
    }
  }
  FreePath(pathToRoot,NJ);
  assert(upProfiles[outnode] != NULL);
  return(upProfiles[outnode]);
}
profile_t *DeleteUpProfile(/*IN/OUT*/profile_t **upProfiles, NJ_t *NJ, int node) {
  /* Drop the cached up-profile of node, if any. Always returns NULL. */
  assert(node >= 0 && node < NJ->maxnodes);
  if (upProfiles[node] != NULL) {
    upProfiles[node] = FreeProfile(upProfiles[node], NJ->nPos, NJ->nConstraints); /* returns NULL */
  }
  return(NULL);
}
profile_t **FreeUpProfiles(profile_t **upProfiles, NJ_t *NJ) {
  /* Free every cached up-profile and then the cache array itself;
     reports how many entries were in use when verbose. Returns NULL. */
  int nUsed = 0;
  int node;
  for (node = 0; node < NJ->maxnodes; node++) {
    if (upProfiles[node] != NULL)
      nUsed++;
    DeleteUpProfile(upProfiles, NJ, node);
  }
  myfree(upProfiles, sizeof(profile_t*)*NJ->maxnodes);
  if (verbose >= 3)
    fprintf(stderr,"FreeUpProfiles -- freed %d\n", nUsed);
  return(NULL);
}
int *PathToRoot(NJ_t *NJ, int node, /*OUT*/int *outDepth) {
  /* List node and all of its ancestors; the last entry is the root.
     *outDepth is the number of entries. Caller frees with FreePath. */
  int *pathToRoot = (int*)mymalloc(sizeof(int)*NJ->maxnodes);
  int depth = 0;
  int ancestor;
  for (ancestor = node; ancestor >= 0; ancestor = NJ->parent[ancestor])
    pathToRoot[depth++] = ancestor;
  *outDepth = depth;
  return(pathToRoot);
}
int *FreePath(int *path, NJ_t *NJ) {
  /* Release an array allocated by PathToRoot; returns NULL. */
  myfree(path, NJ->maxnodes * sizeof(int));
  return(NULL);
}
transition_matrix_t *CreateGTR(double *r/*ac ag at cg ct gt*/, double *f/*acgt*/) {
  /* Build a general-time-reversible (GTR) model from the six exchangeability
     rates r[] and the stationary nucleotide frequencies f[], normalized so
     that the mean substitution rate under f[] is 1. Nucleotides only. */
  double matrix[4][MAXCODES];
  assert(nCodes==4);
  /* Place rates onto a symmetric matrix, but correct by f(target), so that
     stationary distribution f[] is maintained.
     Leave diagonals as 0 (CreateTransitionMatrix will fix them). */
  int iRate = 0;
  int row, col;
  for (row = 0; row < nCodes; row++) {
    matrix[row][row] = 0;
    for (col = row+1; col < nCodes; col++) {
      double rate = r[iRate++];
      assert(rate > 0);
      /* Want t(matrix) * f to be 0 */
      matrix[row][col] = rate * f[row];
      matrix[col][row] = rate * f[col];
    }
  }
  /* Compute the average mutation rate and rescale so it becomes 1 */
  double total_rate = 0;
  for (row = 0; row < nCodes; row++)
    for (col = 0; col < nCodes; col++)
      total_rate += f[row] * matrix[row][col];
  assert(total_rate > 1e-6);
  double inv = 1.0/total_rate;
  for (row = 0; row < nCodes; row++)
    for (col = 0; col < nCodes; col++)
      matrix[row][col] *= inv;
  return(CreateTransitionMatrix(matrix,f));
}
transition_matrix_t *CreateTransitionMatrix(/*IN*/double matrix[MAXCODES][MAXCODES],
					    /*IN*/double stat[MAXCODES]) {
  /* Build the eigendecomposition-based representation of a substitution model.
     Given the off-diagonal rates (diagonals are recomputed here so each column
     sums to 0) and the stationary distribution stat[], this fills in the
     eigenvalues, the rotation matrices (codeFreq, eigeninv/eigeninvT), and
     precomputed posterior tables nearP/nearFreq at t = approxMLnearT.
     Returns a freshly allocated transition_matrix_t owned by the caller.
     Fix: removed an unreachable assert(0) that followed the final return. */
  int i,j,k;
  transition_matrix_t *transmat = mymalloc(sizeof(transition_matrix_t));
  double sqrtstat[20];
  for (i = 0; i < nCodes; i++) {
    transmat->stat[i] = stat[i];
    transmat->statinv[i] = 1.0/stat[i];
    sqrtstat[i] = sqrt(stat[i]);
  }
  double sym[20*20];		/* symmetrized matrix M' */
  /* set diagonals so columns sums are 0 before symmetrization */
  for (i = 0; i < nCodes; i++)
    for (j = 0; j < nCodes; j++)
      sym[nCodes*i+j] = matrix[i][j];
  for (j = 0; j < nCodes; j++) {
    double sum = 0;
    sym[nCodes*j+j] = 0;
    for (i = 0; i < nCodes; i++)
      sum += sym[nCodes*i+j];
    sym[nCodes*j+j] = -sum;
  }
  /* M' = S**-1 M S, with S = diag(sqrt(stat)) */
  for (i = 0; i < nCodes; i++)
    for (j = 0; j < nCodes; j++)
      sym[nCodes*i+j] *= sqrtstat[j]/sqrtstat[i];
  /* eigen decomposition of M' -- note that eigenW is the transpose of what we want,
     which is eigenvectors in columns */
  double eigenW[20*20], eval[20], e[20];
  for (i = 0; i < nCodes*nCodes; i++)
    eigenW[i] = sym[i];
  tred2(eigenW, nCodes, nCodes, eval, e);
  tqli(eval, e, nCodes , nCodes, eigenW);

  /* save eigenvalues */
  for (i = 0; i < nCodes; i++)
    transmat->eigenval[i] = eval[i];

  /* compute eigen decomposition of M into t(codeFreq): V = S*W */
  /* compute inverse of V in eigeninv: V**-1 = t(W) S**-1 */
  for (i = 0; i < nCodes; i++) {
    for (j = 0; j < nCodes; j++) {
      transmat->eigeninv[i][j] = eigenW[nCodes*i+j] / sqrtstat[j];
      transmat->eigeninvT[j][i] = transmat->eigeninv[i][j];
    }
  }
  for (i = 0; i < nCodes; i++)
    for (j = 0; j < nCodes; j++)
      transmat->codeFreq[i][j] = eigenW[j*nCodes+i] * sqrtstat[i];
  /* codeFreq[NOCODE] is the rotation of (1,1,...) not (1/nCodes,1/nCodes,...), which
     gives correct posterior probabilities
  */
  for (j = 0; j < nCodes; j++) {
    transmat->codeFreq[NOCODE][j] = 0.0;
    for (i = 0; i < nCodes; i++)
      transmat->codeFreq[NOCODE][j] += transmat->codeFreq[i][j];
  }
  /* save some posterior probabilities for approximating later:
     first, we compute P(B | A, t) for t = approxMLnearT, by using
     V * exp(L*t) * V**-1 */
  double expvalues[MAXCODES];
  for (i = 0; i < nCodes; i++)
    expvalues[i] = exp(approxMLnearT * transmat->eigenval[i]);
  double LVinv[MAXCODES][MAXCODES]; /* exp(L*t) * V**-1 */
  for (i = 0; i < nCodes; i++) {
    for (j = 0; j < nCodes; j++)
      LVinv[i][j] = transmat->eigeninv[i][j] * expvalues[i];
  }
  /* matrix transform for converting A -> B given t: transt[i][j] = P(j->i | t) */
  double transt[MAXCODES][MAXCODES];
  for (i = 0; i < nCodes; i++) {
    for (j = 0; j < nCodes; j++) {
      transt[i][j] = 0;
      for (k = 0; k < nCodes; k++)
	transt[i][j] += transmat->codeFreq[i][k] * LVinv[k][j];
    }
  }
  /* nearP[i][j] = P(parent = j | both children are i) = P(j | i,i) ~ stat(j) * P(j->i | t)**2 */
  for (i = 0; i < nCodes; i++) {
    double nearP[MAXCODES];
    double tot = 0;
    for (j = 0; j < nCodes; j++) {
      assert(transt[j][i] > 0);
      assert(transmat->stat[j] > 0);
      nearP[j] = transmat->stat[j] * transt[i][j] * transt[i][j];
      tot += nearP[j];
    }
    assert(tot > 0);
    for (j = 0; j < nCodes; j++)
      nearP[j] *= 1.0/tot;
    /* save nearP in transmat->nearP[i][] */
    for (j = 0; j < nCodes; j++)
      transmat->nearP[i][j] = nearP[j];
    /* multiply by 1/stat and rotate nearP */
    for (j = 0; j < nCodes; j++)
      nearP[j] /= transmat->stat[j];
    for (j = 0; j < nCodes; j++) {
      double rot = 0;
      for (k = 0; k < nCodes; k++)
	/* NOTE(review): codeFreq[i][j] is invariant in k, so rot reduces to
	   codeFreq[i][j] * sum(nearP). If a true rotation was intended this
	   may have been meant as codeFreq[k][j] -- confirm before changing;
	   behavior is left exactly as it was. */
	rot += nearP[k] * transmat->codeFreq[i][j];
      transmat->nearFreq[i][j] = rot;
    }
  }
  return(transmat);
}
distance_matrix_t *TransMatToDistanceMat(transition_matrix_t *transmat) {
  /* Wrap a transition matrix as a distance_matrix_t by copying its rotation
     matrices; the distances themselves are never used in this mode.
     Returns NULL if transmat is NULL; otherwise a new caller-owned matrix. */
  if (transmat == NULL)
    return(NULL);
  distance_matrix_t *dmat = mymalloc(sizeof(distance_matrix_t));
  int row, col;
  for (row = 0; row < nCodes; row++) {
    for (col = 0; col < nCodes; col++) {
      dmat->distances[row][col] = 0; /* never actually used */
      dmat->eigeninv[row][col] = transmat->eigeninv[row][col];
      dmat->codeFreq[row][col] = transmat->codeFreq[row][col];
    }
  }
  /* eigentot . rotated-vector is the total frequency of the unrotated vector
     (used to normalize in NormalizeFreq()).
     For transition matrices, we rotate by transpose of eigenvectors, so
     we need to multiply the inverse matrix by 1....1 to get this vector,
     or in other words, sum the columns */
  for (row = 0; row < nCodes; row++) {
    dmat->eigentot[row] = 0.0;
    for (col = 0; col < nCodes; col++)
      dmat->eigentot[row] += transmat->eigeninv[row][col];
  }
  return(dmat);
}
/* Numerical recipes code for eigen decomposition (actually taken from RAxML rev_functions.c) */
/* Householder reduction of a real symmetric matrix to tridiagonal form.
   a is an n x n matrix stored column-major with leading dimension np; on exit
   a holds the orthogonal transformation, d the diagonal, and e the
   off-diagonal elements. Numerical Recipes code (via RAxML rev_functions.c);
   the macros below emulate Fortran-style 1-based, column-major indexing.
   Kept verbatim -- do not restyle. */
void tred2 (double *a, const int n, const int np, double *d, double *e)
{
#define a(i,j) a[(j-1)*np + (i-1)]
#define e(i) e[i-1]
#define d(i) d[i-1]
  int i, j, k, l;
  double f, g, h, hh, scale;
  for (i = n; i > 1; i--) {
    l = i-1;
    h = 0;
    scale = 0;
    if ( l > 1 ) {
      for ( k = 1; k <= l; k++ )
	scale += fabs(a(i,k));
      if (scale == 0)
	e(i) = a(i,l);
      else {
	for (k = 1; k <= l; k++) {
	  a(i,k) /= scale;
	  h += a(i,k) * a(i,k);
	}
	f = a(i,l);
	g = -sqrt(h);
	if (f < 0) g = -g;
	e(i) = scale *g;
	h -= f*g;
	a(i,l) = f-g;
	f = 0;
	for (j = 1; j <=l ; j++) {
	  a(j,i) = a(i,j) / h;
	  g = 0;
	  for (k = 1; k <= j; k++)
	    g += a(j,k)*a(i,k);
	  for (k = j+1; k <= l; k++)
	    g += a(k,j)*a(i,k);
	  e(j) = g/h;
	  f += e(j)*a(i,j);
	}
	hh = f/(h+h);
	for (j = 1; j <= l; j++) {
	  f = a(i,j);
	  g = e(j) - hh * f;
	  e(j) = g;
	  for (k = 1; k <= j; k++)
	    a(j,k) -= f*e(k) + g*a(i,k);
	}
      }
    } else
      e(i) = a(i,l);
    d(i) = h;
  }
  d(1) = 0;
  e(1) = 0;
  /* accumulate the transformation matrices */
  for (i = 1; i <= n; i++) {
    l = i-1;
    if (d(i) != 0) {
      for (j = 1; j <=l; j++) {
	g = 0;
	for (k = 1; k <= l; k++)
	  g += a(i,k)*a(k,j);
	for (k=1; k <=l; k++)
	  a(k,j) -= g * a(k,i);
      }
    }
    d(i) = a(i,i);
    a(i,i) = 1;
    for (j=1; j<=l; j++)
      a(i,j) = a(j,i) = 0;
  }
  return;
#undef a
#undef e
#undef d
}
double pythag(double a, double b) {
double absa = fabs(a), absb = fabs(b);
return (absa > absb) ?
absa * sqrt(1+ (absb/absa)*(absb/absa)) :
absb == 0 ?
0 :
absb * sqrt(1+ (absa/absb)*(absa/absb));
}
/* QL algorithm with implicit shifts: eigenvalues and eigenvectors of a
   symmetric tridiagonal matrix produced by tred2. d holds the diagonal (and
   on exit the eigenvalues), e the off-diagonals; z (n x n, leading dimension
   np, column-major via the macros) accumulates the eigenvectors.
   Numerical Recipes code (via RAxML); kept verbatim -- do not restyle. */
void tqli(double *d, double *e, int n, int np, double *z)
{
#define z(i,j) z[(j-1)*np + (i-1)]
#define e(i) e[i-1]
#define d(i) d[i-1]
  int i = 0, iter = 0, k = 0, l = 0, m = 0;
  double b = 0, c = 0, dd = 0, f = 0, g = 0, p = 0, r = 0, s = 0;

  /* renumber the off-diagonals for convenience */
  for(i=2; i<=n; i++)
    e(i-1) = e(i);
  e(n) = 0;

  for (l = 1; l <= n; l++)
    {
      iter = 0;
    labelExtra:
      /* look for a single small subdiagonal element to split the matrix */
      for (m = l; (m < n); m++)
	{
	  dd = fabs(d(m))+fabs(d(m+1));
	  if (fabs(e(m))+dd == dd)
	    break;
	}
      if (m != l)
	{
	  assert(iter < 30); /* convergence failure if 30 iterations exceeded */
	  iter++;
	  /* form the implicit shift */
	  g = (d(l+1)-d(l))/(2*e(l));
	  r = pythag(g,1.);
	  g = d(m)-d(l)+e(l)/(g+(g<0?-r:r));
	  s = 1;
	  c = 1;
	  p = 0;
	  /* a plane rotation (Givens) followed by implicit QL sweeps */
	  for (i = m-1; i>=l; i--)
	    {
	      f = s*e(i);
	      b = c*e(i);
	      r = pythag(f,g);
	      e(i+1) = r;
	      if (r == 0)
		{
		  d (i+1) -= p;
		  e (m) = 0;
		  goto labelExtra;
		}
	      s = f/r;
	      c = g/r;
	      g = d(i+1)-p;
	      r = (d(i)-g)*s + 2*c*b;
	      p = s*r;
	      d(i+1) = g + p;
	      g = c*r - b;
	      /* accumulate the rotation into the eigenvector matrix */
	      for (k=1; k <= n; k++)
		{
		  f = z(k,i+1);
		  z(k,i+1) = s * z(k,i) + c*f;
		  z(k,i) = c * z(k,i) - s*f;
		}
	    }
	  d(l) -= p;
	  e(l) = g;
	  e(m) = 0;
	  goto labelExtra;
	}
    }
  return;
#undef z
#undef e
#undef d
}
#ifdef USE_SSE3
/* Horizontal sum of the four floats in an SSE register. */
inline float mm_sum(register __m128 sum) {
#if 1
  /* stupider but faster: spill to memory and add scalar-wise */
  float f[4] ALIGNED;
  _mm_store_ps(f,sum);
  return(f[0]+f[1]+f[2]+f[3]);
#else
  /* first we get sum[0]+sum[1], sum[2]+sum[3] by selecting 0/1 and 2/3 */
  sum = _mm_add_ps(sum,_mm_shuffle_ps(sum,sum,_MM_SHUFFLE(0,1,2,3)));
  /* then get sum[0]+sum[1]+sum[2]+sum[3] by selecting 0/1 and 0/1 */
  sum = _mm_add_ps(sum,_mm_shuffle_ps(sum,sum,_MM_SHUFFLE(0,1,0,1)));
  float f;
  _mm_store_ss(&f, sum); /* save the lowest word */
  return(f);
#endif
}
#endif
void vector_multiply(/*IN*/numeric_t *f1, /*IN*/numeric_t *f2, int n, /*OUT*/numeric_t *fOut) {
  /* Elementwise product: fOut[i] = f1[i]*f2[i] for i in [0,n).
     The SSE path assumes 16-byte-aligned pointers and n a multiple of 4. */
#ifdef USE_SSE3
  int i;
  for (i = 0; i < n; i += 4) {
    __m128 product = _mm_mul_ps(_mm_load_ps(f1+i), _mm_load_ps(f2+i));
    _mm_store_ps(fOut+i, product);
  }
#else
  int i;
  for (i = 0; i < n; i++)
    fOut[i] = f1[i] * f2[i];
#endif
}
numeric_t vector_multiply_sum(/*IN*/numeric_t *f1, /*IN*/numeric_t *f2, int n) {
  /* Dot product: sum of f1[i]*f2[i] over [0,n). */
#ifdef USE_SSE3
  if (n == 4) /* common nucleotide case: scalar form avoids reduction overhead */
    return(f1[0]*f2[0]+f1[1]*f2[1]+f1[2]*f2[2]+f1[3]*f2[3]);
  __m128 partial = _mm_setzero_ps();
  int i;
  for (i = 0; i < n; i += 4) {
    __m128 product = _mm_mul_ps(_mm_load_ps(f1+i), _mm_load_ps(f2+i));
    partial = _mm_add_ps(product, partial);
  }
  return(mm_sum(partial));
#else
  numeric_t total = 0.0;
  int i;
  for (i = 0; i < n; i++)
    total += f1[i] * f2[i];
  return(total);
#endif
}
/* sum(f1*f2*f3) */
/* sum(f1*f2*f3) */
numeric_t vector_multiply3_sum(/*IN*/numeric_t *f1, /*IN*/numeric_t *f2, /*IN*/numeric_t* f3, int n) {
  /* Triple elementwise product, summed over [0,n). */
#ifdef USE_SSE3
  __m128 partial = _mm_setzero_ps();
  int i;
  for (i = 0; i < n; i += 4) {
    __m128 prod = _mm_mul_ps(_mm_mul_ps(_mm_load_ps(f1+i), _mm_load_ps(f2+i)),
			     _mm_load_ps(f3+i));
    partial = _mm_add_ps(prod, partial);
  }
  return(mm_sum(partial));
#else
  numeric_t total = 0.0;
  int i;
  for (i = 0; i < n; i++)
    total += f1[i]*f2[i]*f3[i];
  return(total);
#endif
}
numeric_t vector_dot_product_rot(/*IN*/numeric_t *f1, /*IN*/numeric_t *f2, /*IN*/numeric_t *fBy, int n) {
  /* Product of two dot products against the same vector: (f1.fBy) * (f2.fBy). */
#ifdef USE_SSE3
  __m128 sum1 = _mm_setzero_ps();
  __m128 sum2 = _mm_setzero_ps();
  int i;
  for (i = 0; i < n; i += 4) {
    __m128 by = _mm_load_ps(fBy+i);
    sum1 = _mm_add_ps(_mm_mul_ps(_mm_load_ps(f1+i), by), sum1);
    sum2 = _mm_add_ps(_mm_mul_ps(_mm_load_ps(f2+i), by), sum2);
  }
  return(mm_sum(sum1)*mm_sum(sum2));
#else
  numeric_t dot1 = 0.0;
  numeric_t dot2 = 0.0;
  int i;
  for (i = 0; i < n; i++) {
    dot1 += f1[i]*fBy[i];
    dot2 += f2[i]*fBy[i];
  }
  return(dot1*dot2);
#endif
}
numeric_t vector_sum(/*IN*/numeric_t *f1, int n) {
  /* Sum of the n entries of f1. */
#ifdef USE_SSE3
  if (n==4) /* common nucleotide case: skip the SIMD reduction */
    return(f1[0]+f1[1]+f1[2]+f1[3]);
  __m128 partial = _mm_setzero_ps();
  int i;
  for (i = 0; i < n; i+=4)
    partial = _mm_add_ps(_mm_load_ps(f1+i), partial);
  return(mm_sum(partial));
#else
  numeric_t total = 0.0;
  int i;
  for (i = 0; i < n; i++)
    total += f1[i];
  return(total);
#endif
}
void vector_multiply_by(/*IN/OUT*/numeric_t *f, /*IN*/numeric_t fBy, int n) {
  /* Scale f in place by the scalar fBy. */
#ifdef USE_SSE3
  __m128 scale = _mm_set1_ps(fBy);
  int i;
  for (i = 0; i < n; i += 4)
    _mm_store_ps(f+i, _mm_mul_ps(_mm_load_ps(f+i), scale));
#else
  int i;
  for (i = 0; i < n; i++)
    f[i] *= fBy;
#endif
}
void vector_add_mult(/*IN/OUT*/numeric_t *fTot, /*IN*/numeric_t *fAdd, numeric_t weight, int n) {
  /* Weighted accumulate: fTot[i] += fAdd[i] * weight. */
#ifdef USE_SSE3
  __m128 w = _mm_set1_ps(weight);
  int i;
  for (i = 0; i < n; i += 4) {
    __m128 updated = _mm_add_ps(_mm_load_ps(fTot+i),
				_mm_mul_ps(_mm_load_ps(fAdd+i), w));
    _mm_store_ps(fTot+i, updated);
  }
#else
  int i;
  for (i = 0; i < n; i++)
    fTot[i] += fAdd[i] * weight;
#endif
}
void matrixt_by_vector4(/*IN*/numeric_t mat[4][MAXCODES], /*IN*/numeric_t vec[4], /*OUT*/numeric_t out[4]) {
  /* out = t(mat) * vec for the 4x4 case: out[j] = sum_k vec[k] * mat[k][j]. */
#ifdef USE_SSE3
  /* accumulate the sum of row vectors vec[k] * mat[k][] */
  __m128 accum = _mm_setzero_ps();
  int k;
  for (k = 0; k < 4; k++) {
    __m128 row = _mm_load_ps(&mat[k][0]);
    __m128 vk = _mm_load1_ps(&vec[k]); /* broadcast vec[k] to all four lanes */
    accum = _mm_add_ps(accum, _mm_mul_ps(vk,row));
  }
  _mm_store_ps(out, accum);
#else
  int j,k;
  for (j = 0; j < 4; j++) {
    double sum = 0;
    for (k = 0; k < 4; k++)
      sum += vec[k] * mat[k][j];
    out[j] = sum;
  }
#endif
}
/* 20x20 amino-acid distance matrix derived from BLOSUM45, together with its
   precomputed eigendecomposition (eigeninv, eigenval). The data values below
   must not be edited by hand. */
distance_matrix_t matrixBLOSUM45 =
  {
    /*distances*/
    {
      {0, 1.31097856157468, 1.06573001937323, 1.2682782988532, 0.90471293383305, 1.05855446876905, 1.05232790675508, 0.769574440593014, 1.27579668305679, 0.964604099952603, 0.987178199640556, 1.05007594438157, 1.05464162250736, 1.1985987403937, 0.967404475245526, 0.700490199584332, 0.880060189098976, 1.09748548316685, 1.28141710375267, 0.800038509951648},
      {1.31097856157468, 0, 0.8010890222701, 0.953340718498495, 1.36011107208122, 0.631543775840481, 0.791014908659279, 1.15694899265629, 0.761152570032029, 1.45014917711188, 1.17792001455227, 0.394661075648738, 0.998807558909651, 1.135143404599, 1.15432562628921, 1.05309036790541, 1.05010474413616, 1.03938321130789, 0.963216908696184, 1.20274751778601},
      {1.06573001937323, 0.8010890222701, 0, 0.488217214273568, 1.10567116937273, 0.814970207038261, 0.810176440932339, 0.746487413974582, 0.61876156253224, 1.17886558630004, 1.52003670190022, 0.808442678243754, 1.2889025816028, 1.16264109995678, 1.18228799147301, 0.679475681649858, 0.853658619686283, 1.68988558988005, 1.24297493464833, 1.55207513886163},
      {1.2682782988532, 0.953340718498495, 0.488217214273568, 0, 1.31581050011876, 0.769778474953791, 0.482077627352988, 0.888361752320536, 0.736360849050364, 1.76756333403346, 1.43574761894039, 0.763612910719347, 1.53386612356483, 1.74323672079854, 0.886347403928663, 0.808614044804528, 1.01590147813779, 1.59617804551619, 1.1740494822217, 1.46600946033173},
      {0.90471293383305, 1.36011107208122, 1.10567116937273, 1.31581050011876, 0, 1.3836789310481, 1.37553994252576, 1.26740695314856, 1.32361065635259, 1.26087264215993, 1.02417540515351, 1.37259631233791, 1.09416720447891, 0.986982088723923, 1.59321190226694, 0.915638787768407, 0.913042853922533, 1.80744143643002, 1.3294417177004, 0.830022143283238},
      {1.05855446876905, 0.631543775840481, 0.814970207038261, 0.769778474953791, 1.3836789310481, 0, 0.506942797642807, 1.17699648087288, 0.614595446514896, 1.17092829494457, 1.19833088638994, 0.637341078675405, 0.806490842729072, 1.83315144709714, 0.932064479113502, 0.850321696813199, 1.06830084665916, 1.05739353225849, 0.979907428113788, 1.5416250309563},
      {1.05232790675508, 0.791014908659279, 0.810176440932339, 0.482077627352988, 1.37553994252576, 0.506942797642807, 0, 1.17007322676118, 0.769786956320484, 1.46659942462342, 1.19128214039009, 0.633592151371708, 1.27269395724349, 1.44641491621774, 0.735428579892476, 0.845319988414402, 1.06201695511881, 1.324395996498, 1.22734387448031, 1.53255698189437},
      {0.769574440593014, 1.15694899265629, 0.746487413974582, 0.888361752320536, 1.26740695314856, 1.17699648087288, 1.17007322676118, 0, 1.1259007054424, 1.7025415585924, 1.38293205218175, 1.16756929156758, 1.17264582493965, 1.33271035269688, 1.07564768421292, 0.778868281341681, 1.23287107008366, 0.968539655354582, 1.42479529031801, 1.41208067821187},
      {1.27579668305679, 0.761152570032029, 0.61876156253224, 0.736360849050364, 1.32361065635259, 0.614595446514896, 0.769786956320484, 1.1259007054424, 0, 1.4112324673522, 1.14630894167097, 0.967795284542623, 0.771479459384692, 1.10468029976148, 1.12334774065132, 1.02482926701639, 1.28754326478771, 1.27439749294131, 0.468683841672724, 1.47469999960758},
      {0.964604099952603, 1.45014917711188, 1.17886558630004, 1.76756333403346, 1.26087264215993, 1.17092829494457, 1.46659942462342, 1.7025415585924, 1.4112324673522, 0, 0.433350517223017, 1.463460928818, 0.462965544381851, 0.66291968000662, 1.07010201755441, 1.23000200130049, 0.973485453109068, 0.963546200571036, 0.708724769805536, 0.351200119909572},
      {0.987178199640556, 1.17792001455227, 1.52003670190022, 1.43574761894039, 1.02417540515351, 1.19833088638994, 1.19128214039009, 1.38293205218175, 1.14630894167097, 0.433350517223017, 0, 1.49770950074319, 0.473800072611076, 0.538473125003292, 1.37979627224964, 1.5859723170438, 0.996267398224516, 0.986095542821092, 0.725310666139274, 0.570542199221932},
      {1.05007594438157, 0.394661075648738, 0.808442678243754, 0.763612910719347, 1.37259631233791, 0.637341078675405, 0.633592151371708, 1.16756929156758, 0.967795284542623, 1.463460928818, 1.49770950074319, 0, 1.0079761868248, 1.44331961488922, 0.924599080166146, 1.06275728888356, 1.05974425835993, 1.04892430642749, 0.972058829603409, 1.21378822764856},
      {1.05464162250736, 0.998807558909651, 1.2889025816028, 1.53386612356483, 1.09416720447891, 0.806490842729072, 1.27269395724349, 1.17264582493965, 0.771479459384692, 0.462965544381851, 0.473800072611076, 1.0079761868248, 0, 0.72479754849538, 1.1699868662153, 1.34481214251794, 1.06435197383538, 1.05348497728858, 0.774878150710318, 0.609532859331199},
      {1.1985987403937, 1.135143404599, 1.16264109995678, 1.74323672079854, 0.986982088723923, 1.83315144709714, 1.44641491621774, 1.33271035269688, 1.10468029976148, 0.66291968000662, 0.538473125003292, 1.44331961488922, 0.72479754849538, 0, 1.32968844979665, 1.21307373491949, 0.960087571600877, 0.475142555482979, 0.349485367759138, 0.692733248746636},
      {0.967404475245526, 1.15432562628921, 1.18228799147301, 0.886347403928663, 1.59321190226694, 0.932064479113502, 0.735428579892476, 1.07564768421292, 1.12334774065132, 1.07010201755441, 1.37979627224964, 0.924599080166146, 1.1699868662153, 1.32968844979665, 0, 0.979087429691819, 0.97631161216338, 1.21751652292503, 1.42156458605332, 1.40887880416009},
      {0.700490199584332, 1.05309036790541, 0.679475681649858, 0.808614044804528, 0.915638787768407, 0.850321696813199, 0.845319988414402, 0.778868281341681, 1.02482926701639, 1.23000200130049, 1.5859723170438, 1.06275728888356, 1.34481214251794, 1.21307373491949, 0.979087429691819, 0, 0.56109848274013, 1.76318885009194, 1.29689226231656, 1.02015839286433},
      {0.880060189098976, 1.05010474413616, 0.853658619686283, 1.01590147813779, 0.913042853922533, 1.06830084665916, 1.06201695511881, 1.23287107008366, 1.28754326478771, 0.973485453109068, 0.996267398224516, 1.05974425835993, 1.06435197383538, 0.960087571600877, 0.97631161216338, 0.56109848274013, 0, 1.39547634461879, 1.02642577026706, 0.807404666228614},
      {1.09748548316685, 1.03938321130789, 1.68988558988005, 1.59617804551619, 1.80744143643002, 1.05739353225849, 1.324395996498, 0.968539655354582, 1.27439749294131, 0.963546200571036, 0.986095542821092, 1.04892430642749, 1.05348497728858, 0.475142555482979, 1.21751652292503, 1.76318885009194, 1.39547634461879, 0, 0.320002937404137, 1.268589159299},
      {1.28141710375267, 0.963216908696184, 1.24297493464833, 1.1740494822217, 1.3294417177004, 0.979907428113788, 1.22734387448031, 1.42479529031801, 0.468683841672724, 0.708724769805536, 0.725310666139274, 0.972058829603409, 0.774878150710318, 0.349485367759138, 1.42156458605332, 1.29689226231656, 1.02642577026706, 0.320002937404137, 0, 0.933095433689795},
      {0.800038509951648, 1.20274751778601, 1.55207513886163, 1.46600946033173, 0.830022143283238, 1.5416250309563, 1.53255698189437, 1.41208067821187, 1.47469999960758, 0.351200119909572, 0.570542199221932, 1.21378822764856, 0.609532859331199, 0.692733248746636, 1.40887880416009, 1.02015839286433, 0.807404666228614, 1.268589159299, 0.933095433689795, 0}
    },
    /*eigeninv*/
    {
      {-0.216311217101265, -0.215171653035930, -0.217000020881064, -0.232890860601250, -0.25403526530177, -0.211569372858927, -0.218073620637049, -0.240585637190076, -0.214507049619293, -0.228476323330312, -0.223235445346107, -0.216116483840334, -0.206903836810903, -0.223553828183343, -0.236937609127783, -0.217652789023588, -0.211982652566286, -0.245995223308316, -0.206187718714279, -0.227670670439422},
      {-0.0843931919568687, -0.0342164464991033, 0.393702284928246, -0.166018266253027, 0.0500896782860136, -0.262731388032538, 0.030139964190519, -0.253997503551094, -0.0932603349591988, -0.32884667697173, 0.199966846276877, -0.117543453869516, 0.196248237055757, -0.456448703853250, 0.139286961076387, 0.241166801918811, -0.0783508285295053, 0.377438091416498, 0.109499076984234, 0.128581669647144},
      {-0.0690428674271772, 0.0133858672878363, -0.208289917312908, 0.161232925220819, 0.0735806288007248, -0.316269599838174, -0.0640708424745702, -0.117078801507436, 0.360805085405857, 0.336899760384943, 0.0332447078185156, 0.132954055834276, 0.00595209121998118, -0.157755611190327, -0.199839273133436, 0.193688928807663, 0.0970290928040946, 0.374683975138541, -0.478110944870958, -0.243290196936098},
      {0.117284581850481, 0.310399467781876, -0.143513477698805, 0.088808130300351, 0.105747812943691, -0.373871701179853, 0.189069306295134, 0.133258225034741, -0.213043549687694, 0.301303731259140, -0.182085224761849, -0.161971915020789, 0.229301173581378, -0.293586313243755, -0.0260480060747498, -0.0217953684540699, 0.0202675755458796, -0.160134624443657, 0.431950096999465, -0.329885160320501},
      {0.256496969244703, 0.0907408349583135, 0.0135731083898029, 0.477557831930769, -0.0727379669280703, 0.101732675207959, -0.147293025369251, -0.348325291603251, -0.255678082078362, -0.187092643740172, -0.177164064346593, -0.225921480146133, 0.422318841046522, 0.319959853469398, -0.0623652546300045, 0.0824203908606883, -0.102057926881110, 0.120728407576411, -0.156845807891241, -0.123528163091204},
      {-0.00906668858975576, -0.0814722888231236, -0.0762715085459023, 0.055819989938286, -0.0540516675257271, -0.0070589302769034, -0.315813159989213, -0.0103527463419808, -0.194634331372293, -0.0185860407566822, 0.50134169352609, 0.384531812730061, -0.0405008616742061, 0.0781033650669525, 0.069334900096687, 0.396455180448549, -0.204065801866462, -0.215272089630713, 0.171046818996465, -0.396393364716348},
      {0.201971098571663, 0.489747667606921, 0.00226258734592836, 0.0969514005747054, 0.0853921636903791, 0.0862068740282345, -0.465412154271164, -0.130516676347786, 0.165513616974634, 0.0712238027886633, 0.140746943067963, -0.325919272273406, -0.421213488261598, -0.163508199065965, 0.269695802810568, -0.110296405171437, -0.106834099902202, 0.00509414588152415, 0.00909215239544615, 0.0500401865589727},
      {0.515854176692456, -0.087468413428258, 0.102796468891449, -0.06046105990993, -0.212014383772414, -0.259853648383794, -0.0997372883043333, -0.109934574535736, 0.284891018406112, -0.250578342940183, 0.142174204994568, 0.210384918947619, 0.118803190788946, -0.0268434355996836, 0.0103721198836548, -0.355555176478458, 0.428042332431476, -0.150610175411631, 0.0464090887952940, -0.140238796382057},
      {-0.239392215229762, -0.315483492656425, 0.100205194952396, 0.197830195325302, 0.40178804665223, 0.195809461460298, -0.407817115321684, 0.0226836686147386, -0.169780276210306, 0.0818161585952184, -0.172886230584939, 0.174982644851064, 0.0868786992159535, -0.198450519980824, 0.168581078329968, -0.361514336004068, 0.238668430084722, 0.165494019791904, 0.110437707249228, -0.169592003035203},
      {-0.313151735678025, 0.10757884850664, -0.49249098807229, 0.0993472335619114, -0.148695715250836, 0.0573801136941699, -0.190040373500722, 0.254848437434773, 0.134147888304352, -0.352719341442756, 0.0839609323513986, -0.207904182300122, 0.253940523323376, -0.109832138553288, 0.0980084518687944, 0.209026594443723, 0.406236051871548, -0.0521120230935943, 0.0554108014592302, 0.134681046631955},
      {-0.102905214421384, 0.235803606800009, 0.213414976431981, -0.253606415825635, 0.00945656859370683, 0.259551282655855, 0.159527348902192, 0.083218761193016, -0.286815935191867, 0.0135069477264877, 0.336758103107357, -0.271707359524149, -0.0400009875851839, 0.0871186292716414, -0.171506310409388, -0.0954276577211755, 0.393467571460712, 0.111732846649458, -0.239886066474217, -0.426474828195231},
      {-0.0130795552324104, 0.0758967690968058, -0.165099404017689, -0.46035152559912, 0.409888158016031, -0.0235053940299396, 0.0699393201709723, -0.161320910316996, 0.226111732196825, -0.177811841258496, -0.219073917645916, -0.00703219376737286, 0.162831878334912, 0.271670554900684, 0.451033612762052, 0.0820942662443393, -0.0904983490498446, -0.0587000279313978, -0.0938852980928252, -0.306078621571843},
      {0.345092040577428, -0.257721588971295, -0.301689123771848, -0.0875212184538126, 0.161012613069275, 0.385104899829821, 0.118355290985046, -0.241723794416731, 0.083201920119646, -0.0809095291508749, -0.0820275390511991, -0.115569770103317, -0.250105681098033, -0.164197583037664, -0.299481453795592, 0.255906951902366, 0.129042051416371, 0.203761730442746, 0.347550071284268, -0.109264854744020},
      {0.056345924962239, 0.072536751679082, 0.303127492633681, -0.368877185781648, -0.343024497082421, 0.206879529669083, -0.413012709639426, 0.078538816203612, 0.103382383425097, 0.288319996147499, -0.392663258459423, 0.0319588502083897, 0.220316797792669, -0.0563686494606947, -0.0869286063283735, 0.323677017794391, 0.0984875197088935, -0.0303289828821742, 0.0450197853450979, -0.0261771221270139},
      {-0.253701638374729, -0.148922815783583, 0.111794052194159, 0.157313977830326, -0.269846001260543, -0.222989872703583, 0.115441028189268, -0.350456582262355, -0.0409581422905941, 0.174078744248002, -0.130673397086811, -0.123963802708056, -0.351609207081548, 0.281548012920868, 0.340382662112428, 0.180262131025562, 0.3895263830793, 0.0121546812430960, 0.214830943227063, -0.0617782909660214},
      {-0.025854479416026, 0.480654788977767, -0.138024550829229, -0.130191670810919, 0.107816875829919, -0.111243997319276, -0.0679814460571245, -0.183167991080677, -0.363355166018786, -0.183934891092050, -0.216097125080962, 0.520240628803255, -0.179616013606479, 0.0664131536100941, -0.178350708111064, 0.0352047611606709, 0.223857228692892, 0.128363679623513, -0.000403433628490731, 0.224972110977704},
      {0.159207394033448, -0.0371517305736114, -0.294302634912281, -0.0866954375908417, -0.259998567870054, 0.284966673982689, 0.205356416771391, -0.257613708650298, -0.264820519037270, 0.293359248624603, 0.0997476397434102, 0.151390539497369, 0.165571346773648, -0.347569523551258, 0.43792310820533, -0.0723248163210163, 0.0379214984816955, -0.0542758730251438, -0.258020301801603, 0.128680501102363},
      {0.316853842351797, -0.153950010941153, -0.13387065213508, -0.0702971390607613, -0.202558481846057, -0.172941438694837, -0.068882524588574, 0.524738203063889, -0.271670479920716, -0.112864756695310, -0.146831636946145, -0.0352336188578041, -0.211108490884767, 0.097857111349555, 0.276459740956662, 0.0231297536754823, -0.0773173324868396, 0.487208384389438, -0.0734191389266824, -0.113198765573319},
      {-0.274285525741087, 0.227334266052039, -0.0973746625709059, -0.00965256583655389, -0.402438444750043, 0.198586229519026, 0.0958135064575833, -0.108934376958686, 0.253641732094319, -0.0551918478254021, 0.0243640218331436, 0.181936272247179, 0.090952738347629, 0.0603352483029044, -0.0043821671755761, -0.347720824658591, -0.267879988539971, 0.403804652116592, 0.337654323971186, -0.241509293972297},
      {-0.0197089518344238, 0.139681034626696, 0.251980475788267, 0.341846624362846, -0.075141195125153, 0.2184951591319, 0.268870823491343, 0.150392399018138, 0.134592404015057, -0.337050200539163, -0.313109373497998, 0.201993318439135, -0.217140733851970, -0.337622749083808, 0.135253284365068, 0.181729249828045, -0.00627813335422765, -0.197218833324039, -0.194060005031698, -0.303055888528004}
    },
    /*eigenval*/
    {
      20.29131, 0.5045685, 0.2769945, 0.1551147, 0.03235484, -0.04127639, -0.3516426, -0.469973, -0.5835191, -0.6913107, -0.7207972, -0.7907875, -0.9524307, -1.095310, -1.402153, -1.424179, -1.936704, -2.037965, -3.273561, -5.488734
    },
    /*eigentot and codeFreq left out, these are initialized elsewhere (at startup)*/
  };
/* The JTT92 matrix, D. T. Jones, W. R. Taylor, & J. M. Thornton, CABIOS 8:275 (1992)
Derived from the PhyML source code (models.c) by filling in the other side of the symmetric matrix,
scaling the entries by the stationary rate (to give the rate of a->b not b|a), to set the diagonals
so the rows sum to 0, to rescale the matrix so that the implied rate of evolution is 1.
The resulting matrix is the transpose (I think).
*/
#if 0
{
int i,j;
for (i=0; i<20; i++) for (j=0; j<i; j++) daa[j*20+i] = daa[i*20+j];
for (i = 0; i < 20; i++) for (j = 0; j < 20; j++) daa[i*20+j] *= pi[j] / 100.0;
double mr = 0; /* mean rate */
for (i = 0; i < 20; i++) {
double sum = 0;
for (j = 0; j < 20; j++)
sum += daa[i*20+j];
daa[i*20+i] = -sum;
mr += pi[i] * sum;
}
for (i = 0; i < 20*20; i++)
daa[i] /= mr;
}
#endif
double statJTT92[MAXCODES] = {0.07674789,0.05169087,0.04264509,0.05154407,0.01980301,0.04075195,0.06182989,0.07315199,0.02294399,0.05376110,0.09190390,0.05867583,0.02382594,0.04012589,0.05090097,0.06876503,0.05856501,0.01426057,0.03210196,0.06600504};
double matrixJTT92[MAXCODES][MAXCODES] = {
{ -1.247831,0.044229,0.041179,0.061769,0.042704,0.043467,0.08007,0.136501,0.02059,0.027453,0.022877,0.02669,0.041179,0.011439,0.14794,0.288253,0.362223,0.006863,0.008388,0.227247 },
{ 0.029789,-1.025965,0.023112,0.008218,0.058038,0.159218,0.014895,0.070364,0.168463,0.011299,0.019517,0.33179,0.022599,0.002568,0.038007,0.051874,0.032871,0.064714,0.010272,0.008731 },
{ 0.022881,0.019068,-1.280568,0.223727,0.014407,0.03644,0.024576,0.034322,0.165676,0.019915,0.005085,0.11144,0.012712,0.004237,0.006356,0.213134,0.098304,0.00339,0.029661,0.00678 },
{ 0.041484,0.008194,0.270413,-1.044903,0.005121,0.025095,0.392816,0.066579,0.05736,0.005634,0.003585,0.013316,0.007682,0.002049,0.007682,0.030217,0.019462,0.002049,0.023559,0.015877 },
{ 0.011019,0.022234,0.00669,0.001968,-0.56571,0.001771,0.000984,0.011609,0.013577,0.003345,0.004526,0.001377,0.0061,0.015348,0.002755,0.043878,0.008264,0.022628,0.041124,0.012199 },
{ 0.02308,0.125524,0.034823,0.019841,0.003644,-1.04415,0.130788,0.010528,0.241735,0.003644,0.029154,0.118235,0.017411,0.00162,0.066406,0.021461,0.020651,0.007288,0.009718,0.008098 },
{ 0.064507,0.017816,0.035632,0.471205,0.003072,0.198435,-0.944343,0.073107,0.015973,0.007372,0.005529,0.111197,0.011058,0.003072,0.011058,0.01843,0.019659,0.006143,0.0043,0.027646 },
{ 0.130105,0.099578,0.058874,0.09449,0.042884,0.018898,0.086495,-0.647831,0.016717,0.004361,0.004361,0.019625,0.010176,0.003634,0.017444,0.146096,0.023986,0.039976,0.005815,0.034162 },
{ 0.006155,0.074775,0.089138,0.025533,0.01573,0.1361,0.005927,0.005243,-1.135695,0.003648,0.012767,0.010259,0.007523,0.009119,0.026217,0.016642,0.010487,0.001824,0.130629,0.002508 },
{ 0.01923,0.011752,0.025106,0.005876,0.009081,0.004808,0.00641,0.003205,0.008547,-1.273602,0.122326,0.011218,0.25587,0.047542,0.005342,0.021367,0.130873,0.004808,0.017094,0.513342 },
{ 0.027395,0.0347,0.010958,0.006392,0.021003,0.065748,0.008219,0.005479,0.051137,0.209115,-0.668139,0.012784,0.354309,0.226465,0.093143,0.053877,0.022829,0.047485,0.021916,0.16437 },
{ 0.020405,0.376625,0.153332,0.015158,0.004081,0.170239,0.105525,0.015741,0.026235,0.012243,0.008162,-0.900734,0.037896,0.002332,0.012243,0.027401,0.06005,0.00583,0.004664,0.008162 },
{ 0.012784,0.010416,0.007102,0.003551,0.007339,0.01018,0.004261,0.003314,0.007812,0.113397,0.091854,0.015388,-1.182051,0.01018,0.003788,0.006865,0.053503,0.005682,0.004261,0.076466 },
{ 0.00598,0.001993,0.003987,0.001595,0.031098,0.001595,0.001993,0.001993,0.015948,0.035484,0.098877,0.001595,0.017144,-0.637182,0.006778,0.03668,0.004784,0.021131,0.213701,0.024719 },
{ 0.098117,0.037426,0.007586,0.007586,0.007081,0.082944,0.009104,0.012138,0.058162,0.005058,0.051587,0.010621,0.008092,0.008598,-0.727675,0.144141,0.059679,0.003035,0.005058,0.011632 },
{ 0.258271,0.069009,0.343678,0.040312,0.152366,0.036213,0.020498,0.137334,0.049878,0.02733,0.040312,0.032113,0.019814,0.06286,0.194728,-1.447863,0.325913,0.023914,0.043045,0.025964 },
{ 0.276406,0.037242,0.135003,0.022112,0.02444,0.029677,0.018621,0.019203,0.026768,0.142567,0.014548,0.059936,0.131511,0.006983,0.068665,0.27757,-1.335389,0.006983,0.01222,0.065174 },
{ 0.001275,0.017854,0.001134,0.000567,0.016295,0.002551,0.001417,0.007793,0.001134,0.001275,0.007368,0.001417,0.003401,0.00751,0.00085,0.004959,0.0017,-0.312785,0.010061,0.003542 },
{ 0.003509,0.006379,0.022328,0.014673,0.066664,0.007655,0.002233,0.002552,0.182769,0.010207,0.007655,0.002552,0.005741,0.170967,0.00319,0.020095,0.006698,0.022647,-0.605978,0.005103 },
{ 0.195438,0.011149,0.010493,0.020331,0.040662,0.013117,0.029512,0.030824,0.007214,0.630254,0.11805,0.009182,0.211834,0.040662,0.015084,0.024922,0.073453,0.016396,0.010493,-1.241722 }
};
double statWAG01[MAXCODES] = {0.0866279,0.043972, 0.0390894,0.0570451,0.0193078,0.0367281,0.0580589,0.0832518,0.0244314,0.048466, 0.086209, 0.0620286,0.0195027,0.0384319,0.0457631,0.0695179,0.0610127,0.0143859,0.0352742,0.0708956};
double matrixWAG01[MAXCODES][MAXCODES] = {
{-1.117151, 0.050147, 0.046354, 0.067188, 0.093376, 0.082607, 0.143908, 0.128804, 0.028817, 0.017577, 0.036177, 0.082395, 0.081234, 0.019138, 0.130789, 0.306463, 0.192846, 0.010286, 0.021887, 0.182381},
{0.025455, -0.974318, 0.029321, 0.006798, 0.024376, 0.140086, 0.020267, 0.026982, 0.098628, 0.008629, 0.022967, 0.246964, 0.031527, 0.004740, 0.031358, 0.056495, 0.025586, 0.053714, 0.017607, 0.011623},
{0.020916, 0.026065, -1.452438, 0.222741, 0.010882, 0.063328, 0.038859, 0.046176, 0.162306, 0.022737, 0.005396, 0.123567, 0.008132, 0.003945, 0.008003, 0.163042, 0.083283, 0.002950, 0.044553, 0.008051},
{0.044244, 0.008819, 0.325058, -0.989665, 0.001814, 0.036927, 0.369645, 0.051822, 0.055719, 0.002361, 0.005077, 0.028729, 0.006212, 0.002798, 0.025384, 0.064166, 0.022443, 0.007769, 0.019500, 0.009120},
{0.020812, 0.010703, 0.005375, 0.000614, -0.487357, 0.002002, 0.000433, 0.006214, 0.005045, 0.003448, 0.007787, 0.001500, 0.007913, 0.008065, 0.002217, 0.028525, 0.010395, 0.014531, 0.011020, 0.020307},
{0.035023, 0.117008, 0.059502, 0.023775, 0.003809, -1.379785, 0.210830, 0.012722, 0.165524, 0.004391, 0.033516, 0.150135, 0.059565, 0.003852, 0.035978, 0.039660, 0.033070, 0.008316, 0.008777, 0.011613},
{0.096449, 0.026759, 0.057716, 0.376214, 0.001301, 0.333275, -1.236894, 0.034593, 0.034734, 0.007763, 0.009400, 0.157479, 0.019202, 0.004944, 0.041578, 0.042955, 0.050134, 0.009540, 0.011961, 0.035874},
{0.123784, 0.051085, 0.098345, 0.075630, 0.026795, 0.028838, 0.049604, -0.497615, 0.021792, 0.002661, 0.005356, 0.032639, 0.015212, 0.004363, 0.021282, 0.117240, 0.019732, 0.029444, 0.009052, 0.016361},
{0.008127, 0.054799, 0.101443, 0.023863, 0.006384, 0.110105, 0.014616, 0.006395, -0.992342, 0.003543, 0.012807, 0.022832, 0.010363, 0.017420, 0.017851, 0.018979, 0.012136, 0.006733, 0.099319, 0.003035},
{0.009834, 0.009511, 0.028192, 0.002006, 0.008654, 0.005794, 0.006480, 0.001549, 0.007029, -1.233162, 0.161294, 0.016472, 0.216559, 0.053891, 0.005083, 0.016249, 0.074170, 0.010808, 0.021372, 0.397837},
{0.036002, 0.045028, 0.011900, 0.007673, 0.034769, 0.078669, 0.013957, 0.005547, 0.045190, 0.286902, -0.726011, 0.023303, 0.439180, 0.191376, 0.037625, 0.031191, 0.029552, 0.060196, 0.036066, 0.162890},
{0.058998, 0.348377, 0.196082, 0.031239, 0.004820, 0.253558, 0.168246, 0.024319, 0.057967, 0.021081, 0.016767, -1.124580, 0.060821, 0.005783, 0.036254, 0.062960, 0.090292, 0.008952, 0.008675, 0.019884},
{0.018288, 0.013983, 0.004057, 0.002124, 0.007993, 0.031629, 0.006450, 0.003564, 0.008272, 0.087143, 0.099354, 0.019123, -1.322098, 0.024370, 0.003507, 0.010109, 0.031033, 0.010556, 0.008769, 0.042133},
{0.008490, 0.004143, 0.003879, 0.001885, 0.016054, 0.004030, 0.003273, 0.002014, 0.027402, 0.042734, 0.085315, 0.003583, 0.048024, -0.713669, 0.006512, 0.022020, 0.006934, 0.061698, 0.260332, 0.026213},
{0.069092, 0.032635, 0.009370, 0.020364, 0.005255, 0.044829, 0.032773, 0.011698, 0.033438, 0.004799, 0.019973, 0.026747, 0.008229, 0.007754, -0.605590, 0.077484, 0.038202, 0.006695, 0.010376, 0.015124},
{0.245933, 0.089317, 0.289960, 0.078196, 0.102703, 0.075066, 0.051432, 0.097899, 0.054003, 0.023306, 0.025152, 0.070562, 0.036035, 0.039831, 0.117705, -1.392239, 0.319421, 0.038212, 0.057419, 0.016981},
{0.135823, 0.035501, 0.129992, 0.024004, 0.032848, 0.054936, 0.052685, 0.014461, 0.030308, 0.093371, 0.020915, 0.088814, 0.097083, 0.011008, 0.050931, 0.280341, -1.154973, 0.007099, 0.018643, 0.088894},
{0.001708, 0.017573, 0.001086, 0.001959, 0.010826, 0.003257, 0.002364, 0.005088, 0.003964, 0.003208, 0.010045, 0.002076, 0.007786, 0.023095, 0.002105, 0.007908, 0.001674, -0.466694, 0.037525, 0.005516},
{0.008912, 0.014125, 0.040205, 0.012058, 0.020133, 0.008430, 0.007267, 0.003836, 0.143398, 0.015555, 0.014757, 0.004934, 0.015861, 0.238943, 0.007998, 0.029135, 0.010779, 0.092011, -0.726275, 0.011652},
{0.149259, 0.018739, 0.014602, 0.011335, 0.074565, 0.022417, 0.043805, 0.013932, 0.008807, 0.581952, 0.133956, 0.022726, 0.153161, 0.048356, 0.023429, 0.017317, 0.103293, 0.027186, 0.023418, -1.085487},
};
/* Le-Gascuel 2008 model data from Harry Yoo
https://github.com/hyoo/FastTree
*/
double statLG08[MAXCODES] = {0.079066, 0.055941, 0.041977, 0.053052, 0.012937, 0.040767, 0.071586, 0.057337, 0.022355, 0.062157, 0.099081, 0.0646, 0.022951, 0.042302, 0.04404, 0.061197, 0.053287, 0.012066, 0.034155, 0.069147};
double matrixLG08[MAXCODES][MAXCODES] = {
{-1.08959879,0.03361031,0.02188683,0.03124237,0.19680136,0.07668542,0.08211337,0.16335306,0.02837339,0.01184642,0.03125763,0.04242021,0.08887270,0.02005907,0.09311189,0.37375830,0.16916131,0.01428853,0.01731216,0.20144931},
{0.02378006,-0.88334349,0.04206069,0.00693409,0.02990323,0.15707674,0.02036079,0.02182767,0.13574610,0.00710398,0.01688563,0.35388551,0.02708281,0.00294931,0.01860218,0.04800569,0.03238902,0.03320688,0.01759004,0.00955956},
{0.01161996,0.03156149,-1.18705869,0.21308090,0.02219603,0.07118238,0.02273938,0.06034785,0.18928374,0.00803870,0.00287235,0.09004368,0.01557359,0.00375798,0.00679131,0.16825837,0.08398226,0.00190474,0.02569090,0.00351296},
{0.02096312,0.00657599,0.26929909,-0.86328733,0.00331871,0.02776660,0.27819699,0.04482489,0.04918511,0.00056712,0.00079981,0.01501150,0.00135537,0.00092395,0.02092662,0.06579888,0.02259266,0.00158572,0.00716768,0.00201422},
{0.03220119,0.00691547,0.00684065,0.00080928,-0.86781864,0.00109716,0.00004527,0.00736456,0.00828668,0.00414794,0.00768465,0.00017162,0.01156150,0.01429859,0.00097521,0.03602269,0.01479316,0.00866942,0.01507844,0.02534728},
{0.03953956,0.11446966,0.06913053,0.02133682,0.00345736,-1.24953177,0.16830979,0.01092385,0.19623161,0.00297003,0.02374496,0.13185209,0.06818543,0.00146170,0.02545052,0.04989165,0.04403378,0.00962910,0.01049079,0.00857458},
{0.07434507,0.02605508,0.03877888,0.37538659,0.00025048,0.29554848,-0.84254259,0.02497249,0.03034386,0.00316875,0.00498760,0.12936820,0.01243696,0.00134660,0.03002373,0.04380857,0.04327684,0.00557310,0.00859294,0.01754095},
{0.11846020,0.02237238,0.08243001,0.04844538,0.03263985,0.01536392,0.02000178,-0.50414422,0.01785951,0.00049912,0.00253779,0.01700817,0.00800067,0.00513658,0.01129312,0.09976552,0.00744439,0.01539442,0.00313512,0.00439779},
{0.00802225,0.05424651,0.10080372,0.02072557,0.01431930,0.10760560,0.00947583,0.00696321,-1.09324335,0.00243405,0.00818899,0.01558729,0.00989143,0.01524917,0.01137533,0.02213166,0.01306114,0.01334710,0.11863394,0.00266053},
{0.00931296,0.00789336,0.01190322,0.00066446,0.01992916,0.00452837,0.00275137,0.00054108,0.00676776,-1.41499789,0.25764421,0.00988722,0.26563382,0.06916358,0.00486570,0.00398456,0.06425393,0.00694043,0.01445289,0.66191466},
{0.03917027,0.02990732,0.00677980,0.00149374,0.05885464,0.05771026,0.00690325,0.00438541,0.03629495,0.41069624,-0.79375308,0.01362360,0.62543296,0.25688578,0.02467704,0.01806113,0.03001512,0.06139358,0.02968934,0.16870919},
{0.03465896,0.40866276,0.13857164,0.01827910,0.00085698,0.20893479,0.11674330,0.01916263,0.04504313,0.01027583,0.00888247,-0.97644156,0.04241650,0.00154510,0.02521473,0.04836478,0.07344114,0.00322392,0.00852278,0.01196402},
{0.02579765,0.01111131,0.00851489,0.00058635,0.02051079,0.03838702,0.00398738,0.00320253,0.01015515,0.09808327,0.14487451,0.01506968,-1.54195698,0.04128536,0.00229163,0.00796306,0.04636929,0.01597787,0.01104642,0.04357735},
{0.01073203,0.00223024,0.00378708,0.00073673,0.04675419,0.00151673,0.00079574,0.00378966,0.02885576,0.04707045,0.10967574,0.00101178,0.07609486,-0.81061579,0.00399600,0.01530562,0.00697985,0.10394083,0.33011973,0.02769432},
{0.05186360,0.01464471,0.00712508,0.01737179,0.00331981,0.02749383,0.01847072,0.00867414,0.02240973,0.00344749,0.01096857,0.01718973,0.00439734,0.00416018,-0.41664685,0.05893117,0.02516738,0.00418956,0.00394655,0.01305787},
{0.28928853,0.05251612,0.24529879,0.07590089,0.17040121,0.07489439,0.03745080,0.10648187,0.06058559,0.00392302,0.01115539,0.04581702,0.02123285,0.02214217,0.08188943,-1.42842431,0.39608294,0.01522956,0.02451220,0.00601987},
{0.11400727,0.03085239,0.10660988,0.02269274,0.06093244,0.05755704,0.03221430,0.00691855,0.03113348,0.05508469,0.01614250,0.06057985,0.10765893,0.00879238,0.03045173,0.34488735,-1.23444419,0.00750412,0.01310009,0.11660005},
{0.00218053,0.00716244,0.00054751,0.00036065,0.00808574,0.00284997,0.00093936,0.00323960,0.00720403,0.00134729,0.00747646,0.00060216,0.00840002,0.02964754,0.00114785,0.00300276,0.00169919,-0.44275283,0.03802969,0.00228662},
{0.00747852,0.01073967,0.02090366,0.00461457,0.03980863,0.00878929,0.00409985,0.00186756,0.18125441,0.00794180,0.01023445,0.00450612,0.01643896,0.26654152,0.00306072,0.01368064,0.00839668,0.10764993,-0.71435091,0.00851526},
{0.17617706,0.01181629,0.00578676,0.00262530,0.13547871,0.01454379,0.01694332,0.00530363,0.00822937,0.73635171,0.11773937,0.01280613,0.13129028,0.04526924,0.02050210,0.00680190,0.15130413,0.01310401,0.01723920,-1.33539639}
};
//------------------------------------------------------------------------------
// GB_assign_zombie3: delete entries in C(:,j) for C_replace_phase
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// For GrB_Row_assign or GrB_Col_assign, C(I,j)<#M,repl>=any must delete all
// entries C(i,j) outside of C(I,j), if the mask M(i,0) (or its complement) is
// zero. This step is not done for GxB_*_subassign, since that method does not
// modify anything outside IxJ.
// GB_assign_zombie3 and GB_assign_zombie4 are transposes of each other.
#include "GB_assign.h"
void GB_assign_zombie3
(
GrB_Matrix Z, // the matrix C, or a copy
const GrB_Matrix M,
const bool Mask_comp,
const bool Mask_struct,
const int64_t j, // vector index with entries to delete
const GrB_Index *I,
const int64_t nI,
const int Ikind,
const int64_t Icolon [3],
GB_Context Context
)
{
//--------------------------------------------------------------------------
// get Z (:,j)
//--------------------------------------------------------------------------
const int64_t *GB_RESTRICT Zh = Z->h ;
const int64_t *GB_RESTRICT Zp = Z->p ;
int64_t *GB_RESTRICT Zi = Z->i ;
int64_t pZ_start, pZ_end, pleft = 0, pright = Z->nvec-1 ;
GB_lookup (Z->is_hyper, Zh, Zp, &pleft, pright, j, &pZ_start, &pZ_end) ;
int64_t nzombies = Z->nzombies ;
const int64_t zjnz = pZ_end - pZ_start ;
//--------------------------------------------------------------------------
// get M(:,0)
//--------------------------------------------------------------------------
const int64_t *GB_RESTRICT Mp = M->p ;
const int64_t *GB_RESTRICT Mi = M->i ;
const GB_void *GB_RESTRICT Mx = (Mask_struct ? NULL : (M->x)) ;
const size_t msize = M->type->size ;
int64_t pM_start = Mp [0] ;
int64_t pM_end = Mp [1] ;
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (zjnz, chunk, nthreads_max) ;
int ntasks = (nthreads == 1) ? 1 : (64 * nthreads) ;
//--------------------------------------------------------------------------
// delete entries from Z(:,j) that are outside I, if the mask M allows it
//--------------------------------------------------------------------------
int taskid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
int64_t p1, p2 ;
GB_PARTITION (p1, p2, zjnz, taskid, ntasks) ;
for (int64_t pZ = pZ_start + p1 ; pZ < pZ_start + p2 ; pZ++)
{
//------------------------------------------------------------------
// get Z(i,j)
//------------------------------------------------------------------
int64_t i = Zi [pZ] ;
if (!GB_IS_ZOMBIE (i))
{
//--------------------------------------------------------------
// Z(i,j) is outside Z(I,j) if i is not in the list I
//--------------------------------------------------------------
bool i_outside = !GB_ij_is_in_list (I, nI, i, Ikind, Icolon) ;
if (i_outside)
{
//----------------------------------------------------------
// Z(i,j) is a live entry not in the Z(I,J) submatrix
//----------------------------------------------------------
// Check the mask M to see if it should be deleted.
int64_t pM = pM_start ;
int64_t pright = pM_end - 1 ;
bool found ;
GB_BINARY_SEARCH (i, Mi, pM, pright, found) ;
bool mij = false ;
if (found)
{
// found it
mij = GB_mcast (Mx, pM, msize) ;
}
if (Mask_comp)
{
// negate the mask if Mask_comp is true
mij = !mij ;
}
if (!mij)
{
// delete Z(i,j) by marking it as a zombie
nzombies++ ;
Zi [pZ] = GB_FLIP (i) ;
}
}
}
}
}
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
Z->nzombies = nzombies ;
}
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isne_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__isne_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__isne_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__isne_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_uint64)
// A*D function (colscale): GB (_AxD__isne_uint64)
// D*A function (rowscale): GB (_DxB__isne_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__isne_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__isne_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_uint64)
// C=scalar+B GB (_bind1st__isne_uint64)
// C=scalar+B' GB (_bind1st_tran__isne_uint64)
// C=A+scalar GB (_bind2nd__isne_uint64)
// C=A'+scalar GB (_bind2nd_tran__isne_uint64)
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISNE || GxB_NO_UINT64 || GxB_NO_ISNE_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__isne_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isne_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isne_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isne_uint64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isne_uint64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isne_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__isne_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isne_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__isne_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isne_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the ISNE operator with the scalar bound as the
// first argument: Cx [p] = (x != Bx [p]) for every entry present in B.
GrB_Info GB (_bind1st__isne_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,     // the bound scalar x
    const GB_void *Bx_input,    // values of B
    const int8_t *restrict Bb,  // bitmap of B (NULL if all entries present)
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs/outputs
    uint64_t scalar_x = (*((uint64_t *) x_input)) ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t pB ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (pB = 0 ; pB < bnz ; pB++)
    {
        // skip entries not present in the bitmap
        if (GBB (Bb, pB))
        {
            uint64_t bval = GBX (Bx, pB, false) ;
            Cx [pB] = (scalar_x != bval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the ISNE operator with the scalar bound as the
// second argument: Cx [p] = (Ax [p] != y) for every entry present in A.
GrB_Info GB (_bind2nd__isne_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,    // values of A
    const GB_void *y_input,     // the bound scalar y
    const int8_t *restrict Ab,  // bitmap of A (NULL if all entries present)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs/outputs
    uint64_t scalar_y = (*((uint64_t *) y_input)) ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    int64_t pA ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (pA = 0 ; pA < anz ; pA++)
    {
        // skip entries not present in the bitmap
        if (GBB (Ab, pA))
        {
            uint64_t aval = GBX (Ax, pA, false) ;
            Cx [pA] = (aval != scalar_y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name).
// This macro is consumed by GB_unop_transpose.c below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x != aij) ; \
}

// C = op (x, A'): transpose A and apply the ISNE operator with the scalar
// bound as the first argument.  The transpose loop is in GB_unop_transpose.c.
GrB_Info GB (_bind1st_tran__isne_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,     // the bound scalar x
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows (redefinition is identical
    // here since A and B have the same type for this operator)
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name).
// This macro is consumed by GB_unop_transpose.c below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij != y) ; \
}

// C = op (A', y): transpose A and apply the ISNE operator with the scalar
// bound as the second argument.  The transpose loop is in GB_unop_transpose.c.
GrB_Info GB (_bind2nd_tran__isne_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,     // the bound scalar y
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__min_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__min_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__min_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__min_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__min_fp64)
// A*D function (colscale): GB (_AxD__min_fp64)
// D*A function (rowscale): GB (_DxB__min_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__min_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__min_fp64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__min_fp64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__min_fp64)
// C=scalar+B GB (_bind1st__min_fp64)
// C=scalar+B' GB (_bind1st_tran__min_fp64)
// C=A+scalar GB (_bind2nd__min_fp64)
// C=A'+scalar GB (_bind2nd_tran__min_fp64)
// C type: double
// A type: double
// A pattern? 0
// B type: double
// B pattern? 0
// BinaryOp: cij = fmin (aij, bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = fmin (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MIN || GxB_NO_FP64 || GxB_NO_MIN_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense.  The whole computation is
// performed by the included template, which expands the GB_BINOP macro
// defined above (z = fmin (x, y)).
void GB (_Cdense_ewise3_accum__min_fp64)
(
GrB_Matrix C,           // input/output: dense matrix accumulated into
const GrB_Matrix A,     // first input matrix (dense)
const GrB_Matrix B,     // second input matrix (dense)
const int nthreads      // # of threads to use
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense, with no accumulation into C.
// The included template does all the work via the GB_BINOP macro (fmin).
void GB (_Cdense_ewise3_noaccum__min_fp64)
(
GrB_Matrix C,           // output matrix (dense), overwritten
const GrB_Matrix A,     // first input matrix (dense)
const GrB_Matrix B,     // second input matrix (dense)
const int nthreads      // # of threads to use
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using the
// MIN operator as the accumulator.
GrB_Info GB (_Cdense_accumB__min_fp64)
(
GrB_Matrix C,           // input/output: dense matrix accumulated into
const GrB_Matrix B,     // input: sparse matrix
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
// operator disabled at compile time; caller uses the generic method
return (GrB_NO_VALUE) ;
#else
{
// the subassign template does all the work
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C, using the MIN
// operator as the accumulator.
//
// Fix: the original had a redundant "return (GrB_SUCCESS) ;" inside the
// inner compound block, which made the return after the block unreachable
// dead code (and inconsistent with the sibling _Cdense_accumB above).
// The inner return has been removed; behavior is unchanged.
GrB_Info GB (_Cdense_accumb__min_fp64)
(
GrB_Matrix C,           // input/output: dense matrix accumulated into
const GB_void *p_bwork, // the scalar b, typecast below
const int nthreads      // # of threads to use
)
{
#if GB_DISABLE
// operator disabled at compile time; caller uses the generic method
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__min_fp64)
(
GrB_Matrix C,           // output matrix
const GrB_Matrix A,     // input matrix
const GrB_Matrix D,     // diagonal scaling matrix
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed alias for C's value array, filled in by the template
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__min_fp64)
(
GrB_Matrix C,           // output matrix
const GrB_Matrix D,     // diagonal scaling matrix
const GrB_Matrix B,     // input matrix
int nthreads            // # of threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed alias for C's value array, filled in by the template
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B, with the MIN operator.
GrB_Info GB (_AaddB__min_fp64)
(
GrB_Matrix C,                   // output matrix
const int C_sparsity,           // sparsity structure chosen for C
const GrB_Matrix M,             // optional mask
const bool Mask_struct,         // if true, use the mask structurally
const bool Mask_comp,           // if true, the mask is complemented
const GrB_Matrix A,             // first input matrix
const GrB_Matrix B,             // second input matrix
const bool is_eWiseUnion,       // eWiseUnion vs eWiseAdd semantics
const GB_void *alpha_scalar_in, // alpha, read only if is_eWiseUnion
const GB_void *beta_scalar_in,  // beta, read only if is_eWiseUnion
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces for slicing M, A, and B; released by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// NOTE: alpha_scalar/beta_scalar are only initialized when is_eWiseUnion;
// the template is expected to read them only in that case.
double alpha_scalar ;
double beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((double *) alpha_scalar_in)) ;
beta_scalar = (*((double *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse or
// hypersparse, with the MIN operator.
GrB_Info GB (_AemultB_08__min_fp64)
(
GrB_Matrix C,                   // output matrix (sparse/hyper)
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,             // optional mask
const bool Mask_struct,         // if true, use the mask structurally
const bool Mask_comp,           // if true, the mask is complemented
const GrB_Matrix A,             // first input matrix
const GrB_Matrix B,             // second input matrix
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the emult meta-template does all the work
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// For MIN, GB_BINOP_FLIP is 0 (see its definition above), so only the
// non-flipped branch below is compiled in.
GrB_Info GB (_AemultB_02__min_fp64)
(
GrB_Matrix C,                   // output matrix
const GrB_Matrix M,             // optional mask
const bool Mask_struct,         // if true, use the mask structurally
const bool Mask_comp,           // if true, the mask is complemented
const GrB_Matrix A,             // sparse/hyper input matrix
const GrB_Matrix B,             // bitmap/full input matrix
const bool flipxy,              // if true, apply the op as f(y,x)
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and both A and B are
// bitmap/full, with the MIN operator.
GrB_Info GB (_AemultB_04__min_fp64)
(
GrB_Matrix C,                   // output matrix
const GrB_Matrix M,             // sparse/hyper mask
const bool Mask_struct,         // if true, use the mask structurally
const GrB_Matrix A,             // bitmap/full input matrix
const GrB_Matrix B,             // bitmap/full input matrix
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the emult template does all the work
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap, with the
// MIN operator.
GrB_Info GB (_AemultB_bitmap__min_fp64)
(
GrB_Matrix C,                   // output matrix (bitmap)
const int ewise_method,
const GrB_Matrix M,             // optional mask
const bool Mask_struct,         // if true, use the mask structurally
const bool Mask_comp,           // if true, the mask is complemented
const GrB_Matrix A,             // first input matrix
const GrB_Matrix B,             // second input matrix
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the bitmap emult template does all the work
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the MIN operator with the scalar x bound as the
// first argument, computing Cx [p] = fmin (x, Bx [p]) for each entry
// present in B.
GrB_Info GB (_bind1st__min_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// reinterpret the generic arrays and the scalar with their actual types
double *Bx = (double *) Bx_input ;
double *Cx = (double *) Cx_output ;
const double xscalar = (*((double *) x_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t k = 0 ; k < bnz ; k++)
{
// skip entries not present in B, per its bitmap
if (!GBB (Bb, k)) continue ;
double bval = GBX (Bx, k, false) ;
Cx [k] = fmin (xscalar, bval) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the MIN operator with the scalar y bound as the
// second argument, computing Cx [p] = fmin (Ax [p], y) for each entry
// present in A.
GrB_Info GB (_bind2nd__min_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// reinterpret the generic arrays and the scalar with their actual types
double *Ax = (double *) Ax_input ;
double *Cx = (double *) Cx_output ;
const double yscalar = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t k = 0 ; k < anz ; k++)
{
// skip entries not present in A, per its bitmap
if (!GBB (Ab, k)) continue ;
double aval = GBX (Ax, k, false) ;
Cx [k] = fmin (aval, yscalar) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = fmin (x, aij) ; \
}
// C = op (x, A'): transpose A and apply the MIN operator with the scalar x
// bound as the first argument.  The per-entry update is supplied by the
// GB_CAST_OP macro defined just above: Cx [pC] = fmin (x, aij).
GrB_Info GB (_bind1st_tran__min_fp64)
(
GrB_Matrix C,                       // output matrix
const GB_void *x_input,             // the scalar x, typecast below
const GrB_Matrix A,                 // input matrix, transposed by the template
int64_t *restrict *Workspaces,      // workspaces for the parallel transpose
const int64_t *restrict A_slice,    // slicing of A across tasks
int nworkspaces,                    // # of workspaces
int nthreads                        // # of threads to use
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// x arrives as a generic GB_void pointer; reinterpret it as double
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for subsequent code in this file
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = fmin (aij, y) ; \
}
// C = op (A', y): transpose A and apply the MIN operator with the scalar y
// bound as the second argument.  The per-entry update is supplied by the
// GB_CAST_OP macro defined just above: Cx [pC] = fmin (aij, y).
GrB_Info GB (_bind2nd_tran__min_fp64)
(
GrB_Matrix C,                       // output matrix
const GrB_Matrix A,                 // input matrix, transposed by the template
const GB_void *y_input,             // the scalar y, typecast below
int64_t *restrict *Workspaces,      // workspaces for the parallel transpose
const int64_t *restrict A_slice,    // slicing of A across tasks
int nworkspaces,                    // # of workspaces
int nthreads                        // # of threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// y arrives as a generic GB_void pointer; reinterpret it as double
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
par_relax.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Relaxation scheme
*
*****************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "Common.h"
#include "_hypre_lapack.h"
#include "../sstruct_ls/gselim.h"
/*--------------------------------------------------------------------------
* hypre_BoomerAMGRelax
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_BoomerAMGRelax( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
HYPRE_Int *cf_marker,
HYPRE_Int relax_type,
HYPRE_Int relax_points,
HYPRE_Real relax_weight,
HYPRE_Real omega,
HYPRE_Real *l1_norms,
hypre_ParVector *u,
hypre_ParVector *Vtemp,
hypre_ParVector *Ztemp )
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_BigInt first_ind = hypre_ParVectorFirstIndex(u);
hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
HYPRE_Real *u_data = hypre_VectorData(u_local);
hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
HYPRE_Real *f_data = hypre_VectorData(f_local);
hypre_Vector *Vtemp_local;
HYPRE_Real *Vtemp_data;
if (relax_type != 10)
{
Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
Vtemp_data = hypre_VectorData(Vtemp_local);
}
HYPRE_Real *Vext_data = NULL;
HYPRE_Real *v_buf_data = NULL;
HYPRE_Real *tmp_data;
hypre_Vector *Ztemp_local;
HYPRE_Real *Ztemp_data;
hypre_CSRMatrix *A_CSR;
HYPRE_Int *A_CSR_i;
HYPRE_Int *A_CSR_j;
HYPRE_Real *A_CSR_data;
hypre_Vector *f_vector;
HYPRE_Real *f_vector_data;
HYPRE_Int i, j, jr;
HYPRE_Int ii, jj;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int column;
HYPRE_Int relax_error = 0;
HYPRE_Int num_sends;
HYPRE_Int num_recvs;
HYPRE_Int index, start;
HYPRE_Int num_procs, num_threads, my_id, ip, p;
HYPRE_Int vec_start, vec_len;
hypre_MPI_Status *status;
hypre_MPI_Request *requests;
HYPRE_Real *A_mat;
HYPRE_Real *b_vec;
HYPRE_Real zero = 0.0;
HYPRE_Real res, res0, res2;
HYPRE_Real one_minus_weight;
HYPRE_Real one_minus_omega;
HYPRE_Real prod;
one_minus_weight = 1.0 - relax_weight;
one_minus_omega = 1.0 - omega;
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
/*-----------------------------------------------------------------------
* Switch statement to direct control based on relax_type:
* relax_type = 0 -> Jacobi or CF-Jacobi
* relax_type = 1 -> Gauss-Seidel <--- very slow, sequential
* relax_type = 2 -> Gauss_Seidel: interior points in parallel ,
* boundary sequential
* relax_type = 3 -> hybrid: SOR-J mix off-processor, SOR on-processor
* with outer relaxation parameters (forward solve)
* relax_type = 4 -> hybrid: SOR-J mix off-processor, SOR on-processor
* with outer relaxation parameters (backward solve)
* relax_type = 5 -> hybrid: GS-J mix off-processor, chaotic GS on-node
* relax_type = 6 -> hybrid: SSOR-J mix off-processor, SSOR on-processor
* with outer relaxation parameters
* relax_type = 7 -> Jacobi (uses Matvec), only needed in CGNR
* relax_type = 8 -> hybrid L1 Symm. Gauss-Seidel
* relax_type = 10 -> On-processor direct forward solve for matrices with
* triangular structure (indices need not be ordered
* triangular)
* relax_type = 13 -> hybrid L1 Gauss-Seidel forward solve
* relax_type = 14 -> hybrid L1 Gauss-Seidel backward solve
* relax_type = 15 -> CG
* relax_type = 16 -> Scaled Chebyshev
* relax_type = 17 -> FCF-Jacobi
* relax_type = 18 -> L1-Jacobi
* relax_type = 9, 99, 98 -> Direct solve, Gaussian elimination
* relax_type = 19-> Direct Solve, (old version)
* relax_type = 29-> Direct solve: use gaussian elimination & BLAS
* (with pivoting) (old version)
*-----------------------------------------------------------------------*/
switch (relax_type)
{
case 0: /* Weighted Jacobi */
{
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
/* printf("!! Proc %d: n %d, num_sends %d, num_cols_offd %d\n", my_id, n, num_sends, num_cols_offd); */
v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data);
}
/*-----------------------------------------------------------------
* Copy current approximation into temporary vector.
*-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
Vtemp_data[i] = u_data[i];
}
if (num_procs > 1)
{
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
/*-----------------------------------------------------------------
* Relax all points.
*-----------------------------------------------------------------*/
if (relax_points == 0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= one_minus_weight;
u_data[i] += relax_weight * res / A_diag_data[A_diag_i[i]];
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= one_minus_weight;
u_data[i] += relax_weight * res / A_diag_data[A_diag_i[i]];
}
}
}
if (num_procs > 1)
{
hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
}
}
break;
case 5: /* Hybrid: Jacobi off-processor,
chaotic Gauss-Seidel on-processor */
{
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
{
v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data);
/*-----------------------------------------------------------------
* Copy current approximation into temporary vector.
*-----------------------------------------------------------------*/
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
/*-----------------------------------------------------------------
* Relax all points.
*-----------------------------------------------------------------*/
if (relax_points == 0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
if (num_procs > 1)
{
hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
}
}
break;
/* Hybrid: Jacobi off-processor, Gauss-Seidel on-processor (forward loop) */
case 3:
{
if (num_threads > 1)
{
Ztemp_local = hypre_ParVectorLocalVector(Ztemp);
Ztemp_data = hypre_VectorData(Ztemp_local);
}
#if defined(HYPRE_USING_PERSISTENT_COMM)
// JSP: persistent comm can be similarly used for other smoothers
hypre_ParCSRPersistentCommHandle *persistent_comm_handle;
#endif
if (num_procs > 1)
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
#if defined(HYPRE_USING_PERSISTENT_COMM)
persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(1, comm_pkg);
v_buf_data = (HYPRE_Real *) hypre_ParCSRCommHandleSendDataBuffer(persistent_comm_handle);
Vext_data = (HYPRE_Real *) hypre_ParCSRCommHandleRecvDataBuffer(persistent_comm_handle);
#else
v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
#endif
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
HYPRE_Int begin = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
for (i = begin; i < end; i++)
{
v_buf_data[i-begin] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,i)];
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif
#if defined(HYPRE_USING_PERSISTENT_COMM)
hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_HOST, v_buf_data);
#else
comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, v_buf_data, Vext_data);
#endif
/*-----------------------------------------------------------------
* Copy current approximation into temporary vector.
*-----------------------------------------------------------------*/
#if defined(HYPRE_USING_PERSISTENT_COMM)
hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_HOST, Vext_data);
#else
hypre_ParCSRCommHandleDestroy(comm_handle);
#endif
comm_handle = NULL;
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
#endif
}
/*-----------------------------------------------------------------
* Relax all points.
*-----------------------------------------------------------------*/
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RELAX] -= hypre_MPI_Wtime();
#endif
if (relax_weight == 1 && omega == 1)
{
if (relax_points == 0)
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
res -= A_diag_data[jj] * u_data[ii];
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
}
else
{
for (i = 0; i < n; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
res -= A_diag_data[jj] * u_data[ii];
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
}
else
{
for (i = 0; i < n; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
}
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
Vtemp_data[i] = u_data[i];
}
prod = (1.0-relax_weight*omega);
if (relax_points == 0)
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
res0 = 0.0;
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
}
}
else
{
for (i = 0; i < n; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
}
}
else
{
for (i = 0; i < n; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
res0 = 0.0;
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
}
}
}
#ifndef HYPRE_USING_PERSISTENT_COMM
if (num_procs > 1)
{
hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
}
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RELAX] += hypre_MPI_Wtime();
#endif
}
break;
case 1: /* Gauss-Seidel VERY SLOW */
{
/* True sequential Gauss-Seidel across the whole distributed matrix:
 processes take turns in rank order (the p-loop below).  While it is
 not a process's turn it only ships its current boundary u values to
 the process that is relaxing; this serializes the entire sweep,
 hence "VERY SLOW".  Intended for debugging/reference, not production. */
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
/* v_buf_data: packed send buffer of local u entries (one slot per
 send-map element); Vext_data: received off-processor u values,
 indexed by the off-diagonal (A_offd) column numbering. */
v_buf_data = hypre_CTAlloc(HYPRE_Real,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
status = hypre_CTAlloc(hypre_MPI_Status, num_recvs+num_sends, HYPRE_MEMORY_HOST);
requests= hypre_CTAlloc(hypre_MPI_Request, num_recvs+num_sends, HYPRE_MEMORY_HOST);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
/*-----------------------------------------------------------------
 * Copy current approximation into temporary vector.
 *-----------------------------------------------------------------*/
/*
for (i = 0; i < n; i++)
{
Vtemp_data[i] = u_data[i];
} */
}
/*-----------------------------------------------------------------
 * Relax all points.
 *-----------------------------------------------------------------*/
for (p = 0; p < num_procs; p++)
{
jr = 0;
if (p != my_id)
{
/* Not our turn: post sends of our boundary u values to process p
 (the one currently relaxing), then wait and synchronize with it. */
for (i = 0; i < num_sends; i++)
{
ip = hypre_ParCSRCommPkgSendProc(comm_pkg, i);
if (ip == p)
{
vec_start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
vec_len = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1)-vec_start;
for (j=vec_start; j < vec_start+vec_len; j++)
v_buf_data[j] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
hypre_MPI_Isend(&v_buf_data[vec_start], vec_len, HYPRE_MPI_REAL,
ip, 0, comm, &requests[jr++]);
}
}
hypre_MPI_Waitall(jr,requests,status);
hypre_MPI_Barrier(comm);
}
else
{
/* Our turn: gather the freshest off-processor boundary values into
 Vext_data, then run a full local Gauss-Seidel sweep. */
if (num_procs > 1)
{
for (i = 0; i < num_recvs; i++)
{
ip = hypre_ParCSRCommPkgRecvProc(comm_pkg, i);
vec_start = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i);
vec_len = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i+1)-vec_start;
hypre_MPI_Irecv(&Vext_data[vec_start], vec_len, HYPRE_MPI_REAL,
ip, 0, comm, &requests[jr++]);
}
hypre_MPI_Waitall(jr,requests,status);
}
if (relax_points == 0)
{
for (i = 0; i < n; i++)
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
/* Residual r_i = f_i - sum_{j!=i} a_ij u_j, split into the
 on-processor part (A_diag, current u_data) and the
 off-processor part (A_offd, received Vext_data).  The
 diagonal entry is stored first in each CSR row, so the
 inner loop starts at A_diag_i[i]+1. */
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
for (i = 0; i < n; i++)
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
/* Matches the barrier posted by the non-active processes above,
 releasing them once this process's sweep is done. */
if (num_procs > 1)
hypre_MPI_Barrier(comm);
}
}
if (num_procs > 1)
{
hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(status, HYPRE_MEMORY_HOST);
hypre_TFree(requests, HYPRE_MEMORY_HOST);
}
}
break;
case 2: /* Gauss-Seidel: relax interior points in parallel, boundary
sequentially */
{
/* Two-phase Gauss-Seidel: rows with NO off-processor couplings
 (empty A_offd row) are relaxed first, concurrently on every process,
 since they need no communication.  Rows that do couple off-processor
 ("boundary" rows) are then relaxed one process at a time, in rank
 order, with boundary u values exchanged before each turn. */
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
v_buf_data = hypre_CTAlloc(HYPRE_Real,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
status = hypre_CTAlloc(hypre_MPI_Status, num_recvs+num_sends, HYPRE_MEMORY_HOST);
requests= hypre_CTAlloc(hypre_MPI_Request, num_recvs+num_sends, HYPRE_MEMORY_HOST);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
}
/*-----------------------------------------------------------------
 * Copy current approximation into temporary vector.
 *-----------------------------------------------------------------*/
/*
for (i = 0; i < n; i++)
{
Vtemp_data[i] = u_data[i];
} */
/*-----------------------------------------------------------------
 * Relax interior points first
 *-----------------------------------------------------------------*/
if (relax_points == 0)
{
for (i = 0; i < n; i++)
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
/* "Interior" test: the A_offd row for i is empty.
 NOTE(review): an integer row count is compared against 'zero',
 which elsewhere holds a HYPRE_Real 0.0 -- works via implicit
 conversion, but an integer 0 would be cleaner; confirm. */
if ((A_offd_i[i+1]-A_offd_i[i]) == zero &&
A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
else
{
for (i = 0; i < n; i++)
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& (A_offd_i[i+1]-A_offd_i[i]) == zero
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
/* Boundary phase: sequential, process-ordered Gauss-Seidel over rows
 that have off-processor couplings (non-empty A_offd row). */
for (p = 0; p < num_procs; p++)
{
jr = 0;
if (p != my_id)
{
/* Not our turn: send our boundary u values to the active process. */
for (i = 0; i < num_sends; i++)
{
ip = hypre_ParCSRCommPkgSendProc(comm_pkg, i);
if (ip == p)
{
vec_start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
vec_len = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1)-vec_start;
for (j=vec_start; j < vec_start+vec_len; j++)
v_buf_data[j] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
hypre_MPI_Isend(&v_buf_data[vec_start], vec_len, HYPRE_MPI_REAL,
ip, 0, comm, &requests[jr++]);
}
}
hypre_MPI_Waitall(jr,requests,status);
hypre_MPI_Barrier(comm);
}
else
{
/* Our turn: receive fresh off-processor values, relax our boundary
 rows, then release the other processes via the barrier below. */
if (num_procs > 1)
{
for (i = 0; i < num_recvs; i++)
{
ip = hypre_ParCSRCommPkgRecvProc(comm_pkg, i);
vec_start = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i);
vec_len = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i+1)-vec_start;
hypre_MPI_Irecv(&Vext_data[vec_start], vec_len, HYPRE_MPI_REAL,
ip, 0, comm, &requests[jr++]);
}
hypre_MPI_Waitall(jr,requests,status);
}
if (relax_points == 0)
{
for (i = 0; i < n; i++)
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ((A_offd_i[i+1]-A_offd_i[i]) != zero &&
A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
for (i = 0; i < n; i++)
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& (A_offd_i[i+1]-A_offd_i[i]) != zero
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
if (num_procs > 1)
hypre_MPI_Barrier(comm);
}
}
if (num_procs > 1)
{
hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(status, HYPRE_MEMORY_HOST);
hypre_TFree(requests, HYPRE_MEMORY_HOST);
}
}
break;
case 4: /* Hybrid: Jacobi off-processor,
Gauss-Seidel/SOR on-processor
(backward loop) */
{
/* Hybrid smoother, backward variant: off-processor couplings are treated
 Jacobi-style through Vext_data (values exchanged once, at the start of
 the sweep), while on-processor points are swept Gauss-Seidel/SOR in
 DESCENDING index order.  With num_threads > 1 each thread owns a
 contiguous strip [ns,ne); couplings into other strips read tmp_data,
 a copy of u taken before the sweep (Jacobi across strip boundaries). */
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
v_buf_data = hypre_CTAlloc(HYPRE_Real,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
/* Pack local boundary u values and exchange them into Vext_data. */
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
v_buf_data[index++]
= u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data,
Vext_data);
/*-----------------------------------------------------------------
 * Copy current approximation into temporary vector.
 *-----------------------------------------------------------------*/
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
/*-----------------------------------------------------------------
 * Relax all points.
 *-----------------------------------------------------------------*/
if (relax_weight == 1 && omega == 1)
{
/* Unweighted Gauss-Seidel (relax_weight == omega == 1). */
if (relax_points == 0)
{
if (num_threads > 1)
{
tmp_data = hypre_CTAlloc(HYPRE_Real, n, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
/* Partition rows into num_threads strips; the first 'rest'
 strips get one extra row. */
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ne-1; i > ns-1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
res -= A_diag_data[jj] * u_data[ii];
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
}
else
{
for (i = n-1; i > -1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
if (num_threads > 1)
{
tmp_data = hypre_CTAlloc(HYPRE_Real, n, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ne-1; i > ns-1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
res -= A_diag_data[jj] * u_data[ii];
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
}
else
{
for (i = n-1; i > -1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
}
}
else
{
/* Weighted SOR update:
 u_i <- (1 - relax_weight*omega)*u_i
 + relax_weight*(omega*res + res0 + (1-omega)*res2) / a_ii
 where res accumulates off-strip/off-processor contributions,
 res0 the in-strip current u values, and res2 the pre-sweep
 values saved in Vtemp_data. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
Vtemp_data[i] = u_data[i];
}
prod = (1.0-relax_weight*omega);
if (relax_points == 0)
{
if (num_threads > 1)
{
tmp_data = hypre_CTAlloc(HYPRE_Real, n, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
/* FIX: res0 and res2 were missing from the private clause here
 (the C/F variant below lists them).  The loop body writes and
 accumulates both per iteration, so leaving them shared is a
 data race across threads that corrupts the update. */
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,res0,res2,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ne-1; i > ns-1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
res0 = 0.0;
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
}
hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
}
else
{
for (i = n-1; i > -1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
if (num_threads > 1)
{
tmp_data = hypre_CTAlloc(HYPRE_Real, n, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,res0,res2,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ne-1; i > ns-1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
}
hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
}
else
{
for (i = n-1; i > -1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
res0 = 0.0;
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
}
}
}
if (num_procs > 1)
{
hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
}
}
break;
case 6: /* Hybrid: Jacobi off-processor,
Symm. Gauss-Seidel/ SSOR on-processor
with outer relaxation parameter */
{
if (num_threads > 1)
{
Ztemp_local = hypre_ParVectorLocalVector(Ztemp);
Ztemp_data = hypre_VectorData(Ztemp_local);
}
/*-----------------------------------------------------------------
* Copy current approximation into temporary vector.
*-----------------------------------------------------------------*/
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
v_buf_data = hypre_CTAlloc(HYPRE_Real,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
v_buf_data[index++]
= u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data,
Vext_data);
/*-----------------------------------------------------------------
* Copy current approximation into temporary vector.
*-----------------------------------------------------------------*/
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
/*-----------------------------------------------------------------
* Relax all points.
*-----------------------------------------------------------------*/
if (relax_weight == 1 && omega == 1)
{
if (relax_points == 0)
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
for (i = ne-1; i > ns-1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
}
else
{
for (i = 0; i < n; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
for (i = n-1; i > -1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
for (i = ne-1; i > ns-1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
}
else
{
for (i = 0; i < n; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
for (i = n-1; i > -1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
}
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
Vtemp_data[i] = u_data[i];
}
prod = (1.0-relax_weight*omega);
if (relax_points == 0)
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,res0,res2,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
for (i = ne-1; i > ns-1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
}
}
else
{
for (i = 0; i < n; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res0 = 0.0;
res = f_data[i];
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
for (i = n-1; i > -1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res0 = 0.0;
res = f_data[i];
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,res0,res2,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res2 += A_diag_data[jj] * Vtemp_data[ii];
res0 -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
for (i = ne-1; i > ns-1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res2 += A_diag_data[jj] * Vtemp_data[ii];
res0 -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
}
}
else
{
for (i = 0; i < n; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
res0 = 0.0;
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
for (i = n-1; i > -1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
res0 = 0.0;
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
}
}
}
if (num_procs > 1)
{
hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
}
}
break;
case 7: /* Jacobi (uses ParMatvec) */
{
   /* Weighted Jacobi implemented with a parallel matvec:
    *   u_new = u + w * D_l1^{-1} (f - A u)
    * where D_l1 is the diagonal matrix of row l1-norms.  The
    * residual is formed globally by hypre_ParCSRMatrixMatvec, so no
    * explicit halo exchange is needed in this case. */

   /*-----------------------------------------------------------------
    * Copy f into temporary vector.
    *-----------------------------------------------------------------*/
   //hypre_SeqVectorPrefetch(hypre_ParVectorLocalVector(Vtemp), HYPRE_MEMORY_DEVICE);
   //hypre_SeqVectorPrefetch(hypre_ParVectorLocalVector(f), HYPRE_MEMORY_DEVICE);
   hypre_ParVectorCopy(f, Vtemp);

   /*-----------------------------------------------------------------
    * Perform Matvec Vtemp = relax_weight*(f - A u).
    * (alpha = -relax_weight, beta = relax_weight folds the damping
    * factor into the residual so the update below is a plain AXPY.)
    *-----------------------------------------------------------------*/
   hypre_ParCSRMatrixMatvec(-relax_weight,A, u, relax_weight, Vtemp);

#if defined(HYPRE_USING_CUDA)
   /* Device path: u_i += Vtemp_i / l1_norms_i, fused in one kernel. */
   hypreDevice_IVAXPY(n, l1_norms, Vtemp_data, u_data);
#else
   for (i = 0; i < n; i++)
   {
      /*-----------------------------------------------------------
       * Scale the damped residual by the row l1 norm and update u.
       * (Rows with zero l1 norm are assumed not to occur here;
       *  NOTE(review): unlike cases 8/13 there is no zero guard —
       *  confirm l1_norms is strictly positive for this relax type.)
       *-----------------------------------------------------------*/
      u_data[i] += Vtemp_data[i] / l1_norms[i];
   }
#endif
}
break;
case 8: /* hybrid L1 Symm. Gauss-Seidel */
{
   /* Symmetric (forward sweep then backward sweep) Gauss-Seidel,
    * scaled by the row l1-norms instead of the bare diagonal.
    * "Hybrid" means Gauss-Seidel inside a process/thread chunk and
    * Jacobi across chunk and process boundaries: off-chunk columns
    * read the pre-sweep snapshot (tmp_data / Vext_data) rather than
    * freshly updated values.  Four variants below:
    *   (relax_weight==1 && omega==1)  x  (all points | C/F points).
    */
   if (num_threads > 1)
   {
      /* Ztemp provides per-sweep scratch for the u snapshot. */
      Ztemp_local = hypre_ParVectorLocalVector(Ztemp);
      Ztemp_data = hypre_VectorData(Ztemp_local);
   }

   /*-----------------------------------------------------------------
    * Exchange boundary values of the current approximation u:
    * pack owned entries into v_buf_data, receive off-processor
    * column values into Vext_data.
    *-----------------------------------------------------------------*/
   if (num_procs > 1)
   {
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);

      v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);

      Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);

      if (num_cols_offd)
      {
         A_offd_j = hypre_CSRMatrixJ(A_offd);
         A_offd_data = hypre_CSRMatrixData(A_offd);
      }

      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
         {
            v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
         }
      }

      comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data);

      /*-----------------------------------------------------------------
       * Wait for the halo exchange to complete; Vext_data now holds
       * the off-processor entries of u.
       *-----------------------------------------------------------------*/
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;
   }

   /*-----------------------------------------------------------------
    * Relax all points (unweighted path: relax_weight==1, omega==1).
    *-----------------------------------------------------------------*/
   if (relax_weight == 1 && omega == 1)
   {
      if (relax_points == 0)
      {
         if (num_threads > 1)
         {
            /* Snapshot u so other threads' rows are read Jacobi-style. */
            tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
            for (i = 0; i < n; i++)
               tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
            for (j = 0; j < num_threads; j++)
            {
               /* Partition rows [0,n) into num_threads contiguous
                * chunks [ns,ne); the first `rest` chunks get one
                * extra row. */
               size = n/num_threads;
               rest = n - size*num_threads;
               if (j < rest)
               {
                  ns = j*size+j;
                  ne = (j+1)*size+j+1;
               }
               else
               {
                  ns = j*size+rest;
                  ne = (j+1)*size+rest;
               }
               for (i = ns; i < ne; i++) /* forward sweep over this chunk */
               {
                  /*-----------------------------------------------------------
                   * If the row l1 norm is nonzero, relax point i;
                   * otherwise, skip it.
                   *-----------------------------------------------------------*/
                  if ( l1_norms[i] != zero)
                  {
                     res = f_data[i];
                     for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                     {
                        ii = A_diag_j[jj];
                        if (ii >= ns && ii < ne)
                        {
                           /* in-chunk column: Gauss-Seidel (updated u) */
                           res -= A_diag_data[jj] * u_data[ii];
                        }
                        else
                        {
                           /* out-of-chunk column: Jacobi (snapshot) */
                           res -= A_diag_data[jj] * tmp_data[ii];
                        }
                     }
                     for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                     {
                        ii = A_offd_j[jj];
                        res -= A_offd_data[jj] * Vext_data[ii];
                     }
                     /* l1-scaled correction; note res still contains
                      * the diagonal term, so this is an increment. */
                     u_data[i] += res / l1_norms[i];
                  }
               }
               for (i = ne-1; i > ns-1; i--) /* backward sweep over this chunk */
               {
                  /*-----------------------------------------------------------
                   * If the row l1 norm is nonzero, relax point i;
                   * otherwise, skip it.
                   *-----------------------------------------------------------*/
                  if ( l1_norms[i] != zero)
                  {
                     res = f_data[i];
                     for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                     {
                        ii = A_diag_j[jj];
                        if (ii >= ns && ii < ne)
                        {
                           res -= A_diag_data[jj] * u_data[ii];
                        }
                        else
                        {
                           res -= A_diag_data[jj] * tmp_data[ii];
                        }
                     }
                     for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                     {
                        ii = A_offd_j[jj];
                        res -= A_offd_data[jj] * Vext_data[ii];
                     }
                     u_data[i] += res / l1_norms[i];
                  }
               }
            }
         }
         else
         {
            for (i = 0; i < n; i++) /* forward sweep */
            {
               /*-----------------------------------------------------------
                * If the row l1 norm is nonzero, relax point i;
                * otherwise, skip it.
                *-----------------------------------------------------------*/
               if ( l1_norms[i] != zero)
               {
                  res = f_data[i];
                  for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                  {
                     ii = A_diag_j[jj];
                     res -= A_diag_data[jj] * u_data[ii];
                  }
                  for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                  {
                     ii = A_offd_j[jj];
                     res -= A_offd_data[jj] * Vext_data[ii];
                  }
                  u_data[i] += res / l1_norms[i];
               }
            }
            for (i = n-1; i > -1; i--) /* backward sweep */
            {
               /*-----------------------------------------------------------
                * If the row l1 norm is nonzero, relax point i;
                * otherwise, skip it.
                *-----------------------------------------------------------*/
               if ( l1_norms[i] != zero)
               {
                  res = f_data[i];
                  for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                  {
                     ii = A_diag_j[jj];
                     res -= A_diag_data[jj] * u_data[ii];
                  }
                  for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                  {
                     ii = A_offd_j[jj];
                     res -= A_offd_data[jj] * Vext_data[ii];
                  }
                  u_data[i] += res / l1_norms[i];
               }
            }
         }
      }
      /*-----------------------------------------------------------------
       * Relax only C or F points as determined by relax_points.
       *-----------------------------------------------------------------*/
      else
      {
         if (num_threads > 1)
         {
            tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
            for (i = 0; i < n; i++)
               tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
            for (j = 0; j < num_threads; j++)
            {
               size = n/num_threads;
               rest = n - size*num_threads;
               if (j < rest)
               {
                  ns = j*size+j;
                  ne = (j+1)*size+j+1;
               }
               else
               {
                  ns = j*size+rest;
                  ne = (j+1)*size+rest;
               }
               for (i = ns; i < ne; i++) /* forward sweep over this chunk */
               {
                  /*-----------------------------------------------------------
                   * If i is of the right type ( C or F ) and the row l1
                   * norm is nonzero, relax point i; otherwise, skip it.
                   *-----------------------------------------------------------*/
                  if (cf_marker[i] == relax_points
                      && l1_norms[i] != zero)
                  {
                     res = f_data[i];
                     for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                     {
                        ii = A_diag_j[jj];
                        if (ii >= ns && ii < ne)
                        {
                           res -= A_diag_data[jj] * u_data[ii];
                        }
                        else
                        {
                           res -= A_diag_data[jj] * tmp_data[ii];
                        }
                     }
                     for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                     {
                        ii = A_offd_j[jj];
                        res -= A_offd_data[jj] * Vext_data[ii];
                     }
                     u_data[i] += res / l1_norms[i];
                  }
               }
               for (i = ne-1; i > ns-1; i--) /* backward sweep over this chunk */
               {
                  /*-----------------------------------------------------------
                   * If i is of the right type ( C or F ) and the row l1
                   * norm is nonzero, relax point i; otherwise, skip it.
                   *-----------------------------------------------------------*/
                  if (cf_marker[i] == relax_points
                      && l1_norms[i] != zero)
                  {
                     res = f_data[i];
                     for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                     {
                        ii = A_diag_j[jj];
                        if (ii >= ns && ii < ne)
                        {
                           res -= A_diag_data[jj] * u_data[ii];
                        }
                        else
                           res -= A_diag_data[jj] * tmp_data[ii];
                     }
                     for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                     {
                        ii = A_offd_j[jj];
                        res -= A_offd_data[jj] * Vext_data[ii];
                     }
                     u_data[i] += res / l1_norms[i];
                  }
               }
            }
         }
         else
         {
            for (i = 0; i < n; i++) /* forward sweep */
            {
               /*-----------------------------------------------------------
                * If i is of the right type ( C or F ) and the row l1
                * norm is nonzero, relax point i; otherwise, skip it.
                *-----------------------------------------------------------*/
               if (cf_marker[i] == relax_points
                   && l1_norms[i] != zero)
               {
                  res = f_data[i];
                  for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                  {
                     ii = A_diag_j[jj];
                     res -= A_diag_data[jj] * u_data[ii];
                  }
                  for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                  {
                     ii = A_offd_j[jj];
                     res -= A_offd_data[jj] * Vext_data[ii];
                  }
                  u_data[i] += res / l1_norms[i];
               }
            }
            for (i = n-1; i > -1; i--) /* backward sweep */
            {
               /*-----------------------------------------------------------
                * If i is of the right type ( C or F ) and the row l1
                * norm is nonzero, relax point i; otherwise, skip it.
                *-----------------------------------------------------------*/
               if (cf_marker[i] == relax_points
                   && l1_norms[i] != zero)
               {
                  res = f_data[i];
                  for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                  {
                     ii = A_diag_j[jj];
                     res -= A_diag_data[jj] * u_data[ii];
                  }
                  for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                  {
                     ii = A_offd_j[jj];
                     res -= A_offd_data[jj] * Vext_data[ii];
                  }
                  u_data[i] += res / l1_norms[i];
               }
            }
         }
      }
   }
   /* Weighted SSOR path (relax_weight != 1 or omega != 1):
    *   u_new = (1 - w*omega) u
    *         + w*( omega*res + res0 + (1-omega)*res2 ) / l1_norm
    * where, per row: res  = f - A_offd * Vext (and off-chunk diag terms),
    *                 res0 = -sum of off-diagonal A_diag * u (updated),
    *                 res2 = +sum of off-diagonal A_diag * Vtemp (pre-sweep u).
    * Note the diag loops start at A_diag_i[i]+1 here (diagonal excluded). */
   else
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < n; i++)
      {
         /* Vtemp keeps the pre-sweep iterate for the res2 term. */
         Vtemp_data[i] = u_data[i];
      }
      prod = (1.0-relax_weight*omega);
      if (relax_points == 0)
      {
         if (num_threads > 1)
         {
            tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
            for (i = 0; i < n; i++)
               tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
            for (j = 0; j < num_threads; j++)
            {
               size = n/num_threads;
               rest = n - size*num_threads;
               if (j < rest)
               {
                  ns = j*size+j;
                  ne = (j+1)*size+j+1;
               }
               else
               {
                  ns = j*size+rest;
                  ne = (j+1)*size+rest;
               }
               for (i = ns; i < ne; i++) /* forward sweep over this chunk */
               {
                  /*-----------------------------------------------------------
                   * If the row l1 norm is nonzero, relax point i;
                   * otherwise, skip it.
                   *-----------------------------------------------------------*/
                  if ( l1_norms[i] != zero)
                  {
                     res0 = 0.0;
                     res2 = 0.0;
                     res = f_data[i];
                     for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                     {
                        ii = A_diag_j[jj];
                        if (ii >= ns && ii < ne)
                        {
                           res0 -= A_diag_data[jj] * u_data[ii];
                           res2 += A_diag_data[jj] * Vtemp_data[ii];
                        }
                        else
                           res -= A_diag_data[jj] * tmp_data[ii];
                     }
                     for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                     {
                        ii = A_offd_j[jj];
                        res -= A_offd_data[jj] * Vext_data[ii];
                     }
                     u_data[i] *= prod;
                     u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i];
                     /*u_data[i] += omega*(relax_weight*res + res0 +
                       one_minus_weight*res2) / l1_norms[i];*/
                  }
               }
               for (i = ne-1; i > ns-1; i--) /* backward sweep over this chunk */
               {
                  /*-----------------------------------------------------------
                   * If the row l1 norm is nonzero, relax point i;
                   * otherwise, skip it.
                   *-----------------------------------------------------------*/
                  if ( l1_norms[i] != zero)
                  {
                     res0 = 0.0;
                     res2 = 0.0;
                     res = f_data[i];
                     for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                     {
                        ii = A_diag_j[jj];
                        if (ii >= ns && ii < ne)
                        {
                           res0 -= A_diag_data[jj] * u_data[ii];
                           res2 += A_diag_data[jj] * Vtemp_data[ii];
                        }
                        else
                           res -= A_diag_data[jj] * tmp_data[ii];
                     }
                     for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                     {
                        ii = A_offd_j[jj];
                        res -= A_offd_data[jj] * Vext_data[ii];
                     }
                     u_data[i] *= prod;
                     u_data[i] += relax_weight*(omega*res + res0 +
                                                one_minus_omega*res2) / l1_norms[i];
                     /*u_data[i] += omega*(relax_weight*res + res0 +
                       one_minus_weight*res2) / l1_norms[i];*/
                  }
               }
            }
         }
         else
         {
            for (i = 0; i < n; i++) /* forward sweep */
            {
               /*-----------------------------------------------------------
                * If the row l1 norm is nonzero, relax point i;
                * otherwise, skip it.
                *-----------------------------------------------------------*/
               if ( l1_norms[i] != zero)
               {
                  res0 = 0.0;
                  res = f_data[i];
                  res2 = 0.0;
                  for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                  {
                     ii = A_diag_j[jj];
                     res0 -= A_diag_data[jj] * u_data[ii];
                     res2 += A_diag_data[jj] * Vtemp_data[ii];
                  }
                  for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                  {
                     ii = A_offd_j[jj];
                     res -= A_offd_data[jj] * Vext_data[ii];
                  }
                  u_data[i] *= prod;
                  u_data[i] += relax_weight*(omega*res + res0 +
                                             one_minus_omega*res2) / l1_norms[i];
                  /*u_data[i] += omega*(relax_weight*res + res0 +
                    one_minus_weight*res2) / l1_norms[i];*/
               }
            }
            for (i = n-1; i > -1; i--) /* backward sweep */
            {
               /*-----------------------------------------------------------
                * If the row l1 norm is nonzero, relax point i;
                * otherwise, skip it.
                *-----------------------------------------------------------*/
               if ( l1_norms[i] != zero)
               {
                  res0 = 0.0;
                  res = f_data[i];
                  res2 = 0.0;
                  for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                  {
                     ii = A_diag_j[jj];
                     res0 -= A_diag_data[jj] * u_data[ii];
                     res2 += A_diag_data[jj] * Vtemp_data[ii];
                  }
                  for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                  {
                     ii = A_offd_j[jj];
                     res -= A_offd_data[jj] * Vext_data[ii];
                  }
                  u_data[i] *= prod;
                  u_data[i] += relax_weight*(omega*res + res0 +
                                             one_minus_omega*res2) / l1_norms[i];
                  /*u_data[i] += omega*(relax_weight*res + res0 +
                    one_minus_weight*res2) / l1_norms[i];*/
               }
            }
         }
      }
      /*-----------------------------------------------------------------
       * Relax only C or F points as determined by relax_points.
       *-----------------------------------------------------------------*/
      else
      {
         if (num_threads > 1)
         {
            tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
            for (i = 0; i < n; i++)
               tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
            for (j = 0; j < num_threads; j++)
            {
               size = n/num_threads;
               rest = n - size*num_threads;
               if (j < rest)
               {
                  ns = j*size+j;
                  ne = (j+1)*size+j+1;
               }
               else
               {
                  ns = j*size+rest;
                  ne = (j+1)*size+rest;
               }
               for (i = ns; i < ne; i++) /* forward sweep over this chunk */
               {
                  /*-----------------------------------------------------------
                   * If i is of the right type ( C or F ) and the row l1
                   * norm is nonzero, relax point i; otherwise, skip it.
                   *-----------------------------------------------------------*/
                  if (cf_marker[i] == relax_points && l1_norms[i] != zero)
                  {
                     res0 = 0.0;
                     res2 = 0.0;
                     res = f_data[i];
                     for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                     {
                        ii = A_diag_j[jj];
                        if (ii >= ns && ii < ne)
                        {
                           res2 += A_diag_data[jj] * Vtemp_data[ii];
                           res0 -= A_diag_data[jj] * u_data[ii];
                        }
                        else
                           res -= A_diag_data[jj] * tmp_data[ii];
                     }
                     for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                     {
                        ii = A_offd_j[jj];
                        res -= A_offd_data[jj] * Vext_data[ii];
                     }
                     u_data[i] *= prod;
                     u_data[i] += relax_weight*(omega*res + res0 +
                                                one_minus_omega*res2) / l1_norms[i];
                     /*u_data[i] += omega*(relax_weight*res + res0 +
                       one_minus_weight*res2) / l1_norms[i];*/
                  }
               }
               for (i = ne-1; i > ns-1; i--) /* backward sweep over this chunk */
               {
                  /*-----------------------------------------------------------
                   * If i is of the right type ( C or F ) and the row l1
                   * norm is nonzero, relax point i; otherwise, skip it.
                   *-----------------------------------------------------------*/
                  if (cf_marker[i] == relax_points
                      && l1_norms[i] != zero)
                  {
                     res0 = 0.0;
                     res2 = 0.0;
                     res = f_data[i];
                     for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                     {
                        ii = A_diag_j[jj];
                        if (ii >= ns && ii < ne)
                        {
                           res2 += A_diag_data[jj] * Vtemp_data[ii];
                           res0 -= A_diag_data[jj] * u_data[ii];
                        }
                        else
                           res -= A_diag_data[jj] * tmp_data[ii];
                     }
                     for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                     {
                        ii = A_offd_j[jj];
                        res -= A_offd_data[jj] * Vext_data[ii];
                     }
                     u_data[i] *= prod;
                     u_data[i] += relax_weight*(omega*res + res0 +
                                                one_minus_omega*res2) / l1_norms[i];
                     /*u_data[i] += omega*(relax_weight*res + res0 +
                       one_minus_weight*res2) / l1_norms[i];*/
                  }
               }
            }
         }
         else
         {
            for (i = 0; i < n; i++) /* forward sweep */
            {
               /*-----------------------------------------------------------
                * If i is of the right type ( C or F ) and the row l1
                * norm is nonzero, relax point i; otherwise, skip it.
                *-----------------------------------------------------------*/
               if (cf_marker[i] == relax_points
                   && l1_norms[i] != zero)
               {
                  res = f_data[i];
                  res0 = 0.0;
                  res2 = 0.0;
                  for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                  {
                     ii = A_diag_j[jj];
                     res0 -= A_diag_data[jj] * u_data[ii];
                     res2 += A_diag_data[jj] * Vtemp_data[ii];
                  }
                  for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                  {
                     ii = A_offd_j[jj];
                     res -= A_offd_data[jj] * Vext_data[ii];
                  }
                  u_data[i] *= prod;
                  u_data[i] += relax_weight*(omega*res + res0 +
                                             one_minus_omega*res2) / l1_norms[i];
                  /*u_data[i] += omega*(relax_weight*res + res0 +
                    one_minus_weight*res2) / l1_norms[i];*/
               }
            }
            for (i = n-1; i > -1; i--) /* backward sweep */
            {
               /*-----------------------------------------------------------
                * If i is of the right type ( C or F ) and the row l1
                * norm is nonzero, relax point i; otherwise, skip it.
                *-----------------------------------------------------------*/
               if (cf_marker[i] == relax_points
                   && l1_norms[i] != zero)
               {
                  res = f_data[i];
                  res0 = 0.0;
                  res2 = 0.0;
                  for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                  {
                     ii = A_diag_j[jj];
                     res0 -= A_diag_data[jj] * u_data[ii];
                     res2 += A_diag_data[jj] * Vtemp_data[ii];
                  }
                  for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                  {
                     ii = A_offd_j[jj];
                     res -= A_offd_data[jj] * Vext_data[ii];
                  }
                  u_data[i] *= prod;
                  u_data[i] += relax_weight*(omega*res + res0 +
                                             one_minus_omega*res2) / l1_norms[i];
                  /*u_data[i] += omega*(relax_weight*res + res0 +
                    one_minus_weight*res2) / l1_norms[i];*/
               }
            }
         }
      }
   }
   /* Release the communication buffers allocated above. */
   if (num_procs > 1)
   {
      hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
      hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
   }
}
break;
/* Hybrid: Jacobi off-processor, ordered Gauss-Seidel on-processor */
case 10:
{
   /* Gauss-Seidel applied in a topological order of the local rows
    * (computed once via hypre_topo_sort and cached on A), so that a
    * triangular A_diag is solved exactly; off-processor couplings use
    * the pre-sweep halo values (Jacobi).  Optionally uses persistent
    * MPI communication for the halo exchange. */
   if (num_threads > 1)
   {
      Ztemp_local = hypre_ParVectorLocalVector(Ztemp);
      Ztemp_data = hypre_VectorData(Ztemp_local);
   }

#ifdef HYPRE_USING_PERSISTENT_COMM
   // JSP: persistent comm can be similarly used for other smoothers
   hypre_ParCSRPersistentCommHandle *persistent_comm_handle;
#endif

   if (num_procs > 1)
   {
#ifdef HYPRE_PROFILE
      hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif

      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);

#ifdef HYPRE_USING_PERSISTENT_COMM
      /* Persistent path: send/recv buffers are owned by the handle. */
      persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(1, comm_pkg);
      v_buf_data = (HYPRE_Real *) hypre_ParCSRCommHandleSendDataBuffer(persistent_comm_handle);
      Vext_data = (HYPRE_Real *) hypre_ParCSRCommHandleRecvDataBuffer(persistent_comm_handle);
#else
      v_buf_data = hypre_CTAlloc(HYPRE_Real,
                                 hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
      Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
#endif

      if (num_cols_offd)
      {
         A_offd_j = hypre_CSRMatrixJ(A_offd);
         A_offd_data = hypre_CSRMatrixData(A_offd);
      }

      /* Pack all send entries of u in one flat, parallelizable loop. */
      HYPRE_Int begin = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
      HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
      for (i = begin; i < end; i++)
      {
         v_buf_data[i - begin]
            = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,i)];
      }

#ifdef HYPRE_PROFILE
      hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
      hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif

#ifdef HYPRE_USING_PERSISTENT_COMM
      hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_HOST, v_buf_data);
#else
      comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data);
#endif

      /*-----------------------------------------------------------------
       * Wait for the halo exchange; Vext_data then holds the
       * off-processor entries of the current approximation u.
       *-----------------------------------------------------------------*/
#ifdef HYPRE_USING_PERSISTENT_COMM
      hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_HOST, Vext_data);
#else
      hypre_ParCSRCommHandleDestroy(comm_handle);
#endif
      comm_handle = NULL;

#ifdef HYPRE_PROFILE
      hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
#endif
   }

   // Check for ordering of matrix. If stored, get pointer, otherwise
   // compute ordering and point matrix variable to array.
   // (The ordering is cached on A, so the topological sort runs once.)
   HYPRE_Int *proc_ordering;
   if (!hypre_ParCSRMatrixProcOrdering(A)) {
      proc_ordering = hypre_CTAlloc(HYPRE_Int, n, HYPRE_MEMORY_HOST);
      hypre_topo_sort(A_diag_i, A_diag_j, A_diag_data, proc_ordering, n);
      hypre_ParCSRMatrixProcOrdering(A) = proc_ordering;
   }
   else {
      proc_ordering = hypre_ParCSRMatrixProcOrdering(A);
   }

   /*-----------------------------------------------------------------
    * Relax all points.
    *-----------------------------------------------------------------*/
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_RELAX] -= hypre_MPI_Wtime();
#endif
   if (relax_points == 0)
   {
      if (num_threads > 1)
      {
         /* Snapshot u; columns outside this thread's chunk are read
          * from the snapshot (Jacobi across thread boundaries). */
         tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < n; i++)
            tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
         for (j = 0; j < num_threads; j++)
         {
            /* Contiguous chunk [ns,ne) of ordering positions; the
             * first `rest` threads get one extra entry. */
            size = n/num_threads;
            rest = n - size*num_threads;
            if (j < rest)
            {
               ns = j*size+j;
               ne = (j+1)*size+j+1;
            }
            else
            {
               ns = j*size+rest;
               ne = (j+1)*size+rest;
            }
            for (i = ns; i < ne; i++) /* relax rows in topological order */
            {
               HYPRE_Int row = proc_ordering[i];
               /*-----------------------------------------------------------
                * If diagonal is nonzero, relax point row; otherwise, skip it.
                *-----------------------------------------------------------*/
               if ( A_diag_data[A_diag_i[row]] != zero)
               {
                  res = f_data[row];
                  for (jj = A_diag_i[row]+1; jj < A_diag_i[row+1]; jj++)
                  {
                     ii = A_diag_j[jj];
                     /* NOTE(review): the in-chunk test compares the
                      * column index ii against the chunk of ordering
                      * positions [ns,ne), while rows are visited as
                      * proc_ordering[i] — matches upstream hypre, but
                      * verify this is the intended chunk membership. */
                     if (ii >= ns && ii < ne)
                        res -= A_diag_data[jj] * u_data[ii];
                     else
                        res -= A_diag_data[jj] * tmp_data[ii];
                  }
                  for (jj = A_offd_i[row]; jj < A_offd_i[row+1]; jj++)
                  {
                     ii = A_offd_j[jj];
                     res -= A_offd_data[jj] * Vext_data[ii];
                  }
                  /* Direct Gauss-Seidel solve for this row (assignment,
                   * not increment: the diagonal term was skipped). */
                  u_data[row] = res / A_diag_data[A_diag_i[row]];
               }
            }
         }
      }
      else
      {
         for (i = 0; i < n; i++) /* relax rows in topological order */
         {
            HYPRE_Int row = proc_ordering[i];
            /*-----------------------------------------------------------
             * If diagonal is nonzero, relax point row; otherwise, skip it.
             *-----------------------------------------------------------*/
            if ( A_diag_data[A_diag_i[row]] != zero)
            {
               res = f_data[row];
               for (jj = A_diag_i[row]+1; jj < A_diag_i[row+1]; jj++)
               {
                  ii = A_diag_j[jj];
                  res -= A_diag_data[jj] * u_data[ii];
               }
               for (jj = A_offd_i[row]; jj < A_offd_i[row+1]; jj++)
               {
                  ii = A_offd_j[jj];
                  res -= A_offd_data[jj] * Vext_data[ii];
               }
               u_data[row] = res / A_diag_data[A_diag_i[row]];
            }
         }
      }
   }
   /*-----------------------------------------------------------------
    * Relax only C or F points as determined by relax_points.
    *-----------------------------------------------------------------*/
   else
   {
      if (num_threads > 1)
      {
         tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < n; i++)
            tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
         for (j = 0; j < num_threads; j++)
         {
            size = n/num_threads;
            rest = n - size*num_threads;
            if (j < rest)
            {
               ns = j*size+j;
               ne = (j+1)*size+j+1;
            }
            else
            {
               ns = j*size+rest;
               ne = (j+1)*size+rest;
            }
            for (i = ns; i < ne; i++) /* relax rows in topological order */
            {
               HYPRE_Int row = proc_ordering[i];
               /*-----------------------------------------------------------
                * If row is of the right type ( C or F ) and diagonal is
                * nonzero, relax point row; otherwise, skip it.
                *-----------------------------------------------------------*/
               if (cf_marker[row] == relax_points
                   && A_diag_data[A_diag_i[row]] != zero)
               {
                  res = f_data[row];
                  for (jj = A_diag_i[row]+1; jj < A_diag_i[row+1]; jj++)
                  {
                     ii = A_diag_j[jj];
                     if (ii >= ns && ii < ne)
                        res -= A_diag_data[jj] * u_data[ii];
                     else
                        res -= A_diag_data[jj] * tmp_data[ii];
                  }
                  for (jj = A_offd_i[row]; jj < A_offd_i[row+1]; jj++)
                  {
                     ii = A_offd_j[jj];
                     res -= A_offd_data[jj] * Vext_data[ii];
                  }
                  u_data[row] = res / A_diag_data[A_diag_i[row]];
               }
            }
         }
      }
      else
      {
         for (i = 0; i < n; i++) /* relax rows in topological order */
         {
            HYPRE_Int row = proc_ordering[i];
            /*-----------------------------------------------------------
             * If row is of the right type ( C or F ) and diagonal is
             * nonzero, relax point row; otherwise, skip it.
             *-----------------------------------------------------------*/
            if (cf_marker[row] == relax_points
                && A_diag_data[A_diag_i[row]] != zero)
            {
               res = f_data[row];
               for (jj = A_diag_i[row]+1; jj < A_diag_i[row+1]; jj++)
               {
                  ii = A_diag_j[jj];
                  res -= A_diag_data[jj] * u_data[ii];
               }
               for (jj = A_offd_i[row]; jj < A_offd_i[row+1]; jj++)
               {
                  ii = A_offd_j[jj];
                  res -= A_offd_data[jj] * Vext_data[ii];
               }
               u_data[row] = res / A_diag_data[A_diag_i[row]];
            }
         }
      }
   }

   /* With persistent comm, the buffers belong to the comm handle and
    * must not be freed here. */
#ifndef HYPRE_USING_PERSISTENT_COMM
   if (num_procs > 1)
   {
      hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
      hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
   }
#endif
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_RELAX] += hypre_MPI_Wtime();
#endif
}
break;
case 13: /* hybrid L1 Gauss-Seidel forward solve */
{
if (num_threads > 1)
{
Ztemp_local = hypre_ParVectorLocalVector(Ztemp);
Ztemp_data = hypre_VectorData(Ztemp_local);
}
/*-----------------------------------------------------------------
* Copy current approximation into temporary vector.
*-----------------------------------------------------------------*/
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
v_buf_data = hypre_CTAlloc(HYPRE_Real,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
v_buf_data[index++]
= u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data);
/*-----------------------------------------------------------------
* Copy current approximation into temporary vector.
*-----------------------------------------------------------------*/
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
/*-----------------------------------------------------------------
* Relax all points.
*-----------------------------------------------------------------*/
if (relax_weight == 1 && omega == 1)
{
if (relax_points == 0)
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
tmp_data[i] = u_data[i];
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( l1_norms[i] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
}
else
{
res -= A_diag_data[jj] * tmp_data[ii];
}
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
}
}
else
{
for (i = 0; i < n; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( l1_norms[i] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& l1_norms[i] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
}
}
else
{
for (i = 0; i < n; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& l1_norms[i] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
}
}
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
Vtemp_data[i] = u_data[i];
}
prod = (1.0-relax_weight*omega);
if (relax_points == 0)
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( l1_norms[i] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / l1_norms[i];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / l1_norms[i];*/
}
}
}
}
else
{
for (i = 0; i < n; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( l1_norms[i] != zero)
{
res0 = 0.0;
res = f_data[i];
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / l1_norms[i];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / l1_norms[i];*/
}
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& l1_norms[i] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res2 += A_diag_data[jj] * Vtemp_data[ii];
res0 -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / l1_norms[i];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / l1_norms[i];*/
}
}
}
}
else
{
for (i = 0; i < n; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& l1_norms[i] != zero)
{
res = f_data[i];
res0 = 0.0;
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / l1_norms[i];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / l1_norms[i];*/
}
}
}
}
}
if (num_procs > 1)
{
hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
}
}
break;
case 14: /* hybrid L1 Gauss-Seidel backward solve */
{
if (num_threads > 1)
{
Ztemp_local = hypre_ParVectorLocalVector(Ztemp);
Ztemp_data = hypre_VectorData(Ztemp_local);
}
/*-----------------------------------------------------------------
* Copy current approximation into temporary vector.
*-----------------------------------------------------------------*/
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
{
v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data);
/*-----------------------------------------------------------------
* Copy current approximation into temporary vector.
*-----------------------------------------------------------------*/
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
/*-----------------------------------------------------------------
* Relax all points.
*-----------------------------------------------------------------*/
if (relax_weight == 1 && omega == 1)
{
if (relax_points == 0)
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ne-1; i > ns-1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( l1_norms[i] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
}
}
else
{
for (i = n-1; i > -1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( l1_norms[i] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ne-1; i > ns-1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& l1_norms[i] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
}
}
else
{
for (i = n-1; i > -1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& l1_norms[i] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
}
}
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
Vtemp_data[i] = u_data[i];
}
prod = (1.0-relax_weight*omega);
if (relax_points == 0)
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ne-1; i > ns-1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( l1_norms[i] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / l1_norms[i];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / l1_norms[i];*/
}
}
}
}
else
{
for (i = n-1; i > -1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( l1_norms[i] != zero)
{
res0 = 0.0;
res = f_data[i];
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / l1_norms[i];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / l1_norms[i];*/
}
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ne-1; i > ns-1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& l1_norms[i] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res2 += A_diag_data[jj] * Vtemp_data[ii];
res0 -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / l1_norms[i];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / l1_norms[i];*/
}
}
}
}
else
{
for (i = n-1; i > -1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& l1_norms[i] != zero)
{
res = f_data[i];
res0 = 0.0;
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / l1_norms[i];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / l1_norms[i];*/
}
}
}
}
}
if (num_procs > 1)
{
hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
}
}
break;
case 19: /* Direct solve: use gaussian elimination */
{
HYPRE_Int n_global = (HYPRE_Int) global_num_rows;
HYPRE_Int first_index = (HYPRE_Int) first_ind;
/*-----------------------------------------------------------------
* Generate CSR matrix from ParCSRMatrix A
*-----------------------------------------------------------------*/
#ifdef HYPRE_NO_GLOBAL_PARTITION
/* all processors are needed for these routines */
A_CSR = hypre_ParCSRMatrixToCSRMatrixAll(A);
f_vector = hypre_ParVectorToVectorAll(f);
#endif
if (n)
{
#ifndef HYPRE_NO_GLOBAL_PARTITION
A_CSR = hypre_ParCSRMatrixToCSRMatrixAll(A);
f_vector = hypre_ParVectorToVectorAll(f);
#endif
A_CSR_i = hypre_CSRMatrixI(A_CSR);
A_CSR_j = hypre_CSRMatrixJ(A_CSR);
A_CSR_data = hypre_CSRMatrixData(A_CSR);
f_vector_data = hypre_VectorData(f_vector);
A_mat = hypre_CTAlloc(HYPRE_Real, n_global*n_global, HYPRE_MEMORY_HOST);
b_vec = hypre_CTAlloc(HYPRE_Real, n_global, HYPRE_MEMORY_HOST);
/*---------------------------------------------------------------
* Load CSR matrix into A_mat.
*---------------------------------------------------------------*/
for (i = 0; i < n_global; i++)
{
for (jj = A_CSR_i[i]; jj < A_CSR_i[i+1]; jj++)
{
column = A_CSR_j[jj];
A_mat[i*n_global+column] = A_CSR_data[jj];
}
b_vec[i] = f_vector_data[i];
}
hypre_gselim(A_mat,b_vec,n_global,relax_error);
for (i = 0; i < n; i++)
{
u_data[i] = b_vec[first_index+i];
}
hypre_TFree(A_mat, HYPRE_MEMORY_HOST);
hypre_TFree(b_vec, HYPRE_MEMORY_HOST);
hypre_CSRMatrixDestroy(A_CSR);
A_CSR = NULL;
hypre_SeqVectorDestroy(f_vector);
f_vector = NULL;
}
#ifdef HYPRE_NO_GLOBAL_PARTITION
else
{
hypre_CSRMatrixDestroy(A_CSR);
A_CSR = NULL;
hypre_SeqVectorDestroy(f_vector);
f_vector = NULL;
}
#endif
}
break;
case 98: /* Direct solve: use gaussian elimination & BLAS (with pivoting) */
{
HYPRE_Int n_global = (HYPRE_Int) global_num_rows;
HYPRE_Int first_index = (HYPRE_Int) first_ind;
HYPRE_Int info;
HYPRE_Int one_i = 1;
HYPRE_Int *piv;
/*-----------------------------------------------------------------
* Generate CSR matrix from ParCSRMatrix A
*-----------------------------------------------------------------*/
#ifdef HYPRE_NO_GLOBAL_PARTITION
/* all processors are needed for these routines */
A_CSR = hypre_ParCSRMatrixToCSRMatrixAll(A);
f_vector = hypre_ParVectorToVectorAll(f);
#endif
if (n)
{
#ifndef HYPRE_NO_GLOBAL_PARTITION
A_CSR = hypre_ParCSRMatrixToCSRMatrixAll(A);
f_vector = hypre_ParVectorToVectorAll(f);
#endif
A_CSR_i = hypre_CSRMatrixI(A_CSR);
A_CSR_j = hypre_CSRMatrixJ(A_CSR);
A_CSR_data = hypre_CSRMatrixData(A_CSR);
f_vector_data = hypre_VectorData(f_vector);
A_mat = hypre_CTAlloc(HYPRE_Real, n_global*n_global, HYPRE_MEMORY_HOST);
b_vec = hypre_CTAlloc(HYPRE_Real, n_global, HYPRE_MEMORY_HOST);
/*---------------------------------------------------------------
* Load CSR matrix into A_mat.
*---------------------------------------------------------------*/
for (i = 0; i < n_global; i++)
{
for (jj = A_CSR_i[i]; jj < A_CSR_i[i+1]; jj++)
{
/* need col major */
column = A_CSR_j[jj];
A_mat[i + n_global*column] = A_CSR_data[jj];
}
b_vec[i] = f_vector_data[i];
}
piv = hypre_CTAlloc(HYPRE_Int, n_global, HYPRE_MEMORY_HOST);
/* write over A with LU */
hypre_dgetrf(&n_global, &n_global, A_mat, &n_global, piv, &info);
/*now b_vec = inv(A)*b_vec */
hypre_dgetrs("N", &n_global, &one_i, A_mat, &n_global, piv, b_vec, &n_global, &info);
hypre_TFree(piv, HYPRE_MEMORY_HOST);
for (i = 0; i < n; i++)
{
u_data[i] = b_vec[first_index+i];
}
hypre_TFree(A_mat, HYPRE_MEMORY_HOST);
hypre_TFree(b_vec, HYPRE_MEMORY_HOST);
hypre_CSRMatrixDestroy(A_CSR);
A_CSR = NULL;
hypre_SeqVectorDestroy(f_vector);
f_vector = NULL;
}
#ifdef HYPRE_NO_GLOBAL_PARTITION
else
{
hypre_CSRMatrixDestroy(A_CSR);
A_CSR = NULL;
hypre_SeqVectorDestroy(f_vector);
f_vector = NULL;
}
#endif
}
break;
}
return (relax_error);
}
|
main_opt.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <float.h>
#include <time.h>
#include <string.h>
#include <omp.h>
#include "sphere.h"
#include "hitable_list.h"
#include "camera.h"
#include "material.h"
#include "metal.h"
#include "dielectric.h"
#include "lambertian.h"
/* Trace ray r into the scene and return its color contribution.
 * Scattered rays are followed recursively up to 50 bounces; rays that
 * miss every object get a vertical white-to-blue sky gradient. */
Vector color(const Ray r, Hitable_List *world_list, int depth)
{
    Hit_Record rec;
    /* t_min = 0.001 avoids re-hitting the surface we just left ("shadow acne"). */
    if (!world_list->hitable.hit((const void *) world_list, r, 0.001, FLT_MAX, &rec)) {
        /* Miss: blend white and light blue by the ray's vertical direction. */
        Vector dir = unit_vector(r.direction(r));
        float t = 0.5 * (dir.point.y + 1.0);
        Vector white = new_vector(new_point(1.0, 1.0, 1.0));
        Vector sky = new_vector(new_point(0.5, 0.7, 1.0));
        return white.add(white.mul_scalar(white, 1.0 - t), white.mul_scalar(sky, t));
    }
    Ray scattered;
    Vector attenuation;
    if (depth < 50 && rec.mat_ptr.scatter((const void *) rec.mat_ptr.inst, r, rec, &attenuation, &scattered)) {
        return attenuation.mul(attenuation, color(scattered, world_list, depth + 1));
    }
    /* Ray absorbed or bounce limit reached: no light contribution. */
    return new_vector(new_point(0, 0, 0));
}
/* Return end - start as a normalized struct timespec.
 * Assumes end >= start; when end's nanosecond field is smaller than
 * start's, one second is borrowed so tv_nsec stays in [0, 1e9). */
struct timespec diff_time(struct timespec start, struct timespec end)
{
    struct timespec delta;
    long nsec = end.tv_nsec - start.tv_nsec;
    if (nsec < 0) {
        /* Borrow a second from the seconds field. */
        delta.tv_sec = end.tv_sec - start.tv_sec - 1;
        delta.tv_nsec = nsec + 1000000000L;
    } else {
        delta.tv_sec = end.tv_sec - start.tv_sec;
        delta.tv_nsec = nsec;
    }
    return delta;
}
/* Ray-tracer driver: reads a scene (image size, sphere list with materials)
 * from argv[1], renders it with OpenMP, and writes a PPM image to argv[2].
 * Fixes over the previous version: fopen/fscanf/malloc results are checked,
 * and the material-name read is bounded to the buffer size. */
int main(int argc, char **argv) {
    if (argc != 3) {
        fprintf(stderr, "%d\n", argc);
        fprintf(stderr, "%s [input file] [output file]\n", argv[0]);
        exit(-1);
    }
    const char *infile = argv[1];
    const char *outfile = argv[2];
    FILE *fin = fopen(infile, "r");
    if (fin == NULL) {
        fprintf(stderr, "cannot open input file '%s'\n", infile);
        exit(-1);
    }
    FILE *fout = fopen(outfile, "wb");
    if (fout == NULL) {
        fprintf(stderr, "cannot open output file '%s'\n", outfile);
        fclose(fin);
        exit(-1);
    }
    int nx;       // image width
    int ny;       // image height
    int ns = 10;  // samples per pixel
    int sphere_count;
    // float R = cos(M_PI / 4);
    /* Validate the header before using the values as array/loop bounds. */
    if (fscanf(fin, "%d %d", &nx, &ny) != 2 ||
        fscanf(fin, "%d", &sphere_count) != 1 ||
        nx <= 0 || ny <= 0 || sphere_count <= 0) {
        fprintf(stderr, "malformed scene file header\n");
        exit(-1);
    }
    fprintf(fout, "P3\n%d %d \n255\n", nx, ny);
    Hitable *list[sphere_count];
    Sphere *sphere;
    Lambertian *lam;
    Dielectric *diel;
    Metal *metal;
    for (int i = 0; i < sphere_count; i++) {
        float radius;
        char material_str[11];
        Vector sphere_v = new_vector(new_point(0, 0, 0));
        Vector material_v = new_vector(new_point(0, 0, 0));
        /* %10s bounds the read to 10 chars + NUL, matching material_str. */
        if (fscanf(fin, "%f %f %f", &sphere_v.point.x, &sphere_v.point.y, &sphere_v.point.z) != 3 ||
            fscanf(fin, "%f", &radius) != 1 ||
            fscanf(fin, "%10s", material_str) != 1) {
            fprintf(stderr, "malformed sphere description %d\n", i);
            exit(-1);
        }
        if (!strcmp(material_str, "metal")) {
            if (fscanf(fin, "%f %f %f", &material_v.point.x, &material_v.point.y, &material_v.point.z) != 3) {
                fprintf(stderr, "malformed metal albedo for sphere %d\n", i);
                exit(-1);
            }
            sphere = (Sphere *) malloc(sizeof(Sphere));
            metal = (Metal *) malloc(sizeof(Metal));
            if (sphere == NULL || metal == NULL) {
                fprintf(stderr, "out of memory\n");
                exit(-1);
            }
            *metal = new_metal(material_v, 0.0);
            *sphere = new_sphere(sphere_v, radius, (void *) metal, metal->mat.scatter);
        } else if (!strcmp(material_str, "lambertian")) {
            if (fscanf(fin, "%f %f %f", &material_v.point.x, &material_v.point.y, &material_v.point.z) != 3) {
                fprintf(stderr, "malformed lambertian albedo for sphere %d\n", i);
                exit(-1);
            }
            sphere = (Sphere *) malloc(sizeof(Sphere));
            lam = (Lambertian *) malloc(sizeof(Lambertian));
            if (sphere == NULL || lam == NULL) {
                fprintf(stderr, "out of memory\n");
                exit(-1);
            }
            *lam = new_lambertian(material_v);
            *sphere = new_sphere(sphere_v, radius, (void *) lam, lam->mat.scatter);
        } else if (!strcmp(material_str, "dielectric")) {
            float ref_idx;
            if (fscanf(fin, "%f", &ref_idx) != 1) {
                fprintf(stderr, "malformed dielectric index for sphere %d\n", i);
                exit(-1);
            }
            sphere = (Sphere *) malloc(sizeof(Sphere));
            diel = (Dielectric *) malloc(sizeof(Dielectric));
            if (sphere == NULL || diel == NULL) {
                fprintf(stderr, "out of memory\n");
                exit(-1);
            }
            *diel = new_dielectric(ref_idx);
            *sphere = new_sphere(sphere_v, radius, (void *) diel, diel->mat.scatter);
        } else {
            fprintf(stderr, "Unknown material '%s'\n", material_str);
            exit(-1);
        }
        list[i] = &sphere->hitable;
        list[i]->inst = (void *) sphere;
    }
    Hitable_List world = new_hitable_list(list, sphere_count);
    Vector lookfrom = new_vector(new_point(13, 2, 3));
    Vector lookat = new_vector(new_point(0, 0, 0));
    float dist_to_focus = 10.0;
    float aperture = 0.1;
    struct timespec start_time;
    clock_gettime(CLOCK_MONOTONIC, &start_time);
    Camera cam = new_camera(lookfrom, lookat, new_vector(new_point(0, 1, 0)), 20, (float) nx / (float) ny, aperture, dist_to_focus);
    //omp_set_num_threads(8);//omp_get_num_procs());
    Color *output = (Color *) malloc(nx * ny * sizeof(Color));
    if (output == NULL) {
        fprintf(stderr, "out of memory\n");
        exit(-1);
    }
    #pragma omp parallel firstprivate(nx, ny, ns, cam, world)
    {
        #pragma omp for schedule(dynamic, 10) collapse(2)
        for (int j = ny - 1; j >= 0; j--) {
            for (int i = 0; i < nx; i++) {
                Vector col = new_vector(new_point(0, 0, 0));
                float point_x = 0.0, point_y = 0.0, point_z = 0.0;
                /* Sampling ns times per pixel area.
                 * NOTE(review): drand48() keeps hidden global state and is not
                 * guaranteed thread-safe; consider erand48() with a per-thread
                 * seed buffer — verify on the target libc. */
                #pragma omp parallel for reduction(+:point_x, point_y, point_z) default(none) firstprivate(i, j, nx, ny, ns, cam, world) schedule(dynamic, 1)
                for (int s = 0; s < ns; s++) {
                    /* Jitter the sample within the pixel: offset in (0.0, 1.0]. */
                    float u = (float) (i + drand48()) / (float) nx;
                    float v = (float) (j + drand48()) / (float) ny;
                    /* Trace a ray through the sample point. */
                    Ray r = cam.get_ray(cam, u, v);
                    // Vector p = r.point_at_parameter(r, 2.0);
                    /* Accumulate the color of this random sample. */
                    Vector temp = color(r, &world, 0);
                    point_x += temp.point.x;
                    point_y += temp.point.y;
                    point_z += temp.point.z;
                }
                col.point.x = point_x;
                col.point.y = point_y;
                col.point.z = point_z;
                /* Average the ns samples, then gamma-correct (gamma 2). */
                col = col.div_scalar(col, (float) ns);
                col = new_vector(new_point(sqrt(col.point.x), sqrt(col.point.y), sqrt(col.point.z)));
                int ir = (int) (255.99 * col.point.x);
                int ig = (int) (255.99 * col.point.y);
                int ib = (int) (255.99 * col.point.z);
                output[i * ny + j] = new_color(ir, ig, ib);
            }
        }
    }
    /* Emit pixels in scanline order (top row first). */
    for (int j = ny - 1; j >= 0; j--) {
        for (int i = 0; i < nx; i++) {
            int ir = output[i * ny + j].r;
            int ig = output[i * ny + j].g;
            int ib = output[i * ny + j].b;
            fprintf(fout, "%d %d %d\n", ir, ig, ib);
        }
    }
    struct timespec end_time;
    clock_gettime(CLOCK_MONOTONIC, &end_time);
    struct timespec elapsed = diff_time(start_time, end_time);
    double elapsed_time = elapsed.tv_sec + (double) elapsed.tv_nsec / 1000000000.0;
    printf("Elapsed: %f seconds\n", elapsed_time);
    fclose(fin);
    fclose(fout);
    free(output);
    for (int i = 0; i < sphere_count; i++) {
        Sphere *sphere = (Sphere *) list[i]->inst;
        free(sphere->mat_ptr.inst);
        free(sphere);
    }
    return 0;
}
|
cholesky_omp.c | /**
* This version is stamped on May 10, 2016
*
* Contact:
* Louis-Noel Pouchet <pouchet.ohio-state.edu>
* Tomofumi Yuki <tomofumi.yuki.fr>
*
* Web address: http://polybench.sourceforge.net
*/
/* cholesky.c: this file is part of PolyBench/C */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
#include <omp.h>
#include "util.h"
#include "papi.h"
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
#include "cholesky.h"
double **I, **O;
double **Aux;
int size;
int nthreads;
int opt;
/* Array initialization. */
/* Allocate the global matrices I, O and scratch Aux, fill A and I with a
 * deterministic lower-triangular pattern, then replace I by I * I^T so it
 * is symmetric positive semi-definite.  O stays zero-initialized (calloc);
 * Aux is freed before returning. */
static void init_array(int n, DATA_TYPE POLYBENCH_2D(A,N,N,n,n)){
    /* BUG FIX: the row-pointer arrays were allocated with sizeof(double)
     * instead of sizeof(double *).  That only works where the two sizes
     * happen to coincide (64-bit); use the pointee size of the variable. */
    I = malloc(n * sizeof *I);
    O = calloc(n, sizeof *O);
    Aux = malloc(n * sizeof *Aux);
    if (I == NULL || O == NULL || Aux == NULL) {
        fprintf(stderr, "init_array: out of memory\n");
        exit(1);
    }
    int i, j;
    for (i = 0; i < n; i++){
        I[i] = malloc(n * sizeof *I[i]);
        O[i] = calloc(n, sizeof *O[i]);
        Aux[i] = malloc(n * sizeof *Aux[i]);
        if (I[i] == NULL || O[i] == NULL || Aux[i] == NULL) {
            fprintf(stderr, "init_array: out of memory\n");
            exit(1);
        }
        for (j = 0; j <= i; j++){
            A[i][j] = (DATA_TYPE)(-j % n) / n + 1;
            I[i][j] = (DATA_TYPE)(-j % n) / n + 1;
            // O[i][j] = (DATA_TYPE)(-j % n) / n + 1;
        }
        for (j = i+1; j < n; j++) {
            A[i][j] = 0;
            I[i][j] = A[i][j];
        }
        A[i][i] = 1;
        I[i][i] = A[i][i];
    }
    /* Make the matrix positive semi-definite: I <- I * I^T. */
    int r,s,t;
    for (r = 0; r < n; ++r)
        for (s = 0; s < n; ++s)
            Aux[r][s] = 0;
    for (t = 0; t < n; ++t)
        for (r = 0; r < n; ++r)
            for (s = 0; s < n; ++s)
                Aux[r][s] += I[r][t] * I[s][t];
    for (r = 0; r < n; ++r)
        for (s = 0; s < n; ++s){
            I[r][s] = Aux[r][s];
        }
    free2D(Aux);
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* Dump the lower triangle of A using the PolyBench DCE format: scanning the
 * live-out data prevents dead-code elimination of the kernel and allows
 * output comparison for correctness checks. */
static void print_array(int n, DATA_TYPE POLYBENCH_2D(A,N,N,n,n)){
    POLYBENCH_DUMP_START;
    POLYBENCH_DUMP_BEGIN("A");
    for (int i = 0; i < n; i++) {
        for (int j = 0; j <= i; j++) {
            /* Start a fresh line every 20 printed entries. */
            if ((i * n + j) % 20 == 0)
                fprintf (POLYBENCH_DUMP_TARGET, "\n");
            fprintf (POLYBENCH_DUMP_TARGET, DATA_PRINTF_MODIFIER, A[i][j]);
        }
    }
    POLYBENCH_DUMP_END("A");
    POLYBENCH_DUMP_FINISH;
}
/* In-place Cholesky factorization of the global size x size matrix I
 * (right-looking row variant).  After each pivot k is finished, the
 * trailing-submatrix update is parallelized across rows; symmetry is
 * maintained by mirroring every write.  Finally the strict upper
 * triangle is zeroed so I holds the lower factor L. */
static void cholesky_row_lower(){
    int i, j, k;
    for(k = 0; k < size; k++){
        /* BUG FIX: was sqrtf(); I holds doubles, and sqrtf rounds the
         * pivot through float, discarding roughly half the precision. */
        I[k][k] = sqrt(I[k][k]);
        for(j = (k + 1); j < size; j++){
            I[k][j] /= I[k][k];
            I[j][k] = I[k][j];   /* keep I symmetric */
        }
        /* Trailing update: rows i > k are independent of each other. */
#pragma omp parallel for shared(I) private(i,j) num_threads(nthreads)
        for(i = (k + 1); i < size; i++){
            for(j = i; j < size; j++){
                I[i][j] -= I[k][i] * I[k][j];
                I[j][i] = I[i][j];
            }
        }
    }
    /* Zero the strict upper triangle so only L remains. */
    for(i = 0; i < size; i++){
        for(j = i + 1; j < size; j++){
            I[i][j] = 0.0;
        }
    }
}
/* Column-by-column (Crout-style) Cholesky: reads the SPD matrix from the
 * global I and writes the lower-triangular factor into the global O.
 * O must be zero-initialized on entry (init_array uses calloc), so the
 * strict upper triangle stays zero.  Column j is completed before column
 * j+1 begins; within a column the sub-diagonal rows are independent and
 * are computed in parallel. */
static void cholesky_crout(){
    int i, j, k;
    double sum;
#pragma scop
    for (j = 0; j < size; j++) {
        /* Diagonal entry: O[j][j] = sqrt(I[j][j] - sum_k O[j][k]^2). */
        sum = 0;
        for (k = 0; k < j; k++) {
            sum += O[j][k] * O[j][k];
        }
        O[j][j] = SQRT_FUN(I[j][j] - sum);
        /* Rows i > j of column j only read already-finished columns of O,
         * so they can run concurrently; sum is privatized so each thread
         * accumulates its own dot product. */
#pragma omp parallel for private(i, k, sum) shared(I, O, j) num_threads(nthreads)
        for (i = j + 1; i < size; i++) {
            sum = 0;
            for (k = 0; k < j; k++) {
                sum += O[i][k] * O[j][k];
            }
            O[i][j] = (1.0 / O[j][j] * (I[i][j] - sum));
        }
    }
#pragma endscop
}
/* Benchmark driver: argv[1] = thread count, argv[2] = PAPI counter set
 * (1 = cache misses, otherwise cycles/instructions).  Builds the SPD
 * input, times one Cholesky factorization, and reports PAPI counters. */
int main(int argc, char** argv){
    /* BUG FIX: argv[2] (opt) is read below, so TWO arguments are required;
     * the old check (argc < 2) allowed atoi(NULL) on argv[2] to crash. */
    if(argc < 3){
        printf("The program must have two arguments to be executed. ./cholesky_omp.out $nthreads $opt\n");
        return -1;
    }
    nthreads = atoi(argv[1]);
    opt = atoi(argv[2]);
    /* Retrieve problem size. */
    size = N;
    /* Variable declaration/allocation. */
    POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, N, N, size, size);
    /* Initialize array(s). */
    init_array(size, POLYBENCH_ARRAY(A));
    /* Start timer. */
    polybench_start_instruments;
    // printMatrix(I, size);
    int ret;
    int c1[3] = {PAPI_L1_TCM, PAPI_L2_TCM, PAPI_L3_TCM};
    int c2[2] = {PAPI_TOT_CYC, PAPI_TOT_INS};
    long long v1[3], v2[2];
    /* Select the PAPI event set before entering the kernel. */
    if(opt == 1){
        if ((ret = PAPI_start_counters(c1, 3)) != PAPI_OK) {
            fprintf(stderr, "PAPI failed to start counters: %s\n", PAPI_strerror(ret));
            exit(1);
        }
    }
    else{
        if ((ret = PAPI_start_counters(c2, 2)) != PAPI_OK) {
            fprintf(stderr, "PAPI failed to start counters: %s\n", PAPI_strerror(ret));
            exit(1);
        }
    }
    BEGINTIME();
    /* Run kernel. */
    cholesky_row_lower();
    // kernel_cholesky (n, POLYBENCH_ARRAY(A));
    printf("ELAPSED TIME: ");
    ENDTIME();
    // printMatrix(I, size);
    if(opt == 1){
        if ((ret = PAPI_read_counters(v1, 3)) != PAPI_OK) {
            fprintf(stderr, "PAPI failed to read counters: %s\n", PAPI_strerror(ret));
            exit(1);
        }
        printf("TOTAL L1 MISS: %lld\n", v1[0]);
        printf("TOTAL L2 MISS: %lld\n", v1[1]);
        printf("TOTAL L3 MISS: %lld\n", v1[2]);
    }
    else{
        if ((ret = PAPI_read_counters(v2, 2)) != PAPI_OK) {
            fprintf(stderr, "PAPI failed to read counters: %s\n", PAPI_strerror(ret));
            exit(1);
        }
        printf("TOTAL CLOCK CYCLES: %lld\n", v2[0]);
        printf("TOTAL INSTRUCTIONS: %lld\n", v2[1]);
    }
    printf("--------------------------------------\n");
    /* Stop and print timer. */
    polybench_stop_instruments;
    polybench_print_instruments;
    /* Prevent dead-code elimination. All live-out data must be printed
       by the function call in argument. */
    polybench_prevent_dce(print_array(size, POLYBENCH_ARRAY(A)));
    /* Be clean. */
    POLYBENCH_FREE_ARRAY(A);
    free2D(I);
    free2D(O);
    return 0;
}
|
area.omp.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <omp.h>
# define NPOINTS 4000
# define MAXITER 4000
struct complex{
double real;
double imag;
};
/* Estimate the area of the Mandelbrot set by sampling an NPOINTS x NPOINTS
 * grid over the upper half of the bounding rectangle and counting points
 * that escape within MAXITER iterations.  The escape count is accumulated
 * with an OpenMP reduction; timing uses omp_get_wtime(). */
int main(){
    int i, j, iter, numoutside = 0;
    int num_threads;
    double area, error, ztemp;
    struct complex z, c;
    double start, end, cpu_time_used;
    start = omp_get_wtime();
    /*
     * Outer loops run over npoints, initialise z=c.
     * Inner loop has the iteration z=z*z+c, and threshold test.
     * The tiny 1.0e-7 offsets nudge samples off exact boundary points.
     */
#pragma omp parallel for default(shared) private(i,j,c,z,ztemp,iter) reduction(+:numoutside) collapse(2)
    for (i=0; i<NPOINTS; i++) {
        for (j=0; j<NPOINTS; j++) {
            c.real = -2.0+2.5*(double)(i)/(double)(NPOINTS)+1.0e-7;
            c.imag = 1.125*(double)(j)/(double)(NPOINTS)+1.0e-7;
            z=c;
            for (iter=0; iter<MAXITER; iter++){
                ztemp=(z.real*z.real)-(z.imag*z.imag)+c.real;
                z.imag=z.real*z.imag*2+c.imag;
                z.real=ztemp;
                if ((z.real*z.real+z.imag*z.imag)>4.0e0) {
                    numoutside++;
                    break;
                }
            }
        }
    }
    end = omp_get_wtime();
    cpu_time_used = ((double) (end - start));
    /* BUG FIX: previously every thread wrote num_threads concurrently,
     * which is a data race (undefined behavior) even though all threads
     * store the same value.  Let exactly one thread record it. */
#pragma omp parallel
    {
#pragma omp single
        num_threads = omp_get_num_threads();
    }
    /*
     * Calculate area and error and output the results.
     * The factor of 2 accounts for symmetry about the real axis.
     */
    area=2.0*2.5*1.125*(double)(NPOINTS*NPOINTS-numoutside)/(double)(NPOINTS*NPOINTS);
    error=area/(double)NPOINTS;
    printf("Area of Mandlebrot set = %12.8f +/- %12.8f\n",area,error);
    printf("Time taken for calculation: %f on %d threads\n",cpu_time_used,num_threads);
    return 0;
}
|
vect-simd-clone-6.c | /* { dg-require-effective-target vect_simd_clones } */
/* { dg-additional-options "-fopenmp-simd" } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */
#include "tree-vect.h"
#ifndef N
#define N 1024
#endif
int a[N];
long long int b[N];
short c[N];
/* SIMD-clonable scalar kernel: returns a + b + c truncated to short.
 * Two "declare simd" variants are deliberately emitted — one with no
 * clauses and one with uniform(b) linear(c:3) — so the vectorizer can
 * pick the clone matching each call site in bar() below. */
#pragma omp declare simd
#pragma omp declare simd uniform(b) linear(c:3)
__attribute__((noinline)) short
foo (int a, long long int b, short c)
{
  return a + b + c;
}
/* Drives the two SIMD-clone call shapes of foo():
 * x == 0 -> all-vector arguments (plain clone);
 * x != 0 -> b uniform (x) and c linear with step 3 (i * 3),
 *           matching the uniform(b) linear(c:3) clone.
 * Results are stored back into the global array c[]. */
__attribute__((noinline, noclone)) void
bar (int x)
{
  int i;
  if (x == 0)
    {
#pragma omp simd
      for (i = 0; i < N; i++)
	c[i] = foo (a[i], b[i], c[i]);
    }
  else
    {
#pragma omp simd
      for (i = 0; i < N; i++)
	c[i] = foo (a[i], x, i * 3);
    }
}
/* Initializes the global test arrays with deterministic patterns:
 * a[i] = 2*i, b[i] = 6 - 7*i, c[i] = low 5 bits of i shifted left 4. */
__attribute__((noinline, noclone)) void
baz (void)
{
  int i;
  for (i = 0; i < N; i++)
    {
      a[i] = 2 * i;
      b[i] = -7 * i + 6;
      c[i] = (i & 31) << 4;
    }
}
/* Test driver: after baz() and bar(0),
 * c[i] = a[i] + b[i] + c[i] = 2i + (6 - 7i) + ((i & 31) << 4)
 *      = 6 - 5i + ((i & 31) << 4);
 * after a[i] = c[i] and bar(17),
 * c[i] = a[i] + 17 + 3i = 23 - 2i + ((i & 31) << 4).
 * Any mismatch aborts (test failure). */
int
main ()
{
  int i;
  check_vect ();
  baz ();
  bar (0);
  for (i = 0; i < N; i++)
    if (a[i] != 2 * i || b[i] != 6 - 7 * i
	|| c[i] != 6 - 5 * i + ((i & 31) << 4))
      abort ();
    else
      a[i] = c[i];
  bar (17);
  for (i = 0; i < N; i++)
    if (a[i] != 6 - 5 * i + ((i & 31) << 4)
	|| b[i] != 6 - 7 * i
	|| c[i] != 23 - 2 * i + ((i & 31) << 4))
      abort ();
  return 0;
}
|
sm_routing_funcs.c | /* BEGIN_ICS_COPYRIGHT7 ****************************************
Copyright (c) 2015-2020, Intel Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
** END_ICS_COPYRIGHT7 ****************************************/
/* [ICS VERSION STRING: unknown] */
#include "ib_types.h"
#include "sm_l.h"
#include "sm_qos.h"
#include "sa_l.h"
#include "sm_dbsync.h"
#include "fm_xml.h"
// Internal Helper Functions
/* Scratch record used by _guid_cycle_sort: tracks, for one distinct GUID,
 * where its run starts in the sorted array and how many entries remain. */
typedef struct GuidCounter
{
	uint64_t guid;    /* the GUID this counter describes */
	uint32_t offset;  /* index of the next unplaced element with this GUID */
	uint32_t count;   /* number of elements with this GUID not yet placed */
} GuidCounter_t;
/* qsort comparator: orders SwitchportToNextGuid_t entries by ascending
 * next-hop GUID. Returns <0, 0, >0 in the usual qsort convention. */
static int
_compare_guids(const void * arg1, const void * arg2)
{
	const SwitchportToNextGuid_t *lhs = (const SwitchportToNextGuid_t *)arg1;
	const SwitchportToNextGuid_t *rhs = (const SwitchportToNextGuid_t *)arg2;

	/* (a > b) - (a < b) yields 1 / 0 / -1 without branching on each case */
	return (lhs->guid > rhs->guid) - (lhs->guid < rhs->guid);
}
/* qsort comparator: orders entries by ascending lidsRouted on the local
 * switch port, breaking ties by the next switch's numLidsRouted. */
static int
_compare_lids_routed(const void * arg1, const void * arg2)
{
	const SwitchportToNextGuid_t *lhs = (const SwitchportToNextGuid_t *)arg1;
	const SwitchportToNextGuid_t *rhs = (const SwitchportToNextGuid_t *)arg2;

	if (lhs->portp->portData->lidsRouted != rhs->portp->portData->lidsRouted)
		return (lhs->portp->portData->lidsRouted < rhs->portp->portData->lidsRouted) ? -1 : 1;
	if (lhs->nextSwp->numLidsRouted != rhs->nextSwp->numLidsRouted)
		return (lhs->nextSwp->numLidsRouted < rhs->nextSwp->numLidsRouted) ? -1 : 1;
	return 0;
}
/* qsort comparator: primary key ascending GUID, then port lidsRouted,
 * then the next switch's numLidsRouted as final tiebreaker. */
static int
_compare_guids_then_lids_routed(const void * arg1, const void * arg2)
{
	const SwitchportToNextGuid_t *lhs = (const SwitchportToNextGuid_t *)arg1;
	const SwitchportToNextGuid_t *rhs = (const SwitchportToNextGuid_t *)arg2;

	if (lhs->guid != rhs->guid)
		return (lhs->guid < rhs->guid) ? -1 : 1;
	if (lhs->portp->portData->lidsRouted != rhs->portp->portData->lidsRouted)
		return (lhs->portp->portData->lidsRouted < rhs->portp->portData->lidsRouted) ? -1 : 1;
	if (lhs->nextSwp->numLidsRouted != rhs->nextSwp->numLidsRouted)
		return (lhs->nextSwp->numLidsRouted < rhs->nextSwp->numLidsRouted) ? -1 : 1;
	return 0;
}
/* qsort comparator: primary key port lidsRouted, then next switch's
 * numLidsRouted, then GUID, with the local port index as the final
 * tiebreaker so the ordering is total and deterministic. */
static int
_compare_lids_routed_then_guids(const void * arg1, const void * arg2)
{
	const SwitchportToNextGuid_t *lhs = (const SwitchportToNextGuid_t *)arg1;
	const SwitchportToNextGuid_t *rhs = (const SwitchportToNextGuid_t *)arg2;

	if (lhs->portp->portData->lidsRouted != rhs->portp->portData->lidsRouted)
		return (lhs->portp->portData->lidsRouted < rhs->portp->portData->lidsRouted) ? -1 : 1;
	if (lhs->nextSwp->numLidsRouted != rhs->nextSwp->numLidsRouted)
		return (lhs->nextSwp->numLidsRouted < rhs->nextSwp->numLidsRouted) ? -1 : 1;
	if (lhs->guid != rhs->guid)
		return (lhs->guid < rhs->guid) ? -1 : 1;
	if (lhs->portp->index != rhs->portp->index)
		return (lhs->portp->index < rhs->portp->index) ? -1 : 1;
	return 0;
}
// -------------------------------------------------------------------------- //
//
// This re-sorts an array of SwitchportToNextGuid_t structures by cycling
// either the NodeGUID or SystemImageGUID. The input is assumed to already
// be numerically sorted by the desired GUID, and temporary data is stored
// in a scratch space specified in the argument list, which must have room
// for up to MaxNumberOfSwitchPorts * sizeof(GuidCounter_t)
//
// As a superficial example, [1,1,2,2,2,3,3,3] would become [1,2,3,1,2,3,2,3]
//
/* Re-sorts a GUID-sorted array so the distinct GUIDs appear cyclically
 * (e.g. [1,1,2,2,2,3,3,3] -> [1,2,3,1,2,3,2,3]); see block comment above.
 * ports     - array already numerically sorted by the chosen GUID
 * portCount - number of elements in ports
 * scratch   - caller-provided buffer with room for one GuidCounter_t per
 *             distinct GUID (up to MaxNumberOfSwitchPorts entries)
 * useSysGuid- nonzero to cycle on sysGuid, zero to cycle on guid */
static void
_guid_cycle_sort(SwitchportToNextGuid_t *ports, int portCount, void *scratch, int useSysGuid)
{
	int i, j;
	int uniqueGuids = 0;
	GuidCounter_t *gc = (GuidCounter_t*)scratch;
	SwitchportToNextGuid_t tmpPort;
	uint64_t prevGuid = 0;
	uint16_t sortIndex = 0;
	// any numerically ordered 1 or 2 element array is already done
	if (portCount <= 2)
		return;
	// collect info about how many of each guid there are, and
	// where in the array they are located
	//
	for (i = 0; i < portCount; i++) {
		if ((useSysGuid ? ports[i].sysGuid : ports[i].guid) != prevGuid) {
			// new GUID run begins: record its starting offset
			gc[uniqueGuids].guid = useSysGuid ? ports[i].sysGuid : ports[i].guid;
			gc[uniqueGuids].offset = i;
			gc[uniqueGuids].count = 1;
			uniqueGuids++;
		} else {
			gc[uniqueGuids-1].count++;
		}
		ports[i].sortIndex = 0;
		prevGuid = useSysGuid ? ports[i].sysGuid : ports[i].guid;
	}
	// easy out scenarios: can't cycle [1,1,1,1] or [1,2,3,4]
	if (uniqueGuids <= 1 || uniqueGuids == portCount)
		return;
	// sort the array. to preserve existing sort order of remaining
	// elements while determining cyclic order, we sort in 2 linear passes.
	// the first pass marks each element's final sort position
	//
	for (i = 0, j = 0; i < portCount; i++, j = (j + 1) % uniqueGuids) {
		// cycle to an available guid if necessary
		while (gc[j].count == 0) {
			j = (j + 1) % uniqueGuids;
		}
		// take the next unplaced element of GUID group j and assign it
		// the next position in the cyclic output order
		ports[gc[j].offset++].sortIndex = sortIndex++;
		gc[j].count--;
	}
	// since the value we're sorting on is a final array index, we
	// can sort in one pass with < N swaps
	for (i = 0; i < portCount; i++) {
		// until we have the correct element in this position,
		// swap the current element into its final position
		while (ports[i].sortIndex != i) {
			memcpy(&tmpPort, &ports[i], sizeof(SwitchportToNextGuid_t));
			memcpy(&ports[i], &ports[tmpPort.sortIndex], sizeof(SwitchportToNextGuid_t));
			memcpy(&ports[tmpPort.sortIndex], &tmpPort, sizeof(SwitchportToNextGuid_t));
		}
	}
}
/* Running state for the spine-first port-selection scan. */
typedef struct {
	int matching; // true if we've found spines and are only
	              // considering them for routing
} SpineFirstState_t;
/* Per-port classification result produced by _spine_first_test. */
typedef enum {
	SPINE_FIRST_NONE,    // disabled or not matching
	SPINE_FIRST_FIRST,   // first match found
	SPINE_FIRST_MATCH,   // currently matching; match found
	SPINE_FIRST_NOMATCH  // currently matching; no match found
} SpineFirstResult_t;
/* Classifies one candidate egress port for spine-first routing.
 * A port counts as a "spine" link when the remote node shares the local
 * switch's SystemImageGUID or the port is explicitly marked uplink.
 * Updates state->matching the first time a spine is seen and reports
 * how this port relates to the current matching state. */
static __inline__ SpineFirstResult_t
_spine_first_test(SpineFirstState_t *state, Node_t *switchp, Port_t *portp, Node_t *next_nodep)
{
	int is_spine =
	    (switchp->nodeInfo.SystemImageGUID == next_nodep->nodeInfo.SystemImageGUID)
	    || portp->portData->uplink;

	if (!is_spine) {
		// once we've committed to spines, a non-spine is a rejection;
		// before that, it's simply not interesting
		return state->matching ? SPINE_FIRST_NOMATCH : SPINE_FIRST_NONE;
	}
	if (state->matching)
		return SPINE_FIRST_MATCH;
	// first spine encountered: switch into matching mode
	state->matching = 1;
	return SPINE_FIRST_FIRST;
}
/* Orders candidate egress ports so routed LIDs spread evenly across
 * links and remote switches.
 * switchp       - the local switch whose egress ports are being balanced
 * ordered_ports - array of olen candidates, sorted in place
 * olen          - number of candidates; no-op when <= 0
 * On VxWorks the scratch space lives directly after the port array
 * (the caller sized the buffer for both); elsewhere a stack array of
 * MAX_STL_PORTS counters is used. */
static void
_balance_ports(Node_t *switchp, SwitchportToNextGuid_t *ordered_ports, int olen)
{
	int i, j;
#ifdef __VXWORKS__
	GuidCounter_t *scratch = (GuidCounter_t *)(ordered_ports + olen);
#else
	GuidCounter_t scratch[MAX_STL_PORTS] = {{0}};
#endif /* __VXWORKS__ */
	if (olen <= 0) return;
	if (switchp->internalLinks) {
		// no reason to sort on guid since only single link to each remote switch
		qsort(ordered_ports, olen, sizeof(SwitchportToNextGuid_t),
		      _compare_lids_routed);
	} else {
		// group by GUID, interleave the groups cyclically, then re-sort
		// each resulting run by utilization
		qsort(ordered_ports, olen, sizeof(SwitchportToNextGuid_t),
		      _compare_guids_then_lids_routed);
		_guid_cycle_sort(ordered_ports, olen, (void*)(scratch), FALSE);
		for (i = 0, j = 0; i < olen; i++) {
			// a descending GUID step marks the end of a cyclic run [j..i]
			if (i == olen - 1 || ordered_ports[i].guid > ordered_ports[i+1].guid) {
				qsort(&ordered_ports[j], i - j + 1, sizeof(SwitchportToNextGuid_t),
				      _compare_lids_routed_then_guids);
				j = i + 1;
			}
		}
	}
}
/* Processes QOS groups whose SLs were explicitly configured: marks those
 * SLs used in usedSLs, accumulates the SC budget they require into
 * *numSCs, clears stray SLs on non-QoS groups, and validates multicast
 * isolation constraints imposed by the routing algorithm.
 * Returns VSTATUS_OK, or VSTATUS_BAD on a configuration conflict. */
static Status_t
_handle_preassigned_sl(RoutingModule_t *rm, VirtualFabrics_t *vfs, bitset_t *usedSLs, int *numSCs)
{
	int qos;
	Status_t ret = VSTATUS_OK;
	for (qos=0; qos < vfs->number_of_qos_all; qos++) {
		QosConfig_t *pQos = &vfs->qos_all[qos];
		if (!pQos->qos_enable) {
			// Ignore any SLs set on nonQos VFs
			// Don't warn here. Config parser should have warned.
			if (pQos->base_sl != UNDEFINED_XML8) {
				pQos->base_sl = UNDEFINED_XML8;
			}
			if (pQos->resp_sl != UNDEFINED_XML8) {
				pQos->resp_sl = UNDEFINED_XML8;
			}
			if (pQos->mcast_sl != UNDEFINED_XML8) {
				pQos->mcast_sl = UNDEFINED_XML8;
			}
			continue;
		}
		if (pQos->base_sl != UNDEFINED_XML8) {
			// only count SCs the first time an SL is seen across groups
			if(!bitset_test(usedSLs, pQos->base_sl))
				*numSCs += rm->funcs.num_routing_scs(pQos->base_sl, 0);
			bitset_set(usedSLs, pQos->base_sl);
			if (rm->funcs.mcast_isolation_required() && pQos->contains_mcast) {
				if (pQos->mcast_sl == UNDEFINED_XML8) {
					IB_LOG_ERROR_FMT(__func__,
						"QOSGroup %s: Routing algorithm %s requires multicast isolation, but MulticastSL not configured",
						pQos->name, rm->name);
					ret = VSTATUS_BAD;
				} else if (pQos->mcast_sl == pQos->base_sl) {
					IB_LOG_ERROR_FMT(__func__,
						"QOSGroup %s: Routing algorithm %s requires multicast isolation, but MulticastSL matches BaseSL",
						pQos->name, rm->name);
					ret = VSTATUS_BAD;
				}
			}
		}
		if (pQos->resp_sl != UNDEFINED_XML8) {
			if(!bitset_test(usedSLs, pQos->resp_sl))
				*numSCs += rm->funcs.num_routing_scs(pQos->resp_sl, 0);
			bitset_set(usedSLs, pQos->resp_sl);
		}
		if (pQos->mcast_sl != UNDEFINED_XML8) {
			// note: mcast SCs are requested with the mcast flag set
			if(!bitset_test(usedSLs, pQos->mcast_sl))
				*numSCs += rm->funcs.num_routing_scs(pQos->mcast_sl, 1);
			bitset_set(usedSLs, pQos->mcast_sl);
			if (!rm->funcs.mcast_isolation_required() && pQos->base_sl != pQos->mcast_sl) {
				IB_LOG_WARN_FMT(__func__,
					"QOSGroup %s: Including configured MulticastSL %d, although routing algorithm %s doesn't require multicast isolation",
					pQos->name, (unsigned)pQos->mcast_sl, rm->name);
			}
		}
	}
	return ret;
}
/* Assigns an SL to one unconfigured SL slot (*sl == UNDEFINED_XML8).
 * QoS-enabled groups (and the first non-QoS group) allocate a fresh SL
 * from usedSLs — the lowest free bit, or the highest when highsls is
 * set — and add scspersl to *numSCs; subsequent non-QoS groups reuse
 * the SL cached in *noqos. maxsl bounds low-end allocation; text names
 * the SL kind for error messages.
 * Returns VSTATUS_OK, or VSTATUS_BAD when no SL is available. */
static Status_t
_handle_unassigned_sl(QosConfig_t *pQos, uint8_t *sl, bitset_t *usedSLs, int *noqos, boolean highsls,
	int maxsl, char *text, int *numSCs, int scspersl)
{
	int newsl;
	if (*sl == UNDEFINED_XML8) {
		// *noqos == -1 means the shared non-QoS SL hasn't been chosen yet
		if (pQos->qos_enable || *noqos == -1){
			if (!highsls) {
				newsl = bitset_find_first_zero(usedSLs);
				if (newsl >= maxsl) {
					IB_LOG_ERROR_FMT(__func__, "QOSGroup %s: no unused SLs < %d available for %s",
						pQos->name, maxsl, text);
					return VSTATUS_BAD;
				}
			} else {
				newsl = bitset_find_last_zero(usedSLs);
				if (newsl == -1) {
					IB_LOG_ERROR_FMT(__func__, "QOSGroup: %s no unused SLs available for %s",
						pQos->name, text);
					return VSTATUS_BAD;
				}
			}
			bitset_set(usedSLs, newsl);
			*numSCs += scspersl;
			*sl = newsl;
			// remember the SL so later non-QoS groups share it
			if (!pQos->qos_enable) *noqos = newsl;
		} else {
			*sl = *noqos;
		}
	}
	return VSTATUS_OK;
}
// The following functions are defined in sm_qos.c. They are
// particular to default implementation and so not exposed
// in sm_l.h
extern Status_t sm_update_bw(RoutingModule_t *rm, VirtualFabrics_t *VirtualFabrics);
extern Status_t sm_assign_scs_to_sls_FixedMap(RoutingModule_t *rm, VirtualFabrics_t *VirtualFabrics);
extern Status_t sm_assign_scs_to_sls_NonFixedMap(RoutingModule_t *rm, VirtualFabrics_t *VirtualFabrics);
// Routing Functions available to any routing algorithm
/* No-op discovery pre-processing hook; always succeeds. */
Status_t
sm_routing_func_pre_process_discovery_noop(Topology_t *topop, void **outContext)
{
	return VSTATUS_OK;
}
/* No-op per-node discovery hook; always succeeds. */
Status_t
sm_routing_func_discover_node_noop(Topology_t *topop, Node_t *nodep, void *context)
{
	return VSTATUS_OK;
}
/* No-op per-port discovery hook; always succeeds. */
Status_t
sm_routing_func_discover_node_port_noop(Topology_t *topop, Node_t *nodep, Port_t *portp, void *context)
{
	return VSTATUS_OK;
}
/* No-op discovery post-processing hook; always succeeds. */
Status_t
sm_routing_func_post_process_discovery_noop(Topology_t *topop, Status_t discoveryStatus, void *context)
{
	return VSTATUS_OK;
}
/* No-op routing post-processing hook; always succeeds. */
Status_t
sm_routing_func_post_process_routing_noop(Topology_t *topop, Topology_t *old_topop, int *rebalance)
{
	return VSTATUS_OK;
}
/* No-op routing-copy post-processing hook; always succeeds. */
Status_t
sm_routing_func_post_process_routing_copy_noop(Topology_t *src_topop, Topology_t *dst_topop, int *rebalance)
{
	return VSTATUS_OK;
}
/* Ensures the topology's cost and path matrices are large enough for
 * max_sws switches, (re)allocating from sm_pool only when the required
 * size exceeds what is already held. The path matrix is zeroed on
 * allocation; the cost matrix is left for the init function to fill.
 * Returns VSTATUS_OK, or the allocation failure status. */
Status_t
sm_routing_func_alloc_cost_matrix_floyds(Topology_t *topop)
{
	Status_t status;
	size_t req_bytesCost, req_bytesPath;
	/* Allocate space for the cost and path matrix. */
	req_bytesCost = topop->max_sws * topop->max_sws * sizeof(uint16_t);
	if (req_bytesCost > topop->bytesCost) {
		// grow: release the old buffer before allocating the larger one
		topop->bytesCost = 0;
		if (topop->cost != NULL) {
			(void)vs_pool_free(&sm_pool, (void *)topop->cost);
			topop->cost = NULL;
		}
		status = vs_pool_alloc(&sm_pool, req_bytesCost, (void *)&topop->cost);
		if (status != VSTATUS_OK) {
			IB_LOG_ERRORRC("can't malloc cost array rc:", status);
			IB_EXIT(__func__, status);
			return status;
		}
		topop->bytesCost = req_bytesCost;
	}
	req_bytesPath = SM_PATH_SIZE(topop->max_sws);
	if (req_bytesPath > topop->bytesPath) {
		topop->bytesPath = 0;
		if (topop->path != NULL) {
			(void)vs_pool_free(&sm_pool, (void *)topop->path);
			topop->path = NULL;
		}
		status = vs_pool_alloc(&sm_pool, req_bytesPath, (void *)&topop->path);
		if (status != VSTATUS_OK) {
			IB_LOG_ERRORRC("can't malloc path array rc:", status);
			IB_EXIT(__func__, status);
			return status;
		}
		memset(topop->path, 0, req_bytesPath);
		topop->bytesPath = req_bytesPath;
	}
	return VSTATUS_OK;
}
/* Seeds the symmetric cost matrix for Floyd-Warshall: every pair starts
 * at Cost_Infinity, each switch-to-switch link sets the minimum port
 * cost in both directions (recording the egress port in the path
 * portmasks), and the diagonal is zeroed. Always returns VSTATUS_OK. */
Status_t
sm_routing_func_init_cost_matrix_floyds(Topology_t *topop)
{
	int i, j, k, ij, ik, ki, iNumNodes;
	Node_t *nodep, *neighborNodep;
	Port_t *portp;
	/* Set initial values. */
	for (i = 0, iNumNodes = 0; i < topop->max_sws; i++, iNumNodes += topop->max_sws) {
		for (j = 0, ij = iNumNodes; j <= i; j++, ij++) {
			// fill both triangles at once: [i][j] and its mirror [j][i]
			topop->cost[ij] = topop->cost[Index(j,i)] = Cost_Infinity;
		}
	}
	/* Set the costs for known edges. */
	for_all_switch_nodes(topop, nodep) {
		i = nodep->swIdx;
		for_all_physical_ports(nodep, portp) {
			if (sm_valid_port(portp) && portp->state > IB_PORT_DOWN) {
				k = portp->nodeno;
				neighborNodep = sm_find_node(topop, k);
				if (neighborNodep == NULL) {
					IB_LOG_WARN("Node not found, can't adjust cost from node index", k);
					continue;
				} else if (neighborNodep->nodeInfo.NodeType != NI_TYPE_SWITCH) {
					/* we do not use end nodes in the algorithm, just switches */
					continue;
				} else {
					/* k is the switch's index in the switch list, not index in node list */
					k = neighborNodep->swIdx;
				}
				ik = Index(i, k);
				ki = Index(k, i);
				// MIN keeps the cheapest of parallel links between the same pair
				topop->cost[ik] = topop->cost[ki] = MIN(topop->cost[ik], sm_GetCost(portp->portData));
				sm_path_portmask_set(topop->path + ik, portp->index);
				sm_path_portmask_set(topop->path + ki, portp->portno);
			}
		}
	}
	/* Clear the costs for each node to itself. */
	for (i = 0, ij = 0; i < topop->max_sws; i++, ij += topop->max_sws + 1) {
		topop->cost[ij] = 0;
	}
	return VSTATUS_OK;
}
/* Runs Floyd-Warshall over the switch cost matrix (parallelized with
 * OpenMP off VxWorks), flags topology_cost_path_changes when any cost
 * differs from the previous sweep, and — when ideal MC spanning-tree
 * root selection is enabled — picks the root switch by least total cost
 * or least worst-case cost, only replacing an existing root when the
 * improvement exceeds sm_mcRootCostDeltaThreshold percent.
 * Always returns VSTATUS_OK. */
Status_t
sm_routing_func_calc_cost_matrix_floyds(Topology_t * topop, int switches, unsigned short * cost, SmPathPortmask_t * path)
{
	int i, j, k;
	int ij, ik, kj, oldik;
	int kNumNodes, iNumNodes, oldiNumNodes;
	unsigned short value;
	unsigned int total_cost = 0;
	unsigned int leastTotalCost = 0;
	unsigned int max_cost = 0;
	unsigned int leastWorstCaseCost = 0;
	uint64_t leastWorstCostSwitchGuid = 0;
	uint64_t leastTotalCostSwitchGuid = 0;
	unsigned int currRootTotalCost = 0;
	unsigned int currRootWorstCaseCost = 0;
	int curr_selection_sm_neighbor = 0;
	uint8_t old_root_exists = 0;
	Node_t *nodep;
	Node_t *sw = topop->switch_head;
	// a change in switch count (or first sweep) forces full LFT reprogramming
	if (switches != old_topology.max_sws || topology_passcount == 0) {
		topology_cost_path_changes = 1;
	}
	// classic Floyd-Warshall: relax all (i,j) pairs through intermediate k
	for (k = 0, kNumNodes = 0; k < switches; k++, kNumNodes += switches) {
#ifndef __VXWORKS__
#pragma omp parallel for private(j, ij, ik, kj, value) shared(cost, path)
#endif
		for (i = 0; i < switches; ++i) {
			ik = Index(i, k);
			if (cost[ik] == Cost_Infinity) {
				continue;
			}
			// only the upper triangle (j > i) is relaxed; the mirror entry
			// is written alongside to keep the matrix symmetric
			for (j = i + 1, ij = Index(i, j), kj = kNumNodes + i + 1; j < switches; ++j, ++ij, ++kj) {
				if ((value = cost[ik] + cost[kj]) < cost[ij]) {
					cost[ij] = value;
					// Take advantage of the fact that cost array is symmetric
					cost[Index(j, i)] = value;
					path[ij] = path[ik];
					path[Index(j, i)] = path[Index(j, k)];
				} else if (value == cost[ij]) {
					// equal-cost alternative: accumulate ports instead of replacing
					sm_path_portmask_merge(path + ij, path + ik);
					sm_path_portmask_merge(path + Index(j, i), path + Index(j, k));
				}
			}
		}
		if (smDebugPerf && (k & 0xFF) == 0xFF) {
			IB_LOG_INFINI_INFO_FMT(__func__, "completed run %d of %d", k, switches);
		}
	}
	/* All floyd costs are fully computed now and can be analyzed */
	while(sw) {
		total_cost = 0;
		max_cost = 0;
		k = sw->swIdx;
		for (i = 0, iNumNodes = 0, oldiNumNodes = 0; i < switches;
		     i++, iNumNodes += switches, oldiNumNodes += old_topology.max_sws) {
			ik = iNumNodes + k;
			if (i == k) {
				continue;
			}
			if (sm_useIdealMcSpanningTreeRoot && (cost[Index(k, i)] < Cost_Infinity)) {
				/* Calculate costs of switches to determine the best MC spanning tree root. */
				if (sm_mcSpanningTreeRoot_useLeastTotalCost) {
					total_cost += cost[Index(k, i)];
				} else if (sm_mcSpanningTreeRoot_useLeastWorstCaseCost) {
					if (cost[Index(k, i)] > max_cost)
						max_cost = cost[Index(k, i)];
				}
			}
			/* PR 115770. If there is any switch cost/path change (including removal of switches),
			 * set topology_cost_path_changes which will force full LFT reprogramming for all switches.
			 */
			if (topology_cost_path_changes)
				continue;
			if ((k >= old_topology.max_sws) || (i >= old_topology.max_sws))
				continue;
			oldik = oldiNumNodes + k;
			if (old_topology.cost[oldik] != cost[ik]) {
				topology_cost_path_changes = 1;
			}
		}
		if (sm_useIdealMcSpanningTreeRoot) {
			if (sw && sw->nodeInfo.NodeGUID == sm_mcSpanningTreeRootGuid) {
				/* Note the current cost of the root switch. Its cost could have changed
				 * due to topology changes.
				 */
				if (sm_mcSpanningTreeRoot_useLeastTotalCost)
					currRootTotalCost = total_cost;
				if (sm_mcSpanningTreeRoot_useLeastWorstCaseCost)
					currRootWorstCaseCost = max_cost;
			}
			if (sm_mcSpanningTreeRoot_useLeastTotalCost) {
				if ((leastTotalCost == 0 || total_cost < leastTotalCost)) {
					leastTotalCost = total_cost;
					if (sw) {
						leastTotalCostSwitchGuid = sw->nodeInfo.NodeGUID;
						// swIdx 0 is the SM's neighbor switch
						if (sw->swIdx == 0)
							curr_selection_sm_neighbor = 1;
						else
							curr_selection_sm_neighbor = 0;
					}
				} else if (curr_selection_sm_neighbor && (total_cost == leastTotalCost)) {
					/* prefer non SM neighbor when costs are same */
					if (sw && (sw->swIdx != 0)) {
						leastTotalCostSwitchGuid = sw->nodeInfo.NodeGUID;
						curr_selection_sm_neighbor = 0;
					}
				}
			}
			if (sm_mcSpanningTreeRoot_useLeastWorstCaseCost) {
				if (leastWorstCaseCost == 0 || max_cost < leastWorstCaseCost) {
					leastWorstCaseCost = max_cost;
					if (sw) {
						leastWorstCostSwitchGuid = sw->nodeInfo.NodeGUID;
						if (sw->swIdx == 0)
							curr_selection_sm_neighbor = 1;
						else
							curr_selection_sm_neighbor = 0;
					}
				} else if (curr_selection_sm_neighbor && (max_cost == leastWorstCaseCost)) {
					/* prefer non SM neighbor when costs are same */
					if (sw && (sw->swIdx != 0)) {
						leastWorstCostSwitchGuid = sw->nodeInfo.NodeGUID;
						curr_selection_sm_neighbor = 0;
					}
				}
			}
		}
		sw = sw->type_next;
	}
	if (!sm_useIdealMcSpanningTreeRoot)
		return VSTATUS_OK;
	/* Based on the calculations, select the MC Spanning Tree Root Switch */
	old_root_exists = 0;
	if (sm_mcSpanningTreeRootGuid) {
		/* If we identified a root in a previous sweep or if we have just take over
		 * from a master, does the old root still exist ?*/
		if (sm_find_guid(topop, sm_mcSpanningTreeRootGuid))
			old_root_exists = 1;
	}
	if (sm_mcSpanningTreeRoot_useLeastTotalCost) {
		if (!old_root_exists) {
			if (smDebugPerf && sm_mcSpanningTreeRootGuid) {
				IB_LOG_INFINI_INFO_FMT(__func__,
					"MC Spanning tree root switch disappeared, changing to new one.");
			}
			// root GUID is shared with other threads; update under its lock
			if (vs_lock(&sm_mcSpanningTreeRootGuidLock) != VSTATUS_OK) {
				IB_LOG_ERROR0("error in getting mcSpanningTreeRootGuidLock");
				return VSTATUS_OK;
			}
			sm_mcSpanningTreeRootGuid = leastTotalCostSwitchGuid;
			(void)vs_unlock(&sm_mcSpanningTreeRootGuidLock);
			currRootTotalCost = leastTotalCost;
			if (smDebugPerf) {
				IB_LOG_INFINI_INFO_FMT(__func__,
					"MC Spanning Tree Root switch selected. Guid "FMT_U64, sm_mcSpanningTreeRootGuid);
			}
		} else {
			/* change root only if the delta between current root cost and
			 * the newly identified switch's cost is greater than the threshold */
			if (currRootTotalCost && (leastTotalCost < currRootTotalCost)) {
				unsigned int delta = currRootTotalCost - leastTotalCost;
				if (((delta*100)/currRootTotalCost) >= sm_mcRootCostDeltaThreshold) {
					if (smDebugPerf) {
						IB_LOG_INFINI_INFO_FMT(__func__,
							"Changing MC Spanning Tree Root Switch. "
							"Least total cost Old Root %d New Root %d. Delta is above threshold value of %d%%.",
							currRootTotalCost, leastTotalCost, sm_mcRootCostDeltaThreshold);
					}
					if (vs_lock(&sm_mcSpanningTreeRootGuidLock) != VSTATUS_OK) {
						IB_LOG_ERROR0("error in getting mcSpanningTreeRootGuidLock");
						return VSTATUS_OK;
					}
					sm_mcSpanningTreeRootGuid = leastTotalCostSwitchGuid;
					(void)vs_unlock(&sm_mcSpanningTreeRootGuidLock);
					currRootTotalCost = leastTotalCost;
				} else if (smDebugPerf) {
					IB_LOG_INFINI_INFO_FMT(__func__,
						"Not changing MC root switch. "
						"Delta of Current root cost %d new least total cost %d below threshold value of %d%%",
						currRootTotalCost, leastTotalCost, sm_mcRootCostDeltaThreshold);
				}
			}
		}
	}
	if (sm_mcSpanningTreeRoot_useLeastWorstCaseCost) {
		if (!old_root_exists) {
			if (smDebugPerf && sm_mcSpanningTreeRootGuid) {
				IB_LOG_INFINI_INFO_FMT(__func__,
					"MC Spanning tree root switch disappeared, changing to new one.");
			}
			if (vs_lock(&sm_mcSpanningTreeRootGuidLock) != VSTATUS_OK) {
				IB_LOG_ERROR0("error in getting mcSpanningTreeRootGuidLock");
				return VSTATUS_OK;
			}
			sm_mcSpanningTreeRootGuid = leastWorstCostSwitchGuid;
			(void)vs_unlock(&sm_mcSpanningTreeRootGuidLock);
			currRootWorstCaseCost = leastWorstCaseCost;
			if (smDebugPerf) {
				IB_LOG_INFINI_INFO_FMT(__func__,
					"MC Spanning Tree Root switch selected. Guid "FMT_U64, sm_mcSpanningTreeRootGuid);
			}
		} else {
			/* change root only if the delta between current root cost and
			 * the newly identified switch's cost is greater than the threshold */
			if (currRootWorstCaseCost && (leastWorstCaseCost < currRootWorstCaseCost)) {
				unsigned int delta = currRootWorstCaseCost - leastWorstCaseCost;
				if (((delta*100)/currRootWorstCaseCost) >= sm_mcRootCostDeltaThreshold) {
					if (smDebugPerf) {
						IB_LOG_INFINI_INFO_FMT(__func__,
							"Changing MC Spanning Tree Root Switch."
							"Least worst case cost Old Root %d New Root %d. Delta is above threshold value of %d%%.",
							currRootWorstCaseCost, leastWorstCaseCost, sm_mcRootCostDeltaThreshold);
					}
					if (vs_lock(&sm_mcSpanningTreeRootGuidLock) != VSTATUS_OK) {
						IB_LOG_ERROR0("error in getting mcSpanningTreeRootGuidLock");
						return VSTATUS_OK;
					}
					sm_mcSpanningTreeRootGuid = leastWorstCostSwitchGuid;
					(void)vs_unlock(&sm_mcSpanningTreeRootGuidLock);
					currRootWorstCaseCost = leastWorstCaseCost;
				} else if (smDebugPerf) {
					IB_LOG_INFINI_INFO_FMT(__func__,
						"Not changing MC root switch. "
						"Delta of Current root cost %d new least worst case cost %d below threshold value of %d%%",
						currRootWorstCaseCost, leastWorstCaseCost, sm_mcRootCostDeltaThreshold);
				}
			}
		}
	}
	/* communicate MC Root switch guid to standby SMs*/
	(void)sm_dbsync_syncMCRoot(DBSYNC_TYPE_FULL);
	if (smDebugPerf) {
		// report the chosen candidates and the active root (up to 3 nodes)
		int found = 0;
		for_all_switch_nodes(topop, nodep) {
			if (sm_mcSpanningTreeRoot_useLeastTotalCost &&
			    (nodep->nodeInfo.NodeGUID == leastTotalCostSwitchGuid)) {
				IB_LOG_INFINI_INFO_FMT(__func__,
					"Least total cost is %d. Switch is %s Guid "FMT_U64,
					leastTotalCost, sm_nodeDescString(nodep), nodep->nodeInfo.NodeGUID);
				found++;
			}
			if (sm_mcSpanningTreeRoot_useLeastWorstCaseCost &&
			    (nodep->nodeInfo.NodeGUID == leastWorstCostSwitchGuid)) {
				IB_LOG_INFINI_INFO_FMT(__func__,
					"Least worst case cost is %d. Switch is %s Guid "FMT_U64,
					leastWorstCaseCost, sm_nodeDescString(nodep), nodep->nodeInfo.NodeGUID);
				found++;
			}
			if (nodep->nodeInfo.NodeGUID == sm_mcSpanningTreeRootGuid) {
				IB_LOG_INFINI_INFO_FMT(__func__,
					"Current Multicast Spanning Tree Root is %s Guid "FMT_U64,
					sm_nodeDescString(nodep), nodep->nodeInfo.NodeGUID);
				if (currRootTotalCost) {
					IB_LOG_INFINI_INFO_FMT(__func__,
						"Current Spanning Tree Root Total cost is %d ", currRootTotalCost);
				}
				if (currRootWorstCaseCost) {
					IB_LOG_INFINI_INFO_FMT(__func__,
						"Current Spanning Tree Root Least worst case cost is %d ", currRootWorstCaseCost);
				}
				found++;
			}
			if (found == 3)
				break;
		}
	}
	return VSTATUS_OK;
}
/* Routing-mode hook: no route programming required. */
int
sm_routing_func_routing_mode_noop(void)
{
	return STL_ROUTE_NOP;
}
/* Routing-mode hook: linear forwarding tables are used. */
int
sm_routing_func_routing_mode_linear(void)
{
	return STL_ROUTE_LINEAR;
}
/* Boolean capability hook that always answers "no". */
boolean
sm_routing_func_false(void)
{
	return 0;
}
/* Boolean capability hook that always answers "yes". */
boolean
sm_routing_func_true(void)
{
	return 1;
}
/* Per-node boolean hook that always answers "no". */
boolean
sm_routing_func_node_false(Topology_t *topop, Node_t *nodep)
{
	return 0;
}
/* Default reserved-LID hook: no reserved LID for this port. */
STL_LID
sm_routing_func_get_reserved_lid(Topology_t * topop, Node_t * nodep, const Port_t* portp)
{
	return STL_LID_RESERVED;
}
/* No-op routing-copy hook; always succeeds. */
Status_t
sm_routing_func_copy_routing_noop(Topology_t *src_topop, Topology_t *dst_topop)
{
	return VSTATUS_OK;
}
/* Carries LFTs forward from the previous topology sweep: for each switch
 * in dst_topop, either copies the old LFT (plus port-group state and
 * lidsRouted accounting) when nothing relevant changed, or falls back to
 * a full or delta LFT recalculation when the switch is new, its FDB top
 * moved, its active ports changed on an ISL, or end nodes were added.
 * Returns VSTATUS_OK, or a failure from allocation / dispatcher wait. */
Status_t
sm_routing_func_copy_routing_lfts(Topology_t *src_topop, Topology_t *dst_topop)
{
	Status_t status;
	Node_t *nodep, *oldNodep;
	int lftLength;
	for_all_switch_nodes(dst_topop, nodep) {
		if (nodep->switchInfo.LinearFDBCap == 0) {
			IB_LOG_ERROR_FMT(__func__, "switch doesn't support lft: %s",
				sm_nodeDescString(nodep));
			continue;
		}
		oldNodep = sm_find_guid(src_topop, nodep->nodeInfo.NodeGUID);
		// new switch, no previous LFT, or FDB top changed: full rebuild
		if ( oldNodep == NULL || oldNodep->lft == NULL
		     || nodep->switchInfo.LinearFDBTop != oldNodep->switchInfo.LinearFDBTop) {
			if (sm_config.sm_debug_routing)
				IB_LOG_INFINI_INFO_FMT(__func__, "Full LFT for switch %s on old node checks", sm_nodeDescString(nodep));
			status = sm_setup_lft(dst_topop, nodep);
			continue;
		}
		if ( nodep->initPorts.nset_m
		     || !bitset_equal(&nodep->activePorts, &oldNodep->activePorts)) {
			// Active ports changed or moved, recalculate LFTs for this switch if change involved ISL.
			if (dst_topop->routingModule->funcs.handle_fabric_change(dst_topop, oldNodep, nodep)) {
				if (sm_config.sm_debug_routing)
					IB_LOG_INFINI_INFO_FMT(__func__, "Full LFT on port change for switch %s", sm_nodeDescString(nodep));
				status = sm_setup_lft(dst_topop, nodep);
				continue;
			}
		}
		// PR-119954: This PR identified a memory leak that resulted in code in sm_routing_route_old_switch() being executed when
		// nodep->lft pointed to an lft that was already allocated. This resulted in a new lft being allocated without
		// freeing the already allocated lft. The following is similar code. While the following
		// code is not expected to be executed when node->lft is not zero, the following code has been added to
		// free the lft if node->lft is found to be non NULL. This will insure that no memory leak can occur.
		if (nodep->lft) {
			if (sm_config.sm_debug_routing)
				IB_LOG_INFINI_INFO_FMT(__func__, "new lft - switch %s nodep %p nodep->index %u nodep->lft %p",
					sm_nodeDescString(nodep), nodep, nodep->index, nodep->lft);
			vs_pool_free(&sm_pool, nodep->lft);
			nodep->lft = NULL;
		}
		lftLength = sizeof(PORT) * ROUNDUP(nodep->switchInfo.LinearFDBTop+1,MAX_LFT_ELEMENTS_BLOCK);
		if ((status = vs_pool_alloc(&sm_pool, lftLength, (void *)&nodep->lft)) != VSTATUS_OK) {
			IB_FATAL_ERROR_NODUMP("default_copy_routing_lfts: CAN'T ALLOCATE SPACE FOR NODE'S LFT; OUT OF MEMORY IN SM MEMORY POOL! TOO MANY NODES!!");
			return status;
		}
		// unchanged switch: reuse the previous sweep's forwarding table
		memcpy((void *)nodep->lft, (void *)oldNodep->lft, lftLength);
		// Recover lidsRouted
		if (nodep->oldExists) {
			Node_t *oldnodep = nodep->old;
			if (oldnodep) {
				Port_t *portp, *oldportp;
				if (!nodep->numLidsRouted) nodep->numLidsRouted = oldnodep->numLidsRouted;
				for_all_ports(nodep, portp) {
					if (sm_valid_port(portp)) {
						oldportp = sm_get_port(oldnodep, portp->index);
						if (sm_valid_port(oldportp)) {
							portp->portData->lidsRouted = oldportp->portData->lidsRouted;
							/* PR: 143104 Get numLidsRouted from the next Switch, if it existed in the old topology.
							 * Note the NodeGUID comparison below is a sanity check */
							Node_t *nextSwp = sm_find_node(dst_topop, portp->nodeno);
							if (nextSwp && (nextSwp->nodeInfo.NodeType == NI_TYPE_SWITCH) && nextSwp->oldExists) {
								Node_t *oldnextSwp = nextSwp->old;
								if (!nextSwp->numLidsRouted && (nextSwp->nodeInfo.NodeGUID == oldnextSwp->nodeInfo.NodeGUID))
									nextSwp->numLidsRouted = oldnextSwp->numLidsRouted;
							}
						}
					}
				}
			}
		}
		if (nodep->arSupport) {
			// adaptive routing: carry the port-group table and PGFT forward too
			if (nodep->pgt && oldNodep->pgt) {
				memcpy((void *)nodep->pgt, (void *)oldNodep->pgt, sizeof(STL_PORTMASK)*(nodep->switchInfo.PortGroupCap));
				nodep->pgtLen = oldNodep->pgtLen;
			}
			if (nodep->arSupport && oldNodep->pgft) {
				sm_Node_copy_pgft(nodep, oldNodep);
			}
			sm_Node_prune_portgroups(nodep);
		}
		if ( new_endnodesInUse.nset_m
		     || src_topop->num_endports != dst_topop->num_endports) {
			// End node change, calculate deltas LFT changes to switch.
			if (sm_config.sm_debug_routing)
				IB_LOG_INFINI_INFO_FMT(__func__, "Delta route calc for additional endports for switch %s",
					sm_nodeDescString(nodep));
			status = sm_setup_lft_deltas(src_topop, dst_topop, nodep);
		} else if (sm_config.sm_debug_routing) {
			IB_LOG_INFINI_INFO_FMT(__func__, "Copied LFTs for switch %s",
				sm_nodeDescString(nodep));
		}
	}
	// sm_setup_lft work is dispatched asynchronously; wait for completion
	status = sm_dispatch_wait(&sm_asyncDispatch);
	if (status != VSTATUS_OK) {
		sm_dispatch_clear(&sm_asyncDispatch);
		IB_LOG_ERROR_FMT(__func__,
			"Failed to wait for dispatcher completion (rc %d)", status);
		return status;
	}
	return VSTATUS_OK;
}
/* Recomputes all LFTs from scratch when the topology costs changed or a
 * rebalance was requested; otherwise leaves copy/delta handling to the
 * main topology path. Only operates on sm_topop (returns VSTATUS_BAD
 * for any other topology). Sets *rebalance and routing_recalculated
 * whenever a full recalculation is performed. */
Status_t
sm_routing_func_init_switch_routing_lfts(Topology_t * topop, int * routing_needed, int * rebalance)
{
	// Only work on sm_topop/sm_newTopology for now
	if (topop != sm_topop)
		return VSTATUS_BAD;

	if (!topology_cost_path_changes && !*rebalance)
		return VSTATUS_OK;

	// A topology change was indicated: recalculate lfts with the big
	// hammer (rebalance) and record that routing was fully redone.
	Status_t rc = sm_calculate_all_lfts(topop);
	*rebalance = 1;
	routing_recalculated = 1;
	return rc;
}
/* Default per-switch LFT hook: delegates to the common sm_calculate_lft. */
Status_t
sm_routing_func_calculate_lft(Topology_t * topop, Node_t * switchp)
{
	return sm_calculate_lft(topop, switchp);
}
// -------------------------------------------------------------------------- //
//
// This is a common routine used by the LFT and RFT routines to parse
// out what the path is through the fabric in order to setup the routing
// tables.
// See sm_l.h for parameter documentation
Status_t
sm_routing_func_setup_xft(Topology_t *topop, Node_t *switchp, Node_t *nodep, Port_t *orig_portp, uint8_t *portnos)
{
	// Fill portnos[0 .. (1<<lmc)-1] with the egress port on 'switchp' to use
	// for each LID of orig_portp (a port on or behind 'nodep').  0xff marks
	// "no route".  See sm_l.h for parameter documentation.
	int i, j;
	uint8_t numLids;
	int lidsRoutedInc;
	int offset=0;
	int end_port = 0;
#ifdef __VXWORKS__
	// VxWorks: use the per-topology scratch pad instead of a large stack array.
	SwitchportToNextGuid_t *ordered_ports = (SwitchportToNextGuid_t *)topop->pad;
	memset(ordered_ports, 0, (sizeof(SwitchportToNextGuid_t) + sizeof(GuidCounter_t)) * switchp->nodeInfo.NumPorts);
#else
	SwitchportToNextGuid_t ordered_ports[MAX_STL_PORTS] = {{0}};
#endif /* __VXWORKS__ */
	IB_ENTER(__func__, switchp, nodep, orig_portp, 0);
	// One LID per LMC value; only end nodes (non-switches) count toward the
	// per-port LID balancing statistics.
	numLids = 1 << orig_portp->portData->lmc;
	lidsRoutedInc = ((nodep->nodeInfo.NodeType != NI_TYPE_SWITCH) ? 1 : 0);
	memset((void*)portnos, 0xff, sizeof(uint8_t) * numLids);
	//
	// If this is a FI, then we look for a path to the
	// switch it is connected to.
	//
	i = switchp->swIdx;
	if (nodep->nodeInfo.NodeType == NI_TYPE_SWITCH) {
		j = nodep->swIdx;
	} else {
		Node_t *ntp = sm_find_node(topop, orig_portp->nodeno);
		if (ntp) {
			j = ntp->swIdx;
		} else {
			Node_t *ntp = orig_portp->portData->nodePtr;
			IB_LOG_ERROR_FMT(__func__,
				"Failed to find neighbor node %u, "FMT_U64" from NodeGUID "FMT_U64" Port %d",
				orig_portp->nodeno, ntp->nodeInfo.NodeGUID, nodep->nodeInfo.NodeGUID, orig_portp->index);
			// NOTE(review): dead store — the function returns immediately.
			j = -1;
			return VSTATUS_BAD;
		}
	}
	//
	// If this node is hooked directly to the switch in question, we know the
	// answer.
	//
	if (j == i) {
		// Nota Bene: note the implicit assumption that port numbers
		// are 8 bits long..
		memset(portnos, orig_portp->portno, numLids);
		IB_EXIT(__func__, VSTATUS_OK);
		return VSTATUS_OK;
	}
	//
	// We now are in a situation where in order to get from node[i]
	// to node[j], we need to send to node[nodeno]. We need to find
	// the ports which go to node[nodeno].
	//
	if (orig_portp->portData->lmc == 0) {
		// select best port, _select_ports will return 1 or 0 (no path)
		if ((end_port = topop->routingModule->funcs.select_ports(topop, switchp, j, ordered_ports, 1)) == 0) {
			IB_LOG_ERROR_FMT(__func__,
				"Failed to find an outbound port on NodeGUID "FMT_U64" to NodeGUID "FMT_U64" Port %d",
				switchp->nodeInfo.NodeGUID, nodep->nodeInfo.NodeGUID, orig_portp->index);
			IB_EXIT(__func__, VSTATUS_BAD);
			return VSTATUS_BAD;
		}
		portnos[0] = ordered_ports[0].portp->index;
		// update number of LIDs routed through the chosen port
		if (portnos[0] != 0xff && nodep->nodeInfo.NodeType != NI_TYPE_SWITCH) {
			ordered_ports[0].portp->portData->lidsRouted += lidsRoutedInc;
			ordered_ports[0].nextSwp->numLidsRouted += lidsRoutedInc;
		}
	} else { // lmc > 0
		// Gather all candidate ports, then spread the 2^LMC LIDs across them.
		end_port = topop->routingModule->funcs.select_ports(topop, switchp, j, ordered_ports, 0);
		if (!end_port) {
			IB_LOG_ERROR_FMT(__func__,
				"Failed to find outbound ports on NodeGUID "FMT_U64" to NodeGUID "FMT_U64" Port %d",
				switchp->nodeInfo.NodeGUID, nodep->nodeInfo.NodeGUID, orig_portp->index);
			IB_EXIT(__func__, VSTATUS_BAD);
			return VSTATUS_BAD;
		}
		if(end_port >= MAX_STL_PORTS) {
			IB_LOG_ERROR_FMT(__func__,"Number of ports are greater than maximum number of ports");
			IB_EXIT(__func__, VSTATUS_BAD);
			return VSTATUS_BAD;
		}
		// balance the port order to filter the best to the top
		_balance_ports(switchp, ordered_ports, end_port);
		// reduce to the best 2^LMC paths
		if (end_port > numLids) end_port = numLids;
		// balance the final set of paths in terms of the base lid
		// by selecting an appropriate offset
		offset = sm_balance_base_lids(ordered_ports, end_port);
		// fill in outbound port number array, round-robin from 'offset'
		for (i = 0; i < numLids; i++) {
			j = (i + offset) % end_port;
			portnos[i] = ordered_ports[j].portp->index;
			ordered_ports[j].portp->portData->lidsRouted += lidsRoutedInc;
			ordered_ports[j].nextSwp->numLidsRouted += lidsRoutedInc;
		}
		// the port at 'offset' carries the base LID; track that separately
		++ordered_ports[offset].portp->portData->baseLidsRouted;
		++ordered_ports[offset].nextSwp->numBaseLidsRouted;
	}
	if (portnos[0] == 0xff && smDebugPerf) {
		IB_LOG_INFINI_INFO_FMT(__func__, "Failed to setup LID 0x%.4X for switch[%d : "FMT_U64"] %s",
			orig_portp->portData->lid, switchp->index, switchp->nodeInfo.NodeGUID, sm_nodeDescString(switchp));
	}
	IB_EXIT(__func__, VSTATUS_OK);
	return VSTATUS_OK;
}
// selects all best ports to the provided switch index
// returns the number of ports found (0 if none)
//
// ** Note: This is one of the hottest functions in the FM. **MODIFY WITH CAUTION**
int
sm_routing_func_select_ports(Topology_t *topop, Node_t *switchp, int endIndex, SwitchportToNextGuid_t *ordered_ports, boolean selectBest)
{
	// Collect into ordered_ports[] the best egress ports on 'switchp'
	// toward the switch with index 'endIndex'.  When selectBest is true,
	// at most one (least-loaded) port is kept; otherwise all ports tied
	// at the best speed are kept.  Returns the number of ports found.
	int i, j;
	uint16_t cur_speed = 0;
	uint16_t best_speed = 0;
	uint16_t best_lidsRouted = 0xffff;
	uint32_t best_switchLidsRouted = 0xffffffff;
	Node_t *next_nodep;
	Port_t *portp;
	SpineFirstState_t sfstate;
	SpineFirstResult_t sfres;
	SmPathPortmask_t ports;
	uint8_t cport;
	uint8_t doSpineCheck;
	uint8_t end_port = 0;
	i = switchp->swIdx;
	j = endIndex;
	// Candidate egress ports come from the precomputed path matrix.
	ports = topop->path[Index(i, j)];
	sfstate.matching = 0;
	doSpineCheck = topop->routingModule->funcs.do_spine_check(topop, switchp);
	while ((cport = sm_path_portmask_pop_first(&ports))) {
		portp = sm_get_port(switchp, cport);
		if (!sm_valid_port(portp) || portp->state <= IB_PORT_DOWN)
			continue;
		next_nodep = sm_find_node(topop, portp->nodeno);
		if (next_nodep == NULL)
			continue;
		if (i == next_nodep->swIdx)
			continue; // avoid loopback links
		if (doSpineCheck) {
			sfres = _spine_first_test(&sfstate, switchp, portp, next_nodep);
			switch (sfres) {
			case SPINE_FIRST_FIRST:
				// spine first is enabled and this is the first.
				// override anything we've previously seen with this
				best_speed = sm_GetSpeed(portp->portData);
				best_lidsRouted = portp->portData->lidsRouted;
				best_switchLidsRouted = next_nodep->numLidsRouted;
				ordered_ports[0].portp = portp;
				ordered_ports[0].guid = next_nodep->nodeInfo.NodeGUID;
				ordered_ports[0].sysGuid = next_nodep->nodeInfo.SystemImageGUID;
				ordered_ports[0].nextSwp = next_nodep;
				end_port = 1;
				continue;
			case SPINE_FIRST_NOMATCH:
				// we're only considering spines, and this isn't one.
				// discard it
				continue;
			case SPINE_FIRST_MATCH:
				// we've seen at least one spine so far... is this one better?
				// fall through to default behavior to determine
			case SPINE_FIRST_NONE:
				// spine first is not enabled or there are no spines off this
				// node so far. balance normally
				break;
			}
		}
		cur_speed = portp->portData->portSpeed;
		if (cur_speed >= best_speed) {
			if (cur_speed > best_speed) {
				// strictly faster: restart the candidate list with this port
				best_speed = cur_speed;
				best_lidsRouted = portp->portData->lidsRouted;
				best_switchLidsRouted = next_nodep->numLidsRouted;
				end_port = 0;
			}
			else if (selectBest) {
				// same speed, single-best mode: prefer fewer LIDs routed,
				// then fewer LIDs routed through the next-hop switch
				if (portp->portData->lidsRouted < best_lidsRouted) {
					best_lidsRouted = portp->portData->lidsRouted;
					best_switchLidsRouted = next_nodep->numLidsRouted;
					end_port = 0;
				}
				else if (portp->portData->lidsRouted == best_lidsRouted &&
					next_nodep->numLidsRouted < best_switchLidsRouted) {
					best_switchLidsRouted = next_nodep->numLidsRouted;
					end_port = 0;
				}
				else {
					continue;
				}
			}
			// record this candidate (appends in multi-port mode,
			// overwrites slot 0 in single-best mode)
			ordered_ports[end_port].portp = portp;
			ordered_ports[end_port].guid = next_nodep->nodeInfo.NodeGUID;
			ordered_ports[end_port].sysGuid = next_nodep->nodeInfo.SystemImageGUID;
			ordered_ports[end_port].nextSwp = next_nodep;
			++end_port;
		}
	}
	return end_port;
}
Status_t
sm_routing_func_setup_pgs(struct _Topology *topop, struct _Node * srcSw, struct _Node * dstSw)
{
	// Build/refresh the adaptive-routing Port Group Table (PGT) entry and
	// Port Group Forwarding Table (PGFT) entries on srcSw for all LIDs
	// reaching dstSw and the HFIs attached to it.
	//
	// FIX: validate srcSw/dstSw BEFORE any dereference.  The original code
	// dereferenced srcSw->nodeInfo.NumPorts in the VxWorks scratch-buffer
	// setup before the NULL check (undefined behavior when srcSw is NULL;
	// CERT C EXP34-C).
	if (!srcSw || !dstSw) {
		IB_LOG_ERROR_FMT(__func__, "Invalid source or destination pointer.");
		return VSTATUS_BAD;
	}
	if (srcSw->nodeInfo.NodeType != NI_TYPE_SWITCH) {
		IB_LOG_ERROR_FMT(__func__, "%s (0x%"PRIx64") is not a switch.",
			srcSw->nodeDesc.NodeString,
			srcSw->nodeInfo.NodeGUID);
		return VSTATUS_BAD;
	}
	// Optimization. Don't waste time if AR is turned off, if
	// the destination isn't a switch or if source == dest.
	if (dstSw->nodeInfo.NodeType != NI_TYPE_SWITCH ||
		srcSw->swIdx == dstSw->swIdx ||
		!sm_adaptiveRouting.enable || !srcSw->arSupport) {
		return VSTATUS_OK;
	}
	// If port0 isn't valid, we can't finish the calculations.
	if (!sm_valid_port(&dstSw->port[0])) {
		IB_LOG_ERROR_FMT(__func__, "%s (0x%"PRIx64") does not have valid port0 data.",
			srcSw->nodeDesc.NodeString,
			srcSw->nodeInfo.NodeGUID);
		return VSTATUS_BAD;
	}
	// Scratch array of candidate egress ports (safe now: srcSw validated).
#ifdef __VXWORKS__
	SwitchportToNextGuid_t *ordered_ports = (SwitchportToNextGuid_t *)topop->pad;
	memset(ordered_ports, 0, sizeof(SwitchportToNextGuid_t) * srcSw->nodeInfo.NumPorts);
#else
	SwitchportToNextGuid_t ordered_ports[MAX_STL_PORTS] = {{0}};
#endif /* __VXWORKS__ */
	// A port group is only useful when more than one egress port exists.
	int end_port = topop->routingModule->funcs.select_ports(topop, srcSw, dstSw->swIdx, ordered_ports, 0);
	if (end_port <= 1) {
		return VSTATUS_OK;
	}
	// Build the egress-port bitmask; port 0 and ports beyond the mask
	// width cannot participate in a port group.
	STL_PORTMASK pgMask = 0;
	int i;
	for (i = 0; i < end_port; ++i) {
		if (ordered_ports[i].portp->index == 0 ||
			ordered_ports[i].portp->index > sizeof(pgMask)*8) {
			continue;
		}
		// Cast is necessary to prevent compiler from interpreting '1' as a signed
		// int32, converting it to an int64, then or'ing
		pgMask |= (((uint64)1) << (ordered_ports[i].portp->index - 1));
	}
	uint8_t pgid;
	// This just adds PGs to the PGT until all entries
	// are exhausted; it doesn't do anything to ensure that the PGs added are optimal or better than others
	int rc = sm_Push_Port_Group(srcSw->pgt, pgMask, &pgid, &srcSw->pgtLen, srcSw->switchInfo.PortGroupCap);
	if (rc >= 0) {
		// rc > 0 indicates the PGT actually changed and must be re-pushed.
		srcSw->arChange |= (rc > 0);
		srcSw->switchInfo.PortGroupTop = srcSw->pgtLen;
		//PGFT is independent of LFT with LMC, though it's supposed to re-use the LMC data
		PORT * pgft = sm_Node_get_pgft_wr(srcSw);
		uint32_t pgftLen = sm_Node_get_pgft_size(srcSw);
		if (!pgft) {
			IB_LOG_ERROR_FMT(__func__, "Failed to acquire memory for PGFT");
			return VSTATUS_BAD;
		}
		// Add every lid of dstSw to srSw's pgft.
		// (assuming the lid is < the pgftLen)
		STL_LID portLid = 0;
		for_all_port_lids(&dstSw->port[0], portLid) {
			if (portLid < pgftLen) {
				srcSw->arChange |= (pgft[portLid] != pgid);
				pgft[portLid] = pgid;
			}
		}
		// iterate through the end nodes attached to dstSw,
		// adding their LIDs to the pgft.
		// (assuming the lid is < the pgftLen)
		Port_t * edgePort = NULL;
		for_all_physical_ports(dstSw, edgePort) {
			if (!sm_valid_port(edgePort) || edgePort->state <= IB_PORT_DOWN)
				continue;
			Node_t * endNode = NULL;
			Port_t * endPort = sm_find_neighbor_node_and_port(topop, edgePort, &endNode);
			if (!endNode || endNode->nodeInfo.NodeType != NI_TYPE_CA)
				continue;
			if (!endPort || !sm_valid_port(endPort))
				continue;
			for_all_port_lids(endPort, portLid) {
				if (portLid < pgftLen) {
					srcSw->arChange |= (pgft[portLid] != pgid);
					pgft[portLid] = pgid;
				}
			}
		}
	}
	return VSTATUS_OK;
}
int
sm_routing_func_get_port_group(Topology_t *topop, Node_t *switchp, Node_t *nodep, uint8_t *portnos)
{
	// Return in portnos[] the set of egress ports on 'switchp' toward the
	// switch 'nodep', sorted by next-hop GUID for a stable order.  Returns
	// the number of ports (0 if nodep isn't a switch or is switchp itself).
	int i, j;
	int end_port = 0;
#ifdef __VXWORKS__
	SwitchportToNextGuid_t *ordered_ports = (SwitchportToNextGuid_t *)topop->pad;
	memset(ordered_ports, 0, sizeof(SwitchportToNextGuid_t) * switchp->nodeInfo.NumPorts);
#else
	SwitchportToNextGuid_t ordered_ports[MAX_STL_PORTS] = {{0}};
#endif /* __VXWORKS__ */
	IB_ENTER(__func__, switchp, nodep, 0, 0);
	// 0xff marks unused slots.  NOTE(review): assumes the caller's portnos
	// buffer holds at least 128 entries — confirm against callers.
	memset((void*)portnos, 0xff, sizeof(uint8_t)*128);
	if (nodep->nodeInfo.NodeType != NI_TYPE_SWITCH) {
		IB_EXIT(__func__, VSTATUS_OK);
		return 0;
	}
	i = switchp->swIdx;
	j = nodep->swIdx;
	if (j == i) {
		IB_EXIT(__func__, VSTATUS_OK);
		return 0;
	}
	end_port = topop->routingModule->funcs.select_ports(topop, switchp, j, ordered_ports, 0);
	// stable, GUID-ordered result for deterministic port groups
	qsort(ordered_ports, end_port, sizeof(SwitchportToNextGuid_t), _compare_guids);
	for (i=0; i<end_port; i++) {
		portnos[i] = ordered_ports[i].portp->index;
	}
	if (portnos[0] == 0xff && smDebugPerf) {
		IB_LOG_INFINI_INFO_FMT(__func__, "Failed to get portGroup from switch %s to switch %s",
			sm_nodeDescString(switchp), sm_nodeDescString(nodep));
	}
	IB_EXIT(__func__, VSTATUS_OK);
	return end_port;
}
// Routing-module hook: SL-to-SC mapping for a link.  Thin wrapper over the
// common implementation.
Status_t
sm_routing_func_select_slsc_map(Topology_t *topop, Node_t *nodep,
	Port_t *in_portp, Port_t *out_portp, STL_SLSCMAP *outSlscMap)
{
	return sm_select_slsc_map(topop, nodep, in_portp, out_portp, outSlscMap);
}
// Routing-module hook: SC-to-SL mapping for a link.  Thin wrapper over the
// common implementation.
Status_t
sm_routing_func_select_scsl_map(Topology_t *topop, Node_t *nodep,
	Port_t *in_portp, Port_t *out_portp, STL_SCSLMAP *outScslMap)
{
	return sm_select_scsl_map(topop, nodep, in_portp, out_portp, outScslMap);
}
Status_t
sm_routing_func_select_scsc_map(Topology_t *topop, Node_t *switchp, int getSecondary, int *numBlocks, STL_SCSC_MULTISET** scscmap)
{
	// Build the SC-to-SC multiset for 'switchp'.  The default routing module
	// programs a single identity (1:1) SC->SC table for every ingress port
	// that needs it; on success *numBlocks is 1 and *scscmap owns the
	// allocation, otherwise *numBlocks stays 0.
	int i;
	Port_t * portp = NULL;
	STL_SCSC_MULTISET *scsc=NULL;
	int portToSet = 0;
	int needsSet = 0;
	STL_SCSCMAP *scscTmp= NULL;
	*numBlocks = 0;
	// Skip on later sweeps unless ports changed or a rewrite is forced;
	// secondary SCSC tables are not produced by this module.
	if ((topology_passcount && !topology_switch_port_changes && !sm_config.forceAttributeRewrite) || getSecondary)
		return VSTATUS_OK;
	if (vs_pool_alloc(&sm_pool, sizeof(STL_SCSC_MULTISET), (void *) &scsc) != VSTATUS_OK) {
		return VSTATUS_BAD;
	}
	memset(scsc, 0, sizeof(STL_SCSC_MULTISET));
	// TBD: Fattree is simple 1:1 mapping.
	// leaving in as test vehicle but may remove later.
	for (i=0; i<STL_MAX_SCS; i++) {
		scsc->SCSCMap.SCSCMap[i].SC = i;
	}
	// Mark every ingress port whose cached table differs from the identity map
	// (or which was never programmed).
	for_all_physical_ports(switchp, portp) {
		if (!sm_valid_port(portp) || portp->state <= IB_PORT_DOWN) continue;
		needsSet = !portp->portData->current.scsc || sm_config.forceAttributeRewrite;
		if (!needsSet) {
			for (i=1; i<=switchp->nodeInfo.NumPorts; i++) {
				scscTmp = sm_lookupPortDataSCSCMap(portp, i-1, 0);
				if (!scscTmp || (memcmp((void *)&scsc->SCSCMap, (void *)scscTmp, sizeof(STL_SCSCMAP)) != 0)) {
					needsSet = 1;
					break;
				}
			}
		}
		if (needsSet) {
			StlAddPortToPortMask(scsc->IngressPortMask, portp->index);
			portToSet = 1;
		}
	}
	if (portToSet) {
		// Set entire table for any ingress port needing a set
		for (i=1; i<=switchp->nodeInfo.NumPorts; i++)
			StlAddPortToPortMask(scsc->EgressPortMask, i);
		*numBlocks = 1;
		*scscmap = scsc;
	} else {
		// nothing to program — release the unused multiset
		(void) vs_pool_free(&sm_pool, scsc);
	}
	return VSTATUS_OK;
}
// Routing-module hook: SC-to-VL mapping using the fixed map for the egress
// port's supported VL count.
Status_t
sm_routing_func_select_scvl_map_fixedmap(Topology_t *topop, Node_t *nodep,
	Port_t *in_portp, Port_t *out_portp, STL_SCVLMAP *outScvlMap)
{
	Qos_t *qos = sm_get_qos(out_portp->portData->vl1);
	memcpy(outScvlMap, &qos->scvl, sizeof(STL_SCVLMAP));
	return VSTATUS_OK;
}
// Routing-module hook: VL-to-VF mapping.  Thin wrapper over the common
// implementation.
Status_t
sm_routing_func_select_vlvf_map(Topology_t *topop, Node_t *nodep, Port_t *portp, VlVfMap_t * vlvfmap)
{
	return sm_select_vlvf_map(topop, nodep, portp, vlvfmap);
}
// Routing-module hook: per-VL bandwidth shares, taken from the QoS config
// for the port's supported VL count.
Status_t
sm_routing_func_select_vlbw_map(Topology_t *topop, Node_t *nodep, Port_t *portp, VlBwMap_t * vlbwmap)
{
	Qos_t *qos = sm_get_qos(portp->portData->vl1);
	memcpy(vlbwmap, &qos->vlBandwidth, sizeof(VlBwMap_t));
	return VSTATUS_OK;
}
// Routing-module hook: SC-to-VLr (requester VL) mapping.  Thin wrapper.
Status_t
sm_routing_func_select_scvlr_map(Topology_t *topop, uint8_t vlCap, STL_SCVLMAP *outScvlMap)
{
	return sm_select_scvlr_map(topop, vlCap, outScvlMap);
}
// Routing-module hook: VL arbitration table fill.  Thin wrapper.
Status_t
sm_routing_func_fill_stl_vlarb_table(Topology_t *topop, Node_t *nodep, Port_t *portp, PortDataVLArb* arbp)
{
	return sm_fill_stl_vlarb_table(topop, nodep, portp, arbp);
}
// Routing-module hook: choose the source/destination LID pairs for a path.
// Thin wrapper over the common implementation.
Status_t
sm_routing_func_select_path_lids(Topology_t *topop, Port_t *srcPortp, STL_LID slid, Port_t *dstPortp,
	STL_LID dlid, STL_LID *outSrcLids, uint8_t *outSrcLen, STL_LID *outDstLids, uint8_t *outDstLen)
{
	return sm_select_path_lids(topop, srcPortp, slid, dstPortp, dlid, outSrcLids, outSrcLen, outDstLids, outDstLen);
}
// Routing-module hook: switch-index change notification — no-op in the
// default module.
Status_t
sm_routing_func_process_swIdx_change_noop(Topology_t * topop, int old_idx, int new_idx, int last_idx)
{
	return VSTATUS_OK;
}
int
sm_routing_func_check_switch_path_change(Topology_t * oldtp, Topology_t * newtp, Node_t *switchp)
{
	/* PR 115770.  Any switch cost change forces a full LFT recompute for
	 * all switches: multiple equal-cost paths may exist between a pair of
	 * switches with routes distributed across them, while the cost/path
	 * matrix records only one best cost and path.  Checking only this
	 * switch's cost/path entries could therefore miss a change somewhere
	 * along one of the alternate paths.
	 */
	return topology_cost_path_changes ? 1 : 0;
}
// Routing-module hook: routing never needs recalculation for this node.
boolean
sm_routing_func_needs_routing_recalc_false(Topology_t * topop, Node_t * nodep)
{
	return 0;
}
// Routing-module hook: routing always needs recalculation for this node.
boolean
sm_routing_func_needs_routing_recalc_true(Topology_t * topop, Node_t * nodep)
{
	return 1;
}
// Routing-module hook: LFTs need recalculation unless a full recalc already
// happened this sweep (globally or for this specific node).
boolean
sm_routing_func_needs_lft_recalc(Topology_t * topop, Node_t * nodep)
{
	return !routing_recalculated && !nodep->routingRecalculated;
}
// Routing-module hook: spine-first candidate filtering applies only when
// enabled in config and the switch has inter-switch (internal) links.
boolean
sm_routing_func_do_spine_check(Topology_t * topop, Node_t * switchp)
{
	return sm_config.spine_first_routing && switchp->internalLinks;
}
// Routing-module hook: write only the minimal LFT blocks.  Thin wrapper.
Status_t
sm_routing_func_write_minimal_lft_blocks(Topology_t * topop, Node_t * switchp, SmpAddr_t * addr)
{
	return sm_write_minimal_lft_blocks(topop, switchp, addr);
}
// Routing-module hook: write full LFTs for a switch list using LR-routed
// SMPs.  Thin wrapper.
Status_t
sm_routing_func_write_full_lfts_LR(Topology_t * topop, SwitchList_t * swlist, int rebalance)
{
	return sm_write_full_lfts_by_block_LR(topop, swlist, rebalance);
}
// Routing-module hook: carry routing forward from the old topology for an
// unchanged switch.  Thin wrapper.
Status_t
sm_routing_func_route_old_switch(Topology_t *src_topop, Topology_t *dst_topop, Node_t *nodep)
{
	return sm_routing_route_old_switch(src_topop, dst_topop, nodep);
}
// Returns 1 when an inter-switch link (ISL) changed on 'switchp' between the
// old and new sweeps — a new ISL came up, a port changed role, or an ISL was
// lost — which requires routing attention.  Returns 0 otherwise.
boolean
sm_routing_func_handle_fabric_change(Topology_t *topop, Node_t *oldSwitchp, Node_t *switchp)
{
	Port_t* portp = NULL;
	int port;
	// Scan ports that came (re)initialized this sweep.
	for (port= bitset_find_first_one(&switchp->initPorts); port >=0;
		port= bitset_find_next_one(&switchp->initPorts, port+1)) {
		portp = sm_get_port(switchp, port);
		if (!sm_valid_port(portp)) continue;
		if (portp->portData->isIsl)
			// New ISL coming up
			return 1;
		if (bitset_test(&oldSwitchp->activePorts, port)) {
			// If it was active, check to see if it was an ISL
			portp = sm_get_port(oldSwitchp, port);
			if (!sm_valid_port(portp) || portp->portData->isIsl)
				// Moved from switch port to HFI
				return 1;
		}
	}
	// Scan ports that were active before but are no longer.
	if (!bitset_equal(&oldSwitchp->activePorts, &switchp->activePorts)) {
		for (port= bitset_find_first_one(&oldSwitchp->activePorts); port >=0;
			port= bitset_find_next_one(&oldSwitchp->activePorts, port+1)) {
			if (bitset_test(&switchp->activePorts, port)) continue;
			portp = sm_get_port(oldSwitchp, port);
			if (!sm_valid_port(portp)) continue;
			if (portp->portData->isIsl)
				// Lost an ISL
				return 1;
		}
	}
	return 0;
}
// Routing-module hook: recompute per-VF bandwidth allocation.  Thin wrapper.
Status_t
sm_routing_func_update_bw(RoutingModule_t *rm, VirtualFabrics_t *VirtualFabrics)
{
	return sm_update_bw(rm, VirtualFabrics);
}
// Routing-module hook: SC-to-SL assignment using the fixed mapping.
Status_t
sm_routing_func_assign_scs_to_sls_fixedmap(RoutingModule_t *rm, VirtualFabrics_t *VirtualFabrics)
{
	return sm_assign_scs_to_sls_FixedMap(rm, VirtualFabrics);
}
// Routing-module hook: SC-to-SL assignment using the non-fixed mapping.
Status_t
sm_routing_func_assign_scs_to_sls_nonfixedmap(RoutingModule_t *rm, VirtualFabrics_t *VirtualFabrics)
{
	return sm_assign_scs_to_sls_NonFixedMap(rm, VirtualFabrics);
}
Status_t
sm_routing_func_assign_sls(RoutingModule_t *rm, VirtualFabrics_t *vfs)
{
	// Assign Base/Resp/Multicast SLs to every QOSGroup: first honor any SLs
	// preassigned in the config, then fill in the unassigned ones in config
	// order.  QOSGroups that don't require a distinct Resp/Multicast SL
	// reuse the Base SL.  'noqos_*' track the shared SLs handed to non-QoS
	// groups so they are allocated only once.
	int noqos_base = -1;
	int noqos_resp = -1;
	int noqos_mcast = -1;
	int qos;
	Status_t ret = VSTATUS_OK;
	bitset_t usedSLs;
	int numSCs = 0;
	if (!vfs) {
		return ret;
	}
	bitset_init(&sm_pool, &usedSLs, STL_MAX_SLS);
	// Reserve SLs fixed by the configuration before auto-assignment.
	if (VSTATUS_OK != (ret = _handle_preassigned_sl(rm, vfs, &usedSLs, &numSCs))) {
		goto bail;
	}
	// Now assign the unspecified SLs.
	// Assign SLs to QOSGroups in the order they appear in the config file.
	// [This provides a predictable output for users.]
	for (qos=0; qos < vfs->number_of_qos_all; qos++) {
		QosConfig_t *pQos = &vfs->qos_all[qos];
		if (VSTATUS_OK != (ret = _handle_unassigned_sl(pQos, &pQos->base_sl, &usedSLs, &noqos_base,
			0, MAX_SLS, "BaseSL", &numSCs, rm->funcs.num_routing_scs(pQos->base_sl, 0))))
			goto bail;
		if (pQos->requires_resp_sl) {
			if (VSTATUS_OK != (ret = _handle_unassigned_sl(pQos, &pQos->resp_sl, &usedSLs, &noqos_resp,
				1, STL_MAX_SLS, "RespSL", &numSCs, rm->funcs.num_routing_scs(pQos->resp_sl, 0))))
				goto bail;
		} else
		if (pQos->resp_sl == UNDEFINED_XML8) {
			// No dedicated response SL needed — share the base SL.
			pQos->resp_sl = pQos->base_sl;
			if (sm_config.sm_debug_vf)
				IB_LOG_INFINI_INFO_FMT_VF(pQos->name, "",
					"Assigning RespSL to BaseSL %d", pQos->resp_sl);
		}
		if (pQos->contains_mcast && rm->funcs.mcast_isolation_required()) {
			if (VSTATUS_OK != (ret = _handle_unassigned_sl(pQos, &pQos->mcast_sl, &usedSLs,
				&noqos_mcast, 0, MAX_SLS, "MulticastSL", &numSCs,
				rm->funcs.num_routing_scs(pQos->mcast_sl, 1))))
				goto bail;
		} else if (pQos->mcast_sl == UNDEFINED_XML8) {
			// Multicast isolation not required — share the base SL.
			pQos->mcast_sl = pQos->base_sl;
			if (sm_config.sm_debug_vf)
				IB_LOG_INFINI_INFO_FMT_VF(pQos->name, "",
					"Assigning multicast SL to base SL %d", pQos->mcast_sl);
		}
	}
	// TODO: numSCs here does not take into account whether or not MulticastSLs can share
	// the same VL, or if multicast is being overlayed onto the base. These are
	// determined by the routing algorithm. Only side effect right now is printing
	// the wrong information to the log. numSCs isn't used anywhere else.
	IB_LOG_INFINI_INFO_FMT(__func__, "%d QOSGroups require %d SLs and %d SCs for operation",
		vfs->number_of_qos_all, (int)bitset_nset(&usedSLs), numSCs);
bail:
	bitset_free(&usedSLs);
	return ret;
}
// Routing-module hook: minimum VLs required, taken from configuration.
int
sm_routing_func_min_vls(void)
{
	return sm_config.min_supported_vls;
}
// Routing-module hook: maximum VLs usable by this module.
int
sm_routing_func_max_vls(void)
{
	return SCVLMAP_BASE;
}
// Routing-module hook: this module uses exactly one routing SC per SL.
int
sm_routing_func_one_routing_scs(int sl, boolean mcast_sl)
{
	return 1;
}
// Routing-module hook: release any per-node routing data on node deletion.
static void
sm_routing_func_delete_node(Node_t* nodep) {
	if (nodep->routingData) {
		vs_pool_free(&sm_pool, nodep->routingData);
		nodep->routingData = NULL;
	}
}
// Routing-module hook: this module does not oversubscribe SCs.
int
sm_routing_func_no_oversubscribe(int sl, boolean mcast_sl)
{
	return 0;
}
// Routing-module hook: XML config processing — no-op in the default module.
Status_t
sm_routing_func_process_xml_config_noop(void)
{
	return VSTATUS_OK;
}
RoutingFuncs_t defaultRoutingFuncs = {
pre_process_discovery: sm_routing_func_pre_process_discovery_noop,
discover_node: sm_routing_func_discover_node_noop,
discover_node_port: sm_routing_func_discover_node_port_noop,
post_process_discovery: sm_routing_func_post_process_discovery_noop,
post_process_routing: sm_routing_func_post_process_routing_noop,
post_process_routing_copy: sm_routing_func_post_process_routing_copy_noop,
allocate_cost_matrix: sm_routing_func_alloc_cost_matrix_floyds,
initialize_cost_matrix: sm_routing_func_init_cost_matrix_floyds,
calculate_cost_matrix: sm_routing_func_calc_cost_matrix_floyds,
routing_mode: sm_routing_func_routing_mode_linear,
requires_dr: sm_routing_func_node_false,
extended_scsc_in_use: sm_routing_func_false,
copy_routing: sm_routing_func_copy_routing_lfts,
init_switch_routing: sm_routing_func_init_switch_routing_lfts,
setup_switches_lrdr: sm_setup_switches_lrdr_wave_discovery_order,
calculate_routes: sm_routing_func_calculate_lft,
setup_xft: sm_routing_func_setup_xft,
select_ports: sm_routing_func_select_ports,
setup_pgs: sm_routing_func_setup_pgs,
get_port_group: sm_routing_func_get_port_group,
select_slsc_map: sm_routing_func_select_slsc_map,
select_scsl_map: sm_routing_func_select_scsl_map,
select_scsc_map: sm_routing_func_select_scsc_map,
select_scvl_map: sm_routing_func_select_scvl_map_fixedmap,
select_vlvf_map: sm_routing_func_select_vlvf_map,
select_vlbw_map: sm_routing_func_select_vlbw_map,
select_scvlr_map: sm_routing_func_select_scvlr_map,
fill_stl_vlarb_table: sm_routing_func_fill_stl_vlarb_table,
select_path_lids: sm_routing_func_select_path_lids,
process_swIdx_change: sm_routing_func_process_swIdx_change_noop,
check_switch_path_change: sm_routing_func_check_switch_path_change,
needs_routing_recalc: sm_routing_func_needs_lft_recalc,
can_send_partial_routes: sm_routing_func_false,
do_spine_check: sm_routing_func_do_spine_check,
write_minimal_routes: sm_routing_func_write_minimal_lft_blocks,
write_full_routes_LR: sm_routing_func_write_full_lfts_LR,
route_old_switch: sm_routing_func_route_old_switch,
build_spanning_trees: sm_build_spanning_trees,
handle_fabric_change: sm_routing_func_handle_fabric_change,
update_bw: sm_routing_func_update_bw,
assign_scs_to_sls: sm_routing_func_assign_scs_to_sls_fixedmap,
assign_sls: sm_routing_func_assign_sls,
mcast_isolation_required: sm_routing_func_false,
overlay_mcast: sm_routing_func_false,
min_vls: sm_routing_func_min_vls,
max_vls: sm_routing_func_max_vls,
num_routing_scs: sm_routing_func_one_routing_scs,
oversubscribe_factor: sm_routing_func_no_oversubscribe,
delete_node: sm_routing_func_delete_node,
process_xml_config: sm_routing_func_process_xml_config_noop
};
|
GB_binop__pow_fc32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__pow_fc32
// A.*B function (eWiseMult): GB_AemultB__pow_fc32
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__pow_fc32
// C+=b function (dense accum): GB_Cdense_accumb__pow_fc32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__pow_fc32
// C=scalar+B GB_bind1st__pow_fc32
// C=scalar+B' GB_bind1st_tran__pow_fc32
// C=A+scalar GB_bind2nd__pow_fc32
// C=A'+scalar GB_bind2nd_tran__pow_fc32
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// B,b type: GxB_FC32_t
// BinaryOp: cij = GB_cpowf (aij, bij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_BTYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
GxB_FC32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = GB_cpowf (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_POW || GxB_NO_FC32 || GxB_NO_POW_FC32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense (auto-generated kernel; the
// included template performs the work using the GB_* macros defined above).
GrB_Info GB_Cdense_ewise3_noaccum__pow_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__pow_fc32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__pow_fc32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type GxB_FC32_t
        GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the inner block already returned
    // (harmless artifact of the code generator).
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *GB_RESTRICT Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (node)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *GB_RESTRICT Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd C = A+B (or C<M> = A+B) for z = cpowf(x,y) on single-precision
// complex values.  Auto-generated kernel; the template does the work.
GrB_Info GB_AaddB__pow_fc32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// Auto-generated kernel; the template does the work.
GrB_Info GB_AemultB__pow_fc32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = cpowf (x, Bx [p]) for all p, parallelized over nthreads.
GrB_Info GB_bind1st__pow_fc32
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ;
    GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC32_t bij = Bx [p] ;
        Cx [p] = GB_cpowf (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = cpowf (Ax [p], y) for all p, parallelized over nthreads.
GrB_Info GB_bind2nd__pow_fc32
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
    GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC32_t aij = Ax [p] ;
        Cx [p] = GB_cpowf (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name);
// x is the bound scalar (first operand), aij the matrix entry
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = Ax [pA] ; \
Cx [pC] = GB_cpowf (x, aij) ; \
}
// C = op (x, A'): transpose A and apply cpowf with x bound as the 1st operand
GrB_Info GB_bind1st_tran__pow_fc32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
#if GB_DISABLE
// operator compiled out; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
// numerical (second) phase of the transpose only
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows (generated boilerplate)
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name);
// y is the bound scalar (second operand), aij the matrix entry
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = Ax [pA] ; \
Cx [pC] = GB_cpowf (aij, y) ; \
}
// C = op (A', y): transpose A and apply cpowf with y bound as the 2nd operand
GrB_Info GB_bind2nd_tran__pow_fc32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
// operator compiled out; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
// numerical (second) phase of the transpose only
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__abs_uint32_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint32_int64
// op(A') function: GB_tran__abs_uint32_int64
// C type: uint32_t
// A type: int64_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint32_t z = (uint32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT32 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (uint32_t) Ax [p] for all p.  ABS applied to the unsigned result
// is the identity, so the truncating cast is the entire operation — the same
// work the GB_CAST_OP (p, p) macro performs, written out directly.
GrB_Info GB_unop__abs_uint32_int64
(
    uint32_t *restrict Cx,
    const int64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // conversion to uint32_t is well-defined (reduction modulo 2^32)
        Cx [p] = (uint32_t) Ax [p] ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose while casting int64_t entries to uint32_t
// (ABS on the unsigned result is the identity, so the cast is the whole op)
GrB_Info GB_tran__abs_uint32_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
// operator compiled out; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
// numerical (second) phase only: the pattern of C was computed in phase 1
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
spalart_allmaras_turbulence_model.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Jordi Cotela
// Riccardo Rossi
//
#if !defined(KRATOS_SPALART_ALLMARAS_TURBULENCE_H_INCLUDED )
#define KRATOS_SPALART_ALLMARAS_TURBULENCE_H_INCLUDED
// System includes
#include <string>
#include <iostream>
// External includes
// Project includes
#include "includes/define.h"
#include "containers/model.h"
#include "processes/process.h"
#include "includes/cfd_variables.h"
#include "solving_strategies/strategies/solving_strategy.h"
//#include "solving_strategies/strategies/residualbased_linear_strategy.h"
#include "solving_strategies/strategies/residualbased_newton_raphson_strategy.h"
// #include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h"
#include "solving_strategies/schemes/residualbased_incremental_aitken_static_scheme.h"
#include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver_componentwise.h"
#include "solving_strategies/convergencecriterias/residual_criteria.h"
// Application includes
#include "custom_utilities/periodic_condition_utilities.h"
#include "fluid_dynamics_application_variables.h"
namespace Kratos
{
///@addtogroup FluidDynamicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/// An impelementation of the Spalart-Allmaras turbulence model for incompressible flows.
/** Detail class definition.
*/
template<class TSparseSpace,
class TDenseSpace,
class TLinearSolver
>
class SpalartAllmarasTurbulenceModel : public Process
{
public:
///@name Type Definitions
///@{
/// Pointer definition of SpalartAllmarasTurbulenceModel
KRATOS_CLASS_POINTER_DEFINITION(SpalartAllmarasTurbulenceModel);
///@}
///@name Life Cycle
///@{
/// Constructor for the Spalart-Allmaras turbulence model.
/**
* @param rModelPart ModelPart for the flow problem
* @param pLinearSolver Pointer to the linear solver to use in the solution of the viscosity transport problem
* @param DomainSize Spatial dimension of the problem (2 or 3)
* @param NonLinearTol Relative tolerance for the turbulent viscosity transport problem (convergence is checked using the norm of the residual)
* @param MaxIter Maximum number of iterations for the solution of the viscosity transport problem
* @param ReformDofSet True if the degrees of freedom change during the problem (for example due to remeshing) false otherwise
* @param TimeOrder Order for time integration (1 - Backward Euler will be used, 2 - BDF2 method)
*/
SpalartAllmarasTurbulenceModel(
ModelPart& rModelPart,
typename TLinearSolver::Pointer pLinearSolver,
unsigned int DomainSize,
double NonLinearTol,
unsigned int MaxIter,
bool ReformDofSet,
unsigned int TimeOrder)
: mr_model_part(rModelPart),
mrSpalartModelPart(rModelPart.GetModel().CreateModelPart("SpalartModelPart")),
mdomain_size(DomainSize),
mtol(NonLinearTol),
mmax_it(MaxIter),
mtime_order(TimeOrder),
madapt_for_fractional_step(false)
{
//************************************************************************************************
//check that the variables needed are in the model part
if (!(rModelPart.NodesBegin()->SolutionStepsDataHas(DISTANCE)))
KRATOS_THROW_ERROR(std::logic_error, "Variable is not in the model part:", DISTANCE);
if (!(rModelPart.NodesBegin()->SolutionStepsDataHas(VELOCITY)))
KRATOS_THROW_ERROR(std::logic_error, "Variable is not in the model part:", VELOCITY);
if (!(rModelPart.NodesBegin()->SolutionStepsDataHas(MOLECULAR_VISCOSITY)))
KRATOS_THROW_ERROR(std::logic_error, "Variable is not in the model part:", MOLECULAR_VISCOSITY);
if (!(rModelPart.NodesBegin()->SolutionStepsDataHas(TURBULENT_VISCOSITY)))
KRATOS_THROW_ERROR(std::logic_error, "Variable is not in the model part:", TURBULENT_VISCOSITY);
if (!(rModelPart.NodesBegin()->SolutionStepsDataHas(MESH_VELOCITY)))
KRATOS_THROW_ERROR(std::logic_error, "Variable is not in the model part:", MESH_VELOCITY);
if (!(rModelPart.NodesBegin()->SolutionStepsDataHas(VISCOSITY)))
KRATOS_THROW_ERROR(std::logic_error, "Variable is not in the model part:", VISCOSITY);
if (!(rModelPart.NodesBegin()->SolutionStepsDataHas(NODAL_AREA)))
KRATOS_THROW_ERROR(std::logic_error, "Variable is not in the model part:", NODAL_AREA);
if (!(rModelPart.NodesBegin()->SolutionStepsDataHas(TEMP_CONV_PROJ)))
KRATOS_THROW_ERROR(std::logic_error, "Variable is not in the model part:", TEMP_CONV_PROJ);
// a buffer of 3 steps (n+1, n, n-1) is required for BDF2 time integration
if (mr_model_part.GetBufferSize() < 3)
KRATOS_THROW_ERROR(std::logic_error, "insufficient buffer size for BDF2, currently buffer size is ", mr_model_part.GetBufferSize());
//************************************************************************************************
//construct a new auxiliary model part
// the auxiliary model part shares nodes, process info and properties with
// the fluid model part (no copies), but owns its own Spalart-Allmaras elements
mrSpalartModelPart.GetNodalSolutionStepVariablesList() = mr_model_part.GetNodalSolutionStepVariablesList();
mrSpalartModelPart.SetBufferSize(3);
mrSpalartModelPart.Nodes() = mr_model_part.Nodes();
mrSpalartModelPart.SetProcessInfo(mr_model_part.pGetProcessInfo());
mrSpalartModelPart.SetProperties(mr_model_part.pProperties());
std::string ElementName;
if (DomainSize == 2)
ElementName = std::string("SpalartAllmaras2D");
else
ElementName = std::string("SpalartAllmaras3D");
const Element& rReferenceElement = KratosComponents<Element>::Get(ElementName);
//generating the elements
// one Spalart-Allmaras element per fluid element, reusing geometry and properties
for (ModelPart::ElementsContainerType::iterator iii = mr_model_part.ElementsBegin(); iii != mr_model_part.ElementsEnd(); iii++)
{
Properties::Pointer properties = iii->pGetProperties();
Element::Pointer p_element = rReferenceElement.Create(iii->Id(), iii->GetGeometry(), properties);
mrSpalartModelPart.Elements().push_back(p_element);
}
// pointer types for the solution strategy construcion
typedef typename Scheme< TSparseSpace, TDenseSpace >::Pointer SchemePointerType;
typedef typename ConvergenceCriteria< TSparseSpace, TDenseSpace >::Pointer ConvergenceCriteriaPointerType;
typedef typename BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer BuilderSolverTypePointer;
typedef typename SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer StrategyPointerType;
// Solution scheme: Aitken iterations
const double DefaultAitkenOmega = 1.0;
SchemePointerType pScheme = SchemePointerType( new ResidualBasedIncrementalAitkenStaticScheme< TSparseSpace, TDenseSpace > (DefaultAitkenOmega) );
// SchemePointerType pScheme = SchemePointerType( new ResidualBasedIncrementalUpdateStaticScheme< TSparseSpace, TDenseSpace > () );
// Convergence criteria
const double NearlyZero = 1.0e-20;
ConvergenceCriteriaPointerType pConvCriteria = ConvergenceCriteriaPointerType( new ResidualCriteria<TSparseSpace,TDenseSpace>(NonLinearTol,NearlyZero) );
// Builder and solver
BuilderSolverTypePointer pBuildAndSolver = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolverComponentwise<TSparseSpace, TDenseSpace, TLinearSolver, Variable<double> > (pLinearSolver, TURBULENT_VISCOSITY));
// Strategy
bool CalculateReactions = false;
bool MoveMesh = false;
mpSolutionStrategy = StrategyPointerType( new ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(mrSpalartModelPart,pScheme,pConvCriteria,pBuildAndSolver,MaxIter,CalculateReactions,ReformDofSet,MoveMesh));
// silence strategy output and verify the setup once at construction time
mpSolutionStrategy->SetEchoLevel(0);
mpSolutionStrategy->Check();
}
/// Destructor: removes the auxiliary "SpalartModelPart" this process created
/// in its constructor, so the Model does not keep a stale model part around.
~SpalartAllmarasTurbulenceModel() override
{
Model& r_model = mrSpalartModelPart.GetModel();
r_model.DeleteModelPart("SpalartModelPart");
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/// Solve an iteration of the turbulent viscosity
/** Solves the Spalart-Allmaras transport problem for TURBULENT_VISCOSITY and
 * then updates the effective kinematic VISCOSITY on the (shared) nodes.
 */
void Execute() override
{
KRATOS_TRY
// in fractional-step mode the convection velocity is taken from FRACT_VEL
if(madapt_for_fractional_step == true)
{
if (!(mrSpalartModelPart.NodesBegin()->SolutionStepsDataHas(FRACT_VEL)))
KRATOS_THROW_ERROR(std::logic_error, "Variable is not in the model part:", FRACT_VEL);
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(mrSpalartModelPart.Nodes().size()); i++)
{
ModelPart::NodesContainerType::iterator it = mrSpalartModelPart.NodesBegin() + i;
it->FastGetSolutionStepValue(VELOCITY) = it->FastGetSolutionStepValue(FRACT_VEL);
}
}
// solve the viscosity transport problem (projection + non-linear solve)
AuxSolve();
//update viscosity on the nodes
for (ModelPart::NodeIterator i = mrSpalartModelPart.NodesBegin();
i != mrSpalartModelPart.NodesEnd(); ++i)
{
double molecular_viscosity = i->FastGetSolutionStepValue(MOLECULAR_VISCOSITY);
double turbulent_viscosity = i->FastGetSolutionStepValue(TURBULENT_VISCOSITY);
if(turbulent_viscosity < 0)
{
// clip non-physical negative values to a small positive number and
// fall back to the molecular viscosity alone
i->FastGetSolutionStepValue(TURBULENT_VISCOSITY) = 1e-9;
i->FastGetSolutionStepValue(VISCOSITY) = molecular_viscosity;
}
else
{
// Spalart-Allmaras closure: nu_eff = fv1 * nu_tilde + nu,
// with fv1 = chi^3 / (chi^3 + cv1^3), chi = nu_tilde / nu, cv1 = 7.1
const double cv1_3 = 7.1*7.1*7.1;
double xi = turbulent_viscosity / molecular_viscosity;
double xi_3 = xi*xi*xi;
double fv1 = xi_3 / (xi_3 + cv1_3);
double viscosity = fv1 * turbulent_viscosity + molecular_viscosity;
i->FastGetSolutionStepValue(VISCOSITY) = viscosity;
}
}
KRATOS_CATCH("");
}
/// Update the stored maximum number of non-linear iterations.
// NOTE(review): mmax_it is only stored here; the Newton-Raphson strategy was
// configured with MaxIter in the constructor and is not updated by this call,
// so the setter appears to have no effect on the solve — verify against the
// strategy API / intended use.
void SetMaxIterations(unsigned int max_it)
{
KRATOS_TRY
mmax_it = max_it;
KRATOS_CATCH("");
}
/// Switch the model to fractional-step mode: Execute() will then copy
/// FRACT_VEL into VELOCITY before solving (see Execute).
void AdaptForFractionalStep()
{
KRATOS_TRY
madapt_for_fractional_step = true;
KRATOS_CATCH("");
}
/// Activate Detached Eddy Simulation by storing the C_DES constant in the
/// process info (shared with the fluid model part).
void ActivateDES(double CDES)
{
KRATOS_TRY;
mrSpalartModelPart.GetProcessInfo()[C_DES] = CDES;
// the distance-clipping variant below is kept for reference only (dead code)
/*
//update viscosity on the nodes
for (ModelPart::NodeIterator i = mrSpalartModelPart.NodesBegin();
i != mrSpalartModelPart.NodesEnd(); ++i)
{
double distance = i->FastGetSolutionStepValue(DISTANCE);
const array_1d<double,3>& xc = i->Coordinates();
double h_max = 0.0;
//compute nodal h (by max edge size)
GlobalPointersVector<Node<3> >& neigbours = i->GetValue(NEIGHBOUR_NODES);
for(GlobalPointersVector<Node<3> >::iterator ineighb=neigbours.begin(); ineighb!=neigbours.end(); ineighb++)
{
array_1d<double,3> aux = ineighb->Coordinates();
aux -= xc;
double h = norm_2(aux);
if(h > h_max) h_max=h;
}
if(h_max == 0.0)
KRATOS_THROW_ERROR(std::logic_error,"unexpected isolated node. Wrong node has Id ",i->Id());
if(distance > h_max*CDES)
i->FastGetSolutionStepValue(DISTANCE) = h_max*CDES;
}*/
KRATOS_CATCH("");
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
/// Turn back information as a string (the process' class name).
std::string Info() const override
{
    return std::string("SpalartAllmarasTurbulenceModel");
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << "SpalartAllmarasTurbulenceModel";
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
ModelPart& mr_model_part; // main (fluid) model part
ModelPart& mrSpalartModelPart; // auxiliary model part holding the Spalart-Allmaras elements (shares nodes with mr_model_part)
unsigned int mdomain_size; // spatial dimension (2 or 3)
double mtol; // relative tolerance for the non-linear iterations
unsigned int mmax_it; // maximum number of non-linear iterations
unsigned int mtime_order; // time integration order: 1 = backward Euler, 2 = BDF2
bool madapt_for_fractional_step; // if true, convection velocity is read from FRACT_VEL
typename SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer mpSolutionStrategy; // strategy solving the viscosity transport problem
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
/// Protected constructor, initializing only the references (for derived classes)
// NOTE(review): creates "SpalartModelPart" exactly like the public constructor;
// the destructor deletes it, so derived classes must not create it again.
SpalartAllmarasTurbulenceModel(ModelPart& rModelPart)
:
Process(),
mr_model_part(rModelPart),
mrSpalartModelPart(rModelPart.GetModel().CreateModelPart("SpalartModelPart"))
{}
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
//*********************************************************************************
//**********************************************************************
/*double*/
/// Compute the BDF coefficients for the current step and solve the
/// turbulent-viscosity transport problem (projection, then non-linear solve).
void AuxSolve()
{
KRATOS_TRY
//calculate the BDF coefficients
ProcessInfo& rCurrentProcessInfo = mrSpalartModelPart.GetProcessInfo();
double Dt = rCurrentProcessInfo[DELTA_TIME];
if (mtime_order == 2)
{
// variable-step BDF2: rho is the ratio of the previous to the current step
double dt_old = rCurrentProcessInfo.GetPreviousTimeStepInfo(1)[DELTA_TIME];
double rho = dt_old / Dt;
double coeff = 1.0 / (Dt * rho * rho + Dt * rho);
Vector& BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
BDFcoeffs.resize(3);
BDFcoeffs[0] = coeff * (rho * rho + 2.0 * rho); //coefficient for step n+1
BDFcoeffs[1] = -coeff * (rho * rho + 2.0 * rho + 1.0); //coefficient for step n
BDFcoeffs[2] = coeff; //coefficient for step n-1
}
else
{
// backward Euler (first order)
Vector& BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
BDFcoeffs.resize(2);
BDFcoeffs[0] = 1.0 / Dt; //coefficient for step n+1
BDFcoeffs[1] = -1.0 / Dt; //coefficient for step n
}
// unsigned int iter = 0;
// double ratio;
// bool is_converged = false;
// double dT_norm = 0.0;
// double T_norm = 0.0;
// FRACTIONAL_STEP selects the element behavior: 2 = assemble the convection
// projection, 1 = solve the viscosity transport; restore the caller's value after
int current_fract_step = rCurrentProcessInfo[FRACTIONAL_STEP];
rCurrentProcessInfo[FRACTIONAL_STEP] = 2;
CalculateProjection();
rCurrentProcessInfo[FRACTIONAL_STEP] = 1;
mpSolutionStrategy->Solve();
rCurrentProcessInfo[FRACTIONAL_STEP] = current_fract_step;
// the manual outer iteration loop below is superseded by the Newton-Raphson
// strategy's own convergence check; kept for reference
// while (iter++ < mmax_it && is_converged == false)
// {
// rCurrentProcessInfo[FRACTIONAL_STEP] = 1;
// dT_norm = mpSolutionStrategy->Solve();
// T_norm = CalculateVarNorm();
// CalculateProjection();
//// KRATOS_WATCH(dT_norm)
//// KRATOS_WATCH(T_norm)
// ratio = 1.00;
// if (T_norm != 0.00)
// ratio = dT_norm / T_norm;
// else
// {
// std::cout << "Nu norm = " << T_norm << " dNu_norm = " << dT_norm << std::endl;
// }
// if (dT_norm < 1e-11)
// ratio = 0; //converged
// if (ratio < mtol)
// is_converged = true;
// std::cout << " SA iter = " << iter << " ratio = " << ratio << std::endl;
// }
// return dT_norm;
KRATOS_CATCH("")
}
//******************************************************************************************************
//******************************************************************************************************
///calculation of the Euclidean (L2) norm of TURBULENT_VISCOSITY over all nodes
double CalculateVarNorm()
{
KRATOS_TRY;
double norm = 0.00;
for (ModelPart::NodeIterator i = mrSpalartModelPart.NodesBegin();
i != mrSpalartModelPart.NodesEnd(); ++i)
{
norm += pow(i->FastGetSolutionStepValue(TURBULENT_VISCOSITY), 2);
}
return sqrt(norm);
KRATOS_CATCH("")
}
///calculation of the nodal projection of the convective term
/// (assembles TEMP_CONV_PROJ and NODAL_AREA, then divides by the nodal area)
void CalculateProjection()
{
KRATOS_TRY;
const ProcessInfo& rCurrentProcessInfo = mrSpalartModelPart.GetProcessInfo();
//first of all set to zero the nodal variables to be updated nodally
for (ModelPart::NodeIterator i = mrSpalartModelPart.NodesBegin();
i != mrSpalartModelPart.NodesEnd(); ++i)
{
(i)->FastGetSolutionStepValue(TEMP_CONV_PROJ) = 0.00;
(i)->FastGetSolutionStepValue(NODAL_AREA) = 0.00;
}
//add the elemental contributions for the calculation of the velocity
//and the determination of the nodal area
// (the elements accumulate into TEMP_CONV_PROJ / NODAL_AREA here)
for (ModelPart::ElementIterator i = mrSpalartModelPart.ElementsBegin();
i != mrSpalartModelPart.ElementsEnd(); ++i)
{
(i)->InitializeSolutionStep(rCurrentProcessInfo);
}
// synchronize the partial sums across MPI partitions
Communicator& rComm = mrSpalartModelPart.GetCommunicator();
rComm.AssembleCurrentData(NODAL_AREA);
rComm.AssembleCurrentData(TEMP_CONV_PROJ);
// Obtain nodal projection of the residual
for (ModelPart::NodeIterator i = mrSpalartModelPart.NodesBegin();
i != mrSpalartModelPart.NodesEnd(); ++i)
{
const double NodalArea = i->FastGetSolutionStepValue(NODAL_AREA);
// skip isolated nodes (zero area) to avoid division by zero
if(NodalArea > 0.0)
{
double& rConvProj = i->FastGetSolutionStepValue(TEMP_CONV_PROJ);
rConvProj /= NodalArea;
}
}
KRATOS_CATCH("")
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
// NOTE(review): intentionally a no-op (nothing is copied); together with the
// private copy constructor this blocks meaningful copy semantics from outside.
SpalartAllmarasTurbulenceModel & operator=(SpalartAllmarasTurbulenceModel const& rOther)
{
return *this;
}
/// Copy constructor (private and unused: copying is not allowed from outside).
// NOTE(review): the original omitted the mandatory mem-initializer for the
// reference member mrSpalartModelPart, which makes this constructor ill-formed
// if it is ever instantiated, and left mtol/mmax_it/mtime_order/
// madapt_for_fractional_step uninitialized.  All members are now bound/copied
// so the declaration stays well-formed; the class still cannot be copied by
// clients because this constructor is private.
SpalartAllmarasTurbulenceModel(SpalartAllmarasTurbulenceModel const& rOther)
    : mr_model_part(rOther.mr_model_part),
      mrSpalartModelPart(rOther.mrSpalartModelPart),
      mdomain_size(rOther.mdomain_size),
      mtol(rOther.mtol),
      mmax_it(rOther.mmax_it),
      mtime_order(rOther.mtime_order),
      madapt_for_fractional_step(rOther.madapt_for_fractional_step)
{
}
///@}
}; // Class SpalartAllmarasTurbulenceModel
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
template<class TSparseSpace,
class TDenseSpace,
class TLinearSolver
>
inline std::istream & operator >>(std::istream& rIStream,
SpalartAllmarasTurbulenceModel<TSparseSpace, TDenseSpace, TLinearSolver>& rThis)
{
return rIStream;
}
/// output stream function
template<class TSparseSpace,
class TDenseSpace,
class TLinearSolver
>
inline std::ostream & operator <<(std::ostream& rOStream,
const SpalartAllmarasTurbulenceModel<TSparseSpace, TDenseSpace, TLinearSolver>& rThis)
{
rThis.PrintInfo(rOStream);
rOStream << std::endl;
rThis.PrintData(rOStream);
return rOStream;
}
///@}
///@} addtogroup block
} // namespace Kratos.
#endif // KRATOS_SPALART_ALLMARAS_TURBULENCE_H_INCLUDED defined
|
GB_unop__bnot_int16_int16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__bnot_int16_int16)
// op(A') function: GB (_unop_tran__bnot_int16_int16)
// C type: int16_t
// A type: int16_t
// cast: int16_t cij = aij
// unaryop: cij = ~(aij)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ~(x) ;
// casting
#define GB_CAST(z, aij) \
int16_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = aij ; \
Cx [pC] = ~(z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BNOT || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = ~Ax [p] (bitwise complement) for every entry present in A.
// Cx and Ax may be aliased; no typecast is needed (both are int16_t).
GrB_Info GB (_unop_apply__bnot_int16_int16)
(
    int16_t *Cx,                // Cx and Ax may be aliased
    const int16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = (int16_t) ~(Ax [p]) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                Cx [p] = (int16_t) ~(Ax [p]) ;
            }
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose and apply cij = ~(aij); the traversal is
// implemented by the shared GB_unop_transpose.c template.
GrB_Info GB (_unop_tran__bnot_int16_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// operator compiled out; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
kernel.h | void pair_HMM_forward(
const int teams,
const int threads,
const int cur_i,
const int cur_j,
//const double forward_matrix_in[x_dim+1][y_dim+1][batch][states-1],
const fArray *__restrict forward_matrix_in,
//const double transitions[x_dim+1][batch][states-1][states],
const tArray *__restrict transitions,
//const double emissions[x_dim+1][y_dim+1][batch][states-1],
const fArray *__restrict emissions,
// const double likelihood[2][2][batch][states-1],
const lArray *__restrict likelihood,
//const double start_transitions[batch][states-1],
const sArray *__restrict start_transitions,
//double forward_matrix_out[x_dim+1][y_dim+1][batch][states-1])
fArray *__restrict forward_matrix_out)
{
#pragma omp target teams num_teams(teams) thread_limit(threads)
{
double e[batch][states-1];
double f01[1][batch][2];
double mul_3d[1][batch][2];
double mul_4d[4][batch][1][2];
#pragma omp parallel
{
int batch_id = omp_get_team_num();
int states_id = omp_get_thread_num();
e[batch_id][states_id] = emissions[cur_i][cur_j][batch_id][states_id];
double t[2][2][batch][2][2];
for (int k = 0; k < 2; k++) {
for (int l = 0; l < 2; l++) {
t[0][0][batch_id][k][l] = transitions[cur_i - 1][batch_id][k][l];
t[0][1][batch_id][k][l] = transitions[cur_i - 1][batch_id][k][l];
t[1][0][batch_id][k][l] = transitions[cur_i][batch_id][k][l];
t[1][1][batch_id][k][l] = transitions[cur_i][batch_id][k][l];
}
}
#pragma omp barrier
if (cur_i > 0 && cur_j == 0) {
if (cur_i == 1) {
forward_matrix_out[1][0][batch_id][states_id] =
start_transitions[batch_id][states_id] * e[0][states_id];
}
else {
double t01[batch][2][2];
for (int j = 0; j < 2; j++) {
for (int k = 0; k < 2; k++) {
t01[batch_id][j][k] = t[0][1][batch_id][j][k];
}
}
f01[0][batch_id][states_id] =
forward_matrix_in[cur_i - 1][cur_j][batch_id][states_id];
#pragma omp barrier
double s = 0.0;
for (int k = 0; k < 2; k++)
s += f01[0][batch_id][k] * t01[batch_id][k][states_id];
s *= (e[batch_id][states_id] * likelihood[0][1][batch_id][states_id]);
mul_3d[0][batch_id][states_id] = s;
#pragma omp barrier
forward_matrix_out[cur_i][0][batch_id][states_id] = mul_3d[0][batch_id][states_id];
}
}
else if (cur_i > 0 and cur_j > 0) {
double f[2][2][batch][1][2];
for (int i = 0; i < 2; i++) {
f[0][0][batch_id][0][i] = forward_matrix_in[cur_i-1][cur_j-1][batch_id][i];
f[0][1][batch_id][0][i] = forward_matrix_in[cur_i-1][cur_j][batch_id][i];
f[1][0][batch_id][0][i] = forward_matrix_in[cur_i][cur_j-1][batch_id][i];
f[1][1][batch_id][0][i] = forward_matrix_in[cur_i][cur_j][batch_id][i];
}
#pragma omp barrier
double s0 = 0.0;
double s1 = 0.0;
double s2 = 0.0;
double s3 = 0.0;
for (int k = 0; k < 2; k++) {
s0 += f[0][0][batch_id][0][k] * t[0][0][batch_id][k][states_id];
s1 += f[0][1][batch_id][0][k] * t[0][1][batch_id][k][states_id];
s2 += f[1][0][batch_id][0][k] * t[1][0][batch_id][k][states_id];
s3 += f[1][1][batch_id][0][k] * t[1][1][batch_id][k][states_id];
}
s0 *= likelihood[0][0][batch_id][states_id];
s1 *= likelihood[0][1][batch_id][states_id];
s2 *= likelihood[1][0][batch_id][states_id];
s3 *= likelihood[1][1][batch_id][states_id];
mul_4d[0][batch_id][0][states_id] = s0;
mul_4d[1][batch_id][0][states_id] = s1;
mul_4d[2][batch_id][0][states_id] = s2;
mul_4d[3][batch_id][0][states_id] = s3;
#pragma omp barrier
for (int j = 0; j < 2; j++) {
double summation = mul_4d[0][batch_id][0][j] +
mul_4d[1][batch_id][0][j] +
mul_4d[2][batch_id][0][j] +
mul_4d[3][batch_id][0][j];
summation *= e[batch_id][j];
forward_matrix_out[cur_i][cur_j][batch_id][j] = summation;
}
}
}
}
}
|
GB_binop__le_bool.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__le_bool)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__le_bool)
// A.*B function (eWiseMult): GB (_AemultB_03__le_bool)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__le_bool)
// A*D function (colscale): GB (_AxD__le_bool)
// D*A function (rowscale): GB (_DxB__le_bool)
// C+=B function (dense accum): GB (_Cdense_accumB__le_bool)
// C+=b function (dense accum): GB (_Cdense_accumb__le_bool)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_bool)
// C=scalar+B GB (_bind1st__le_bool)
// C=scalar+B' GB (_bind1st_tran__le_bool)
// C=A+scalar GB (_bind2nd__le_bool)
// C=A'+scalar GB (_bind2nd_tran__le_bool)
// C type: bool
// A type: bool
// B,b type: bool
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
bool
#define GB_BTYPE \
bool
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
bool bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LE || GxB_NO_BOOL || GxB_NO_LE_BOOL)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with cij = (aij <= bij); all three matrices are dense
GrB_Info GB (_Cdense_ewise3_noaccum__le_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// operator compiled out; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C with the LE op
GrB_Info GB (_Cdense_accumB__le_bool)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
// operator compiled out; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__le_bool)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type bool
bool bwork = (*((bool *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// A_is_pattern / D_is_pattern indicate the values of that operand are ignored
// (only its structure is used); honored inside the included meta kernel.
GrB_Info GB (_AxD__le_bool)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__le_bool)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Workspaces declared here are used (and freed via GB_FREE_WORK) by the
// included template; the task list describes the parallel slicing of C.
GrB_Info GB (_AaddB__le_bool)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// The entire kernel body lives in the included meta file, specialized by the
// LE_BOOL macros defined at the top of this file.
GrB_Info GB (_AemultB_01__le_bool)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// GB_BINOP_FLIP is 0 for LE (a flipped variant GE exists and is selected
// upstream), so only the non-flipped branch below is compiled in.
GrB_Info GB (_AemultB_02__le_bool)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// The mask M drives the iteration; Cp_kfirst maps M's tasks to C's vectors.
GrB_Info GB (_AemultB_03__le_bool)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__le_bool)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__le_bool)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Apply z = (x <= Bx [p]) with the scalar x bound as the first operand.
// Entries absent from the bitmap Bb are left untouched in Cx.
bool *Cx = (bool *) Cx_output ;
bool *Bx = (bool *) Bx_input ;
const bool x = (*((bool *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (GBB (Bb, p))
{
Cx [p] = (x <= Bx [p]) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__le_bool)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Apply z = (Ax [p] <= y) with the scalar y bound as the second operand.
// Entries absent from the bitmap Ab are left untouched in Cx.
bool *Cx = (bool *) Cx_output ;
bool *Ax = (bool *) Ax_input ;
const bool y = (*((bool *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (GBB (Ab, p))
{
Cx [p] = (Ax [p] <= y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = Ax [pA] ; \
Cx [pC] = (x <= aij) ; \
}
GrB_Info GB (_bind1st_tran__le_bool)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
bool
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool x = (*((const bool *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE to its file-wide definition for later code
#undef GB_ATYPE
#define GB_ATYPE \
bool
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = Ax [pA] ; \
Cx [pC] = (aij <= y) ; \
}
// The transpose loop itself comes from the included template, which expands
// GB_CAST_OP above for each entry moved from A to C.
GrB_Info GB (_bind2nd_tran__le_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool y = (*((const bool *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
hci.c | /*
* Slater-Condon rule implementation for Heat-Bath CI
* Author: Alexander Sokolov <alexander.y.sokolov@gmail.com>
*/
#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include "hci.h"
//#include <omp.h>
#include <limits.h>
// Sigma-vector build: ci1 += H * civec over the determinant list `strs`,
// using the Slater-Condon rules.  Each determinant is stored as 2*nset
// uint64_t words (alpha string first, then beta).  Cost is O(ndet^2);
// pairs differing by more than a double excitation contribute nothing.
// NOTE(review): the eri/h1 indexing below treats eri as a dense norb^4
// array — presumably chemists' notation (pq|rs); confirm against caller.
// NOTE(review): the int offsets (e.g. kkai) overflow for norb >= 216,
// since norb^4 then exceeds INT_MAX — consider size_t if norb can be large.
void contract_h_c(double *h1, double *eri, int norb, int neleca, int nelecb, uint64_t *strs, double *civec, double *hdiag, uint64_t ndet, double *ci1) {
#pragma omp parallel default(none) shared(h1, eri, norb, neleca, nelecb, strs, civec, hdiag, ndet, ci1)
{
size_t ip, jp, p;
int nset = norb / 64 + 1;
// Loop over pairs of determinants; rows (ip) are distributed over threads,
// so each thread writes a disjoint set of ci1 entries (no race on ci1).
#pragma omp for schedule(static)
for (ip = 0; ip < ndet; ++ip) {
for (jp = 0; jp < ndet; ++jp) {
uint64_t *stria = strs + ip * 2 * nset;
uint64_t *strib = strs + ip * 2 * nset + nset;
uint64_t *strja = strs + jp * 2 * nset;
uint64_t *strjb = strs + jp * 2 * nset + nset;
int n_excit_a = n_excitations(stria, strja, nset);
int n_excit_b = n_excitations(strib, strjb, nset);
// Diagonal term
if (ip == jp) {
ci1[ip] += hdiag[ip] * civec[ip];
}
// Single excitation
else if ((n_excit_a + n_excit_b) == 1) {
// ia holds [hole orbital i, particle orbital a]; exactly one of the
// two branches below runs, so ia is always assigned before free().
int *ia;
// alpha->alpha
if (n_excit_b == 0) {
ia = get_single_excitation(stria, strja, nset);
int i = ia[0];
int a = ia[1];
double sign = compute_cre_des_sign(a, i, stria, nset);
int *occsa = compute_occ_list(stria, nset, norb, neleca);
int *occsb = compute_occ_list(strib, nset, norb, nelecb);
// effective one-electron element: h_ai plus Coulomb/exchange sums
// over occupied same-spin and Coulomb over opposite-spin orbitals
double fai = h1[a * norb + i];
for (p = 0; p < neleca; ++p) {
int k = occsa[p];
int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i;
int kiak = k * norb * norb * norb + i * norb * norb + a * norb + k;
fai += eri[kkai] - eri[kiak];
}
for (p = 0; p < nelecb; ++p) {
int k = occsb[p];
int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i;
fai += eri[kkai];
}
ci1[ip] += sign * fai * civec[jp];
free(occsa);
free(occsb);
}
// beta->beta
else if (n_excit_a == 0) {
ia = get_single_excitation(strib, strjb, nset);
int i = ia[0];
int a = ia[1];
double sign = compute_cre_des_sign(a, i, strib, nset);
int *occsa = compute_occ_list(stria, nset, norb, neleca);
int *occsb = compute_occ_list(strib, nset, norb, nelecb);
double fai = h1[a * norb + i];
for (p = 0; p < nelecb; ++p) {
int k = occsb[p];
int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i;
int kiak = k * norb * norb * norb + i * norb * norb + a * norb + k;
fai += eri[kkai] - eri[kiak];
}
for (p = 0; p < neleca; ++p) {
int k = occsa[p];
int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i;
fai += eri[kkai];
}
ci1[ip] += sign * fai * civec[jp];
free(occsa);
free(occsb);
}
free(ia);
}
// Double excitation
else if ((n_excit_a + n_excit_b) == 2) {
int i, j, a, b;
// alpha,alpha->alpha,alpha
if (n_excit_b == 0) {
int *ijab = get_double_excitation(stria, strja, nset);
i = ijab[0]; j = ijab[1]; a = ijab[2]; b = ijab[3];
double v, sign;
// antisymmetrized matrix element; the branch picks the pairing of
// creation/annihilation operators that keeps the sign convention
int ajbi = a * norb * norb * norb + j * norb * norb + b * norb + i;
int aibj = a * norb * norb * norb + i * norb * norb + b * norb + j;
if (a > j || i > b) {
v = eri[ajbi] - eri[aibj];
sign = compute_cre_des_sign(b, i, stria, nset);
sign *= compute_cre_des_sign(a, j, stria, nset);
}
else {
v = eri[aibj] - eri[ajbi];
sign = compute_cre_des_sign(b, j, stria, nset);
sign *= compute_cre_des_sign(a, i, stria, nset);
}
ci1[ip] += sign * v * civec[jp];
free(ijab);
}
// beta,beta->beta,beta
else if (n_excit_a == 0) {
int *ijab = get_double_excitation(strib, strjb, nset);
i = ijab[0]; j = ijab[1]; a = ijab[2]; b = ijab[3];
double v, sign;
int ajbi = a * norb * norb * norb + j * norb * norb + b * norb + i;
int aibj = a * norb * norb * norb + i * norb * norb + b * norb + j;
if (a > j || i > b) {
v = eri[ajbi] - eri[aibj];
sign = compute_cre_des_sign(b, i, strib, nset);
sign *= compute_cre_des_sign(a, j, strib, nset);
}
else {
v = eri[aibj] - eri[ajbi];
sign = compute_cre_des_sign(b, j, strib, nset);
sign *= compute_cre_des_sign(a, i, strib, nset);
}
ci1[ip] += sign * v * civec[jp];
free(ijab);
}
// alpha,beta->alpha,beta
else {
// one single excitation in each spin channel: no exchange term
int *ia = get_single_excitation(stria, strja, nset);
int *jb = get_single_excitation(strib, strjb, nset);
i = ia[0]; a = ia[1]; j = jb[0]; b = jb[1];
double v = eri[a * norb * norb * norb + i * norb * norb + b * norb + j];
double sign = compute_cre_des_sign(a, i, stria, nset);
sign *= compute_cre_des_sign(b, j, strib, nset);
ci1[ip] += sign * v * civec[jp];
free(ia);
free(jb);
}
}
} // end loop over jp
} // end loop over ip
} // end omp
}
// Compare two determinant strings and compute the excitation level between
// them.  Every differing bit is either a hole or a particle, so the level is
// half the number of bits that differ.
int n_excitations(uint64_t *str1, uint64_t *str2, int nset) {
    int ndiff = 0;
    for (int w = 0; w < nset; ++w) {
        ndiff += popcount(str1[w] ^ str2[w]);
    }
    return ndiff / 2;
}
// Compute the number of set bits in a 64-bit string using Kernighan's
// method: each iteration clears the lowest set bit, so the loop body runs
// exactly once per set bit.
int popcount(uint64_t x) {
    int count = 0;
    while (x != 0) {
        x &= x - 1;
        count++;
    }
    return count;
}
// Compute the orbital indices of a single excitation connecting str1 and
// str2.  Returns a malloc'd pair [i, a]: i is the hole (occupied in str1
// only) and a is the particle (occupied in str2 only).  Words are stored
// most-significant orbital group first, so word nset-1-g holds orbitals
// 64*g .. 64*g+63.
int *get_single_excitation(uint64_t *str1, uint64_t *str2, int nset) {
    int *ia = malloc(sizeof(int) * 2);
    for (int g = 0; g < nset; ++g) {
        size_t w = nset - g - 1;
        uint64_t diff = str1[w] ^ str2[w];
        uint64_t particle = diff & str2[w];
        uint64_t hole = diff & str1[w];
        if (popcount(particle) == 1) {
            ia[1] = trailz(particle) + 64 * g;
        }
        if (popcount(hole) == 1) {
            ia[0] = trailz(hole) + 64 * g;
        }
    }
    return ia;
}
// Compute the orbital indices of a double excitation connecting str1 and
// str2.  Returns a malloc'd quadruple [i, j, a, b]: holes i, j (occupied in
// str1 only) and particles a, b (occupied in str2 only), each pair ordered
// by ascending bit position within a word and by word scan order across
// words (lowest orbital group first).
int *get_double_excitation(uint64_t *str1, uint64_t *str2, int nset) {
    int *ijab = malloc(sizeof(int) * 4);
    int next_particle = 2; // next free slot among ijab[2..3]
    int next_hole = 0;     // next free slot among ijab[0..1]
    for (int g = 0; g < nset; ++g) {
        size_t w = nset - g - 1;
        uint64_t diff = str1[w] ^ str2[w];
        uint64_t particle = diff & str2[w];
        uint64_t hole = diff & str1[w];
        switch (popcount(particle)) {
        case 1:
            ijab[next_particle++] = trailz(particle) + 64 * g;
            break;
        case 2: {
            // both particles in the same word: lowest bit first
            int a = trailz(particle);
            ijab[2] = a + 64 * g;
            particle &= ~(1ULL << a);
            ijab[3] = trailz(particle) + 64 * g;
            break;
        }
        default:
            break;
        }
        switch (popcount(hole)) {
        case 1:
            ijab[next_hole++] = trailz(hole) + 64 * g;
            break;
        case 2: {
            // both holes in the same word: lowest bit first
            int i = trailz(hole);
            ijab[0] = i + 64 * g;
            hole &= ~(1ULL << i);
            ijab[1] = trailz(hole) + 64 * g;
            break;
        }
        default:
            break;
        }
    }
    return ijab;
}
// Compute the number of trailing zeros in a bit string, i.e. the index of
// the lowest set bit; returns 64 when no bit is set.
int trailz(uint64_t v) {
    if (v == 0) {
        return 64;
    }
    int c = 0;
    while ((v & 1) == 0) {
        v >>= 1;
        c++;
    }
    return c;
}
// Render a 64-bit string as a NUL-terminated binary string (MSB first) for
// debugging.  Returns a malloc'd buffer of 65 bytes that the caller must
// free, or NULL on allocation failure.
// Fix: the original took a pointless round-trip through *(uint64_t *)&i
// ("type punning because signed shift is implementation-defined") — but `i`
// is already uint64_t, so its shift is fully defined and the self-pun is
// dead weight; shift the parameter directly.
char *int2bin(uint64_t i) {
    size_t bits = sizeof(uint64_t) * CHAR_BIT;
    char *str = malloc(bits + 1);
    if (!str) return NULL;
    str[bits] = '\0';
    for (; bits--; i >>= 1)
        str[bits] = (i & 1) ? '1' : '0';
    return str;
}
// Compute the fermionic sign (+1.0 or -1.0) of applying a creation operator
// on orbital p and a destruction operator on orbital q to determinant `str`:
// the parity of the number of occupied orbitals strictly between q and p.
// Bit layout (as used by toggle_bit/get_*_excitation): orbital g lives in
// word str[nset - 1 - g/64], bit g%64.
double compute_cre_des_sign(int p, int q, uint64_t *str, int nset) {
    int nperm = 0;
    size_t i;
    int pg = p / 64;
    int qg = q / 64;
    int pb = p % 64;
    int qb = q % 64;
    if (pg > qg) {
        // full words strictly between the two orbitals' words
        for (i = nset - pg; i < nset - qg - 1; ++i) {
            nperm += popcount(str[i]);
        }
        // partial word holding p: occupied bits below pb
        nperm += popcount(str[nset - 1 - pg] & ((1ULL << pb) - 1));
        // partial word holding q: occupied bits above qb.
        // BUG FIX: the raw shifted word was added instead of its popcount,
        // which corrupts the permutation parity whenever more than one bit
        // survives the shift.  The double shift also avoids undefined
        // behavior (shift by 64) when qb == 63.
        nperm += popcount((str[nset - 1 - qg] >> qb) >> 1);
    }
    else if (pg < qg) {
        for (i = nset - qg; i < nset - pg - 1; ++i) {
            nperm += popcount(str[i]);
        }
        nperm += popcount(str[nset - 1 - qg] & ((1ULL << qb) - 1));
        // BUG FIX: same missing popcount (and qb==63 UB) as above.
        nperm += popcount((str[nset - 1 - pg] >> pb) >> 1);
    }
    else {
        // both orbitals fall in the same 64-bit word
        uint64_t mask;
        if (p > q) mask = (1ULL << pb) - (1ULL << (qb + 1));
        else mask = (1ULL << qb) - (1ULL << (pb + 1));
        // BUG FIX: the word index must follow the reversed layout used
        // everywhere else in this file (nset-1-pg, not pg); the two agree
        // only when nset == 1 (norb <= 64), which hid the bug.
        nperm = popcount(str[nset - 1 - pg] & mask);
    }
    return (nperm % 2) ? -1.0 : 1.0;
}
// Compute the list of occupied orbital indices (ascending) for a given
// string.  Returns a malloc'd array of `nelec` ints.  Words are stored
// most-significant orbital group first, so the array is walked backwards
// to emit the lowest orbitals first.
int *compute_occ_list(uint64_t *string, int nset, int norb, int nelec) {
    int *occ = malloc(sizeof(int) * nelec);
    int occ_ind = 0;
    int off = 0;
    for (int k = nset - 1; k >= 0; --k) {
        // number of valid orbital bits in this word (last word may be short)
        int nbits = (norb - off) < 64 ? (norb - off) : 64;
        uint64_t word = string[k];
        for (int b = 0; b < nbits; ++b) {
            if ((word >> b) & 1) {
                occ[occ_ind++] = b + off;
            }
        }
        off += 64;
    }
    return occ;
}
// Compute the list of virtual (unoccupied) orbital indices, ascending, for
// a given string.  Returns a malloc'd array of `norb - nelec` ints.  Words
// are stored most-significant orbital group first, so the array is walked
// backwards to emit the lowest orbitals first.
int *compute_vir_list(uint64_t *string, int nset, int norb, int nelec) {
    int *vir = malloc(sizeof(int) * (norb - nelec));
    int vir_ind = 0;
    int off = 0;
    for (int k = nset - 1; k >= 0; --k) {
        // number of valid orbital bits in this word (last word may be short)
        int nbits = (norb - off) < 64 ? (norb - off) : 64;
        uint64_t word = string[k];
        for (int b = 0; b < nbits; ++b) {
            if (!((word >> b) & 1)) {
                vir[vir_ind++] = b + off;
            }
        }
        off += 64;
    }
    return vir;
}
// Select determinants to include in the CI space (Heat-Bath selection):
// for each parent determinant in strs[ndet_start..ndet_finish), generate
// every single and double excitation whose estimated contribution
// |H_ai * c_I| exceeds select_cutoff and append its string pair to
// strs_add.  On entry strs_add_size[0] is the capacity of strs_add; on
// exit it holds the number of strings actually added.
// NOTE(review): the capacity check at the bottom runs only after a whole
// determinant has been processed, so strs_add can already be overrun
// before the error is detected — verify the caller over-allocates enough
// headroom.
void select_strs(double *h1, double *eri, double *jk, uint64_t *eri_sorted, uint64_t *jk_sorted, int norb, int neleca, int nelecb, uint64_t *strs, double *civec, uint64_t ndet_start, uint64_t ndet_finish, double select_cutoff, uint64_t *strs_add, uint64_t* strs_add_size) {
size_t p, q, r, i, k, a, ip, jp, kp, lp, ij, iset, idet;
uint64_t max_strs_add = strs_add_size[0];
int nset = norb / 64 + 1;
// Compute Fock intermediates
// focka/fockb: one-electron part plus Coulomb/exchange sums over the
// first neleca/nelecb orbitals (i.e. the reference occupation).
double *focka = malloc(sizeof(double) * norb * norb);
double *fockb = malloc(sizeof(double) * norb * norb);
for (p = 0; p < norb; ++p) {
for (q = 0; q < norb; ++q) {
double vja = 0.0;
double vka = 0.0;
for (i = 0; i < neleca; ++i) {
size_t iipq = i * norb * norb * norb + i * norb * norb + p * norb + q;
size_t piiq = p * norb * norb * norb + i * norb * norb + i * norb + q;
vja += eri[iipq];
vka += eri[piiq];
}
double vjb = 0.0;
double vkb = 0.0;
for (i = 0; i < nelecb; ++i) {
size_t iipq = i * norb * norb * norb + i * norb * norb + p * norb + q;
size_t piiq = p * norb * norb * norb + i * norb * norb + i * norb + q;
vjb += eri[iipq];
vkb += eri[piiq];
}
focka[p * norb + q] = h1[p * norb + q] + vja + vjb - vka;
fockb[p * norb + q] = h1[p * norb + q] + vja + vjb - vkb;
}
}
// Scratch lists describing how each determinant differs from the
// reference occupation (orbitals 0..nelec-1 occupied).
int *holes_a = malloc(sizeof(int) * norb);
int *holes_b = malloc(sizeof(int) * norb);
int *particles_a = malloc(sizeof(int) * norb);
int *particles_b = malloc(sizeof(int) * norb);
uint64_t strs_added = 0;
// Loop over determinants
for (idet = ndet_start; idet < ndet_finish; ++idet) {
uint64_t *stra = strs + idet * 2 * nset;
uint64_t *strb = strs + idet * 2 * nset + nset;
int *occsa = compute_occ_list(stra, nset, norb, neleca);
int *occsb = compute_occ_list(strb, nset, norb, nelecb);
int *virsa = compute_vir_list(stra, nset, norb, neleca);
int *virsb = compute_vir_list(stra, nset, norb, nelecb);
int *virsb_fix = virsb; // (placeholder comment removed below)
// NOTE: the two lines above are documentation-only in intent; see original.
double tol = select_cutoff / fabs(civec[idet]);
// Single excitations
int n_holes_a = 0;
int n_holes_b = 0;
int n_particles_a = 0;
int n_particles_b = 0;
for (p = 0; p < (norb - neleca); ++p) {
i = virsa[p];
if (i < neleca) {
holes_a[n_holes_a] = i;
n_holes_a++;
}
}
for (p = 0; p < neleca; ++p) {
i = occsa[p];
if (i >= neleca) {
particles_a[n_particles_a] = i;
n_particles_a++;
}
}
for (p = 0; p < (norb - nelecb); ++p) {
i = virsb[p];
if (i < nelecb) {
holes_b[n_holes_b] = i;
n_holes_b++;
}
}
for (p = 0; p < nelecb; ++p) {
i = occsb[p];
if (i >= nelecb) {
particles_b[n_particles_b] = i;
n_particles_b++;
}
}
// TODO: recompute Fock for each |Phi_I> and make sure it matches Fock in the code below
// alpha->alpha
for (p = 0; p < neleca; ++p) {
i = occsa[p];
for (q = 0; q < (norb - neleca); ++q) {
a = virsa[q];
// correct the reference Fock element for this determinant's
// actual occupation (add particles, remove holes)
double fai = focka[a * norb + i];
for (r = 0; r < n_particles_a; ++r) {
k = particles_a[r];
fai += jk[k * norb * norb * norb + k * norb * norb + a * norb + i];
}
for (r = 0; r < n_holes_a; ++r) {
k = holes_a[r];
fai -= jk[k * norb * norb * norb + k * norb * norb + a * norb + i];
}
for (r = 0; r < n_particles_b; ++r) {
k = particles_b[r];
fai += eri[k * norb * norb * norb + k * norb * norb + a * norb + i];
}
for (r = 0; r < n_holes_b; ++r) {
k = holes_b[r];
fai -= eri[k * norb * norb * norb + k * norb * norb + a * norb + i];
}
if (fabs(fai) > tol) {
uint64_t *tmp = toggle_bit(stra, nset, a);
uint64_t *new_str = toggle_bit(tmp, nset, i);
for (iset = 0; iset < nset; ++iset) {
// new alpha string
strs_add[strs_added * 2 * nset + iset] = new_str[iset];
// old beta string
strs_add[strs_added * 2 * nset + nset + iset] = strb[iset];
}
free(tmp);
free(new_str);
strs_added++;
}
}
}
// beta->beta
for (p = 0; p < nelecb; ++p) {
i = occsb[p];
for (q = 0; q < (norb - nelecb); ++q) {
a = virsb[q];
double fai = fockb[a * norb + i];
for (r = 0; r < n_particles_b; ++r) {
k = particles_b[r];
fai += jk[k * norb * norb * norb + k * norb * norb + a * norb + i];
}
for (r = 0; r < n_holes_b; ++r) {
k = holes_b[r];
fai -= jk[k * norb * norb * norb + k * norb * norb + a * norb + i];
}
for (r = 0; r < n_particles_a; ++r) {
k = particles_a[r];
fai += eri[k * norb * norb * norb + k * norb * norb + a * norb + i];
}
for (r = 0; r < n_holes_a; ++r) {
k = holes_a[r];
fai -= eri[k * norb * norb * norb + k * norb * norb + a * norb + i];
}
if (fabs(fai) > tol) {
uint64_t *tmp = toggle_bit(strb, nset, a);
uint64_t *new_str = toggle_bit(tmp, nset, i);
for (iset = 0; iset < nset; ++iset) {
// old alpha string
strs_add[strs_added * 2 * nset + iset] = stra[iset];
// new beta string
strs_add[strs_added * 2 * nset + nset + iset] = new_str[iset];
}
free(tmp);
free(new_str);
strs_added++;
}
}
}
size_t ip_occ, jp_occ, kp_occ, lp_occ, ih;
// Double excitations
// jk_sorted/eri_sorted list integral indices by decreasing magnitude,
// so iteration can stop as soon as both fall below tol (heat-bath).
for (p = 0; p < norb * norb * norb * norb; ++p) {
ih = jk_sorted[p];
int aaaa_bbbb_done = (fabs(jk[ih]) < tol);
if (!aaaa_bbbb_done) {
// unpack the flat index ih into (ip jp | kp lp)
lp = ih % norb;
ij = ih / norb;
kp = ij % norb;
ij = ij / norb;
jp = ij % norb;
ip = ij / norb;
// alpha,alpha->alpha,alpha
ip_occ = 0;
jp_occ = 0;
kp_occ = 0;
lp_occ = 0;
for (r = 0; r < neleca; ++r) {
int occ_index = occsa[r];
if (ip == occ_index) ip_occ = 1;
if (jp == occ_index) jp_occ = 1;
if (kp == occ_index) kp_occ = 1;
if (lp == occ_index) lp_occ = 1;
}
if (jp_occ && lp_occ && !ip_occ && !kp_occ) {
// NOTE(review): the reassignments of tmp/new_str below leak the
// first tmp and first new_str allocations — only the last two are
// freed.  Same pattern in the two branches that follow.
uint64_t *tmp = toggle_bit(stra, nset, jp);
uint64_t *new_str = toggle_bit(tmp, nset, ip);
tmp = toggle_bit(new_str, nset, lp);
new_str = toggle_bit(tmp, nset, kp);
for (iset = 0; iset < nset; ++iset) {
strs_add[strs_added * 2 * nset + iset] = new_str[iset];
strs_add[strs_added * 2 * nset + nset + iset] = strb[iset];
}
free(tmp);
free(new_str);
strs_added++;
}
// beta,beta->beta,beta
ip_occ = 0;
jp_occ = 0;
kp_occ = 0;
lp_occ = 0;
for (r = 0; r < nelecb; ++r) {
int occ_index = occsb[r];
if (ip == occ_index) ip_occ = 1;
if (jp == occ_index) jp_occ = 1;
if (kp == occ_index) kp_occ = 1;
if (lp == occ_index) lp_occ = 1;
}
if (jp_occ && lp_occ && !ip_occ && !kp_occ) {
uint64_t *tmp = toggle_bit(strb, nset, jp);
uint64_t *new_str = toggle_bit(tmp, nset, ip);
tmp = toggle_bit(new_str, nset, lp);
new_str = toggle_bit(tmp, nset, kp);
for (iset = 0; iset < nset; ++iset) {
strs_add[strs_added * 2 * nset + iset] = stra[iset];
strs_add[strs_added * 2 * nset + nset + iset] = new_str[iset];
}
free(tmp);
free(new_str);
strs_added++;
}
}
// alpha,beta->alpha,beta
ih = eri_sorted[p];
int aabb_done = (fabs(eri[ih]) < tol);
if (!aabb_done) {
lp = ih % norb;
ij = ih / norb;
kp = ij % norb;
ij = ij / norb;
jp = ij % norb;
ip = ij / norb;
ip_occ = 0;
jp_occ = 0;
kp_occ = 0;
lp_occ = 0;
for (r = 0; r < neleca; ++r) {
int occ_index = occsa[r];
if (ip == occ_index) ip_occ = 1;
if (jp == occ_index) jp_occ = 1;
}
for (r = 0; r < nelecb; ++r) {
int occ_index = occsb[r];
if (kp == occ_index) kp_occ = 1;
if (lp == occ_index) lp_occ = 1;
}
if (jp_occ && lp_occ && !ip_occ && !kp_occ) {
// NOTE(review): here the intermediate `tmp` from the alpha toggle
// is leaked when tmp is reused for the beta toggle.
uint64_t *tmp = toggle_bit(stra, nset, jp);
uint64_t *new_str_a = toggle_bit(tmp, nset, ip);
tmp = toggle_bit(strb, nset, lp);
uint64_t *new_str_b = toggle_bit(tmp, nset, kp);
for (iset = 0; iset < nset; ++iset) {
strs_add[strs_added * 2 * nset + iset] = new_str_a[iset];
strs_add[strs_added * 2 * nset + nset + iset] = new_str_b[iset];
}
free(tmp);
free(new_str_a);
free(new_str_b);
strs_added++;
}
}
// Break statement
if (aaaa_bbbb_done && aabb_done) {
break;
}
}
free(occsa);
free(occsb);
free(virsa);
free(virsb);
if (strs_added > max_strs_add) {
printf("\nError: Number of selected strings is greater than the size of the buffer array (%ld vs %ld).\n", strs_added, max_strs_add);
exit(EXIT_FAILURE);
}
} // end loop over determinants
free(focka);
free(fockb);
free(holes_a);
free(holes_b);
free(particles_a);
free(particles_b);
strs_add_size[0] = strs_added;
}
// Return a freshly malloc'd copy of `str` with the bit for orbital p
// flipped.  Orbital p lives in word nset - 1 - p/64, bit p%64 (words are
// stored most-significant orbital group first).  Caller frees the result.
uint64_t *toggle_bit(uint64_t *str, int nset, int p) {
    uint64_t *new_str = malloc(sizeof(uint64_t) * nset);
    memcpy(new_str, str, sizeof(uint64_t) * nset);
    new_str[nset - 1 - p / 64] ^= 1ULL << (p % 64);
    return new_str;
}
// Lexicographic comparison of two strings, most-significant word first:
// returns 1 if i > j, -1 if i < j, 0 if they are equal.
int order(uint64_t *strs_i, uint64_t *strs_j, int nset) {
    for (int k = 0; k < nset; ++k) {
        if (strs_i[k] != strs_j[k]) {
            return (strs_i[k] > strs_j[k]) ? 1 : -1;
        }
    }
    return 0;
}
// Recursive quick sort of string array indices
// Sorts the indices in `idx` by the lexicographic order of the strings
// they reference, writing the result into new_idx.  Indices whose string
// compares EQUAL to the pivot (other than the pivot itself) are dropped,
// so this also de-duplicates; nstrs_[0] is updated in place to the new
// (possibly smaller) count.
void qsort_idx(uint64_t *strs, uint64_t *idx, uint64_t *nstrs_, int nset, uint64_t *new_idx) {
size_t p;
uint64_t nstrs = nstrs_[0];
if (nstrs <= 1) {
// base case: 0 or 1 index, copy through unchanged
for (p = 0; p < nstrs; ++p) new_idx[p] = idx[p];
}
else {
// pivot: the last index in the current partition
uint64_t ref = idx[nstrs - 1];
uint64_t *group_lt = malloc(sizeof(uint64_t) * nstrs);
uint64_t *group_gt = malloc(sizeof(uint64_t) * nstrs);
uint64_t group_lt_nstrs = 0;
uint64_t group_gt_nstrs = 0;
// partition all remaining indices around the pivot; equal strings
// (c == 0) fall through and are discarded
for (p = 0; p < (nstrs - 1); ++p) {
uint64_t i = idx[p];
uint64_t *stri = strs + i * nset;
uint64_t *strj = strs + ref * nset;
int c = order(stri, strj, nset);
if (c == -1) {
group_lt[group_lt_nstrs] = i;
group_lt_nstrs++;
}
else if (c == 1) {
group_gt[group_gt_nstrs] = i;
group_gt_nstrs++;
}
}
uint64_t *new_idx_lt = malloc(sizeof(uint64_t) * group_lt_nstrs);
uint64_t *new_idx_gt = malloc(sizeof(uint64_t) * group_gt_nstrs);
// recurse on each side; the recursive calls may shrink the counts
qsort_idx(strs, group_lt, &group_lt_nstrs, nset, new_idx_lt);
qsort_idx(strs, group_gt, &group_gt_nstrs, nset, new_idx_gt);
nstrs = group_lt_nstrs + group_gt_nstrs + 1;
nstrs_[0] = nstrs;
// concatenate: sorted-less, pivot, sorted-greater
for (p = 0; p < nstrs; ++p) {
if (p < group_lt_nstrs) new_idx[p] = new_idx_lt[p];
else if (p == group_lt_nstrs) new_idx[p] = ref;
else new_idx[p] = new_idx_gt[p - group_lt_nstrs - 1];
}
free(new_idx_lt);
free(new_idx_gt);
free(group_lt);
free(group_gt);
}
}
// Helper that seeds qsort_idx with the identity permutation: on return
// sort_idx holds the indices of the unique strings in sorted order and
// nstrs_[0] holds the unique count.  (nset here is the per-string word
// count, as in qsort_idx.)
void argunique(uint64_t *strs, uint64_t *sort_idx, uint64_t *nstrs_, int nset) {
    uint64_t n = nstrs_[0];
    uint64_t *seed = malloc(sizeof(uint64_t) * n);
    for (uint64_t p = 0; p < n; ++p) {
        seed[p] = p;
    }
    qsort_idx(strs, seed, nstrs_, nset, sort_idx);
    free(seed);
}
|
GB_unaryop__minv_uint16_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint16_uint64
// op(A') function: GB_tran__minv_uint16_uint64
// C type: uint16_t
// A type: uint64_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 16)
// input (A) and output (C) scalar types for this kernel
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
// Cx [p]: access entry p of the output array
#define GB_CX(p) Cx [p]
// unary operator
// z = modular multiplicative inverse of x in 16-bit unsigned arithmetic
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 16) ;
// casting
#define GB_CASTING(z, x) \
uint16_t z = (uint16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT16 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Elementwise over all anz entries; each iteration expands GB_CAST_OP:
// cast Ax [p] from uint64_t to uint16_t, then apply MINV.
GrB_Info GB_unop__minv_uint16_uint64
(
uint16_t *restrict Cx,
const uint64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// The transpose loop comes from the included template, which applies
// GB_CAST_OP to each entry as it is moved from A into C.
GrB_Info GB_tran__minv_uint16_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
aomp_mappings.c | #include <stdio.h>
#include <omp.h>
#include <string.h>
//Shared Variables
// Matches the thread_limit(4) clause used on the target regions below.
int THREAD_LIMIT = 4;
// Overwritten at runtime with the device's default team count (see main).
int MAX_TEAMS = 128;
// Execution-mode flags; presumably compared against omp_ext_is_spmd_mode()
// results in the (truncated) verification code — TODO confirm.
int GENERIC = 0;
int SPMD = 1;
// Defaults for an AMD GPU; main() switches these to 128/32 when AOMP_GPU
// names an NVIDIA device (contains "sm").
int MAX_THREADS_PER_TEAM = 256;
int WARP_SIZE = 64;
/*
 * Function: recordError
 * Description: Increments the caller's error counter and reports the
 * offending value on stderr.  When `mask` is non-NULL the failure concerns
 * the active-thread mask (printed in hex); otherwise the int entry of
 * `array` at `iteration` is printed.
 */
void recordError(int* error , char *message, int iteration, int * array, unsigned long long *mask ){
   *error += 1;
   if (mask != NULL) {
      fprintf(stderr,"%s IS INCORRECT! Iteration: %d Value: %llx\n", message, iteration, mask[iteration]);
   } else {
      fprintf(stderr,"%s IS INCORRECT! Iteration: %d Value: %d\n", message, iteration, array[iteration]);
   }
}
/*
 * Exercises AOMP's OpenMP target-offload mapping extensions (omp_ext_*)
 * under three launch shapes and verifies that thread, team, warp, lane,
 * active-mask and SPMD-mode queries return the values implied by each
 * launch configuration.  Returns 0 on success, 1 on any mismatch.
 *
 * FIX: the two "target teams distribute parallel for" regions wrapped the
 * loop in a compound statement.  A combined worksharing-loop construct must
 * be immediately followed by the for-loop (OpenMP canonical loop form);
 * with the braces present the code is non-conforming and the iterations
 * would not be distributed, which the verification below depends on.
 */
int main()
{
  //Determine which GPU type (NVIDIA or AMD)
  char* nvidia= "sm";
  char* aomp_gpu= getenv("AOMP_GPU");
  int isAMDGPU = 1;
  if(aomp_gpu && strstr(aomp_gpu, nvidia) != NULL)
    isAMDGPU = 0;

  // a hacky way to know the default number of teams
  #pragma omp target teams map(tofrom:MAX_TEAMS)
  {
    if (omp_get_team_num() == 0)
      MAX_TEAMS = omp_get_num_teams();
  }
  fprintf(stderr, "MAX_TEAMS: %d\n", MAX_TEAMS);

  //Logic for correct shared variables - AMD vs NVIDIA GPU
  if(!isAMDGPU){
    printf("%s\n", getenv("AOMP_GPU"));
    MAX_THREADS_PER_TEAM = 128;
    WARP_SIZE = 32;
  }

  int N = 128;           // iterations verified per launch
  int NN = 1024;         // array capacity (covers one value per team)
  int thread_num[NN];
  int team_num[NN];
  int default_dev[NN];
  int warp_id[NN];
  int lane_id[NN];
  int smid[NN];
  int is_spmd_mode[NN];
  int master_thread_id[NN];
  int num_teams[NN];
  int num_threads[NN];
  unsigned long long active_mask[NN];
  unsigned long long mask = 0;
  int i;
  int correctTeamNum = -1;
  int correctNumTeams = -1;
  int correctWarpId = -1;
  int remainder = 0;
  int errors = 0;

  //Initialize arrays (-1 / 0 sentinels so untouched slots are detectable)
  for (i=0; i<NN; i++)
    active_mask[i] = 0;
  for (i=0; i<NN; i++)
    thread_num[i]=team_num[i]=default_dev[i]=warp_id[i]=lane_id[i]=master_thread_id[i]=smid[i]=is_spmd_mode[i]=num_threads[i]=num_teams[i] = -1;

  fprintf(stderr,"#pragma omp target teams distribute parallel for thread_limit(4)\n");
  // FIX: loop follows the combined construct directly (no braces)
  #pragma omp target teams distribute parallel for thread_limit(4)
  for (int j = 0; j< N; j++) {
    thread_num[j] = omp_get_thread_num();
    num_threads[j] = omp_get_num_threads();
    team_num[j] = omp_get_team_num();
    num_teams[j] = omp_get_num_teams();
    default_dev[j] = omp_get_default_device();
    warp_id[j] = omp_ext_get_warp_id();
    lane_id[j] = omp_ext_get_lane_id();
    active_mask[j] = omp_ext_get_active_threads_mask();
    smid[j] = omp_ext_get_smid();
    master_thread_id[j] = omp_ext_get_master_thread_id();
    is_spmd_mode[j] = omp_ext_is_spmd_mode();
  }

  fprintf(stderr," i thrd# team# dev# warp# lane# MastThrd smid SPMD num_threads num_teams ActiveMask\n");
  for (i=0; i<N; i++)
    fprintf(stderr," %4d %4d %4d %4d %4d %4d %4d %4d %4d %10d %10d %16llx\n",
            i,thread_num[i],team_num[i],default_dev[i],warp_id[i],lane_id[i],master_thread_id[i],smid[i],is_spmd_mode[i],num_threads[i], num_teams[i],active_mask[i]);

  //Verify Results - #pragma omp target teams distribute parallel for thread_limit(4)
  for (i = 0; i < N; i++){
    //check thread #: iterations distribute round-robin within a team of 4
    if (thread_num[i] != i % THREAD_LIMIT)
      recordError(&errors, "THREAD NUMBER", i, thread_num, NULL);
    //check team #: advances every THREAD_LIMIT iterations, wraps on AMD
    if (i % THREAD_LIMIT == 0){
      correctTeamNum++;
      if(isAMDGPU)
        correctTeamNum = correctTeamNum % MAX_TEAMS;
    }
    if (team_num[i] != correctTeamNum)
      recordError(&errors, "TEAM NUMBER", i, team_num, NULL);
    //check device #, We use default device (0) for testing
    if (default_dev[i] != 0)
      recordError(&errors, "DEVICE NUMBER", i, default_dev, NULL);
    //check warp #: a 4-thread team occupies only warp 0
    if (warp_id[i] != 0)
      recordError(&errors, "WARP NUMBER", i, warp_id, NULL);
    //check lane #
    if (lane_id[i] != i % THREAD_LIMIT)
      recordError(&errors, "LANE NUMBER", i, lane_id, NULL);
    //check master thread #
    if (master_thread_id[i] != 0 )
      recordError(&errors, "MASTER THREAD NUMBER", i, master_thread_id, NULL);
    //check SPMD mode #
    if (is_spmd_mode[i] != SPMD )
      recordError(&errors, "SPMD NUMBER", i, is_spmd_mode, NULL);
    //check num threads
    if (num_threads[i] != THREAD_LIMIT )
      recordError(&errors, "NUM THREADS", i, num_threads, NULL);
    //check num teams
    //If number of iterations is not divisible by THREAD_LIMIT get the ceiling
    if(N % THREAD_LIMIT != 0)
      correctNumTeams = ((N + num_threads[i]) / num_threads[i]);
    else
      correctNumTeams = N / THREAD_LIMIT;
    if (correctNumTeams > MAX_TEAMS && isAMDGPU)
      correctNumTeams = MAX_TEAMS;
    if (num_teams[i] != correctNumTeams)
      recordError(&errors, "NUM TEAMS", i, num_teams, NULL);
    //check active mask
    mask = 0;
    if(N % THREAD_LIMIT != 0){
      remainder = N % THREAD_LIMIT;
      //set bit mask to proper value
      for (int j = 0 ; j < remainder; j++){
        mask = mask << 1;
        mask = mask + 1;
      }
    }
    //Mask for last evenly divided iteration
    if (i < N - remainder){
      mask = 0xf;
    }
    if (active_mask[i] != mask)
      recordError(&errors, "ACTIVE MASK", i, NULL, active_mask);
  }

  //Reset Arrays
  for (i=0; i<NN; i++)
    active_mask[i] = 0;
  for (i=0; i<NN; i++)
    thread_num[i]=team_num[i]=default_dev[i]=warp_id[i]=lane_id[i]=master_thread_id[i]=smid[i]=is_spmd_mode[i]=num_threads[i]=num_teams[i] = -1;

  fprintf(stderr,"#pragma omp target teams distribute parallel for\n");
  // FIX: loop follows the combined construct directly (no braces)
  #pragma omp target teams distribute parallel for
  for (int j = 0; j< N; j++) {
    thread_num[j] = omp_get_thread_num();
    num_threads[j] = omp_get_num_threads();
    team_num[j] = omp_get_team_num();
    num_teams[j] = omp_get_num_teams();
    default_dev[j] = omp_get_default_device();
    warp_id[j] = omp_ext_get_warp_id();
    lane_id[j] = omp_ext_get_lane_id();
    active_mask[j] = omp_ext_get_active_threads_mask();
    smid[j] = omp_ext_get_smid();
    master_thread_id[j] = omp_ext_get_master_thread_id();
    is_spmd_mode[j] = omp_ext_is_spmd_mode();
  }

  fprintf(stderr," i thrd# team# dev# warp# lane# MastThrd smid SPMD num_threads num_teams ActiveMask\n");
  for (i=0; i<N; i++)
    fprintf(stderr," %4d %4d %4d %4d %4d %4d %4d %4d %4d %10d %10d %16llx\n",
            i,thread_num[i],team_num[i],default_dev[i],warp_id[i],lane_id[i],master_thread_id[i],smid[i],is_spmd_mode[i],num_threads[i], num_teams[i],active_mask[i]);

  //Verify Results - #pragma omp target teams distribute parallel for
  correctTeamNum = -1;
  correctNumTeams = -1;
  //Verify Results
  for (i = 0; i < N; i++){
    //check thread #: full default team size now applies
    if (thread_num[i] != i % MAX_THREADS_PER_TEAM)
      recordError(&errors, "THREAD NUMBER", i, thread_num, NULL);
    //check team #
    if (i % MAX_THREADS_PER_TEAM == 0){
      correctTeamNum++;
      correctTeamNum = correctTeamNum % MAX_TEAMS;
    }
    if (team_num[i] != correctTeamNum)
      recordError(&errors, "TEAM NUMBER", i, team_num, NULL);
    //check device #, We use default device (0) for testing
    if (default_dev[i] != 0)
      recordError(&errors, "DEVICE NUMBER", i, default_dev, NULL);
    //check warp #: advances every WARP_SIZE iterations, wraps per team
    if (i % WARP_SIZE == 0){
      correctWarpId++;
      correctWarpId = correctWarpId % (MAX_THREADS_PER_TEAM/WARP_SIZE);
    }
    if (warp_id[i] != correctWarpId)
      recordError(&errors, "WARP NUMBER", i, warp_id, NULL);
    //check lane #
    if (lane_id[i] != i % WARP_SIZE)
      recordError(&errors, "LANE NUMBER", i, lane_id, NULL);
    //check master thread #: master thread sits in the last warp of the team
    if (master_thread_id[i] != MAX_THREADS_PER_TEAM - WARP_SIZE)
      recordError(&errors, "MASTER THREAD NUMBER", i, master_thread_id, NULL);
    //check SPMD mode #
    if (is_spmd_mode[i] != SPMD )
      recordError(&errors, "SPMD NUMBER", i, is_spmd_mode, NULL);
    //check num threads
    if (num_threads[i] != MAX_THREADS_PER_TEAM )
      recordError(&errors, "NUM THREADS", i, num_threads, NULL);
    //check num teams
    //If number of iterations is not divisible by MAX_THREADS_PER_TEAM get the ceiling
    if(N % MAX_THREADS_PER_TEAM != 0)
      correctNumTeams = ((N + num_threads[i]) / num_threads[i]);
    else
      correctNumTeams = N / MAX_THREADS_PER_TEAM;
    if (num_teams[i] != correctNumTeams)
      recordError(&errors, "NUM TEAMS", i, num_teams, NULL);
    //check active mask
    remainder = 0;
    mask = 0;
    //Set mask for 64 or fewer active threads in first warp
    if (N < WARP_SIZE + 1){
      remainder = N % WARP_SIZE;
    }
    else
      remainder = (N % MAX_THREADS_PER_TEAM) % WARP_SIZE;
    //Set mask for warps with full (64) active threads
    if (i < N - remainder){
      if(isAMDGPU)
        mask = 0xffffffffffffffff;
      else
        mask = 0xffffffff;
    }
    else{ //set mask for iterations with non full warps
      mask = 0;
      for (int j = 0 ; j < remainder; j++){
        mask = mask << 1;
        mask = mask + 1;
      }
    }
    if (active_mask[i] != mask){
      recordError(&errors, "ACTIVE MASK", i, NULL, active_mask);
    }
  }

  //Reset Arrays
  for (i=0; i<NN; i++)
    active_mask[i] = 0;
  for (i=0; i<NN; i++)
    thread_num[i]=team_num[i]=default_dev[i]=warp_id[i]=lane_id[i]=master_thread_id[i]=smid[i]=is_spmd_mode[i]=num_threads[i]=num_teams[i] = -1;

  fprintf(stderr,"#pragma omp target teams \n");
  // generic (non-SPMD) launch: one initial thread per team; a compound
  // statement is valid here since "target teams" is not a loop construct
  #pragma omp target teams
  {
    int j = omp_get_team_num();
    thread_num[j] = omp_get_thread_num();
    num_threads[j] = omp_get_num_threads();
    team_num[j] = omp_get_team_num();
    num_teams[j] = omp_get_num_teams();
    default_dev[j] = omp_get_default_device();
    warp_id[j] = omp_ext_get_warp_id();
    lane_id[j] = omp_ext_get_lane_id();
    active_mask[j] = omp_ext_get_active_threads_mask();
    smid[j] = omp_ext_get_smid();
    master_thread_id[j] = omp_ext_get_master_thread_id();
    is_spmd_mode[j] = omp_ext_is_spmd_mode();
  }

  fprintf(stderr," i thrd# team# dev# warp# lane# MastThrd smid SPMD num_threads num_teams ActiveMask\n");
  for (i=0; i<N; i++)
    fprintf(stderr," %4d %4d %4d %4d %4d %4d %4d %4d %4d %10d %10d %16llx\n",
            i,thread_num[i],team_num[i],default_dev[i],warp_id[i],lane_id[i],master_thread_id[i],smid[i],is_spmd_mode[i],num_threads[i],num_teams[i],active_mask[i]);

  //Verify Results - #pragma omp target teams
  correctTeamNum = -1;
  correctNumTeams = -1;
  //Verify Results
  for (i = 0; i < N; i++){
    //Only check iterations up to MAX_TEAMS
    if(i < MAX_TEAMS){
      //check thread #
      if (thread_num[i] != 0)
        recordError(&errors, "THREAD NUMBER", i, thread_num, NULL);
      //check team #
      if (team_num[i] != i)
        recordError(&errors, "TEAM NUMBER", i, team_num, NULL);
      //check device #, We use default device (0) for testing
      if (default_dev[i] != 0)
        recordError(&errors, "DEVICE NUMBER", i, default_dev, NULL);
      //check warp #: the single initial thread runs in the team's last warp
      if (warp_id[i] != (MAX_THREADS_PER_TEAM - WARP_SIZE) / WARP_SIZE)
        recordError(&errors, "WARP NUMBER", i, warp_id, NULL);
      //check lane #
      if (lane_id[i] != 0)
        recordError(&errors, "LANE NUMBER", i, lane_id, NULL);
      //check master thread #
      if (master_thread_id[i] != MAX_THREADS_PER_TEAM - WARP_SIZE)
        recordError(&errors, "MASTER THREAD NUMBER", i, master_thread_id, NULL);
      //check SPMD mode #
      if (is_spmd_mode[i] != GENERIC )
        recordError(&errors, "SPMD NUMBER", i, is_spmd_mode, NULL);
      //check num threads
      if (num_threads[i] != 1 )
        recordError(&errors, "NUM THREADS", i, num_threads, NULL);
      //check num teams
      if (num_teams[i] != MAX_TEAMS )
        recordError(&errors, "NUM TEAMS", i, num_teams, NULL);
      //check active mask: only the master lane is active
      remainder = 0;
      mask = 1;
      if (active_mask[i] != mask){
        recordError(&errors, "ACTIVE MASK", i, NULL, active_mask);
      }
    }
    else{
      if(thread_num[i] != -1 || team_num[i] != -1 || default_dev[i] != -1 || warp_id[i] != -1 || lane_id[i] != -1 || master_thread_id[i] != -1 || is_spmd_mode[i] != -1 || num_threads[i] != -1 || num_teams[i] != -1 || active_mask[i] != 0){
        fprintf(stderr, "Data after iteration %d is changed and should be untouched!!\n", MAX_TEAMS - 1);
        errors++;
      }
    }
  }

  //Print results and return total errors
  if(!errors){
    fprintf(stderr, "Success\n");
    return 0;
  }
  else {
    fprintf(stderr, "Fail\n");
    fprintf(stderr, "Errors: %d\n", errors);
    return 1;
  }
}
|
fill_nr_3c.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <stdio.h>
#include "config.h"
#include "cint.h"
int GTOmax_shell_dim(int *ao_loc, int *shls_slice, int ncenter);
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
int *atm, int natm, int *bas, int nbas, double *env);
/*
 * out[naoi,naoj,naok,comp] in F-order
 * s1: no permutation symmetry; writes the (ish, jsh) slab for all ksh.
 */
void GTOnr3c_fill_s1(int (*intor)(), double *out, double *buf,
                     int comp, int ish, int jsh,
                     int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                     int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int jsh0 = shls_slice[2];
        const int ksh0 = shls_slice[4];
        const int ksh1 = shls_slice[5];
        const size_t naoi = ao_loc[shls_slice[1]] - ao_loc[ish0];
        const size_t naoj = ao_loc[shls_slice[3]] - ao_loc[jsh0];
        const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
        const size_t nij = naoi * naoj;
        const int dims[] = {naoi, naoj, naok};

        /* absolute shell ids and AO offsets of this (i,j) pair */
        const int ish_abs = ish + ish0;
        const int jsh_abs = jsh + jsh0;
        const int ip = ao_loc[ish_abs] - ao_loc[ish0];
        const int jp = ao_loc[jsh_abs] - ao_loc[jsh0];
        double *pout = out + jp * naoi + ip;

        int shls[3] = {ish_abs, jsh_abs, 0};
        int ksh;
        for (ksh = ksh0; ksh < ksh1; ksh++) {
                int k0 = ao_loc[ksh] - ao_loc[ksh0];
                shls[2] = ksh;
                /* integral engine writes directly into the k-th slab */
                (*intor)(pout + k0 * nij, dims, shls,
                         atm, natm, bas, nbas, env, cintopt, buf);
        }
}
/*
 * Scatter one shell block (di x dj x dk x comp, with i fastest in `in`)
 * into lower-triangular packed ij storage, for the i-shell strictly
 * above the j-shell (all dj columns are kept).
 * Row i of the output starts (ip+1+i) doubles after row i-1, which is
 * the row-length progression of packed triangular storage.
 */
static void dcopy_s2_igtj(double *out, double *in, int comp,
                          int ip, int nij, int nijk, int di, int dj, int dk)
{
        const size_t dij = (size_t)di * dj;
        const size_t row_step = (size_t)ip + 1;
        int ic, k, i, j;
        for (ic = 0; ic < comp; ic++, out += nijk, in += dij * dk) {
                for (k = 0; k < dk; k++) {
                        double *dst = out + (size_t)k * nij;
                        double *src = in + (size_t)k * dij;
                        for (i = 0; i < di; i++) {
                                for (j = 0; j < dj; j++) {
                                        dst[j] = src[j*di + i];
                                }
                                dst += row_step + i;
                        }
                }
        }
}
/*
 * Same packed-triangular scatter as dcopy_s2_igtj, but for a diagonal
 * shell pair (i-shell == j-shell): only the j <= i half of each di x dj
 * tile is stored, the rest being redundant by symmetry.
 */
static void dcopy_s2_ieqj(double *out, double *in, int comp,
                          int ip, int nij, int nijk, int di, int dj, int dk)
{
        const size_t dij = (size_t)di * dj;
        const size_t row_step = (size_t)ip + 1;
        int ic, k, i, j;
        for (ic = 0; ic < comp; ic++, out += nijk, in += dij * dk) {
                for (k = 0; k < dk; k++) {
                        double *dst = out + (size_t)k * nij;
                        double *src = in + (size_t)k * dij;
                        for (i = 0; i < di; i++) {
                                for (j = 0; j <= i; j++) {
                                        dst[j] = src[j*di + i];
                                }
                                dst += row_step + i;
                        }
                }
        }
}
/*
 * out[comp,naok,nij] in C-order
 * nij = i1*(i1+1)/2 - i0*(i0+1)/2
 * [ \ ]
 * [**** ]
 * [***** ]
 * [*****. ] <= . may not be filled, if jsh-upper-bound < ish-upper-bound
 * [ \]
 */
// s2ij: fill with i>=j triangular symmetry in the (i,j) AO pair index.
// Shell pairs with ip < jp lie strictly above the diagonal and are skipped;
// diagonal shell pairs store only the lower triangle of their tile.
void GTOnr3c_fill_s2ij(int (*intor)(), double *out, double *buf,
int comp, int ish, int jsh,
int *shls_slice, int *ao_loc, CINTOpt *cintopt,
int *atm, int natm, int *bas, int nbas, double *env)
{
const int ish0 = shls_slice[0];
const int ish1 = shls_slice[1];
const int jsh0 = shls_slice[2];
ish += ish0;
jsh += jsh0;
// NOTE: ip is the absolute AO index (no - ao_loc[ish0]) because the
// triangular offset below is computed against absolute indices;
// jp is relative to the j-slice start.
const int ip = ao_loc[ish];
const int jp = ao_loc[jsh] - ao_loc[jsh0];
if (ip < jp) {
// entire block lies above the diagonal: nothing to store
return;
}
const int ksh0 = shls_slice[4];
const int ksh1 = shls_slice[5];
const int i0 = ao_loc[ish0];
const int i1 = ao_loc[ish1];
const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
// off/nij: packed triangular row offsets, per the formula in the header
const size_t off = i0 * (i0 + 1) / 2;
const size_t nij = i1 * (i1 + 1) / 2 - off;
const size_t nijk = nij * naok;
const int di = ao_loc[ish+1] - ao_loc[ish];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
out += ip * (ip + 1) / 2 - off + jp;
int ksh, dk, k0;
int shls[3];
// first dk is the maximum shell dimension: sizes the scratch region so
// `cache` starts past the largest possible integral block in buf
dk = GTOmax_shell_dim(ao_loc, shls_slice, 3);
double *cache = buf + di * dj * dk * comp;
shls[0] = ish;
shls[1] = jsh;
for (ksh = ksh0; ksh < ksh1; ksh++) {
shls[2] = ksh;
dk = ao_loc[ksh+1] - ao_loc[ksh];
k0 = ao_loc[ksh ] - ao_loc[ksh0];
// compute into buf, then repack into the triangular layout
(*intor)(buf, NULL, shls, atm, natm, bas, nbas, env, cintopt, cache);
if (ip != jp) {
dcopy_s2_igtj(out+k0*nij, buf, comp, ip, nij, nijk, di, dj, dk);
} else {
dcopy_s2_ieqj(out+k0*nij, buf, comp, ip, nij, nijk, di, dj, dk);
}
}
}
/* Placeholder for the s2jk storage scheme: not implemented, aborts. */
void GTOnr3c_fill_s2jk(int (*intor)(), double *out, double *buf,
                       int comp, int ish, int jsh,
                       int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        fputs("GTOnr3c_fill_s2jk not implemented\n", stderr);
        exit(1);
}
/*
 * Driver: iterates over all (ish, jsh) shell pairs of the slice and calls
 * `fill` to compute and store the 3-center integrals for each pair.
 * Pairs are distributed dynamically over OpenMP threads; each thread owns
 * one scratch buffer (integral block + libcint cache).
 *
 * FIXES:
 *  - malloc result is now checked before use (it was dereferenced
 *    unconditionally inside fill);
 *  - nish/njsh/di/cache_size are listed in shared(): OpenMP 5.0 dropped
 *    the predetermined-shared rule for const-qualified variables, so
 *    default(none) made newer compilers reject this region.
 */
void GTOnr3c_drv(int (*intor)(), void (*fill)(), double *eri, int comp,
                 int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                 int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int nish = ish1 - ish0;
        const int njsh = jsh1 - jsh0;
        /* largest shell dimension: upper bound for one integral batch */
        const int di = GTOmax_shell_dim(ao_loc, shls_slice, 3);
        const int cache_size = GTOmax_cache_size(intor, shls_slice, 3,
                                                 atm, natm, bas, nbas, env);
#pragma omp parallel default(none) \
        shared(intor, fill, eri, comp, shls_slice, ao_loc, cintopt, \
               atm, natm, bas, nbas, env, nish, njsh, di, cache_size)
{
        int ish, jsh, ij;
        /* per-thread scratch: integral block followed by libcint cache */
        double *buf = malloc(sizeof(double) * (di*di*di*comp + cache_size));
        if (buf == NULL) {
                fprintf(stderr, "malloc failed in GTOnr3c_drv\n");
                exit(1);  /* cannot simply skip: the omp-for barrier would deadlock */
        }
#pragma omp for schedule(dynamic)
        for (ij = 0; ij < nish*njsh; ij++) {
                ish = ij / njsh;
                jsh = ij % njsh;
                (*fill)(intor, eri, buf, comp, ish, jsh, shls_slice, ao_loc,
                        cintopt, atm, natm, bas, nbas, env);
        }
        free(buf);
}
}
|
sparse.c | /*
Copyright (c) 2013, Intel Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/*********************************************************************************
NAME: sparse
PURPOSE: This program tests the efficiency with which a sparse matrix
vector multiplication is carried out
USAGE: The program takes as input the number of threads, the 2log of the linear
size of the 2D grid (equalling the 2log of the square root of the order
of the sparse matrix), the radius of the difference stencil, and the number
of times the matrix-vector multiplication is carried out.
<progname> <# threads> <# iterations> <2log root-of-matrix-order> <radius>
The output consists of diagnostics to make sure the
algorithm worked, and of timing statistics.
FUNCTIONS CALLED:
Other than OpenMP or standard C functions, the following
functions are used in this program:
wtime()
bail_out()
reverse()
NOTES:
HISTORY: Written by Rob Van der Wijngaart, August 2006.
Updated by RvdW to parallelize matrix generation, March 2007.
Updated by RvdW to fix verification bug, February 2013
Updated by RvdW to sort matrix elements to reflect traditional CSR storage,
August 2013
***********************************************************************************/
#include <par-res-kern_general.h>
#include <par-res-kern_omp.h>
/* linearize the grid index */
#define LIN(i,j) (i+((j)<<lsize))
/* if the scramble flag is set, convert all (linearized) grid indices by
reversing their bits; if not, leave the grid indices alone */
#if SCRAMBLE
#define REVERSE(a,b) reverse((a),(b))
#else
#define REVERSE(a,b) (a)
#endif
#define BITS_IN_BYTE 8
static u64Int reverse(register u64Int, int);
static int compare(const void *el1, const void *el2);
/*
 * Sparse matrix-vector multiply benchmark (OpenMP version).
 * Builds a CSR-like matrix from a star difference stencil on a 2^lsize
 * square periodic grid, repeatedly multiplies it by a growing vector,
 * and validates the accumulated result against a closed-form checksum.
 *
 * FIXES:
 *  - `size = 1<<lsize` was computed before lsize was validated; a negative
 *    shift count is undefined behavior, so the check now comes first;
 *  - the stencil-radius error message printed `size` instead of `radius`.
 */
int main(int argc, char **argv){
  int iter, r;                /* dummies */
  int lsize;                  /* logarithmic linear size of grid */
  int lsize2;                 /* logarithmic size of grid */
  int size;                   /* linear size of grid */
  s64Int size2;               /* matrix order (=total # points in grid) */
  int radius,                 /* stencil parameters */
      stencil_size;
  s64Int row, col, first, last; /* dummies */
  s64Int i, j;                /* dummies */
  int iterations;             /* number of times the multiplication is done */
  s64Int elm;                 /* sequence number of matrix nonzero */
  s64Int nent;                /* number of nonzero entries */
  double sparsity;            /* fraction of non-zeroes in matrix */
  double sparse_time,         /* timing parameters */
         avgtime;
  double * RESTRICT matrix;   /* sparse matrix entries */
  double * RESTRICT vector;   /* vector multiplying the sparse matrix */
  double * RESTRICT result;   /* computed matrix-vector product */
  double temp;                /* temporary scalar storing reduction data */
  double vector_sum;          /* checksum of result */
  double reference_sum;       /* checksum of "rhs" */
  double epsilon = 1.e-8;     /* error tolerance */
  s64Int * RESTRICT colIndex; /* column indices of sparse matrix entries */
  int nthread_input,          /* thread parameters */
      nthread;
  int num_error=0;            /* flag that signals that requested and
                                 obtained numbers of threads are the same */
  size_t vector_space,        /* variables used to hold prk_malloc sizes */
         matrix_space,
         index_space;

  printf("Parallel Research Kernels version %s\n", PRKVERSION);
  printf("OpenMP Sparse matrix-vector multiplication\n");

  if (argc != 5) {
    printf("Usage: %s <# threads> <# iterations> <2log grid size> <stencil radius>\n",*argv);
    exit(EXIT_FAILURE);
  }

  /* Take number of threads to request from command line */
  nthread_input = atoi(*++argv);
  if ((nthread_input < 1) || (nthread_input > MAX_THREADS)) {
    printf("ERROR: Invalid number of threads: %d\n", nthread_input);
    exit(EXIT_FAILURE);
  }
  omp_set_num_threads(nthread_input);

  iterations = atoi(*++argv);
  if (iterations < 1){
    printf("ERROR: Iterations must be positive : %d \n", iterations);
    exit(EXIT_FAILURE);
  }

  lsize = atoi(*++argv);
  /* FIX: validate before shifting -- shift by a negative count is UB */
  if (lsize <0) {
    printf("ERROR: Log of grid size must be greater than or equal to zero: %d\n",
           (int) lsize);
    exit(EXIT_FAILURE);
  }
  lsize2 = 2*lsize;
  size = 1<<lsize;
  /* compute number of points in the grid */
  size2 = size*size;

  radius = atoi(*++argv);
  if (radius <0) {
    /* FIX: report the offending radius, not the grid size */
    printf("ERROR: Stencil radius must be non-negative: %d\n", radius);
    exit(EXIT_FAILURE);
  }

  /* emit error if (periodic) stencil overlaps with itself */
  if (size <2*radius+1) {
    printf("ERROR: Grid extent %d smaller than stencil diameter 2*%d+1= %d\n",
           size, radius, radius*2+1);
    exit(EXIT_FAILURE);
  }

  /* compute total size of star stencil in 2D */
  stencil_size = 4*radius+1;
  /* sparsity follows from number of non-zeroes per row */
  sparsity = (double)(4*radius+1)/(double)size2;

  /* compute total number of non-zeroes */
  nent = size2*stencil_size;

  matrix_space = nent*sizeof(double);
  if (matrix_space/sizeof(double) != nent) {
    printf("ERROR: Cannot represent space for matrix: %zu\n", matrix_space);
    exit(EXIT_FAILURE);
  }
  matrix = (double *) prk_malloc(matrix_space);
  if (!matrix) {
    printf("ERROR: Could not allocate space for sparse matrix: "FSTR64U"\n", nent);
    exit(EXIT_FAILURE);
  }

  vector_space = 2*size2*sizeof(double);
  if (vector_space/sizeof(double) != 2*size2) {
    printf("ERROR: Cannot represent space for vectors: %zu\n", vector_space);
    exit(EXIT_FAILURE);
  }
  vector = (double *) prk_malloc(vector_space);
  if (!vector) {
    printf("ERROR: Could not allocate space for vectors: %d\n", (int)(2*size2));
    exit(EXIT_FAILURE);
  }
  result = vector + size2;

  index_space = nent*sizeof(s64Int);
  if (index_space/sizeof(s64Int) != nent) {
    printf("ERROR: Cannot represent space for column indices: %zu\n", index_space);
    exit(EXIT_FAILURE);
  }
  colIndex = (s64Int *) prk_malloc(index_space);
  if (!colIndex) {
    printf("ERROR: Could not allocate space for column indices: "FSTR64U"\n",
           nent*sizeof(s64Int));
    exit(EXIT_FAILURE);
  }

  #pragma omp parallel private (row, col, elm, first, last, iter)
  {
  #pragma omp master
  {
  nthread = omp_get_num_threads();
  if (nthread != nthread_input) {
    num_error = 1;
    printf("ERROR: number of requested threads %d does not equal ",
           nthread_input);
    printf("number of spawned threads %d\n", nthread);
  }
  else {
    printf("Number of threads = %16d\n",nthread_input);
    printf("Matrix order = "FSTR64U"\n", size2);
    printf("Stencil diameter = %16d\n", 2*radius+1);
    printf("Sparsity = %16.10lf\n", sparsity);
    printf("Number of iterations = %16d\n", iterations);
#if SCRAMBLE
    printf("Using scrambled indexing\n");
#else
    printf("Using canonical indexing\n");
#endif
  }
  }
  bail_out(num_error);

  /* initialize the input and result vectors */
  #pragma omp for
  for (row=0; row<size2; row++) result[row] = vector[row] = 0.0;

  /* fill matrix with nonzeroes corresponding to difference stencil. We use the
     scrambling for reordering the points in the grid. */
  #pragma omp for private (i,j,r)
  for (row=0; row<size2; row++) {
    j = row/size; i=row%size;
    elm = row*stencil_size;
    colIndex[elm] = REVERSE(LIN(i,j),lsize2);
    for (r=1; r<=radius; r++, elm+=4) {
      colIndex[elm+1] = REVERSE(LIN((i+r)%size,j),lsize2);
      colIndex[elm+2] = REVERSE(LIN((i-r+size)%size,j),lsize2);
      colIndex[elm+3] = REVERSE(LIN(i,(j+r)%size),lsize2);
      colIndex[elm+4] = REVERSE(LIN(i,(j-r+size)%size),lsize2);
    }
    /* sort colIndex to make sure the compressed row accesses
       vector elements in increasing order */
    qsort(&(colIndex[row*stencil_size]), stencil_size, sizeof(s64Int), compare);
    for (elm=row*stencil_size; elm<(row+1)*stencil_size; elm++)
      matrix[elm] = 1.0/(double)(colIndex[elm]+1);
  }

  for (iter=0; iter<=iterations; iter++) {
    /* start timer after a warmup iteration */
    if (iter == 1) {
      #pragma omp barrier
      #pragma omp master
      {
        sparse_time = wtime();
      }
    }

    /* fill vector */
    #pragma omp for
    for (row=0; row<size2; row++) vector[row] += (double) (row+1);

    /* do the actual matrix-vector multiplication */
    #pragma omp for
    for (row=0; row<size2; row++) {
      first = stencil_size*row; last = first+stencil_size-1;
      temp=0.0;
      /* #pragma omp simd reduction(+:temp) */
      for (col=first; col<=last; col++) {
        temp += matrix[col]*vector[colIndex[col]];
      }
      result[row] += temp;
    }
  } /* end of iterations */

  #pragma omp barrier
  #pragma omp master
  {
    sparse_time = wtime() - sparse_time;
  }

  } /* end of parallel region */

  /* verification test */
  reference_sum = 0.5 * (double) nent * (double) (iterations+1) *
                  (double) (iterations +2);

  vector_sum = 0.0;
  for (row=0; row<size2; row++) vector_sum += result[row];
  if (ABS(vector_sum-reference_sum) > epsilon) {
    printf("ERROR: Vector sum = %lf, Reference vector sum = %lf\n",
           vector_sum, reference_sum);
    exit(EXIT_FAILURE);
  }
  else {
    printf("Solution validates\n");
#if VERBOSE
    printf("Reference sum = %lf, vector sum = %lf\n",
           reference_sum, vector_sum);
#endif
  }

  avgtime = sparse_time/iterations;
  printf("Rate (MFlops/s): %lf Avg time (s): %lf\n",
         1.0E-06 * (2.0*nent)/avgtime, avgtime);

  exit(EXIT_SUCCESS);
}
/* Code below reverses bits in unsigned integer stored in a 64-bit word.
Bit reversal is with respect to the largest integer that is going to be
processed for the particular run of the code, to make sure the reversal
constitutes a true permutation. Hence, the final result needs to be shifted
to the right.
Example: if largest integer being processed is 0x000000ff = 255 =
0000...0011111111 (binary), then the unshifted reversal of 0x00000006 = 6 =
0000...0000000110 (binary) would be 011000000...0000 = 3*2^61, which is
outside the range of the original sequence 0-255. Setting shift_in_bits to
2log(256) = 8, the final result is shifted the the right by 64-8=56 bits,
so we get 000...0001100000 (binary) = 96, which is within the proper range */
u64Int reverse(register u64Int x, int shift_in_bits){
  /* classic bit-reversal network: swap adjacent groups of
     1, 2, 4, 8, 16 and finally 32 bits */
  x = ((x & 0x5555555555555555ULL) << 1)  | ((x >> 1)  & 0x5555555555555555ULL);
  x = ((x & 0x3333333333333333ULL) << 2)  | ((x >> 2)  & 0x3333333333333333ULL);
  x = ((x & 0x0f0f0f0f0f0f0f0fULL) << 4)  | ((x >> 4)  & 0x0f0f0f0f0f0f0f0fULL);
  x = ((x & 0x00ff00ff00ff00ffULL) << 8)  | ((x >> 8)  & 0x00ff00ff00ff00ffULL);
  x = ((x & 0x0000ffff0000ffffULL) << 16) | ((x >> 16) & 0x0000ffff0000ffffULL);
  x = ((x & 0x00000000ffffffffULL) << 32) | (x >> 32);
  /* shift back so the result stays within 0 .. 2^shift_in_bits - 1 */
  return (x >> ((sizeof(u64Int)*BITS_IN_BYTE - shift_in_bits)));
}
/* qsort comparator for s64Int values, ascending */
int compare(const void *el1, const void *el2) {
  const s64Int a = *(const s64Int *) el1;
  const s64Int b = *(const s64Int *) el2;
  if (a < b) return -1;
  return (a > b) ? 1 : 0;
}
|
backup.c | // DFT Stefano: N = 8000, ser = 5 sec, omp (2 cores, HT) = 2 sec, optimisation = ?
// void norm(real_t data[], int_t len, int_t factor);
// void setarr(real_t data[], int_t len, real_t value);
// void setrnd(real_t arr[], int_t len);
// void setsin(real_t arr[], int_t len);
// void setdist(real_t arr[], int_t len);
// /* normalise array (divide every element by a factor given as parameter) */
// void norm(real_t data[], int_t len, int_t factor)
// {
// real_t invfac = (real_t)1.f / factor;
// #ifdef MY_OMP
// #pragma omp simd
// #endif
// for(int_t i = 0; i < len; ++i)
// {
// data[i] *= invfac;
// }
// }
// /* set array elements to a constant value (given as parameter) */
// void setarr(real_t arr[], int_t len, real_t value)
// {
// #ifdef MY_OMP
// #pragma omp simd
// #endif
// for(int_t i = 0; i < len; ++i)
// {
// arr[i] = value;
// }
// }
// /* set array elements to random values (scaled by length) */
// void setrnd(real_t arr[], int_t len)
// {
// real_t invlen = (real_t)1.f / len;
// #ifdef MY_OMP
// #pragma omp simd
// #endif
// for(int_t i = 0; i < len; ++i)
// {
// arr[i] = rand() * invlen;
// }
// }
// /* set array elements to one period of a sine function */
// void setsin(real_t arr[], int_t len)
// {
// for(int_t i = 0; i < len; ++i)
// {
// arr[i] = sin((real_t)(2 * M_PI * i / len));
// }
// }
// /* set array elements to a distorted sine function */
// void setdist(real_t arr[], int_t len)
// {
// real_t scale = 4 * (real_t)M_PI / len;
// #ifdef MY_OMP
// #pragma omp simd
// #endif
// for(int_t i = 0; i < len; ++i)
// {
// arr[i] = sin(i * scale) + (real_t)0.2f * sin(40 * i * scale);
// }
// }
// invert bits for each index.
// n is number of samples and a the array of the samples
/*void invert_bits(complex_t a[], int_t n)
{
int_t mv = n / 2;
int_t k, rev = 0;
complex_t b;
for (int_t i = 1; i < n; i++) // run tru all the indexes from 1 to n
{
k = i;
mv = n / 2;
rev = 0;
while (k > 0) // invert the actual index
{
if ((k % 2) > 0)
{
rev += mv;
}
k = k / 2;
mv = mv / 2;
}
// switch the actual sample and the bitinverted one
if (i < rev)
{
b = a[rev];
a[rev] = a[i];
a[i] = b;
}
}
}*/
// void fft_unroll(struct cmplx_t* d, struct cmplx_t** T, int_t len, int dir);
/* bit-reversal algorithm using Grey Code from Jennifer Elaan */
// void reversey(int* b, int len)
// {
// int i, jp, jn, k, s;
// int tmp;
// jp = 0;
// jn = 0;
// /*
// bsr = bit scan reverse, exists in x86 since Haswell
// bsf = bit scan forward, --------- '' --------------
// */
// s = bsr(len) - 1;
// for(i = 1; i < len; i++)
// {
// k = bsf(i);
// jp ^= (1<<k);
// jn ^= (1<<(s-k));
// if(jp<jn)
// {
// tmp = b[jp];
// b[jp] = b[jn];
// b[jn] = tmp;
// }
// }
// }
/* experimenting with manual loop unrolling - WIP */
// void fft_unroll(struct cmplx_t* __restrict__ d,
// struct cmplx_t** __restrict__ T,
// int_t len, int dir)
// {
// int_t ll = len >> 1;
// for(int_t M = 1, j = 0, len_M = ll; M <= ll; M <<= 1, ++j, len_M >>= 1)
// {
// for (int_t i = 0; i < len_M; ++i)
// {
// int_t l0 = (i << (j+1));
// int_t r0 = l0 + M;
// for (int_t k = 0, l = l0, r = r0;
// k < M-UNROLL && l < ll;
// k += UNROLL, l += UNROLL, r += UNROLL)
// {
// /* printf("l = %lu, r = %lu\n", l, r); */
// real_t Xev_re[UNROLL] = { RE(d[l]), RE(d[l+1]),
// RE(d[l+2]), RE(d[l+3]) };
// real_t Xev_im[UNROLL] = { dir * IM(d[l]), dir * IM(d[l+1]),
// dir * IM(d[l+2]), dir * IM(d[l+3]) };
// real_t tmp[UNROLL] = { dir * IM(d[r]), dir * IM(d[r+1]),
// dir * IM(d[r+2]), dir * IM(d[r+3]) };
// real_t Xod_re[UNROLL] =
// { RE(d[r]) * RE(T[j][k]) - tmp[0] * IM(T[j][k]),
// RE(d[r+1]) * RE(T[j][k+1]) - tmp[1] * IM(T[j][k+1]),
// RE(d[r+2]) * RE(T[j][k+2]) - tmp[2] * IM(T[j][k+2]),
// RE(d[r+3]) * RE(T[j][k+3]) - tmp[3] * IM(T[j][k+3]) };
// real_t Xod_im[UNROLL] =
// { RE(d[r]) * IM(T[j][k]) + tmp[0] * RE(T[j][k]),
// RE(d[r+1]) * IM(T[j][k+1]) + tmp[1] * RE(T[j][k+1]),
// RE(d[r+2]) * IM(T[j][k+2]) + tmp[2] * RE(T[j][k+2]),
// RE(d[r+3]) * IM(T[j][k+3]) + tmp[3] * RE(T[j][k+3]) };
// RE(d[l]) = Xev_re[0] + Xod_re[0];
// RE(d[l+1]) = Xev_re[1] + Xod_re[1];
// RE(d[l+2]) = Xev_re[2] + Xod_re[2];
// RE(d[l+2]) = Xev_re[3] + Xod_re[3];
// IM(d[l]) = dir * (Xev_im[0] + Xod_im[0]);
// IM(d[l+1]) = dir * (Xev_im[1] + Xod_im[1]);
// IM(d[l+2]) = dir * (Xev_im[2] + Xod_im[2]);
// IM(d[l+3]) = dir * (Xev_im[3] + Xod_im[3]);
// RE(d[r]) = Xev_re[0] - Xod_re[0];
// RE(d[r+1]) = Xev_re[1] - Xod_re[1];
// RE(d[r+2]) = Xev_re[2] - Xod_re[2];
// RE(d[r+3]) = Xev_re[3] - Xod_re[3];
// IM(d[r]) = dir * (Xev_im[0] - Xod_im[0]);
// IM(d[r+1]) = dir * (Xev_im[1] - Xod_im[1]);
// IM(d[r+2]) = dir * (Xev_im[2] - Xod_im[2]);
// IM(d[r+3]) = dir * (Xev_im[3] - Xod_im[3]);
// }
// }
// }
// } |
fx.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF X X %
% F X X %
% FFF X %
% F X X %
% F X X %
% %
% %
% MagickCore Image Special Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/fx-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
  Define declarations.
*/
/*
  Single-byte sentinel codes that AcquireFxInfo() substitutes for the
  two-character operators in an fx expression, so the parser can treat
  every operator as exactly one character.  The values 0xf5-0xfd lie
  outside the printable ASCII range and cannot collide with literal
  expression text.
*/
#define LeftShiftOperator 0xf5U
#define RightShiftOperator 0xf6U
#define LessThanEqualOperator 0xf7U
#define GreaterThanEqualOperator 0xf8U
#define EqualOperator 0xf9U
#define NotEqualOperator 0xfaU
#define LogicalAndOperator 0xfbU
#define LogicalOrOperator 0xfcU
#define ExponentialNotation 0xfdU
/*
  Private state behind the opaque FxInfo handle: the image sequence being
  evaluated, the pre-processed expression, symbol caches, one cache view
  per image, and per-evaluator RNG/exception state.
*/
struct _FxInfo
{
  const Image
    *images;       /* image sequence the expression refers to (s, u, v, ...) */

  char
    *expression;   /* expression text, pre-processed by AcquireFxInfo() */

  FILE
    *file;         /* diagnostic output stream (set to stderr on acquire) */

  SplayTreeInfo
    *colors,       /* color cache -- presumably parsed color literals; not
                      exercised in this part of the file */
    *symbols;      /* cache of computed symbol/statistic values (see
                      FxChannelStatistics) */

  CacheView
    **view;        /* one virtual cache view per image in the sequence */

  RandomInfo
    *random_info;  /* random number generator state */

  ExceptionInfo
    *exception;    /* private exception sink */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireFxInfo() allocates the FxInfo structure.
%
% The format of the AcquireFxInfo method is:
%
% FxInfo *AcquireFxInfo(Image *images,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o expression: the expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate FxInfo *AcquireFxInfo(const Image *images,const char *expression,
  ExceptionInfo *exception)
{
  char
    fx_op[2];

  const Image
    *next;

  FxInfo
    *fx_info;

  register ssize_t
    i;

  fx_info=(FxInfo *) AcquireMagickMemory(sizeof(*fx_info));
  if (fx_info == (FxInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(fx_info,0,sizeof(*fx_info));
  fx_info->exception=AcquireExceptionInfo();
  fx_info->images=images;
  /*
    Splay trees cache parsed colors and computed symbol values across
    pixel evaluations.
  */
  fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  /*
    One virtual cache view per image so per-image pixel fetches (u[i], v)
    are independent.
  */
  fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength(
    fx_info->images),sizeof(*fx_info->view));
  if (fx_info->view == (CacheView **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  i=0;
  next=GetFirstImageInList(fx_info->images);
  for ( ; next != (Image *) NULL; next=next->next)
  {
    fx_info->view[i]=AcquireVirtualCacheView(next,exception);
    i++;
  }
  fx_info->random_info=AcquireRandomInfo();
  fx_info->expression=ConstantString(expression);
  fx_info->file=stderr;
  (void) SubstituteString(&fx_info->expression," ",""); /* compact string */
  /*
    Force right-to-left associativity for unary negation.
  */
  (void) SubstituteString(&fx_info->expression,"-","-1.0*");
  /*
    Undo the rewrite where '-' follows '^' or an exponent marker ('E'/'e'),
    where it is not a unary negation.
  */
  (void) SubstituteString(&fx_info->expression,"^-1.0*","^-");
  (void) SubstituteString(&fx_info->expression,"E-1.0*","E-");
  (void) SubstituteString(&fx_info->expression,"e-1.0*","e-");
  /*
    Convert compound to simple operators: replace each two-character
    operator with its single-byte sentinel (see the defines above).
  */
  fx_op[1]='\0';
  *fx_op=(char) LeftShiftOperator;
  (void) SubstituteString(&fx_info->expression,"<<",fx_op);
  *fx_op=(char) RightShiftOperator;
  (void) SubstituteString(&fx_info->expression,">>",fx_op);
  *fx_op=(char) LessThanEqualOperator;
  (void) SubstituteString(&fx_info->expression,"<=",fx_op);
  *fx_op=(char) GreaterThanEqualOperator;
  (void) SubstituteString(&fx_info->expression,">=",fx_op);
  *fx_op=(char) EqualOperator;
  (void) SubstituteString(&fx_info->expression,"==",fx_op);
  *fx_op=(char) NotEqualOperator;
  (void) SubstituteString(&fx_info->expression,"!=",fx_op);
  *fx_op=(char) LogicalAndOperator;
  (void) SubstituteString(&fx_info->expression,"&&",fx_op);
  *fx_op=(char) LogicalOrOperator;
  (void) SubstituteString(&fx_info->expression,"||",fx_op);
  *fx_op=(char) ExponentialNotation;
  (void) SubstituteString(&fx_info->expression,"**",fx_op);
  return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d d N o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AddNoiseImage() adds random noise to the image.
%
% The format of the AddNoiseImage method is:
%
% Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
% const double attenuate,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o noise_type: The type of noise: Uniform, Gaussian, Multiplicative,
% Impulse, Laplacian, or Poisson.
%
% o attenuate: attenuate the random distribution.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
  const double attenuate,ExceptionInfo *exception)
{
#define AddNoiseImageTag "AddNoise/Image"

  CacheView
    *image_view,
    *noise_view;

  Image
    *noise_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Initialize noise image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Prefer the OpenCL-accelerated path; fall through to the CPU
    implementation when it declines or fails.
  */
  noise_image=AccelerateAddNoiseImage(image,noise_type,exception);
  if (noise_image != (Image *) NULL)
    return(noise_image);
#endif
  noise_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (noise_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse)
    {
      noise_image=DestroyImage(noise_image);
      return((Image *) NULL);
    }
  /*
    Add noise in each row.
  */
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();  /* one RNG per OpenMP thread */
  image_view=AcquireVirtualCacheView(image,exception);
  noise_view=AcquireAuthenticCacheView(noise_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /*
    Threads are enabled only when key == ~0UL; NOTE(review): this appears
    to disable threading when a fixed random seed is in effect so seeded
    runs stay reproducible -- confirm against GetRandomSecretKey().
  */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,noise_image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;  /* an earlier row failed; skip remaining work */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait noise_traits=GetPixelChannelTraits(noise_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (noise_traits == UndefinedPixelTrait))
          continue;
        /*
          Copy-trait channels and write-masked pixels pass through
          unchanged.
        */
        if (((noise_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p) <= (QuantumRange/2)))
          {
            SetPixelChannel(noise_image,channel,p[i],q);
            continue;
          }
        SetPixelChannel(noise_image,channel,ClampToQuantum(
          GenerateDifferentialNoise(random_info[id],p[i],noise_type,attenuate)),
          q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(noise_image);
    }
    sync=SyncCacheViewAuthenticPixels(noise_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_AddNoiseImage)
#endif
        proceed=SetImageProgress(image,AddNoiseImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  noise_view=DestroyCacheView(noise_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    noise_image=DestroyImage(noise_image);
  return(noise_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l u e S h i f t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlueShiftImage() mutes the colors of the image to simulate a scene at
% nighttime in the moonlight.
%
% The format of the BlueShiftImage method is:
%
% Image *BlueShiftImage(const Image *image,const double factor,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o factor: the shift factor.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BlueShiftImage(const Image *image,const double factor,
  ExceptionInfo *exception)
{
#define BlueShiftImageTag "BlueShift/Image"

  CacheView
    *image_view,
    *shift_view;

  Image
    *shift_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate blue shift image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  shift_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (shift_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(shift_image,DirectClass,exception) == MagickFalse)
    {
      shift_image=DestroyImage(shift_image);
      return((Image *) NULL);
    }
  /*
    Blue-shift DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  shift_view=AcquireAuthenticCacheView(shift_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,shift_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    PixelInfo
      pixel;

    Quantum
      quantum;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(shift_view,0,y,shift_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Average each channel halfway toward factor times the minimum of
        the RGB components, then halfway toward factor times the maximum:
        this mutes colors toward a moonlight-like cast.
      */
      quantum=GetPixelRed(image,p);
      if (GetPixelGreen(image,p) < quantum)
        quantum=GetPixelGreen(image,p);
      if (GetPixelBlue(image,p) < quantum)
        quantum=GetPixelBlue(image,p);
      pixel.red=0.5*(GetPixelRed(image,p)+factor*quantum);
      pixel.green=0.5*(GetPixelGreen(image,p)+factor*quantum);
      pixel.blue=0.5*(GetPixelBlue(image,p)+factor*quantum);
      quantum=GetPixelRed(image,p);
      if (GetPixelGreen(image,p) > quantum)
        quantum=GetPixelGreen(image,p);
      if (GetPixelBlue(image,p) > quantum)
        quantum=GetPixelBlue(image,p);
      pixel.red=0.5*(pixel.red+factor*quantum);
      pixel.green=0.5*(pixel.green+factor*quantum);
      pixel.blue=0.5*(pixel.blue+factor*quantum);
      SetPixelRed(shift_image,ClampToQuantum(pixel.red),q);
      SetPixelGreen(shift_image,ClampToQuantum(pixel.green),q);
      SetPixelBlue(shift_image,ClampToQuantum(pixel.blue),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(shift_image);
    }
    sync=SyncCacheViewAuthenticPixels(shift_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_BlueShiftImage)
#endif
        proceed=SetImageProgress(image,BlueShiftImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  shift_view=DestroyCacheView(shift_view);
  if (status == MagickFalse)
    shift_image=DestroyImage(shift_image);
  return(shift_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a r c o a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CharcoalImage() creates a new image that is a copy of an existing one with
% the edge highlighted. It allocates the memory necessary for the new Image
% structure and returns a pointer to the new image.
%
% The format of the CharcoalImage method is:
%
% Image *CharcoalImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CharcoalImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *blur_image,
    *edge_image,
    *working_image;

  /*
    Charcoal effect pipeline: edge-detect a copy of the image, blur the
    edges, then normalize, negate, and reduce to grayscale.  Each stage
    destroys its input; on any stage failure, NULL is returned.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  working_image=CloneImage(image,0,0,MagickTrue,exception);
  if (working_image == (Image *) NULL)
    return((Image *) NULL);
  edge_image=EdgeImage(working_image,radius,exception);
  working_image=DestroyImage(working_image);
  if (edge_image == (Image *) NULL)
    return((Image *) NULL);
  blur_image=BlurImage(edge_image,radius,sigma,exception);
  edge_image=DestroyImage(edge_image);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  (void) NormalizeImage(blur_image,exception);
  (void) NegateImage(blur_image,MagickFalse,exception);
  (void) GrayscaleImage(blur_image,image->intensity,exception);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorizeImage() blends the fill color with each pixel in the image.
% A percentage blend is specified with opacity. Control the application
% of different color components by specifying a different percentage for
% each component (e.g. 90/100/10 is 90% red, 100% green, and 10% blue).
%
% The format of the ColorizeImage method is:
%
% Image *ColorizeImage(const Image *image,const char *blend,
% const PixelInfo *colorize,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o blend: A character string indicating the level of blending as a
% percentage.
%
% o colorize: A color value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ColorizeImage(const Image *image,const char *blend,
  const PixelInfo *colorize,ExceptionInfo *exception)
{
#define ColorizeImageTag "Colorize/Image"
#define Colorize(pixel,blend_percentage,colorize) \
  (((pixel)*(100.0-(blend_percentage))+(colorize)*(blend_percentage))/100.0)

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  Image
    *colorize_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickStatusType
    flags;

  PixelInfo
    blend_percentage;

  ssize_t
    y;

  /*
    Allocate colorized image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  colorize_image=CloneImage(image,0,0,MagickTrue,exception);
  if (colorize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(colorize_image,DirectClass,exception) == MagickFalse)
    {
      colorize_image=DestroyImage(colorize_image);
      return((Image *) NULL);
    }
  /*
    A gray image or gray fill color is promoted to sRGB so the blend can
    introduce color.
  */
  if ((IsGrayColorspace(colorize_image->colorspace) != MagickFalse) ||
      (IsPixelInfoGray(colorize) != MagickFalse))
    (void) SetImageColorspace(colorize_image,sRGBColorspace,exception);
  if ((colorize_image->alpha_trait == UndefinedPixelTrait) &&
      (colorize->alpha_trait != UndefinedPixelTrait))
    (void) SetImageAlpha(colorize_image,OpaqueAlpha,exception);
  if (blend == (const char *) NULL)
    return(colorize_image);
  /*
    Parse the blend geometry into per-channel percentages: rho applies to
    all color channels; sigma/xi/psi override green/blue/alpha (for CMYK,
    psi is black and chi is alpha).  Alpha defaults to TransparentAlpha,
    i.e. it is not blended unless explicitly requested.
  */
  GetPixelInfo(colorize_image,&blend_percentage);
  flags=ParseGeometry(blend,&geometry_info);
  blend_percentage.red=geometry_info.rho;
  blend_percentage.green=geometry_info.rho;
  blend_percentage.blue=geometry_info.rho;
  blend_percentage.black=geometry_info.rho;
  blend_percentage.alpha=(MagickRealType) TransparentAlpha;
  if ((flags & SigmaValue) != 0)
    blend_percentage.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    blend_percentage.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    blend_percentage.alpha=geometry_info.psi;
  if (blend_percentage.colorspace == CMYKColorspace)
    {
      if ((flags & PsiValue) != 0)
        blend_percentage.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        blend_percentage.alpha=geometry_info.chi;
    }
  /*
    Colorize DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(colorize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(colorize_image,colorize_image,colorize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) colorize_image->rows; y++)
  {
    MagickBooleanType
      sync;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,colorize_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) colorize_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(colorize_image); i++)
      {
        PixelTrait traits = GetPixelChannelTraits(colorize_image,
          (PixelChannel) i);
        if (traits == UndefinedPixelTrait)
          continue;
        /*
          Copy-trait channels and write-masked pixels are left untouched.
        */
        if (((traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(colorize_image,q) <= (QuantumRange/2)))
          continue;
        /*
          Per-channel weighted average of the pixel and the fill color.
        */
        SetPixelChannel(colorize_image,(PixelChannel) i,ClampToQuantum(
          Colorize(q[i],GetPixelInfoChannel(&blend_percentage,(PixelChannel) i),
          GetPixelInfoChannel(colorize,(PixelChannel) i))),q);
      }
      q+=GetPixelChannels(colorize_image);
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ColorizeImage)
#endif
        proceed=SetImageProgress(image,ColorizeImageTag,progress++,
          colorize_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    colorize_image=DestroyImage(colorize_image);
  return(colorize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r M a t r i x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorMatrixImage() applies color transformation to an image. This method
% permits saturation changes, hue rotation, luminance to alpha, and various
% other effects. Although variable-sized transformation matrices can be used,
% typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
% (or RGBA with offsets). The matrix is similar to those used by Adobe Flash
% except offsets are in column 6 rather than 5 (in support of CMYKA images)
% and offsets are normalized (divide Flash offset by 255).
%
% The format of the ColorMatrixImage method is:
%
% Image *ColorMatrixImage(const Image *image,
% const KernelInfo *color_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_matrix: the color matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* FUTURE: modify to make use of a MagickMatrix Mutliply function
That should be provided in "matrix.c"
(ASIDE: actually distorts should do this too but currently doesn't)
*/
MagickExport Image *ColorMatrixImage(const Image *image,
  const KernelInfo *color_matrix,ExceptionInfo *exception)
{
#define ColorMatrixImageTag "ColorMatrix/Image"

  CacheView
    *color_view,
    *image_view;

  double
    ColorMatrix[6][6] =
    {
      { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 1.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 }
    };

  Image
    *color_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    u,
    v,
    y;

  /*
    Map given color_matrix, into a 6x6 matrix RGBKA and a constant
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Copy the user matrix over the identity default; entries beyond row or
    column 6 are consumed but ignored.
  */
  i=0;
  for (v=0; v < (ssize_t) color_matrix->height; v++)
    for (u=0; u < (ssize_t) color_matrix->width; u++)
    {
      if ((v < 6) && (u < 6))
        ColorMatrix[v][u]=color_matrix->values[i];
      i++;
    }
  /*
    Initialize color image.
  */
  color_image=CloneImage(image,0,0,MagickTrue,exception);
  if (color_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(color_image,DirectClass,exception) == MagickFalse)
    {
      color_image=DestroyImage(color_image);
      return((Image *) NULL);
    }
  if (image->debug != MagickFalse)
    {
      char
        format[MagickPathExtent],
        *message;

      /*
        Log the effective 6x6 matrix, one row per line.
      */
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " ColorMatrix image with color matrix:");
      message=AcquireString("");
      for (v=0; v < 6; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < 6; u++)
        {
          (void) FormatLocaleString(format,MagickPathExtent,"%+f ",
            ColorMatrix[v][u]);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  /*
    Apply the ColorMatrix to image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  color_view=AcquireAuthenticCacheView(color_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,color_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(color_view,0,y,color_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        v;

      size_t
        height;

      GetPixelInfoPixel(image,p,&pixel);
      height=color_matrix->height > 6 ? 6UL : color_matrix->height;
      for (v=0; v < (ssize_t) height; v++)
      {
        double
          sum;

        /*
          Row v of the matrix weights R,G,B (plus K for CMYK images and A
          when alpha is enabled) with a constant offset in column 6,
          scaled by QuantumRange.
        */
        sum=ColorMatrix[v][0]*GetPixelRed(image,p)+ColorMatrix[v][1]*
          GetPixelGreen(image,p)+ColorMatrix[v][2]*GetPixelBlue(image,p);
        if (image->colorspace == CMYKColorspace)
          sum+=ColorMatrix[v][3]*GetPixelBlack(image,p);
        if (image->alpha_trait != UndefinedPixelTrait)
          sum+=ColorMatrix[v][4]*GetPixelAlpha(image,p);
        sum+=QuantumRange*ColorMatrix[v][5];
        switch (v)
        {
          case 0: pixel.red=sum; break;
          case 1: pixel.green=sum; break;
          case 2: pixel.blue=sum; break;
          case 3: pixel.black=sum; break;
          case 4: pixel.alpha=sum; break;
          default: break;
        }
      }
      SetPixelViaPixelInfo(color_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(color_image);
    }
    if (SyncCacheViewAuthenticPixels(color_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ColorMatrixImage)
#endif
        proceed=SetImageProgress(image,ColorMatrixImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  color_view=DestroyCacheView(color_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    color_image=DestroyImage(color_image);
  return(color_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyFxInfo() deallocates memory associated with an FxInfo structure.
%
% The format of the DestroyFxInfo method is:
%
%      FxInfo *DestroyFxInfo(FxInfo *fx_info)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
*/
MagickPrivate FxInfo *DestroyFxInfo(FxInfo *fx_info)
{
  register ssize_t
    i;

  /*
    Release every resource owned by the evaluator (exception, expression,
    caches, cache views, RNG), then the structure itself; always returns
    NULL for pointer-clearing assignment at the caller.
  */
  fx_info->exception=DestroyExceptionInfo(fx_info->exception);
  fx_info->expression=DestroyString(fx_info->expression);
  fx_info->symbols=DestroySplayTree(fx_info->symbols);
  fx_info->colors=DestroySplayTree(fx_info->colors);
  i=(ssize_t) GetImageListLength(fx_info->images);
  while (--i >= 0)
    fx_info->view[i]=DestroyCacheView(fx_info->view[i]);
  fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view);
  fx_info->random_info=DestroyRandomInfo(fx_info->random_info);
  fx_info=(FxInfo *) RelinquishMagickMemory(fx_info);
  return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F x E v a l u a t e C h a n n e l E x p r e s s i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxEvaluateChannelExpression() evaluates an expression and returns the
% results.
%
% The format of the FxEvaluateExpression method is:
%
% double FxEvaluateChannelExpression(FxInfo *fx_info,
% const PixelChannel channel,const ssize_t x,const ssize_t y,
% double *alpha,Exceptioninfo *exception)
% double FxEvaluateExpression(FxInfo *fx_info,
% double *alpha,Exceptioninfo *exception)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
% o channel: the channel.
%
% o x,y: the pixel position.
%
% o alpha: the result.
%
% o exception: return any errors or warnings in this structure.
%
*/
static double FxChannelStatistics(FxInfo *fx_info,Image *image,
  PixelChannel channel,const char *symbol,ExceptionInfo *exception)
{
  ChannelType
    channel_mask;

  char
    key[MagickPathExtent],
    statistic[MagickPathExtent];

  const char
    *value;

  register const char
    *p;

  /*
    Evaluate an image statistic symbol (depth, kurtosis, maxima, mean,
    minima, skewness, standard_deviation), optionally restricted to one
    channel via a ".<channel>" suffix.  Results are cached per image and
    channel in fx_info->symbols and returned scaled by QuantumScale.
  */
  channel_mask=UndefinedChannel;
  for (p=symbol; (*p != '.') && (*p != '\0'); p++) ;
  if (*p == '.')
    {
      ssize_t
        option;

      option=ParseCommandOption(MagickPixelChannelOptions,MagickTrue,p+1);
      if (option >= 0)
        {
          channel=(PixelChannel) option;
          /*
            1UL (not 1): a plain int shift is undefined for channels >= 31.
          */
          channel_mask=SetPixelChannelMask(image,(ChannelType)
            (1UL << channel));
        }
    }
  (void) FormatLocaleString(key,MagickPathExtent,"%p.%.20g.%s",(void *) image,
    (double) channel,symbol);
  value=(const char *) GetValueFromSplayTree(fx_info->symbols,key);
  if (value != (const char *) NULL)
    {
      /*
        Cache hit: restore the channel mask and return the cached value.
      */
      if (channel_mask != UndefinedChannel)
        (void) SetPixelChannelMask(image,channel_mask);
      return(QuantumScale*StringToDouble(value,(char **) NULL));
    }
  (void) DeleteNodeFromSplayTree(fx_info->symbols,key);
  /*
    Initialize the statistic buffer: an unrecognized symbol previously left
    it uninitialized, caching and returning indeterminate data (undefined
    behavior).  An empty string parses as 0.0 below.
  */
  *statistic='\0';
  if (LocaleNCompare(symbol,"depth",5) == 0)
    {
      size_t
        depth;

      depth=GetImageDepth(image,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",(double)
        depth);
    }
  if (LocaleNCompare(symbol,"kurtosis",8) == 0)
    {
      double
        kurtosis,
        skewness;

      (void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%g",kurtosis);
    }
  if (LocaleNCompare(symbol,"maxima",6) == 0)
    {
      double
        maxima,
        minima;

      (void) GetImageRange(image,&minima,&maxima,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%g",maxima);
    }
  if (LocaleNCompare(symbol,"mean",4) == 0)
    {
      double
        mean,
        standard_deviation;

      (void) GetImageMean(image,&mean,&standard_deviation,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%g",mean);
    }
  if (LocaleNCompare(symbol,"minima",6) == 0)
    {
      double
        maxima,
        minima;

      (void) GetImageRange(image,&minima,&maxima,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%g",minima);
    }
  if (LocaleNCompare(symbol,"skewness",8) == 0)
    {
      double
        kurtosis,
        skewness;

      (void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%g",skewness);
    }
  if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
    {
      double
        mean,
        standard_deviation;

      (void) GetImageMean(image,&mean,&standard_deviation,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%g",
        standard_deviation);
    }
  if (channel_mask != UndefinedChannel)
    (void) SetPixelChannelMask(image,channel_mask);
  (void) AddValueToSplayTree(fx_info->symbols,ConstantString(key),
    ConstantString(statistic));
  return(QuantumScale*StringToDouble(statistic,(char **) NULL));
}
/*
  Forward declaration: the recursive expression evaluator, defined later in
  this file, is needed by FxGetSymbol() below.
*/
static double
  FxEvaluateSubexpression(FxInfo *,const PixelChannel,const ssize_t,
    const ssize_t,const char *,size_t *,double *,ExceptionInfo *);
static MagickOffsetType FxGCD(MagickOffsetType alpha,MagickOffsetType beta)
{
  /*
    Greatest common divisor by Euclid's algorithm, iterative form.
  */
  while (beta != 0)
  {
    MagickOffsetType
      residue;

    residue=alpha % beta;
    alpha=beta;
    beta=residue;
  }
  return(alpha);
}
static inline const char *FxSubexpression(const char *expression,
  ExceptionInfo *exception)
{
  const char
    *cursor;

  register ssize_t
    depth;

  /*
    Scan forward over a parenthesized subexpression: return a pointer to
    the ')' that closes the first '(' encountered.  If the string ends
    first, an unbalanced-parenthesis error is raised and the pointer to
    the terminating '\0' is returned.
  */
  depth=0;
  for (cursor=expression; *cursor != '\0'; cursor++)
  {
    if ((depth == 1) && (*cursor == ')'))
      break;
    if (*cursor == '(')
      depth++;
    else
      if (*cursor == ')')
        depth--;
  }
  if (*cursor == '\0')
    (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
      "UnbalancedParenthesis","`%s'",expression);
  return(cursor);
}
static double FxGetSymbol(FxInfo *fx_info,const PixelChannel channel,
  const ssize_t x,const ssize_t y,const char *expression,
  ExceptionInfo *exception)
{
  char
    *q,
    subexpression[MagickPathExtent],
    symbol[MagickPathExtent];
  const char
    *p,
    *value;
  Image
    *image;
  PixelInfo
    pixel;
  double
    alpha,
    beta;
  PointInfo
    point;
  register ssize_t
    i;
  size_t
    depth,
    length,
    level;
  /*
    Resolve an FX symbol (e.g. "u.r", "p[1,2].g", "hue", "mean", or a
    user-defined variable) to a double value for the pixel at (x,y) on the
    given channel.  Parsing happens in stages: optional image selector,
    optional pixel address, optional color-name lookup, then the symbol
    name itself.
  */
  p=expression;
  i=GetImageIndexInList(fx_info->images);
  depth=0;
  level=0;
  point.x=(double) x;
  point.y=(double) y;
  if (isalpha((int) ((unsigned char) *(p+1))) == 0)
    {
      /*
        Image selector: 's' is the current image, 'u' the first, 'v' the
        second; an optional bracketed subexpression (e.g. "u[2]") selects an
        arbitrary image index.  The guard on *(p+1) ensures we do not
        misread the start of a longer word such as "saturation".
      */
      if (strchr("suv",(int) *p) != (char *) NULL)
        {
          switch (*p)
          {
            case 's':
            default:
            {
              i=GetImageIndexInList(fx_info->images);
              break;
            }
            case 'u': i=0; break;
            case 'v': i=1; break;
          }
          p++;
          if (*p == '[')
            {
              /*
                Copy the (possibly nested) bracketed index expression into
                subexpression and evaluate it to get the image index.
              */
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '[')
                  level++;
                else
                  if (*p == ']')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                &depth,&beta,exception);
              i=(ssize_t) alpha;
              p++;
            }
          if (*p == '.')
            p++;
        }
      if ((*p == 'p') && (isalpha((int) ((unsigned char) *(p+1))) == 0))
        {
          /*
            Pixel address: "p{ex,ey}" is an absolute coordinate, "p[dx,dy]"
            is relative to (x,y).  The subexpression evaluator returns the
            first coordinate in alpha and the second in beta.
          */
          p++;
          if (*p == '{')
            {
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '{')
                  level++;
                else
                  if (*p == '}')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                &depth,&beta,exception);
              point.x=alpha;
              point.y=beta;
              p++;
            }
          else
            if (*p == '[')
              {
                level++;
                q=subexpression;
                for (p++; *p != '\0'; )
                {
                  if (*p == '[')
                    level++;
                  else
                    if (*p == ']')
                      {
                        level--;
                        if (level == 0)
                          break;
                      }
                  *q++=(*p++);
                }
                *q='\0';
                alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                  &depth,&beta,exception);
                point.x+=alpha;
                point.y+=beta;
                p++;
              }
          if (*p == '.')
            p++;
        }
    }
  /*
    Wrap a negative or out-of-range image index into [0,length).
    NOTE(review): if length is 0 and i < 0 the while loop does not
    terminate — presumably the image list is never empty here; confirm.
  */
  length=GetImageListLength(fx_info->images);
  while (i < 0)
    i+=(ssize_t) length;
  if (length != 0)
    i%=length;
  image=GetImageFromList(fx_info->images,i);
  if (image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "NoSuchImage","`%s'",expression);
      return(0.0);
    }
  GetPixelInfo(image,&pixel);
  /*
    Interpolate the pixel at the (possibly fractional) point for image i.
  */
  (void) InterpolatePixelInfo(image,fx_info->view[i],image->interpolate,
    point.x,point.y,&pixel,exception);
  if ((strlen(p) > 2) && (LocaleCompare(p,"intensity") != 0) &&
      (LocaleCompare(p,"luma") != 0) && (LocaleCompare(p,"luminance") != 0) &&
      (LocaleCompare(p,"hue") != 0) && (LocaleCompare(p,"saturation") != 0) &&
      (LocaleCompare(p,"lightness") != 0))
    {
      char
        name[MagickPathExtent];
      /*
        The remaining text may be a color name (e.g. "red.g"): strip a
        trailing ".channel" suffix, then try the color cache and finally
        QueryColorCompliance; on success the interpolated pixel is replaced
        by the named color and p is advanced past the name.
      */
      (void) CopyMagickString(name,p,MagickPathExtent);
      for (q=name+(strlen(name)-1); q > name; q--)
      {
        if (*q == ')')
          break;
        if (*q == '.')
          {
            *q='\0';
            break;
          }
      }
      if ((strlen(name) > 2) &&
          (GetValueFromSplayTree(fx_info->symbols,name) == (const char *) NULL))
        {
          PixelInfo
            *color;
          color=(PixelInfo *) GetValueFromSplayTree(fx_info->colors,name);
          if (color != (PixelInfo *) NULL)
            {
              pixel=(*color);
              p+=strlen(name);
            }
          else
            {
              MagickBooleanType
                status;
              status=QueryColorCompliance(name,AllCompliance,&pixel,
                fx_info->exception);
              if (status != MagickFalse)
                {
                  (void) AddValueToSplayTree(fx_info->colors,ConstantString(
                    name),ClonePixelInfo(&pixel));
                  p+=strlen(name);
                }
            }
        }
    }
  (void) CopyMagickString(symbol,p,MagickPathExtent);
  StripString(symbol);
  if (*symbol == '\0')
    {
      /*
        No symbol remains (e.g. just "p[1,1]"): return the interpolated
        pixel's value for the requested channel directly.
      */
      switch (channel)
      {
        case RedPixelChannel: return(QuantumScale*pixel.red);
        case GreenPixelChannel: return(QuantumScale*pixel.green);
        case BluePixelChannel: return(QuantumScale*pixel.blue);
        case BlackPixelChannel:
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                ImageError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.black);
        }
        case AlphaPixelChannel:
        {
          if (pixel.alpha_trait == UndefinedPixelTrait)
            return(1.0);
          alpha=(double) (QuantumScale*pixel.alpha);
          return(alpha);
        }
        case IndexPixelChannel:
          return(0.0);
        case IntensityPixelChannel:
        {
          Quantum
            quantum_pixel[MaxPixelChannels];
          SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
          return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
        }
        default:
          break;
      }
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "UnableToParseExpression","`%s'",p);
      return(0.0);
    }
  /*
    Named symbols, dispatched on the first letter.  Single letters r/g/b/a
    are RGBA components; c/m/y/k read the red/green/blue/black components
    (presumably the CMY(K) aliases for color-separated images — confirm);
    statistics names (mean, minima, kurtosis, ...) defer to
    FxChannelStatistics.
  */
  switch (*symbol)
  {
    case 'A':
    case 'a':
    {
      if (LocaleCompare(symbol,"a") == 0)
        return((QuantumScale*pixel.alpha));
      break;
    }
    case 'B':
    case 'b':
    {
      if (LocaleCompare(symbol,"b") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'C':
    case 'c':
    {
      if (LocaleNCompare(symbol,"channel",7) == 0)
        {
          GeometryInfo
            channel_info;
          MagickStatusType
            flags;
          /*
            "channel(r,g,b,k,a)" selects a per-channel constant from the
            parsed geometry; the rho..chi slots map onto the channel order
            of the image's colorspace.
          */
          flags=ParseGeometry(symbol+7,&channel_info);
          if (image->colorspace == CMYKColorspace)
            switch (channel)
            {
              case CyanPixelChannel:
              {
                if ((flags & RhoValue) == 0)
                  return(0.0);
                return(channel_info.rho);
              }
              case MagentaPixelChannel:
              {
                if ((flags & SigmaValue) == 0)
                  return(0.0);
                return(channel_info.sigma);
              }
              case YellowPixelChannel:
              {
                if ((flags & XiValue) == 0)
                  return(0.0);
                return(channel_info.xi);
              }
              case BlackPixelChannel:
              {
                if ((flags & PsiValue) == 0)
                  return(0.0);
                return(channel_info.psi);
              }
              case AlphaPixelChannel:
              {
                if ((flags & ChiValue) == 0)
                  return(0.0);
                return(channel_info.chi);
              }
              default:
                return(0.0);
            }
          switch (channel)
          {
            case RedPixelChannel:
            {
              if ((flags & RhoValue) == 0)
                return(0.0);
              return(channel_info.rho);
            }
            case GreenPixelChannel:
            {
              if ((flags & SigmaValue) == 0)
                return(0.0);
              return(channel_info.sigma);
            }
            case BluePixelChannel:
            {
              if ((flags & XiValue) == 0)
                return(0.0);
              return(channel_info.xi);
            }
            case BlackPixelChannel:
            {
              if ((flags & ChiValue) == 0)
                return(0.0);
              return(channel_info.chi);
            }
            case AlphaPixelChannel:
            {
              if ((flags & PsiValue) == 0)
                return(0.0);
              return(channel_info.psi);
            }
            default:
              return(0.0);
          }
        }
      if (LocaleCompare(symbol,"c") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'D':
    case 'd':
    {
      if (LocaleNCompare(symbol,"depth",5) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'G':
    case 'g':
    {
      if (LocaleCompare(symbol,"g") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'K':
    case 'k':
    {
      if (LocaleNCompare(symbol,"kurtosis",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"k") == 0)
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.black);
        }
      break;
    }
    case 'H':
    case 'h':
    {
      if (LocaleCompare(symbol,"h") == 0)
        return((double) image->rows);
      if (LocaleCompare(symbol,"hue") == 0)
        {
          double
            hue,
            lightness,
            saturation;
          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(hue);
        }
      break;
    }
    case 'I':
    case 'i':
    {
      /*
        "image.<statistic>" forwards the bare statistic name (symbol+6
        skips the "image." prefix) to FxChannelStatistics.
      */
      if ((LocaleCompare(symbol,"image.depth") == 0) ||
          (LocaleCompare(symbol,"image.minima") == 0) ||
          (LocaleCompare(symbol,"image.maxima") == 0) ||
          (LocaleCompare(symbol,"image.mean") == 0) ||
          (LocaleCompare(symbol,"image.kurtosis") == 0) ||
          (LocaleCompare(symbol,"image.skewness") == 0) ||
          (LocaleCompare(symbol,"image.standard_deviation") == 0))
        return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception));
      if (LocaleCompare(symbol,"image.resolution.x") == 0)
        return(image->resolution.x);
      if (LocaleCompare(symbol,"image.resolution.y") == 0)
        return(image->resolution.y);
      if (LocaleCompare(symbol,"intensity") == 0)
        {
          Quantum
            quantum_pixel[MaxPixelChannels];
          SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
          return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
        }
      if (LocaleCompare(symbol,"i") == 0)
        return((double) x);
      break;
    }
    case 'J':
    case 'j':
    {
      if (LocaleCompare(symbol,"j") == 0)
        return((double) y);
      break;
    }
    case 'L':
    case 'l':
    {
      if (LocaleCompare(symbol,"lightness") == 0)
        {
          double
            hue,
            lightness,
            saturation;
          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(lightness);
        }
      if (LocaleCompare(symbol,"luma") == 0)
        {
          double
            luma;
          /* Rec. 709-style weighted sum of the RGB components. */
          luma=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
          return(QuantumScale*luma);
        }
      if (LocaleCompare(symbol,"luminance") == 0)
        {
          double
            luminence;
          luminence=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
          return(QuantumScale*luminence);
        }
      break;
    }
    case 'M':
    case 'm':
    {
      if (LocaleNCompare(symbol,"maxima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"mean",4) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"minima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"m") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'N':
    case 'n':
    {
      if (LocaleCompare(symbol,"n") == 0)
        return((double) GetImageListLength(fx_info->images));
      break;
    }
    case 'O':
    case 'o':
    {
      if (LocaleCompare(symbol,"o") == 0)
        return(QuantumScale*pixel.alpha);
      break;
    }
    case 'P':
    case 'p':
    {
      if (LocaleCompare(symbol,"page.height") == 0)
        return((double) image->page.height);
      if (LocaleCompare(symbol,"page.width") == 0)
        return((double) image->page.width);
      if (LocaleCompare(symbol,"page.x") == 0)
        return((double) image->page.x);
      if (LocaleCompare(symbol,"page.y") == 0)
        return((double) image->page.y);
      break;
    }
    case 'Q':
    case 'q':
    {
      if (LocaleCompare(symbol,"quality") == 0)
        return((double) image->quality);
      break;
    }
    case 'R':
    case 'r':
    {
      if (LocaleCompare(symbol,"resolution.x") == 0)
        return(image->resolution.x);
      if (LocaleCompare(symbol,"resolution.y") == 0)
        return(image->resolution.y);
      if (LocaleCompare(symbol,"r") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'S':
    case 's':
    {
      if (LocaleCompare(symbol,"saturation") == 0)
        {
          double
            hue,
            lightness,
            saturation;
          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(saturation);
        }
      if (LocaleNCompare(symbol,"skewness",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'T':
    case 't':
    {
      if (LocaleCompare(symbol,"t") == 0)
        return((double) GetImageIndexInList(fx_info->images));
      break;
    }
    case 'W':
    case 'w':
    {
      if (LocaleCompare(symbol,"w") == 0)
        return((double) image->columns);
      break;
    }
    case 'Y':
    case 'y':
    {
      if (LocaleCompare(symbol,"y") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'Z':
    case 'z':
    {
      if (LocaleCompare(symbol,"z") == 0)
        return((double)GetImageDepth(image, fx_info->exception));
      break;
    }
    default:
      break;
  }
  /*
    Not a built-in: try a user-defined variable, otherwise fail.
  */
  value=(const char *) GetValueFromSplayTree(fx_info->symbols,symbol);
  if (value != (const char *) NULL)
    return(StringToDouble(value,(char **) NULL));
  (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
    "UnableToParseExpression","`%s'",symbol);
  return(0.0);
}
static const char *FxOperatorPrecedence(const char *expression,
  ExceptionInfo *exception)
{
  /*
    Precedence classes ordered from tightest binding (BitwiseComplement)
    to loosest (Separator); a larger enum value means the operator binds
    more loosely and is therefore a better place to split the expression.
  */
  typedef enum
  {
    UndefinedPrecedence,
    NullPrecedence,
    BitwiseComplementPrecedence,
    ExponentPrecedence,
    ExponentialNotationPrecedence,
    MultiplyPrecedence,
    AdditionPrecedence,
    ShiftPrecedence,
    RelationalPrecedence,
    EquivalencyPrecedence,
    BitwiseAndPrecedence,
    BitwiseOrPrecedence,
    LogicalAndPrecedence,
    LogicalOrPrecedence,
    TernaryPrecedence,
    AssignmentPrecedence,
    CommaPrecedence,
    SeparatorPrecedence
  } FxPrecedence;
  FxPrecedence
    precedence,
    target;
  register const char
    *subexpression;
  register int
    c;
  size_t
    level;
  /*
    Scan the expression at brace/bracket nesting level zero and return a
    pointer to the operator at which the caller should split it (the
    loosest-binding operator found, honoring associativity), or NULL when
    no operator is found.  c always holds the previously scanned character
    (0 at the start).
  */
  c=0;
  level=0;
  subexpression=(const char *) NULL;
  target=NullPrecedence;
  while (*expression != '\0')
  {
    /*
      Skip whitespace; a preceding '@' also causes the current character to
      be skipped (NOTE(review): '@' is classed with '^' as ExponentPrecedence
      below — confirm intended semantics).
    */
    precedence=UndefinedPrecedence;
    if ((isspace((int) ((unsigned char) *expression)) != 0) || (c == (int) '@'))
      {
        expression++;
        continue;
      }
    /*
      Skip over multi-letter function names and literals whose characters
      could otherwise be mistaken for operators or operands: acosh/asinh/
      atanh/atan2, E+/E- scientific notation, j0/j1, and '#' hex constants.
    */
    switch (*expression)
    {
      case 'A':
      case 'a':
      {
#if defined(MAGICKCORE_HAVE_ACOSH)
        if (LocaleNCompare(expression,"acosh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
        if (LocaleNCompare(expression,"asinh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ATANH)
        if (LocaleNCompare(expression,"atanh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
        if (LocaleNCompare(expression,"atan2",5) == 0)
          {
            expression+=5;
            break;
          }
        break;
      }
      case 'E':
      case 'e':
      {
        if ((isdigit((int) ((unsigned char) c)) != 0) &&
            ((LocaleNCompare(expression,"E+",2) == 0) ||
             (LocaleNCompare(expression,"E-",2) == 0)))
          {
            expression+=2;  /* scientific notation */
            break;
          }
      }
      /* fallthrough: a non-scientific 'E'/'e' falls into the 'J' case,
         whose j0/j1 comparisons then fail harmlessly */
      case 'J':
      case 'j':
      {
        if ((LocaleNCompare(expression,"j0",2) == 0) ||
            (LocaleNCompare(expression,"j1",2) == 0))
          {
            expression+=2;
            break;
          }
        break;
      }
      case '#':
      {
        while (isxdigit((int) ((unsigned char) *(expression+1))) != 0)
          expression++;
        break;
      }
      default:
        break;
    }
    /*
      Track '{...}' and '[...]' nesting; operators inside them are ignored.
    */
    if ((c == (int) '{') || (c == (int) '['))
      level++;
    else
      if ((c == (int) '}') || (c == (int) ']'))
        level--;
    if (level == 0)
      switch ((unsigned char) *expression)
      {
        case '~':
        case '!':
        {
          precedence=BitwiseComplementPrecedence;
          break;
        }
        case '^':
        case '@':
        {
          precedence=ExponentPrecedence;
          break;
        }
        /*
          The default label deliberately precedes the remaining cases
          (legal C): it detects implied multiplication — a digit or ')'
          followed by a lowercase name, '(' or a digit (but never the
          coordinates x/y).
        */
        default:
        {
          if (((c != 0) && ((isdigit((int) ((unsigned char) c)) != 0) ||
               (strchr(")",(int) ((unsigned char) c)) != (char *) NULL))) &&
              (((islower((int) ((unsigned char) *expression)) != 0) ||
               (strchr("(",(int) ((unsigned char) *expression)) != (char *) NULL)) ||
               ((isdigit((int) ((unsigned char) c)) == 0) &&
                (isdigit((int) ((unsigned char) *expression)) != 0))) &&
              (strchr("xy",(int) ((unsigned char) *expression)) == (char *) NULL))
            precedence=MultiplyPrecedence;
          break;
        }
        case '*':
        case '/':
        case '%':
        {
          precedence=MultiplyPrecedence;
          break;
        }
        case '+':
        case '-':
        {
          /*
            '+'/'-' count as addition only when the previous character
            shows they are binary (not a unary sign after an operator or
            '(').
          */
          if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) ||
              (isalpha(c) != 0))
            precedence=AdditionPrecedence;
          break;
        }
        case LeftShiftOperator:
        case RightShiftOperator:
        {
          precedence=ShiftPrecedence;
          break;
        }
        case '<':
        case LessThanEqualOperator:
        case GreaterThanEqualOperator:
        case '>':
        {
          precedence=RelationalPrecedence;
          break;
        }
        case EqualOperator:
        case NotEqualOperator:
        {
          precedence=EquivalencyPrecedence;
          break;
        }
        case '&':
        {
          precedence=BitwiseAndPrecedence;
          break;
        }
        case '|':
        {
          precedence=BitwiseOrPrecedence;
          break;
        }
        case LogicalAndOperator:
        {
          precedence=LogicalAndPrecedence;
          break;
        }
        case LogicalOrOperator:
        {
          precedence=LogicalOrPrecedence;
          break;
        }
        case ExponentialNotation:
        {
          precedence=ExponentialNotationPrecedence;
          break;
        }
        case ':':
        case '?':
        {
          precedence=TernaryPrecedence;
          break;
        }
        case '=':
        {
          precedence=AssignmentPrecedence;
          break;
        }
        case ',':
        {
          precedence=CommaPrecedence;
          break;
        }
        case ';':
        {
          precedence=SeparatorPrecedence;
          break;
        }
      }
    if ((precedence == BitwiseComplementPrecedence) ||
        (precedence == TernaryPrecedence) ||
        (precedence == AssignmentPrecedence))
      {
        if (precedence > target)
          {
            /*
              Right-to-left associativity.
            */
            target=precedence;
            subexpression=expression;
          }
      }
    else
      if (precedence >= target)
        {
          /*
            Left-to-right associativity.
          */
          target=precedence;
          subexpression=expression;
        }
    /*
      Skip over a whole parenthesized group; its interior operators must
      not become the split point.
    */
    if (strchr("(",(int) *expression) != (char *) NULL)
      expression=FxSubexpression(expression,exception);
    c=(int) (*expression++);
  }
  return(subexpression);
}
static double FxEvaluateSubexpression(FxInfo *fx_info,
const PixelChannel channel,const ssize_t x,const ssize_t y,
const char *expression,size_t *depth,double *beta,ExceptionInfo *exception)
{
#define FxMaxParenthesisDepth 58
char
*q,
subexpression[MagickPathExtent];
double
alpha,
gamma;
register const char
*p;
*beta=0.0;
if (exception->severity >= ErrorException)
return(0.0);
while (isspace((int) ((unsigned char) *expression)) != 0)
expression++;
if (*expression == '\0')
return(0.0);
*subexpression='\0';
p=FxOperatorPrecedence(expression,exception);
if (p != (const char *) NULL)
{
(void) CopyMagickString(subexpression,expression,(size_t)
(p-expression+1));
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth,
beta,exception);
switch ((unsigned char) *p)
{
case '~':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
*beta=(double) (~(size_t) *beta);
return(*beta);
}
case '!':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(*beta == 0.0 ? 1.0 : 0.0);
}
case '^':
{
*beta=pow(alpha,FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,
beta,exception));
return(*beta);
}
case '*':
case ExponentialNotation:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(alpha*(*beta));
}
case '/':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
if (*beta == 0.0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"DivideByZero","`%s'",expression);
return(0.0);
}
return(alpha/(*beta));
}
case '%':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
*beta=fabs(floor((*beta)+0.5));
if (*beta == 0.0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"DivideByZero","`%s'",expression);
return(0.0);
}
return(fmod(alpha,*beta));
}
case '+':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(alpha+(*beta));
}
case '-':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(alpha-(*beta));
}
case LeftShiftOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5));
return(*beta);
}
case RightShiftOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) >> (size_t) (gamma+0.5));
return(*beta);
}
case '<':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(alpha < *beta ? 1.0 : 0.0);
}
case LessThanEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(alpha <= *beta ? 1.0 : 0.0);
}
case '>':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(alpha > *beta ? 1.0 : 0.0);
}
case GreaterThanEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(alpha >= *beta ? 1.0 : 0.0);
}
case EqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(fabs(alpha-(*beta)) < MagickEpsilon ? 1.0 : 0.0);
}
case NotEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0);
}
case '&':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5));
return(*beta);
}
case '|':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5));
return(*beta);
}
case LogicalAndOperator:
{
p++;
if (alpha <= 0.0)
{
*beta=0.0;
return(*beta);
}
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth,beta,
exception);
*beta=(gamma > 0.0) ? 1.0 : 0.0;
return(*beta);
}
case LogicalOrOperator:
{
p++;
if (alpha > 0.0)
{
*beta=1.0;
return(*beta);
}
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth,beta,
exception);
*beta=(gamma > 0.0) ? 1.0 : 0.0;
return(*beta);
}
case '?':
{
(void) CopyMagickString(subexpression,++p,MagickPathExtent);
q=subexpression;
p=StringToken(":",&q);
if (q == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
return(0.0);
}
if (fabs(alpha) >= MagickEpsilon)
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth,beta,
exception);
else
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q,depth,beta,
exception);
return(gamma);
}
case '=':
{
char
numeric[MagickPathExtent];
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
return(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
(void) FormatLocaleString(numeric,MagickPathExtent,"%g",*beta);
(void) DeleteNodeFromSplayTree(fx_info->symbols,subexpression);
(void) AddValueToSplayTree(fx_info->symbols,ConstantString(
subexpression),ConstantString(numeric));
return(*beta);
}
case ',':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(alpha);
}
case ';':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(*beta);
}
default:
{
gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,p,depth,beta,
exception);
return(gamma);
}
}
}
if (strchr("(",(int) *expression) != (char *) NULL)
{
(*depth)++;
if (*depth >= FxMaxParenthesisDepth)
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"ParenthesisNestedTooDeeply","`%s'",expression);
(void) CopyMagickString(subexpression,expression+1,MagickPathExtent);
subexpression[strlen(subexpression)-1]='\0';
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth,
beta,exception);
(*depth)--;
return(gamma);
}
switch (*expression)
{
case '+':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth,beta,
exception);
return(1.0*gamma);
}
case '-':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth,beta,
exception);
return(-1.0*gamma);
}
case '~':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth,beta,
exception);
return((double) (~(size_t) (gamma+0.5)));
}
case 'A':
case 'a':
{
if (LocaleNCompare(expression,"abs",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(fabs(alpha));
}
#if defined(MAGICKCORE_HAVE_ACOSH)
if (LocaleNCompare(expression,"acosh",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
return(acosh(alpha));
}
#endif
if (LocaleNCompare(expression,"acos",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
return(acos(alpha));
}
#if defined(MAGICKCORE_HAVE_J1)
if (LocaleNCompare(expression,"airy",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
if (alpha == 0.0)
return(1.0);
gamma=2.0*j1((MagickPI*alpha))/(MagickPI*alpha);
return(gamma*gamma);
}
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
if (LocaleNCompare(expression,"asinh",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
return(asinh(alpha));
}
#endif
if (LocaleNCompare(expression,"asin",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
return(asin(alpha));
}
if (LocaleNCompare(expression,"alt",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(((ssize_t) alpha) & 0x01 ? -1.0 : 1.0);
}
if (LocaleNCompare(expression,"atan2",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
return(atan2(alpha,*beta));
}
#if defined(MAGICKCORE_HAVE_ATANH)
if (LocaleNCompare(expression,"atanh",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
return(atanh(alpha));
}
#endif
if (LocaleNCompare(expression,"atan",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
return(atan(alpha));
}
if (LocaleCompare(expression,"a") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'B':
case 'b':
{
if (LocaleCompare(expression,"b") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'C':
case 'c':
{
if (LocaleNCompare(expression,"ceil",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
return(ceil(alpha));
}
if (LocaleNCompare(expression,"clamp",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
if (alpha < 0.0)
return(0.0);
if (alpha > 1.0)
return(1.0);
return(alpha);
}
if (LocaleNCompare(expression,"cosh",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
return(cosh(alpha));
}
if (LocaleNCompare(expression,"cos",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(cos(alpha));
}
if (LocaleCompare(expression,"c") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'D':
case 'd':
{
if (LocaleNCompare(expression,"debug",5) == 0)
{
const char
*type;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
if (fx_info->images->colorspace == CMYKColorspace)
switch (channel)
{
case CyanPixelChannel: type="cyan"; break;
case MagentaPixelChannel: type="magenta"; break;
case YellowPixelChannel: type="yellow"; break;
case AlphaPixelChannel: type="opacity"; break;
case BlackPixelChannel: type="black"; break;
default: type="unknown"; break;
}
else
switch (channel)
{
case RedPixelChannel: type="red"; break;
case GreenPixelChannel: type="green"; break;
case BluePixelChannel: type="blue"; break;
case AlphaPixelChannel: type="opacity"; break;
default: type="unknown"; break;
}
(void) CopyMagickString(subexpression,expression+6,MagickPathExtent);
if (strlen(subexpression) > 1)
subexpression[strlen(subexpression)-1]='\0';
if (fx_info->file != (FILE *) NULL)
(void) FormatLocaleFile(fx_info->file,"%s[%.20g,%.20g].%s: "
"%s=%.*g\n",fx_info->images->filename,(double) x,(double) y,type,
subexpression,GetMagickPrecision(),alpha);
return(0.0);
}
if (LocaleNCompare(expression,"drc",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return((alpha/(*beta*(alpha-1.0)+1.0)));
}
break;
}
case 'E':
case 'e':
{
if (LocaleCompare(expression,"epsilon") == 0)
return(MagickEpsilon);
#if defined(MAGICKCORE_HAVE_ERF)
if (LocaleNCompare(expression,"erf",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(erf(alpha));
}
#endif
if (LocaleNCompare(expression,"exp",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(exp(alpha));
}
if (LocaleCompare(expression,"e") == 0)
return(2.7182818284590452354);
break;
}
case 'F':
case 'f':
{
if (LocaleNCompare(expression,"floor",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
return(floor(alpha));
}
break;
}
case 'G':
case 'g':
{
if (LocaleNCompare(expression,"gauss",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
gamma=exp((-alpha*alpha/2.0))/sqrt(2.0*MagickPI);
return(gamma);
}
if (LocaleNCompare(expression,"gcd",3) == 0)
{
MagickOffsetType
gcd;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
gcd=FxGCD((MagickOffsetType) (alpha+0.5),(MagickOffsetType) (*beta+
0.5));
return((double) gcd);
}
if (LocaleCompare(expression,"g") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'H':
case 'h':
{
if (LocaleCompare(expression,"h") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
if (LocaleCompare(expression,"hue") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
if (LocaleNCompare(expression,"hypot",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
return(hypot(alpha,*beta));
}
break;
}
case 'K':
case 'k':
{
if (LocaleCompare(expression,"k") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'I':
case 'i':
{
if (LocaleCompare(expression,"intensity") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
if (LocaleNCompare(expression,"int",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(floor(alpha));
}
if (LocaleNCompare(expression,"isnan",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
return((double) !!IsNaN(alpha));
}
if (LocaleCompare(expression,"i") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'J':
case 'j':
{
if (LocaleCompare(expression,"j") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
#if defined(MAGICKCORE_HAVE_J0)
if (LocaleNCompare(expression,"j0",2) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,depth,
beta,exception);
return(j0(alpha));
}
#endif
#if defined(MAGICKCORE_HAVE_J1)
if (LocaleNCompare(expression,"j1",2) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,depth,
beta,exception);
return(j1(alpha));
}
#endif
#if defined(MAGICKCORE_HAVE_J1)
if (LocaleNCompare(expression,"jinc",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
if (alpha == 0.0)
return(1.0);
gamma=(2.0*j1((MagickPI*alpha))/(MagickPI*alpha));
return(gamma);
}
#endif
break;
}
case 'L':
case 'l':
{
if (LocaleNCompare(expression,"ln",2) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,depth,
beta,exception);
return(log(alpha));
}
if (LocaleNCompare(expression,"logtwo",6) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,depth,
beta,exception);
return(log10(alpha))/log10(2.0);
}
if (LocaleNCompare(expression,"log",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(log10(alpha));
}
if (LocaleCompare(expression,"lightness") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'M':
case 'm':
{
if (LocaleCompare(expression,"MaxRGB") == 0)
return(QuantumRange);
if (LocaleNCompare(expression,"maxima",6) == 0)
break;
if (LocaleNCompare(expression,"max",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(alpha > *beta ? alpha : *beta);
}
if (LocaleNCompare(expression,"minima",6) == 0)
break;
if (LocaleNCompare(expression,"min",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(alpha < *beta ? alpha : *beta);
}
if (LocaleNCompare(expression,"mod",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
gamma=alpha-floor((alpha/(*beta)))*(*beta);
return(gamma);
}
if (LocaleCompare(expression,"m") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'N':
case 'n':
{
if (LocaleNCompare(expression,"not",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return((double) (alpha < MagickEpsilon));
}
if (LocaleCompare(expression,"n") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'O':
case 'o':
{
if (LocaleCompare(expression,"Opaque") == 0)
return(1.0);
if (LocaleCompare(expression,"o") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'P':
case 'p':
{
if (LocaleCompare(expression,"phi") == 0)
return(MagickPHI);
if (LocaleCompare(expression,"pi") == 0)
return(MagickPI);
if (LocaleNCompare(expression,"pow",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(pow(alpha,*beta));
}
if (LocaleCompare(expression,"p") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'Q':
case 'q':
{
if (LocaleCompare(expression,"QuantumRange") == 0)
return(QuantumRange);
if (LocaleCompare(expression,"QuantumScale") == 0)
return(QuantumScale);
break;
}
case 'R':
case 'r':
{
if (LocaleNCompare(expression,"rand",4) == 0)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_FxEvaluateSubexpression)
#endif
alpha=GetPseudoRandomValue(fx_info->random_info);
return(alpha);
}
if (LocaleNCompare(expression,"round",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
return(floor(alpha+0.5));
}
if (LocaleCompare(expression,"r") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'S':
case 's':
{
if (LocaleCompare(expression,"saturation") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
if (LocaleNCompare(expression,"sign",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
return(alpha < 0.0 ? -1.0 : 1.0);
}
if (LocaleNCompare(expression,"sinc",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
if (alpha == 0)
return(1.0);
gamma=sin((MagickPI*alpha))/(MagickPI*alpha);
return(gamma);
}
if (LocaleNCompare(expression,"sinh",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
return(sinh(alpha));
}
if (LocaleNCompare(expression,"sin",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(sin(alpha));
}
if (LocaleNCompare(expression,"sqrt",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
return(sqrt(alpha));
}
if (LocaleNCompare(expression,"squish",6) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,depth,
beta,exception);
return((1.0/(1.0+exp(-alpha))));
}
if (LocaleCompare(expression,"s") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'T':
case 't':
{
if (LocaleNCompare(expression,"tanh",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
return(tanh(alpha));
}
if (LocaleNCompare(expression,"tan",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(tan(alpha));
}
if (LocaleCompare(expression,"Transparent") == 0)
return(0.0);
if (LocaleNCompare(expression,"trunc",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
if (alpha >= 0.0)
return(floor(alpha));
return(ceil(alpha));
}
if (LocaleCompare(expression,"t") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'U':
case 'u':
{
if (LocaleCompare(expression,"u") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'V':
case 'v':
{
if (LocaleCompare(expression,"v") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'W':
case 'w':
{
if (LocaleNCompare(expression,"while",5) == 0)
{
do
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth,beta,exception);
} while (fabs(alpha) >= MagickEpsilon);
return(*beta);
}
if (LocaleCompare(expression,"w") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'Y':
case 'y':
{
if (LocaleCompare(expression,"y") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'Z':
case 'z':
{
if (LocaleCompare(expression,"z") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
default:
break;
}
q=(char *) expression;
alpha=InterpretSiPrefixValue(expression,&q);
if (q == expression)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
return(alpha);
}
MagickPrivate MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  /*
    Evaluate the fx expression at the origin on the gray channel; the result
    is stored in *alpha.
  */
  return(FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
    exception));
}
MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  FILE
    *saved_file;

  MagickBooleanType
    status;

  /*
    Evaluate the expression once with debug output suppressed: stash the
    trace file handle, run the evaluation, then restore the handle.
  */
  saved_file=fx_info->file;
  fx_info->file=(FILE *) NULL;
  status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
    exception);
  fx_info->file=saved_file;
  return(status);
}
MagickPrivate MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
  const PixelChannel channel,const ssize_t x,const ssize_t y,
  double *alpha,ExceptionInfo *exception)
{
  double
    beta = 0.0;

  size_t
    depth = 0;

  /*
    Kick off the recursive evaluator at depth 0; an OptionError recorded in
    the exception signals a malformed expression.
  */
  *alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,
    &depth,&beta,exception);
  if (exception->severity == OptionError)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxImage() applies a mathematical expression to the specified image.
%
% The format of the FxImage method is:
%
% Image *FxImage(const Image *image,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o expression: A mathematical expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
static FxInfo **DestroyFxThreadSet(FxInfo **fx_info)
{
  register ssize_t
    i;

  /*
    Release each per-thread FxInfo (slots may be NULL if acquisition failed
    part-way), then release the pointer array itself.
  */
  assert(fx_info != (FxInfo **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
  {
    if (fx_info[i] == (FxInfo *) NULL)
      continue;
    fx_info[i]=DestroyFxInfo(fx_info[i]);
  }
  return((FxInfo **) RelinquishMagickMemory(fx_info));
}
static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
  char
    *fx_expression;

  double
    alpha;

  FxInfo
    **fx_info;

  register ssize_t
    i;

  size_t
    number_threads;

  /*
    Allocate one FxInfo per worker thread so FxImage() can evaluate rows in
    parallel without sharing parser state.
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info));
  if (fx_info == (FxInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return((FxInfo **) NULL);
    }
  (void) ResetMagickMemory(fx_info,0,number_threads*sizeof(*fx_info));
  /*
    An expression of the form "@filename" is read from a file.
  */
  if (*expression != '@')
    fx_expression=ConstantString(expression);
  else
    fx_expression=FileToString(expression+1,~0UL,exception);
  if (fx_expression == (char *) NULL)
    {
      /*
        FileToString() failed (e.g. unreadable file); the original code went
        on to call AcquireFxInfo() and DestroyString() with a NULL pointer.
      */
      fx_info=DestroyFxThreadSet(fx_info);
      return((FxInfo **) NULL);
    }
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    MagickBooleanType
      status;

    fx_info[i]=AcquireFxInfo(image,fx_expression,exception);
    if (fx_info[i] == (FxInfo *) NULL)
      break;
    /* Pre-parse once per thread; a bad expression aborts the set. */
    status=FxPreprocessExpression(fx_info[i],&alpha,exception);
    if (status == MagickFalse)
      break;
  }
  fx_expression=DestroyString(fx_expression);
  if (i < (ssize_t) number_threads)
    fx_info=DestroyFxThreadSet(fx_info);
  return(fx_info);
}
MagickExport Image *FxImage(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
#define FxImageTag "Fx/Image"

  CacheView
    *fx_view,
    *image_view;

  FxInfo
    **magick_restrict fx_info;  /* one parsed expression per worker thread */

  Image
    *fx_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Acquire the per-thread FxInfo set; returns NULL on a bad expression or
    allocation failure.
  */
  fx_info=AcquireFxThreadSet(image,expression,exception);
  if (fx_info == (FxInfo **) NULL)
    return((Image *) NULL);
  fx_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (fx_image == (Image *) NULL)
    {
      fx_info=DestroyFxThreadSet(fx_info);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(fx_image,DirectClass,exception) == MagickFalse)
    {
      fx_info=DestroyFxThreadSet(fx_info);
      fx_image=DestroyImage(fx_image);
      return((Image *) NULL);
    }
  /*
    Fx image: evaluate the expression at every pixel/channel, rows in
    parallel under OpenMP.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  fx_view=AcquireAuthenticCacheView(fx_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,fx_image,fx_image->rows,1)
#endif
  for (y=0; y < (ssize_t) fx_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* selects this thread's FxInfo */

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* a prior row failed; skip remaining rows */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) fx_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait fx_traits=GetPixelChannelTraits(fx_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (fx_traits == UndefinedPixelTrait))
          continue;
        /*
          Copy-through channels and write-masked pixels pass unchanged.
        */
        if (((fx_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p) <= (QuantumRange/2)))
          {
            SetPixelChannel(fx_image,channel,p[i],q);
            continue;
          }
        alpha=0.0;
        (void) FxEvaluateChannelExpression(fx_info[id],channel,x,y,&alpha,
          exception);
        /* Expression results are normalized; scale to the quantum range. */
        q[i]=ClampToQuantum(QuantumRange*alpha);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(fx_image);
    }
    if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress updates across the parallel rows. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FxImage)
#endif
        proceed=SetImageProgress(image,FxImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  fx_view=DestroyCacheView(fx_view);
  image_view=DestroyCacheView(image_view);
  fx_info=DestroyFxThreadSet(fx_info);
  if (status == MagickFalse)
    fx_image=DestroyImage(fx_image);
  return(fx_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I m p l o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ImplodeImage() creates a new image that is a copy of an existing
% one with the image pixels "imploded" by the specified percentage. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ImplodeImage method is:
%
% Image *ImplodeImage(const Image *image,const double amount,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o implode_image: Method ImplodeImage returns a pointer to the image
% after it is imploded. A null image is returned if there is a memory
% shortage.
%
% o image: the image.
%
% o amount: Define the extent of the implosion.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ImplodeImage(const Image *image,const double amount,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ImplodeImageTag "Implode/Image"

  CacheView
    *canvas_view,
    *implode_view,
    *interpolate_view;

  Image
    *canvas,
    *implode_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  double
    radius;  /* radius of the ellipse-bounding circle, in scaled units */

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize implode image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas == (Image *) NULL)
    return((Image *) NULL);
  /* Materialize an alpha channel when the background isn't fully opaque. */
  if ((canvas->alpha_trait == UndefinedPixelTrait) &&
      (canvas->background_color.alpha != OpaqueAlpha))
    (void) SetImageAlphaChannel(canvas,OpaqueAlphaChannel,exception);
  implode_image=CloneImage(canvas,canvas->columns,canvas->rows,MagickTrue,
    exception);
  if (implode_image == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(implode_image,DirectClass,exception) == MagickFalse)
    {
      canvas=DestroyImage(canvas);
      implode_image=DestroyImage(implode_image);
      return((Image *) NULL);
    }
  /*
    Compute scaling factor: stretch the shorter axis so the effect region is
    a circle in scaled coordinates regardless of aspect ratio.
  */
  scale.x=1.0;
  scale.y=1.0;
  center.x=0.5*canvas->columns;
  center.y=0.5*canvas->rows;
  radius=center.x;
  if (canvas->columns > canvas->rows)
    scale.y=(double) canvas->columns/(double) canvas->rows;
  else
    if (canvas->columns < canvas->rows)
      {
        scale.x=(double) canvas->rows/(double) canvas->columns;
        radius=center.y;
      }
  /*
    Implode image: rows in parallel under OpenMP.
  */
  status=MagickTrue;
  progress=0;
  canvas_view=AcquireVirtualCacheView(canvas,exception);
  interpolate_view=AcquireVirtualCacheView(canvas,exception);
  implode_view=AcquireAuthenticCacheView(implode_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(canvas,implode_image,canvas->rows,1)
#endif
  for (y=0; y < (ssize_t) canvas->rows; y++)
  {
    double
      distance;  /* squared scaled distance from the image center */

    PointInfo
      delta;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(implode_view,0,y,implode_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    delta.y=scale.y*(double) (y-center.y);
    for (x=0; x < (ssize_t) canvas->columns; x++)
    {
      register ssize_t
        i;

      /*
        Determine if the pixel is within an ellipse.
      */
      if (GetPixelWriteMask(canvas,p) <= (QuantumRange/2))
        {
          /* Write-masked pixel: paint the background color instead. */
          SetPixelBackgoundColor(implode_image,q);
          p+=GetPixelChannels(canvas);
          q+=GetPixelChannels(implode_image);
          continue;
        }
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance >= (radius*radius))
        /* Outside the effect circle: copy the source pixel unchanged. */
        for (i=0; i < (ssize_t) GetPixelChannels(canvas); i++)
        {
          PixelChannel channel = GetPixelChannelChannel(canvas,i);
          PixelTrait traits = GetPixelChannelTraits(canvas,channel);
          PixelTrait implode_traits = GetPixelChannelTraits(implode_image,
            channel);
          if ((traits == UndefinedPixelTrait) ||
              (implode_traits == UndefinedPixelTrait))
            continue;
          SetPixelChannel(implode_image,channel,p[i],q);
        }
      else
        {
          double
            factor;

          /*
            Implode the pixel: sample the source at a radially displaced
            position; negative amount explodes instead.
          */
          factor=1.0;
          if (distance > 0.0)
            factor=pow(sin(MagickPI*sqrt((double) distance)/radius/2),-amount);
          status=InterpolatePixelChannels(canvas,interpolate_view,implode_image,
            method,(double) (factor*delta.x/scale.x+center.x),(double) (factor*
            delta.y/scale.y+center.y),q,exception);
        }
      p+=GetPixelChannels(canvas);
      q+=GetPixelChannels(implode_image);
    }
    if (SyncCacheViewAuthenticPixels(implode_view,exception) == MagickFalse)
      status=MagickFalse;
    if (canvas->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress updates across the parallel rows. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ImplodeImage)
#endif
        proceed=SetImageProgress(canvas,ImplodeImageTag,progress++,
          canvas->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  implode_view=DestroyCacheView(implode_view);
  interpolate_view=DestroyCacheView(interpolate_view);
  canvas_view=DestroyCacheView(canvas_view);
  canvas=DestroyImage(canvas);
  if (status == MagickFalse)
    implode_image=DestroyImage(implode_image);
  return(implode_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The MorphImages() method requires a minimum of two images. The first
% image is transformed into the second by a number of intervening images
% as specified by frames.
%
% The format of the MorphImages method is:
%
% Image *MorphImages(const Image *image,const size_t number_frames,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_frames: Define the number of in-between image to generate.
% The more in-between frames, the smoother the morph.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphImages(const Image *image,const size_t number_frames,
  ExceptionInfo *exception)
{
#define MorphImageTag "Morph/Image"

  double
    alpha,
    beta;

  Image
    *morph_image,
    *morph_images;

  MagickBooleanType
    status;

  MagickOffsetType
    scene;

  register const Image
    *next;

  register ssize_t
    n;

  ssize_t
    y;

  /*
    Clone first frame in sequence.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  morph_images=CloneImage(image,0,0,MagickTrue,exception);
  if (morph_images == (Image *) NULL)
    return((Image *) NULL);
  status=MagickTrue;  /* initialize before the single-image path writes it */
  if (GetNextImageInList(image) == (Image *) NULL)
    {
      /*
        Morph single image: emit identical copies of the only frame.
      */
      for (n=1; n < (ssize_t) number_frames; n++)
      {
        morph_image=CloneImage(image,0,0,MagickTrue,exception);
        if (morph_image == (Image *) NULL)
          {
            morph_images=DestroyImageList(morph_images);
            return((Image *) NULL);
          }
        AppendImageToList(&morph_images,morph_image);
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,MorphImageTag,(MagickOffsetType) n,
              number_frames);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      return(GetFirstImageInList(morph_images));
    }
  /*
    Morph image sequence: between each adjacent pair, insert number_frames
    blended in-between frames.
  */
  status=MagickTrue;
  scene=0;
  next=image;
  for ( ; GetNextImageInList(next) != (Image *) NULL; next=GetNextImageInList(next))
  {
    for (n=0; n < (ssize_t) number_frames; n++)
    {
      CacheView
        *image_view,
        *morph_view;

      /*
        beta ramps from 0 toward 1 across the in-between frames; alpha is
        its complement (weight of the current frame).
      */
      beta=(double) (n+1.0)/(double) (number_frames+1.0);
      alpha=1.0-beta;
      morph_image=ResizeImage(next,(size_t) (alpha*next->columns+beta*
        GetNextImageInList(next)->columns+0.5),(size_t) (alpha*next->rows+beta*
        GetNextImageInList(next)->rows+0.5),next->filter,exception);
      if (morph_image == (Image *) NULL)
        {
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      status=SetImageStorageClass(morph_image,DirectClass,exception);
      if (status == MagickFalse)
        {
          morph_image=DestroyImage(morph_image);
          /* Fix: also release the list built so far (was leaked). */
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      AppendImageToList(&morph_images,morph_image);
      morph_images=GetLastImageInList(morph_images);
      /* Resize the next frame to the in-between geometry for blending. */
      morph_image=ResizeImage(GetNextImageInList(next),morph_images->columns,
        morph_images->rows,GetNextImageInList(next)->filter,exception);
      if (morph_image == (Image *) NULL)
        {
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      image_view=AcquireVirtualCacheView(morph_image,exception);
      morph_view=AcquireAuthenticCacheView(morph_images,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(morph_image,morph_image,morph_image->rows,1)
#endif
      for (y=0; y < (ssize_t) morph_images->rows; y++)
      {
        MagickBooleanType
          sync;

        register const Quantum
          *magick_restrict p;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,morph_image->columns,1,
          exception);
        q=GetCacheViewAuthenticPixels(morph_view,0,y,morph_images->columns,1,
          exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) morph_images->columns; x++)
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(morph_image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(morph_image,i);
            PixelTrait traits = GetPixelChannelTraits(morph_image,channel);
            PixelTrait morph_traits=GetPixelChannelTraits(morph_images,channel);
            if ((traits == UndefinedPixelTrait) ||
                (morph_traits == UndefinedPixelTrait))
              continue;
            if (((morph_traits & CopyPixelTrait) != 0) ||
                (GetPixelWriteMask(morph_images,p) <= (QuantumRange/2)))
              {
                SetPixelChannel(morph_image,channel,p[i],q);
                continue;
              }
            /*
              Blend: alpha weighs the current frame (already in q), beta the
              next frame (in p).
            */
            SetPixelChannel(morph_image,channel,ClampToQuantum(alpha*
              GetPixelChannel(morph_images,channel,q)+beta*p[i]),q);
          }
          p+=GetPixelChannels(morph_image);
          q+=GetPixelChannels(morph_images);
        }
        sync=SyncCacheViewAuthenticPixels(morph_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      morph_view=DestroyCacheView(morph_view);
      image_view=DestroyCacheView(image_view);
      morph_image=DestroyImage(morph_image);
    }
    if (n < (ssize_t) number_frames)
      break;  /* inner loop ended early; abandon the sequence */
    /*
      Clone last frame in sequence.
    */
    morph_image=CloneImage(GetNextImageInList(next),0,0,MagickTrue,exception);
    if (morph_image == (Image *) NULL)
      {
        morph_images=DestroyImageList(morph_images);
        return((Image *) NULL);
      }
    AppendImageToList(&morph_images,morph_image);
    morph_images=GetLastImageInList(morph_images);
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_MorphImages)
#endif
        proceed=SetImageProgress(image,MorphImageTag,scene,
          GetImageListLength(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
    scene++;
  }
  if (GetNextImageInList(next) != (Image *) NULL)
    {
      /* The sequence was not fully processed: report failure. */
      morph_images=DestroyImageList(morph_images);
      return((Image *) NULL);
    }
  return(GetFirstImageInList(morph_images));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P l a s m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PlasmaImage() initializes an image with plasma fractal values. The image
% must be initialized with a base color and the random number generator
% seeded before this method is called.
%
% The format of the PlasmaImage method is:
%
% MagickBooleanType PlasmaImage(Image *image,const SegmentInfo *segment,
% size_t attenuate,size_t depth,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o segment: Define the region to apply plasma fractals values.
%
% o attenuate: Define the plasma attenuation factor.
%
% o depth: Limit the plasma recursion depth.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline Quantum PlasmaPixel(RandomInfo *random_info,
  const double pixel,const double noise)
{
  Quantum
    value;

  /*
    Perturb the pixel by uniform noise in [-noise/2, +noise/2] and clamp the
    result to the valid quantum range.
  */
  value=ClampToQuantum(pixel+noise*GetPseudoRandomValue(random_info)-
    noise/2.0);
  if (value <= 0)
    return((Quantum) 0);
  return(value >= QuantumRange ? QuantumRange : value);
}
static MagickBooleanType PlasmaImageProxy(Image *image,CacheView *image_view,
  CacheView *u_view,CacheView *v_view,RandomInfo *random_info,
  const SegmentInfo *segment,size_t attenuate,size_t depth,
  ExceptionInfo *exception)
{
  double
    plasma;  /* noise amplitude for this recursion level */

  register const Quantum
    *magick_restrict u,
    *magick_restrict v;

  register Quantum
    *magick_restrict q;

  register ssize_t
    i;

  ssize_t
    x,
    x_mid,
    y,
    y_mid;

  /*
    Recursive plasma fractal: subdivide while depth remains, then set the
    edge midpoints and center of this segment from perturbed averages of
    the corner pixels.
  */
  if ((fabs(segment->x2-segment->x1) <= MagickEpsilon) &&
      (fabs(segment->y2-segment->y1) <= MagickEpsilon))
    return(MagickTrue);  /* degenerate segment: nothing to do */
  if (depth != 0)
    {
      MagickBooleanType
        status;

      SegmentInfo
        local_info;

      /*
        Divide the area into quadrants and recurse.
      */
      depth--;
      attenuate++;  /* deeper levels get smaller noise amplitude */
      x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5);
      y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5);
      /* Top-left quadrant. */
      local_info=(*segment);
      local_info.x2=(double) x_mid;
      local_info.y2=(double) y_mid;
      (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      /* Bottom-left quadrant. */
      local_info=(*segment);
      local_info.y1=(double) y_mid;
      local_info.x2=(double) x_mid;
      (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      /* Top-right quadrant. */
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y2=(double) y_mid;
      (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      /* Bottom-right quadrant; only its status is propagated. */
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y1=(double) y_mid;
      status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      return(status);
    }
  x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5);
  y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5);
  /* Segment already collapsed to its midpoint: stop recursing. */
  if ((fabs(segment->x1-x_mid) < MagickEpsilon) &&
      (fabs(segment->x2-x_mid) < MagickEpsilon) &&
      (fabs(segment->y1-y_mid) < MagickEpsilon) &&
      (fabs(segment->y2-y_mid) < MagickEpsilon))
    return(MagickFalse);
  /*
    Average pixels and apply plasma; amplitude shrinks as attenuate grows.
  */
  plasma=(double) QuantumRange/(2.0*attenuate);
  if ((fabs(segment->x1-x_mid) > MagickEpsilon) ||
      (fabs(segment->x2-x_mid) > MagickEpsilon))
    {
      /*
        Left pixel: midpoint of the left edge, from the two left corners.
      */
      x=(ssize_t) ceil(segment->x1-0.5);
      u=GetCacheViewVirtualPixels(u_view,x,(ssize_t) ceil(segment->y1-0.5),1,1,
        exception);
      v=GetCacheViewVirtualPixels(v_view,x,(ssize_t) ceil(segment->y2-0.5),1,1,
        exception);
      q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
      if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
          (q == (Quantum *) NULL))
        return(MagickTrue);  /* inaccessible pixels: treat segment as done */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
      }
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
      if (fabs(segment->x1-segment->x2) > MagickEpsilon)
        {
          /*
            Right pixel: midpoint of the right edge.
          */
          x=(ssize_t) ceil(segment->x2-0.5);
          u=GetCacheViewVirtualPixels(u_view,x,(ssize_t) ceil(segment->y1-0.5),
            1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,x,(ssize_t) ceil(segment->y2-0.5),
            1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
          }
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->y1-y_mid) > MagickEpsilon) ||
      (fabs(segment->y2-y_mid) > MagickEpsilon))
    {
      /*
        NOTE(review): the x1/y2 mix in this condition is asymmetric with the
        left/right case above (which tests x1 and x2) -- confirm against
        upstream before changing.
      */
      if ((fabs(segment->x1-x_mid) > MagickEpsilon) ||
          (fabs(segment->y2-y_mid) > MagickEpsilon))
        {
          /*
            Bottom pixel: midpoint of the bottom edge.
          */
          y=(ssize_t) ceil(segment->y2-0.5);
          u=GetCacheViewVirtualPixels(u_view,(ssize_t) ceil(segment->x1-0.5),y,
            1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,(ssize_t) ceil(segment->x2-0.5),y,
            1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
          }
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
      if (fabs(segment->y1-segment->y2) > MagickEpsilon)
        {
          /*
            Top pixel: midpoint of the top edge.
          */
          y=(ssize_t) ceil(segment->y1-0.5);
          u=GetCacheViewVirtualPixels(u_view,(ssize_t) ceil(segment->x1-0.5),y,
            1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,(ssize_t) ceil(segment->x2-0.5),y,
            1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
          }
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->x1-segment->x2) > MagickEpsilon) ||
      (fabs(segment->y1-segment->y2) > MagickEpsilon))
    {
      /*
        Middle pixel: center of the segment, from two opposite corners.
      */
      x=(ssize_t) ceil(segment->x1-0.5);
      y=(ssize_t) ceil(segment->y1-0.5);
      u=GetCacheViewVirtualPixels(u_view,x,y,1,1,exception);
      x=(ssize_t) ceil(segment->x2-0.5);
      y=(ssize_t) ceil(segment->y2-0.5);
      v=GetCacheViewVirtualPixels(v_view,x,y,1,1,exception);
      q=QueueCacheViewAuthenticPixels(image_view,x_mid,y_mid,1,1,exception);
      if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
          (q == (Quantum *) NULL))
        return(MagickTrue);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
      }
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
    }
  /* Segments smaller than 3x3 pixels are complete. */
  if ((fabs(segment->x2-segment->x1) < 3.0) &&
      (fabs(segment->y2-segment->y1) < 3.0))
    return(MagickTrue);
  return(MagickFalse);
}
MagickExport MagickBooleanType PlasmaImage(Image *image,
  const SegmentInfo *segment,size_t attenuate,size_t depth,
  ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *u_view,
    *v_view;

  MagickBooleanType
    status;

  RandomInfo
    *random_info;

  /*
    Validate arguments before first use: the original dereferenced
    image->debug before asserting image != NULL and logged the trace event
    twice.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    One authentic view receives the plasma values; two virtual views supply
    the corner pixels that PlasmaImageProxy() averages.
  */
  image_view=AcquireAuthenticCacheView(image,exception);
  u_view=AcquireVirtualCacheView(image,exception);
  v_view=AcquireVirtualCacheView(image,exception);
  random_info=AcquireRandomInfo();
  status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,segment,
    attenuate,depth,exception);
  random_info=DestroyRandomInfo(random_info);
  v_view=DestroyCacheView(v_view);
  u_view=DestroyCacheView(u_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l a r o i d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolaroidImage() simulates a Polaroid picture.
%
% The format of the PolaroidImage method is:
%
% Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
% const char *caption,const double angle,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o caption: the Polaroid caption.
%
% o angle: Apply the effect along this angle.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
  const char *caption,const double angle,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
  Image
    *bend_image,
    *caption_image,
    *flop_image,
    *picture_image,
    *polaroid_image,
    *rotate_image,
    *trim_image;

  size_t
    height;

  ssize_t
    quantum;

  /*
    Simulate a Polaroid picture: white frame, optional caption strip, a
    slight bend, a drop shadow, and a final rotation by `angle'.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Frame border is 1/25th of the larger dimension, never less than 10px.
  */
  quantum=(ssize_t) MagickMax(MagickMax((double) image->columns,(double)
    image->rows)/25.0,10.0);
  height=image->rows+2*quantum;
  caption_image=(Image *) NULL;
  if (caption != (const char *) NULL)
    {
      char
        geometry[MagickPathExtent],
        *text;

      DrawInfo
        *annotate_info;

      ImageInfo
        *image_info;

      MagickBooleanType
        status;

      ssize_t
        count;

      TypeMetric
        metrics;

      /*
        Generate caption image.
      */
      caption_image=CloneImage(image,image->columns,1,MagickTrue,exception);
      if (caption_image == (Image *) NULL)
        return((Image *) NULL);
      image_info=AcquireImageInfo();
      annotate_info=CloneDrawInfo((const ImageInfo *) NULL,draw_info);
      text=InterpretImageProperties(image_info,(Image *) image,caption,
        exception);
      image_info=DestroyImageInfo(image_info);
      if (text == (char *) NULL)
        {
          /*
            Property interpretation failed; the original dereferenced the
            NULL text below.  Release resources and bail out instead.
          */
          annotate_info=DestroyDrawInfo(annotate_info);
          caption_image=DestroyImage(caption_image);
          return((Image *) NULL);
        }
      (void) CloneString(&annotate_info->text,text);
      count=FormatMagickCaption(caption_image,annotate_info,MagickTrue,&metrics,
        &text,exception);
      status=SetImageExtent(caption_image,image->columns,(size_t) ((count+1)*
        (metrics.ascent-metrics.descent)+0.5),exception);
      if (status == MagickFalse)
        caption_image=DestroyImage(caption_image);
      else
        {
          caption_image->background_color=image->border_color;
          (void) SetImageBackgroundColor(caption_image,exception);
          (void) CloneString(&annotate_info->text,text);
          (void) FormatLocaleString(geometry,MagickPathExtent,"+0+%g",
            metrics.ascent);
          if (annotate_info->gravity == UndefinedGravity)
            /*
              CloneString copies its argument, so pass the stack buffer
              directly; wrapping it in AcquireString() leaked the copy.
            */
            (void) CloneString(&annotate_info->geometry,geometry);
          (void) AnnotateImage(caption_image,annotate_info,exception);
          height+=caption_image->rows;
        }
      annotate_info=DestroyDrawInfo(annotate_info);
      text=DestroyString(text);
    }
  /*
    Compose the picture: image centered inside its border, caption (if any)
    under the photo area.
  */
  picture_image=CloneImage(image,image->columns+2*quantum,height,MagickTrue,
    exception);
  if (picture_image == (Image *) NULL)
    {
      if (caption_image != (Image *) NULL)
        caption_image=DestroyImage(caption_image);
      return((Image *) NULL);
    }
  picture_image->background_color=image->border_color;
  (void) SetImageBackgroundColor(picture_image,exception);
  (void) CompositeImage(picture_image,image,OverCompositeOp,MagickTrue,quantum,
    quantum,exception);
  if (caption_image != (Image *) NULL)
    {
      (void) CompositeImage(picture_image,caption_image,OverCompositeOp,
        MagickTrue,quantum,(ssize_t) (image->rows+3*quantum/2),exception);
      caption_image=DestroyImage(caption_image);
    }
  (void) QueryColorCompliance("none",AllCompliance,
    &picture_image->background_color,exception);
  (void) SetImageAlphaChannel(picture_image,OpaqueAlphaChannel,exception);
  /*
    Bend the picture: rotate 90, wave, rotate back.
  */
  rotate_image=RotateImage(picture_image,90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  bend_image=WaveImage(picture_image,0.01*picture_image->rows,2.0*
    picture_image->columns,method,exception);
  picture_image=DestroyImage(picture_image);
  if (bend_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=bend_image;
  rotate_image=RotateImage(picture_image,-90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  picture_image->background_color=image->background_color;
  /*
    Cast a soft shadow, mirror it under the picture, then apply the final
    rotation and trim away the transparent margin.
  */
  polaroid_image=ShadowImage(picture_image,80.0,2.0,quantum/3,quantum/3,
    exception);
  if (polaroid_image == (Image *) NULL)
    {
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  flop_image=FlopImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (flop_image == (Image *) NULL)
    {
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  polaroid_image=flop_image;
  (void) CompositeImage(polaroid_image,picture_image,OverCompositeOp,
    MagickTrue,(ssize_t) (-0.01*picture_image->columns/2.0),0L,exception);
  picture_image=DestroyImage(picture_image);
  (void) QueryColorCompliance("none",AllCompliance,
    &polaroid_image->background_color,exception);
  rotate_image=RotateImage(polaroid_image,angle,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=rotate_image;
  trim_image=TrimImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (trim_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=trim_image;
  return(polaroid_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p i a T o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SepiaToneImage() applies a special effect to the image, similar to the
% effect achieved in a photo darkroom by sepia toning. Threshold ranges from
% 0 to QuantumRange and is a measure of the extent of the sepia toning. A
% threshold of 80% is a good starting point for a reasonable tone.
%
% The format of the SepiaToneImage method is:
%
% Image *SepiaToneImage(const Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: the tone threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SepiaToneImage(const Image *image,const double threshold,
ExceptionInfo *exception)
{
#define SepiaToneImageTag "SepiaTone/Image"
/* Views for reading the source and writing the sepia-toned clone. */
CacheView
*image_view,
*sepia_view;
Image
*sepia_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Initialize sepia-toned image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
sepia_image=CloneImage(image,0,0,MagickTrue,exception);
if (sepia_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(sepia_image,DirectClass,exception) == MagickFalse)
{
sepia_image=DestroyImage(sepia_image);
return((Image *) NULL);
}
/*
Tone each row of the image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
sepia_view=AcquireAuthenticCacheView(sepia_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,sepia_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
/* Another thread failed; skip remaining rows (cannot break in OpenMP). */
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=GetCacheViewAuthenticPixels(sepia_view,0,y,sepia_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
intensity,
tone;
/*
Map intensity to sepia: red saturates above `threshold', green above
7/6*threshold, and blue is darkened by threshold/6 (clamped at 0).
*/
intensity=GetPixelIntensity(image,p);
tone=intensity > threshold ? (double) QuantumRange : intensity+
(double) QuantumRange-threshold;
SetPixelRed(sepia_image,ClampToQuantum(tone),q);
tone=intensity > (7.0*threshold/6.0) ? (double) QuantumRange :
intensity+(double) QuantumRange-7.0*threshold/6.0;
SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
tone=intensity < (threshold/6.0) ? 0 : intensity-threshold/6.0;
SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
/*
Floor green/blue at threshold/7.  NOTE(review): these reads use the
source image's channel map on the destination pixel `q'; this assumes
the clone shares the source's channel layout -- confirm.
*/
tone=threshold/7.0;
if ((double) GetPixelGreen(image,q) < tone)
SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
if ((double) GetPixelBlue(image,q) < tone)
SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
SetPixelAlpha(sepia_image,GetPixelAlpha(image,p),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(sepia_image);
}
if (SyncCacheViewAuthenticPixels(sepia_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SepiaToneImage)
#endif
proceed=SetImageProgress(image,SepiaToneImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
sepia_view=DestroyCacheView(sepia_view);
image_view=DestroyCacheView(image_view);
/* Stretch contrast of the result for a richer tone. */
(void) NormalizeImage(sepia_image,exception);
(void) ContrastImage(sepia_image,MagickTrue,exception);
if (status == MagickFalse)
sepia_image=DestroyImage(sepia_image);
return(sepia_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a d o w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShadowImage() simulates a shadow from the specified image and returns it.
%
% The format of the ShadowImage method is:
%
% Image *ShadowImage(const Image *image,const double alpha,
% const double sigma,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o alpha: percentage transparency.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x_offset: the shadow x-offset.
%
% o y_offset: the shadow y-offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadowImage(const Image *image,const double alpha,
const double sigma,const ssize_t x_offset,const ssize_t y_offset,
ExceptionInfo *exception)
{
#define ShadowImageTag "Shadow/Image"
CacheView
*image_view;
ChannelType
channel_mask;
Image
*border_image,
*clone_image,
*shadow_image;
MagickBooleanType
status;
PixelInfo
background_color;
RectangleInfo
border_info;
ssize_t
y;
/*
Build a shadow image: flatten the source to its background color with
alpha scaled by `alpha' percent, Gaussian-blur the alpha channel by
`sigma', then offset the page geometry by (x_offset,y_offset).
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
clone_image=CloneImage(image,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
return((Image *) NULL);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) SetImageColorspace(clone_image,sRGBColorspace,exception);
(void) SetImageVirtualPixelMethod(clone_image,EdgeVirtualPixelMethod,
exception);
/* Pad by 2*sigma on each side so the blur has room to spread. */
border_info.width=(size_t) floor(2.0*sigma+0.5);
border_info.height=(size_t) floor(2.0*sigma+0.5);
border_info.x=0;
border_info.y=0;
(void) QueryColorCompliance("none",AllCompliance,&clone_image->border_color,
exception);
clone_image->alpha_trait=BlendPixelTrait;
border_image=BorderImage(clone_image,&border_info,OverCompositeOp,exception);
clone_image=DestroyImage(clone_image);
if (border_image == (Image *) NULL)
return((Image *) NULL);
if (border_image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(border_image,OpaqueAlphaChannel,exception);
/*
Shadow image.
*/
status=MagickTrue;
background_color=border_image->background_color;
background_color.alpha_trait=BlendPixelTrait;
image_view=AcquireAuthenticCacheView(border_image,exception);
for (y=0; y < (ssize_t) border_image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,border_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) border_image->columns; x++)
{
/* Replace each pixel with the background color, keeping a scaled alpha. */
if (border_image->alpha_trait != UndefinedPixelTrait)
background_color.alpha=GetPixelAlpha(border_image,q)*alpha/100.0;
SetPixelViaPixelInfo(border_image,&background_color,q);
q+=GetPixelChannels(border_image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
{
border_image=DestroyImage(border_image);
return((Image *) NULL);
}
/* Restrict the blur to the alpha channel, then restore the mask. */
channel_mask=SetImageChannelMask(border_image,AlphaChannel);
shadow_image=BlurImage(border_image,0.0,sigma,exception);
border_image=DestroyImage(border_image);
if (shadow_image == (Image *) NULL)
return((Image *) NULL);
(void) SetPixelChannelMask(shadow_image,channel_mask);
if (shadow_image->page.width == 0)
shadow_image->page.width=shadow_image->columns;
if (shadow_image->page.height == 0)
shadow_image->page.height=shadow_image->rows;
/* Shift the page so the shadow lands at the requested offset. */
shadow_image->page.width+=x_offset-(ssize_t) border_info.width;
shadow_image->page.height+=y_offset-(ssize_t) border_info.height;
shadow_image->page.x+=x_offset-(ssize_t) border_info.width;
shadow_image->page.y+=y_offset-(ssize_t) border_info.height;
return(shadow_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S k e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SketchImage() simulates a pencil sketch. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma). For
% reasonable results, radius should be larger than sigma. Use a radius of 0
% and SketchImage() selects a suitable radius for you. Angle gives the angle
% of the sketch.
%
% The format of the SketchImage method is:
%
% Image *SketchImage(const Image *image,const double radius,
% const double sigma,const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the
% center pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o angle: apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SketchImage(const Image *image,const double radius,
const double sigma,const double angle,ExceptionInfo *exception)
{
CacheView
*random_view;
Image
*blend_image,
*blur_image,
*dodge_image,
*random_image,
*sketch_image;
MagickBooleanType
status;
RandomInfo
**magick_restrict random_info;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
/*
Sketch image.
Pipeline: (1) fill a double-size clone with per-pixel random gray,
(2) motion-blur it along `angle', (3) edge-detect and negate to form a
dodge layer, (4) color-dodge composite onto a clone of the source, and
(5) blend the result 20/80 with the original image.
*/
random_image=CloneImage(image,image->columns << 1,image->rows << 1,
MagickTrue,exception);
if (random_image == (Image *) NULL)
return((Image *) NULL);
status=MagickTrue;
random_info=AcquireRandomInfoThreadSet();
random_view=AcquireAuthenticCacheView(random_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(random_image,random_image,random_image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) random_image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(random_view,0,y,random_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) random_image->columns; x++)
{
double
value;
register ssize_t
i;
/* Masked-out pixels are left untouched. */
if (GetPixelWriteMask(random_image,q) <= (QuantumRange/2))
{
q+=GetPixelChannels(random_image);
continue;
}
/* One random value per pixel, written to every defined channel. */
value=GetPseudoRandomValue(random_info[id]);
for (i=0; i < (ssize_t) GetPixelChannels(random_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
q[i]=ClampToQuantum(QuantumRange*value);
}
q+=GetPixelChannels(random_image);
}
if (SyncCacheViewAuthenticPixels(random_view,exception) == MagickFalse)
status=MagickFalse;
}
random_view=DestroyCacheView(random_view);
random_info=DestroyRandomInfoThreadSet(random_info);
if (status == MagickFalse)
{
/* DestroyImage() returns NULL, so this reports failure to the caller. */
random_image=DestroyImage(random_image);
return(random_image);
}
blur_image=MotionBlurImage(random_image,radius,sigma,angle,exception);
random_image=DestroyImage(random_image);
if (blur_image == (Image *) NULL)
return((Image *) NULL);
dodge_image=EdgeImage(blur_image,radius,exception);
blur_image=DestroyImage(blur_image);
if (dodge_image == (Image *) NULL)
return((Image *) NULL);
(void) NormalizeImage(dodge_image,exception);
(void) NegateImage(dodge_image,MagickFalse,exception);
/* Shrink the dodge layer back to the source dimensions. */
(void) TransformImage(&dodge_image,(char *) NULL,"50%",exception);
sketch_image=CloneImage(image,0,0,MagickTrue,exception);
if (sketch_image == (Image *) NULL)
{
dodge_image=DestroyImage(dodge_image);
return((Image *) NULL);
}
(void) CompositeImage(sketch_image,dodge_image,ColorDodgeCompositeOp,
MagickTrue,0,0,exception);
dodge_image=DestroyImage(dodge_image);
blend_image=CloneImage(image,0,0,MagickTrue,exception);
if (blend_image == (Image *) NULL)
{
sketch_image=DestroyImage(sketch_image);
return((Image *) NULL);
}
if (blend_image->alpha_trait != BlendPixelTrait)
(void) SetImageAlpha(blend_image,TransparentAlpha,exception);
/* 20% original / 80% sketch blend weights. */
(void) SetImageArtifact(blend_image,"compose:args","20x80");
(void) CompositeImage(sketch_image,blend_image,BlendCompositeOp,MagickTrue,
0,0,exception);
blend_image=DestroyImage(blend_image);
return(sketch_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S o l a r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SolarizeImage() applies a special effect to the image, similar to the effect
% achieved in a photo darkroom by selectively exposing areas of photo
% sensitive paper to light. Threshold ranges from 0 to QuantumRange and is a
% measure of the extent of the solarization.
%
% The format of the SolarizeImage method is:
%
% MagickBooleanType SolarizeImage(Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: Define the extent of the solarization.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SolarizeImage(Image *image,
const double threshold,ExceptionInfo *exception)
{
#define SolarizeImageTag "Solarize/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Invert, in place, every channel value that exceeds `threshold'.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) SetImageColorspace(image,sRGBColorspace,exception);
if (image->storage_class == PseudoClass)
{
register ssize_t
i;
/*
Solarize colormap.
*/
for (i=0; i < (ssize_t) image->colors; i++)
{
if ((double) image->colormap[i].red > threshold)
image->colormap[i].red=QuantumRange-image->colormap[i].red;
if ((double) image->colormap[i].green > threshold)
image->colormap[i].green=QuantumRange-image->colormap[i].green;
if ((double) image->colormap[i].blue > threshold)
image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
}
}
/*
Solarize image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
/* Another thread failed; skip remaining rows (cannot break in OpenMP). */
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
/* Masked-out pixels are left untouched. */
if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
{
q+=GetPixelChannels(image);
continue;
}
/* Only channels flagged for update (e.g. not alpha) are inverted. */
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if ((double) q[i] > threshold)
q[i]=QuantumRange-q[i];
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SolarizeImage)
#endif
proceed=SetImageProgress(image,SolarizeImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e g a n o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SteganoImage() hides a digital watermark within the image. Recover
% the hidden watermark later to prove the authenticity of an image.
% Offset defines the start position within the image to hide the watermark.
%
% The format of the SteganoImage method is:
%
% Image *SteganoImage(const Image *image,Image *watermark,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o watermark: the watermark image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SteganoImage(const Image *image,const Image *watermark,
  ExceptionInfo *exception)
{
#define GetBit(alpha,i) ((((size_t) (alpha) >> (size_t) (i)) & 0x01) != 0)
#define SetBit(alpha,i,set) (Quantum) ((set) != 0 ? (size_t) (alpha) \
  | (one << (size_t) (i)) : (size_t) (alpha) & ~(one << (size_t) (i)))
#define SteganoImageTag "Stegano/Image"

  CacheView
    *stegano_view,
    *watermark_view;

  Image
    *stegano_image;

  int
    c;

  MagickBooleanType
    status;

  PixelInfo
    pixel;

  register Quantum
    *q;

  register ssize_t
    x;

  size_t
    depth,
    one;

  ssize_t
    i,
    j,
    k,
    y;

  /*
    Initialize steganographic image attributes.  Each watermark intensity
    bit i is written into bit j of one channel (cycling R->G->B) of the
    destination pixel indexed by k, which starts at image->offset.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(watermark != (const Image *) NULL);
  assert(watermark->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  one=1UL;
  stegano_image=CloneImage(image,0,0,MagickTrue,exception);
  if (stegano_image == (Image *) NULL)
    return((Image *) NULL);
  stegano_image->depth=MAGICKCORE_QUANTUM_DEPTH;
  if (SetImageStorageClass(stegano_image,DirectClass,exception) == MagickFalse)
    {
      stegano_image=DestroyImage(stegano_image);
      return((Image *) NULL);
    }
  /*
    Hide watermark in low-order bits of image.
  */
  c=0;
  i=0;
  j=0;
  depth=stegano_image->depth;
  k=stegano_image->offset;
  status=MagickTrue;
  watermark_view=AcquireVirtualCacheView(watermark,exception);
  stegano_view=AcquireAuthenticCacheView(stegano_image,exception);
  for (i=(ssize_t) depth-1; (i >= 0) && (j < (ssize_t) depth); i--)
  {
    for (y=0; (y < (ssize_t) watermark->rows) && (j < (ssize_t) depth); y++)
    {
      for (x=0; (x < (ssize_t) watermark->columns) && (j < (ssize_t) depth); x++)
      {
        ssize_t
          offset;

        (void) GetOneCacheViewVirtualPixelInfo(watermark_view,x,y,&pixel,
          exception);
        offset=k/(ssize_t) stegano_image->columns;
        if (offset >= (ssize_t) stegano_image->rows)
          break;
        q=GetCacheViewAuthenticPixels(stegano_view,k % (ssize_t)
          stegano_image->columns,k/(ssize_t) stegano_image->columns,1,1,
          exception);
        if (q == (Quantum *) NULL)
          break;
        switch (c)
        {
          case 0:
          {
            SetPixelRed(stegano_image,SetBit(GetPixelRed(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
          case 1:
          {
            SetPixelGreen(stegano_image,SetBit(GetPixelGreen(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
          case 2:
          {
            SetPixelBlue(stegano_image,SetBit(GetPixelBlue(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
        }
        if (SyncCacheViewAuthenticPixels(stegano_view,exception) == MagickFalse)
          break;
        c++;
        if (c == 3)
          c=0;
        k++;
        /*
          Wrap the destination index at the last pixel.  The total pixel
          count is columns*rows; the original wrapped at columns*columns,
          which is wrong for non-square images.
        */
        if (k == (ssize_t) (stegano_image->columns*stegano_image->rows))
          k=0;
        if (k == stegano_image->offset)
          j++;
      }
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,SteganoImageTag,(MagickOffsetType)
          (depth-i),depth);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  stegano_view=DestroyCacheView(stegano_view);
  watermark_view=DestroyCacheView(watermark_view);
  if (status == MagickFalse)
    stegano_image=DestroyImage(stegano_image);
  return(stegano_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e r e o A n a g l y p h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StereoAnaglyphImage() combines two images and produces a single image that
% is the composite of a left and right image of a stereo pair. Special
% red-green stereo glasses are required to view this effect.
%
% The format of the StereoAnaglyphImage method is:
%
% Image *StereoImage(const Image *left_image,const Image *right_image,
% ExceptionInfo *exception)
% Image *StereoAnaglyphImage(const Image *left_image,
% const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o left_image: the left image.
%
% o right_image: the right image.
%
% o exception: return any errors or warnings in this structure.
%
% o x_offset: amount, in pixels, by which the left image is offset to the
% right of the right image.
%
% o y_offset: amount, in pixels, by which the left image is offset to the
% bottom of the right image.
%
%
*/
MagickExport Image *StereoImage(const Image *left_image,
  const Image *right_image,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: build a red/cyan anaglyph with no x/y offset
    between the left and right images.
  */
  Image
    *anaglyph_image;

  anaglyph_image=StereoAnaglyphImage(left_image,right_image,0,0,exception);
  return(anaglyph_image);
}
MagickExport Image *StereoAnaglyphImage(const Image *left_image,
  const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define StereoImageTag "Stereo/Image"

  const Image
    *image;

  Image
    *stereo_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Compose a stereo anaglyph: red channel from the (offset) left image,
    green and blue channels from the right image.  Both images must have
    identical dimensions.
  */
  assert(left_image != (const Image *) NULL);
  assert(left_image->signature == MagickCoreSignature);
  if (left_image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      left_image->filename);
  assert(right_image != (const Image *) NULL);
  assert(right_image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* (removed a duplicated assert on right_image that appeared here) */
  image=left_image;
  if ((left_image->columns != right_image->columns) ||
      (left_image->rows != right_image->rows))
    ThrowImageException(ImageError,"LeftAndRightImageSizesDiffer");
  /*
    Initialize stereo image attributes.
  */
  stereo_image=CloneImage(left_image,left_image->columns,left_image->rows,
    MagickTrue,exception);
  if (stereo_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(stereo_image,DirectClass,exception) == MagickFalse)
    {
      stereo_image=DestroyImage(stereo_image);
      return((Image *) NULL);
    }
  (void) SetImageColorspace(stereo_image,sRGBColorspace,exception);
  /*
    Copy left image to red channel and right image to blue channel.
  */
  status=MagickTrue;
  for (y=0; y < (ssize_t) stereo_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    register Quantum
      *magick_restrict r;

    /*
      Negative virtual offsets shift the left image right/down by
      (x_offset,y_offset) relative to the right image.
    */
    p=GetVirtualPixels(left_image,-x_offset,y-y_offset,image->columns,1,
      exception);
    q=GetVirtualPixels(right_image,0,y,right_image->columns,1,exception);
    r=QueueAuthenticPixels(stereo_image,0,y,stereo_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL) ||
        (r == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) stereo_image->columns; x++)
    {
      SetPixelRed(image,GetPixelRed(left_image,p),r);
      SetPixelGreen(image,GetPixelGreen(right_image,q),r);
      SetPixelBlue(image,GetPixelBlue(right_image,q),r);
      if ((GetPixelAlphaTraits(stereo_image) & CopyPixelTrait) != 0)
        SetPixelAlpha(image,(GetPixelAlpha(left_image,p)+
          GetPixelAlpha(right_image,q))/2,r);
      p+=GetPixelChannels(left_image);
      q+=GetPixelChannels(right_image);
      r+=GetPixelChannels(stereo_image);
    }
    if (SyncAuthenticPixels(stereo_image,exception) == MagickFalse)
      break;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,StereoImageTag,(MagickOffsetType) y,
          stereo_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  if (status == MagickFalse)
    stereo_image=DestroyImage(stereo_image);
  return(stereo_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S w i r l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SwirlImage() swirls the pixels about the center of the image, where
% degrees indicates the sweep of the arc through which each pixel is moved.
% You get a more dramatic effect as the degrees move from 1 to 360.
%
% The format of the SwirlImage method is:
%
% Image *SwirlImage(const Image *image,double degrees,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o degrees: Define the tightness of the swirling effect.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SwirlImage(const Image *image,double degrees,
const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define SwirlImageTag "Swirl/Image"
CacheView
*canvas_view,
*interpolate_view,
*swirl_view;
Image
*canvas,
*swirl_image;
MagickBooleanType
status;
MagickOffsetType
progress;
double
radius;
PointInfo
center,
scale;
ssize_t
y;
/*
Initialize swirl image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
canvas=CloneImage(image,0,0,MagickTrue,exception);
if (canvas == (Image *) NULL)
return((Image *) NULL);
if ((canvas->alpha_trait == UndefinedPixelTrait) &&
(canvas->background_color.alpha != OpaqueAlpha))
(void) SetImageAlphaChannel(canvas,OpaqueAlphaChannel,exception);
swirl_image=CloneImage(canvas,canvas->columns,canvas->rows,MagickTrue,
exception);
if (swirl_image == (Image *) NULL)
{
canvas=DestroyImage(canvas);
return((Image *) NULL);
}
if (SetImageStorageClass(swirl_image,DirectClass,exception) == MagickFalse)
{
canvas=DestroyImage(canvas);
swirl_image=DestroyImage(swirl_image);
return((Image *) NULL);
}
/*
Compute scaling factor.
*/
center.x=(double) canvas->columns/2.0;
center.y=(double) canvas->rows/2.0;
radius=MagickMax(center.x,center.y);
scale.x=1.0;
scale.y=1.0;
if (canvas->columns > canvas->rows)
scale.y=(double) canvas->columns/(double) canvas->rows;
else
if (canvas->columns < canvas->rows)
scale.x=(double) canvas->rows/(double) canvas->columns;
degrees=(double) DegreesToRadians(degrees);
/*
Swirl image.
*/
status=MagickTrue;
progress=0;
canvas_view=AcquireVirtualCacheView(canvas,exception);
interpolate_view=AcquireVirtualCacheView(image,exception);
swirl_view=AcquireAuthenticCacheView(swirl_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(canvas,swirl_image,canvas->rows,1)
#endif
for (y=0; y < (ssize_t) canvas->rows; y++)
{
double
distance;
PointInfo
delta;
register const Quantum
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas->columns,1,exception);
q=QueueCacheViewAuthenticPixels(swirl_view,0,y,swirl_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
delta.y=scale.y*(double) (y-center.y);
for (x=0; x < (ssize_t) canvas->columns; x++)
{
/*
Determine if the pixel is within an ellipse.
*/
if (GetPixelWriteMask(canvas,p) <= (QuantumRange/2))
{
SetPixelBackgoundColor(swirl_image,q);
p+=GetPixelChannels(canvas);
q+=GetPixelChannels(swirl_image);
continue;
}
delta.x=scale.x*(double) (x-center.x);
distance=delta.x*delta.x+delta.y*delta.y;
if (distance >= (radius*radius))
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(canvas,i);
PixelTrait traits = GetPixelChannelTraits(canvas,channel);
PixelTrait swirl_traits = GetPixelChannelTraits(swirl_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(swirl_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(swirl_image,channel,p[i],q);
}
}
else
{
double
cosine,
factor,
sine;
/*
Swirl the pixel.
*/
factor=1.0-sqrt((double) distance)/radius;
sine=sin((double) (degrees*factor*factor));
cosine=cos((double) (degrees*factor*factor));
status=InterpolatePixelChannels(canvas,interpolate_view,swirl_image,
method,((cosine*delta.x-sine*delta.y)/scale.x+center.x),(double)
((sine*delta.x+cosine*delta.y)/scale.y+center.y),q,exception);
}
p+=GetPixelChannels(canvas);
q+=GetPixelChannels(swirl_image);
}
if (SyncCacheViewAuthenticPixels(swirl_view,exception) == MagickFalse)
status=MagickFalse;
if (canvas->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SwirlImage)
#endif
proceed=SetImageProgress(canvas,SwirlImageTag,progress++,canvas->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
swirl_view=DestroyCacheView(swirl_view);
interpolate_view=DestroyCacheView(interpolate_view);
canvas_view=DestroyCacheView(canvas_view);
canvas=DestroyImage(canvas);
if (status == MagickFalse)
swirl_image=DestroyImage(swirl_image);
return(swirl_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TintImage() applies a color vector to each pixel in the image. The length
% of the vector is 0 for black and white and at its maximum for the midtones.
% The vector weighting function is f(x)=(1-(4.0*((x-0.5)*(x-0.5))))
%
% The format of the TintImage method is:
%
% Image *TintImage(const Image *image,const char *blend,
% const PixelInfo *tint,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o blend: A geometry string giving the per-channel blend percentages
%   (rho=red, sigma=green, xi=blue, psi=alpha; black/chi for CMYK), parsed
%   with ParseGeometry().
%
% o tint: A color value used for tinting.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TintImage(const Image *image,const char *blend,
  const PixelInfo *tint,ExceptionInfo *exception)
{
#define TintImageTag "Tint/Image"
  CacheView
    *image_view,
    *tint_view;
  double
    intensity;
  GeometryInfo
    geometry_info;
  Image
    *tint_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  PixelInfo
    color_vector;
  MagickStatusType
    flags;
  ssize_t
    y;
  /*
    Allocate tint image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  tint_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (tint_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(tint_image,DirectClass,exception) == MagickFalse)
    {
      tint_image=DestroyImage(tint_image);
      return((Image *) NULL);
    }
  /* A non-gray tint color forces the clone out of a grayscale colorspace. */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsPixelInfoGray(tint) == MagickFalse))
    (void) SetImageColorspace(tint_image,sRGBColorspace,exception);
  /* No blend geometry: the result is a plain clone. */
  if (blend == (const char *) NULL)
    return(tint_image);
  /*
    Determine RGB values of the color.
  */
  GetPixelInfo(image,&color_vector);
  flags=ParseGeometry(blend,&geometry_info);
  /* rho seeds every channel; sigma/xi/psi (and chi for CMYK) override the
     green/blue/alpha (and black) components individually. */
  color_vector.red=geometry_info.rho;
  color_vector.green=geometry_info.rho;
  color_vector.blue=geometry_info.rho;
  color_vector.alpha=(MagickRealType) OpaqueAlpha;
  if ((flags & SigmaValue) != 0)
    color_vector.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    color_vector.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    color_vector.alpha=geometry_info.psi;
  if (image->colorspace == CMYKColorspace)
    {
      color_vector.black=geometry_info.rho;
      if ((flags & PsiValue) != 0)
        color_vector.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        color_vector.alpha=geometry_info.chi;
    }
  /* Scale each blend percentage by the tint color, then subtract the
     tint's intensity -- NOTE(review): presumably to keep overall
     brightness roughly constant; confirm against upstream intent. */
  intensity=(double) GetPixelInfoIntensity((const Image *) NULL,tint);
  color_vector.red=(double) (color_vector.red*tint->red/100.0-intensity);
  color_vector.green=(double) (color_vector.green*tint->green/100.0-intensity);
  color_vector.blue=(double) (color_vector.blue*tint->blue/100.0-intensity);
  color_vector.black=(double) (color_vector.black*tint->black/100.0-intensity);
  color_vector.alpha=(double) (color_vector.alpha*tint->alpha/100.0-intensity);
  /*
    Tint image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  tint_view=AcquireAuthenticCacheView(tint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,tint_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(tint_view,0,y,tint_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelInfo
        pixel;
      double
        weight;
      register ssize_t
        i;
      /* Copy through channels that are masked or flagged copy-only. */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait tint_traits=GetPixelChannelTraits(tint_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (tint_traits == UndefinedPixelTrait))
          continue;
        if (((tint_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p) <= (QuantumRange/2)))
          {
            SetPixelChannel(tint_image,channel,p[i],q);
            continue;
          }
      }
      /* weight is in [-0.5,0.5]; 1-4*weight^2 is 1 at midtones and 0 at
         the extremes, so the tint vector mostly moves midtones. */
      GetPixelInfo(image,&pixel);
      weight=QuantumScale*GetPixelRed(image,p)-0.5;
      pixel.red=(double) GetPixelRed(image,p)+color_vector.red*(1.0-(4.0*
        (weight*weight)));
      weight=QuantumScale*GetPixelGreen(image,p)-0.5;
      pixel.green=(double) GetPixelGreen(image,p)+color_vector.green*(1.0-(4.0*
        (weight*weight)));
      weight=QuantumScale*GetPixelBlue(image,p)-0.5;
      pixel.blue=(double) GetPixelBlue(image,p)+color_vector.blue*(1.0-(4.0*
        (weight*weight)));
      weight=QuantumScale*GetPixelBlack(image,p)-0.5;
      pixel.black=(double) GetPixelBlack(image,p)+color_vector.black*(1.0-(4.0*
        (weight*weight)));
      SetPixelViaPixelInfo(tint_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(tint_image);
    }
    if (SyncCacheViewAuthenticPixels(tint_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TintImage)
#endif
        proceed=SetImageProgress(image,TintImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  tint_view=DestroyCacheView(tint_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    tint_image=DestroyImage(tint_image);
  return(tint_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% V i g n e t t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% VignetteImage() softens the edges of the image in vignette style.
%
% The format of the VignetteImage method is:
%
% Image *VignetteImage(const Image *image,const double radius,
% const double sigma,const ssize_t x,const ssize_t y,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x, y: Define the x and y ellipse offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *VignetteImage(const Image *image,const double radius,
  const double sigma,const ssize_t x,const ssize_t y,ExceptionInfo *exception)
{
  char
    ellipse[MagickPathExtent];
  DrawInfo
    *draw_info;
  Image
    *canvas,
    *blur_image,
    *oval_image,
    *vignette_image;
  /*
    Sanity-check arguments.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Work on a DirectClass copy of the image with an active alpha channel.
  */
  canvas=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(canvas,DirectClass,exception) == MagickFalse)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  canvas->alpha_trait=BlendPixelTrait;
  /*
    Draw a white ellipse on a black canvas; this becomes the vignette mask.
  */
  oval_image=CloneImage(canvas,canvas->columns,canvas->rows,MagickTrue,
    exception);
  if (oval_image == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  (void) QueryColorCompliance("#000000",AllCompliance,
    &oval_image->background_color,exception);
  (void) SetImageBackgroundColor(oval_image,exception);
  draw_info=CloneDrawInfo((const ImageInfo *) NULL,(const DrawInfo *) NULL);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->stroke,
    exception);
  /* The ellipse is centered on the image; the x/y offsets shrink its radii. */
  (void) FormatLocaleString(ellipse,MagickPathExtent,"ellipse %g,%g,%g,%g,"
    "0.0,360.0",image->columns/2.0,image->rows/2.0,image->columns/2.0-x,
    image->rows/2.0-y);
  draw_info->primitive=AcquireString(ellipse);
  (void) DrawImage(oval_image,draw_info,exception);
  draw_info=DestroyDrawInfo(draw_info);
  /*
    Blur the mask to soften the vignette edge, then composite its intensity
    onto the canvas as an alpha mask.
  */
  blur_image=BlurImage(oval_image,radius,sigma,exception);
  oval_image=DestroyImage(oval_image);
  if (blur_image == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  blur_image->alpha_trait=UndefinedPixelTrait;
  (void) CompositeImage(canvas,blur_image,IntensityCompositeOp,MagickTrue,
    0,0,exception);
  blur_image=DestroyImage(blur_image);
  /*
    Flatten against the background to produce the final vignette, restoring
    the caller's colorspace.
  */
  vignette_image=MergeImageLayers(canvas,FlattenLayer,exception);
  canvas=DestroyImage(canvas);
  if (vignette_image != (Image *) NULL)
    (void) TransformImageColorspace(vignette_image,image->colorspace,exception);
  return(vignette_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveImage() creates a "ripple" effect in the image by shifting the pixels
% vertically along a sine wave whose amplitude and wavelength is specified
% by the given parameters.
%
% The format of the WaveImage method is:
%
% Image *WaveImage(const Image *image,const double amplitude,
% const double wave_length,const PixelInterpolateMethod method,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o amplitude, wave_length: Define the amplitude and wave length of the
% sine wave.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *WaveImage(const Image *image,const double amplitude,
  const double wave_length,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
#define WaveImageTag "Wave/Image"
  CacheView
    *canvas_view,
    *wave_view;
  Image
    *canvas,
    *wave_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  double
    *sine_map;  /* per-column vertical displacement, in [0,2*|amplitude|] */
  register ssize_t
    i;
  ssize_t
    y;
  /*
    Initialize wave image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas == (Image *) NULL)
    return((Image *) NULL);
  /* Make the background fully opaque when no alpha channel is active so
     pixels sampled from outside the canvas blend predictably. */
  if ((canvas->alpha_trait == UndefinedPixelTrait) &&
      (canvas->background_color.alpha != OpaqueAlpha))
    (void) SetImageAlpha(canvas,OpaqueAlpha,exception);
  /* The result is taller by the full swing of the wave (2*|amplitude|). */
  wave_image=CloneImage(canvas,canvas->columns,(size_t) (canvas->rows+2.0*
    fabs(amplitude)),MagickTrue,exception);
  if (wave_image == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(wave_image,DirectClass,exception) == MagickFalse)
    {
      canvas=DestroyImage(canvas);
      wave_image=DestroyImage(wave_image);
      return((Image *) NULL);
    }
  /*
    Allocate sine map.
  */
  sine_map=(double *) AcquireQuantumMemory((size_t) wave_image->columns,
    sizeof(*sine_map));
  if (sine_map == (double *) NULL)
    {
      canvas=DestroyImage(canvas);
      wave_image=DestroyImage(wave_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /* Precompute the vertical offset of every column. */
  for (i=0; i < (ssize_t) wave_image->columns; i++)
    sine_map[i]=fabs(amplitude)+amplitude*sin((double) ((2.0*MagickPI*i)/
      wave_length));
  /*
    Wave image.
  */
  status=MagickTrue;
  progress=0;
  canvas_view=AcquireVirtualCacheView(canvas,exception);
  wave_view=AcquireAuthenticCacheView(wave_image,exception);
  /* Off-canvas samples take the background color. */
  (void) SetCacheViewVirtualPixelMethod(canvas_view,
    BackgroundVirtualPixelMethod);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(canvas,wave_image,wave_image->rows,1)
#endif
  for (y=0; y < (ssize_t) wave_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(wave_view,0,y,wave_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* Sample the source at (x,y-sine_map[x]) using the requested
       interpolation method. */
    for (x=0; x < (ssize_t) wave_image->columns; x++)
    {
      status=InterpolatePixelChannels(canvas,canvas_view,wave_image,method,
        (double) x,(double) (y-sine_map[x]),q,exception);
      q+=GetPixelChannels(wave_image);
    }
    if (SyncCacheViewAuthenticPixels(wave_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_WaveImage)
#endif
        proceed=SetImageProgress(canvas,WaveImageTag,progress++,canvas->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  wave_view=DestroyCacheView(wave_view);
  canvas_view=DestroyCacheView(canvas_view);
  canvas=DestroyImage(canvas);
  sine_map=(double *) RelinquishMagickMemory(sine_map);
  if (status == MagickFalse)
    wave_image=DestroyImage(wave_image);
  return(wave_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e l e t D e n o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveletDenoiseImage() removes noise from the image using a wavelet
% transform. The wavelet transform is a fast hierarchical scheme for
% processing an image using a set of consecutive lowpass and high_pass filters,
% followed by a decimation. This results in a decomposition into different
% scales which can be regarded as different “frequency bands”, determined by
% the mother wavelet. Adapted from dcraw.c by David Coffin.
%
% The format of the WaveletDenoiseImage method is:
%
% Image *WaveletDenoiseImage(const Image *image,const double threshold,
% const double softness,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: set the threshold for smoothing.
%
% o softness: attenuate the smoothing threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  HatTransform() applies one level of the a-trous "hat" lowpass filter to a
  line of <extent> samples spaced <stride> floats apart, writing the result
  to <kernel>.  The two neighbor taps sit <scale> samples away; taps that
  fall outside the line are mirrored about the nearest end.
*/
static inline void HatTransform(const float *magick_restrict pixels,
  const size_t stride,const size_t extent,const size_t scale,float *kernel)
{
  register ssize_t
    i;

  /* Leading edge: the (i-scale) tap reflects to (scale-i). */
  for (i=0; i < (ssize_t) scale; i++)
    kernel[i]=0.25f*(2.0f*pixels[i*stride]+pixels[(scale-i)*stride]+
      pixels[(i+scale)*stride]);
  /* Interior: both neighbor taps are in range. */
  for ( ; i < (ssize_t) (extent-scale); i++)
    kernel[i]=0.25f*(2.0f*pixels[i*stride]+pixels[(i-scale)*stride]+
      pixels[(i+scale)*stride]);
  /* Trailing edge: the (i+scale) tap reflects about the last sample. */
  for ( ; i < (ssize_t) extent; i++)
    kernel[i]=0.25f*(2.0f*pixels[i*stride]+pixels[(i-scale)*stride]+
      pixels[(2*extent-2-(i+scale))*stride]);
}
MagickExport Image *WaveletDenoiseImage(const Image *image,
  const double threshold,const double softness,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *noise_view;
  float
    *kernel,
    *pixels;
  Image
    *noise_image;
  MagickBooleanType
    status;
  MagickSizeType
    number_pixels;
  MemoryInfo
    *pixels_info;
  ssize_t
    channel;
  /* Expected noise magnitude per wavelet level; scaled by threshold. */
  static const float
    noise_levels[] = { 0.8002f, 0.2735f, 0.1202f, 0.0585f, 0.0291f, 0.0152f,
      0.0080f, 0.0044f };
  /*
    Initialize noise image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL implementation when it is available. */
  noise_image=AccelerateWaveletDenoiseImage(image,threshold,exception);
  if (noise_image != (Image *) NULL)
    return(noise_image);
#endif
  noise_image=CloneImage(image,0,0,MagickTrue,exception);
  if (noise_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse)
    {
      noise_image=DestroyImage(noise_image);
      return((Image *) NULL);
    }
  if (AcquireMagickResource(WidthResource,4*image->columns) == MagickFalse)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Workspace: three full-image float planes (plane 0 holds the channel and
    accumulates results; planes 1 and 2 alternate as low-pass output), plus
    one filter scratch row/column per OpenMP thread.
  */
  pixels_info=AcquireVirtualMemory(3*image->columns,image->rows*
    sizeof(*pixels));
  kernel=(float *) AcquireQuantumMemory(MagickMax(image->rows,image->columns),
    GetOpenMPMaximumThreads()*sizeof(*kernel));
  if ((pixels_info == (MemoryInfo *) NULL) || (kernel == (float *) NULL))
    {
      if (kernel != (float *) NULL)
        kernel=(float *) RelinquishMagickMemory(kernel);
      if (pixels_info != (MemoryInfo *) NULL)
        pixels_info=RelinquishVirtualMemory(pixels_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(float *) GetVirtualMemoryBlob(pixels_info);
  status=MagickTrue;
  number_pixels=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,exception);
  noise_view=AcquireAuthenticCacheView(noise_image,exception);
  /* Denoise the red, green, and blue channels independently. */
  for (channel=0; channel < (ssize_t) GetPixelChannels(image); channel++)
  {
    register ssize_t
      i;
    size_t
      high_pass,
      low_pass;
    ssize_t
      level,
      y;
    PixelChannel
      pixel_channel;
    PixelTrait
      traits;
    if (status == MagickFalse)
      continue;
    traits=GetPixelChannelTraits(image,(PixelChannel) channel);
    if (traits == UndefinedPixelTrait)
      continue;
    pixel_channel=GetPixelChannelChannel(image,channel);
    if ((pixel_channel != RedPixelChannel) &&
        (pixel_channel != GreenPixelChannel) &&
        (pixel_channel != BluePixelChannel))
      continue;
    /*
      Copy channel from image to wavelet pixel array.
    */
    i=0;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const Quantum
        *magick_restrict p;
      ssize_t
        x;
      p=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        pixels[i++]=(float) p[channel];
        p+=GetPixelChannels(image);
      }
    }
    /*
      Low pass filter outputs are called approximation kernel & high pass
      filters are referred to as detail kernel. The detail kernel
      have high values in the noisy parts of the signal.
    */
    high_pass=0;
    for (level=0; level < 5; level++)
    {
      double
        magnitude;
      ssize_t
        x,
        y;
      /* Planes 1 and 2 alternate as the low-pass destination per level. */
      low_pass=(size_t) (number_pixels*((level & 0x01)+1));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,1) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();
        register float
          *magick_restrict p,
          *magick_restrict q;
        register ssize_t
          x;
        /* Horizontal hat filter of row y into this thread's scratch row,
           then copy into the low-pass plane. */
        p=kernel+id*image->columns;
        q=pixels+y*image->columns;
        HatTransform(q+high_pass,1,image->columns,(size_t) (1 << level),p);
        q+=low_pass;
        for (x=0; x < (ssize_t) image->columns; x++)
          *q++=(*p++);
      }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,1) \
        magick_threads(image,image,image->columns,1)
#endif
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        const int
          id = GetOpenMPThreadId();
        register float
          *magick_restrict p,
          *magick_restrict q;
        register ssize_t
          y;
        /* Vertical hat filter of column x, in place in the low-pass plane. */
        p=kernel+id*image->rows;
        q=pixels+x+low_pass;
        HatTransform(q,image->columns,image->rows,(size_t) (1 << level),p);
        for (y=0; y < (ssize_t) image->rows; y++)
        {
          *q=(*p++);
          q+=image->columns;
        }
      }
      /*
        To threshold, each coefficient is compared to a threshold value and
        attenuated / shrunk by some factor.
      */
      magnitude=threshold*noise_levels[level];
      for (i=0; i < (ssize_t) number_pixels; ++i)
      {
        /* Detail coefficient = previous level minus the new low-pass. */
        pixels[high_pass+i]-=pixels[low_pass+i];
        if (pixels[high_pass+i] < -magnitude)
          pixels[high_pass+i]+=magnitude-softness*magnitude;
        else
          if (pixels[high_pass+i] > magnitude)
            pixels[high_pass+i]-=magnitude-softness*magnitude;
          else
            pixels[high_pass+i]*=softness;
        /* Accumulate the attenuated details back into plane 0 (skipped on
           the first level, where high_pass IS plane 0). */
        if (high_pass != 0)
          pixels[i]+=pixels[high_pass+i];
      }
      high_pass=low_pass;
    }
    /*
      Reconstruct image from the thresholded wavelet kernel.
    */
    i=0;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;
      register Quantum
        *magick_restrict q;
      register ssize_t
        x;
      ssize_t
        offset;
      q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
        exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      offset=GetPixelChannelOffset(noise_image,pixel_channel);
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        MagickRealType
          pixel;
        /* Denoised value = accumulated details + final low-pass residue. */
        pixel=(MagickRealType) pixels[i]+pixels[low_pass+i];
        q[offset]=ClampToQuantum(pixel);
        i++;
        q+=GetPixelChannels(noise_image);
      }
      sync=SyncCacheViewAuthenticPixels(noise_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        /* NOTE(review): progress reuses AddNoiseImageTag rather than a
           wavelet-denoise tag -- looks like a copy-paste; confirm. */
        proceed=SetImageProgress(image,AddNoiseImageTag,(MagickOffsetType)
          channel,GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  noise_view=DestroyCacheView(noise_view);
  image_view=DestroyCacheView(image_view);
  kernel=(float *) RelinquishMagickMemory(kernel);
  pixels_info=RelinquishVirtualMemory(pixels_info);
  if (status == MagickFalse)
    noise_image=DestroyImage(noise_image);
  return(noise_image);
}
|
collective_alltoall.c | /*****************************************************************************
* *
* Mixed-mode OpenMP/MPI MicroBenchmark Suite - Version 1.0 *
* *
* produced by *
* *
* Mark Bull, Jim Enright and Fiona Reid *
* *
* at *
* *
* Edinburgh Parallel Computing Centre *
* *
* email: markb@epcc.ed.ac.uk, fiona@epcc.ed.ac.uk *
* *
* *
* Copyright 2012, The University of Edinburgh *
* *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* *
****************************************************************************/
/*-----------------------------------------------------------*/
/* Implements the alltoall mixed mode OpenMP/MPI benchmark. */
/*-----------------------------------------------------------*/
#include "collective_alltoall.h"
#include <stdio.h>
#include <stdlib.h>
int freeAlltoallData();
/*-----------------------------------------------------------*/
/* alltoall */
/* */
/* Driver routine for the alltoall benchmark. */
/*-----------------------------------------------------------*/
int alltoall() {
int dataSizeIter;
int bufferSize;
/* Initialise repsToDo to defaultReps */
repsToDo = defaultReps;
/* Start loop over data sizes */
dataSizeIter = minDataSize; /* initialise dataSizeIter */
while (dataSizeIter <= maxDataSize) {
/* Calculate bufferSize and allocate space for
* the data arrays.
*/
bufferSize = dataSizeIter * numThreads * numMPIprocs * numThreads;
allocateAlltoallData(bufferSize);
/* Perform warm-up of benchmark */
alltoallKernel(warmUpIters, dataSizeIter);
/* Test if alltoall was successful */
testAlltoall(dataSizeIter);
/* Initialise the benchmark */
benchComplete = FALSE;
/* Execute benchmark until target time is reached */
while (benchComplete != TRUE) {
/* Start timer */
MPI_Barrier(comm);
startTime = MPI_Wtime();
/* Execute alltoall for repsToDo repetitions */
alltoallKernel(repsToDo, dataSizeIter);
/* Stop timer */
MPI_Barrier(comm);
finishTime = MPI_Wtime();
totalTime = finishTime - startTime;
/* Test if target time was reached */
if (myMPIRank == 0) { benchComplete = repTimeCheck(totalTime, repsToDo); }
/* Ensure all procs have the same value of benchComplete */
/* and repsToDo */
MPI_Bcast(&benchComplete, 1, MPI_INT, 0, comm);
MPI_Bcast(&repsToDo, 1, MPI_INT, 0, comm);
}
/* Master process sets benchmark result for reporting */
if (myMPIRank == 0) {
setReportParams(dataSizeIter, repsToDo, totalTime);
printReport();
}
/* Free allocated data */
freeAlltoallData();
/* Double data size and loop again */
dataSizeIter = dataSizeIter * 2;
}
return 0;
}
/*-----------------------------------------------------------*/
/* alltoallKernel                                            */
/*                                                           */
/* Implements the all to all benchmark.                      */
/* Each thread sends/receives dataSize items to/from         */
/* every other process.                                      */
/*-----------------------------------------------------------*/
int alltoallKernel(int totalReps, int dataSize) {
  int repIter, i, j;
  int dataForEachProc, numsToWrite;
  int blockNum, startOffset;
  /* Items each thread contributes per destination MPI process. */
  numsToWrite = numThreads * dataSize;
  /* Total items exchanged between any pair of MPI processes: each of
   * numThreads senders gives dataSize items to each of numThreads
   * receivers on the other process. */
  dataForEachProc = numThreads * numThreads * dataSize;
  for (repIter = 0; repIter < totalReps; repIter++) {
    /* Phase 1: every thread writes its numsToWrite items for each MPI
     * process into its slice of alltoallSendBuf.
     * NOTE(review): myThreadID is used inside default(none) regions
     * without appearing in the clauses -- presumably threadprivate in the
     * suite's headers; confirm. */
#pragma omp parallel default(none) private(blockNum, i, j) \
    shared(numsToWrite, dataForEachProc, globalIDarray) \
    shared(alltoallSendBuf, numMPIprocs)
    {
      /* blockNum selects this thread's slice inside each process's
       * dataForEachProc-sized section of the send buffer. */
      blockNum = (myThreadID)*numsToWrite;
      /* Write threadID to correct location in alltoallSendBuf. */
      for (i = 0; i < numMPIprocs; i++) { /* loop over MPI processes */
        for (j = 0; j < numsToWrite; j++) { /* loop over data to write */
          alltoallSendBuf[blockNum + (i * dataForEachProc) + j] =
              globalIDarray[myThreadID];
        }
      }
    }
    /* Exchange dataForEachProc ints with every other MPI process. */
    MPI_Alltoall(alltoallSendBuf, dataForEachProc, MPI_INT, alltoallRecvBuf,
                 dataForEachProc, MPI_INT, comm);
    /* Phase 2: each thread gathers its dataSize items from every other
     * thread (on every process) into its region of alltoallFinalBuf. */
#pragma omp parallel default(none) private(blockNum, startOffset, i, j) \
    shared(alltoallRecvBuf, alltoallFinalBuf, numMPIprocs) \
    shared(dataForEachProc, numsToWrite, dataSize, globalIDarray) \
    shared(numThreads)
    {
      /* Portion of each per-process section belonging to this thread. */
      blockNum = myThreadID * dataSize;
      /* Offset into alltoallFinalBuf where this thread's data starts. */
      startOffset = (numsToWrite * numMPIprocs) * myThreadID;
      /* Loop over all processors (threads & proceeses). */
      for (i = 0; i < (numThreads * numMPIprocs); i++) {
        for (j = 0; j < dataSize; j++) {
          alltoallFinalBuf[startOffset + (i * dataSize) + j] =
              alltoallRecvBuf[blockNum + (i * numsToWrite) + j];
        }
      }
    }
  }
  return 0;
}
/*-----------------------------------------------------------*/
/* allocateAlltoallData */
/* */
/* Allocates memory for the main data arrays used in the */
/* alltoall benchmark. */
/*-----------------------------------------------------------*/
int allocateAlltoallData(int bufferSize) {
alltoallSendBuf = (int *)malloc(bufferSize * sizeof(int));
alltoallRecvBuf = (int *)malloc(bufferSize * sizeof(int));
alltoallFinalBuf = (int *)malloc(bufferSize * sizeof(int));
return 0;
}
/*-----------------------------------------------------------*/
/* freeAlltoallData                                          */
/*                                                           */
/* Frees the main data arrays and clears the global pointers */
/* so a stray second call cannot double-free them and stale  */
/* pointers are never reused between benchmark iterations.   */
/*-----------------------------------------------------------*/
int freeAlltoallData() {
  free(alltoallSendBuf);
  alltoallSendBuf = NULL;
  free(alltoallRecvBuf);
  alltoallRecvBuf = NULL;
  free(alltoallFinalBuf);
  alltoallFinalBuf = NULL;
  return 0;
}
/*-----------------------------------------------------------*/
/* testAlltoall                                              */
/*                                                           */
/* Verifies that the all to all completed successfully.      */
/*-----------------------------------------------------------*/
int testAlltoall(int dataSize) {
  int sizeofBuffer, i, j;
  int dataForEachThread, startElem;
  int testFlag, reduceFlag;
  int *testBuf;
  /* Set testFlag to true */
  testFlag = TRUE;
  /* calculate the size of the buffer on each process and allocate */
  sizeofBuffer = dataSize * numThreads * numMPIprocs * numThreads;
  testBuf = (int *)malloc(sizeofBuffer * sizeof(int));
  /* Calculate how many elements each thread will work with */
  dataForEachThread = dataSize * numThreads * numMPIprocs;
  /* Fill testBuf with the expected result: thread t's region holds, for
   * each global sender i, dataSize copies of i.
   * NOTE(review): this relies on globalIDarray[tid] equalling the sender's
   * flattened global index -- confirm against the suite's setup code. */
#pragma omp parallel default(none) private(i, j, startElem) \
    shared(testBuf, globalIDarray, sizeofBuffer, dataSize) \
    shared(numThreads, numMPIprocs, dataForEachThread)
  {
    /* Calculate start element for each thread */
    startElem = (myThreadID)*dataForEachThread;
    for (i = 0; i < (numThreads * numMPIprocs); i++) {
      for (j = 0; j < dataSize; j++) {
        testBuf[startElem + (i * dataSize) + j] = i;
      }
    }
  }
  /* Element-wise comparison of the received data with the expectation. */
  for (i = 0; i < sizeofBuffer; i++) {
    if (alltoallFinalBuf[i] != testBuf[i]) { testFlag = FALSE; }
  }
  /* Reduce testFlag with logical AND operator to
   * get overall test result.
   */
  MPI_Reduce(&testFlag, &reduceFlag, 1, MPI_INT, MPI_LAND, 0, comm);
  /* Master then sets testOutcome flag */
  if (myMPIRank == 0) { setTestOutcome(reduceFlag); }
  /* Free space for testBuf */
  free(testBuf);
  return 0;
}
|
axpy_ompacc.c | // Experimental test input for Accelerator directives
// simplest scalar*vector operations
// Liao 1/15/2013
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
/* change this to do saxpy or daxpy : single precision or double precision*/
#define REAL double
#define VEC_LEN 1024000 //use a fixed number for now
/* zero out the entire vector */
/* Set every element of the n-element vector A to zero. */
void zero(REAL *A, int n)
{
  int idx;
  for (idx = 0; idx < n; idx++)
    A[idx] = 0.0;
}
/* initialize a vector with random floating point numbers */
/* Fill the n-element vector A with pseudo-random values from drand48(). */
void init(REAL *A, int n)
{
  int idx;
  for (idx = 0; idx < n; idx++)
    A[idx] = (double)drand48();
}
/*serial version */
/* Serial reference implementation: y[i] += a * x[i] for i in [0, n).
 * The loop index is a long to match the long n parameter; the original
 * int counter would overflow (undefined behavior) and never terminate
 * for n > INT_MAX. */
void axpy(REAL* x, REAL* y, long n, REAL a) {
  long i;
  for (i = 0; i < n; ++i)
  {
    y[i] += a * x[i];
  }
}
/* compare two arrays and return percentage of difference */
/* Return the relative L1 difference between A and B: sum|A-B| / sum|B|. */
REAL check(REAL*A, REAL*B, int n)
{
  REAL total_diff = 0.0, total_ref = 0.0;
  int k;
  for (k = 0; k < n; k++) {
    total_diff += fabs(A[k] - B[k]);
    total_ref += fabs(B[k]);
  }
  return total_diff / total_ref;
}
/* OpenMP accelerator version: offloads y[i] += a*x[i] to target device 0.
 * x, a and n are copied to the device; y is mapped tofrom (copied back).
 * NOTE(review): n is an int here while the serial axpy() takes long n. */
void axpy_ompacc(REAL* x, REAL* y, int n, REAL a) {
  int i;
  /* this one defines both the target device name and data environment to map to,
     I think here we need mechanism to tell the compiler the device type (could be multiple) so that compiler can generate the codes of different versions;
     we also need to let the runtime know what the target device is so the runtime will chose the right function to call if the code are generated
  #pragma omp target device (gpu0) map(x, y)
  */
#pragma omp target device (0) map(tofrom: y[0:n]) map(to: x[0:n],a,n)
#pragma omp parallel for shared(x, y, n, a) private(i)
  for (i = 0; i < n; ++i)
    y[i] += a * x[i];
}
/* Driver: run the serial and the accelerator axpy on identical inputs
   and verify that the results agree. Returns 0 on success. */
int main(int argc, char *argv[])
{
    int n;
    REAL *y_ompacc, *y, *x;
    REAL a = 123.456;
    n = VEC_LEN;
    y_ompacc = (REAL *) malloc(n * sizeof(REAL));
    y = (REAL *) malloc(n * sizeof(REAL));
    x = (REAL *) malloc(n * sizeof(REAL));
    /* the original never checked the allocations before writing */
    if (x == NULL || y == NULL || y_ompacc == NULL)
    {
        fprintf(stderr, "allocation of %d-element vectors failed\n", n);
        return EXIT_FAILURE;
    }
    srand48(1<<12);
    init(x, n);
    init(y_ompacc, n);
    /* keep an identical copy of y for the serial reference run */
    memcpy(y, y_ompacc, n*sizeof(REAL));
    axpy(x, y, n, a); /* serial reference version */
    /* openmp acc version */
    axpy_ompacc(x, y_ompacc, n, a);
    REAL checkresult = check(y_ompacc, y, n);
    printf("axpy(%d): checksum: %g\n", n, checkresult);
    /* The original called assert() without including <assert.h>, which
       does not compile; use an explicit check so a mismatch is reported
       and reflected in the exit status. */
    if (!(checkresult < 1.0e-10))
    {
        fprintf(stderr, "axpy check FAILED: relative error %g\n",
                (double)checkresult);
        free(y_ompacc);
        free(y);
        free(x);
        return EXIT_FAILURE;
    }
    free(y_ompacc);
    free(y);
    free(x);
    return 0;
}
|
kohonen_som_topology.c | /**
* \file
* \brief [Kohonen self organizing
* map](https://en.wikipedia.org/wiki/Self-organizing_map) (topological map)
*
* This example implements a powerful unsupervised learning algorithm called as
* a self organizing map. The algorithm creates a connected network of weights
* that closely follows the given data points. This thus creates a topological
* map of the given data i.e., it maintains the relationship between various
* data points in a much higher dimensional space by creating an equivalent in a
* 2-dimensional space.
* <img alt="Trained topological maps for the test cases in the program"
* src="https://raw.githubusercontent.com/TheAlgorithms/C/docs/images/machine_learning/kohonen/2D_Kohonen_SOM.svg"
* />
* \author [Krishna Vedala](https://github.com/kvedala)
* \warning MSVC 2019 compiler generates code that does not execute as expected.
* However, MinGW, Clang for GCC and Clang for MSVC compilers on windows perform
* as expected. Any insights and suggestions should be directed to the author.
* \see kohonen_som_trace.c
*/
#define _USE_MATH_DEFINES /**< required for MS Visual C */
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#ifdef _OPENMP // check if OpenMP based parallellization is available
#include <omp.h>
#endif
/**
* @addtogroup machine_learning Machine learning algorithms
* @{
* @addtogroup kohonen_2d Kohonen SOM topology algorithm
* @{
*/
#ifndef max
/** shorthand for maximum value */
#define max(a, b) (((a) > (b)) ? (a) : (b))
#endif
#ifndef min
/** shorthand for minimum value */
#define min(a, b) (((a) < (b)) ? (a) : (b))
#endif
/** to store info regarding 3D arrays */
struct kohonen_array_3d
{
int dim1; /**< lengths of first dimension */
int dim2; /**< lengths of second dimension */
int dim3; /**< lengths of thirddimension */
double *data; /**< pointer to data */
};
/** Function that returns the pointer to (x, y, z) ^th location in the
* linear 3D array given by:
* \f[
* X_{i,j,k} = i\times M\times N + j\times N + k
* \f]
* where \f$L\f$, \f$M\f$ and \f$N\f$ are the 3D matrix dimensions.
* \param[in] arr pointer to ::kohonen_array_3d structure
* \param[in] x first index
* \param[in] y second index
* \param[in] z third index
* \returns pointer to (x,y,z)^th location of data
*/
double *kohonen_data_3d(const struct kohonen_array_3d *arr, int x, int y, int z)
{
    /* Address of element (x,y,z) in the row-major linear buffer:
       offset = (x * dim2 + y) * dim3 + z. */
    return &arr->data[(x * arr->dim2 + y) * arr->dim3 + z];
}
/**
* Helper function to generate a random number in a given interval.
* \n Steps:
* 1. `r1 = rand() % 100` gets a random number between 0 and 99
* 2. `r2 = r1 / 100` converts random number to be between 0 and 0.99
* 3. scale and offset the random number to given range of \f$[a,b)\f$
* \f[
* y = (b - a) \times \frac{\text{(random number between 0 and RAND_MAX)} \;
* \text{mod}\; 100}{100} + a \f]
*
* \param[in] a lower limit
* \param[in] b upper limit
* \returns random number in the range \f$[a,b)\f$
*/
double _random(double a, double b)
{
    /* Draw rand() mod 100 and scale it onto [a, b) with a resolution of
       (b - a)/100; the arithmetic order matches the previous version so
       results are bit-identical for a given rand() sequence. */
    const int r = rand() % 100;
    return ((b - a) * r / 100.f) + a;
}
/**
* Save a given n-dimensional data martix to file.
*
* \param[in] fname filename to save in (gets overwritten without confirmation)
* \param[in] X matrix to save
* \param[in] num_points rows in the matrix = number of points
* \param[in] num_features columns in the matrix = dimensions of points
* \returns 0 if all ok
* \returns -1 if file creation failed
*/
/**
 * Save a given n-dimensional data matrix to file as CSV.
 *
 * \param[in] fname filename to save in (gets overwritten without confirmation)
 * \param[in] X matrix to save
 * \param[in] num_points rows in the matrix = number of points
 * \param[in] num_features columns in the matrix = dimensions of points
 * \returns 0 if all ok
 * \returns -1 if file creation failed
 */
int save_2d_data(const char *fname, double **X, int num_points,
                 int num_features)
{
    FILE *fp = fopen(fname, "wt");
    if (!fp) // error with fopen
    {
        char msg[120];
        /* snprintf instead of sprintf: a long fname must not overflow msg */
        snprintf(msg, sizeof(msg), "File error (%s): ", fname);
        perror(msg);
        return -1;
    }
    for (int i = 0; i < num_points; i++) // for each point in the array
    {
        for (int j = 0; j < num_features; j++) // for each feature in the array
        {
            fprintf(fp, "%.4g", X[i][j]); // print the feature value
            if (j < num_features - 1)     // if not the last feature
                fputc(',', fp);           // suffix comma
        }
        if (i < num_points - 1) // if not the last row
            fputc('\n', fp);    // start a new line
    }
    fclose(fp);
    return 0;
}
/**
* Create the distance matrix or
* [U-matrix](https://en.wikipedia.org/wiki/U-matrix) from the trained weights
* and save to disk.
*
* \param [in] fname filename to save in (gets overwriten without confirmation)
* \param [in] W model matrix to save
* \returns 0 if all ok
* \returns -1 if file creation failed
*/
/**
 * Create the distance matrix or
 * [U-matrix](https://en.wikipedia.org/wiki/U-matrix) from the trained weights
 * and save to disk as CSV.
 *
 * \param [in] fname filename to save in (gets overwritten without confirmation)
 * \param [in] W model matrix to save
 * \returns 0 if all ok
 * \returns -1 if file creation failed
 */
int save_u_matrix(const char *fname, struct kohonen_array_3d *W)
{
    FILE *fp = fopen(fname, "wt");
    if (!fp) // error with fopen
    {
        char msg[120];
        /* snprintf instead of sprintf: a long fname must not overflow msg */
        snprintf(msg, sizeof(msg), "File error (%s): ", fname);
        perror(msg);
        return -1;
    }
    int R = max(W->dim1 >> 3, 2); /* neighborhood range */
    for (int i = 0; i < W->dim1; i++) // for each x
    {
        for (int j = 0; j < W->dim2; j++) // for each y
        {
            double distance = 0.f;
            int from_x = max(0, i - R);
            int to_x = min(W->dim1, i + R + 1);
            int from_y = max(0, j - R);
            int to_y = min(W->dim2, j + R + 1);
            int l;
#ifdef _OPENMP
#pragma omp parallel for reduction(+ : distance)
#endif
            for (l = from_x; l < to_x; l++) // scan neighborhood in x
            {
                for (int m = from_y; m < to_y; m++) // scan neighborhood in y
                {
                    double d = 0.f;
                    /* k is declared inside the loop: the previous
                       function-scope declaration was shared between
                       OpenMP threads, causing a data race */
                    for (int k = 0; k < W->dim3; k++) // for each feature
                    {
                        double *w1 = kohonen_data_3d(W, i, j, k);
                        double *w2 = kohonen_data_3d(W, l, m, k);
                        d += (w1[0] - w2[0]) * (w1[0] - w2[0]);
                    }
                    distance += sqrt(d);
                }
            }
            distance /= R * R; // mean distance from neighbors
            fprintf(fp, "%.4g", distance); // print the mean separation
            if (j < W->dim2 - 1) // if not the last column
                fputc(',', fp); // suffix comma
        }
        if (i < W->dim1 - 1) // if not the last row
            fputc('\n', fp); // start a new line
    }
    fclose(fp);
    return 0;
}
/**
* Get minimum value and index of the value in a matrix
* \param[in] X matrix to search
* \param[in] N number of points in the vector
* \param[out] val minimum value found
* \param[out] x_idx x-index where minimum value was found
* \param[out] y_idx y-index where minimum value was found
*/
/* Linear scan of the N x N matrix X for its smallest entry.
   On return *val holds the minimum and (*x_idx, *y_idx) its location. */
void get_min_2d(double **X, int N, double *val, int *x_idx, int *y_idx)
{
    *val = INFINITY; // initial min value
    for (int r = 0; r < N; r++) // traverse each x-index
    {
        for (int c = 0; c < N; c++) // traverse each y-index
        {
            if (X[r][c] < *val) // a lower value was found:
            {                   // record the value and its indices
                *x_idx = r;
                *y_idx = c;
                *val = X[r][c];
            }
        }
    }
}
/**
* Update weights of the SOM using Kohonen algorithm
*
* \param[in] X data point
* \param[in,out] W weights matrix
* \param[in,out] D temporary vector to store distances
* \param[in] num_out number of output points
* \param[in] num_features number of features per input sample
* \param[in] alpha learning rate \f$0<\alpha\le1\f$
* \param[in] R neighborhood range
* \returns minimum distance of sample and trained weights
*/
/* One Kohonen update step for a single sample X: compute distances to all
   nodes, find the best-matching unit, then pull its neighborhood toward X.
   Returns the distance of the best-matching unit to the sample. */
double kohonen_update_weights(const double *X, struct kohonen_array_3d *W,
                              double **D, int num_out, int num_features,
                              double alpha, int R)
{
    int x, y, k;
    double d_min = 0.f;
#ifdef _OPENMP
    /* NOTE(review): this is an orphaned "omp for" -- it only parallelizes
       when called from inside a parallel region; verify that is intended. */
#pragma omp for
#endif
    // step 1: for each 2D output point
    for (x = 0; x < num_out; x++)
    {
        for (y = 0; y < num_out; y++)
        {
            D[x][y] = 0.f;
            // compute Euclidian distance of each output
            // point from the current sample
            for (k = 0; k < num_features; k++)
            {
                double *w = kohonen_data_3d(W, x, y, k);
                D[x][y] += (w[0] - X[k]) * (w[0] - X[k]);
            }
            D[x][y] = sqrt(D[x][y]);
        }
    }
    // step 2: get closest node i.e., node with smallest Euclidian distance to
    // the current pattern
    int d_min_x, d_min_y;
    get_min_2d(D, num_out, &d_min, &d_min_x, &d_min_y);
    // step 3a: get the neighborhood range (clamped to the grid)
    int from_x = max(0, d_min_x - R);
    int to_x = min(num_out, d_min_x + R + 1);
    int from_y = max(0, d_min_y - R);
    int to_y = min(num_out, d_min_y + R + 1);
    // step 3b: update the weights of nodes in the
    // neighborhood
#ifdef _OPENMP
#pragma omp for
#endif
    for (x = from_x; x < to_x; x++)
    {
        for (y = from_y; y < to_y; y++)
        {
            /* you can enable the following normalization if needed.
               personally, I found it detrimental to convergence */
            // const double s2pi = sqrt(2.f * M_PI);
            // double normalize = 1.f / (alpha * s2pi);
            /* apply a Gaussian scaling inversely proportional to the squared
               distance d2 from the best-matching node */
            double d2 =
                (d_min_x - x) * (d_min_x - x) + (d_min_y - y) * (d_min_y - y);
            double scale_factor = exp(-d2 / (2.f * alpha * alpha));
            for (k = 0; k < num_features; k++)
            {
                double *w = kohonen_data_3d(W, x, y, k);
                // update weights of nodes in the neighborhood
                w[0] += alpha * scale_factor * (X[k] - w[0]);
            }
        }
    }
    return d_min;
}
/**
 * Apply incremental algorithm with updating neighborhood and learning rates
 * on all samples in the given dataset.
 *
 * \param[in] X data set
 * \param[in,out] W weights matrix
 * \param[in] num_samples number of samples in the data set
 * \param[in] num_features number of features per input sample
 * \param[in] num_out number of output points
 * \param[in] alpha_min terminal value of alpha
 */
/* Train the SOM: decay alpha from 1 toward alpha_min, presenting every
   sample once per iteration, and shrink the neighborhood R over time. */
void kohonen_som(double **X, struct kohonen_array_3d *W, int num_samples,
                 int num_features, int num_out, double alpha_min)
{
    int R = num_out >> 2, iter = 0;
    /* scratch num_out x num_out distance matrix, reused for every sample */
    double **D = (double **)malloc(num_out * sizeof(double *));
    for (int i = 0; i < num_out; i++)
        D[i] = (double *)malloc(num_out * sizeof(double));
    double dmin = 1.f; // average minimum distance of all samples
    // Loop alpha from 1 to alpha_min; also stop early once dmin <= 1e-3
    for (double alpha = 1.f; alpha > alpha_min && dmin > 1e-3;
         alpha -= 0.001, iter++)
    {
        dmin = 0.f;
        // Loop for each sample pattern in the data set
        for (int sample = 0; sample < num_samples; sample++)
        {
            // update weights for the current input pattern sample
            dmin += kohonen_update_weights(X[sample], W, D, num_out,
                                           num_features, alpha, R);
        }
        // every 100th iteration, reduce the neighborhood range
        if (iter % 100 == 0 && R > 1)
            R--;
        dmin /= num_samples;
        printf("iter: %5d\t alpha: %.4g\t R: %d\td_min: %.4g\r", iter, alpha, R,
               dmin);
    }
    putchar('\n');
    for (int i = 0; i < num_out; i++) free(D[i]);
    free(D);
}
/**
* @}
* @}
*/
/** Creates a random set of points distributed in four clusters in
 * 2D space with centroids at the points
 * * \f$(0.5, 0.5)\f$
 * * \f$(0.5, -0.5)\f$
 * * \f$(-0.5, 0.5)\f$
 * * \f$(-0.5, -0.5)\f$
 *
 * \param[out] data matrix to store data in
 * \param[in] N number of points required
 */
void test_2d_classes(double *const *data, int N)
{
    const double R = 0.3; // radius of cluster
    int i;
    const int num_classes = 4;
    const double centres[][2] = {
        // centres of each class cluster
        {.5, .5},   // centre of class 1
        {.5, -.5},  // centre of class 2
        {-.5, .5},  // centre of class 3
        {-.5, -.5}  // centre of class 4
    };
#ifdef _OPENMP
    /* NOTE(review): orphaned "omp for" -- no effect unless called from a
       parallel region, and rand() is not guaranteed thread-safe; verify. */
#pragma omp for
#endif
    for (i = 0; i < N; i++)
    {
        int class =
            rand() % num_classes; // select a random class for the point
        // create random coordinates (x,y) around the centre of the class
        data[i][0] = _random(centres[class][0] - R, centres[class][0] + R);
        data[i][1] = _random(centres[class][1] - R, centres[class][1] + R);
        /* The following can also be used
        for (int j = 0; j < 2; j++)
            data[i][j] = _random(centres[class][j] - R, centres[class][j] + R);
        */
    }
}
/** Test that creates a random set of points distributed in four clusters in
* 2D space and trains an SOM that finds the topological pattern.
* The following [CSV](https://en.wikipedia.org/wiki/Comma-separated_values)
* files are created to validate the execution:
* * `test1.csv`: random test samples points with a circular pattern
* * `w11.csv`: initial random U-matrix
* * `w12.csv`: trained SOM U-matrix
*/
void test1()
{
    int j, N = 300;
    int features = 2;
    int num_out = 30; // SOM grid size - num_out x num_out
    // 2D space, hence size = number of rows * 2
    double **X = (double **)malloc(N * sizeof(double *));
    // cluster nodes in 'x' * cluster nodes in 'y' * 2 features
    struct kohonen_array_3d W;
    W.dim1 = num_out;
    W.dim2 = num_out;
    W.dim3 = features;
    W.data = (double *)malloc(num_out * num_out * features *
                              sizeof(double)); // assign rows
    /* single loop allocates the sample rows and initializes the weights */
    for (int i = 0; i < max(num_out, N); i++) // loop till max(N, num_out)
    {
        if (i < N) // only add new arrays if i < N
            X[i] = (double *)malloc(features * sizeof(double));
        if (i < num_out) // only add new arrays if i < num_out
        {
            for (int k = 0; k < num_out; k++)
            {
#ifdef _OPENMP
#pragma omp for
#endif
                // preallocate with random initial weights
                for (j = 0; j < features; j++)
                {
                    double *w = kohonen_data_3d(&W, i, k, j);
                    w[0] = _random(-5, 5);
                }
            }
        }
    }
    test_2d_classes(X, N); // create test data in four 2D clusters
    save_2d_data("test1.csv", X, N, features); // save test data points
    save_u_matrix("w11.csv", &W); // save initial random weights
    kohonen_som(X, &W, N, features, num_out, 1e-4); // train the SOM
    save_u_matrix("w12.csv", &W); // save the resultant weights
    for (int i = 0; i < N; i++) free(X[i]);
    free(X);
    free(W.data);
}
/** Creates a random set of points distributed in four clusters in
 * 3D space with centroids at the points
 * * \f$(0.5, 0.5, 0.5)\f$
 * * \f$(0.5, -0.5, -0.5)\f$
 * * \f$(-0.5, 0.5, 0.5)\f$
 * * \f$(-0.5, -0.5, -0.5)\f$
 *
 * \param[out] data matrix to store data in
 * \param[in] N number of points required
 */
void test_3d_classes1(double *const *data, int N)
{
    const double R = 0.2; // radius of cluster
    int i;
    const int num_classes = 4;
    const double centres[][3] = {
        // centres of each class cluster
        {.5, .5, .5},    // centre of class 1
        {.5, -.5, -.5},  // centre of class 2
        {-.5, .5, .5},   // centre of class 3
        {-.5, -.5, -.5}  // centre of class 4
        /* bug fix: this row was "{-.5, -.5 - .5}" -- a missing comma made
           the initializer {-0.5, -1.0, 0.0} instead of {-0.5, -0.5, -0.5} */
    };
#ifdef _OPENMP
#pragma omp for
#endif
    for (i = 0; i < N; i++)
    {
        int class =
            rand() % num_classes; // select a random class for the point
        // create random coordinates (x,y,z) around the centre of the class
        data[i][0] = _random(centres[class][0] - R, centres[class][0] + R);
        data[i][1] = _random(centres[class][1] - R, centres[class][1] + R);
        data[i][2] = _random(centres[class][2] - R, centres[class][2] + R);
        /* The following can also be used
        for (int j = 0; j < 3; j++)
            data[i][j] = _random(centres[class][j] - R, centres[class][j] + R);
        */
    }
}
/** Test that creates a random set of points distributed in 4 clusters in
* 3D space and trains an SOM that finds the topological pattern. The following
* [CSV](https://en.wikipedia.org/wiki/Comma-separated_values) files are created
* to validate the execution:
* * `test2.csv`: random test samples points
* * `w21.csv`: initial random U-matrix
* * `w22.csv`: trained SOM U-matrix
*/
void test2()
{
    int j, N = 500;
    int features = 3;
    int num_out = 30; // SOM grid size - num_out x num_out
    // 3D space, hence size = number of rows * 3
    double **X = (double **)malloc(N * sizeof(double *));
    // cluster nodes in 'x' * cluster nodes in 'y' * 3 features
    struct kohonen_array_3d W;
    W.dim1 = num_out;
    W.dim2 = num_out;
    W.dim3 = features;
    W.data = (double *)malloc(num_out * num_out * features *
                              sizeof(double)); // assign rows
    /* single loop allocates the sample rows and initializes the weights */
    for (int i = 0; i < max(num_out, N); i++) // loop till max(N, num_out)
    {
        if (i < N) // only add new arrays if i < N
            X[i] = (double *)malloc(features * sizeof(double));
        if (i < num_out) // only add new arrays if i < num_out
        {
            for (int k = 0; k < num_out; k++)
            {
#ifdef _OPENMP
#pragma omp for
#endif
                for (j = 0; j < features; j++)
                { // preallocate with random initial weights
                    double *w = kohonen_data_3d(&W, i, k, j);
                    w[0] = _random(-5, 5);
                }
            }
        }
    }
    test_3d_classes1(X, N); // create test data in four 3D clusters
    save_2d_data("test2.csv", X, N, features); // save test data points
    save_u_matrix("w21.csv", &W); // save initial random weights
    kohonen_som(X, &W, N, features, num_out, 1e-4); // train the SOM
    save_u_matrix("w22.csv", &W); // save the resultant weights
    for (int i = 0; i < N; i++) free(X[i]);
    free(X);
    free(W.data);
}
/** Creates a random set of points distributed in eight clusters in
 * 3D space with centroids at the corners of the cube
 * \f$(\pm 0.5, \pm 0.5, \pm 0.5)\f$.
 *
 * \param[out] data matrix to store data in
 * \param[in] N number of points required
 */
void test_3d_classes2(double *const *data, int N)
{
    const double R = 0.2; // radius of cluster
    int i;
    const int num_classes = 8;
    const double centres[][3] = {
        // centres of each class cluster: all 8 corners of a cube
        {.5, .5, .5},    // centre of class 1
        {.5, .5, -.5},   // centre of class 2
        {.5, -.5, .5},   // centre of class 3
        {.5, -.5, -.5},  // centre of class 4
        {-.5, .5, .5},   // centre of class 5
        {-.5, .5, -.5},  // centre of class 6
        {-.5, -.5, .5},  // centre of class 7
        {-.5, -.5, -.5}  // centre of class 8
    };
#ifdef _OPENMP
    /* NOTE(review): orphaned "omp for" -- no effect unless called from a
       parallel region, and rand() is not guaranteed thread-safe; verify. */
#pragma omp for
#endif
    for (i = 0; i < N; i++)
    {
        int class =
            rand() % num_classes; // select a random class for the point
        // create random coordinates (x,y,z) around the centre of the class
        data[i][0] = _random(centres[class][0] - R, centres[class][0] + R);
        data[i][1] = _random(centres[class][1] - R, centres[class][1] + R);
        data[i][2] = _random(centres[class][2] - R, centres[class][2] + R);
        /* The following can also be used
        for (int j = 0; j < 3; j++)
            data[i][j] = _random(centres[class][j] - R, centres[class][j] + R);
        */
    }
}
/** Test that creates a random set of points distributed in eight clusters in
* 3D space and trains an SOM that finds the topological pattern. The following
* [CSV](https://en.wikipedia.org/wiki/Comma-separated_values) files are created
* to validate the execution:
* * `test3.csv`: random test samples points
* * `w31.csv`: initial random U-matrix
* * `w32.csv`: trained SOM U-matrix
*/
void test3()
{
    int j, N = 500;
    int features = 3;
    int num_out = 30; // SOM grid size - num_out x num_out
    double **X = (double **)malloc(N * sizeof(double *));
    // cluster nodes in 'x' * cluster nodes in 'y' * 3 features
    struct kohonen_array_3d W;
    W.dim1 = num_out;
    W.dim2 = num_out;
    W.dim3 = features;
    W.data = (double *)malloc(num_out * num_out * features *
                              sizeof(double)); // assign rows
    /* single loop allocates the sample rows and initializes the weights */
    for (int i = 0; i < max(num_out, N); i++) // loop till max(N, num_out)
    {
        if (i < N) // only add new arrays if i < N
            X[i] = (double *)malloc(features * sizeof(double));
        if (i < num_out) // only add new arrays if i < num_out
        {
            for (int k = 0; k < num_out; k++)
            {
#ifdef _OPENMP
#pragma omp for
#endif
                // preallocate with random initial weights
                for (j = 0; j < features; j++)
                {
                    double *w = kohonen_data_3d(&W, i, k, j);
                    w[0] = _random(-5, 5);
                }
            }
        }
    }
    test_3d_classes2(X, N); // create test data in eight 3D clusters
    save_2d_data("test3.csv", X, N, features); // save test data points
    save_u_matrix("w31.csv", &W); // save initial random weights
    kohonen_som(X, &W, N, features, num_out, 0.01); // train the SOM
    save_u_matrix("w32.csv", &W); // save the resultant weights
    for (int i = 0; i < N; i++) free(X[i]);
    free(X);
    free(W.data);
}
/**
* Convert clock cycle difference to time in seconds
*
* \param[in] start_t start clock
* \param[in] end_t end clock
* \returns time difference in seconds
*/
double get_clock_diff(clock_t start_t, clock_t end_t)
{
    /* Elapsed CPU time in seconds between two clock() readings. */
    double ticks = (double)(end_t - start_t);
    return ticks / (double)CLOCKS_PER_SEC;
}
/** Main function */
/* Run all three SOM tests, timing each with clock(). */
int main(int argc, char **argv)
{
#ifdef _OPENMP
    printf("Using OpenMP based parallelization\n");
#else
    printf("NOT using OpenMP based parallelization\n");
#endif
    clock_t start_clk, end_clk;
    start_clk = clock();
    test1();
    end_clk = clock();
    printf("Test 1 completed in %.4g sec\n",
           get_clock_diff(start_clk, end_clk));
    start_clk = clock();
    test2();
    end_clk = clock();
    printf("Test 2 completed in %.4g sec\n",
           get_clock_diff(start_clk, end_clk));
    start_clk = clock();
    test3();
    end_clk = clock();
    printf("Test 3 completed in %.4g sec\n",
           get_clock_diff(start_clk, end_clk));
    printf("(Note: Calculated times include: writing files to disk.)\n\n");
    return 0;
}
|
heat.c | /*********************************************************************************/
/* */
/* Animation of heat equation in a planar domain */
/* */
/* N. Berglund, May 2021 */
/* */
/* Feel free to reuse, but if doing so it would be nice to drop a */
/* line to nils.berglund@univ-orleans.fr - Thanks! */
/* */
/* compile with */
/* gcc -o heat heat.c */
/* -L/usr/X11R6/lib -ltiff -lm -lGL -lGLU -lX11 -lXmu -lglut -O3 -fopenmp */
/* */
/* To make a video, set MOVIE to 1 and create subfolder tif_heat */
/* It may be possible to increase parameter PAUSE */
/* */
/* create movie using */
/* ffmpeg -i wave.%05d.tif -vcodec libx264 wave.mp4 */
/* */
/*********************************************************************************/
/*********************************************************************************/
/* */
/* NB: The algorithm used to simulate the wave equation is highly paralellizable */
/* One could make it much faster by using a GPU */
/* */
/*********************************************************************************/
#include <math.h>
#include <string.h>
#include <GL/glut.h>
#include <GL/glu.h>
#include <unistd.h>
#include <sys/types.h>
#include <tiffio.h> /* Sam Leffler's libtiff library. */
#include <omp.h>
#define MOVIE 0 /* set to 1 to generate movie */
/* General geometrical parameters */
#define WINWIDTH 1280 /* window width */
#define WINHEIGHT 720 /* window height */
#define NX 1280 /* number of grid points on x axis */
#define NY 720 /* number of grid points on y axis */
// #define NX 640 /* number of grid points on x axis */
// #define NY 360 /* number of grid points on y axis */
/* setting NX to WINWIDTH and NY to WINHEIGHT increases resolution */
/* but will multiply run time by 4 */
#define XMIN -2.0
#define XMAX 2.0 /* x interval */
// #define XMIN -1.5
// #define XMAX 2.5 /* x interval */
#define YMIN -1.125
#define YMAX 1.125 /* y interval for 9/16 aspect ratio */
#define JULIA_SCALE 1.1 /* scaling for Julia sets */
// #define JULIA_SCALE 0.8 /* scaling for Julia sets */
/* Choice of the billiard table */
#define B_DOMAIN 25 /* choice of domain shape */
#define D_RECTANGLE 0 /* rectangular domain */
#define D_ELLIPSE 1 /* elliptical domain */
#define D_STADIUM 2 /* stadium-shaped domain */
#define D_SINAI 3 /* Sinai billiard */
#define D_DIAMOND 4 /* diamond-shaped billiard */
#define D_TRIANGLE 5 /* triangular billiard */
#define D_FLAT 6 /* flat interface */
#define D_ANNULUS 7 /* annulus */
#define D_POLYGON 8 /* polygon */
#define D_YOUNG 9 /* Young diffraction slits */
#define D_GRATING 10 /* diffraction grating */
#define D_EHRENFEST 11 /* Ehrenfest urn type geometry */
#define D_MENGER 15 /* Menger-Sierpinski carpet */
#define D_JULIA_INT 16 /* interior of Julia set */
/* Billiard tables for heat equation */
#define D_ANNULUS_HEATED 21 /* annulus with different temperatures */
#define D_MENGER_HEATED 22 /* Menger gasket with different temperatures */
#define D_MENGER_H_OPEN 23 /* Menger gasket with different temperatures and larger domain */
#define D_MANDELBROT 24 /* Mandelbrot set */
#define D_JULIA 25 /* Julia set */
#define D_MANDELBROT_CIRCLE 26 /* Mandelbrot set with circular conductor */
#define LAMBDA 0.7 /* parameter controlling the dimensions of domain */
#define MU 0.1 /* parameter controlling the dimensions of domain */
#define NPOLY 6 /* number of sides of polygon */
#define APOLY 1.0 /* angle by which to turn polygon, in units of Pi/2 */
#define MDEPTH 2 /* depth of computation of Menger gasket */
#define MRATIO 5 /* ratio defining Menger gasket */
#define MANDELLEVEL 1000 /* iteration level for Mandelbrot set */
#define MANDELLIMIT 10.0 /* limit value for approximation of Mandelbrot set */
#define FOCI 1 /* set to 1 to draw focal points of ellipse */
/* You can add more billiard tables by adapting the functions */
/* xy_in_billiard and draw_billiard in sub_wave.c */
/* Physical patameters of wave equation */
// #define DT 0.00001
#define DT 0.000004
// #define DT 0.000002
// #define DT 0.00000002
// #define DT 0.000000005
#define VISCOSITY 10.0
#define T_OUT 2.0 /* outside temperature */
#define T_IN 0.0 /* inside temperature */
// #define T_OUT 0.0 /* outside temperature */
// #define T_IN 2.0 /* inside temperature */
#define SPEED 0.0 /* speed of drift to the right */
/* Boundary conditions */
#define B_COND 0
#define BC_DIRICHLET 0 /* Dirichlet boundary conditions */
#define BC_PERIODIC 1 /* periodic boundary conditions */
#define BC_ABSORBING 2 /* absorbing boundary conditions (beta version) */
/* Parameters for length and speed of simulation */
#define NSTEPS 4500 /* number of frames of movie */
#define NVID 50 /* number of iterations between images displayed on screen */
// #define NVID 100 /* number of iterations between images displayed on screen */
#define NSEG 100 /* number of segments of boundary */
#define PAUSE 100 /* number of frames after which to pause */
#define PSLEEP 1 /* sleep time during pause */
#define SLEEP1 2 /* initial sleeping time */
#define SLEEP2 1 /* final sleeping time */
/* For debugging purposes only */
#define FLOOR 0 /* set to 1 to limit wave amplitude to VMAX */
#define VMAX 10.0 /* max value of wave amplitude */
/* Field representation */
#define FIELD_REP 0
#define F_INTENSITY 0 /* color represents intensity */
#define F_GRADIENT 1 /* color represents norm of gradient */
#define DRAW_FIELD_LINES 1 /* set to 1 to draw field lines */
#define FIELD_LINE_WIDTH 1 /* width of field lines */
#define N_FIELD_LINES 200 /* number of field lines */
#define FIELD_LINE_FACTOR 100 /* factor controlling precision when computing origin of field lines */
/* Color schemes */
#define BLACK 1 /* black background */
#define COLOR_SCHEME 1 /* choice of color scheme */
#define C_LUM 0 /* color scheme modifies luminosity (with slow drift of hue) */
#define C_HUE 1 /* color scheme modifies hue */
#define C_PHASE 2 /* color scheme shows phase */
#define SCALE 0 /* set to 1 to adjust color scheme to variance of field */
// #define SLOPE 0.1 /* sensitivity of color on wave amplitude */
#define SLOPE 0.3 /* sensitivity of color on wave amplitude */
#define ATTENUATION 0.0 /* exponential attenuation coefficient of contrast with time */
#define COLORHUE 260 /* initial hue of water color for scheme C_LUM */
#define COLORDRIFT 0.0 /* how much the color hue drifts during the whole simulation */
#define LUMMEAN 0.5 /* amplitude of luminosity variation for scheme C_LUM */
#define LUMAMP 0.3 /* amplitude of luminosity variation for scheme C_LUM */
#define HUEMEAN 280.0 /* mean value of hue for color scheme C_HUE */
#define HUEAMP -110.0 /* amplitude of variation of hue for color scheme C_HUE */
// #define HUEMEAN 270.0 /* mean value of hue for color scheme C_HUE */
// #define HUEAMP -130.0 /* amplitude of variation of hue for color scheme C_HUE */
/* Basic math */
#define PI 3.141592654
#define DPI 6.283185307
#define PID 1.570796327
double julia_x = 0.0, julia_y = 0.0; /* parameters for Julia sets */
#include "sub_wave.c"
double courant2; /* Courant parameter squared */
double dx2; /* spatial step size squared */
double intstep; /* integration step */
double intstep1; /* integration step used in absorbing boundary conditions */
void init_gaussian(x, y, mean, amplitude, scalex, phi, xy_in)
/* initialise field with gaussian at position (x,y); cells outside the
   billiard get the boundary temperatures T_IN / T_OUT */
double x, y, mean, amplitude, scalex, *phi[NX];
short int * xy_in[NX];
{
    int i, j, in;
    double xy[2], dist2, module, phase, scale2;

    scale2 = scalex*scalex;
    printf("Initialising field\n");
    for (i=0; i<NX; i++)
        for (j=0; j<NY; j++)
        {
            ij_to_xy(i, j, xy);
            /* classify the grid cell: 1 = interior, >= 2 = heated boundary
               layer, otherwise exterior */
            xy_in[i][j] = xy_in_billiard(xy[0],xy[1]);
            in = xy_in[i][j];
            if (in == 1)
            {
                dist2 = (xy[0]-x)*(xy[0]-x) + (xy[1]-y)*(xy[1]-y);
                module = amplitude*exp(-dist2/scale2);
                if (module < 1.0e-15) module = 1.0e-15;
                phi[i][j] = mean + module/scalex;
            }   /* boundary temperatures: geometric decay with depth in-2 */
            else if (in >= 2) phi[i][j] = T_IN*pow(0.75, (double)(in-2));
            // else if (in >= 2) phi[i][j] = T_IN*pow(1.0 - 0.5*(double)(in-2), (double)(in-2));
            // else if (in >= 2) phi[i][j] = T_IN*(1.0 - (double)(in-2)/((double)MDEPTH))*(1.0 - (double)(in-2)/((double)MDEPTH));
            else phi[i][j] = T_OUT;
        }
}
void init_julia_set(phi, xy_in)
/* change Julia set boundary condition: reclassify every cell and reset
   the boundary cells (in >= 2) to the inside temperature T_IN */
double *phi[NX];
short int * xy_in[NX];
{
    int i, j, in;
    /* dist2, module, phase, scale2 are unused here (kept from the
       init_gaussian template) */
    double xy[2], dist2, module, phase, scale2;

    for (i=0; i<NX; i++)
        for (j=0; j<NY; j++)
        {
            ij_to_xy(i, j, xy);
            xy_in[i][j] = xy_in_billiard(xy[0],xy[1]);
            in = xy_in[i][j];
            if (in >= 2) phi[i][j] = T_IN;
        }
}
/*********************/
/* animation part */
/*********************/
void compute_gradient(phi, nablax, nablay)
/* compute the gradient of the field by central differences, with
   one-sided (clamped) differences on the domain edges */
double *phi[NX], *nablax[NX], *nablay[NX];
{
    int i, j, iplus, iminus, jplus, jminus;
    double dx;

    /* grid spacing; the same value is used for d/dx and d/dy, which is
       exact here because (XMAX-XMIN)/NX == (YMAX-YMIN)/NY */
    dx = (XMAX-XMIN)/((double)NX);
    for (i=0; i<NX; i++)
        for (j=0; j<NY; j++)
        {
            iplus = i+1;  if (iplus == NX) iplus = NX-1;
            iminus = i-1; if (iminus == -1) iminus = 0;
            /* bug fix: the y-clamp compared jplus against NX instead of NY,
               so phi[i][NY] was read out of bounds for every j = NY-1 */
            jplus = j+1;  if (jplus == NY) jplus = NY-1;
            jminus = j-1; if (jminus == -1) jminus = 0;
            nablax[i][j] = (phi[iplus][j] - phi[iminus][j])/dx;
            nablay[i][j] = (phi[i][jplus] - phi[i][jminus])/dx;
        }
}
void draw_field_line(x, y, xy_in, nablax, nablay, delta, nsteps)
// void draw_field_line(x, y, nablax, nablay, delta, nsteps)
/* draw a field line of the gradient, starting in (x,y): follow the
   normalized gradient in steps of length delta, for at most nsteps steps,
   stopping when the gradient vanishes or the domain boundary is left */
double x, y, *nablax[NX], *nablay[NX], delta;
int nsteps;
short int *xy_in[NX];
{
    double x1, y1, x2, y2, pos[2], nabx, naby, norm2, norm;
    int i = 0, ij[2], cont = 1;

    glColor3f(1.0, 1.0, 1.0);
    glLineWidth(FIELD_LINE_WIDTH);
    x1 = x;
    y1 = y;
    glEnable(GL_LINE_SMOOTH);
    glBegin(GL_LINE_STRIP);
    xy_to_pos(x1, y1, pos);
    glVertex2d(pos[0], pos[1]);
    i = 0;
    while ((cont)&&(i < nsteps))
    {
        xy_to_ij(x1, y1, ij);
        /* clamp the grid indices to the array bounds */
        if (ij[0] < 0) ij[0] = 0;
        if (ij[0] > NX-1) ij[0] = NX-1;
        if (ij[1] < 0) ij[1] = 0;
        if (ij[1] > NY-1) ij[1] = NY-1;
        nabx = nablax[ij[0]][ij[1]];
        naby = nablay[ij[0]][ij[1]];
        norm2 = nabx*nabx + naby*naby;
        if (norm2 > 1.0e-14)
        {
            /* avoid too large step size */
            if (norm2 < 1.0e-9) norm2 = 1.0e-9;
            norm = sqrt(norm2);
            x1 = x1 + delta*nabx/norm;
            y1 = y1 + delta*naby/norm;
        }
        else
        {
            /* gradient is (numerically) zero: stop the field line */
            cont = 0;
        }
        if (!xy_in[ij[0]][ij[1]]) cont = 0;
        /* stop if the boundary is hit */
        // if (xy_in[ij[0]][ij[1]] != 1) cont = 0;
        xy_to_pos(x1, y1, pos);
        glVertex2d(pos[0], pos[1]);
        i++;
    }
    glEnd();
}
void draw_wave(phi, xy_in, scale, time)
/* draw the field as colored quads, and optionally overlay field lines of
   the gradient whose starting points are spaced by equal gradient flux */
double *phi[NX], scale;
short int *xy_in[NX];
int time;
{
    int i, j, iplus, iminus, jplus, jminus, ij[2], counter = 0;
    static int first = 1;
    double rgb[3], xy[2], x1, y1, x2, y2, dx, value, angle, dangle, intens, deltaintens, sum = 0.0;
    double *nablax[NX], *nablay[NX];
    /* field-line seed points and per-segment data, computed once (static) */
    static double linex[N_FIELD_LINES*FIELD_LINE_FACTOR], liney[N_FIELD_LINES*FIELD_LINE_FACTOR], distance[N_FIELD_LINES*FIELD_LINE_FACTOR], integral[N_FIELD_LINES*FIELD_LINE_FACTOR + 1];

    /* scratch buffers for the gradient, reallocated every frame */
    for (i=0; i<NX; i++)
    {
        nablax[i] = (double *)malloc(NY*sizeof(double));
        nablay[i] = (double *)malloc(NY*sizeof(double));
    }
    /* compute the gradient */
    // if (FIELD_REP > 0)
    compute_gradient(phi, nablax, nablay);
    /* compute the position of origins of field lines: points on an ellipse
       with semi-axes sqrt(3.58) and sqrt(1.18) -- presumably matched to the
       domain shape; TODO confirm against the billiard parameters */
    if ((first)&&(DRAW_FIELD_LINES))
    {
        first = 0;
        printf("computing linex\n");
        x1 = sqrt(3.58);
        y1 = 0.0;
        linex[0] = x1;
        liney[0] = y1;
        dangle = DPI/((double)(N_FIELD_LINES*FIELD_LINE_FACTOR));
        for (i = 1; i < N_FIELD_LINES*FIELD_LINE_FACTOR; i++)
        {
            // angle = PID + (double)i*dangle;
            angle = (double)i*dangle;
            x2 = sqrt(3.58)*cos(angle);
            y2 = sqrt(1.18)*sin(angle);
            linex[i] = x2;
            liney[i] = y2;
            distance[i-1] = module2(x2-x1,y2-y1);
            x1 = x2;
            y1 = y2;
        }
        /* close the polygon: distance from the last point back to the first */
        distance[N_FIELD_LINES*FIELD_LINE_FACTOR - 1] = module2(x2-sqrt(3.58),y2);
    }
    dx = (XMAX-XMIN)/((double)NX);
    glBegin(GL_QUADS);
    for (i=0; i<NX; i++)
        for (j=0; j<NY; j++)
        {
            /* color either the field value or the gradient norm */
            if (FIELD_REP == F_INTENSITY) value = phi[i][j];
            else if (FIELD_REP == F_GRADIENT)
            {
                value = module2(nablax[i][j], nablay[i][j]);
            }
            // if ((phi[i][j] - T_IN)*(phi[i][j] - T_OUT) < 0.0)
            if (xy_in[i][j] == 1)
            {
                color_scheme(COLOR_SCHEME, value, scale, time, rgb);
                glColor3f(rgb[0], rgb[1], rgb[2]);
            }
            else glColor3f(0.0, 0.0, 0.0);
            glVertex2i(i, j);
            glVertex2i(i+1, j);
            glVertex2i(i+1, j+1);
            glVertex2i(i, j+1);
        }
    glEnd ();
    /* draw a field line */
    if (DRAW_FIELD_LINES)
    {
        /* compute gradient norm along boundary and its integral */
        for (i = 0; i < N_FIELD_LINES*FIELD_LINE_FACTOR; i++)
        {
            xy_to_ij(linex[i], liney[i], ij);
            intens = module2(nablax[ij[0]][ij[1]], nablay[ij[0]][ij[1]])*distance[i];
            if (i > 0) integral[i] = integral[i-1] + intens;
            else integral[i] = intens;
        }
        /* choose seeds so that each field line carries equal flux */
        deltaintens = integral[N_FIELD_LINES*FIELD_LINE_FACTOR-1]/((double)N_FIELD_LINES);
        // deltaintens = integral[N_FIELD_LINES*FIELD_LINE_FACTOR-1]/((double)N_FIELD_LINES + 1.0);
        i = 0;
        // draw_field_line(linex[0], liney[0], nablax, nablay, 0.00002, 100000);
        draw_field_line(linex[0], liney[0], xy_in, nablax, nablay, 0.00002, 100000);
        for (j = 1; j < N_FIELD_LINES+1; j++)
        {
            while ((integral[i] <= j*deltaintens)&&(i < N_FIELD_LINES*FIELD_LINE_FACTOR)) i++;
            // draw_field_line(linex[i], liney[i], nablax, nablay, 0.00002, 100000);
            draw_field_line(linex[i], liney[i], xy_in, nablax, nablay, 0.00002, 100000);
            counter++;
        }
        printf("%i lines\n", counter);
    }
    for (i=0; i<NX; i++)
    {
        free(nablax[i]);
        free(nablay[i]);
    }
}
/* One time step of the field evolution on the NX x NY grid.
 *   phi   : field values, updated in place at the end of the step
 *   xy_in : domain mask (1 = inside); points outside are left untouched
 * Uses globals intstep, intstep1 (set in animation()) and the compile-time
 * parameters B_COND, SPEED, FLOOR, VMAX.
 *
 * Fixes vs. previous revision:
 *  - the absorbing-b.c. bulk update read the uninitialized variable delta2
 *    (undefined behavior); it now uses the Laplacian delta1, which is the
 *    evident intent -- NOTE(review): confirm sign/scheme against the
 *    non-absorbing branch;
 *  - the FLOOR clamp wrote phi[i][j], which was then overwritten by the
 *    newphi copy-back below, so it had no effect; it now clamps newphi.
 */
void evolve_wave(phi, xy_in)
double *phi[NX]; short int *xy_in[NX];
{
    int i, j, iplus, iminus, jplus, jminus;
    double delta1, x, *newphi[NX];

    for (i=0; i<NX; i++) newphi[i] = (double *)malloc(NY*sizeof(double));

    #pragma omp parallel for private(i,j,iplus,iminus,jplus,jminus,delta1,x)
    for (i=0; i<NX; i++){
        for (j=0; j<NY; j++){
            if (xy_in[i][j] == 1){
                /* neighbour indices depending on the boundary conditions */
                if ((B_COND == BC_DIRICHLET)||(B_COND == BC_ABSORBING))
                {
                    /* clamp indices at the borders */
                    iplus = (i+1);   if (iplus == NX) iplus = NX-1;
                    iminus = (i-1);  if (iminus == -1) iminus = 0;
                    jplus = (j+1);   if (jplus == NY) jplus = NY-1;
                    jminus = (j-1);  if (jminus == -1) jminus = 0;
                }
                else if (B_COND == BC_PERIODIC)
                {
                    iplus = (i+1) % NX;
                    iminus = (i-1) % NX;
                    if (iminus < 0) iminus += NX;
                    jplus = (j+1) % NY;
                    jminus = (j-1) % NY;
                    if (jminus < 0) jminus += NY;
                }

                /* discretized 5-point Laplacian */
                delta1 = phi[iplus][j] + phi[iminus][j] + phi[i][jplus] + phi[i][jminus] - 4.0*phi[i][j];

                x = phi[i][j];

                /* evolve phi */
                if (B_COND != BC_ABSORBING)
                {
                    newphi[i][j] = x + intstep*(delta1 - SPEED*(phi[iplus][j] - phi[i][j]));
                }
                else /* case of absorbing b.c. - this is only an approximation of correct way of implementing */
                {
                    /* in the bulk */
                    if ((i>0)&&(i<NX-1)&&(j>0)&&(j<NY-1))
                    {
                        /* BUG FIX: the original read the never-assigned delta2
                           here (UB); use the Laplacian delta1 instead */
                        newphi[i][j] = x + intstep*delta1;
                    }
                    /* right border: one-sided outgoing-wave update */
                    else if (i==NX-1)
                    {
                        newphi[i][j] = x - intstep1*(x - phi[i-1][j]);
                    }
                    /* upper border */
                    else if (j==NY-1)
                    {
                        newphi[i][j] = x - intstep1*(x - phi[i][j-1]);
                    }
                    /* left border */
                    else if (i==0)
                    {
                        newphi[i][j] = x - intstep1*(x - phi[1][j]);
                    }
                    /* lower border */
                    else if (j==0)
                    {
                        newphi[i][j] = x - intstep1*(x - phi[i][1]);
                    }
                }

                if (FLOOR)
                {
                    /* BUG FIX: clamp the NEW value; the previous code clamped
                       phi[i][j], which the copy-back below overwrote */
                    if (newphi[i][j] > VMAX) newphi[i][j] = VMAX;
                    if (newphi[i][j] < -VMAX) newphi[i][j] = -VMAX;
                }
            }
        }
    }

    /* copy back after the full sweep so the update stays explicit in time */
    for (i=0; i<NX; i++){
        for (j=0; j<NY; j++){
            if (xy_in[i][j] == 1) phi[i][j] = newphi[i][j];
        }
    }

    for (i=0; i<NX; i++)
    {
        free(newphi[i]);
    }
//    printf("phi(0,0) = %.3lg, psi(0,0) = %.3lg\n", phi[NX/2][NY/2], psi[NX/2][NY/2]);
}
/* Mean of phi^2 over the grid points (excluding row/column 0) that lie
 * inside the domain; returns 0-divided-safely when the domain is empty. */
double compute_variance(phi, xy_in)
/* compute the variance (total probability) of the field */
double *phi[NX]; short int * xy_in[NX];
{
    int i, j, n = 0;
    double variance = 0.0;

    for (i=1; i<NX; i++)
        for (j=1; j<NY; j++)
            if (xy_in[i][j])
            {
                variance += phi[i][j]*phi[i][j];
                n++;
            }

    /* guard against an empty interior (avoid division by zero) */
    if (n == 0) n = 1;
    return variance/(double)n;
}
/* Divide the field by the standard deviation sqrt(variance) on all interior
 * domain points, renormalising its variance to 1.
 * BUG FIX: a vanished field (variance == 0) previously produced a division
 * by zero (inf/nan propagation); it is now left unchanged. */
void renormalise_field(phi, xy_in, variance)
double *phi[NX], variance;
short int * xy_in[NX];
{
    int i, j;
    double stdv;

    stdv = sqrt(variance);
    if (stdv == 0.0) return;    /* nothing to normalise */

    for (i=1; i<NX; i++)
        for (j=1; j<NY; j++)
        {
            if (xy_in[i][j])
            {
                phi[i][j] = phi[i][j]/stdv;
            }
        }
}
/* Write "Level <n>" in white near the top-left corner of the window. */
void print_level(level)
int level;
{
    double pos[2];
    char message[50];

    glColor3f(1.0, 1.0, 1.0);
    /* snprintf: bounded write into message (sprintf could overflow) */
    snprintf(message, sizeof message, "Level %i", level);
    xy_to_pos(XMIN + 0.1, YMAX - 0.2, pos);
    write_text(pos[0], pos[1], message);
}
/* Write the current Julia parameter c = julia_x + julia_y*i in white near
 * the top-left corner; formats the sign of the imaginary part explicitly. */
void print_Julia_parameters()
{
    double pos[2];
    char message[50];

    glColor3f(1.0, 1.0, 1.0);
    /* snprintf: bounded write into message (sprintf could overflow) */
    if (julia_y >= 0.0) snprintf(message, sizeof message, "c = %.5f + %.5f i", julia_x, julia_y);
    else snprintf(message, sizeof message, "c = %.5f %.5f i", julia_x, julia_y);
    xy_to_pos(XMIN + 0.1, YMAX - 0.2, pos);
    write_text(pos[0], pos[1], message);
}
/* Sweep the Julia parameter c on a circle of the given radius centred at
 * -0.9 as `time` runs from 0 to NSTEPS, then rebuild the Julia set.
 * Updates the globals julia_x / julia_y. */
void set_Julia_parameters(time, phi, xy_in)
int time;
double *phi[NX];
short int *xy_in[NX];
{
    double jangle, radius = 0.15;

    /* full turn (DPI) over the NSTEPS frames of the animation */
    jangle = (double)time*DPI/(double)NSTEPS;
//     jangle = (double)time*0.001;
//     jangle = (double)time*0.0001;

    julia_x = -0.9 + radius*cos(jangle);
    julia_y = radius*sin(jangle);
    init_julia_set(phi, xy_in);

    printf("Julia set parameters : i = %i, angle = %.5lg, cx = %.5lg, cy = %.5lg \n", time, jangle, julia_x, julia_y);
}
/* Move the Julia parameter c along (a neighbourhood of) the main cardioid of
 * the Mandelbrot set, with a small vertical oscillation yshift, then rebuild
 * the Julia set. Updates the globals julia_x / julia_y. */
void set_Julia_parameters_cardioid(time, phi, xy_in)
int time;
double *phi[NX];
short int *xy_in[NX];
{
    double jangle, cosj, sinj, yshift;

    /* slowly increasing angle parameter; the cube root keeps the sweep slow */
    jangle = pow(1.05 + (double)time*0.00003, 0.333);
    yshift = 0.02*sin((double)time*PID*0.002);
//     jangle = pow(1.0 + (double)time*0.00003, 0.333);
//     jangle = pow(0.05 + (double)time*0.00003, 0.333);
//     jangle = pow(0.1 + (double)time*0.00001, 0.333);
//     yshift = 0.04*sin((double)time*PID*0.002);

    cosj = cos(jangle);
    sinj = sin(jangle);
    /* cardioid parametrization c = e^{it}/2 - e^{2it}/4, expanded in cos/sin */
    julia_x = 0.5*(cosj*(1.0 - 0.5*cosj) + 0.5*sinj*sinj);
    julia_y = 0.5*sinj*(1.0-cosj) + yshift;
    /* need to decrease 0.05 for i > 2000 */
//     julia_x = 0.5*(cosj*(1.0 - 0.5*cosj) + 0.5*sinj*sinj);
//     julia_y = 0.5*sinj*(1.0-cosj);
    init_julia_set(phi, xy_in);

    printf("Julia set parameters : i = %i, angle = %.5lg, cx = %.5lg, cy = %.5lg \n", time, jangle, julia_x, julia_y);
}
/* Main animation loop: allocate the field arrays, initialize the field and
 * the Julia domain, then alternate drawing and time-stepping for NSTEPS
 * frames, optionally saving movie frames to disk.
 * BUG FIX: the xy_in rows were never freed (memory leak); unused locals
 * (time, jangle, cosj, sinj) removed. */
void animation()
{
    double scale, dx, var;
    double *phi[NX];
    short int *xy_in[NX];
    int i, j, s;

    /* Since NX and NY are big, it seemed wiser to use some memory allocation here */
    for (i=0; i<NX; i++)
    {
        phi[i] = (double *)malloc(NY*sizeof(double));
        xy_in[i] = (short int *)malloc(NY*sizeof(short int));
    }

    dx = (XMAX-XMIN)/((double)NX);
    intstep = DT/(dx*dx*VISCOSITY);
    intstep1 = DT/(dx*VISCOSITY);

//     julia_x = 0.1;
//     julia_y = 0.6;

    set_Julia_parameters(0, phi, xy_in);

    printf("Integration step %.3lg\n", intstep);

    /* initialize wave function */
    init_gaussian(-1.0, 0.0, 0.1, 0.0, 0.01, phi, xy_in);
//     init_gaussian(x, y, mean, amplitude, scalex, phi, xy_in)

    if (SCALE)
    {
        var = compute_variance(phi, xy_in);
        scale = sqrt(1.0 + var);
        renormalise_field(phi, xy_in, var);
    }

    blank();
    glColor3f(0.0, 0.0, 0.0);
    glutSwapBuffers();

    draw_wave(phi, xy_in, 1.0, 0);
    draw_billiard();
    print_Julia_parameters();
//     print_level(MDEPTH);

    glutSwapBuffers();

    sleep(SLEEP1);
    if (MOVIE) for (i=0; i<SLEEP1*25; i++) save_frame();

    for (i=0; i<=NSTEPS; i++)
    {
        /* compute the variance of the field to adjust color scheme */
        /* the color depends on the field divided by sqrt(1 + variance) */
        if (SCALE)
        {
            var = compute_variance(phi, xy_in);
            scale = sqrt(1.0 + var);
//             printf("Norm: %5lg\t Scaling factor: %5lg\n", var, scale);
            renormalise_field(phi, xy_in, var);
        }
        else scale = 1.0;

        draw_wave(phi, xy_in, scale, i);
        for (j=0; j<NVID; j++) evolve_wave(phi, xy_in);

        draw_billiard();
//         print_level(MDEPTH);
        print_Julia_parameters();

        glutSwapBuffers();

        /* modify Julia set */
        set_Julia_parameters(i, phi, xy_in);

        if (MOVIE)
        {
            save_frame();

            /* it seems that saving too many files too fast can cause trouble with the file system */
            /* so this is to make a pause from time to time - parameter PAUSE may need adjusting */
            if (i % PAUSE == PAUSE - 1)
            {
                printf("Making a short pause\n");
                sleep(PSLEEP);
                s = system("mv wave*.tif tif_heat/");
            }
        }
    }

    if (MOVIE)
    {
        for (i=0; i<20; i++) save_frame();
        s = system("mv wave*.tif tif_heat/");
    }

    /* BUG FIX: free the xy_in rows as well as phi (they were leaked) */
    for (i=0; i<NX; i++)
    {
        free(phi[i]);
        free(xy_in[i]);
    }
}
/* GLUT display callback: clear both buffers, run the whole animation once,
 * pause SLEEP2 seconds, then destroy the window (which ends the program). */
void display(void)
{
    glPushMatrix();

    /* clear front and back buffer */
    blank();
    glutSwapBuffers();
    blank();
    glutSwapBuffers();

    animation();
    sleep(SLEEP2);

    glPopMatrix();

    glutDestroyWindow(glutGetWindow());
}
/* Program entry point: set up a double-buffered GLUT window and hand control
 * to the display callback, which runs the whole simulation. */
int main(int argc, char** argv)
{
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH);
    glutInitWindowSize(WINWIDTH,WINHEIGHT);
    glutCreateWindow("Heat equation in a planar domain");

    init();
    glutDisplayFunc(display);
    glutMainLoop();

    return 0;
}
|
binbased_projection.h | // KRATOS __ __ _____ ____ _ _ ___ _ _ ____
// | \/ | ____/ ___|| | | |_ _| \ | |/ ___|
// | |\/| | _| \___ \| |_| || || \| | | _
// | | | | |___ ___) | _ || || |\ | |_| |
// |_| |_|_____|____/|_| |_|___|_| \_|\____| APPLICATION
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Antonia Larese De Tetto
//
#if !defined(KRATOS_BINBASED_PROJECTION )
#define KRATOS_BINBASED_PROJECTION
//External includes
// System includes
#include <string>
#include <iostream>
#include <stdlib.h>
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "utilities/timer.h"
#include "meshing_application_variables.h"
//Database includes
#include "spatial_containers/spatial_containers.h"
#include "utilities/binbased_fast_point_locator.h"
#include "utilities/binbased_nodes_in_element_locator.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/// This class allows the interpolation between non-matching meshes in 2D and 3D.
/** @author Antonia Larese De Tetto <antoldt@cimne.upc.edu>
*
* This class allows the interpolation of a scalar or vectorial variable between non-matching meshes
* in 2D and 3D.
*
* For every node of the destination model part it is checked in which element of the origin model part it is
* contained and a linear interpolation is performed
*
* The data structure used by default is static bin.
* In order to use this utility the construction of a bin of object @see BinBasedNodesInElementLocator
* and a bin of nodes @see BinBasedFastPointLocator
* is required at the beginning of the calculation (only ONCE).
*/
//class BinBasedMeshTransfer
template<std::size_t TDim >
class BinBasedMeshTransfer
{
public:
///@name Type Definitions
///@{
/// Pointer definition of BinBasedMeshTransfer
KRATOS_CLASS_POINTER_DEFINITION(BinBasedMeshTransfer<TDim >);
/// Node type definition
typedef Node<3> NodeType;
typedef Geometry<NodeType> GeometryType;
///@}
///@name Life Cycle
///@{
/// Default constructor.
BinBasedMeshTransfer() = default; //
/// Destructor.
virtual ~BinBasedMeshTransfer() = default;
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
//If you want to pass the whole model part
//**********************************************************************
//**********************************************************************
/// Interpolate the whole problem type
/**
* @param rOrigin_ModelPart: the model part all the variable should be taken from
* @param rDestination_ModelPart: the destination model part where we want to know the values of the variables
*/
    void DirectInterpolation(
        ModelPart& rOrigin_ModelPart ,
        ModelPart& rDestination_ModelPart
        )
    {
        KRATOS_TRY

        // Placeholder: whole-model-part interpolation is not implemented yet;
        // use DirectVariableInterpolation for a single variable instead.
        KRATOS_ERROR << "Not implemented yet" << std::endl;

        KRATOS_CATCH("")
    }
//If you want to pass only one variable
//**********************************************************************
//**********************************************************************
/// Interpolate one variable from the fixed mesh to the moving one
/**
* @param rFixed_ModelPart: the model part all the variable should be taken from
* @param rMoving_ModelPart: the destination model part where we want to know the values of the variables
* @param rFixedDomainVariable: the name of the interpolated variable in the origin model part
* @param rMovingDomainVariable: the name of the interpolated variable in the destination model part
* @param node_locator: precomputed bin of objects. It is to be constructed separately @see binbased_fast_point_locator.h
*/
// Form fixed to moving model part
    /// Interpolate rFixedDomainVariable from the fixed mesh onto every node of
    /// the moving mesh (stored as rMovingDomainVariable), using the precomputed
    /// bin of fixed-mesh elements. Nodes that fall outside the fixed mesh keep
    /// the cleared (zero) value.
    template<class TDataType>
    void DirectVariableInterpolation(
        ModelPart& rFixed_ModelPart ,
        ModelPart& rMoving_ModelPart,
        Variable<TDataType>& rFixedDomainVariable ,
        Variable<TDataType>& rMovingDomainVariable,
        BinBasedFastPointLocator<TDim>& node_locator
        )
    {
        KRATOS_TRY

        KRATOS_INFO("BinBasedMeshTransfer") << "Interpolate From Fixed Mesh*************************************" << std::endl;

        // Reset the destination variable on every moving-mesh node
        //creating an auxiliary list for the new nodes
        for(auto node_it = rMoving_ModelPart.NodesBegin(); node_it != rMoving_ModelPart.NodesEnd(); ++node_it) {
            ClearVariables(node_it, rMovingDomainVariable);
        }

        Vector N(TDim + 1);
        const int max_results = 10000;
        typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results);
        const int nparticles = rMoving_ModelPart.Nodes().size();

        // For each destination node, locate the fixed-mesh element containing
        // it and interpolate with that element's shape functions
        #pragma omp parallel for firstprivate(results,N)
        for (int i = 0; i < nparticles; i++) {
            ModelPart::NodesContainerType::iterator iparticle = rMoving_ModelPart.NodesBegin() + i;
            NodeType::Pointer pparticle = *(iparticle.base());
            auto result_begin = results.begin();
            Element::Pointer pelement;

            bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results);

            if (is_found == true) {
                //Interpolate( ElemIt, N, *it_found , rFixedDomainVariable , rMovingDomainVariable );
                Interpolate( pelement, N, pparticle, rFixedDomainVariable , rMovingDomainVariable );
            }
        }

        KRATOS_CATCH("")
    }
/// Map one variable from the moving mesh to the fixed one -The two meshes should be of the same dimensions otherwise better to use
/// MappingFromMovingMesh_VariableMeshes that is a much generic tool.
/**
* @param rFixed_ModelPart: the model part all the variable should be taken from
* @param rMoving_ModelPart: the destination model part where we want to know the values of the variables
* @param rFixedDomainVariable: the name of the interpolated variable in the origin model part
* @param rMovingDomainVariable: the name of the interpolated variable in the destination model part
* @param node_locator: precomputed bin of objects (elelments of the fixed mesh). It is to be constructed separately @see binbased_nodes_in_element_locator
*/
// From moving to fixed model part
    /// Map rMovingDomainVariable from the moving-mesh nodes onto the fixed-mesh
    /// nodes: each moving node deposits N[k]*value onto the vertices of the
    /// fixed element containing it, and the result is normalized by the
    /// accumulated shape-function weights. The nodal YOUNG_MODULUS value is
    /// (ab)used as the temporary weight accumulator.
    template<class TDataType>
    void MappingFromMovingMesh(
        ModelPart& rMoving_ModelPart ,
        ModelPart& rFixed_ModelPart,
        Variable<TDataType>& rMovingDomainVariable ,
        Variable<TDataType>& rFixedDomainVariable,
        BinBasedFastPointLocator<TDim>& node_locator //this is a bin of objects which contains the FIXED model part
        )
    {
        KRATOS_TRY

        KRATOS_INFO("BinBasedMeshTransfer") << "Transfer From Moving Mesh*************************************" << std::endl;

        if (rMoving_ModelPart.NodesBegin()->SolutionStepsDataHas(rMovingDomainVariable) == false)
            KRATOS_THROW_ERROR(std::logic_error, "Add MovingDomain VARIABLE!!!!!! ERROR", "");
        if (rFixed_ModelPart.NodesBegin()->SolutionStepsDataHas(rFixedDomainVariable) == false)
            KRATOS_THROW_ERROR(std::logic_error, "Add FixedDomain VARIABLE!!!!!! ERROR", "");

        //creating an auxiliary list for the new nodes
        for(ModelPart::NodesContainerType::iterator node_it = rFixed_ModelPart.NodesBegin();
                node_it != rFixed_ModelPart.NodesEnd(); ++node_it)
        {
            ClearVariables(node_it, rFixedDomainVariable);
        }

        // Reset the weight accumulator on every fixed-mesh node
        for (ModelPart::NodesContainerType::iterator node_it = rFixed_ModelPart.NodesBegin();
                node_it != rFixed_ModelPart.NodesEnd(); node_it++)
        {
            //             if (node_it->IsFixed(VELOCITY_X) == false)
            //             {
            //                 (node_it)->FastGetSolutionStepValue(VELOCITY) = ZeroVector(3);
            //                 (node_it)->FastGetSolutionStepValue(TEMPERATURE) = 0.0;
            (node_it)->GetValue(YOUNG_MODULUS) = 0.0;
            //             }
        }

        //defintions for spatial search
        //         typedef NodeType PointType;
        //         typedef NodeType::Pointer PointTypePointer;

        Vector N(TDim + 1);
        const int max_results = 10000;
        typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results);
        const int nparticles = rMoving_ModelPart.Nodes().size();

        // Scatter phase: node locks guard the concurrent accumulation
        #pragma omp parallel for firstprivate(results,N)
        for (int i = 0; i < nparticles; i++)
        {
            ModelPart::NodesContainerType::iterator iparticle = rMoving_ModelPart.NodesBegin() + i;
            NodeType::Pointer pparticle = *(iparticle.base());
            auto result_begin = results.begin();
            Element::Pointer pelement;

            bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results);

            if (is_found == true)
            {
                GeometryType& geom = pelement->GetGeometry();
                //                 const array_1d<double, 3 > & vel_particle = (iparticle)->FastGetSolutionStepValue(VELOCITY);
                //                 const double& temperature_particle = (iparticle)->FastGetSolutionStepValue(TEMPERATURE);
                const TDataType& value = (iparticle)->FastGetSolutionStepValue(rMovingDomainVariable);

                for (std::size_t k = 0; k < geom.size(); k++)
                {
                    geom[k].SetLock();
                    geom[k].FastGetSolutionStepValue(rFixedDomainVariable) += N[k] * value;
                    geom[k].GetValue(YOUNG_MODULUS) += N[k];
                    geom[k].UnSetLock();
                }
            }
        }

        // Normalize by the accumulated shape-function weights; nodes that
        // received no contribution (weight 0) keep the cleared value
        for (ModelPart::NodesContainerType::iterator node_it = rFixed_ModelPart.NodesBegin();
                node_it != rFixed_ModelPart.NodesEnd(); node_it++)
        {
            const double NN = (node_it)->GetValue(YOUNG_MODULUS);
            if (NN != 0.0)
            {
                (node_it)->FastGetSolutionStepValue(rFixedDomainVariable) /= NN;
            }
        }

        KRATOS_CATCH("")
    }
// From moving to fixed model part
/// Interpolate one variable from the moving mesh to the fixed one
/**
* @param rFixed_ModelPart: the model part all the variable should be taken from
* @param rMoving_ModelPart: the destination model part where we want to know the values of the variables
* @param rFixedDomainVariable: the name of the interpolated variable in the origin model part
* @param rMovingDomainVariable: the name of the interpolated variable in the destination model part
* @param node_locator: precomputed bin of nodes of the fixed mesh. It is to be constructed separately @see binbased_nodes_in_element_locator
*/
    /// Interpolate rMovingDomainVariable onto the fixed-mesh nodes by looping
    /// over the MOVING-mesh elements and, for each, locating the fixed nodes
    /// contained in it. Suitable when the two meshes have different resolutions.
    template<class TDataType>
    void MappingFromMovingMesh_VariableMeshes(
        ModelPart& rMoving_ModelPart ,
        ModelPart& rFixed_ModelPart,
        Variable<TDataType>& rMovingDomainVariable ,
        Variable<TDataType>& rFixedDomainVariable,
        BinBasedNodesInElementLocator<TDim>& node_locator //this is a bin of objects which contains the FIXED model part
        )
    {
        KRATOS_TRY

        KRATOS_WATCH("Transfer From Moving Mesh*************************************")

        if (rMoving_ModelPart.NodesBegin()->SolutionStepsDataHas(rMovingDomainVariable) == false)
            KRATOS_THROW_ERROR(std::logic_error, "Add MovingDomain VARIABLE!!!!!! ERROR", "");
        if (rFixed_ModelPart.NodesBegin()->SolutionStepsDataHas(rFixedDomainVariable) == false)
            KRATOS_THROW_ERROR(std::logic_error, "Add FixedDomain VARIABLE!!!!!! ERROR", "");

        //creating an auxiliary list for the new nodes
        for(ModelPart::NodesContainerType::iterator node_it = rFixed_ModelPart.NodesBegin();
                node_it != rFixed_ModelPart.NodesEnd(); ++node_it)
        {
            ClearVariables(node_it, rFixedDomainVariable);
        }

        //defintions for spatial search
        typedef typename BinBasedNodesInElementLocator<TDim>::PointVector PointVector;
        typedef typename BinBasedNodesInElementLocator<TDim>::DistanceVector DistanceVector;

        const std::size_t max_results = 5000;
        Matrix Nmat(max_results,TDim+1);
        boost::numeric::ublas::vector<int> positions(max_results);
        PointVector work_results(max_results);
        DistanceVector work_distances(max_results);
        Node<3> work_point(0,0.0,0.0,0.0);

        for(ModelPart::ElementsContainerType::iterator elem_it = rMoving_ModelPart.ElementsBegin(); elem_it != rMoving_ModelPart.ElementsEnd(); ++elem_it)
        {
            // Find every fixed-mesh node lying inside this moving element,
            // together with its shape-function values (one row of Nmat each)
            std::size_t nfound = node_locator.FindNodesInElement(*(elem_it.base()), positions, Nmat, max_results, work_results.begin(), work_distances.begin(), work_point);
            for(std::size_t k=0; k<nfound; k++)
            {
                auto it = work_results.begin() + positions[k];
                array_1d<double,TDim+1> N = row(Nmat,k);
                Interpolate( *(elem_it.base()), N, *it, rMovingDomainVariable , rFixedDomainVariable);
            }
        }
        KRATOS_CATCH("")
    }
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
    /// Turn back information as a string.
    virtual std::string Info() const
    {
        return "";
    }

    /// Print information about this object.
    virtual void PrintInfo(std::ostream& rOStream) const {}

    /// Print object's data.
    virtual void PrintData(std::ostream& rOStream) const {}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member rVariables
///@{
///@}
///@name Protected member rVariables
///@{ template<class T, std::size_t dim>
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member rVariables
///@{
///@}
///@name Member rVariables
///@{
inline void CalculateCenterAndSearchRadius(GeometryType&geom,
double& xc, double& yc, double& zc, double& R, array_1d<double,3>& N
)
{
double x0 = geom[0].X();
double y0 = geom[0].Y();
double x1 = geom[1].X();
double y1 = geom[1].Y();
double x2 = geom[2].X();
double y2 = geom[2].Y();
xc = 0.3333333333333333333*(x0+x1+x2);
yc = 0.3333333333333333333*(y0+y1+y2);
zc = 0.0;
double R1 = (xc-x0)*(xc-x0) + (yc-y0)*(yc-y0);
double R2 = (xc-x1)*(xc-x1) + (yc-y1)*(yc-y1);
double R3 = (xc-x2)*(xc-x2) + (yc-y2)*(yc-y2);
R = R1;
if(R2 > R) R = R2;
if(R3 > R) R = R3;
R = 1.01 * sqrt(R);
}
//***************************************
//***************************************
inline void CalculateCenterAndSearchRadius(GeometryType&geom,
double& xc, double& yc, double& zc, double& R, array_1d<double,4>& N
)
{
double x0 = geom[0].X();
double y0 = geom[0].Y();
double z0 = geom[0].Z();
double x1 = geom[1].X();
double y1 = geom[1].Y();
double z1 = geom[1].Z();
double x2 = geom[2].X();
double y2 = geom[2].Y();
double z2 = geom[2].Z();
double x3 = geom[3].X();
double y3 = geom[3].Y();
double z3 = geom[3].Z();
xc = 0.25*(x0+x1+x2+x3);
yc = 0.25*(y0+y1+y2+y3);
zc = 0.25*(z0+z1+z2+z3);
double R1 = (xc-x0)*(xc-x0) + (yc-y0)*(yc-y0) + (zc-z0)*(zc-z0);
double R2 = (xc-x1)*(xc-x1) + (yc-y1)*(yc-y1) + (zc-z1)*(zc-z1);
double R3 = (xc-x2)*(xc-x2) + (yc-y2)*(yc-y2) + (zc-z2)*(zc-z2);
double R4 = (xc-x3)*(xc-x3) + (yc-y3)*(yc-y3) + (zc-z3)*(zc-z3);
R = R1;
if(R2 > R) R = R2;
if(R3 > R) R = R3;
if(R4 > R) R = R4;
R = sqrt(R);
}
//***************************************
//***************************************
inline double CalculateVol( const double x0, const double y0,
const double x1, const double y1,
const double x2, const double y2
)
{
return 0.5*( (x1-x0)*(y2-y0)- (y1-y0)*(x2-x0) );
}
//***************************************
//***************************************
inline double CalculateVol( const double x0, const double y0, const double z0,
const double x1, const double y1, const double z1,
const double x2, const double y2, const double z2,
const double x3, const double y3, const double z3
)
{
double x10 = x1 - x0;
double y10 = y1 - y0;
double z10 = z1 - z0;
double x20 = x2 - x0;
double y20 = y2 - y0;
double z20 = z2 - z0;
double x30 = x3 - x0;
double y30 = y3 - y0;
double z30 = z3 - z0;
double detJ = x10 * y20 * z30 - x10 * y30 * z20 + y10 * z20 * x30 - y10 * x20 * z30 + z10 * x20 * y30 - z10 * y20 * x30;
return detJ*0.1666666666666666666667;
//return 0.5*( (x1-x0)*(y2-y0)- (y1-y0)*(x2-x0) );
}
//***************************************
//***************************************
    /// Barycentric coordinates N of the point (xc,yc) w.r.t. a 2D triangle.
    /// Returns true iff the point lies inside the triangle (all N in [0,1]).
    /// The zc argument is unused in this 2D overload.
    inline bool CalculatePosition( GeometryType&geom,
                                   const double xc, const double yc, const double zc,
                                   array_1d<double,3>& N
                                 )
    {
        double x0 = geom[0].X();
        double y0 = geom[0].Y();
        double x1 = geom[1].X();
        double y1 = geom[1].Y();
        double x2 = geom[2].X();
        double y2 = geom[2].Y();

        double area = CalculateVol(x0,y0,x1,y1,x2,y2);
        double inv_area = 0.0;
        /* NOTE(review): exact ==0.0 comparison here, while the 3D overload
           rejects vol < 1e-13 -- confirm whether near-degenerate triangles
           should be rejected here too */
        if(area == 0.0)
        {
            //             KRATOS_THROW_ERROR(std::logic_error,"element with zero area found","");
            //The interpolated node will not be inside an element with zero area
            return false;
        }
        else
        {
            inv_area = 1.0 / area;
        }

        // sub-triangle areas divided by the total area
        N[0] = CalculateVol(x1,y1,x2,y2,xc,yc) * inv_area;
        N[1] = CalculateVol(x2,y2,x0,y0,xc,yc) * inv_area;
        N[2] = CalculateVol(x0,y0,x1,y1,xc,yc) * inv_area;

        if(N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <=1.0 && N[1]<= 1.0 && N[2] <= 1.0) //if the xc yc is inside the triangle return true
            return true;

        return false;
    }
//***************************************
//***************************************
    /// Barycentric coordinates N of the point (xc,yc,zc) w.r.t. a tetrahedron.
    /// Returns true iff the point lies inside (all N in [0,1]).
    inline bool CalculatePosition( GeometryType&geom,
                                   const double xc, const double yc, const double zc,
                                   array_1d<double,4>& N
                                 )
    {
        double x0 = geom[0].X();
        double y0 = geom[0].Y();
        double z0 = geom[0].Z();
        double x1 = geom[1].X();
        double y1 = geom[1].Y();
        double z1 = geom[1].Z();
        double x2 = geom[2].X();
        double y2 = geom[2].Y();
        double z2 = geom[2].Z();
        double x3 = geom[3].X();
        double y3 = geom[3].Y();
        double z3 = geom[3].Z();

        double vol = CalculateVol(x0,y0,z0,x1,y1,z1,x2,y2,z2,x3,y3,z3);
        double inv_vol = 0.0;
        /* reject (near-)degenerate tetrahedra; note this threshold also
           rejects negative volumes, i.e. inverted node orderings */
        if(vol < 0.0000000000001)
        {
            //             KRATOS_THROW_ERROR(std::logic_error,"element with zero vol found","");
            //The interpolated node will not be inside an element with zero volume
            return false;
            //             KRATOS_WATCH("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
        }
        else
        {
            inv_vol = 1.0 / vol;
        }

        // sub-tetrahedron volumes divided by the total volume
        N[0] = CalculateVol(x1,y1,z1,x3,y3,z3,x2,y2,z2,xc,yc,zc) * inv_vol;
        N[1] = CalculateVol(x3,y3,z3,x0,y0,z0,x2,y2,z2,xc,yc,zc) * inv_vol;
        N[2] = CalculateVol(x3,y3,z3,x1,y1,z1,x0,y0,z0,xc,yc,zc) * inv_vol;
        N[3] = CalculateVol(x0,y0,z0,x1,y1,z1,x2,y2,z2,xc,yc,zc) * inv_vol;

        if(N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >=0.0 &&
                N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <=1.0)
            //if the xc yc zc is inside the tetrahedron return true
            return true;

        return false;
    }
//ElemI Element iterator
//N Shape functions
//step_data_size
//pnode pointer to the node
//projecting total model part 2Dversion
    /// Interpolate ALL solution-step data (a raw buffer of step_data_size
    /// doubles, for every step in the node's buffer) from the element's nodes
    /// onto pnode, weighted by the shape-function values N.
    /// @param ElemIt          origin element containing the point
    /// @param N               shape-function values at the point (one per node)
    /// @param step_data_size  number of doubles stored per solution step
    /// @param pnode           destination node (its data is overwritten)
    void Interpolate(
        Element::Pointer ElemIt,
        const Vector& N,
        int step_data_size,
        NodeType::Pointer pnode)
    {
        //Geometry element of the rOrigin_ModelPart
        GeometryType& geom = ElemIt->GetGeometry();
        const std::size_t buffer_size = pnode->GetBufferSize();
        const std::size_t vector_size = N.size();

        for(std::size_t step = 0; step<buffer_size; step++) {
            //getting the data of the solution step
            double* step_data = (pnode)->SolutionStepData().Data(step);
            double* node0_data = geom[0].SolutionStepData().Data(step);

            // initialize with the first node's weighted contribution ...
            for(int j= 0; j< step_data_size; j++) {
                step_data[j] = N[0]*node0_data[j];
            }
            // ... then accumulate the contributions of the remaining nodes
            for(std::size_t k= 1; k< vector_size; k++) {
                double* node1_data = geom[k].SolutionStepData().Data(step);
                for(int j= 0; j< step_data_size; j++) {
                    step_data[j] += N[k]*node1_data[j];
                }
            }
        }
        //         pnode->GetValue(IS_VISITED) = 1.0;
    }
//projecting an array1D 2Dversion
    /// Interpolate a 3-component (array_1d) variable from the element's nodes
    /// onto pnode, for every buffered solution step, weighted by N.
    void Interpolate(
        Element::Pointer ElemIt,
        const Vector& N,
        NodeType::Pointer pnode,
        Variable<array_1d<double,3> >& rOriginVariable,
        Variable<array_1d<double,3> >& rDestinationVariable)
    {
        //Geometry element of the rOrigin_ModelPart
        GeometryType& geom = ElemIt->GetGeometry();
        const std::size_t buffer_size = pnode->GetBufferSize();
        const std::size_t vector_size = N.size();

        for(std::size_t step = 0; step<buffer_size; step++) {
            //getting the data of the solution step
            array_1d<double,3>& step_data = (pnode)->FastGetSolutionStepValue(rDestinationVariable , step);

            // start from the first node's weighted value ...
            step_data = N[0] * geom[0].FastGetSolutionStepValue(rOriginVariable , step);

            // ... then add the contributions of the remaining nodes
            for(std::size_t j= 1; j< vector_size; j++) {
                const array_1d<double,3>& node_data = geom[j].FastGetSolutionStepValue(rOriginVariable , step);
                step_data += N[j] * node_data;
            }
        }
        //         pnode->GetValue(IS_VISITED) = 1.0;
    }
//projecting a scalar 2Dversion
    /// Interpolate a scalar variable from the element's nodes onto pnode,
    /// for every buffered solution step, weighted by N.
    void Interpolate(
        Element::Pointer ElemIt,
        const Vector& N,
        NodeType::Pointer pnode,
        Variable<double>& rOriginVariable,
        Variable<double>& rDestinationVariable)
    {
        //Geometry element of the rOrigin_ModelPart
        GeometryType& geom = ElemIt->GetGeometry();
        const std::size_t buffer_size = pnode->GetBufferSize();
        const std::size_t vector_size = N.size();

        // (translated from Italian) when looping over the time steps, how does
        // step_data keep the data of the previous step? i.e. where do we pass
        // the information on to the nodes?
        for(std::size_t step = 0; step<buffer_size; step++) {
            //getting the data of the solution step
            double& step_data = (pnode)->FastGetSolutionStepValue(rDestinationVariable , step);

            // start from the first node's weighted value ...
            step_data = N[0] * geom[0].FastGetSolutionStepValue(rOriginVariable , step);

            // ... then add the contributions of the remaining nodes
            for(std::size_t j= 1; j< vector_size; j++) {
                const double node_data = geom[j].FastGetSolutionStepValue(rOriginVariable , step);
                step_data += N[j] * node_data;
            }
        }
        //         pnode->GetValue(IS_VISITED) = 1.0;
    }
    /// Zero the ENTIRE raw solution-step data buffer of a node
    /// (step_data_size doubles for each buffered step).
    inline void Clear(ModelPart::NodesContainerType::iterator node_it, int step_data_size )
    {
        std::size_t buffer_size = node_it->GetBufferSize();

        for(std::size_t step = 0; step<buffer_size; step++)
        {
            //getting the data of the solution step
            double* step_data = (node_it)->SolutionStepData().Data(step);

            //zero every entry of this solution step
            for(int j= 0; j< step_data_size; j++)
            {
                step_data[j] = 0.0;
            }
        }
    }
inline void ClearVariables(ModelPart::NodesContainerType::iterator node_it , Variable<array_1d<double,3> >& rVariable)
{
array_1d<double, 3>& Aux_var = node_it->FastGetSolutionStepValue(rVariable, 0);
noalias(Aux_var) = ZeroVector(3);
}
inline void ClearVariables(ModelPart::NodesContainerType::iterator node_it, Variable<double>& rVariable)
{
double& Aux_var = node_it->FastGetSolutionStepValue(rVariable, 0);
Aux_var = 0.0;
}
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
BinBasedMeshTransfer& operator=(BinBasedMeshTransfer const& rOther);
///@}
}; // Class BinBasedMeshTransfer
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// output stream function
/// Stream-insertion operator: prints the transfer object's info and data.
template<std::size_t TDim>
inline std::ostream& operator << (std::ostream& rOStream,
                                  const BinBasedMeshTransfer<TDim>& rThis)
{
    rThis.PrintInfo(rOStream);
    rOStream << std::endl;
    rThis.PrintData(rOStream);

    return rOStream;
}
///@}
} // namespace Kratos.
#endif // KRATOS_BINBASED_PROJECTION defined
|
FasterGossipCommMulti.h | /* Copyright 2020 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#pragma once
#include "FasterGossipCommMultiTraits.h"
#include "mpi.h"
#include <ucp/api/ucp.h>
#include <hwloc.h>
#include <hwloc/cudart.h>
#include <algorithm>
#include <omp.h>
#define WARM_UP_ROUND 2
namespace FasterGossipCommMulti{
// The empty call back function for UCP communication API
// No-op completion callbacks required by the UCP tagged send/recv API;
// completion is detected by polling the returned request, so the callbacks
// themselves have nothing to do.
void empty_send_callback_func (void * request, ucs_status_t status) {}
void empty_recv_callback_func (void * request, ucs_status_t status, ucp_tag_recv_info_t *info) {}
template<typename data_t_, typename GossipMultiCommTraits>
class FasterGossipCommMulti;
template<typename data_t_>
class FasterGossipCommMulti<data_t_, FasterGossipCommMultiAll2AllTraits<data_t_>>{
public:
using GossipMultiCommTraits = FasterGossipCommMultiAll2AllTraits<data_t_>;
using FasterGossipComm = typename GossipMultiCommTraits::FasterGossipComm;
using gpu_id_t = typename GossipMultiCommTraits::gpu_id_t;
// Ctor: builds one local gossip all2all handle per pipeline stage, brings up
// one UCP context/worker per CPU socket (thread-bound during init so each
// worker's resources are socket-local), exchanges worker addresses across all
// MPI ranks, creates endpoints from every local worker to every (rank, socket)
// worker, and records the CPU-socket affinity of every GPU on every rank.
FasterGossipCommMulti(const std::string& plan_file, const std::vector<gpu_id_t>& GPU_list,
const int num_proc, const int rank, MPI_Comm comm)
: GPU_list_(GPU_list), rank_(rank), num_proc_(num_proc), comm_(comm), GossipCommHandle_(num_proc_),
local_buffer_(GPU_list_.size()), recv_buffer_(GPU_list_.size()), temp_buf_(GPU_list_.size()),
temp_table_( GPU_list_.size() , std::vector<size_t>( GPU_list_.size() ) ),
temp_src_(GPU_list_.size()), temp_dst_(GPU_list_.size()), affinity_list_(num_proc_),
send_reqs_(GPU_list_.size(), nullptr), recv_reqs_(GPU_list_.size(), nullptr){
// Sanity checks on the MPI layout
assert( (num_proc_ > 0) && "The number of process is not greater than 0!\n" );
assert( (rank_ >= 0) && (rank_ < num_proc_) && "The rank of this process is not valid!\n" );
// Local and total GPU count
num_local_gpu_ = GPU_list_.size();
num_total_gpu_ = num_proc_ * num_local_gpu_;
assert( (num_local_gpu_ > 0) && "The number of local GPUs is not valid!\n" );
// Create MPI_Request buffer (kept for reference; UCX path replaced MPI_Isend/Irecv)
//request_ = (MPI_Request* )malloc(2 * num_local_gpu_ * sizeof(MPI_Request));
// Construct one local gossip all2all object per stage
for(int stage = 0; stage < num_proc_; stage++){
GossipCommHandle_[stage] = new FasterGossipComm(plan_file, GPU_list_);
}
// HWLOC variable setup
hwloc_topology_init(&topo_);
hwloc_topology_set_io_types_filter(topo_, HWLOC_TYPE_FILTER_KEEP_ALL);
hwloc_topology_load(topo_);
hwloc_cpuset_t ori_cpu_set;
hwloc_cpuset_t cpu_set;
ori_cpu_set = hwloc_bitmap_alloc();
cpu_set = hwloc_bitmap_alloc();
// Get the original thread binding for recovery after per-socket init
hwloc_get_cpubind(topo_, ori_cpu_set, HWLOC_CPUBIND_THREAD);
// Get the number of CPU sockets and resize the UCP vectors accordingly
socket_num_ = hwloc_get_nbobjs_by_type(topo_, HWLOC_OBJ_PACKAGE);
assert( (socket_num_ > 0) && "The number of CPU sockets is not valid!\n" );
// Temp variables used to initialize the UCP environment
ucp_params_t ucp_params;
ucp_config_t *ucp_config;
ucp_worker_params_t ucp_worker_params;
size_t ucp_worker_address_len;
std::vector<ucp_ep_params_t> ucp_ep_params(socket_num_ * num_proc_);
ucp_context_.resize(socket_num_);
ucp_worker_.resize(socket_num_);
ucp_worker_address_.resize(socket_num_);
ucp_worker_address_book_.resize(socket_num_ * num_proc_);
ucp_endpoints_.resize(socket_num_, std::vector<ucp_ep_h>(socket_num_ * num_proc_));
// Initialize a UCP environment on each CPU socket
for( int i = 0; i < socket_num_; i++){
// Bind the current thread to run on the target CPU socket so UCP
// allocations land on that socket's memory
hwloc_obj_t current_socket = hwloc_get_obj_by_type(topo_, HWLOC_OBJ_PACKAGE, i);
hwloc_set_cpubind(topo_, current_socket->cpuset, HWLOC_CPUBIND_THREAD);
// Report where the current thread is actually running (debug aid)
hwloc_get_last_cpu_location(topo_, cpu_set, HWLOC_CPUBIND_THREAD);
char * cpu_string;
hwloc_bitmap_asprintf(&cpu_string, cpu_set);
printf("On rank %d, the cpu set that current thread is running on is : %s.\n", rank_, cpu_string);
free(cpu_string);
// Initialize UCP context (tag-matching feature only)
memset(&ucp_params, 0, sizeof(ucp_params));
ucp_params.field_mask = UCP_PARAM_FIELD_FEATURES | UCP_PARAM_FIELD_ESTIMATED_NUM_EPS;
ucp_params.features = UCP_FEATURE_TAG;
ucp_params.estimated_num_eps = socket_num_ * num_proc_;
ucp_config_read(NULL, NULL, &ucp_config);
ucp_init(&ucp_params, ucp_config, &ucp_context_[i]);
ucp_config_release(ucp_config);
// Initialize UCP worker
memset(&ucp_worker_params, 0, sizeof(ucp_worker_params));
ucp_worker_params.field_mask = UCP_WORKER_PARAM_FIELD_THREAD_MODE;
ucp_worker_params.thread_mode = UCS_THREAD_MODE_SINGLE; // only single thread can access this worker at one time, i.e. no thread safety.
ucp_worker_create(ucp_context_[i], &ucp_worker_params, &ucp_worker_[i]);
// Get address for local worker
ucp_worker_get_address(ucp_worker_[i], &ucp_worker_address_[i], &ucp_worker_address_len);
}
// Recover the CPU binding of current thread
hwloc_set_cpubind(topo_, ori_cpu_set, HWLOC_CPUBIND_THREAD);
// Create EPs for local worker
// Allocate address storage for all (local and remote) workers.
// NOTE(review): this reuses ucp_worker_address_len from the *last* worker
// queried, i.e. it assumes all workers report the same address length —
// TODO confirm this holds for the deployed UCX configuration.
for (auto & iaddress: ucp_worker_address_book_) {
iaddress = (ucp_address_t *)malloc(ucp_worker_address_len);
}
// Copy local worker addresses into this rank's slots of the address table
for(int i = 0; i < socket_num_; i++){
memcpy(ucp_worker_address_book_[rank_ * socket_num_ + i], ucp_worker_address_[i], ucp_worker_address_len);
}
// Using MPI to broadcast addresses from all ranks to all ranks (all-broadcast)
for (int iroot = 0; iroot < num_proc_; iroot++) {
for( int i = 0; i < socket_num_; i++){
MPI_Bcast(ucp_worker_address_book_[iroot * socket_num_ + i], ucp_worker_address_len, MPI_BYTE, iroot, comm_);
}
}
// Create EPs on each local worker to all workers (including itself)
for(int socket = 0; socket < socket_num_; socket++){
// EP params only need to be filled in once (first socket iteration)
for(int i = 0; i < socket_num_ * num_proc_; i++){
// Only need to set once
if( socket == 0 ){
memset(&ucp_ep_params[i], 0, sizeof(ucp_ep_params[i]));
ucp_ep_params[i].field_mask = UCP_EP_PARAM_FIELD_REMOTE_ADDRESS;
ucp_ep_params[i].address = ucp_worker_address_book_[i];
}
ucp_ep_create(ucp_worker_[socket], &ucp_ep_params[i], &ucp_endpoints_[socket][i]);
}
}
// Allocate affinity list for all GPUs on all nodes
for(int i = 0; i < num_proc_; i++){
affinity_list_[i] = (gpu_id_t *)malloc(num_local_gpu_ * sizeof(*affinity_list_[i]));
}
// Assign each local T-GPU to its affinity L-socket (logical socket index)
for(int i = 0; i < num_local_gpu_; i++){
// Find the affinity CPU set that the current topo GPU is bound to
hwloc_cudart_get_device_cpuset(topo_, GPU_list_[i], cpu_set);
hwloc_obj_t affinity_socket = hwloc_get_next_obj_covering_cpuset_by_type(topo_, cpu_set, HWLOC_OBJ_PACKAGE, NULL);
affinity_list_[rank_][i] = (gpu_id_t)(affinity_socket -> logical_index);
}
// Using MPI to broadcast GPU locality info to all other ranks
for (int iroot = 0; iroot < num_proc_; iroot++) {
MPI_Bcast(affinity_list_[iroot], num_local_gpu_ * sizeof(*affinity_list_[iroot]), MPI_BYTE, iroot, comm_);
}
hwloc_bitmap_free(ori_cpu_set);
hwloc_bitmap_free(cpu_set);
}
// Dtor: tears down resources in reverse dependency order — gossip handles,
// then EPs (flushed via ucp_ep_close_nb), then, after an MPI barrier so no
// peer still targets our workers, addresses, workers, contexts, the address
// book, the hwloc topology, and the affinity lists.
~FasterGossipCommMulti(){
//free(request_);
for(int stage = 0; stage < num_proc_; stage++){
delete GossipCommHandle_[stage];
}
// Release UCP EPs
for(int socket = 0; socket < socket_num_; socket++){
for (int irank = 0; irank < socket_num_ * num_proc_; irank++) {
// Flush all operations associated with the EP and release the EP
ucs_status_ptr_t ucs_status_ptr = ucp_ep_close_nb(ucp_endpoints_[socket][irank], UCP_EP_CLOSE_MODE_FLUSH);
// Non-pointer return (error or immediate UCS_OK) means there is no
// request object to progress or free
if(UCS_PTR_IS_ERR(ucs_status_ptr) || UCS_PTR_STATUS(ucs_status_ptr) == UCS_OK){
continue;
}
// While the releasing is not finished, progress all workers
while(ucp_request_check_status(ucs_status_ptr) == UCS_INPROGRESS){
for(int j = 0; j < socket_num_; j++){
ucp_worker_progress( ucp_worker_[j] );
}
}
// Free the request
ucp_request_free( ucs_status_ptr );
}
}
// Wait for all ranks to release EPs before releasing any worker
MPI_Barrier(comm_);
// Release worker addresses
for( int i = 0; i < socket_num_; i++){
ucp_worker_release_address(ucp_worker_[i], ucp_worker_address_[i]);
}
// Release workers
for( int i = 0; i < socket_num_; i++){
ucp_worker_destroy(ucp_worker_[i]);
}
// Release UCP contexts
for( int i = 0; i < socket_num_; i++){
ucp_cleanup(ucp_context_[i]);
}
// Free address book
for (auto & iaddress : ucp_worker_address_book_) {
free(iaddress);
}
// Free HWLOC topology
hwloc_topology_destroy(topo_);
// Free GPU affinity lists
for(int i = 0; i < num_proc_; i++){
free(affinity_list_[i]);
}
}
// Initialize a communication round.
// src/dst: per-local-GPU device buffers; send_table/recv_table: element counts
// indexed [local_gpu][global_gpu]. Sizes the local/recv staging buffers to the
// worst-case per-stage volume, configures each stage's gossip all2all (stage 0
// operates on local data; stage s>0 consumes data received from the node
// num_proc_-s hops behind), allocates the shared gossip temp buffers, and runs
// warm-up exec() rounds.
void Initialize(const std::vector<data_t_ *>& src,
const std::vector<data_t_ *>& dst,
const std::vector<std::vector<size_t>>& send_table,
const std::vector<std::vector<size_t>>& recv_table){
// Device restorer: puts the caller's CUDA device back on scope exit
FasterGossipCommUtil::CudaDeviceRestorer dev_restorer;
// Record user-provided data
src_ = src;
dst_ = dst;
send_table_ = send_table;
recv_table_ = recv_table;
// Calculate the size of Local buffers and Recv buffers, and allocate on each local GPU.
// The size is the max over remote nodes of the total elements staged on GPU i.
for(int i = 0; i < num_local_gpu_; i++){
size_t max_size = 0;
for (int j = 0; j < num_proc_; j++){
if(j != rank_){
size_t accum_size = 0;
for(int k = 0; k < num_local_gpu_; k++){
accum_size += recv_table_[k][i + j * num_local_gpu_];
}
max_size = std::max(max_size, accum_size);
}
}
// Allocate buffers on current topo GPU
CUDA_CHECK( cudaSetDevice( GPU_list_[i] ) );
CUDA_CHECK( cudaMalloc( &local_buffer_[i], sizeof(data_t_) * max_size ) );
CUDA_CHECK( cudaMalloc( &recv_buffer_[i], sizeof(data_t_) * max_size) );
}
// Max buffer size required by gossip all2all on each GPU
std::vector<size_t> max_temp_buf_size(num_local_gpu_, 0);
// Initialize all gossip all2all objects
for(int stage = 0; stage < num_proc_; stage++){
// For the first stage, do all2all on local data
if(stage == 0){
// Extract the temp table for local all2all on this stage
for(int i = 0; i < num_local_gpu_; i++){
for(int j = 0; j < num_local_gpu_; j++){
temp_table_[i][j] = recv_table_[j][rank_ * num_local_gpu_ + i];
}
}
// Extract the temp src and dst buffers for local all2all on this stage
for(int i = 0; i < num_local_gpu_; i++){
size_t src_offset = 0;
size_t dst_offset = 0;
for(int j = 0; j < num_local_gpu_ * rank_; j++){
src_offset += send_table_[i][j];
dst_offset += recv_table_[i][j];
}
temp_src_[i] = src_[i] + src_offset;
temp_dst_[i] = dst_[i] + dst_offset;
}
// Initialize the local all2all
std::vector<size_t> temp_buf_size = GossipCommHandle_[stage]->Initialize_no_malloc(temp_src_, temp_dst_, temp_table_);
// Find the largest buffer size needed on each GPU
for(int i = 0; i < num_local_gpu_; i++){
max_temp_buf_size[i] = std::max(temp_buf_size[i], max_temp_buf_size[i]);
}
}
// For later stages, do all2all with data received from the previous stage
else{
// The node whose data arrived during the previous stage
int prev_src_node = (rank_ + num_proc_ - stage) % num_proc_;
// Extract the temp table for local all2all on this stage
for(int i = 0; i < num_local_gpu_; i++){
for(int j = 0; j < num_local_gpu_; j++){
temp_table_[i][j] = recv_table_[j][prev_src_node * num_local_gpu_ + i];
}
}
// Extract the temp dst buffers for local all2all on this stage
for(int i = 0; i < num_local_gpu_; i++){
size_t dst_offset = 0;
for(int j = 0; j < num_local_gpu_ * prev_src_node; j++){
dst_offset += recv_table_[i][j];
}
temp_dst_[i] = dst_[i] + dst_offset;
}
std::vector<size_t> temp_buf_size;
// Initialize the local all2all. Source alternates between local_buffer_
// and recv_buffer_ because exec() swaps the two pointers every stage.
if(stage % 2 == 0){
temp_buf_size = GossipCommHandle_[stage]->Initialize_no_malloc(local_buffer_, temp_dst_, temp_table_);
}
else{
temp_buf_size = GossipCommHandle_[stage]->Initialize_no_malloc(recv_buffer_, temp_dst_, temp_table_);
}
// Find the largest buffer size needed on each GPU
for(int i = 0; i < num_local_gpu_; i++){
max_temp_buf_size[i] = std::max(temp_buf_size[i], max_temp_buf_size[i]);
}
}
}
// Allocate max-size temp buffers shared by all gossip all2all objects
for(int i = 0; i < num_local_gpu_; i++){
// Allocate temp buffers on each GPU
CUDA_CHECK( cudaSetDevice( GPU_list_[i] ) );
CUDA_CHECK( cudaMalloc( &temp_buf_[i], sizeof(data_t_) * max_temp_buf_size[i] ) );
}
// Set the allocated temp buffers to all gossip all2all objects
for(int stage = 0; stage < num_proc_; stage++){
GossipCommHandle_[stage]->set_buf(temp_buf_);
}
// Run exec() in advance to warm up all buffers used by UCX.
// For an even node count, 1 run is enough; for an odd count, 2 runs are needed.
for(int i = 0; i < WARM_UP_ROUND; i++){
exec();
}
}
// Run the multi-node all2all: num_proc_ pipelined stages. In every stage one
// thread runs the local gossip all2all while a second thread does UCX P2P with
// the neighbor ranks; the staging buffers are ping-ponged between stages.
void exec(){
// Loop through all stages
for(int stage = 0; stage < num_proc_; stage++){
// We use 2 threads: one for UCX P2P, one for gossip all2all. Within the same
// stage these 2 operations can be executed concurrently.
// NOTE(review): listing class members (num_proc_, rank_, ...) in an OpenMP
// shared() clause is non-conforming — members are accessed via 'this' —
// and some compilers reject it; verify against the supported toolchains.
#pragma omp parallel default(none) shared(stage, num_proc_, rank_, num_local_gpu_, \
send_table_, affinity_list_, send_reqs_, ucp_endpoints_,\
socket_num_, src_, recv_table_, recv_reqs_, \
ucp_worker_, recv_buffer_,\
GossipCommHandle_) num_threads(2)
{
// Each thread grabs its ID within this OpenMP thread team
int thread_id = omp_get_thread_num();
// Thread 0 does the gossip all2all
if(thread_id == 0){
// do local all2all
// Execute the local all2all
GossipCommHandle_[stage]->execAsync();
// wait for local all2all to complete
GossipCommHandle_[stage]->sync();
}
// Thread 1 does the UCX P2P
else{
// For all stages except the last, send and receive data to/from other nodes
if(stage < num_proc_ -1){
// The dst and src rank of the local node in this stage (ring schedule)
int dst_rank = (rank_ + stage + 1) % num_proc_;
int src_rank = (rank_ + num_proc_ - stage - 1) % num_proc_;
// Loop through all local GPUs to send GPU buffers to the dst worker
for(int i = 0; i < num_local_gpu_; i++){
size_t src_offset = 0;
size_t src_len = 0;
// Accumulate the offset within the src_buffer
for(int j = 0; j < num_local_gpu_ * dst_rank; j++){
src_offset += send_table_[i][j];
}
// Accumulate the amount of elements to send to the target node
for(int j = 0; j < num_local_gpu_; j++){
src_len += send_table_[i][j + num_local_gpu_ * dst_rank];
}
//MPI_Isend(src_[i] + src_offset, sizeof(data_t_) * src_len, MPI_BYTE, dst_rank, i, comm_, request_ + i);
// Prepare the tag for tag-matching message passing; the tag identifies the user tag, the source worker of the message and other info
ucp_tag_t comm_tag = 0LLU;
// Bits 32+ carry the original MPI tag (local GPU index)
comm_tag |= ((ucp_tag_t)i << 32);
// Bits 16-31 are the source rank
comm_tag |= ((ucp_tag_t)(rank_ & 0x0000FFFF) << 16);
// Bits 0-15 are the source L-socket (worker)
comm_tag |= (((ucp_tag_t)(affinity_list_[rank_][i])) & 0x000000000000FFFF);
send_reqs_[i] = ucp_tag_send_nb(
ucp_endpoints_[affinity_list_[rank_][i]][dst_rank * socket_num_ + affinity_list_[dst_rank][i]], src_[i] + src_offset,
sizeof(data_t_) * src_len, ucp_dt_make_contig(sizeof(char)),
comm_tag, empty_send_callback_func
);
// If the returned request is not a valid pointer, that means that the operation already finished(failed or completed), the callback will not been
// called in these situation and the returned request is not de-referencable thus no release needed.
if(UCS_PTR_IS_ERR(send_reqs_[i]) || UCS_PTR_STATUS(send_reqs_[i]) == UCS_OK){
send_reqs_[i] = nullptr;
}
}
// Loop through all local GPUs to receive GPU buffers from the src worker
for(int i = 0; i < num_local_gpu_; i++){
size_t dst_len = 0;
// Accumulate the amount of elements to receive from the source node
for(int j = 0; j < num_local_gpu_; j++){
dst_len += recv_table_[j][i + src_rank * num_local_gpu_];
}
//MPI_Irecv(recv_buffer_[i], sizeof(data_t_) * dst_len, MPI_BYTE, src_rank, i, comm_, request_ + num_local_gpu_ +i);
// Prepare the tag for tag-matching message passing, mirroring the sender's layout
ucp_tag_t comm_tag = 0LLU;
// Bits 32+ carry the original MPI tag (local GPU index)
comm_tag |= ((ucp_tag_t)i << 32);
// Bits 16-31 are the source rank
comm_tag |= ((ucp_tag_t)(src_rank & 0x0000FFFF) << 16);
// Bits 0-15 are the source L-socket (worker)
comm_tag |= (((ucp_tag_t)(affinity_list_[src_rank][i])) & 0x000000000000FFFF);
recv_reqs_[i] = ucp_tag_recv_nb(
ucp_worker_[affinity_list_[rank_][i]], recv_buffer_[i],
sizeof(data_t_) * dst_len, ucp_dt_make_contig(sizeof(char)),
comm_tag, (ucp_tag_t)-1, empty_recv_callback_func
);
// The same as send, but the recv API never returns UCS_OK; only UCS_ERR_xx or a valid pointer can be returned
if(UCS_PTR_IS_ERR(recv_reqs_[i])){
recv_reqs_[i] = nullptr;
}
}
}
// For all stages except the last, wait for the UCX communication to finish
if(stage < num_proc_ -1){
// Wait for all sends to finish
for(int i = 0; i < num_local_gpu_; i++){
// If the current operation is not completed yet, progress it
while( send_reqs_[i] != nullptr && ucp_request_check_status(send_reqs_[i]) == UCS_INPROGRESS){
for(int j = 0; j < socket_num_; j++){
ucp_worker_progress( ucp_worker_[j] );
}
}
}
// Wait for all receives to finish
for(int i = 0; i < num_local_gpu_; i++){
// If the current operation is not completed yet, progress it
while( recv_reqs_[i] != nullptr && ucp_request_check_status(recv_reqs_[i]) == UCS_INPROGRESS){
for(int j = 0; j < socket_num_; j++){
ucp_worker_progress( ucp_worker_[j] );
}
}
}
// De-allocate the UCP requests before going to the next round
for(int i = 0; i < num_local_gpu_; i++){
if(send_reqs_[i] != nullptr){
ucp_request_free( send_reqs_[i] );
send_reqs_[i] = nullptr;
}
if(recv_reqs_[i] != nullptr){
ucp_request_free( recv_reqs_[i] );
recv_reqs_[i] = nullptr;
}
}
//MPI_Waitall(2 * num_local_gpu_, request_, MPI_STATUSES_IGNORE);
}
}
}
// Swap recv_buffer and local_buffer pointers. With an odd node count, do not swap in the last stage
if(num_proc_ % 2 != 0 && stage == num_proc_ - 1){
continue;
}
recv_buffer_.swap(local_buffer_);
}// stage loop
}
// Release the device-side staging and gossip temp buffers allocated by
// Initialize(), making the object ready for the next multi-node all2all.
void reset(){
// Device restorer: puts the caller's CUDA device back on scope exit
FasterGossipCommUtil::CudaDeviceRestorer dev_restorer;
// Free local_buffer and recv_buffer, ready for next multi-node all2all
for(int i = 0; i < num_local_gpu_; i++){
// Free staging buffers on each GPU
CUDA_CHECK( cudaSetDevice( GPU_list_[i] ) );
CUDA_CHECK( cudaFree( local_buffer_[i] ) );
CUDA_CHECK( cudaFree( recv_buffer_[i] ) );
}
// Free gossip all2all temp buffers
for(int i = 0; i < num_local_gpu_; i++){
CUDA_CHECK( cudaSetDevice( GPU_list_[i] ) );
CUDA_CHECK( cudaFree( temp_buf_[i] ) );
}
}
private:
// Local GPU (device ordinal) list
std::vector<gpu_id_t> GPU_list_;
// GPU counts: per-node and across all ranks
gpu_id_t num_local_gpu_;
gpu_id_t num_total_gpu_;
// MPI-related resources
int rank_;
int num_proc_;
MPI_Comm comm_;
//MPI_Request * request_;
// Local gossip all2all handles, one per pipeline stage
std::vector<FasterGossipComm *> GossipCommHandle_;
// Temp local GPU staging buffers for remote data (ping-ponged each stage)
std::vector<data_t_ *> local_buffer_;
std::vector<data_t_ *> recv_buffer_;
// Temp local GPU buffers for local all2all
std::vector<data_t_ *> temp_buf_;
// Buffers and tables provided by users
std::vector<data_t_ *> src_;
std::vector<data_t_ *> dst_;
std::vector<std::vector<size_t>> send_table_;
std::vector<std::vector<size_t>> recv_table_;
// Temp table for local all2all
std::vector<std::vector<size_t>> temp_table_;
// Temp src and dst pointer vectors for local all2all
std::vector<data_t_ *> temp_src_;
std::vector<data_t_ *> temp_dst_;
// CPU socket count
int socket_num_;
// UCP variables (one per CPU socket): UCP context, UCP worker, UCP address, UCP EP and UCP request
std::vector<ucp_context_h> ucp_context_;
std::vector<ucp_worker_h> ucp_worker_;
std::vector<ucp_address_t *> ucp_worker_address_;
std::vector<ucp_address_t *> ucp_worker_address_book_;
std::vector<std::vector<ucp_ep_h>> ucp_endpoints_;
std::vector<ucs_status_ptr_t> send_reqs_;
std::vector<ucs_status_ptr_t> recv_reqs_;
// HWLOC variable: hardware topology handle
hwloc_topology_t topo_;
// Per-rank arrays recording the socket affinity of each GPU in the GPU list
std::vector<gpu_id_t *> affinity_list_;
}; // class
} // namespace
|
GB_unaryop__minv_uint8_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint8_int64
// op(A') function: GB_tran__minv_uint8_int64
// C type: uint8_t
// A type: int64_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 8)
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 8) ;
// casting
#define GB_CASTING(z, x) \
uint8_t z = (uint8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT8 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = GB_IMINV_UNSIGNED ((uint8_t) Ax [p], 8) for all p in [0, anz).
// Returns GrB_NO_VALUE when the operator/type combination is disabled at
// compile time (GB_DISABLE); otherwise GrB_SUCCESS. Parallelized over
// 'nthreads' OpenMP threads with a static schedule.
GrB_Info GB_unop__minv_uint8_int64
(
uint8_t *restrict Cx,
const int64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv ((uint8_t) A'): transpose, typecast int64 -> uint8, and apply the
// unary operator. The loop body lives in GB_unaryop_transpose.c, which is
// textually included here with the GB_* macros above defining the types and
// operator; this file only supplies phase 2 (GB_PHASE_2_OF_2).
GrB_Info GB_tran__minv_uint8_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
SybaseASE_fmt_plug.c | /*
* Unicode conversion enhancements by magnum, 2011. Licensed as below.
*
* Sybase ASE hash support for version 15.0.2 and above, based on hmailserver
* patch by James Nobis.
* Hash format description : http://marcellmajor.com/sybase_sha256.html
* Hacked together by Dhiru Kholia in February, 2011.
*
* This patch Copyright (C) 2010 by James Nobis - quel
* quel NOSPAM quelrod NOSPAM net, and it is herby released to the general
* public under the follow terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
* Inspiration from the generic sha-1 and md5 (Copyright (c) 2010 by Solar Designer)
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_SybaseASE;
#elif FMT_REGISTERS_H
john_register_one(&fmt_SybaseASE);
#else
#include "arch.h"
//#undef _OPENMP
//#undef SIMD_COEF_32
//#undef SIMD_PARA_SHA256
//
//#define FORCE_GENERIC_SHA2 2
#include "sha2.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "unicode.h"
#include "johnswap.h"
#include "simd-intrinsics.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include "memdbg.h"
#define FORMAT_LABEL "SybaseASE"
#define FORMAT_NAME "Sybase ASE"
#define FORMAT_TAG "0xc007"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "SHA256 " SHA256_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 64
#define CIPHERTEXT_LENGTH (6 + 16 + 64)
#define BINARY_SIZE 32
#define BINARY_ALIGN 4
#define SALT_SIZE 8
#define SALT_ALIGN 4
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_32*SIMD_PARA_SHA256)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_32*SIMD_PARA_SHA256)
#ifdef __MIC__
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 512
#endif
#endif // __MIC__
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#ifndef OMP_SCALE
#define OMP_SCALE 256
#endif
#endif
static struct fmt_tests SybaseASE_tests[] = {
{"0xc0074f9cc8c0d55d9803b0c0816e127f2a56ee080230af5b4ce3da1f3d9fcc5449fcfcf3fb9595eb8ea6", "test12"},
{"0xc0074BE393C06BE420AD541671aa5e6f1a19a4a73bb51c59f45790f0887cfb70e0599747c6844d4556b3", "a"},
{NULL}
};
#ifdef SIMD_COEF_32
// note, elements 3-7 are 'nulls', and are not in this array.
static UTF16 (*prep_key)[4][MAX_KEYS_PER_CRYPT][64 / sizeof(UTF16)];
static unsigned char *NULL_LIMB;
static int (*last_len);
static ARCH_WORD_32 (*crypt_cache)[BINARY_SIZE/4];
#else
static UTF16 (*prep_key)[518 / sizeof(UTF16)];
static SHA256_CTX (*prep_ctx);
#endif
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE/4];
static int kpc, dirty;
extern struct fmt_main fmt_SybaseASE;
/*
 * Format init: scale keys-per-crypt by the OpenMP thread count, allocate the
 * prepared-key and output buffers, and set up the SIMD limb constants (0x80
 * padding byte and the 518*8-bit message length in the final limb) or, in the
 * scalar build, the per-key cached SHA256 contexts.
 *
 * Fix: the scalar-path allocation used sizeof(*prep_key) (518 bytes) for the
 * SHA256_CTX array — a copy-paste from the prep_key allocation above that
 * over-allocated every element; it must be sizeof(*prep_ctx).
 */
static void init(struct fmt_main *self)
{
#if _OPENMP || SIMD_COEF_32
	int i;
#endif
#ifdef _OPENMP
	i = omp_get_max_threads();
	self->params.min_keys_per_crypt *= i;
	i *= OMP_SCALE;
	self->params.max_keys_per_crypt *= i;
#endif
	kpc = self->params.max_keys_per_crypt;
	prep_key = mem_calloc_align(sizeof(*prep_key),
	                            self->params.max_keys_per_crypt, MEM_ALIGN_CACHE);
	crypt_out = mem_calloc_align(sizeof(*crypt_out),
	                             self->params.max_keys_per_crypt, MEM_ALIGN_CACHE);
	/* UTF-8 input may need up to 125 bytes to encode 64 UTF-16 units */
	if (options.target_enc == UTF_8)
		fmt_SybaseASE.params.plaintext_length = 125;
	// will simply set SIMD stuff here, even if not 'used'
#ifdef SIMD_COEF_32
	NULL_LIMB = mem_calloc_align(64, MAX_KEYS_PER_CRYPT, MEM_ALIGN_CACHE);
	last_len = mem_calloc_align(sizeof(*last_len), self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	for (i = 0; i < kpc/MAX_KEYS_PER_CRYPT; ++i) {
		int j;
		for (j = 0; j < MAX_KEYS_PER_CRYPT; ++j) {
			/* fixed parts of the final limb: padding byte and bit length */
			prep_key[i][3][j][3] = 0x80;
			prep_key[i][3][j][30] = 518<<3;
		}
	}
	crypt_cache = mem_calloc_align(sizeof(*crypt_cache),
	                               self->params.max_keys_per_crypt, MEM_ALIGN_CACHE);
#else
	/* was sizeof(*prep_key): over-allocated each SHA256_CTX element */
	prep_ctx = mem_calloc(sizeof(*prep_ctx),
	                      self->params.max_keys_per_crypt);
#endif
}
/* Free everything allocated by init(), mirroring its SIMD/scalar split. */
static void done(void)
{
#ifdef SIMD_COEF_32
MEM_FREE(last_len);
MEM_FREE(NULL_LIMB);
MEM_FREE(crypt_cache);
#else
MEM_FREE(prep_ctx);
#endif
MEM_FREE(crypt_out);
MEM_FREE(prep_key);
}
/*
 * Accept only hashes that begin with the "0xc007" tag and whose remainder is
 * exactly CIPHERTEXT_LENGTH - FORMAT_TAG_LEN hex digits with nothing trailing.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	int extra;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		return 0;
	return hexlen(&ciphertext[FORMAT_TAG_LEN], &extra) ==
	        CIPHERTEXT_LENGTH - FORMAT_TAG_LEN && !extra;
}
/*
 * Canonicalize a ciphertext: copy into a static buffer and lowercase it
 * (hashes are case-insensitive hex; see FMT_SPLIT_UNIFIES_CASE).
 */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
static char out[CIPHERTEXT_LENGTH+1];
strnzcpy(out, ciphertext, CIPHERTEXT_LENGTH+1);
strlwr(out);
return out;
}
/*
 * Decode the 32-byte binary hash from the hex part after the tag and the
 * 16 hex digits of salt. Returns a lazily-allocated static buffer.
 */
static void *get_binary(char *ciphertext)
{
static unsigned char *out;
int i;
char *p = ciphertext + FORMAT_TAG_LEN + SALT_SIZE * 2;
if (!out) out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);
for (i = 0; i < BINARY_SIZE; i++) {
out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
/*
 * Decode the 8-byte salt that immediately follows the format tag.
 * The union guarantees the word alignment promised by SALT_ALIGN.
 */
static void *get_salt(char *ciphertext)
{
static union {
unsigned char u8[SALT_SIZE];
ARCH_WORD_32 u32;
} out;
int i;
char *p = ciphertext + FORMAT_TAG_LEN;
for (i = 0; i < sizeof(out.u8); i++) {
out.u8[i] = (atoi16[ARCH_INDEX(*p)] << 4) |atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out.u8;
}
/* Hash-table lookups: mask the first 32 bits of the computed hash with the
 * framework's PH_MASK_n constants (one function per table size). */
static int get_hash_0(int index)
{
return crypt_out[index][0] & PH_MASK_0;
}
static int get_hash_1(int index)
{
return crypt_out[index][0] & PH_MASK_1;
}
static int get_hash_2(int index)
{
return crypt_out[index][0] & PH_MASK_2;
}
static int get_hash_3(int index)
{
return crypt_out[index][0] & PH_MASK_3;
}
static int get_hash_4(int index)
{
return crypt_out[index][0] & PH_MASK_4;
}
static int get_hash_5(int index)
{
return crypt_out[index][0] & PH_MASK_5;
}
static int get_hash_6(int index)
{
return crypt_out[index][0] & PH_MASK_6;
}
/*
 * Store the 8-byte salt at byte offset 510 of every prepared key buffer
 * (the message hashed is 510 bytes of UTF-16BE key area + 8 bytes of salt).
 * In the SIMD layout the salt straddles limb 2 (last 2 bytes, UTF16 slot 31)
 * and limb 3 (first 6 bytes).
 */
static void set_salt(void *salt)
{
int index;
for(index = 0; index < kpc; index++)
{
/* append salt at offset 510 */
#ifdef SIMD_COEF_32
int idx1=index/MAX_KEYS_PER_CRYPT, idx2=index%MAX_KEYS_PER_CRYPT;
memcpy(&prep_key[idx1][2][idx2][31], salt, 2);
memcpy(prep_key[idx1][3][idx2], &((unsigned char*)salt)[2], 6);
#else
memcpy((unsigned char*)prep_key[index] + 510,
(unsigned char*)salt, 8);
#endif
}
}
/*
 * Store one candidate key: convert to UTF-16BE and place it at the start of
 * the prepared 518-byte message. In the SIMD layout the key spans limbs 0 and
 * 1 (32 UTF16 units each); instead of clearing the whole area, only the tail
 * left over from a longer previous key (tracked in last_len) is zeroed.
 */
static void set_key(char *key, int index)
{
#ifdef SIMD_COEF_32
UTF16 tmp[PLAINTEXT_LENGTH+1];
int len2, len = enc_to_utf16_be(tmp, PLAINTEXT_LENGTH, (UTF8*)key, strlen(key));
int idx1=index/MAX_KEYS_PER_CRYPT, idx2=index%MAX_KEYS_PER_CRYPT;
/* negative return means the input was truncated; use the converted length */
if (len < 0)
len = strlen16(tmp);
if (len > 32)
memcpy(prep_key[idx1][1][idx2], &tmp[32], (len-32)<<1);
len2 = len;
if (len2 > 32) len2 = 32;
memcpy(prep_key[idx1][0][idx2], tmp, len2<<1);
len2 = len;
/* zero only what the previous (longer) key left behind */
while (len < last_len[index]) {
if (len < 32)
prep_key[idx1][0][idx2][len] = 0;
else
prep_key[idx1][1][idx2][len-32] = 0;
++len;
}
last_len[index] = len2;
#else
/* Clean slate */
memset(prep_key[index], 0, 2 * PLAINTEXT_LENGTH);
/* convert key to UTF-16BE, --encoding aware */
enc_to_utf16_be(prep_key[index], PLAINTEXT_LENGTH, (UTF8*)key,
strlen(key));
#endif
dirty = 1;
}
/*
 * Reconstruct the stored key: read the UTF-16BE units back out of the
 * prepared buffer (limbs 0/1 in the SIMD layout), byte-swap to UTF-16LE,
 * and convert back to the session encoding.
 */
static char *get_key(int index)
{
UTF16 key_le[PLAINTEXT_LENGTH + 1];
#ifdef SIMD_COEF_32
int j, idx1=index/MAX_KEYS_PER_CRYPT, idx2=index%MAX_KEYS_PER_CRYPT;
if (last_len[index] < 32) {
for (j = 0; j < last_len[index]; ++j)
key_le[j] = JOHNSWAP(prep_key[idx1][0][idx2][j])>>16;
} else {
for (j = 0; j < 32; ++j)
key_le[j] = JOHNSWAP(prep_key[idx1][0][idx2][j])>>16;
for (; j < last_len[index]; ++j)
key_le[j] = JOHNSWAP(prep_key[idx1][1][idx2][j-32])>>16;
}
key_le[j] = 0;
#else
UTF16 *d = key_le;
UTF16 *s = prep_key[index];
// Byte-swap back to UTF-16LE
while ((*d++ = *s >> 8 | *s << 8))
s++;
#endif
return (char*)utf16_to_enc(key_le);
}
/*
 * Hash all candidate keys: SHA256 over the 518-byte message (510 bytes of
 * key area + 8 bytes of salt). The SHA256 state after the key-only part is
 * cached (prep_ctx / crypt_cache) and recomputed only when set_key marked
 * the keys dirty, so changing the salt alone reuses the cached midstate.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index = 0;
#ifdef _OPENMP
#ifndef SIMD_COEF_32
#pragma omp parallel for default(none) private(index) shared(dirty, prep_ctx, count, crypt_out, prep_key)
#else
#pragma omp parallel for default(none) private(index) shared(dirty, count, crypt_cache, crypt_out, prep_key, NULL_LIMB)
#endif
#endif
for(index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
{
#ifndef SIMD_COEF_32
SHA256_CTX ctx;
/* refresh the cached midstate over the first 510 bytes when keys changed */
if (dirty) {
SHA256_Init(&prep_ctx[index]);
SHA256_Update(&prep_ctx[index], prep_key[index], 510);
}
memcpy(&ctx, &prep_ctx[index], sizeof(ctx));
SHA256_Update(&ctx, prep_key[index] + 510/2, 8);
SHA256_Final((unsigned char *)crypt_out[index], &ctx);
#else
unsigned char _OBuf[32*MAX_KEYS_PER_CRYPT+MEM_ALIGN_CACHE], *crypt;
uint32_t *crypt32;
crypt = (unsigned char*)mem_align(_OBuf, MEM_ALIGN_CACHE);
crypt32 = (uint32_t*)crypt;
/* limbs 0-1 hold the key; limbs of zeros pad the message up to the salt */
if (dirty) {
SIMDSHA256body(prep_key[index/MAX_KEYS_PER_CRYPT], crypt_cache[index], NULL, SSEi_FLAT_IN|SSEi_FLAT_RELOAD_SWAPLAST);
SIMDSHA256body(&(prep_key[index/MAX_KEYS_PER_CRYPT][1]), crypt_cache[index], crypt_cache[index], SSEi_FLAT_IN|SSEi_RELOAD|SSEi_FLAT_RELOAD_SWAPLAST);
SIMDSHA256body(NULL_LIMB, crypt_cache[index], crypt_cache[index], SSEi_FLAT_IN|SSEi_RELOAD);
SIMDSHA256body(NULL_LIMB, crypt_cache[index], crypt_cache[index], SSEi_FLAT_IN|SSEi_RELOAD);
SIMDSHA256body(NULL_LIMB, crypt_cache[index], crypt_cache[index], SSEi_FLAT_IN|SSEi_RELOAD);
SIMDSHA256body(NULL_LIMB, crypt_cache[index], crypt_cache[index], SSEi_FLAT_IN|SSEi_RELOAD);
SIMDSHA256body(NULL_LIMB, crypt_cache[index], crypt_cache[index], SSEi_FLAT_IN|SSEi_RELOAD);
}
memcpy(crypt32, crypt_cache[index], 32*MAX_KEYS_PER_CRYPT);
/* limbs 2 and 3 carry the salt, padding and length */
SIMDSHA256body(&(prep_key[index/MAX_KEYS_PER_CRYPT][2]), crypt32, crypt32, SSEi_FLAT_IN|SSEi_RELOAD|SSEi_FLAT_RELOAD_SWAPLAST);
// Last one with FLAT_OUT
SIMDSHA256body(&(prep_key[index/MAX_KEYS_PER_CRYPT][3]), crypt_out[index], crypt32, SSEi_FLAT_IN|SSEi_RELOAD|SSEi_FLAT_OUT);
#endif
}
dirty = 0;
return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
for (index = 0; index < count; index++)
if (*(ARCH_WORD_32 *)binary == *(ARCH_WORD_32 *)crypt_out[index])
return 1;
return 0;
}
/* Full 32-byte comparison for a single candidate. */
static int cmp_one(void *binary, int index)
{
return !memcmp((char *)binary, (const char*)crypt_out[index], BINARY_SIZE);
}
/* Nothing further to verify: cmp_one already compared the full hash. */
static int cmp_exact(char *source, int index)
{
return 1;
}
/* Bucket salts by their first word (SALT_ALIGN guarantees word access). */
static int salt_hash(void *salt)
{
return *(ARCH_WORD_32*)salt & (SALT_HASH_SIZE - 1);
}
/* John the Ripper format registration: ties the Sybase ASE parameters,
 * test vectors and the method implementations above into one table. */
struct fmt_main fmt_SybaseASE = {
	{
		/* Static format parameters. */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_UNICODE | FMT_UTF8 | FMT_SPLIT_UNIFIES_CASE,
		{ NULL },
		{ FORMAT_TAG },
		SybaseASE_tests
	}, {
		/* Method table. */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			/* binary_hash[0..6]: default implementations. */
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			/* get_hash[0..6]: implemented above in this file. */
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
blur.c | /********************************************************************[libaroma]*
* Copyright (C) 2011-2015 Ahmad Amarullah (http://amarullz.com/)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*______________________________________________________________________________
*
* Filename : blur.c
* Description : blur
*
* + This is part of libaroma, an embedded ui toolkit.
* + 06/04/15 - Author(s): Ahmad Amarullah
*
*/
#ifndef __libaroma_blur_c__
#define __libaroma_blur_c__
#include <aroma_internal.h>
/*
 * Build a normalized 1-D Gaussian kernel with (inRadius * 2) + 1 taps,
 * sampled at integer offsets -inRadius..inRadius.
 * The caller owns the returned buffer and must free() it.
 * Returns NULL on allocation failure.
 */
float * _libaroma_blur_kernel(const int inRadius) {
  int mem_amount = (inRadius * 2) + 1;
  float * gaussian_kernel = (float *) malloc(mem_amount * sizeof(float));
  if (!gaussian_kernel) {
    /* fix: the original dereferenced an unchecked malloc result */
    return NULL;
  }
  float twoRadiusSquaredRecip = 1.0 / (2.0 * inRadius * inRadius);
  float sqrtTwoPiTimesRadiusRecip = 1.0 / (sqrt(2.0 * __PI) * inRadius);
  int r = -inRadius;
  float sum = 0.0f;
  int i;
  /* Evaluate the Gaussian at each tap and accumulate the total weight. */
  for (i = 0; i < mem_amount; i++) {
    float x = r * r;
    float v = sqrtTwoPiTimesRadiusRecip * exp(-x * twoRadiusSquaredRecip);
    gaussian_kernel[i] = v;
    sum += v;
    r++;
  }
  /* Normalize so the taps sum to 1 (keeps overall brightness unchanged). */
  float div = sum;
  for (i = 0; i < mem_amount; i++) {
    gaussian_kernel[i] /= div;
  }
  return gaussian_kernel;
}
/*
 * Build a (inRadius + 1)-tap falloff curve using an ease-in cubic bezier
 * (0,0,0.4,1). Used by libaroma_draw_shadow for the shadow alpha ramp.
 * The caller owns the returned buffer and must free() it.
 * Returns NULL on allocation failure.
 */
float * _libaroma_blur_kernel_norm(const int inRadius) {
  int mem_amount = inRadius + 1;
  float * gaussian_kernel = (float *) malloc(mem_amount * sizeof(float));
  if (!gaussian_kernel) {
    /* fix: the original dereferenced an unchecked malloc result */
    return NULL;
  }
  int i;
  for (i = 0; i < mem_amount; i++) {
    gaussian_kernel[i] = libaroma_cubic_bezier(0, 0, 0.4, 1, ((float) i) / ((float) mem_amount));
  }
  return gaussian_kernel;
}
/*
 * Draw a rectangular drop shadow around the rect (dx,dy,w,h) on dst.
 * radiusx/radiusy set the horizontal/vertical falloff distance, alphamax is
 * the densest alpha value produced, and when `fill` is set the covered
 * rectangle itself is filled first. Always returns 1.
 *
 * Strategy: build one corner tile (cv), one horizontal edge strip (horiz)
 * and one vertical edge strip (vert), then composite eight pieces
 * (4 corners + 4 edges) around the rectangle.
 */
byte libaroma_draw_shadow(
  LIBAROMA_CANVASP dst,
  int dx, int dy, int w, int h,
  int radiusx, int radiusy,
  byte alphamax, byte fill
){
  /* Per-axis falloff curves (cubic-bezier eased, see kernel_norm). */
  float * kernelx = _libaroma_blur_kernel_norm(radiusx);
  float * kernely = _libaroma_blur_kernel_norm(radiusy);
  /* cv: full corner tile, horiz: top/bottom strips, vert: left/right strips. */
  LIBAROMA_CANVASP cv = libaroma_canvas_ex(radiusx*2+1,radiusy*2+1,1);
  LIBAROMA_CANVASP horiz = libaroma_canvas_ex(w,radiusy*2,1);
  LIBAROMA_CANVASP vert = libaroma_canvas_ex(radiusx*2+1,h,1);
  memset(cv->alpha, 0, cv->s);
  memset(horiz->alpha, 0, horiz->s);
  memset(vert->alpha, 0, vert->s);
  int y;
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
  /* Fill the top-left quadrant of cv and mirror it into the other three
   * quadrants; simultaneously stamp the matching rows into horiz. */
  for (y=0;y<=radiusy;y++){
    int ypos=(cv->l*y);
    int x;
    for (x=0;x<=radiusx;x++){
      /* Product of the two 1-D curves, clamped to alphamax. */
      byte av = MIN(alphamax,(kernelx[x]*kernely[y]) * alphamax);
      if (x!=radiusx){
        /* Mirror left half into the right half of this row. */
        cv->alpha[ypos+cv->w-x-1]=cv->alpha[ypos+x]=av; //(av<8)?0:av;
      }
      else{
        /* Center column is written once. */
        cv->alpha[ypos+x]=av;//(av<8)?0:av;
      }
    }
    if (y!=radiusy){
      /* Mirror this row into the bottom half of cv... */
      int bpos=(cv->l*(cv->h-y-1));
      memcpy(cv->alpha+bpos,cv->alpha+ypos,cv->w);
      /* ...and replicate the row's center value across horiz (top and
       * mirrored bottom row). */
      memset(
        horiz->alpha+(y*horiz->l),
        cv->alpha[ypos+radiusx],
        w
      );
      memset(
        horiz->alpha+((radiusy*2-y-1)*horiz->l),
        cv->alpha[ypos+radiusx],
        w
      );
    }
  }
  /* vert is the center row of cv repeated for the full rect height. */
  bytep calpha=cv->alpha+(cv->l*radiusy);
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
  for (y=0;y<h;y++){
    memcpy(vert->alpha+(y*vert->l),calpha,vert->l);
  }
  if (fill){
    libaroma_draw_rect(dst,dx,dy,w,h,0,alphamax);
  }
  /* Composite the 8 shadow pieces around the rectangle. */
  libaroma_draw_ex(dst,cv,dx-radiusx,dy-radiusy,0,0,radiusx,radiusy,1,0xff); /* left-top */
  libaroma_draw_ex(dst,cv,dx+w,dy-radiusy,radiusx+1,0,radiusx,radiusy,1,0xff); /* right-top */
  libaroma_draw_ex(dst,cv,dx-radiusx,dy+h,0,radiusy+1,radiusx,radiusy,1,0xff); /* left-bottom */
  libaroma_draw_ex(dst,cv,dx+w,dy+h,radiusx+1,radiusy+1,radiusx,radiusy,1,0xff); /* right-bottom */
  libaroma_draw_ex(dst,horiz,dx,dy-radiusy,0,0,horiz->w,radiusy,1,0xff); /* top */
  libaroma_draw_ex(dst,horiz,dx,dy+h,0,radiusy,horiz->w,radiusy,1,0xff); /* bottom */
  libaroma_draw_ex(dst,vert,dx-radiusx,dy,0,0,radiusx,vert->h,1,0xff); /* left */
  libaroma_draw_ex(dst,vert,dx+w,dy,radiusx+1,0,radiusx,vert->h,1,0xff); /* right */
  libaroma_canvas_free(cv);
  libaroma_canvas_free(horiz);
  libaroma_canvas_free(vert);
  free(kernelx);
  free(kernely);
  return 1;
}
/*
 * Function    : libaroma_blur_ex
 * Return Value: LIBAROMA_CANVASP
 * Descriptions: create new blur-ed canvas - extended
 *
 * Separable gaussian blur: an X pass from src into t1, then a Y pass from
 * t1 into t2. The result canvas grows by inRadius on every side
 * ((w + 2r) x (h + 2r)). When isMask is set only the alpha channel is
 * blurred and the color planes are filled with maskColor.
 * The caller owns the returned canvas. Returns NULL on error.
 */
LIBAROMA_CANVASP libaroma_blur_ex(
  LIBAROMA_CANVASP src,
  const int inRadius,
  byte isMask,
  word maskColor
) {
  if (inRadius < 1) {
    return NULL;
  }
  byte usealpha = (src->alpha ? 1 : 0);
  /* A mask blur only makes sense when the source has an alpha channel. */
  if (isMask && !usealpha) {
    return NULL;
  }
  float * kernel = _libaroma_blur_kernel(inRadius);
  if (!kernel) {
    return NULL;
  }
  int radius2 = inRadius * 2;
  int pixels_on_row = radius2 + 1; /* number of kernel taps */
  int height = src->h;
  int width = src->w;
  int x, y, o;
  int nwidth = width + radius2;
  int nheight = height + radius2;
  LIBAROMA_CANVASP t1 = libaroma_canvas_ex(nwidth, nheight, usealpha);
  if (!t1){
    free(kernel); /* fix: kernel leaked on this error path */
    return NULL;
  }
  LIBAROMA_CANVASP t2 = libaroma_canvas_ex(nwidth, nheight, usealpha);
  if (!t2){
    libaroma_canvas_free(t1);
    free(kernel); /* fix: kernel leaked on this error path */
    return NULL;
  }
  libaroma_canvas_setcolor(t1,0,0);
  if (isMask){
    libaroma_canvas_setcolor(t2,maskColor,0);
  }
  else{
    libaroma_canvas_setcolor(t2,0,0);
  }
  int sz = nwidth * nheight;
  if (usealpha) {
    memset(t1->alpha, 0, sz);
    memset(t2->alpha, 0, sz);
  }
  int r, g, b, a;
  /* X PASS: horizontal convolution of src into t1 (shifted down by
   * inRadius rows so the Y pass can read a full border). */
  for (y = 0; y < height; y++) {
    int row = y * src->l;
    int drw = (y + inRadius) * t1->l;
    for (x = 0; x < nwidth; x++) {
      r = g = b = a = 0;
      for (o = 0; o < pixels_on_row; o++) {
        int sx = (x - radius2) + o;
        /* Taps outside the source contribute nothing (edges fade out). */
        if (!libaroma_draw_limited(sx, width)) {
          int pos = row + sx;
          if (!isMask) {
            word c = src->data[pos];
            r += libaroma_color_r(c) * kernel[o];
            g += libaroma_color_g(c) * kernel[o];
            b += libaroma_color_b(c) * kernel[o];
          }
          if (usealpha) {
            a += src->alpha[pos] * kernel[o];
          }
        }
      }
      int dpos = drw + x;
      if (!isMask) {
        r = MAX(MIN(r, 0xff), 0);
        g = MAX(MIN(g, 0xff), 0);
        b = MAX(MIN(b, 0xff), 0);
        t1->data[dpos] = libaroma_dither_rgb(x, y, r, g, b);
        if (usealpha) {
          a = MAX(MIN(a, 0xff), 0);
          t1->alpha[dpos] = a;
        }
      }
      else {
        a = MAX(MIN(a, 0xff), 0);
        t1->alpha[dpos] = a;
      }
    }
  }
  /* Y PASS: vertical convolution of t1 into t2. */
  for (y = 0; y < nheight; y++) {
    int row = y * t1->l;
    for (x = 0; x < nwidth; x++) {
      r = g = b = a = 0;
      for (o = 0; o < pixels_on_row; o++) {
        int sy = (y - inRadius) + o;
        if (!libaroma_draw_limited(sy, nheight)) {
          int pos = (sy * t1->l) + x;
          if (!isMask) {
            word c = t1->data[pos];
            r += libaroma_color_r(c) * kernel[o];
            g += libaroma_color_g(c) * kernel[o];
            b += libaroma_color_b(c) * kernel[o];
          }
          if (usealpha) {
            a += t1->alpha[pos] * kernel[o];
          }
        }
      }
      int dpos = row + x;
      if (!isMask) {
        r = MAX(MIN(r, 0xff), 0);
        g = MAX(MIN(g, 0xff), 0);
        b = MAX(MIN(b, 0xff), 0);
        t2->data[dpos] = libaroma_dither_rgb(x, y, r, g, b);
        if (usealpha) {
          a = MAX(MIN(a, 0xff), 0);
          t2->alpha[dpos] = a;
        }
      }
      else {
        a = MAX(MIN(a, 0xff), 0);
        t2->alpha[dpos] = a;
      }
    }
  }
  free(kernel);
  libaroma_canvas_free(t1);
  return t2;
} /* End of libaroma_blur_ex */
#endif /* __libaroma_blur_c__ */
|
gameoflife.c | //------------------------------------------------------------------------------
//
// Name: gameoflife.c
//
// Purpose: Run a naive Conway's game of life
//
// HISTORY: Written by Tom Deakin and Simon McIntosh-Smith, August 2013
//
//------------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#define FINALSTATEFILE "final_state.dat"
// Define the state of the cell
#define DEAD 0
#define ALIVE 1
/*************************************************************************************
* Forward declarations of utility functions
************************************************************************************/
void die(const char* message, const int line, const char *file);
void load_board(char* board, const char* file, const unsigned int nx, const unsigned int ny);
void print_board(const char* board, const unsigned int nx, const unsigned int ny);
void save_board(const char* board, const unsigned int nx, const unsigned int ny);
void load_params(const char *file, unsigned int *nx, unsigned int *ny, unsigned int *iterations);
/*************************************************************************************
* Game of Life worker method
************************************************************************************/
// Apply the rules of life to tick and save in tock
// Advance the board one generation: read `tick`, write the successor into
// `tock`. The grid is nx columns by ny rows with wrap-around borders.
void accelerate_life(const char* tick, char* tock, const int nx, const int ny)
{
    // Each row is independent, so rows are distributed across threads.
    // All per-cell temporaries are declared inside the loop body and are
    // therefore private to each thread automatically.
    #pragma omp parallel for
    for (unsigned int row = 0; row < ny; row++)
    {
        for (unsigned int col = 0; col < nx; col++)
        {
            // Wrapped neighbour coordinates.
            const unsigned int right = (col + 1) % nx;
            const unsigned int left  = (col == 0) ? nx - 1 : col - 1;
            const unsigned int next  = (row + 1) % ny;
            const unsigned int prev  = (row == 0) ? ny - 1 : row - 1;

            // Count the eight live neighbours.
            int alive = 0;
            alive += (tick[row  * nx + left ] == ALIVE);
            alive += (tick[next * nx + left ] == ALIVE);
            alive += (tick[prev * nx + left ] == ALIVE);
            alive += (tick[row  * nx + right] == ALIVE);
            alive += (tick[next * nx + right] == ALIVE);
            alive += (tick[prev * nx + right] == ALIVE);
            alive += (tick[next * nx + col  ] == ALIVE);
            alive += (tick[prev * nx + col  ] == ALIVE);

            // Conway's rules: survival on 2-3 neighbours, birth on exactly 3.
            const unsigned int cell = row * nx + col;
            if (tick[cell] == ALIVE)
                tock[cell] = (alive == 2 || alive == 3) ? ALIVE : DEAD;
            else
                tock[cell] = (alive == 3) ? ALIVE : DEAD;
        }
    }
}
/*************************************************************************************
* Main function
************************************************************************************/
// Entry point: load the board and parameters named on the command line,
// run the requested number of generations, then print and save the result.
int main(int argc, char **argv)
{
    // Check we have a starting state file and a params file
    if (argc != 3)
    {
        printf("Usage:\n./gameoflife input.dat input.params\n");
        return EXIT_FAILURE;
    }
    // Board dimensions and iteration total
    unsigned int nx, ny;
    unsigned int iterations;
    load_params(argv[2], &nx, &ny, &iterations);
    // Allocate (zeroed) memory for both boards
    char* board_tick = (char *)calloc(nx * ny, sizeof(char));
    char* board_tock = (char *)calloc(nx * ny, sizeof(char));
    if (!board_tick || !board_tock)
        die("Could not allocate memory for board", __LINE__, __FILE__);
    // Load in the starting state to board_tick
    load_board(board_tick, argv[1], nx, ny);
    // Display the starting state
    printf("Starting state\n");
    print_board(board_tick, nx, ny);
    // Run the simulation
    for (unsigned int i = 0; i < iterations; i++)
    {
        // Apply the rules of Life
        accelerate_life(board_tick, board_tock, nx, ny);
        // Swap the boards over so the next tick reads the new state
        char *tmp = board_tick;
        board_tick = board_tock;
        board_tock = tmp;
    }
    // Display the final state
    printf("Finishing state\n");
    print_board(board_tick, nx, ny);
    // Save the final state of the board
    save_board(board_tick, nx, ny);
    // fix: release both boards before exiting (they were never freed)
    free(board_tick);
    free(board_tock);
    return EXIT_SUCCESS;
}
/*************************************************************************************
* Utility functions
************************************************************************************/
// Function to load the params file and set up the X and Y dimensions
// Read the params file: three whitespace-separated values giving the X
// dimension, Y dimension and iteration count. die()s on any failure.
void load_params(const char* file, unsigned int *nx, unsigned int *ny, unsigned int *iterations)
{
    FILE *fp = fopen(file, "r");
    if (!fp)
        die("Could not open params file.", __LINE__, __FILE__);
    int retval;
    // fix: the targets are unsigned int*, so the conversion must be %u;
    // matching %d with an unsigned int* is undefined behavior.
    retval = fscanf(fp, "%u\n", nx);
    if (retval != 1)
        die("Could not read params file: nx.", __LINE__, __FILE__);
    retval = fscanf(fp, "%u\n", ny);
    if (retval != 1)
        die("Could not read params file: ny", __LINE__, __FILE__);
    retval = fscanf(fp, "%u\n", iterations);
    if (retval != 1)
        die("Could not read params file: iterations", __LINE__, __FILE__);
    fclose(fp);
}
// Function to load in a file which lists the alive cells
// Each line of the file is expected to be: x y 1
// Populate `board` from a file listing the alive cells, one per line
// in the form "x y 1". Coordinates are validated against nx/ny and any
// malformed line aborts via die().
void load_board(char* board, const char* file, const unsigned int nx, const unsigned int ny)
{
    FILE *fp = fopen(file, "r");
    if (!fp)
        die("Could not open input file.", __LINE__, __FILE__);

    unsigned int x, y, s;
    for (;;)
    {
        const int matched = fscanf(fp, "%u %u %u\n", &x, &y, &s);
        if (matched == EOF)
            break;
        if (matched != 3)
            die("Expected 3 values per line in input file.", __LINE__, __FILE__);
        if (x > nx - 1)
            die("Input x-coord out of range.", __LINE__, __FILE__);
        if (y > ny - 1)
            die("Input y-coord out of range.", __LINE__, __FILE__);
        if (s != ALIVE)
            die("Alive value should be 1.", __LINE__, __FILE__);
        board[x + y * nx] = ALIVE;
    }
    fclose(fp);
}
// Function to print out the board to stdout
// Alive cells are displayed as O
// Dead cells are displayed as .
// Write the board to stdout, one row per line:
// alive cells are shown as 'O', dead cells as '.'.
void print_board(const char* board, const unsigned int nx, const unsigned int ny)
{
    for (unsigned int row = 0; row < ny; row++)
    {
        for (unsigned int col = 0; col < nx; col++)
        {
            putchar(board[row * nx + col] == DEAD ? '.' : 'O');
        }
        putchar('\n');
    }
}
// Write the alive cells to FINALSTATEFILE, one "x y 1" line per cell,
// matching the input format accepted by load_board.
void save_board(const char* board, const unsigned int nx, const unsigned int ny)
{
    FILE *fp = fopen(FINALSTATEFILE, "w");
    if (!fp)
        die("Could not open final state file.", __LINE__, __FILE__);
    for (unsigned int i = 0; i < ny; i++)
    {
        for (unsigned int j = 0; j < nx; j++)
        {
            if (board[i * nx + j] == ALIVE)
                // fix: j and i are unsigned, so print them with %u
                fprintf(fp, "%u %u %d\n", j, i, ALIVE);
        }
    }
    // fix: the stream was never closed, leaking the handle and risking
    // loss of buffered output; fclose also flushes, so check its result.
    if (fclose(fp) != 0)
        die("Could not write final state file.", __LINE__, __FILE__);
}
// Function to display error and exit nicely
// Report a fatal error with its source location on stderr, flush the
// stream so the message is not lost, and terminate the process.
// This function does not return.
void die(const char* message, const int line, const char *file)
{
    fprintf(stderr, "Error at line %d of file %s:\n", line, file);
    fprintf(stderr, "%s\n",message);
    fflush(stderr);
    exit(EXIT_FAILURE);
}
|
OMPIRBuilder.h | //===- IR/OpenMPIRBuilder.h - OpenMP encoding builder for LLVM IR - C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the OpenMPIRBuilder class and helpers used as a convenient
// way to create LLVM instructions for OpenMP directives.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
#define LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Allocator.h"
#include <forward_list>
namespace llvm {
class CanonicalLoopInfo;
/// An interface to create LLVM-IR for OpenMP directives.
///
/// Each OpenMP directive has a corresponding public generator method.
class OpenMPIRBuilder {
public:
/// Create a new OpenMPIRBuilder operating on the given module \p M. This will
/// not have an effect on \p M (see initialize).
OpenMPIRBuilder(Module &M) : M(M), Builder(M.getContext()) {}
~OpenMPIRBuilder();
/// Initialize the internal state, this will put structures types and
/// potentially other helpers into the underlying module. Must be called
/// before any other method and only once!
void initialize();
/// Finalize the underlying module, e.g., by outlining regions.
/// \param Fn The function to be finalized. If not used,
/// all functions are finalized.
void finalize(Function *Fn = nullptr);
/// Add attributes known for \p FnID to \p Fn.
void addAttributes(omp::RuntimeFunction FnID, Function &Fn);
/// Type used throughout for insertion points.
using InsertPointTy = IRBuilder<>::InsertPoint;
/// Callback type for variable finalization (think destructors).
///
/// \param CodeGenIP is the insertion point at which the finalization code
/// should be placed.
///
/// A finalize callback knows about all objects that need finalization, e.g.
/// destruction, when the scope of the currently generated construct is left
/// at the time, and location, the callback is invoked.
using FinalizeCallbackTy = std::function<void(InsertPointTy CodeGenIP)>;
struct FinalizationInfo {
/// The finalization callback provided by the last in-flight invocation of
/// createXXXX for the directive of kind DK.
FinalizeCallbackTy FiniCB;
/// The directive kind of the innermost directive that has an associated
/// region which might require finalization when it is left.
omp::Directive DK;
/// Flag to indicate if the directive is cancellable.
bool IsCancellable;
};
/// Push a finalization callback on the finalization stack.
///
/// NOTE: Temporary solution until Clang CG is gone.
void pushFinalizationCB(const FinalizationInfo &FI) {
FinalizationStack.push_back(FI);
}
/// Pop the last finalization callback from the finalization stack.
///
/// NOTE: Temporary solution until Clang CG is gone.
void popFinalizationCB() { FinalizationStack.pop_back(); }
/// Callback type for body (=inner region) code generation
///
/// The callback takes code locations as arguments, each describing a
/// location at which code might need to be generated or a location that is
/// the target of control transfer.
///
/// \param AllocaIP is the insertion point at which new alloca instructions
/// should be placed.
/// \param CodeGenIP is the insertion point at which the body code should be
/// placed.
/// \param ContinuationBB is the basic block target to leave the body.
///
/// Note that all blocks pointed to by the arguments have terminators.
using BodyGenCallbackTy =
function_ref<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
BasicBlock &ContinuationBB)>;
// This is created primarily for sections construct as llvm::function_ref
// (BodyGenCallbackTy) is not storable (as described in the comments of
// function_ref class - function_ref contains non-ownable reference
// to the callable.
using StorableBodyGenCallbackTy =
std::function<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
BasicBlock &ContinuationBB)>;
/// Callback type for loop body code generation.
///
/// \param CodeGenIP is the insertion point where the loop's body code must be
/// placed. This will be a dedicated BasicBlock with a
/// conditional branch from the loop condition check and
/// terminated with an unconditional branch to the loop
/// latch.
/// \param IndVar is the induction variable usable at the insertion point.
using LoopBodyGenCallbackTy =
function_ref<void(InsertPointTy CodeGenIP, Value *IndVar)>;
/// Callback type for variable privatization (think copy & default
/// constructor).
///
/// \param AllocaIP is the insertion point at which new alloca instructions
/// should be placed.
/// \param CodeGenIP is the insertion point at which the privatization code
/// should be placed.
/// \param Original The value being copied/created, should not be used in the
/// generated IR.
/// \param Inner The equivalent of \p Original that should be used in the
/// generated IR; this is equal to \p Original if the value is
/// a pointer and can thus be passed directly, otherwise it is
/// an equivalent but different value.
/// \param ReplVal The replacement value, thus a copy or new created version
/// of \p Inner.
///
/// \returns The new insertion point where code generation continues and
/// \p ReplVal the replacement value.
using PrivatizeCallbackTy = function_ref<InsertPointTy(
InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &Original,
Value &Inner, Value *&ReplVal)>;
/// Description of a LLVM-IR insertion point (IP) and a debug/source location
/// (filename, line, column, ...).
struct LocationDescription {
LocationDescription(const IRBuilderBase &IRB)
: IP(IRB.saveIP()), DL(IRB.getCurrentDebugLocation()) {}
LocationDescription(const InsertPointTy &IP) : IP(IP) {}
LocationDescription(const InsertPointTy &IP, const DebugLoc &DL)
: IP(IP), DL(DL) {}
InsertPointTy IP;
DebugLoc DL;
};
/// Emitter methods for OpenMP directives.
///
///{
/// Generator for '#omp barrier'
///
/// \param Loc The location where the barrier directive was encountered.
/// \param DK The kind of directive that caused the barrier.
/// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
/// \param CheckCancelFlag Flag to indicate a cancel barrier return value
/// should be checked and acted upon.
///
/// \returns The insertion point after the barrier.
InsertPointTy createBarrier(const LocationDescription &Loc, omp::Directive DK,
bool ForceSimpleCall = false,
bool CheckCancelFlag = true);
/// Generator for '#omp cancel'
///
/// \param Loc The location where the directive was encountered.
/// \param IfCondition The evaluated 'if' clause expression, if any.
/// \param CanceledDirective The kind of directive that is cancled.
///
/// \returns The insertion point after the barrier.
InsertPointTy createCancel(const LocationDescription &Loc, Value *IfCondition,
omp::Directive CanceledDirective);
/// Generator for '#omp parallel'
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion points to be used for alloca instructions.
/// \param BodyGenCB Callback that will generate the region code.
/// \param PrivCB Callback to copy a given variable (think copy constructor).
/// \param FiniCB Callback to finalize variable copies.
/// \param IfCondition The evaluated 'if' clause expression, if any.
/// \param NumThreads The evaluated 'num_threads' clause expression, if any.
/// \param ProcBind The value of the 'proc_bind' clause (see ProcBindKind).
/// \param IsCancellable Flag to indicate a cancellable parallel region.
///
/// \returns The insertion position *after* the parallel.
IRBuilder<>::InsertPoint
createParallel(const LocationDescription &Loc, InsertPointTy AllocaIP,
BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB,
FinalizeCallbackTy FiniCB, Value *IfCondition,
Value *NumThreads, omp::ProcBindKind ProcBind,
bool IsCancellable);
/// Generator for the control flow structure of an OpenMP canonical loop.
///
/// This generator operates on the logical iteration space of the loop, i.e.
/// the caller only has to provide a loop trip count of the loop as defined by
/// base language semantics. The trip count is interpreted as an unsigned
/// integer. The induction variable passed to \p BodyGenCB will be of the same
/// type and run from 0 to \p TripCount - 1. It is up to the callback to
/// convert the logical iteration variable to the loop counter variable in the
/// loop body.
///
/// \param Loc The insert and source location description. The insert
/// location can be between two instructions or the end of a
/// degenerate block (e.g. a BB under construction).
/// \param BodyGenCB Callback that will generate the loop body code.
/// \param TripCount Number of iterations the loop body is executed.
/// \param Name Base name used to derive BB and instruction names.
///
/// \returns An object representing the created control flow structure which
/// can be used for loop-associated directives.
CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc,
LoopBodyGenCallbackTy BodyGenCB,
Value *TripCount,
const Twine &Name = "loop");
/// Generator for the control flow structure of an OpenMP canonical loop.
///
/// Instead of a logical iteration space, this allows specifying user-defined
/// loop counter values using increment, upper- and lower bounds. To
/// disambiguate the terminology when counting downwards, instead of lower
/// bounds we use \p Start for the loop counter value in the first body
/// iteration.
///
/// Consider the following limitations:
///
/// * A loop counter space over all integer values of its bit-width cannot be
/// represented. E.g using uint8_t, its loop trip count of 256 cannot be
/// stored into an 8 bit integer):
///
/// DO I = 0, 255, 1
///
/// * Unsigned wrapping is only supported when wrapping only "once"; E.g.
/// effectively counting downwards:
///
/// for (uint8_t i = 100u; i > 0; i += 127u)
///
///
/// TODO: May need to add additional parameters to represent:
///
/// * Allow representing downcounting with unsigned integers.
///
/// * Sign of the step and the comparison operator might disagree:
///
/// for (int i = 0; i < 42; i -= 1u)
///
//
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the loop body code.
/// \param Start Value of the loop counter for the first iterations.
/// \param Stop Loop counter values past this will stop the loop.
/// \param Step Loop counter increment after each iteration; negative
/// means counting down.
/// \param IsSigned Whether Start, Stop and Step are signed integers.
/// \param InclusiveStop Whether \p Stop itself is a valid value for the loop
/// counter.
/// \param ComputeIP Insertion point for instructions computing the trip
/// count. Can be used to ensure the trip count is available
/// at the outermost loop of a loop nest. If not set,
/// defaults to the preheader of the generated loop.
/// \param Name Base name used to derive BB and instruction names.
///
/// \returns An object representing the created control flow structure which
/// can be used for loop-associated directives.
CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc,
LoopBodyGenCallbackTy BodyGenCB,
Value *Start, Value *Stop, Value *Step,
bool IsSigned, bool InclusiveStop,
InsertPointTy ComputeIP = {},
const Twine &Name = "loop");
/// Collapse a loop nest into a single loop.
///
/// Merges loops of a loop nest into a single CanonicalLoopNest representation
/// that has the same number of innermost loop iterations as the origin loop
/// nest. The induction variables of the input loops are derived from the
/// collapsed loop's induction variable. This is intended to be used to
/// implement OpenMP's collapse clause. Before applying a directive,
/// collapseLoops normalizes a loop nest to contain only a single loop and the
/// directive's implementation does not need to handle multiple loops itself.
/// This does not remove the need to handle all loop nest handling by
/// directives, such as the ordered(<n>) clause or the simd schedule-clause
/// modifier of the worksharing-loop directive.
///
/// Example:
/// \code
/// for (int i = 0; i < 7; ++i) // Canonical loop "i"
/// for (int j = 0; j < 9; ++j) // Canonical loop "j"
/// body(i, j);
/// \endcode
///
/// After collapsing with Loops={i,j}, the loop is changed to
/// \code
/// for (int ij = 0; ij < 63; ++ij) {
/// int i = ij / 9;
/// int j = ij % 9;
/// body(i, j);
/// }
/// \endcode
///
/// In the current implementation, the following limitations apply:
///
/// * All input loops have an induction variable of the same type.
///
/// * The collapsed loop will have the same trip count integer type as the
/// input loops. Therefore it is possible that the collapsed loop cannot
/// represent all iterations of the input loops. For instance, assuming a
/// 32 bit integer type, and two input loops both iterating 2^16 times, the
/// theoretical trip count of the collapsed loop would be 2^32 iteration,
/// which cannot be represented in an 32-bit integer. Behavior is undefined
/// in this case.
///
/// * The trip counts of every input loop must be available at \p ComputeIP.
/// Non-rectangular loops are not yet supported.
///
/// * At each nest level, code between a surrounding loop and its nested loop
/// is hoisted into the loop body, and such code will be executed more
/// often than before collapsing (or not at all if any inner loop iteration
/// has a trip count of 0). This is permitted by the OpenMP specification.
///
/// \param DL Debug location for instructions added for collapsing,
/// such as instructions to compute/derive the input loop's
/// induction variables.
/// \param Loops Loops in the loop nest to collapse. Loops are specified
/// from outermost-to-innermost and every control flow of a
/// loop's body must pass through its directly nested loop.
/// \param ComputeIP Where additional instruction that compute the collapsed
/// trip count. If not set, defaults to before the generated
/// loop.
///
/// \returns The CanonicalLoopInfo object representing the collapsed loop.
CanonicalLoopInfo *collapseLoops(DebugLoc DL,
ArrayRef<CanonicalLoopInfo *> Loops,
InsertPointTy ComputeIP);
/// Modifies the canonical loop to be a statically-scheduled workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls to an OpenMP
/// runtime function in the preheader to obtain the loop bounds to be used in
/// the current thread, updates the relevant instructions in the canonical
/// loop and calls to an OpenMP runtime finalization function after the loop.
///
/// TODO: Workshare loops with static scheduling may contain up to two loops
/// that fulfill the requirements of an OpenMP canonical loop. One for
/// iterating over all iterations of a chunk and another one for iterating
/// over all chunks that are executed on the same thread. Returning
/// CanonicalLoopInfo objects representing them may eventually be useful for
/// the apply clause planned in OpenMP 6.0, but currently whether these are
/// canonical loops is irrelevant.
///
/// \param DL Debug location for instructions added for the
/// workshare-loop construct itself.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
/// \param Chunk The size of loop chunk considered as a unit when
/// scheduling. If \p nullptr, defaults to 1.
///
/// \returns Point where to insert code after the workshare construct.
InsertPointTy applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP,
bool NeedsBarrier,
Value *Chunk = nullptr);
/// Modifies the canonical loop to be a dynamically-scheduled workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls to an OpenMP
/// runtime function in the preheader to obtain, and then in each iteration
/// to update the loop counter.
///
/// \param DL Debug location for instructions added for the
/// workshare-loop construct itself.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param SchedType Type of scheduling to be passed to the init function.
/// \param NeedsBarrier Indicates whether a barrier must be insterted after
/// the loop.
/// \param Chunk The size of loop chunk considered as a unit when
/// scheduling. If \p nullptr, defaults to 1.
///
/// \returns Point where to insert code after the workshare construct.
InsertPointTy applyDynamicWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP,
omp::OMPScheduleType SchedType,
bool NeedsBarrier,
Value *Chunk = nullptr);
/// Modifies the canonical loop to be a workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls to an OpenMP
/// runtime function in the preheader to obtain the loop bounds to be used in
/// the current thread, updates the relevant instructions in the canonical
/// loop and calls to an OpenMP runtime finalization function after the loop.
///
/// \param DL Debug location for instructions added for the
/// workshare-loop construct itself.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
///
/// \returns Point where to insert code after the workshare construct.
InsertPointTy applyWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP, bool NeedsBarrier);
/// Tile a loop nest.
///
/// Tiles the loops of \p Loops by the tile sizes in \p TileSizes. Loops in
/// \p Loops must be perfectly nested, from outermost to innermost loop
/// (i.e. Loops.front() is the outermost loop). The trip count llvm::Value
/// of every loop and every tile sizes must be usable in the outermost
/// loop's preheader. This implies that the loop nest is rectangular.
///
/// Example:
/// \code
/// for (int i = 0; i < 15; ++i) // Canonical loop "i"
/// for (int j = 0; j < 14; ++j) // Canonical loop "j"
/// body(i, j);
/// \endcode
///
/// After tiling with Loops={i,j} and TileSizes={5,7}, the loop is changed to
/// \code
/// for (int i1 = 0; i1 < 3; ++i1)
/// for (int j1 = 0; j1 < 2; ++j1)
/// for (int i2 = 0; i2 < 5; ++i2)
/// for (int j2 = 0; j2 < 7; ++j2)
/// body(i1*5+i2, j1*7+j2);
/// \endcode
///
/// The returned vector contains the loops {i1,j1,i2,j2}. The loops i1 and j1
/// are referred to as the floor, and the loops i2 and j2 are the tiles. Tiling also
/// handles non-constant trip counts, non-constant tile sizes and trip counts
/// that are not multiples of the tile size. In the latter case the tile loop
/// of the last floor-loop iteration will have fewer iterations than specified
/// as its tile size.
///
///
/// @param DL Debug location for instructions added by tiling, for
/// instance the floor- and tile trip count computation.
/// @param Loops Loops to tile. The CanonicalLoopInfo objects are
/// invalidated by this method, i.e. should not be used after
/// tiling.
/// @param TileSizes For each loop in \p Loops, the tile size for that
/// dimension.
///
/// \returns A list of generated loops. Contains twice as many loops as the
/// input loop nest; the first half are the floor loops and the
/// second half are the tile loops.
std::vector<CanonicalLoopInfo *>
tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
ArrayRef<Value *> TileSizes);
/// Fully unroll a loop.
///
/// Instead of unrolling the loop immediately (and duplicating its body
/// instructions), it is deferred to LLVM's LoopUnrollPass by adding loop
/// metadata.
///
/// \param DL Debug location for instructions added by unrolling.
/// \param Loop The loop to unroll. The loop will be invalidated.
void unrollLoopFull(DebugLoc DL, CanonicalLoopInfo *Loop);
/// Fully or partially unroll a loop. How the loop is unrolled is determined
/// using LLVM's LoopUnrollPass.
///
/// \param DL Debug location for instructions added by unrolling.
/// \param Loop The loop to unroll. The loop will be invalidated.
void unrollLoopHeuristic(DebugLoc DL, CanonicalLoopInfo *Loop);
/// Partially unroll a loop.
///
/// The CanonicalLoopInfo of the unrolled loop for use with chained
/// loop-associated directive can be requested using \p UnrolledCLI. Not
/// needing the CanonicalLoopInfo allows more efficient code generation by
/// deferring the actual unrolling to the LoopUnrollPass using loop metadata.
/// A loop-associated directive applied to the unrolled loop needs to know the
/// new trip count which means that if using a heuristically determined unroll
/// factor (\p Factor == 0), that factor must be computed immediately. We are
/// using the same logic as the LoopUnrollPass to derive the unroll factor,
/// but which assumes that some canonicalization has taken place (e.g.
/// Mem2Reg, LICM, GVN, Inlining, etc.). That is, the heuristic will perform
/// better when the unrolled loop's CanonicalLoopInfo is not needed.
///
/// \param DL Debug location for instructions added by unrolling.
/// \param Loop The loop to unroll. The loop will be invalidated.
/// \param Factor The factor to unroll the loop by. A factor of 0
/// indicates that a heuristic should be used to determine
/// the unroll-factor.
/// \param UnrolledCLI If non-null, receives the CanonicalLoopInfo of the
/// partially unrolled loop. Otherwise, uses loop metadata
/// to defer unrolling to the LoopUnrollPass.
void unrollLoopPartial(DebugLoc DL, CanonicalLoopInfo *Loop, int32_t Factor,
CanonicalLoopInfo **UnrolledCLI);
/// Add metadata to simd-ize a loop.
///
/// \param DL Debug location for instructions added by unrolling.
/// \param Loop The loop to simd-ize.
void applySimd(DebugLoc DL, CanonicalLoopInfo *Loop);
/// Generator for '#omp flush'
///
/// \param Loc The location where the flush directive was encountered
void createFlush(const LocationDescription &Loc);
/// Generator for '#omp taskwait'
///
/// \param Loc The location where the taskwait directive was encountered.
void createTaskwait(const LocationDescription &Loc);
/// Generator for '#omp taskyield'
///
/// \param Loc The location where the taskyield directive was encountered.
void createTaskyield(const LocationDescription &Loc);
/// Functions used to generate reductions. Such functions take two Values
/// representing LHS and RHS of the reduction, respectively, and a reference
/// to the value that is updated to refer to the reduction result.
using ReductionGenTy =
function_ref<InsertPointTy(InsertPointTy, Value *, Value *, Value *&)>;
/// Functions used to generate atomic reductions. Such functions take two
/// Values representing pointers to LHS and RHS of the reduction, as well as
/// the element type of these pointers. They are expected to atomically
/// update the LHS to the reduced value.
using AtomicReductionGenTy =
function_ref<InsertPointTy(InsertPointTy, Type *, Value *, Value *)>;
/// Information about an OpenMP reduction.
struct ReductionInfo {
  /// \param ElementType        Type of the reduced values; must match the
  ///                           pointee type of \p Variable (checked by the
  ///                           assert below).
  /// \param Variable           Pointer to the shared reduction variable.
  /// \param PrivateVariable    Pointer to this thread's partial result.
  /// \param ReductionGen       Non-atomic combiner callback (see
  ///                           ReductionGenTy).
  /// \param AtomicReductionGen Atomic combiner callback; may be null (see
  ///                           AtomicReductionGenTy).
  ReductionInfo(Type *ElementType, Value *Variable, Value *PrivateVariable,
                ReductionGenTy ReductionGen,
                AtomicReductionGenTy AtomicReductionGen)
      : ElementType(ElementType), Variable(Variable),
        PrivateVariable(PrivateVariable), ReductionGen(ReductionGen),
        AtomicReductionGen(AtomicReductionGen) {
    // Guard against a mismatch between the element type and the pointee
    // type of the reduction variable (debug builds only).
    assert(cast<PointerType>(Variable->getType())
        ->isOpaqueOrPointeeTypeMatches(ElementType) && "Invalid elem type");
  }

  /// Reduction element type, must match pointee type of variable.
  Type *ElementType;

  /// Reduction variable of pointer type.
  Value *Variable;

  /// Thread-private partial reduction variable.
  Value *PrivateVariable;

  /// Callback for generating the reduction body. The IR produced by this will
  /// be used to combine two values in a thread-safe context, e.g., under
  /// lock or within the same thread, and therefore need not be atomic.
  ReductionGenTy ReductionGen;

  /// Callback for generating the atomic reduction body, may be null. The IR
  /// produced by this will be used to atomically combine two values during
  /// reduction. If null, the implementation will use the non-atomic version
  /// along with the appropriate synchronization mechanisms.
  AtomicReductionGenTy AtomicReductionGen;
};
// TODO: provide atomic and non-atomic reduction generators for reduction
// operators defined by the OpenMP specification.
/// Generator for '#omp reduction'.
///
/// Emits the IR instructing the runtime to perform the specific kind of
/// reductions. Expects reduction variables to have been privatized and
/// initialized to reduction-neutral values separately. Emits the calls to
/// runtime functions as well as the reduction function and the basic blocks
/// performing the reduction atomically and non-atomically.
///
/// The code emitted for the following:
///
/// \code
/// type var_1;
/// type var_2;
/// #pragma omp <directive> reduction(reduction-op:var_1,var_2)
/// /* body */;
/// \endcode
///
/// corresponds to the following sketch.
///
/// \code
/// void _outlined_par() {
/// // N is the number of different reductions.
/// void *red_array[] = {privatized_var_1, privatized_var_2, ...};
/// switch(__kmpc_reduce(..., N, /*size of data in red array*/, red_array,
/// _omp_reduction_func,
/// _gomp_critical_user.reduction.var)) {
/// case 1: {
/// var_1 = var_1 <reduction-op> privatized_var_1;
/// var_2 = var_2 <reduction-op> privatized_var_2;
/// // ...
/// __kmpc_end_reduce(...);
/// break;
/// }
/// case 2: {
/// _Atomic<ReductionOp>(var_1, privatized_var_1);
/// _Atomic<ReductionOp>(var_2, privatized_var_2);
/// // ...
/// break;
/// }
/// default: break;
/// }
/// }
///
/// void _omp_reduction_func(void **lhs, void **rhs) {
/// *(type *)lhs[0] = *(type *)lhs[0] <reduction-op> *(type *)rhs[0];
/// *(type *)lhs[1] = *(type *)lhs[1] <reduction-op> *(type *)rhs[1];
/// // ...
/// }
/// \endcode
///
/// \param Loc The location where the reduction was
/// encountered. Must be within the associate
/// directive and after the last local access to the
/// reduction variables.
/// \param AllocaIP An insertion point suitable for allocas usable
/// in reductions.
/// \param ReductionInfos A list of info on each reduction variable.
/// \param IsNoWait A flag set if the reduction is marked as nowait.
InsertPointTy createReductions(const LocationDescription &Loc,
InsertPointTy AllocaIP,
ArrayRef<ReductionInfo> ReductionInfos,
bool IsNoWait = false);
///}
/// Return the insertion point used by the underlying IRBuilder (a snapshot
/// obtained via \c IRBuilder::saveIP).
InsertPointTy getInsertionPoint() { return Builder.saveIP(); }
/// Update the internal location to \p Loc.
bool updateToLocation(const LocationDescription &Loc) {
Builder.restoreIP(Loc.IP);
Builder.SetCurrentDebugLocation(Loc.DL);
return Loc.IP.getBlock() != nullptr;
}
/// Return the function declaration for the runtime function with \p FnID.
FunctionCallee getOrCreateRuntimeFunction(Module &M,
omp::RuntimeFunction FnID);
Function *getOrCreateRuntimeFunctionPtr(omp::RuntimeFunction FnID);
/// Return the (LLVM-IR) string describing the source location \p LocStr.
Constant *getOrCreateSrcLocStr(StringRef LocStr, uint32_t &SrcLocStrSize);
/// Return the (LLVM-IR) string describing the default source location.
Constant *getOrCreateDefaultSrcLocStr(uint32_t &SrcLocStrSize);
/// Return the (LLVM-IR) string describing the source location identified by
/// the arguments.
Constant *getOrCreateSrcLocStr(StringRef FunctionName, StringRef FileName,
unsigned Line, unsigned Column,
uint32_t &SrcLocStrSize);
/// Return the (LLVM-IR) string describing the DebugLoc \p DL. Use \p F as
/// fallback if \p DL does not specify the function name.
Constant *getOrCreateSrcLocStr(DebugLoc DL, uint32_t &SrcLocStrSize,
Function *F = nullptr);
/// Return the (LLVM-IR) string describing the source location \p Loc.
Constant *getOrCreateSrcLocStr(const LocationDescription &Loc,
uint32_t &SrcLocStrSize);
/// Return an ident_t* encoding the source location \p SrcLocStr and \p Flags.
/// TODO: Create a enum class for the Reserve2Flags
Constant *getOrCreateIdent(Constant *SrcLocStr, uint32_t SrcLocStrSize,
omp::IdentFlag Flags = omp::IdentFlag(0),
unsigned Reserve2Flags = 0);
/// Create a hidden global flag \p Name in the module with initial value \p
/// Value.
GlobalValue *createGlobalFlag(unsigned Value, StringRef Name);
/// Generate control flow and cleanup for cancellation.
///
/// \param CancelFlag Flag indicating if the cancellation is performed.
/// \param CanceledDirective The kind of directive that is canceled.
/// \param ExitCB Extra code to be generated in the exit block.
void emitCancelationCheckImpl(Value *CancelFlag,
omp::Directive CanceledDirective,
FinalizeCallbackTy ExitCB = {});
/// Generate a barrier runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
/// \param DK The directive which caused the barrier
/// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
/// \param CheckCancelFlag Flag to indicate a cancel barrier return value
/// should be checked and acted upon.
///
/// \returns The insertion point after the barrier.
InsertPointTy emitBarrierImpl(const LocationDescription &Loc,
omp::Directive DK, bool ForceSimpleCall,
bool CheckCancelFlag);
/// Generate a flush runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitFlush(const LocationDescription &Loc);
/// The finalization stack made up of finalize callbacks currently in-flight,
/// wrapped into FinalizationInfo objects that reference also the finalization
/// target block and the kind of cancellable directive.
SmallVector<FinalizationInfo, 8> FinalizationStack;
/// Check whether the innermost in-flight finalization is both cancellable
/// and associated with the directive kind \p DK.
bool isLastFinalizationInfoCancellable(omp::Directive DK) {
  if (FinalizationStack.empty())
    return false;
  const FinalizationInfo &Innermost = FinalizationStack.back();
  return Innermost.IsCancellable && Innermost.DK == DK;
}
/// Generate a taskwait runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitTaskwaitImpl(const LocationDescription &Loc);
/// Generate a taskyield runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitTaskyieldImpl(const LocationDescription &Loc);
/// Return the current thread ID.
///
/// \param Ident The ident (ident_t*) describing the query origin.
Value *getOrCreateThreadID(Value *Ident);
/// The underlying LLVM-IR module
Module &M;
/// The LLVM-IR Builder used to create IR.
IRBuilder<> Builder;
/// Map to remember source location strings
StringMap<Constant *> SrcLocStrMap;
/// Map to remember existing ident_t*.
DenseMap<std::pair<Constant *, uint64_t>, Constant *> IdentMap;
/// Helper that contains information about regions we need to outline
/// during finalization.
struct OutlineInfo {
  /// Type of the callback invoked with the function created by outlining.
  using PostOutlineCBTy = std::function<void(Function &)>;
  /// Hook run after the region has been outlined; receives the resulting
  /// function.
  PostOutlineCBTy PostOutlineCB;
  /// Blocks delimiting the region to be outlined (see collectBlocks).
  BasicBlock *EntryBB, *ExitBB;
  /// Values kept out of the outlined function's aggregate argument.
  /// NOTE(review): role inferred from the name -- confirm against the
  /// outlining implementation.
  SmallVector<Value *, 2> ExcludeArgsFromAggregate;

  /// Collect all blocks in between EntryBB and ExitBB in both the given
  /// vector and set.
  void collectBlocks(SmallPtrSetImpl<BasicBlock *> &BlockSet,
                     SmallVectorImpl<BasicBlock *> &BlockVector);

  /// Return the function that contains the region to be outlined.
  Function *getFunction() const { return EntryBB->getParent(); }
};
/// Collection of regions that need to be outlined during finalization.
SmallVector<OutlineInfo, 16> OutlineInfos;
/// Collection of owned canonical loop objects that eventually need to be
/// free'd.
std::forward_list<CanonicalLoopInfo> LoopInfos;
/// Add a new region that will be outlined later.
///
/// \param OI Descriptor of the region; consumed (moved from) by this call.
void addOutlineInfo(OutlineInfo &&OI) {
  // OI is a named rvalue reference and therefore an lvalue; without the
  // explicit move, emplace_back would copy the contained std::function and
  // SmallVector instead of moving them.
  OutlineInfos.emplace_back(std::move(OI));
}
/// An ordered map of auto-generated variables to their unique names.
/// It stores variables with the following names: 1) ".gomp_critical_user_" +
/// <critical_section_name> + ".var" for "omp critical" directives; 2)
/// <mangled_name_for_global_var> + ".cache." for cache for threadprivate
/// variables.
StringMap<AssertingVH<Constant>, BumpPtrAllocator> InternalVars;
/// Create the global variable holding the offload mappings information.
GlobalVariable *createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings,
std::string VarName);
/// Create the global variable holding the offload names information.
GlobalVariable *
createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names,
std::string VarName);
/// Allocas created for a call to an offload mapper runtime function
/// (see createMapperAllocas / emitMapperCall).
/// NOTE(review): per-field roles inferred from the field names -- confirm
/// against createMapperAllocas.
struct MapperAllocas {
  /// Alloca for the array of argument base pointers.
  AllocaInst *ArgsBase = nullptr;
  /// Alloca for the array of argument pointers.
  AllocaInst *Args = nullptr;
  /// Alloca for the array of argument sizes.
  AllocaInst *ArgSizes = nullptr;
};
/// Create the allocas instruction used in call to mapper functions.
void createMapperAllocas(const LocationDescription &Loc,
InsertPointTy AllocaIP, unsigned NumOperands,
struct MapperAllocas &MapperAllocas);
/// Create the call for the target mapper function.
/// \param Loc The source location description.
/// \param MapperFunc Function to be called.
/// \param SrcLocInfo Source location information global.
/// \param MaptypesArg The argument types.
/// \param MapnamesArg The argument names.
/// \param MapperAllocas The AllocaInst used for the call.
/// \param DeviceID Device ID for the call.
/// \param NumOperands Number of operands in the call.
void emitMapperCall(const LocationDescription &Loc, Function *MapperFunc,
Value *SrcLocInfo, Value *MaptypesArg, Value *MapnamesArg,
struct MapperAllocas &MapperAllocas, int64_t DeviceID,
unsigned NumOperands);
public:
/// Generator for __kmpc_copyprivate
///
/// \param Loc The source location description.
/// \param BufSize Number of elements in the buffer.
/// \param CpyBuf List of pointers to data to be copied.
/// \param CpyFn function to call for copying data.
/// \param DidIt flag variable; 1 for 'single' thread, 0 otherwise.
///
/// \return The insertion position *after* the CopyPrivate call.
InsertPointTy createCopyPrivate(const LocationDescription &Loc,
llvm::Value *BufSize, llvm::Value *CpyBuf,
llvm::Value *CpyFn, llvm::Value *DidIt);
/// Generator for '#omp single'
///
/// \param Loc The source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
/// \param DidIt Local variable used as a flag to indicate 'single' thread
///
/// \returns The insertion position *after* the single call.
InsertPointTy createSingle(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, llvm::Value *DidIt);
/// Generator for '#omp master'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
///
/// \returns The insertion position *after* the master.
InsertPointTy createMaster(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB);
/// Generator for '#omp masked'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
///
/// \returns The insertion position *after* the masked.
InsertPointTy createMasked(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, Value *Filter);
/// Generator for '#omp critical'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region body code.
/// \param FiniCB Callback to finalize variable copies.
/// \param CriticalName name of the lock used by the critical directive
/// \param HintInst Hint Instruction for hint clause associated with critical
///
/// \returns The insertion position *after* the critical.
InsertPointTy createCritical(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB,
StringRef CriticalName, Value *HintInst);
/// Generator for '#omp ordered depend (source | sink)'
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion point to be used for alloca instructions.
/// \param NumLoops The number of loops in depend clause.
/// \param StoreValues The value will be stored in vector address.
/// \param Name The name of alloca instruction.
/// \param IsDependSource If true, depend source; otherwise, depend sink.
///
/// \return The insertion position *after* the ordered.
InsertPointTy createOrderedDepend(const LocationDescription &Loc,
InsertPointTy AllocaIP, unsigned NumLoops,
ArrayRef<llvm::Value *> StoreValues,
const Twine &Name, bool IsDependSource);
/// Generator for '#omp ordered [threads | simd]'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
/// \param IsThreads If true, with threads clause or without clause;
/// otherwise, with simd clause;
///
/// \returns The insertion position *after* the ordered.
InsertPointTy createOrderedThreadsSimd(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB,
bool IsThreads);
/// Generator for '#omp sections'
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion points to be used for alloca instructions.
/// \param SectionCBs Callbacks that will generate body of each section.
/// \param PrivCB Callback to copy a given variable (think copy constructor).
/// \param FiniCB Callback to finalize variable copies.
/// \param IsCancellable Flag to indicate a cancellable parallel region.
/// \param IsNowait If true, barrier - to ensure all sections are executed
/// before moving forward will not be generated.
/// \returns The insertion position *after* the sections.
InsertPointTy createSections(const LocationDescription &Loc,
InsertPointTy AllocaIP,
ArrayRef<StorableBodyGenCallbackTy> SectionCBs,
PrivatizeCallbackTy PrivCB,
FinalizeCallbackTy FiniCB, bool IsCancellable,
bool IsNowait);
/// Generator for '#omp section'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region body code.
/// \param FiniCB Callback to finalize variable copies.
/// \returns The insertion position *after* the section.
InsertPointTy createSection(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB);
/// Generate conditional branch and relevant BasicBlocks through which private
/// threads copy the 'copyin' variables from Master copy to threadprivate
/// copies.
///
/// \param IP insertion block for copyin conditional
/// \param MasterVarPtr a pointer to the master variable
/// \param PrivateVarPtr a pointer to the threadprivate variable
/// \param IntPtrTy Pointer size type
/// \param BranchtoEnd Create a branch between the copyin.not.master blocks
/// and copy.in.end block
///
/// \returns The insertion point where copying operation to be emitted.
InsertPointTy createCopyinClauseBlocks(InsertPointTy IP, Value *MasterAddr,
Value *PrivateAddr,
llvm::IntegerType *IntPtrTy,
bool BranchtoEnd = true);
/// Create a runtime call for kmpc_Alloc
///
/// \param Loc The insert and source location description.
/// \param Size Size of allocated memory space
/// \param Allocator Allocator information instruction
/// \param Name Name of call Instruction for OMP_alloc
///
/// \returns CallInst to the OMP_Alloc call
CallInst *createOMPAlloc(const LocationDescription &Loc, Value *Size,
Value *Allocator, std::string Name = "");
/// Create a runtime call for kmpc_free
///
/// \param Loc The insert and source location description.
/// \param Addr Address of memory space to be freed
/// \param Allocator Allocator information instruction
/// \param Name Name of call Instruction for OMP_Free
///
/// \returns CallInst to the OMP_Free call
CallInst *createOMPFree(const LocationDescription &Loc, Value *Addr,
Value *Allocator, std::string Name = "");
/// Create a runtime call for kmpc_threadprivate_cached
///
/// \param Loc The insert and source location description.
/// \param Pointer pointer to data to be cached
/// \param Size size of data to be cached
/// \param Name Name of call Instruction for callinst
///
/// \returns CallInst to the thread private cache call.
CallInst *createCachedThreadPrivate(const LocationDescription &Loc,
llvm::Value *Pointer,
llvm::ConstantInt *Size,
const llvm::Twine &Name = Twine(""));
/// Create a runtime call for __tgt_interop_init
///
/// \param Loc The insert and source location description.
/// \param InteropVar variable to be allocated
/// \param InteropType type of interop operation
/// \param Device device to which offloading will occur
/// \param NumDependences number of dependence variables
/// \param DependenceAddress pointer to dependence variables
/// \param HaveNowaitClause does nowait clause exist
///
/// \returns CallInst to the __tgt_interop_init call
CallInst *createOMPInteropInit(const LocationDescription &Loc,
Value *InteropVar,
omp::OMPInteropType InteropType, Value *Device,
Value *NumDependences,
Value *DependenceAddress,
bool HaveNowaitClause);
/// Create a runtime call for __tgt_interop_destroy
///
/// \param Loc The insert and source location description.
/// \param InteropVar variable to be allocated
/// \param Device device to which offloading will occur
/// \param NumDependences number of dependence variables
/// \param DependenceAddress pointer to dependence variables
/// \param HaveNowaitClause does nowait clause exist
///
/// \returns CallInst to the __tgt_interop_destroy call
CallInst *createOMPInteropDestroy(const LocationDescription &Loc,
Value *InteropVar, Value *Device,
Value *NumDependences,
Value *DependenceAddress,
bool HaveNowaitClause);
/// Create a runtime call for __tgt_interop_use
///
/// \param Loc The insert and source location description.
/// \param InteropVar variable to be allocated
/// \param Device device to which offloading will occur
/// \param NumDependences number of dependence variables
/// \param DependenceAddress pointer to dependence variables
/// \param HaveNowaitClause does nowait clause exist
///
/// \returns CallInst to the __tgt_interop_use call
CallInst *createOMPInteropUse(const LocationDescription &Loc,
Value *InteropVar, Value *Device,
Value *NumDependences, Value *DependenceAddress,
bool HaveNowaitClause);
/// The `omp target` interface
///
/// For more information about the usage of this interface,
/// \see openmp/libomptarget/deviceRTLs/common/include/target.h
///
///{
/// Create a runtime call for kmpc_target_init
///
/// \param Loc The insert and source location description.
/// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not.
/// \param RequiresFullRuntime Indicate if a full device runtime is necessary.
InsertPointTy createTargetInit(const LocationDescription &Loc, bool IsSPMD,
bool RequiresFullRuntime);
/// Create a runtime call for kmpc_target_deinit
///
/// \param Loc The insert and source location description.
/// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not.
/// \param RequiresFullRuntime Indicate if a full device runtime is necessary.
void createTargetDeinit(const LocationDescription &Loc, bool IsSPMD,
bool RequiresFullRuntime);
///}
/// Declarations for LLVM-IR types (simple, array, function and structure) are
/// generated below. Their names are defined and used in OpenMPKinds.def. Here
/// we provide the declarations, the initializeTypes function will provide the
/// values.
///
///{
#define OMP_TYPE(VarName, InitValue) Type *VarName = nullptr;
#define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize) \
ArrayType *VarName##Ty = nullptr; \
PointerType *VarName##PtrTy = nullptr;
#define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...) \
FunctionType *VarName = nullptr; \
PointerType *VarName##Ptr = nullptr;
#define OMP_STRUCT_TYPE(VarName, StrName, ...) \
StructType *VarName = nullptr; \
PointerType *VarName##Ptr = nullptr;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
///}
private:
/// Create all simple and struct types exposed by the runtime and remember
/// the llvm::PointerTypes of them for easy access later.
void initializeTypes(Module &M);
/// Common interface for generating entry calls for OMP Directives.
/// if the directive has a region/body, It will set the insertion
/// point to the body
///
/// \param OMPD Directive to generate entry blocks for
/// \param EntryCall Call to the entry OMP Runtime Function
/// \param ExitBB block where the region ends.
/// \param Conditional indicate if the entry call result will be used
/// to evaluate a conditional of whether a thread will execute
/// body code or not.
///
/// \return The insertion position in exit block
InsertPointTy emitCommonDirectiveEntry(omp::Directive OMPD, Value *EntryCall,
BasicBlock *ExitBB,
bool Conditional = false);
/// Common interface to finalize the region
///
/// \param OMPD Directive to generate exiting code for
/// \param FinIP Insertion point for emitting Finalization code and exit call
/// \param ExitCall Call to the ending OMP Runtime Function
/// \param HasFinalize indicate if the directive will require finalization
/// and has a finalization callback in the stack that
/// should be called.
///
/// \return The insertion position in exit block
InsertPointTy emitCommonDirectiveExit(omp::Directive OMPD,
InsertPointTy FinIP,
Instruction *ExitCall,
bool HasFinalize = true);
/// Common Interface to generate OMP inlined regions
///
/// \param OMPD Directive to generate inlined region for
/// \param EntryCall Call to the entry OMP Runtime Function
/// \param ExitCall Call to the ending OMP Runtime Function
/// \param BodyGenCB Body code generation callback.
/// \param FiniCB Finalization Callback. Will be called when finalizing region
/// \param Conditional indicate if the entry call result will be used
/// to evaluate a conditional of whether a thread will execute
/// body code or not.
/// \param HasFinalize indicate if the directive will require finalization
/// and has a finalization callback in the stack that
/// should be called.
/// \param IsCancellable if HasFinalize is set to true, indicate if the
/// directive should be cancellable.
/// \return The insertion point after the region
InsertPointTy
EmitOMPInlinedRegion(omp::Directive OMPD, Instruction *EntryCall,
Instruction *ExitCall, BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, bool Conditional = false,
bool HasFinalize = true, bool IsCancellable = false);
/// Get the platform-specific name separator.
/// \param Parts different parts of the final name that needs separation
/// \param FirstSeparator First separator used between the initial two
/// parts of the name.
/// \param Separator separator used between all of the rest consecutive
/// parts of the name
static std::string getNameWithSeparators(ArrayRef<StringRef> Parts,
StringRef FirstSeparator,
StringRef Separator);
/// Gets (if variable with the given name already exist) or creates
/// internal global variable with the specified Name. The created variable has
/// linkage CommonLinkage by default and is initialized by null value.
/// \param Ty Type of the global variable. If it already exists, the type
/// must be the same.
/// \param Name Name of the variable.
Constant *getOrCreateOMPInternalVariable(Type *Ty, const Twine &Name,
unsigned AddressSpace = 0);
/// Returns corresponding lock object for the specified critical region
/// name. If the lock object does not exist it is created, otherwise the
/// reference to the existing copy is returned.
/// \param CriticalName Name of the critical region.
///
Value *getOMPCriticalRegionLock(StringRef CriticalName);
/// Callback type for Atomic Expression update
/// ex:
/// \code{.cpp}
/// unsigned x = 0;
/// #pragma omp atomic update
/// x = Expr(x_old); //Expr() is any legal operation
/// \endcode
///
/// \param XOld the value of the atomic memory address to use for update
/// \param IRB reference to the IRBuilder to use
///
/// \returns Value to update X to.
using AtomicUpdateCallbackTy =
const function_ref<Value *(Value *XOld, IRBuilder<> &IRB)>;
private:
enum AtomicKind { Read, Write, Update, Capture, Compare };
/// Determine whether to emit flush or not
///
/// \param Loc The insert and source location description.
/// \param AO The required atomic ordering
/// \param AK The OpenMP atomic operation kind used.
///
/// \returns whether a flush was emitted or not
bool checkAndEmitFlushAfterAtomic(const LocationDescription &Loc,
AtomicOrdering AO, AtomicKind AK);
/// Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X
/// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
/// Only Scalar data types.
///
/// \param AllocaIP The insertion point to be used for alloca
/// instructions.
/// \param X The target atomic pointer to be updated
/// \param XElemTy The element type of the atomic pointer.
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic
/// instructions.
/// \param RMWOp The binary operation used for update. If
/// operation is not supported by atomicRMW,
/// or belong to {FADD, FSUB, BAD_BINOP}.
/// Then a `cmpExch` based atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param VolatileX true if \a X volatile?
/// \param IsXBinopExpr true if \a X is Left H.S. in Right H.S. part of the
/// update expression, false otherwise.
/// (e.g. true for X = X BinOp Expr)
///
/// \returns A pair of the old value of X before the update, and the value
/// used for the update.
std::pair<Value *, Value *>
emitAtomicUpdate(InsertPointTy AllocaIP, Value *X, Type *XElemTy, Value *Expr,
AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp, bool VolatileX,
bool IsXBinopExpr);
/// Emit the binary op. described by \p RMWOp, using \p Src1 and \p Src2 .
///
/// \Return The instruction
Value *emitRMWOpAsInstruction(Value *Src1, Value *Src2,
AtomicRMWInst::BinOp RMWOp);
public:
/// a struct to pack relevant information while generating atomic Ops
struct AtomicOpValue {
Value *Var = nullptr;
Type *ElemTy = nullptr;
bool IsSigned = false;
bool IsVolatile = false;
};
/// Emit atomic Read for : V = X --- Only Scalar data types.
///
/// \param Loc The insert and source location description.
/// \param X The target pointer to be atomically read
/// \param V Memory address where to store atomically read
/// value
/// \param AO Atomic ordering of the generated atomic
/// instructions.
///
/// \return Insertion point after generated atomic read IR.
InsertPointTy createAtomicRead(const LocationDescription &Loc,
AtomicOpValue &X, AtomicOpValue &V,
AtomicOrdering AO);
/// Emit atomic write for : X = Expr --- Only Scalar data types.
///
/// \param Loc The insert and source location description.
/// \param X The target pointer to be atomically written to
/// \param Expr The value to store.
/// \param AO Atomic ordering of the generated atomic
/// instructions.
///
/// \return Insertion point after generated atomic Write IR.
InsertPointTy createAtomicWrite(const LocationDescription &Loc,
AtomicOpValue &X, Value *Expr,
AtomicOrdering AO);
/// Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X
/// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
/// Only Scalar data types.
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion point to be used for alloca instructions.
/// \param X The target atomic pointer to be updated
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic instructions.
/// \param RMWOp The binary operation used for update. If operation
/// is not supported by atomicRMW, or belong to
/// {FADD, FSUB, BAD_BINOP}. Then a `cmpExch` based
/// atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param IsXBinopExpr true if \a X is Left H.S. in Right H.S. part of the
/// update expression, false otherwise.
/// (e.g. true for X = X BinOp Expr)
///
/// \return Insertion point after generated atomic update IR.
InsertPointTy createAtomicUpdate(const LocationDescription &Loc,
InsertPointTy AllocaIP, AtomicOpValue &X,
Value *Expr, AtomicOrdering AO,
AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp,
bool IsXBinopExpr);
/// Emit atomic update for constructs: --- Only Scalar data types
/// V = X; X = X BinOp Expr ,
/// X = X BinOp Expr; V = X,
/// V = X; X = Expr BinOp X,
/// X = Expr BinOp X; V = X,
/// V = X; X = UpdateOp(X),
/// X = UpdateOp(X); V = X,
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion point to be used for alloca instructions.
/// \param X The target atomic pointer to be updated
/// \param V Memory address where to store captured value
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic instructions
/// \param RMWOp The binary operation used for update. If
/// operation is not supported by atomicRMW, or belong to
/// {FADD, FSUB, BAD_BINOP}. Then a cmpExch based
/// atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param UpdateExpr true if X is an in place update of the form
/// X = X BinOp Expr or X = Expr BinOp X
/// \param IsXBinopExpr true if X is Left H.S. in Right H.S. part of the
/// update expression, false otherwise.
/// (e.g. true for X = X BinOp Expr)
/// \param IsPostfixUpdate true if original value of 'x' must be stored in
/// 'v', not an updated one.
///
/// \return Insertion point after generated atomic capture IR.
InsertPointTy
createAtomicCapture(const LocationDescription &Loc, InsertPointTy AllocaIP,
AtomicOpValue &X, AtomicOpValue &V, Value *Expr,
AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp, bool UpdateExpr,
bool IsPostfixUpdate, bool IsXBinopExpr);
/// Emit atomic compare for constructs: --- Only scalar data types
/// cond-update-atomic:
/// x = x ordop expr ? expr : x;
/// x = expr ordop x ? expr : x;
/// x = x == e ? d : x;
/// x = e == x ? d : x; (this one is not in the spec)
/// cond-update-stmt:
/// if (x ordop expr) { x = expr; }
/// if (expr ordop x) { x = expr; }
/// if (x == e) { x = d; }
/// if (e == x) { x = d; } (this one is not in the spec)
///
/// \param Loc The insert and source location description.
/// \param X The target atomic pointer to be updated.
/// \param E The expected value ('e') for forms that use an
/// equality comparison or an expression ('expr') for
/// forms that use 'ordop' (logically an atomic maximum or
/// minimum).
/// \param D The desired value for forms that use an equality
/// comparison. If forms that use 'ordop', it should be
/// \p nullptr.
/// \param AO Atomic ordering of the generated atomic instructions.
/// \param Op Atomic compare operation. It can only be ==, <, or >.
/// \param IsXBinopExpr True if the conditional statement is in the form where
/// x is on LHS. It only matters for < or >.
///
/// \return Insertion point after generated atomic capture IR.
InsertPointTy createAtomicCompare(const LocationDescription &Loc,
AtomicOpValue &X, Value *E, Value *D,
AtomicOrdering AO,
omp::OMPAtomicCompareOp Op,
bool IsXBinopExpr);
/// Create the control flow structure of a canonical OpenMP loop.
///
/// The emitted loop will be disconnected, i.e. no edge to the loop's
/// preheader and no terminator in the AfterBB. The OpenMPIRBuilder's
/// IRBuilder location is not preserved.
///
/// \param DL DebugLoc used for the instructions in the skeleton.
/// \param TripCount Value to be used for the trip count.
/// \param F Function in which to insert the BasicBlocks.
/// \param PreInsertBefore Where to insert BBs that execute before the body,
/// typically the body itself.
/// \param PostInsertBefore Where to insert BBs that execute after the body.
/// \param Name Base name used to derive BB
/// and instruction names.
///
/// \returns The CanonicalLoopInfo that represents the emitted loop.
CanonicalLoopInfo *createLoopSkeleton(DebugLoc DL, Value *TripCount,
Function *F,
BasicBlock *PreInsertBefore,
BasicBlock *PostInsertBefore,
const Twine &Name = {});
};
/// Class to represent the control flow structure of an OpenMP canonical loop.
///
/// The control-flow structure is standardized for easy consumption by
/// directives associated with loops. For instance, the worksharing-loop
/// construct may change this control flow such that each loop iteration is
/// executed on only one thread. The constraints of a canonical loop in brief
/// are:
///
/// * The number of loop iterations must have been computed before entering the
/// loop.
///
/// * Has an (unsigned) logical induction variable that starts at zero and
/// increments by one.
///
/// * The loop's CFG itself has no side-effects. The OpenMP specification
/// itself allows side-effects, but the order in which they happen, including
/// how often or whether at all, is unspecified. We expect that the frontend
/// will emit those side-effect instructions somewhere (e.g. before the loop)
/// such that the CanonicalLoopInfo itself can be side-effect free.
///
/// Keep in mind that CanonicalLoopInfo is meant to only describe a repeated
/// execution of a loop body that satisfies these constraints. It does NOT
/// represent arbitrary SESE regions that happen to contain a loop. Do not use
/// CanonicalLoopInfo for such purposes.
///
/// The control flow can be described as follows:
///
/// Preheader
/// |
/// /-> Header
/// | |
/// | Cond---\
/// | | |
/// | Body |
/// | | | |
/// | <...> |
/// | | | |
/// \--Latch |
/// |
/// Exit
/// |
/// After
///
/// The loop is thought to start at PreheaderIP (at the Preheader's terminator,
/// including) and end at AfterIP (at the After's first instruction, excluding).
/// That is, instructions in the Preheader and After blocks (except the
/// Preheader's terminator) are out of CanonicalLoopInfo's control and may have
/// side-effects. Typically, the Preheader is used to compute the loop's trip
/// count. The instructions from BodyIP (at the Body block's first instruction,
/// excluding) until the Latch are also considered outside CanonicalLoopInfo's
/// control and thus can have side-effects. The body block is the single entry
/// point into the loop body, which may contain arbitrary control flow as long
/// as all control paths eventually branch to the Latch block.
///
/// TODO: Consider adding another standardized BasicBlock between Body CFG and
/// Latch to guarantee that there is only a single edge to the latch. It would
/// make loop transformations easier to not needing to consider multiple
/// predecessors of the latch (See redirectAllPredecessorsTo) and would give us
/// an equivalent to PreheaderIP, AfterIP and BodyIP for inserting code that
/// executes after each body iteration.
///
/// There must be no loop-carried dependencies through llvm::Values. This is
/// equivalent to that the Latch has no PHINode and the Header's only PHINode is
/// for the induction variable.
///
/// All code in Header, Cond, Latch and Exit (plus the terminator of the
/// Preheader) are CanonicalLoopInfo's responsibility and their build-up checked
/// by assertOK(). They are expected to not be modified unless explicitly
/// modifying the CanonicalLoopInfo through a method that applies an OpenMP
/// loop-associated construct such as applyWorkshareLoop, tileLoops, unrollLoop,
/// etc. These methods usually invalidate the CanonicalLoopInfo and re-use its
/// basic blocks. After invalidation, the CanonicalLoopInfo must not be used
/// anymore as its underlying control flow may not exist anymore.
/// Loop-transformation methods such as tileLoops, collapseLoops and unrollLoop
/// may also return a new CanonicalLoopInfo that can be passed to other
/// loop-associated construct implementing methods. These loop-transforming
/// methods may either create a new CanonicalLoopInfo usually using
/// createLoopSkeleton and invalidate the input CanonicalLoopInfo, or reuse and
/// modify one of the input CanonicalLoopInfo and return it as representing the
/// modified loop. What is done is an implementation detail of
/// transformation-implementing method and callers should always assume that the
/// CanonicalLoopInfo passed to it is invalidated and a new object is returned.
/// Returned CanonicalLoopInfo have the same structure and guarantees as the one
/// created by createCanonicalLoop, such that transforming methods do not have
/// to special case where the CanonicalLoopInfo originated from.
///
/// Generally, methods consuming CanonicalLoopInfo do not need an
/// OpenMPIRBuilder::InsertPointTy as argument, but use the locations of the
/// CanonicalLoopInfo to insert new or modify existing instructions. Unless
/// documented otherwise, methods consuming CanonicalLoopInfo do not invalidate
/// any InsertPoint that is outside CanonicalLoopInfo's control. Specifically,
/// any InsertPoint in the Preheader, After or Block can still be used after
/// calling such a method.
///
/// TODO: Provide mechanisms for exception handling and cancellation points.
///
/// Defined outside OpenMPIRBuilder because nested classes cannot be
/// forward-declared, e.g. to avoid having to include the entire OMPIRBuilder.h.
class CanonicalLoopInfo {
  friend class OpenMPIRBuilder;

private:
  // Entry block of each iteration; holds the induction-variable PHI.
  BasicBlock *Header = nullptr;
  // Decides whether another iteration is executed; branches to body or exit.
  BasicBlock *Cond = nullptr;
  // End of the canonical loop body; increments the induction variable.
  BasicBlock *Latch = nullptr;
  // Reached when no more iterations are executed.
  BasicBlock *Exit = nullptr;

  /// Add the control blocks of this loop to \p BBs.
  ///
  /// This does not include any block from the body, including the one returned
  /// by getBody().
  ///
  /// FIXME: This currently includes the Preheader and After blocks even though
  /// their content is (mostly) not under CanonicalLoopInfo's control.
  /// Re-evaluate whether this makes sense.
  void collectControlBlocks(SmallVectorImpl<BasicBlock *> &BBs);

public:
  /// Returns whether this object currently represents the IR of a loop. If
  /// returning false, it may have been consumed by a loop transformation or not
  /// been initialized. Do not use in this case.
  bool isValid() const { return Header; }

  /// The preheader ensures that there is only a single edge entering the loop.
  /// Code that must be executed before any loop iteration can be emitted here,
  /// such as computing the loop trip count and begin lifetime markers. Code in
  /// the preheader is not considered part of the canonical loop.
  BasicBlock *getPreheader() const;

  /// The header is the entry for each iteration. In the canonical control flow,
  /// it only contains the PHINode for the induction variable.
  BasicBlock *getHeader() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Header;
  }

  /// The condition block computes whether there is another loop iteration. If
  /// yes, branches to the body; otherwise to the exit block.
  BasicBlock *getCond() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Cond;
  }

  /// The body block is the single entry for a loop iteration and not controlled
  /// by CanonicalLoopInfo. It can contain arbitrary control flow but must
  /// eventually branch to the \p Latch block.
  BasicBlock *getBody() const {
    assert(isValid() && "Requires a valid canonical loop");
    // The body is, by construction, the taken successor of the condition.
    return cast<BranchInst>(Cond->getTerminator())->getSuccessor(0);
  }

  /// Reaching the latch indicates the end of the loop body code. In the
  /// canonical control flow, it only contains the increment of the induction
  /// variable.
  BasicBlock *getLatch() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Latch;
  }

  /// Reaching the exit indicates no more iterations are being executed.
  BasicBlock *getExit() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Exit;
  }

  /// The after block is intended for clean-up code such as lifetime end
  /// markers. It is separate from the exit block to ensure, analogous to the
  /// preheader, it having just a single entry edge and being free from PHI
  /// nodes should there be multiple loop exits (such as from break
  /// statements/cancellations).
  BasicBlock *getAfter() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Exit->getSingleSuccessor();
  }

  /// Returns the llvm::Value containing the number of loop iterations. It must
  /// be valid in the preheader and always interpreted as an unsigned integer of
  /// any bit-width.
  Value *getTripCount() const {
    assert(isValid() && "Requires a valid canonical loop");
    // The first instruction of Cond compares the IV against the trip count;
    // the trip count is its second operand.
    Instruction *CmpI = &Cond->front();
    assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount");
    return CmpI->getOperand(1);
  }

  /// Returns the instruction representing the current logical induction
  /// variable. Always unsigned, always starting at 0 with an increment of one.
  Instruction *getIndVar() const {
    assert(isValid() && "Requires a valid canonical loop");
    Instruction *IndVarPHI = &Header->front();
    assert(isa<PHINode>(IndVarPHI) && "First inst must be the IV PHI");
    return IndVarPHI;
  }

  /// Return the type of the induction variable (and the trip count).
  Type *getIndVarType() const {
    assert(isValid() && "Requires a valid canonical loop");
    return getIndVar()->getType();
  }

  /// Return the insertion point for user code before the loop.
  OpenMPIRBuilder::InsertPointTy getPreheaderIP() const {
    assert(isValid() && "Requires a valid canonical loop");
    // Insert at (before) the preheader's terminator.
    BasicBlock *Preheader = getPreheader();
    return {Preheader, std::prev(Preheader->end())};
  };

  /// Return the insertion point for user code in the body.
  OpenMPIRBuilder::InsertPointTy getBodyIP() const {
    assert(isValid() && "Requires a valid canonical loop");
    BasicBlock *Body = getBody();
    return {Body, Body->begin()};
  };

  /// Return the insertion point for user code after the loop.
  OpenMPIRBuilder::InsertPointTy getAfterIP() const {
    assert(isValid() && "Requires a valid canonical loop");
    BasicBlock *After = getAfter();
    return {After, After->begin()};
  };

  /// Return the function this canonical loop is contained in.
  Function *getFunction() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Header->getParent();
  }

  /// Consistency self-check.
  void assertOK() const;

  /// Invalidate this loop. That is, the underlying IR does not fulfill the
  /// requirements of an OpenMP canonical loop anymore.
  void invalidate();
};
} // end namespace llvm
#endif // LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
|
test.c |
#include <stdio.h>
#include <stdlib.h> /* malloc, free */
#include <omp.h>

#pragma omp requires unified_shared_memory

#include "../utilities/check.h"
#include "../utilities/utilities.h"
#define HOST_MAX_TEAMS 128
#define TRIALS (1)
#define N (992)
#define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;})
#define ZERO(X) ZERO_ARRAY(N, X)
/*
 * Exercises the OpenMP `target teams` construct: the if, device, map,
 * num_teams and thread_limit clauses, control flow inside a teams region,
 * and the private/firstprivate data-sharing clauses.  Each sub-test prints
 * its own "Succeeded"/"Failed" line so a failing clause can be identified
 * from the output alone.
 *
 * Fixes vs. the original: the malloc result is checked, `fail` is reset
 * before every sub-test (previously a failure in the first test made all
 * later, passing tests print "Failed"), and pA is freed before returning.
 */
int main(void) {
  check_offloading();
  double A[N], B[N], C[N], D[N], E[N]; /* C, D, E are filled by INIT() */
  double *pA = malloc(N * sizeof(double));
  int fail = 0;

  /* Do not dereference NULL in the map-clause test below. */
  if (pA == NULL) {
    fprintf(stderr, "out of memory\n");
    return 1;
  }
  INIT();

  //
  // Test: if clause
  //
  ZERO(A);
  int num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 512;
  // the number of teams started is implementation dependent
  int actual_teams = -1;
  for (int t = 0; t < TRIALS; t++) {
    // if(0): the region is executed on the host
    #pragma omp target teams if(0) map(tofrom:actual_teams)
    {
      if (omp_get_team_num() == 0)
        actual_teams = omp_get_num_teams();
      A[omp_get_team_num()] += omp_get_team_num();
    }
  }
  for (int i = 0; i < actual_teams; i++)
    if (A[i] != i*TRIALS) {
      printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, A[i]);
      fail = 1;
    }
  if (fail) printf("Failed\n");
  else printf("Succeeded\n");

  //
  // Test: device clause
  //
  ZERO(A);
  fail = 0; /* do not carry a failure over from the previous sub-test */
  num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 512;
  for (int t = 0; t < TRIALS; t++) {
    #pragma omp target teams device(0) map(tofrom:actual_teams)
    {
      if (omp_get_team_num() == 0)
        actual_teams = omp_get_num_teams();
      A[omp_get_team_num()] += omp_get_team_num();
    }
  }
  for (int i = 0; i < actual_teams; i++)
    if (A[i] != i*TRIALS) {
      printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, A[i]);
      fail = 1;
    }
  if (fail) printf("Failed\n");
  else printf("Succeeded\n");

  //
  // Test: map clause
  //
  ZERO(pA);
  fail = 0;
  num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 512;
  for (int t = 0; t < TRIALS; t++) {
    /* heap storage must be mapped explicitly: map(pA[:N]) */
    #pragma omp target teams map(pA[:N]) map(tofrom:actual_teams)
    {
      if (omp_get_team_num() == 0)
        actual_teams = omp_get_num_teams();
      pA[omp_get_team_num()] += omp_get_team_num();
    }
  }
  for (int i = 0; i < actual_teams; i++)
    if (pA[i] != i*TRIALS) {
      printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, pA[i]);
      fail = 1;
    }
  if (fail) printf("Failed\n");
  else printf("Succeeded\n");

  //
  // Test: num_teams and omp_get_team_num()
  //
  ZERO(A);
  fail = 0;
  num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 512;
  for (int t = 0; t < TRIALS; t++) {
    #pragma omp target teams num_teams(num_teams)
    {
      A[omp_get_team_num()] += omp_get_team_num();
    }
  }
  for (int i = 0; i < num_teams; i++)
    if (A[i] != i*TRIALS) {
      printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, A[i]);
      fail = 1;
    }
  if (fail) printf("Failed\n");
  else printf("Succeeded\n");

  //
  // Test: thread_limit and omp_get_thread_num()
  //
  ZERO(A);
  fail = 0;
  int num_threads = omp_is_initial_device() ? HOST_MAX_TEAMS : 256;
  for (int t = 0; t < TRIALS; t++) {
    #pragma omp target teams num_teams(1) thread_limit(num_threads)
    #pragma omp parallel
    {
      int tid = omp_get_thread_num();
      A[tid] += (double) tid;
    }
  }
  for (int i = 0; i < num_threads; i++)
    if (A[i] != i*TRIALS) {
      printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, A[i]);
      fail = 1;
    }
  if (fail) printf("Failed\n");
  else printf("Succeeded\n");

  //
  // Test: if statement in teams region
  //
  ZERO(A);
  fail = 0;
  num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 512;
  for (int t = 0; t < TRIALS; t++) {
    #pragma omp target teams num_teams(num_teams)
    {
      /* even-numbered teams add 1, odd-numbered teams add 2 */
      if (omp_get_team_num() % 2 == 0) {
        int teid = omp_get_team_num();
        A[teid] += (double) 1;
      }
      else {
        int teid = omp_get_team_num();
        A[teid] += (double) 2;
      }
    }
  }
  for (int i = 0; i < num_teams; i++) {
    if (i % 2 == 0) {
      if (A[i] != TRIALS) {
        printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
        fail = 1;
      }
    } else
      if (A[i] != 2*TRIALS) {
        printf("Error at %d, h = %lf, d = %lf\n", i, (double) 2*TRIALS, A[i]);
        fail = 1;
      }
  }
  if (fail) printf("Failed\n");
  else printf("Succeeded\n");

  /* // */
  /* // Test: num_teams and thread_limit by simulating a distribute pragma */
  /* // (intentionally disabled; kept for reference) */
  /* // */
  /* ZERO(A); */
  /* fail = 0; */
  /* for (int t = 0 ; t < TRIALS ; t++) { */
  /*   #pragma omp target teams num_teams(2) thread_limit(496) */
  /*   { */
  /*     if (omp_get_team_num() == 0) { */
  /*       #pragma omp parallel */
  /*       { */
  /*         A[omp_get_team_num()*496+omp_get_thread_num()] += omp_get_thread_num(); */
  /*       } */
  /*     } else { */
  /*       #pragma omp parallel */
  /*       { */
  /*         A[omp_get_team_num()*496+omp_get_thread_num()] -= omp_get_thread_num(); */
  /*       } */
  /*     } */
  /*   } */
  /* } */

  //
  // Test: private
  //
  ZERO(A);
  fail = 0;
  int a = 10;
  num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 256;
  for (int t = 0; t < TRIALS; t++) {
    /* each team gets its own uninitialized copy of `a` */
    #pragma omp target teams num_teams(num_teams) private(a)
    {
      a = omp_get_team_num();
      A[omp_get_team_num()] += a;
    }
  }
  for (int i = 0; i < num_teams; i++)
    if (A[i] != i*TRIALS) {
      printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, A[i]);
      fail = 1;
    }
  if (fail) printf("Failed\n");
  else printf("Succeeded\n");

  //
  // Test: firstprivate
  //
  ZERO(A);
  fail = 0;
  a = 10;
  num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 256;
  for (int t = 0; t < TRIALS; t++) {
    /* each team's copy of `a` starts at the host value 10 */
    #pragma omp target teams num_teams(num_teams) firstprivate(a)
    {
      a += omp_get_team_num();
      A[omp_get_team_num()] += a;
    }
  }
  for (int i = 0; i < num_teams; i++)
    if (A[i] != 10+i*TRIALS) {
      printf("Error at %d, h = %lf, d = %lf\n", i, (double) (10+i*TRIALS), A[i]);
      fail = 1;
    }
  if (fail) printf("Failed\n");
  else printf("Succeeded\n");

  free(pA);
  return 0;
}
|
opencl_keyring_fmt_plug.c | /*
* This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net>,
* Copyright (c) 2012 Dhiru Kholia <dhiru at openwall.com> and
* Copyright (c) 2012-2014 magnum
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted. */
#ifdef HAVE_OPENCL
#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_keyring;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_keyring);
#else
#include <string.h>
#include "aes.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include "arch.h"
#include "formats.h"
#include "common.h"
#include "misc.h"
#include "common-opencl.h"
#include "options.h"
#include "sha2.h"
#include "md5.h"
#include "stdint.h"
#define FORMAT_LABEL "keyring-opencl"
#define FORMAT_NAME "GNOME Keyring"
#define FORMAT_TAG "$keyring$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "SHA256 OpenCL AES"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define PLAINTEXT_LENGTH (55-8)
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN 4
#define SALTLEN 8
typedef unsigned char guchar; /* How many aliases do we need?! */
typedef unsigned int guint;
typedef int gint;
typedef struct {
uint32_t length;
uint8_t v[PLAINTEXT_LENGTH];
} keyring_password;
typedef struct {
uint8_t key[16];
uint8_t iv[16];
} keyring_hash;
typedef struct {
uint32_t length;
uint32_t iterations;
uint8_t salt[SALTLEN];
} keyring_salt;
static int *cracked;
static int any_cracked;
static struct custom_salt {
unsigned int iterations;
unsigned char salt[SALTLEN];
unsigned int crypto_size;
unsigned int inlined;
unsigned char ct[LINE_BUFFER_SIZE / 2]; /* after hex conversion */
} *cur_salt;
static struct fmt_tests keyring_tests[] = {
{"$keyring$db1b562e453a0764*3221*16*0*02b5c084e4802369c42507300f2e5e56", "openwall"},
{"$keyring$4f3f1557a7da17f5*2439*144*0*12215fabcff6782aa23605ab2cd843f7be9477b172b615eaa9130836f189d32ffda2e666747378f09c6e76ad817154daae83a36c0a0a35f991d40bcfcba3b7807ef57a0ce4c7f835bf34c6e358f0d66aa048d73dacaaaf6d7fa4b3510add6b88cc237000ff13cb4dbd132db33be3ea113bedeba80606f86662cc226af0dad789c703a7df5ad8700542e0f7a5e1f10cf0", "password"},
{NULL}
};
static keyring_password *inbuffer;
static keyring_hash *outbuffer;
static keyring_salt currentsalt;
static cl_mem mem_in, mem_out, mem_setting;
static struct fmt_main *self;
#define insize (sizeof(keyring_password) * global_work_size)
#define outsize (sizeof(keyring_hash) * global_work_size)
#define settingsize (sizeof(keyring_salt))
#define cracked_size (sizeof(*cracked) * global_work_size)
#define STEP 0
#define SEED 256
static const char * warn[] = {
"xfer: " , ", crypt: " , ", xfer: "
};
//This file contains auto-tuning routine(s). It has to be included after formats definitions.
#include "opencl-autotune.h"
#include "memdbg.h"
/* ------- Helper functions ------- */
/* Thin shim for the autotune library: report the largest work-group
 * size the crypt kernel can use on this device. */
static size_t get_task_max_work_group_size()
{
	size_t max_wgs;

	max_wgs = autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
	return max_wgs;
}
/* Allocate host buffers and OpenCL device buffers for `global_work_size`
 * candidate passwords, then bind the device buffers to the kernel
 * arguments.  Called by the shared autotune code (see reset()). */
static void create_clobj(size_t global_work_size, struct fmt_main *self)
{
	cl_int cl_error;

	/* insize/outsize/cracked_size are macros that scale with
	   global_work_size */
	inbuffer = (keyring_password*) mem_calloc(1, insize);
	outbuffer = (keyring_hash*) mem_alloc(outsize);
	cracked = mem_calloc(1, cracked_size);

	/// Allocate memory
	mem_in =
	    clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL,
	    &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem in");
	mem_setting =
	    clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize,
	    NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem setting");
	mem_out =
	    clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL,
	    &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem out");

	/* kernel arg 0 = passwords in, 1 = key/iv out, 2 = salt/settings */
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in),
		&mem_in), "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out),
		&mem_out), "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting),
		&mem_setting), "Error while setting mem_salt kernel argument");
}
/* Free the device buffers and host-side arrays allocated by
 * create_clobj().  Safe to call when nothing was allocated: `cracked`
 * doubles as the "buffers exist" flag. */
static void release_clobj(void)
{
	if (!cracked)
		return;

	HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
	HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
	HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");

	MEM_FREE(inbuffer);
	MEM_FREE(outbuffer);
	MEM_FREE(cracked);
}
/* Format teardown: release buffers, kernel and program, but only if
 * auto-tuning actually built them. */
static void done(void)
{
	if (!autotuned)
		return;

	release_clobj();
	HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
	HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
	autotuned--;
}
/* Formats-API init hook: remember our fmt_main and ready the OpenCL
 * device.  Kernel compilation is deferred to reset(). */
static void init(struct fmt_main *_self)
{
	self = _self;
	opencl_prepare_dev(gpu_id);
}
/* Build the OpenCL kernel and run auto-tuning the first time a password
 * database is loaded; later resets are no-ops (guarded by `autotuned`). */
static void reset(struct db_main *db)
{
	if (!autotuned) {
		char build_opts[64];
		cl_int cl_error;

		/* bake buffer geometry into the kernel at compile time */
		snprintf(build_opts, sizeof(build_opts),
		         "-DPLAINTEXT_LENGTH=%d -DSALTLEN=%d",
		         PLAINTEXT_LENGTH, SALTLEN);
		opencl_init("$JOHN/kernels/keyring_kernel.cl",
		            gpu_id, build_opts);

		crypt_kernel = clCreateKernel(program[gpu_id], "keyring", &cl_error);
		HANDLE_CLERROR(cl_error, "Error creating kernel");

		// Initialize openCL tuning (library) for this format.
		opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self,
		                       create_clobj, release_clobj,
		                       sizeof(keyring_password), 0, db);

		//Auto tune execution from shared/included code.
		autotune_run(self, 1, 0, cpu(device_info[gpu_id]) ?
		             500000000ULL : 1000000000ULL);
	}
}
/* Accept only non-empty strings of 1..9 decimal digits.
 *
 * The length cap keeps the value comfortably inside a 32-bit int, so the
 * later atoi() calls cannot overflow (which would be UB).  Rejecting the
 * empty string is new: the original returned 1 for "", silently letting
 * atoi("") produce 0. */
static int looks_like_nice_int(char *p)
{
	size_t len = strlen(p);

	/* reasonability check + avoids atoi's UB */
	if (len == 0 || len > 9)
		return 0;
	for (; *p; p++)
		if (*p < '0' || *p > '9')
			return 0;
	return 1;
}
/* Sanity-check one hash line of the form
 *   $keyring$<salt:16 hex>*<iterations>*<crypto_size>*<inlined>*<ct hex>
 * Returns 1 if well-formed, 0 otherwise.  Parses a private copy because
 * strtokm() mutates its input. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr, *p;
	int ctlen, extra;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	if (keeptr == NULL)
		goto err;
	ctcopy += FORMAT_TAG_LEN;
	if ((p = strtokm(ctcopy, "*")) == NULL) /* salt */
		goto err;
	/* exactly SALTLEN bytes of hex, nothing trailing */
	if (hexlenl(p, &extra) != SALTLEN * 2 || extra)
		goto err;
	while (*p)
		if (atoi16[ARCH_INDEX(*p++)] == 0x7f)
			goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* iterations */
		goto err;
	if (!looks_like_nice_int(p))
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* crypto size */
		goto err;
	if (!looks_like_nice_int(p))
		goto err;
	ctlen = atoi(p);
	/* must fit in the post-hex-conversion buffer cur_salt->ct */
	if (ctlen > sizeof(cur_salt->ct))
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* inlined - unused? TODO */
		goto err;
	if (!looks_like_nice_int(p))
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* ciphertext */
		goto err;
	if (ctlen > LINE_BUFFER_SIZE)
		goto err;
	/* hex length must match the declared crypto size */
	if (hexlenl(p, &extra) != ctlen * 2 || extra)
		goto err;
	if (strlen(p) < 32) /* this shouldn't happen for valid hashes */
		goto err;
	while (*p)
		if (atoi16l[ARCH_INDEX(*p++)] == 0x7f)
			goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}
/* Parse a (previously validated) ciphertext line into a custom_salt.
 * Returns a pointer to static storage, as the formats API expects;
 * set_salt() later copies the chosen salt into place. */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;
	static struct custom_salt cs;

	memset(&cs, 0, sizeof(cs));
	/* lazily allocated once; lives for the whole run */
	if (!cur_salt)
		cur_salt = mem_alloc_tiny(sizeof(struct custom_salt),
		                          MEM_ALIGN_WORD);
	ctcopy += FORMAT_TAG_LEN; /* skip over "$keyring$" */
	p = strtokm(ctcopy, "*");
	/* hex -> binary salt */
	for (i = 0; i < SALTLEN; i++)
		cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
		    + atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*");
	cs.iterations = atoi(p);
	p = strtokm(NULL, "*");
	cs.crypto_size = atoi(p);
	p = strtokm(NULL, "*");
	cs.inlined = atoi(p);
	p = strtokm(NULL, "*");
	/* hex -> binary encrypted blob; size was bounded by valid() */
	for (i = 0; i < cs.crypto_size; i++)
		cs.ct[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
		    + atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)&cs;
}
/* Select the active salt and upload its device-side settings (salt
 * bytes, length, iteration count) to the GPU.
 *
 * Fix: the write-buffer call read "&curren;tsalt" — the '&curren'
 * of "&currentsalt" had been mangled into the HTML entity for the
 * currency sign.  Restored to &currentsalt. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
	memcpy((char*)currentsalt.salt, cur_salt->salt, SALTLEN);
	currentsalt.length = SALTLEN;
	currentsalt.iterations = cur_salt->iterations;

	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
	               CL_FALSE, 0, settingsize,
	               &currentsalt, 0, NULL, NULL),
	               "Copy setting to gpu");
}
/* Store a candidate password in the host input buffer at `index`.
 * Keys longer than PLAINTEXT_LENGTH are truncated (the kernel buffer is
 * fixed-size).
 *
 * Fix: measure the length in a size_t and clamp BEFORE narrowing; the
 * original stored strlen() straight into a uint8_t, so a key longer
 * than 255 bytes would wrap around and bypass the bounds check. */
static void keyring_set_key(char *key, int index)
{
	size_t length = strlen(key);

	if (length > PLAINTEXT_LENGTH)
		length = PLAINTEXT_LENGTH;
	inbuffer[index].length = length;
	memcpy(inbuffer[index].v, key, length);
}
/* Reproduce the candidate stored at `index` as a NUL-terminated string
 * (the formats API requires get_key to mirror set_key). */
static char *get_key(int index)
{
	static char out[PLAINTEXT_LENGTH + 1];
	unsigned int n = inbuffer[index].length;

	memcpy(out, inbuffer[index].v, n);
	out[n] = '\0';
	return out;
}
/* Check a decrypted keyring blob: the first 16 bytes are the MD5 of the
 * remaining (len - 16) payload bytes.  Returns 1 on match, 0 otherwise.
 * Fix: guard len < 16 so `len - 16` cannot go negative and be passed to
 * MD5_Update as a huge size_t. */
static int verify_decrypted_buffer(unsigned char *buffer, int len)
{
	guchar digest[16];
	MD5_CTX ctx;
	if (len < 16)
		return 0;
	MD5_Init(&ctx);
	MD5_Update(&ctx, buffer + 16, len - 16);
	MD5_Final(digest, &ctx);
	return memcmp(buffer, digest, 16) == 0;
}
/* One crypt batch: the OpenCL kernel derives per-candidate key material
 * (key + IV, produced in outbuffer[]); the host then AES-CBC-decrypts the
 * salt's ciphertext and verifies the embedded MD5.  Sets cracked[] /
 * any_cracked for the cmp_* callbacks.  Returns the candidate count. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;
	size_t *lws = local_work_size ? &local_work_size : NULL;
	global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);
	/* reset per-batch results from the previous round */
	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}
	/// Copy data to gpu
	BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
		insize, inbuffer, 0, NULL, multi_profilingEvent[0]), "Copy data to gpu");
	/// Run kernel
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
		NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[1]),
		"Run kernel");
	BENCH_CLERROR(clFinish(queue[gpu_id]), "clFinish");
	/// Read the result back
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_FALSE, 0,
		outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back");
	/// Await completion of all the above
	BENCH_CLERROR(clFinish(queue[gpu_id]), "clFinish");
	/* during autotuning only the GPU timing matters; skip CPU post-work */
	if (ocl_autotune_running)
		return count;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		unsigned char buffer[LINE_BUFFER_SIZE / 2];
		unsigned char iv[16];
		AES_KEY akey;
		unsigned char *p = outbuffer[index].iv;
		memcpy(iv, p, 16);
		/* work on a private copy: AES_cbc_encrypt decrypts in place */
		memcpy(buffer, cur_salt->ct, cur_salt->crypto_size);
		memset(&akey, 0, sizeof(AES_KEY));
		if (AES_set_decrypt_key(outbuffer[index].key, 128, &akey) < 0) {
			fprintf(stderr, "AES_set_decrypt_key failed!\n");
		}
		AES_cbc_encrypt(buffer, buffer, cur_salt->crypto_size, &akey, iv, AES_DECRYPT);
		/* a valid MD5-over-payload header means the password is right */
		if (verify_decrypted_buffer(buffer, cur_salt->crypto_size))
		{
			cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}
	}
	return count;
}
/* Any hit in this batch?  crypt_all() already did full verification. */
static int cmp_all(void *binary, int count)
{
	return any_cracked;
}
/* Per-candidate result recorded by crypt_all(). */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}
/* Full verification already happened in crypt_all(); nothing left to do. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
static unsigned int iteration_count(void *salt)
{
struct custom_salt *my_salt;
my_salt = salt;
return (unsigned int) my_salt->iterations;
}
/* JtR format descriptor for the OpenCL GNOME Keyring cracker: the first
 * brace group holds static parameters, the second the method table. */
struct fmt_main fmt_opencl_keyring = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			/* tunable cost name, reported by iteration_count() */
			"iteration count",
		},
		{ FORMAT_TAG },
		keyring_tests
	}, {
		init,
		done,
		reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			/* no binary comparison: cracking is decided in crypt_all() */
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		keyring_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
|
linalg.c | #include "linalg.h"
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define ELEMENT double
/* Serial squared Euclidean distance: sum_i (a[i] - b[i])^2. */
ELEMENT squared_norm_of_diff(ELEMENT *vect_a, ELEMENT *vect_b, int size) {
  ELEMENT acc = 0;
  for (int i = 0; i < size; i++) {
    const ELEMENT d = vect_a[i] - vect_b[i];
    acc += d * d;
  }
  return acc;
}
/* OpenMP squared Euclidean distance; the reduction makes the result
 * independent of thread count (up to FP summation order). */
ELEMENT squared_norm_of_diff_parallel(ELEMENT *vect_a, ELEMENT *vect_b,
                                      int size) {
  ELEMENT acc = 0;
#pragma omp parallel for shared(vect_a, vect_b, size) default(none) reduction(+: acc)
  for (int i = 0; i < size; i++) {
    const ELEMENT d = vect_a[i] - vect_b[i];
    acc += d * d;
  }
  return acc;
}
|
lastpass_sniffed_fmt_plug.c | /* LastPass sniffed session cracker patch for JtR. Hacked together during
* November of 2012 by Dhiru Kholia <dhiru at openwall.com>.
*
* Burp Suite is awesome. Open-source it!
*
* This software is Copyright (c) 2012 Dhiru Kholia <dhiru at openwall.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* Jan, 2015, JimF. Fixed salt-dupe problem. Now salt ONLY depends upon
* unencrypted user name, so we have real salt-dupe removal.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_sniffed_lastpass;
#elif FMT_REGISTERS_H
john_register_one(&fmt_sniffed_lastpass);
#else
#include <string.h>
#include <errno.h>
#include "arch.h"
#include "johnswap.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "base64_convert.h"
#include "aes.h"
#include "pbkdf2_hmac_sha256.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "LastPass"
#define FORMAT_NAME "sniffed sessions"
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA256 AES " SHA256_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA256 AES 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 55
#define BINARY_SIZE 16
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN 4
#define SALT_ALIGN sizeof(int)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
/* sentms=1352643586902&xml=2&username=hackme%40mailinator.com&method=cr&hash=4c11d8717015d92db74c42bc1a2570abea3fa18ab17e58a51ce885ee217ccc3f&version=2.0.15&encrypted_username=i%2BhJCwPOj5eQN4tvHcMguoejx4VEmiqzOXOdWIsZKlk%3D&uuid=aHnPh8%40NdhSTWZ%40GJ2fEZe%24cF%40kdzdYh&lang=en-US&iterations=500&sessonly=0&otp=&sesameotp=&multifactorresponse=&lostpwotphash=07a286341be484fc3b96c176e611b10f4d74f230c516f944a008f960f4ec8870&requesthash=i%2BhJCwPOj5eQN4tvHcMguoejx4VEmiqzOXOdWIsZKlk%3D&requestsrc=cr&encuser=i%2BhJCwPOj5eQN4tvHcMguoejx4VEmiqzOXOdWIsZKlk%3D&hasplugin=2.0.15
* decodeURIComponent("hackme%40mailinator.com")
* decodeURIComponent("i%2BhJCwPOj5eQN4tvHcMguoejx4VEmiqzOXOdWIsZKlk%3D") */
/* C:\Users\Administrator\AppData\Local\Google\Chrome\User Data\Default\Extensions\hdokiejnpimakedhajhdlcegeplioahd\2.0.15_0
* lpfulllib.js and server.js are main files involved */
/* Self-test vectors: $lastpass$<username>$<iterations>$<base64 data>. */
static struct fmt_tests lastpass_tests[] = {
	{"$lastpass$hackme@mailinator.com$500$i+hJCwPOj5eQN4tvHcMguoejx4VEmiqzOXOdWIsZKlk=", "openwall"},
	{"$lastpass$pass_gen@generated.com$500$vgC0g8BxOi4MerkKfZYFFSAJi8riD7k0ROLpBEA3VJk=", "password"},
	// get one with salt under 16 bytes.
	{"$lastpass$1@short.com$500$2W/GA8d2N+Z4HGvRYs2R7w==", "password"},
	{NULL}
};
/* Per-candidate plaintexts, NUL-terminated, at most PLAINTEXT_LENGTH. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
/* First 16 bytes (4 words) of AES-CBC(username) under the derived key. */
static ARCH_WORD_32 (*crypt_key)[4];
static struct custom_salt {
	unsigned int iterations;	/* PBKDF2-SHA256 iteration count */
	unsigned int length;	/* username length, truncated to 16 */
	char username[129];	/* plaintext username; doubles as the salt */
} *cur_salt;
/* Allocate per-candidate key/result buffers; with OpenMP the key-slot
 * counts are scaled by thread count (and OMP_SCALE for the max). */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_key));
	crypt_key = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*crypt_key));
}
/* Release the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(crypt_key);
}
/* Accept ciphertexts of the form $lastpass$<user>$<iterations>$<data>:
 * username at most 128 chars, decimal iteration count, data field at
 * most 50 chars (a loose bound, not an exact length check). */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr, *p;
	int ok = 0;

	if (strncmp(ciphertext, "$lastpass$", 10) != 0)
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += 10;
	if ((p = strtokm(ctcopy, "$")) != NULL && strlen(p) <= 128 && /* username */
	    (p = strtokm(NULL, "$")) != NULL && isdec(p) &&           /* iterations */
	    (p = strtokm(NULL, "$")) != NULL && strlen(p) <= 50)      /* data */
		ok = 1;
	MEM_FREE(keeptr);
	return ok;
}
/* Build a custom_salt from the ciphertext.  Only the unencrypted
 * username (the PBKDF2 salt) and the iteration count matter; length is
 * capped at 16 for the AES comparison block.  Returns a static buffer. */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	size_t ulen;

	memset(&cs, 0, sizeof(cs));
	ctcopy += 10;	/* skip over "$lastpass$" */
	p = strtokm(ctcopy, "$");
	ulen = strlen(p);
	cs.length = (ulen > 16) ? 16 : ulen;	/* truncated length */
	strncpy(cs.username, p, 128);	/* cs was zeroed, so always terminated */
	p = strtokm(NULL, "$");
	cs.iterations = atoi(p);
	MEM_FREE(keeptr);
	return (void *)&cs;
}
/* Decode the base64 data field (third '$'-separated field) and return its
 * first 16 bytes; crypt_all() recomputes the same 16 bytes per candidate.
 * Returns a static buffer per JtR convention. */
static void *get_binary(char *ciphertext)
{
	static unsigned int out[4];
	char Tmp[48];
	char *p;
	ciphertext += 10;
	p = strchr(ciphertext, '$')+1;	/* skip username */
	p = strchr(p, '$')+1;	/* skip iterations */
	base64_convert(p, e_b64_mime, strlen(p), Tmp, e_b64_raw, sizeof(Tmp), 0);
	memcpy(out, Tmp, 16);
	return out;
}
/* Remember the active salt for crypt_all(). */
static void set_salt(void *salt)
{
	cur_salt = salt;
}
/* Derive a 256-bit key per candidate with PBKDF2-SHA256(password,
 * username), then AES-CBC-encrypt the first 32 bytes of the username with
 * a zero IV; the first 16 output bytes are the comparable "binary".
 * SIMD builds process SSE_GROUP_SZ_SHA256 keys per iteration. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
		ARCH_WORD_32 key[MAX_KEYS_PER_CRYPT][8];
		unsigned i;
#ifdef SIMD_COEF_32
		/* gather pointers/lengths for the interleaved SIMD KDF */
		int lens[MAX_KEYS_PER_CRYPT];
		unsigned char *pin[MAX_KEYS_PER_CRYPT];
		union {
			ARCH_WORD_32 *pout[MAX_KEYS_PER_CRYPT];
			unsigned char *poutc;
		} x;
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			lens[i] = strlen(saved_key[i+index]);
			pin[i] = (unsigned char*)saved_key[i+index];
			x.pout[i] = key[i];
		}
		pbkdf2_sha256_sse((const unsigned char **)pin, lens, (unsigned char*)cur_salt->username, strlen(cur_salt->username), cur_salt->iterations, &(x.poutc), 32, 0);
#else
		pbkdf2_sha256((unsigned char*)saved_key[index], strlen(saved_key[index]), (unsigned char*)cur_salt->username, strlen(cur_salt->username), cur_salt->iterations, (unsigned char*)(&key[0]),32,0);
#endif
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			unsigned char *Key = (unsigned char*)key[i];
			AES_KEY akey;
			unsigned char iv[16];
			unsigned char out[32];
			if(AES_set_encrypt_key(Key, 256, &akey) < 0) {
				fprintf(stderr, "AES_set_encrypt_key failed in crypt!\n");
			}
			memset(iv, 0, sizeof(iv));
			/* username[] is 129 bytes and zero-padded, so reading 32 is safe */
			AES_cbc_encrypt((const unsigned char*)cur_salt->username, out, 32, &akey, iv, AES_ENCRYPT);
			memcpy(crypt_key[index+i], out, 16);
		}
	}
	return count;
}
/* Standard JtR hash-table accessors over the first crypt-result word. */
static int get_hash_0(int index) { return crypt_key[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_key[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_key[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_key[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_key[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_key[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_key[index][0] & PH_MASK_6; }
static int cmp_all(void *binary, int count) {
int index;
for (index = 0; index < count; index++)
if ( ((ARCH_WORD_32*)binary)[0] == crypt_key[index][0] )
return 1;
return 0;
}
/* Full BINARY_SIZE comparison for one candidate. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_key[index], BINARY_SIZE);
}
/* cmp_one() already compares the full binary; nothing more to verify. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
static void lastpass_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Return the stored candidate (already NUL-terminated by set_key). */
static char *get_key(int index)
{
	return saved_key[index];
}
static unsigned int iteration_count(void *salt)
{
struct custom_salt *my_salt;
my_salt = salt;
return (unsigned int) my_salt->iterations;
}
/* JtR format descriptor for sniffed LastPass sessions: static parameters
 * first, then the method table. */
struct fmt_main fmt_sniffed_lastpass = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			/* tunable cost name, reported by iteration_count() */
			"iteration count",
		},
		lastpass_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		lastpass_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
t002.c | #include<stdint.h>
#include<stdlib.h>
#include<stdio.h>
#include<omp.h>
typedef struct {int64_t nteam; int64_t nthread; int64_t team_n; int64_t thread_n;} tinfo;
int
main(int argc, char **argv)
{
const int64_t narr = 1 << 10;
tinfo tinit = {-1, -1, -1, -1};
tinfo *t = aligned_alloc(1 << 22, sizeof(tinfo)*narr);
for(int64_t i = 0; i < narr; ++i) t[i] = tinit;
#pragma omp target teams distribute parallel for simd map(t[0:narr]) aligned(t)
for(int64_t i = 0; i < narr; ++i){
t[i].nteam = omp_get_num_teams();
t[i].nthread = omp_get_num_threads();
t[i].team_n = omp_get_team_num();
t[i].thread_n = omp_get_thread_num();
}
for(int64_t i = 0; i < narr; ++i){
printf("%4ld: nteam: %ld nthread: %ld team_n: %ld thread_n: %ld\n",
i, t[i].nteam, t[i].nthread, t[i].team_n, t[i].thread_n);
}
int ret = 0;
//if(t->nteam <= 0 || t->nthread <= 0) ret = 1;
free(t);
return ret;
}
|
GeneralMatrixMatrix.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H
template<typename _LhsScalar, typename _RhsScalar> class ei_level3_blocking;
/* Specialization for a row-major destination matrix => simple transposition of the product */
// Row-major destination: C = A*B is computed as C^T = B^T * A^T, so the
// column-major kernel below does all the work; storage orders and the
// conjugation flags are swapped accordingly.
template<
  typename Scalar, typename Index,
  int LhsStorageOrder, bool ConjugateLhs,
  int RhsStorageOrder, bool ConjugateRhs>
struct ei_general_matrix_matrix_product<Scalar,Index,LhsStorageOrder,ConjugateLhs,RhsStorageOrder,ConjugateRhs,RowMajor>
{
  static EIGEN_STRONG_INLINE void run(
    Index rows, Index cols, Index depth,
    const Scalar* lhs, Index lhsStride,
    const Scalar* rhs, Index rhsStride,
    Scalar* res, Index resStride,
    Scalar alpha,
    ei_level3_blocking<Scalar,Scalar>& blocking,
    GemmParallelInfo<Index>* info = 0)
  {
    // transpose the product such that the result is column major
    ei_general_matrix_matrix_product<Scalar, Index,
      RhsStorageOrder==RowMajor ? ColMajor : RowMajor,
      ConjugateRhs,
      LhsStorageOrder==RowMajor ? ColMajor : RowMajor,
      ConjugateLhs,
      ColMajor>
    ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
  }
};
/* Specialization for a col-major destination matrix
* => Blocking algorithm following Goto's paper */
// Column-major destination: blocked GEMM following Goto's algorithm.
// The parallel path coordinates threads through the GemmParallelInfo
// array (busy-wait on `users`/`sync`); the sequential path is GEMM_VAR1.
template<
  typename Scalar, typename Index,
  int LhsStorageOrder, bool ConjugateLhs,
  int RhsStorageOrder, bool ConjugateRhs>
struct ei_general_matrix_matrix_product<Scalar,Index,LhsStorageOrder,ConjugateLhs,RhsStorageOrder,ConjugateRhs,ColMajor>
{
static void run(Index rows, Index cols, Index depth,
  const Scalar* _lhs, Index lhsStride,
  const Scalar* _rhs, Index rhsStride,
  Scalar* res, Index resStride,
  Scalar alpha,
  ei_level3_blocking<Scalar,Scalar>& blocking,
  GemmParallelInfo<Index>* info = 0)
{
  ei_const_blas_data_mapper<Scalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
  ei_const_blas_data_mapper<Scalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);
  // conjugation of the rhs is folded into alpha here
  if (ConjugateRhs)
    alpha = ei_conj(alpha);
  typedef typename ei_packet_traits<Scalar>::type PacketType;
  typedef ei_product_blocking_traits<Scalar> Blocking;
  Index kc = blocking.kc();                 // cache block size along the K direction
  Index mc = std::min(rows,blocking.mc());  // cache block size along the M direction
  //Index nc = blocking.nc(); // cache block size along the N direction
  ei_gemm_pack_rhs<Scalar, Index, Blocking::nr, RhsStorageOrder> pack_rhs;
  ei_gemm_pack_lhs<Scalar, Index, Blocking::mr, LhsStorageOrder> pack_lhs;
  ei_gebp_kernel<Scalar, Index, Blocking::mr, Blocking::nr, ei_conj_helper<ConjugateLhs,ConjugateRhs> > gebp;
#ifdef EIGEN_HAS_OPENMP
  if(info)
  {
    // this is the parallel version!
    Index tid = omp_get_thread_num();
    Index threads = omp_get_num_threads();
    Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc);
    std::size_t sizeW = kc*Blocking::PacketSize*Blocking::nr*8;
    Scalar* w = ei_aligned_stack_new(Scalar, sizeW);
    Scalar* blockB = blocking.blockB();
    ei_internal_assert(blockB!=0);
    // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
    for(Index k=0; k<depth; k+=kc)
    {
      const Index actual_kc = std::min(k+kc,depth)-k; // => rows of B', and cols of the A'
      // In order to reduce the chance that a thread has to wait for the other,
      // let's start by packing A'.
      pack_lhs(blockA, &lhs(0,k), lhsStride, actual_kc, mc);
      // Pack B_k to B' in a parallel fashion:
      // each thread packs the sub block B_k,j to B'_j where j is the thread id.
      // However, before copying to B'_j, we have to make sure that no other thread is still using it,
      // i.e., we test that info[tid].users equals 0.
      // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
      while(info[tid].users!=0) {}
      info[tid].users += threads;
      pack_rhs(blockB+info[tid].rhs_start*kc, &rhs(k,info[tid].rhs_start), rhsStride, alpha, actual_kc, info[tid].rhs_length);
      // Notify the other threads that the part B'_j is ready to go.
      info[tid].sync = k;
      // Computes C_i += A' * B' per B'_j
      for(Index shift=0; shift<threads; ++shift)
      {
        Index j = (tid+shift)%threads;
        // At this point we have to make sure that B'_j has been updated by the thread j,
        // we use testAndSetOrdered to mimic a volatile access.
        // However, no need to wait for the B' part which has been updated by the current thread!
        if(shift>0)
          while(info[j].sync!=k) {}
        gebp(res+info[j].rhs_start*resStride, resStride, blockA, blockB+info[j].rhs_start*kc, mc, actual_kc, info[j].rhs_length, -1,-1,0,0, w);
      }
      // Then keep going as usual with the remaining A'
      for(Index i=mc; i<rows; i+=mc)
      {
        const Index actual_mc = std::min(i+mc,rows)-i;
        // pack A_i,k to A'
        pack_lhs(blockA, &lhs(i,k), lhsStride, actual_kc, actual_mc);
        // C_i += A' * B'
        gebp(res+i, resStride, blockA, blockB, actual_mc, actual_kc, cols, -1,-1,0,0, w);
      }
      // Release all the sub blocks B'_j of B' for the current thread,
      // i.e., we simply decrement the number of users by 1
      for(Index j=0; j<threads; ++j)
        #pragma omp atomic
        --(info[j].users);
    }
    ei_aligned_stack_delete(Scalar, blockA, kc*mc);
    ei_aligned_stack_delete(Scalar, w, sizeW);
  }
  else
#endif // EIGEN_HAS_OPENMP
  {
    EIGEN_UNUSED_VARIABLE(info);
    // this is the sequential version!
    std::size_t sizeA = kc*mc;
    std::size_t sizeB = kc*cols;
    std::size_t sizeW = kc*Blocking::PacketSize*Blocking::nr;
    // use the pre-allocated blocking buffers when available, otherwise
    // fall back to stack-style aligned allocation
    Scalar *blockA = blocking.blockA()==0 ? ei_aligned_stack_new(Scalar, sizeA) : blocking.blockA();
    Scalar *blockB = blocking.blockB()==0 ? ei_aligned_stack_new(Scalar, sizeB) : blocking.blockB();
    Scalar *blockW = blocking.blockW()==0 ? ei_aligned_stack_new(Scalar, sizeW) : blocking.blockW();
    // For each horizontal panel of the rhs, and corresponding panel of the lhs...
    // (==GEMM_VAR1)
    for(Index k2=0; k2<depth; k2+=kc)
    {
      const Index actual_kc = std::min(k2+kc,depth)-k2;
      // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
      // => Pack rhs's panel into a sequential chunk of memory (L2 caching)
      // Note that this panel will be read as many times as the number of blocks in the lhs's
      // vertical panel which is, in practice, a very low number.
      pack_rhs(blockB, &rhs(k2,0), rhsStride, alpha, actual_kc, cols);
      // For each mc x kc block of the lhs's vertical panel...
      // (==GEPP_VAR1)
      for(Index i2=0; i2<rows; i2+=mc)
      {
        const Index actual_mc = std::min(i2+mc,rows)-i2;
        // We pack the lhs's block into a sequential chunk of memory (L1 caching)
        // Note that this block will be read a very high number of times, which is equal to the number of
        // micro vertical panel of the large rhs's panel (e.g., cols/4 times).
        pack_lhs(blockA, &lhs(i2,k2), lhsStride, actual_kc, actual_mc);
        // Everything is packed, we can now call the block * panel kernel:
        gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, -1, -1, 0, 0, blockW);
      }
    }
    // only free what we allocated ourselves
    if(blocking.blockA()==0) ei_aligned_stack_delete(Scalar, blockA, kc*mc);
    if(blocking.blockB()==0) ei_aligned_stack_delete(Scalar, blockB, sizeB);
    if(blocking.blockW()==0) ei_aligned_stack_delete(Scalar, blockW, sizeW);
  }
}
};
/*********************************************************************************
* Specialization of GeneralProduct<> for "large" GEMM, i.e.,
* implementation of the high level wrapper to ei_general_matrix_matrix_product
**********************************************************************************/
// Traits of the GEMM product expression: inherit everything from the
// generic ProductBase traits.
template<typename Lhs, typename Rhs>
struct ei_traits<GeneralProduct<Lhs,Rhs,GemmProduct> >
 : ei_traits<ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs> >
{};
// Callable wrapper around Gemm::run(): binds the operands, destination,
// alpha and blocking so ei_parallelize_gemm can invoke slices of the
// product (a row range, optionally a column range) on worker threads.
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct ei_gemm_functor
{
  ei_gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, Scalar actualAlpha,
                  BlockingType& blocking)
    : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
  {}
  // called once before spawning the parallel workers: the shared B block
  // must exist before any thread packs into it
  void initParallelSession() const
  {
    m_blocking.allocateB();
  }
  // run the product on the [row, row+rows) x [col, col+cols) slice;
  // cols==-1 means "all columns"
  void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
  {
    if(cols==-1)
      cols = m_rhs.cols();
    Gemm::run(rows, cols, m_lhs.cols(),
              (const Scalar*)&(m_lhs.const_cast_derived().coeffRef(row,0)), m_lhs.outerStride(),
              (const Scalar*)&(m_rhs.const_cast_derived().coeffRef(0,col)), m_rhs.outerStride(),
              (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
              m_actualAlpha, m_blocking, info);
  }
  protected:
  const Lhs& m_lhs;
  const Rhs& m_rhs;
  Dest& m_dest;
  Scalar m_actualAlpha;
  BlockingType& m_blocking;
};
template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class ei_gemm_blocking_space;
// Base class holding the level-3 blocking state: the A/B/W scratch
// buffers and the mc/nc/kc cache-block sizes.  Buffers start null;
// derived classes decide whether they are static or heap allocated.
template<typename _LhsScalar, typename _RhsScalar>
class ei_level3_blocking
{
    typedef _LhsScalar LhsScalar;
    typedef _RhsScalar RhsScalar;
  protected:
    LhsScalar* m_blockA;   // packed lhs block
    RhsScalar* m_blockB;   // packed rhs panel
    RhsScalar* m_blockW;   // kernel workspace
    DenseIndex m_mc;
    DenseIndex m_nc;
    DenseIndex m_kc;
  public:
    ei_level3_blocking()
      : m_blockA(0), m_blockB(0), m_blockW(0), m_mc(0), m_nc(0), m_kc(0)
    {}
    inline DenseIndex mc() const { return m_mc; }
    inline DenseIndex nc() const { return m_nc; }
    inline DenseIndex kc() const { return m_kc; }
    inline LhsScalar* blockA() { return m_blockA; }
    inline RhsScalar* blockB() { return m_blockB; }
    inline RhsScalar* blockW() { return m_blockW; }
};
// Blocking space for fixed (compile-time) sizes: the scratch buffers are
// member arrays, so the allocate*() hooks are no-ops.  For a row-major
// destination the problem is transposed, hence the lhs/rhs scalar swap.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth>
class ei_gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, true>
  : public ei_level3_blocking<
      typename ei_meta_if<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::ret,
      typename ei_meta_if<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::ret>
{
    enum {
      Transpose = StorageOrder==RowMajor,
      ActualRows = Transpose ? MaxCols : MaxRows,
      ActualCols = Transpose ? MaxRows : MaxCols
    };
    typedef typename ei_meta_if<Transpose,_RhsScalar,_LhsScalar>::ret LhsScalar;
    typedef typename ei_meta_if<Transpose,_LhsScalar,_RhsScalar>::ret RhsScalar;
    typedef ei_product_blocking_traits<RhsScalar> Blocking;
    enum {
      SizeA = ActualRows * MaxDepth,
      SizeB = ActualCols * MaxDepth,
      SizeW = MaxDepth * Blocking::nr * ei_packet_traits<RhsScalar>::size
    };
    EIGEN_ALIGN16 LhsScalar m_staticA[SizeA];
    EIGEN_ALIGN16 RhsScalar m_staticB[SizeB];
    EIGEN_ALIGN16 RhsScalar m_staticW[SizeW];
  public:
    ei_gemm_blocking_space(DenseIndex /*rows*/, DenseIndex /*cols*/, DenseIndex /*depth*/)
    {
      this->m_mc = ActualRows;
      this->m_nc = ActualCols;
      this->m_kc = MaxDepth;
      this->m_blockA = m_staticA;
      this->m_blockB = m_staticB;
      this->m_blockW = m_staticW;
    }
    // nothing to do: the buffers live inside the object
    inline void allocateA() {}
    inline void allocateB() {}
    inline void allocateW() {}
    inline void allocateAll() {}
};
// Blocking space for dynamic sizes: block sizes come from
// computeProductBlockingSizes(), buffers are heap allocated lazily via
// allocate*() and released in the destructor.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth>
class ei_gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, false>
  : public ei_level3_blocking<
      typename ei_meta_if<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::ret,
      typename ei_meta_if<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::ret>
{
    enum {
      Transpose = StorageOrder==RowMajor
    };
    typedef typename ei_meta_if<Transpose,_RhsScalar,_LhsScalar>::ret LhsScalar;
    typedef typename ei_meta_if<Transpose,_LhsScalar,_RhsScalar>::ret RhsScalar;
    typedef ei_product_blocking_traits<RhsScalar> Blocking;
    DenseIndex m_sizeA;
    DenseIndex m_sizeB;
    DenseIndex m_sizeW;
  public:
    ei_gemm_blocking_space(DenseIndex rows, DenseIndex cols, DenseIndex depth)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;
      // may shrink mc/nc/kc to fit the cache hierarchy
      computeProductBlockingSizes<LhsScalar,RhsScalar>(this->m_kc, this->m_mc, this->m_nc);
      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
      m_sizeW = this->m_kc*ei_packet_traits<RhsScalar>::size*Blocking::nr;
    }
    void allocateA()
    {
      if(this->m_blockA==0)
        this->m_blockA = ei_aligned_new<LhsScalar>(m_sizeA);
    }
    void allocateB()
    {
      if(this->m_blockB==0)
        this->m_blockB = ei_aligned_new<RhsScalar>(m_sizeB);
    }
    void allocateW()
    {
      if(this->m_blockW==0)
        this->m_blockW = ei_aligned_new<RhsScalar>(m_sizeW);
    }
    void allocateAll()
    {
      allocateA();
      allocateB();
      allocateW();
    }
    // ei_aligned_delete on a null pointer is expected to be a no-op
    ~ei_gemm_blocking_space()
    {
      ei_aligned_delete(this->m_blockA, m_sizeA);
      ei_aligned_delete(this->m_blockB, m_sizeB);
      ei_aligned_delete(this->m_blockW, m_sizeW);
    }
};
// High-level GEMM product expression: extracts scalar factors and nested
// operands, picks a blocking policy, and dispatches (possibly in
// parallel) to ei_general_matrix_matrix_product via ei_gemm_functor.
template<typename Lhs, typename Rhs>
class GeneralProduct<Lhs, Rhs, GemmProduct>
  : public ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs>
{
    enum {
      MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
    };
  public:
    EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)
    GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
    {
      EIGEN_STATIC_ASSERT((ei_is_same_type<typename Lhs::Scalar, typename Rhs::Scalar>::ret),
        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
    }
    // dst += alpha * lhs * rhs
    template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
    {
      ei_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());
      // peel scalar multiples off the operands and fold them into alpha
      const ActualLhsType lhs = LhsBlasTraits::extract(m_lhs);
      const ActualRhsType rhs = RhsBlasTraits::extract(m_rhs);
      Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs)
                                 * RhsBlasTraits::extractScalarFactor(m_rhs);
      typedef ei_gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,Scalar,Scalar,
              Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;
      typedef ei_gemm_functor<
        Scalar, Index,
        ei_general_matrix_matrix_product<
          Scalar, Index,
          (_ActualLhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
          (_ActualRhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
          (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
        _ActualLhsType, _ActualRhsType, Dest, BlockingType> GemmFunctor;
      BlockingType blocking(dst.rows(), dst.cols(), lhs.cols());
      // parallelize only when the problem can be large enough to pay off
      ei_parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), this->rows(), this->cols(), Dest::Flags&RowMajorBit);
    }
};
#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
|
GB_binop__eq_fc64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__eq_fc64)
// A.*B function (eWiseMult): GB (_AemultB_08__eq_fc64)
// A.*B function (eWiseMult): GB (_AemultB_02__eq_fc64)
// A.*B function (eWiseMult): GB (_AemultB_04__eq_fc64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_fc64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__eq_fc64)
// C+=b function (dense accum): GB (_Cdense_accumb__eq_fc64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_fc64)
// C=scalar+B GB (_bind1st__eq_fc64)
// C=scalar+B' GB (_bind1st_tran__eq_fc64)
// C=A+scalar GB (_bind2nd__eq_fc64)
// C=A'+scalar GB (_bind2nd_tran__eq_fc64)
// C type: bool
// A type: GxB_FC64_t
// A pattern? 0
// B type: GxB_FC64_t
// B pattern? 0
// BinaryOp: cij = GB_FC64_eq (aij, bij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = (creal (GBX (Ax, pA, A_iso)) != 0) || (cimag (GBX (Ax, pA, A_iso)) != 0)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = (creal (GBX (Bx, pB, B_iso)) != 0) || (cimag (GBX (Bx, pB, B_iso)) != 0)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_FC64_eq (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EQ || GxB_NO_FC64 || GxB_NO_EQ_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
/* C = A+B where C, A, and B are all dense; the loop body comes from the
 * shared template (auto-generated file - do not hand-edit in Generated2/). */
void GB (_Cdense_ewise3_noaccum__eq_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__eq_fc64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__eq_fc64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type GxB_FC64_t
GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// The alpha/beta scalars are only read when is_eWiseUnion is true; for plain
// eWiseAdd they stay uninitialized and are not used by the included template.
GrB_Info GB (_AaddB__eq_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix slicing workspaces; released by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
GxB_FC64_t alpha_scalar ;
GxB_FC64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((GxB_FC64_t *) alpha_scalar_in)) ;
beta_scalar = (*((GxB_FC64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// Method 08: general sparse/hyper output; the work is in the included meta file.
GrB_Info GB (_AemultB_08__eq_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// Method 02: GB_BINOP_FLIP is 0 for EQ (commutative), so only the
// non-flipped template instantiation below is compiled in.
GrB_Info GB (_AemultB_02__eq_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Method 04: iteration is driven by the sparse mask M (M_ek_slicing).
GrB_Info GB (_AemultB_04__eq_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Bitmap-output variant of eWiseMult.
GrB_Info GB (_AemultB_bitmap__eq_fc64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// For each entry present in B, compute Cx [k] = (x == Bx [k]).
GrB_Info GB (_bind1st__eq_fc64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped input/output arrays
GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
bool *Cx = (bool *) Cx_output ;
// the bound scalar x, first operand of z = eq(x,y)
GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < bnz ; k++)
{
// only entries present in the bitmap of B are computed
if (GBB (Bb, k))
{
Cx [k] = GB_FC64_eq (x, GBX (Bx, k, false)) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// For each entry present in A, compute Cx [k] = (Ax [k] == y).
GrB_Info GB (_bind2nd__eq_fc64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped input/output arrays
GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
bool *Cx = (bool *) Cx_output ;
// the bound scalar y, second operand of z = eq(x,y)
GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// only entries present in the bitmap of A are computed
if (GBB (Ab, k))
{
Cx [k] = GB_FC64_eq (GBX (Ax, k, false), y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is the per-entry kernel expanded inside GB_unop_transpose.c.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_eq (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__eq_fc64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for subsequent code (generated-code pattern; here it is
// redefined to the same type it already had)
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_eq (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__eq_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
move.h | #pragma once
#include "core.h"
#include "energy.h"
#include "average.h"
//#include "analysis.h"
#include "potentials.h"
#include "mpi.h"
namespace Faunus {
namespace Move {
/**
 * @brief Base class for all Monte Carlo moves
 *
 * Derived classes implement `_move()` (mandatory) plus optional
 * `_accept()`/`_reject()` hooks and the json in/out hooks. Statistics
 * (attempt/accept/reject counters) are kept here.
 */
class Movebase {
private:
virtual void _move(Change&)=0; //!< Perform move and modify change object
virtual void _accept(Change&); //!< Call after move is accepted
virtual void _reject(Change&); //!< Call after move is rejected
virtual void _to_json(json &j) const=0; //!< Extra info for report if needed
virtual void _from_json(const json &j)=0; //!< Extra info for report if needed
TimeRelativeOfTotal<std::chrono::microseconds> timer; //!< Tracks time spent in this move
protected:
unsigned long cnt=0; //!< Number of attempted moves
unsigned long accepted=0; //!< Number of accepted moves
unsigned long rejected=0; //!< Number of rejected moves
public:
static Random slump; //!< Shared for all moves
std::string name; //!< Name of move
std::string cite; //!< Reference
int repeat=1; //!< How many times the move should be repeated per sweep
void from_json(const json &j);
void to_json(json &j) const; //!< JSON report w. statistics, output etc.
void move(Change &change); //!< Perform move and modify given change object
void accept(Change &c);
void reject(Change &c);
virtual double bias(Change &c, double uold, double unew); //!< adds extra energy change not captured by the Hamiltonian
};
void from_json(const json &j, Movebase &m); //!< Configure any move via json
void to_json(json &j, const Movebase &m); //!< Serialize move statistics to json
/**
* @brief Swap the charge of a single atom
*
* Picks a random atom in a random molecule of the configured type and flips
* its charge between 0 and 1 via |q-1|. The pH/pKa-dependent energy enters
* through `bias()` rather than through the Hamiltonian.
*/
template<typename Tspace>
class AtomicSwapCharge : public Movebase {
private:
typedef typename Tspace::Tpvec Tpvec;
typedef typename Tspace::Tparticle Tparticle;
Tspace& spc; // Space to operate on
int molid=-1; // id of molecule type to operate on
double ln10 = log(10);
double pKa, pH;
Average<double> msqd; // mean squared displacement
double _sqd, _bias; // charge change of last move and its bias energy
std::string molname; // name of molecule to operate on
Change::data cdata; // change record reused for every move (single atom)
// Report pH, pKa and statistics
void _to_json(json &j) const override {
j = {
{"pH", pH},
{"pka", pKa},
{"molid", molid},
{u8::rootof + u8::bracket("r" + u8::squared), std::sqrt(msqd.avg())},
{"molecule", molname}
};
_roundjson(j,3);
}
void _from_json(const json &j) override {
assert(!molecules<Tpvec>.empty());
try {
molname = j.at("molecule");
auto it = findName(molecules<Tpvec>, molname);
if (it == molecules<Tpvec>.end())
throw std::runtime_error("unknown molecule '" + molname + "'");
molid = it->id();
pH = j.at("pH").get<double>();
pKa = j.at("pKa").get<double>();
if (repeat<0) {
auto v = spc.findMolecules(molid);
repeat = std::distance(v.begin(), v.end()); // repeat for each molecule...
if (repeat>0)
repeat = repeat * v.front().size(); // ...and for each atom
}
}
catch (std::exception &e) {
std::cerr << name << ": " << e.what();
throw;
}
} //!< Configure via json object
// Pick a random atom in a random group of type `molid`; fills in `cdata`.
// Returns spc.p.end() if no suitable atom exists.
typename Tpvec::iterator randomAtom() {
assert(molid>=0);
auto mollist = spc.findMolecules( molid ); // all `molid` groups
if (size(mollist)>0) {
auto git = slump.sample( mollist.begin(), mollist.end() ); // random molecule iterator
if (!git->empty()) {
auto p = slump.sample( git->begin(), git->end() ); // random particle iterator
cdata.index = Faunus::distance( spc.groups.begin(), git ); // integer *index* of moved group
cdata.atoms[0] = std::distance(git->begin(), p); // index of particle rel. to group
return p;
}
}
return spc.p.end();
}
void _move(Change &change) override {
auto p = randomAtom();
if (p!=spc.p.end()) {
auto& g = spc.groups[cdata.index];
double oldcharge = p->charge;
p->charge = fabs(oldcharge - 1); // flip charge 0 <-> 1
// NOTE(review): _sqd holds the signed charge change here, not a squared
// displacement, yet it is accumulated into msqd on accept — confirm intent.
_sqd = fabs(oldcharge - 1) - oldcharge;
change.groups.push_back( cdata ); // add to list of moved groups
_bias = _sqd*(pH-pKa)*ln10; // one may add bias here...
}
// NOTE(review): _bias is not reset when no atom is found; a stale value
// from a previous move would then be returned by bias() — verify.
}
double bias(Change &change, double uold, double unew) override {
return _bias;
} //!< adds extra energy change not captured by the Hamiltonian
void _accept(Change &change) override { msqd += _sqd; }
void _reject(Change &change) override { msqd += 0; }
public:
AtomicSwapCharge(Tspace &spc) : spc(spc) {
name = "swapcharge";
repeat = -1; // meaning repeat N times
cdata.atoms.resize(1); // this move always touches exactly one atom
cdata.internal=true;
}
};
/**
* @brief Translate and rotate a single atom picked from a molecular group
*
* Displacement (`dp`) and rotational displacement (`dprot`) are taken from
* the atom's own properties. The group mass center is updated after
* translation unless the group is atomic.
*/
template<typename Tspace>
class AtomicTranslateRotate : public Movebase {
private:
typedef typename Tspace::Tpvec Tpvec;
typedef typename Tspace::Tparticle Tparticle;
Tspace& spc; // Space to operate on
int molid=-1; // id of molecule type to operate on
Point dir={1,1,1}; // directions along which translation is allowed
Average<double> msqd; // mean squared displacement
double _sqd; // squared displament
std::string molname; // name of molecule to operate on
Change::data cdata; // change record reused for every move (single atom)
void _to_json(json &j) const override {
j = {
{"dir", dir},
{"molid", molid},
{u8::rootof + u8::bracket("r" + u8::squared), std::sqrt(msqd.avg())},
{"molecule", molname}
};
_roundjson(j,3);
}
void _from_json(const json &j) override {
assert(!molecules<Tpvec>.empty());
try {
molname = j.at("molecule");
auto it = findName(molecules<Tpvec>, molname);
if (it == molecules<Tpvec>.end())
throw std::runtime_error("unknown molecule '" + molname + "'");
molid = it->id();
dir = j.value("dir", Point(1,1,1));
if (repeat<0) {
auto v = spc.findMolecules(molid, Tspace::ALL );
repeat = std::distance(v.begin(), v.end()); // repeat for each molecule...
if (repeat>0)
repeat = repeat * v.front().size(); // ...and for each atom
}
}
catch (std::exception &e) {
std::cerr << name << ": " << e.what();
throw;
}
} //!< Configure via json object
// Pick a random atom in a random group of type `molid`; fills in `cdata`.
// Returns spc.p.end() if no suitable atom exists.
typename Tpvec::iterator randomAtom() {
assert(molid>=0);
//std::cout<<"molid "<<molid<<std::endl;
auto mollist = spc.findMolecules( molid, Tspace::ALL ); // all `molid` groups
if (size(mollist)>0) {
//std::cout<<"looking for atoms"<<std::endl;
auto git = slump.sample( mollist.begin(), mollist.end() ); // random molecule iterator
if (!git->empty()) {
//std::cout<<"found molecule"<<std::endl;
auto p = slump.sample( git->begin(), git->end() ); // random particle iterator
cdata.index = Faunus::distance( spc.groups.begin(), git ); // integer *index* of moved group
cdata.atoms[0] = std::distance(git->begin(), p); // index of particle rel. to group
return p;
}
}
return spc.p.end();
}
void _move(Change &change) override {
auto p = randomAtom();
if (p!=spc.p.end()) {
// per-atom displacement parameters from the atom database
double dp = atoms.at(p->id).dp;
double dprot = atoms.at(p->id).dprot;
auto& g = spc.groups[cdata.index];
if (dp>0) { // translate
Point oldpos = p->pos;
p->pos += 0.5 * dp * ranunit(slump).cwiseProduct(dir);
spc.geo.boundaryFunc(p->pos);
_sqd = spc.geo.sqdist(oldpos, p->pos); // squared displacement
if (!g.atomic)
g.cm = Geometry::massCenter(g.begin(), g.end(), spc.geo.boundaryFunc, -g.cm);
}
// NOTE(review): for a rotation-only move (dp==0, dprot>0) _sqd keeps its
// previous value and is still accumulated into msqd on accept — verify.
if (dprot>0) { // rotate
Point u = ranunit(slump);
double angle = dprot * (slump()-0.5);
Eigen::Quaterniond Q( Eigen::AngleAxisd(angle, u) );
p->rotate(Q, Q.toRotationMatrix());
}
if (dp>0 || dprot>0)
change.groups.push_back( cdata ); // add to list of moved groups
}
//else
// std::cerr << name << ": no atoms found" << std::endl;
}
void _accept(Change &change) override { msqd += _sqd; }
void _reject(Change &change) override { msqd += 0; }
public:
AtomicTranslateRotate(Tspace &spc) : spc(spc) {
name = "transrot";
repeat = -1; // meaning repeat N times
cdata.atoms.resize(1); // this move always touches exactly one atom
cdata.internal=true;
}
};
/**
* @brief Translate and rotate a molecular group
*
* Picks a random active molecule of the configured type, translates its
* mass center by up to `dp`/2 along `dir` and/or rotates it by up to
* `dprot`/2 around a random axis.
*/
template<typename Tspace>
class TranslateRotate : public Movebase {
protected:
typedef typename Tspace::Tpvec Tpvec;
Tspace& spc; // Space to operate on
int molid=-1; // id of molecule type to operate on
double dptrans=0; // translational displacement parameter
double dprot=0; // rotational displacement parameter (radians)
Point dir={1,1,1}; // directions along which translation is allowed
double _sqd; // squared displacement
Average<double> msqd; // mean squared displacement
void _to_json(json &j) const override {
j = {
{"dir", dir}, {"dp", dptrans}, {"dprot", dprot},
{"molid", molid},
{u8::rootof + u8::bracket("r" + u8::squared), std::sqrt(msqd.avg())},
{"molecule", molecules<Tpvec>[molid].name}
};
_roundjson(j,3);
}
void _from_json(const json &j) override {
assert(!molecules<Tpvec>.empty());
try {
std::string molname = j.at("molecule");
auto it = findName(molecules<Tpvec>, molname);
if (it == molecules<Tpvec>.end())
throw std::runtime_error("unknown molecule '" + molname + "'");
molid = it->id();
dir = j.value("dir", Point(1,1,1));
dprot = j.at("dprot");
dptrans = j.at("dp");
if (repeat<0) {
auto v = spc.findMolecules(molid);
repeat = std::distance(v.begin(), v.end());
}
}
catch (std::exception &e) {
throw std::runtime_error(name+": " + e.what());
}
} //!< Configure via json object
void _move(Change &change) override {
assert(molid>=0);
assert(!spc.groups.empty());
assert(spc.geo.getVolume()>0);
// pick random group from the system matching molecule type
// TODO: This can be slow -- implement look-up-table in Space
auto mollist = spc.findMolecules( molid, Tspace::ACTIVE ); // list of molecules w. 'molid'
if (size(mollist)>0) {
auto it = slump.sample( mollist.begin(), mollist.end() );
if (!it->empty()) {
assert(it->id==molid);
if (dptrans>0) { // translate
Point oldcm = it->cm;
Point dp = 0.5*ranunit(slump).cwiseProduct(dir) * dptrans;
it->translate( dp, spc.geo.boundaryFunc );
_sqd = spc.geo.sqdist(oldcm, it->cm); // squared displacement
}
// NOTE(review): for a rotation-only move (dptrans==0) _sqd keeps its
// previous value and is still accumulated into msqd on accept — verify.
if (dprot>0) { // rotate
Point u = ranunit(slump);
double angle = dprot * (slump()-0.5);
Eigen::Quaterniond Q( Eigen::AngleAxisd(angle, u) );
it->rotate(Q, spc.geo.boundaryFunc);
}
if (dptrans>0||dprot>0) { // define changes
Change::data d;
d.index = Faunus::distance( spc.groups.begin(), it ); // integer *index* of moved group
d.all = true; // *all* atoms in group were moved
change.groups.push_back( d ); // add to list of moved groups
}
// sanity check: stored mass center must match the recomputed one
assert( spc.geo.sqdist( it->cm,
Geometry::massCenter(it->begin(),it->end(),spc.geo.boundaryFunc,-it->cm) ) < 1e-9 );
}
}
}
void _accept(Change &change) override { msqd += _sqd; }
void _reject(Change &change) override { msqd += 0; }
public:
TranslateRotate(Tspace &spc) : spc(spc) {
name = "moltransrot";
repeat = -1; // meaning repeat N times
}
};
/**
* @brief Move that will swap conformation of a molecule
*
* This will swap between different molecular conformations
* as defined in `MoleculeData` with `traj` and `weight`.
* If defined, the weight
* distribution is respected, otherwise all conformations
* have equal intrinsic weight. Upon insertion, the new conformation
* is randomly oriented and placed on top of the mass-center of
* an existing molecule. That is, there is no mass center movement.
*
* @todo Add feature to align molecule on top of an existing one
* @todo Expand `_info()` to show number of conformations
* @warning Weighted distributions untested and not verified for correctness
* @date Malmo, November 2016
*/
template<class Tspace>
class ConformationSwap : public Movebase {
private:
typedef typename Tspace::Tpvec Tpvec;
typedef MoleculeData<Tpvec> Tmoldata;
RandomInserter<Tmoldata> inserter; // generates the new, oriented conformation
Tspace& spc; // Space to operate on
int molid=-1; // id of molecule type to operate on
int newconfid=-1; // conformation id applied on acceptance
void _to_json(json &j) const override {
j = {
{"molid", molid},
{"molecule", molecules<Tpvec>[molid].name}
};
_roundjson(j,3);
}
void _from_json(const json &j) override {
assert(!molecules<Tpvec>.empty());
try {
std::string molname = j.at("molecule");
auto it = findName(molecules<Tpvec>, molname);
if (it == molecules<Tpvec>.end())
throw std::runtime_error("unknown molecule '" + molname + "'");
molid = it->id();
// the swap is meaningless with fewer than two conformations
if ( molecules<Tpvec>[molid].conformations.size()<2)
throw std::runtime_error("minimum two conformations required");
if (repeat<0) {
auto v = spc.findMolecules(molid);
repeat = std::distance(v.begin(), v.end());
}
}
catch (std::exception &e) {
throw std::runtime_error(name+": " + e.what());
}
} //!< Configure via json object
void _move(Change &change) override {
assert(molid>=0);
assert(change.empty());
auto mollist = spc.findMolecules( molid, Tspace::ACTIVE ); // list of molecules w. 'molid'
if ( size(mollist)>0 ) {
auto g = slump.sample( mollist.begin(), mollist.end() );
if (not g->empty()) {
inserter.offset = g->cm; // place new conformation on current mass center
// Get a new conformation that should be properly wrapped around the boundaries
// (if applicable) and have the same mass-center as "g->cm".
Tpvec p = inserter(spc.geo, spc.p, molecules<Tpvec>[molid]);
if (p.size() not_eq g->size())
throw std::runtime_error(name + ": conformation atom count mismatch");
newconfid = molecules<Tpvec>[molid].conformations.index;
std::copy( p.begin(), p.end(), g->begin() ); // override w. new conformation
#ifndef NDEBUG
// this move shouldn't move mass centers, so let's check if this is true:
Point newcm = Geometry::massCenter(p.begin(), p.end(), spc.geo.boundaryFunc, -g->cm);
if ( (newcm - g->cm).norm()>1e-6 )
throw std::runtime_error(name + ": unexpected mass center movement");
#endif
Change::data d;
d.index = Faunus::distance(spc.groups.begin(), g); // integer *index* of moved group
d.all = true; // *all* atoms in group were moved
d.internal = false; // we *don't* want to calculate the internal energy
change.groups.push_back( d ); // add to list of moved groups
}
}
}
// record the new conformation id only when the move is accepted
void _accept(Change &change) override {
assert(change.groups.size()==1);
spc.groups[ change.groups.front().index ].confid = newconfid;
}
public:
ConformationSwap(Tspace &spc) : spc(spc) {
name = "conformationswap";
repeat = -1; // meaning repeat n times
inserter.dir = {0,0,0}; // no translation: keep mass center fixed
inserter.rotate = true; // random orientation of the new conformation
}
}; // end of conformation swap move
/**
* @brief Sketch for MD move
*
* NOTE(review): `Movebase::_move()` is pure virtual and not overridden here,
* so this class is still abstract and cannot be instantiated yet.
*/
template<typename Tspace>
class ForceMove : public Movebase {
private:
typedef typename Tspace::Tpvec Tpvec;
void _to_json(json &j) const override {} //!< Nothing to report yet
void _from_json(const json &j) override {} //!< No options yet
std::vector<Point> forces, velocities; // per-particle forces and velocities
public:
ForceMove() {
// resize forces and velocities to match spc.p
}
}; // end of forcemove
#ifdef DOCTEST_LIBRARY_INCLUDED
// Unit test: configure TranslateRotate from json and verify that the
// reported json round-trips the input parameters.
TEST_CASE("[Faunus] TranslateRotate")
{
typedef Particle<Radius, Charge, Dipole, Cigar> Tparticle;
typedef Space<Geometry::Cuboid, Tparticle> Tspace;
typedef typename Tspace::Tpvec Tpvec;
CHECK( !atoms.empty() ); // set in a previous test
CHECK( !molecules<Tpvec>.empty() ); // set in a previous test
Tspace spc;
TranslateRotate<Tspace> mv(spc);
json j = R"( {"molecule":"B", "dp":1.0, "dprot":0.5, "dir":[0,1,0], "repeat":2 })"_json;
mv.from_json(j);
j = json(mv).at(mv.name);
CHECK( j.at("molecule") == "B");
CHECK( j.at("dir") == Point(0,1,0) );
CHECK( j.at("dp") == 1.0 );
CHECK( j.at("repeat") == 2 );
CHECK( j.at("dprot") == 0.5 );
}
#endif
/**
* @brief Monte Carlo volume move
*
* Performs a random walk in ln(V) and scales the simulation volume using
* one of the scaling methods below ("xy", "isotropic", "isochoric").
*/
template<typename Tspace>
class VolumeMove : public Movebase {
private:
// JSON name -> geometry scaling method
const std::map<std::string, Geometry::VolumeMethod> methods = {
{"xy", Geometry::XY},
{"isotropic", Geometry::ISOTROPIC},
{"isochoric", Geometry::ISOCHORIC}
};
typename decltype(methods)::const_iterator method; // selected scaling method
typedef typename Tspace::Tpvec Tpvec;
Tspace& spc; // Space to operate on
Average<double> msqd; // mean squared volume displacement
double dV=0, deltaV=0, Vnew=0, Vold=0;
//! Report displacement parameter, method and volume fluctuation statistics
void _to_json(json &j) const override {
using namespace u8;
j = {
{"dV", dV}, {"method", method->first},
{rootof + bracket(Delta + "V" + squared), std::sqrt(msqd.avg())},
{cuberoot + rootof + bracket(Delta + "V" + squared),
std::cbrt(std::sqrt(msqd.avg()))}
};
_roundjson(j,3);
}
//! Read "method" (default "isotropic") and mandatory "dV" from json
void _from_json(const json &j) override {
method = methods.find( j.value("method", "isotropic") );
if (method==methods.end())
// FIX: the exception was previously constructed but never thrown,
// silently leaving `method` at end() and later dereferencing it.
throw std::runtime_error("unknown volume change method");
dV = j.at("dV");
}
void _move(Change &change) override {
if (dV>0) {
change.dV=true;
change.all=true;
Vold = spc.geo.getVolume();
if (method->second == Geometry::ISOCHORIC)
Vold = std::pow(Vold,1.0/3.0); // volume is constant
Vnew = std::exp(std::log(Vold) + (slump()-0.5) * dV);
deltaV = Vnew-Vold;
spc.scaleVolume(Vnew, method->second);
} else deltaV=0;
}
void _accept(Change &change) override { msqd += deltaV*deltaV; }
void _reject(Change &change) override { msqd += 0; }
public:
VolumeMove(Tspace &spc) : spc(spc) {
name = "volume";
repeat = 1;
}
}; // end of VolumeMove
/*
* @brief Establishes equilibrium of matter between all species
*
* Consider the dissociation process AX=A+X. This class will locate
* all species of type AX and A and make a MC swap move between them.
* X is implicit, meaning that it enters only with its chemical potential
* (activity). The titrating species, their dissociation constants
* and the chemical potential of the titrant are read from a
* `processes` JSON object.
* For example, for proton titration of phosphate one would
* use the following JSON input (pH 7.0):
*
* @todo
* Implement classification of reactions to group weight in
* mc sweep {reference : prob(reference)}
*
*/
template<typename Tspace>
class SpeciationMove : public Movebase {
private:
typedef typename Tspace::Tpvec Tpvec;
Tspace& spc;
Tspace *otherspc;
ReactionData<Tpvec> *trialprocess;
std::map<std::string, Average<double>> accmap;
double lnK;
bool forward;
std::vector<int> molDel; // index of groups to delete
std::vector<int> atomDel; // atom index to delete
std::map<int, int> molcnt_ins, atomcnt_ins,
molcnt_del, atomcnt_del,
molcnt, atomcnt; // id's and number of inserted/deleted mols and atoms
std::multimap<int, Tpvec> pmap; // coordinates of mols and atoms to be inserted
unsigned int Ndeleted, Ninserted; // Number of accepted deletions and insertions
void _to_json(json &j) const override {
j = {
// { "replicas", mpi.nproc() },
// { "datasize", pt.getFormat() }
};
json &_j = j["reactions"];
_j = json::object();
for (auto &m : accmap)
_j[m.first] = {
{"attempts", m.second.cnt},
{"acceptance", m.second.avg()}
};
}
void _from_json(const json &j) override {
//j["speciation"] = "speciation";
}
public:
SpeciationMove(Tspace &spc) : spc(spc) {
name = "speciation";
repeat = 1;
}
void setOther(Tspace &ospc) {
otherspc = &ospc;
}
double energy(); //!< Returns intrinsic energy of the process
void _move(Change &change) override {
if ( reactions<Tpvec>.size()>0 ) {
auto rit = slump.sample( reactions<Tpvec>.begin(), reactions<Tpvec>.end() );
lnK = rit->lnK;
forward = (bool)slump.range(0,1); // random boolean
trialprocess = &(*rit);
if ( rit->empty(forward) ) // Enforce canonic constraint if invoked
return; //Out of material, slip out the back door
for (auto &m : rit->Molecules2Add( !forward )) { // Delete checks
auto mollist = spc.findMolecules( m.first, Tspace::ALL);
if ( molecules<Tpvec>[m.first].atomic ) {
if( size(mollist)!=1 ) // There can be only one
throw std::runtime_error("Bad definition: One group per atomic molecule!");
auto git = mollist.begin();
if ( git->size() < m.second ) // assure that there are atoms enough in the group
return;
} else {
mollist = spc.findMolecules( m.first, Tspace::ACTIVE);
if ( size(mollist) < m.second )
return; // Not possible to perform change, escape through the back door
}
}
for (auto &m : rit->Molecules2Add( forward )) { // Addition checks
auto mollist = spc.findMolecules( m.first, Tspace::ALL);
if ( molecules<Tpvec>[m.first].atomic ) {
if( size(mollist)!=1 ) // There can be only one
throw std::runtime_error("Bad definition: One group per atomic molecule!");
auto git = mollist.begin();
if ( (git->size() + m.second) > git->capacity() ) // assure that there are atoms enough in the group
return; // if not slip out the back door
} else {
mollist = spc.findMolecules( m.first, Tspace::INACTIVE);
if ( size(mollist) < m.second )
return; // Not possible to perform change, escape through the back door
}
}
//The move is doable, raise flag
change.dNpart=true;
for (auto &m : rit->Molecules2Add( !forward )) { // Delete
auto mollist = spc.findMolecules( m.first, Tspace::ALL);
if ( molecules<Tpvec>[m.first].atomic ) {
if( size(mollist)!=1 ) // There can be only one
throw std::runtime_error("Bad definition: One group per atomic molecule!");
Change::data d;
auto git = mollist.begin();
auto othermollist = otherspc->findMolecules(m.first, Tspace::ALL); // implies that new and old are in sync
auto othergit=othermollist.begin();
d.index = Faunus::distance( spc.groups.begin(), git ); // integer *index* of moved group
d.internal = true;
d.dNpart = true;
for ( int N=0; N<m.second; N++ ) { // deactivate m.second m.first atoms
auto ait = slump.sample( git->begin(), git->end()); // iterator to random atom
// Shuffle back to end, both in trial and new
auto nait = git->end()-1; //iterator to last atom
int dist = Faunus::distance( ait, git->end() ); // distance to random atom from end
if ( Faunus::distance( ait, nait) > 1 ) {
std::iter_swap(ait, nait);
std::iter_swap(othergit->end()-dist-N, othergit->end() - (1+N) );
}
d.atoms.push_back ( Faunus::distance(git->begin(), nait) );
git->deactivate( nait, git->end());
}
std::sort( d.atoms.begin(), d.atoms.end() );
change.groups.push_back( d ); // add to list of moved groups
spc.moltracker[m.first] -= m.second;
} else {
mollist = spc.findMolecules( m.first, Tspace::ACTIVE);
for ( int N=0; N <m.second; N++ ) {
Change::data d;
auto git = slump.sample(mollist.begin(), mollist.end());
git->deactivate( git->begin(), git->end());
d.index = Faunus::distance( spc.groups.begin(), git ); // integer *index* of moved group
d.all = true; // *all* atoms in group were moved
d.internal = true;
for (int i=0; i<git->capacity(); i++)
d.atoms.push_back(i);
change.groups.push_back( d ); // add to list of moved groups
mollist = spc.findMolecules( m.first , Tspace::ACTIVE);
// Activate/deactivate all? simply move end to front?
}
spc.moltracker[m.first] -= m.second;
}
}
for (auto &m : rit->Molecules2Add( forward )) { // Add
auto mollist = spc.findMolecules( m.first, Tspace::ALL);
if ( molecules<Tpvec>[m.first].atomic ) {
Change::data d;
auto git = mollist.begin();
d.index = Faunus::distance( spc.groups.begin(), git);
d.internal = true;
d.dNpart = true;
for ( int N=0; N<m.second; N++ ) { // activate m.second m.first atoms
git->activate( git->end(), git->end() + 1);
auto ait = git->end()-1;
spc.geo.randompos(ait->pos, slump);
spc.geo.boundaryFunc(ait->pos);
d.atoms.push_back( Faunus::distance(git->begin(), ait) ); // index of particle rel. to group
}
std::sort( d.atoms.begin(), d.atoms.end());
change.groups.push_back( d ); // add to list of moved groups
spc.moltracker[m.first] += m.second;
} else {
mollist = spc.findMolecules( m.first, Tspace::INACTIVE);
if ( size(mollist) < m.second ) {
change.dNpart=false;
return; // Not possible to perform change, escape through the back door
}
for ( int N=0; N <m.second; N++ ) {
Change::data d;
auto git = slump.sample(mollist.begin(), mollist.end());
git->activate( git->inactive().begin(), git->inactive().end());
Point newpoint; // = git->cm;
spc.geo.randompos(newpoint, random);
git->translate( -git->cm, spc.geo.boundaryFunc );
git->translate( newpoint, spc.geo.boundaryFunc );
Point u = ranunit(slump);
//double angle = dprot * (slump()-0.5);
Eigen::Quaterniond Q( Eigen::AngleAxisd(2*pc::pi*random(), u) );
git->rotate(Q, spc.geo.boundaryFunc);
d.index = Faunus::distance( spc.groups.begin(), git ); // integer *index* of moved group
d.all = true; // *all* atoms in group were moved
d.internal = true;
for (int i=0; i<git->capacity(); i++)
d.atoms.push_back(i);
change.groups.push_back( d ); // add to list of moved groups
assert( spc.geo.sqdist( git->cm,
Geometry::massCenter(git->begin(),git->end(),spc.geo.boundaryFunc, -git->cm ) ) < 1e-9 );
mollist = spc.findMolecules( m.first , Tspace::INACTIVE);
}
spc.moltracker[m.first] += m.second;
}
}
std::sort(change.groups.begin(), change.groups.end() );
} else
throw std::runtime_error("No reactions in list, disable speciation or add reactions");
}
double bias(Change &change, double uold, double unew) override {
// Free-energy contribution from the equilibrium constant:
// -lnK when running the reaction forward, +lnK when reversed.
return forward ? -lnK : lnK;
} //!< adds extra energy change not captured by the Hamiltonian
void _accept(Change &change) override {
accmap[ trialprocess->name ] += 1; // register successful attempt
// A forward reaction consumes one reservoir molecule; a reverse one returns it.
const int delta = forward ? -1 : 1;
trialprocess->N_reservoir += delta;
if (trialprocess->canonic == true && trialprocess->N_reservoir < 0)
throw std::runtime_error("There are no negative number of molecules");
}
void _reject(Change &change) override {
// Record the failed attempt (adds zero, but bumps the attempt counter).
accmap[ trialprocess->name ] += 0;
}
}; // End of class SpeciationMove
/**
 * @brief Rigid-body translation + rotation of a cluster of molecular groups.
 *
 * A nucleus molecule is picked at random; all molecules of the configured
 * types within `threshold` of each other (transitively) are moved together.
 * The move is rejected (infinite bias) if the cluster composition changes
 * during the move, which keeps detailed balance for the binary 0/1
 * clustering criterion implemented in `clusterProbability()`.
 */
template<typename Tspace>
class Cluster : public Movebase {
private:
typedef typename Tspace::Tpvec Tpvec;
typedef typename Tspace::Tgroup Tgroup;
Tspace& spc;
Average<double> msqd, msqd_angle, N; // <r^2>, <theta^2>, mean cluster size
double thresholdsq=0, dptrans=0, dprot=0, angle=0, _bias=0;
size_t bias_rejected=0; // moves rejected because cluster composition changed
Point dir={1,1,1}, dp;
std::vector<std::string> names; // names of molecules to be considered
std::vector<int> ids; // molecule id's of molecules to be considered
std::vector<size_t> index; // index of all possible molecules to be considered
// Binary clustering criterion: 1 if the two mass centers are within threshold.
virtual double clusterProbability(const Tgroup &g1, const Tgroup &g2) const {
if (spc.geo.sqdist(g1.cm, g2.cm)<=thresholdsq)
return 1.0;
return 0.0;
}
void _to_json(json &j) const override {
using namespace u8;
j = {
{"threshold", std::sqrt(thresholdsq)}, {"dir", dir}, {"dp", dptrans}, {"dprot", dprot},
{rootof + bracket("r" + squared), std::sqrt(msqd.avg())},
{rootof + bracket(theta + squared) + "/" + degrees, std::sqrt(msqd_angle.avg()) / 1.0_deg},
{bracket("N"), N.avg()},
{"bias rejection rate", double(bias_rejected) / cnt}
};
_roundjson(j,3);
}
void _from_json(const json &j) override {
dptrans = j.at("dp");
dir = j.value("dir", Point(1,1,1));
dprot = j.at("dprot");
thresholdsq = std::pow(j.at("threshold").get<double>(), 2);
names = j.at("molecules").get<decltype(names)>(); // molecule names
ids = names2ids(molecules<Tpvec>, names); // names --> molids
index.clear();
// Cache group indices of all non-atomic groups of the selected molecule types.
for (auto &g : spc.groups)
if (!g.atomic)
if (std::find(ids.begin(), ids.end(), g.id)!=ids.end() )
index.push_back( &g-&spc.groups.front() );
if (repeat<0)
repeat = index.size();
}
/**
* @param spc Space
* @param first Index of initial molecule (randomly selected)
* @param index w. all molecules clustered around first (first included)
*/
void findCluster(Tspace &spc, size_t first, std::set<size_t>& cluster) const {
// NOTE(review): `first` is a *group* index but is bounds-checked against the
// particle count spc.p.size() — confirm this is the intended assertion.
assert(first < spc.p.size());
std::set<size_t> pool(index.begin(), index.end());
assert(pool.count(first)>0);
cluster.clear();
cluster.insert(first);
pool.erase(first);
size_t n;
// Grow the cluster until a full pass adds no new member (fixed point).
do { // find cluster (not very clever...)
n = cluster.size();
for (size_t i : cluster)
if (not spc.groups.at(i).empty()) // check if group is inactive
for (size_t j : pool)
if (not spc.groups.at(j).empty()) // check if group is inactive
if (i!=j) {
// probability to cluster
double P = clusterProbability(spc.groups.at(i), spc.groups.at(j));
if ( Movebase::slump() <= P ) {
cluster.insert(j);
pool.erase(j);
break;
}
}
} while (cluster.size() not_eq n);
// check if cluster is too large
double max = spc.geo.getLength().minCoeff()/2;
for (auto i : cluster)
for (auto j : cluster)
if (j>i)
if (spc.geo.sqdist(spc.groups.at(i).cm, spc.groups.at(j).cm)>=max*max)
throw std::runtime_error(name+": cluster larger than half box length");
}
void _move(Change &change) override {
if (thresholdsq>0 && !index.empty()) {
std::set<size_t> cluster; // all group index in cluster
size_t first = *slump.sample(index.begin(), index.end()); // random molecule (nuclei)
findCluster(spc, first, cluster); // find cluster around first
N += cluster.size(); // average cluster size
Change::data d;
d.all=true;
dp = 0.5*ranunit(slump).cwiseProduct(dir) * dptrans;
angle = dprot * (slump()-0.5);
Point COM = Geometry::trigoCom(spc, cluster); // cluster center
Eigen::Quaterniond Q;
Q = Eigen::AngleAxisd(angle, ranunit(slump)); // quaternion
for (auto i : cluster) { // loop over molecules in cluster
auto &g = spc.groups[i];
// Rotate particles about the cluster center, then rotate the stored mass
// center through the same transform (with PBC wrapping at each step).
Geometry::rotate(g.begin(), g.end(), Q, spc.geo.boundaryFunc, -COM);
g.cm = g.cm-COM;
spc.geo.boundary(g.cm);
g.cm = Q*g.cm+COM;
spc.geo.boundary(g.cm);
g.translate( dp, spc.geo.boundaryFunc );
d.index=i;
change.groups.push_back(d);
}
#ifndef NDEBUG
// Sanity check: the cluster center should have moved by exactly |dp|.
Point newCOM = Geometry::trigoCom(spc, cluster);
double _zero = std::sqrt( spc.geo.sqdist(COM,newCOM) ) - dp.norm();
if (fabs(_zero)>1)
std::cerr << _zero << " ";
#endif
// Reject if cluster composition changes during move
// Note: this only works for the binary 0/1 probability function
// currently implemented in `findCluster()`.
std::set<size_t> aftercluster; // all group index in cluster _after_move
findCluster(spc, first, aftercluster); // find cluster around first
if (aftercluster == cluster)
_bias = 0;
else {
_bias = pc::infty;
bias_rejected++;
}
}
}
double bias(Change &change, double uold, double unew) override {
return _bias;
} //!< adds extra energy change not captured by the Hamiltonian
void _reject(Change &change) override { msqd += 0; msqd_angle += 0; }
void _accept(Change &change) override {
msqd += dp.squaredNorm();
msqd_angle += angle*angle;
}
public:
Cluster(Tspace &spc) : spc(spc) {
cite = "doi:10/cj9gnn";
name = "cluster";
repeat = -1; // meaning repeat N times
}
};
/**
 * @brief Pivot move: rotate part of a molecule around a random harmonic bond axis.
 *
 * A random harmonic bond (i1,i2) is picked; with 50/50 probability either the
 * atoms after i2 or the atoms before i1 are rotated by a random angle about
 * the bond axis. The group's mass center is recomputed afterwards.
 */
template<typename Tspace>
class Pivot : public Movebase {
private:
typedef typename Tspace::Tpvec Tpvec;
std::vector<std::shared_ptr<Potential::BondData>> bonds; // harmonic bonds of the molecule type
std::vector<int> index; // atom index to rotate
Tspace& spc;
std::string molname;
int molid;
double dprot;            // max rotation displacement (radians)
double d2; // cm movement, squared
Average<double> msqd; // cm mean squared displacement
void _to_json(json &j) const override {
using namespace u8;
j = {
{"molecule", molname}, {"dprot", dprot},
{u8::rootof + u8::bracket("r_cm" + u8::squared), std::sqrt(msqd.avg())}
};
_roundjson(j,3);
}
void _from_json(const json &j) override {
dprot = j.at("dprot");
molname = j.at("molecule");
auto it = findName(molecules<Tpvec>, molname);
if (it == molecules<Tpvec>.end())
throw std::runtime_error("unknown molecule '" + molname + "'");
molid = it->id();
// Only harmonic bonds are eligible pivot axes.
bonds = Potential::filterBonds(
molecules<Tpvec>[molid].bonds, Potential::BondData::HARMONIC);
if (repeat<0) {
auto v = spc.findMolecules(molid);
repeat = std::distance(v.begin(), v.end()); // repeat for each molecule...
if (repeat>0)
repeat *= bonds.size();
}
}
void _move(Change &change) override {
d2=0;
if (std::fabs(dprot)>1e-9) {
auto g = spc.randomMolecule(molid, slump); // look for random group
if (g!=spc.groups.end())
if (g->size()>2) { // must at least have three atoms
auto b = slump.sample(bonds.begin(), bonds.end()); // random harmonic bond
if (b != bonds.end()) {
int i1 = (*b)->index.at(0);
int i2 = (*b)->index.at(1);
// Bond indices are relative to the group; offset maps them into spc.p.
int offset = std::distance( spc.p.begin(), g->begin() );
index.clear();
// Pick one side of the bond to rotate: tail after i2, or head before i1.
if (slump()>0.5)
for (size_t i=i2+1; i<g->size(); i++)
index.push_back(i+offset);
else
for (int i=0; i<i1; i++)
index.push_back(i+offset);
i1+=offset;
i2+=offset;
if (!index.empty()) {
Point oldcm = g->cm;
g->unwrap(spc.geo.distanceFunc); // remove pbc
Point u = (spc.p[i1].pos - spc.p[i2].pos).normalized(); // bond axis
double angle = dprot * (slump()-0.5);
Eigen::Quaterniond Q( Eigen::AngleAxisd(angle, u) );
auto M = Q.toRotationMatrix();
for (auto i : index) {
spc.p[i].rotate(Q, M); // internal rot.
spc.p[i].pos = Q * ( spc.p[i].pos - spc.p[i1].pos)
+ spc.p[i1].pos; // positional rot.
}
// Group is unwrapped here, so a plain (non-PBC) mass center is valid.
g->cm = Geometry::massCenter(g->begin(), g->end());
g->wrap(spc.geo.boundaryFunc); // re-apply pbc
d2 = spc.geo.sqdist(g->cm, oldcm); // CM movement
Change::data d;
d.index = Faunus::distance( spc.groups.begin(), g ); // integer *index* of moved group
d.all = d.internal = true; // trigger internal interactions
change.groups.push_back( d ); // add to list of moved groups
}
}
}
}
}
void _accept(Change &change) override { msqd += d2; }
void _reject(Change &change) override { msqd += 0; }
public:
Pivot(Tspace &spc) : spc(spc) {
name = "pivot";
repeat = -1; // --> repeat=N
}
}; //!< Pivot move around random harmonic bond axis
#ifdef ENABLE_MPI
/**
* @brief Class for parallel tempering (aka replica exchange) using MPI
*
* Although not completely correct, the recommended way of performing a temper move
* is to do `N` Monte Carlo passes with regular moves and then do a tempering move.
* This is because the MPI nodes must be in sync and if you have a system where
* the random number generator calls are influenced by the Hamiltonian we could
* end up in a deadlock.
*
* @date Lund 2012, 2018
*/
template<class Tspace>
class ParallelTempering : public Movebase {
private:
typedef typename Tspace::Tpvec Tpvec;
typedef typename Tspace::Tparticle Tparticle;
Tspace& spc; // Space to operate on
MPI::MPIController& mpi;
int partner; //!< Exchange replica (partner)
enum extradata {VOLUME=0}; //!< Structure of extra data to send
std::map<std::string, Average<double>> accmap; // acceptance per partner pair
MPI::FloatTransmitter ft; //!< Class for transmitting floats over MPI
MPI::ParticleTransmitter<Tpvec> pt;//!< Class for transmitting particles over MPI
// Pair up neighboring ranks; even ranks step towards the drawn direction,
// odd ranks step the opposite way so each pair agrees on its partner.
void findPartner() {
int dr=0;
partner = mpi.rank();
(mpi.random()>0.5) ? dr++ : dr--;
(mpi.rank() % 2 == 0) ? partner+=dr : partner-=dr;
} //!< Find replica to exchange with
bool goodPartner() {
assert(partner!=mpi.rank() && "Selfpartner! This is not supposed to happen.");
if (partner>=0)
if ( partner<mpi.nproc() )
if ( partner!=mpi.rank() )
return true;
return false;
} //!< Is partner valid?
void _to_json(json &j) const override {
j = {
{ "replicas", mpi.nproc() },
{ "datasize", pt.getFormat() }
};
json &_j = j["exchange"];
_j = json::object();
for (auto &m : accmap)
_j[m.first] = {
{"attempts", m.second.cnt},
{"acceptance", m.second.avg()}
};
}
void _move(Change &change) override {
double Vold = spc.geo.getVolume();
findPartner();
Tpvec p; // temporary storage
p.resize(spc.p.size());
if (goodPartner()) {
change.all=true;
pt.sendExtra[VOLUME]=Vold; // copy current volume for sending
// Post receive before send to avoid deadlock between the paired ranks.
pt.recv(mpi, partner, p); // receive particles
pt.send(mpi, spc.p, partner); // send everything
pt.waitrecv();
pt.waitsend();
double Vnew = pt.recvExtra[VOLUME];
if (Vnew<1e-9 || spc.p.size() != p.size())
MPI_Abort(mpi.comm, 1);
if (std::fabs(Vnew-Vold)>1e-9)
change.dV=true;
spc.p = p;
spc.geo.setVolume(Vnew);
// update mass centers
for (auto& g : spc.groups)
if (g.atomic==false)
g.cm = Geometry::massCenter(g.begin(), g.end(),
spc.geo.boundaryFunc, -g.begin()->pos);
}
}
double exchangeEnergy(double mydu) {
std::vector<MPI::FloatTransmitter::floatp> duSelf(1), duPartner;
duSelf[0]=mydu;
duPartner = ft.swapf(mpi, duSelf, partner);
return duPartner.at(0); // return partner energy change
} //!< Exchange energy with partner
double bias(Change &change, double uold, double unew) override {
return exchangeEnergy(unew-uold); // Exchange dU with partner (MPI)
}
std::string id() {
std::ostringstream o;
if (mpi.rank() < partner)
o << mpi.rank() << " <-> " << partner;
else
o << partner << " <-> " << mpi.rank();
return o.str();
} //!< Unique string to identify set of partners
void _accept(Change &change) override {
if ( goodPartner() )
accmap[ id() ] += 1;
}
void _reject(Change &change) override {
if ( goodPartner() )
accmap[ id() ] += 0;
}
void _from_json(const json &j) override {
pt.setFormat( j.value("format", std::string("XYZQI") ) );
}
public:
ParallelTempering(Tspace &spc, MPI::MPIController &mpi ) : spc(spc), mpi(mpi) {
name="temper";
partner=-1;
pt.recvExtra.resize(1);
pt.sendExtra.resize(1);
}
};
#endif
/**
 * @brief Container that builds moves from json and samples them by weight.
 *
 * Each move contributes its `repeat` value as a weight to a discrete
 * distribution; `repeat()` is the total weight and `sample()` draws one move.
 */
template<typename Tspace>
class Propagator : public BasePointerVector<Movebase> {
private:
int _repeat;
std::discrete_distribution<> dist;
std::vector<double> w; // list of weights for each move
// Append a weight and rebuild the sampling distribution and total repeat count.
void addWeight(double weight=1) {
w.push_back(weight);
dist = std::discrete_distribution<>(w.begin(), w.end());
_repeat = int(std::accumulate(w.begin(), w.end(), 0.0));
}
public:
using BasePointerVector<Movebase>::vec;
inline Propagator() {}
inline Propagator(const json &j, Tspace &spc, MPI::MPIController &mpi) {
if (j.count("random")==1)
Movebase::slump = j["random"]; // slump is static --> shared for all moves
for (auto &m : j.at("moves")) {// loop over move list
size_t oldsize = vec.size();
for (auto it : m.items()) {
try {
#ifdef ENABLE_MPI
if (it.key()=="temper") this->template push_back<Move::ParallelTempering<Tspace>>(spc, mpi);
#endif
if (it.key()=="moltransrot") this->template push_back<Move::TranslateRotate<Tspace>>(spc);
if (it.key()=="conformationswap") this->template push_back<Move::ConformationSwap<Tspace>>(spc);
if (it.key()=="transrot") this->template push_back<Move::AtomicTranslateRotate<Tspace>>(spc);
if (it.key()=="pivot") this->template push_back<Move::Pivot<Tspace>>(spc);
if (it.key()=="volume") this->template push_back<Move::VolumeMove<Tspace>>(spc);
if (it.key()=="speciation") this->template push_back<Move::SpeciationMove<Tspace>>(spc);
if (it.key()=="cluster") this->template push_back<Move::Cluster<Tspace>>(spc);
// A known key grows the vector by exactly one; configure that new move.
if (vec.size()==oldsize+1) {
vec.back()->from_json( it.value() );
addWeight(vec.back()->repeat);
} else
std::cerr << "warning: ignoring unknown move '" << it.key() << "'" << endl;
} catch (std::exception &e) {
throw std::runtime_error("Error adding move '" + it.key() + "': " + e.what());
}
}
}
}
int repeat() { return _repeat; }
auto sample() {
if (!vec.empty()) {
assert(w.size() == vec.size());
return vec.begin() + dist( Move::Movebase::slump.engine );
}
return vec.end();
} //!< Pick move from a weighted, random distribution
};
}//Move namespace
/**
 * @brief Two-state Metropolis Monte Carlo driver.
 *
 * Keeps an "old" state (state1) and a "trial" state (state2); moves are
 * applied to state2, energies of both states are evaluated for the changed
 * parts only, and the states are synchronized on accept/reject.
 */
template<class Tgeometry, class Tparticle>
class MCSimulation {
private:
typedef Space<Tgeometry, Tparticle> Tspace;
typedef typename Tspace::Tpvec Tpvec;
bool metropolis(double du) const {
if (std::isnan(du))
throw std::runtime_error("Metropolis error: energy cannot be NaN");
if (du<0)
return true;
return ( Move::Movebase::slump() > std::exp(-du)) ? false : true;
} //!< Metropolis criterion (true=accept)
struct State {
Tspace spc;
Energy::Hamiltonian<Tspace> pot;
State(const json &j) : spc(j), pot(spc,j) {}
// Copy only the changed parts of `other` into this state.
void sync(State &other, Change &change) {
spc.sync( other.spc, change );
pot.sync( &other.pot, change );
}
}; //!< Contains everything to describe a state
State state1, // old state
state2; // new state (trial);
double uinit=0, dusum=0; // initial total energy; accumulated accepted dU
Average<double> uavg;
void init() {
dusum=0;
Change c; c.all=true;
state1.pot.key = Energy::Energybase::OLD; // this is the old energy (current)
state2.pot.key = Energy::Energybase::NEW; // this is the new energy (trial)
state1.pot.init();
uinit = state1.pot.energy(c);
state2.sync(state1, c);
state2.pot.init();
// Hack in reference to state1 in speciation
for (auto base : moves.vec) {
auto derived = std::dynamic_pointer_cast<Move::SpeciationMove<Tspace>>(base);
if (derived)
derived->setOther(state1.spc);
}
#ifndef NDEBUG
// Consistency check: both states must report (nearly) the same total energy.
// NOTE(review): for negative uinit the relative check error/uinit<1e-3 is
// trivially true — confirm std::fabs was intended on the ratio.
double u2 = state2.pot.energy(c);
double error = std::fabs(uinit-u2);
if (std::isfinite(uinit)) {
if (uinit!=0)
assert(error/uinit<1e-3);
else
assert(error<1e-6);
}
//cout << "u1 = " << uinit << " u2 = " << u2 << endl;
//assert( std::fabs((uinit-u2)/uinit)<1e-3 );
#endif
}
public:
Move::Propagator<Tspace> moves;
auto& pot() { return state1.pot; }
auto& space() { return state1.spc; }
const auto& pot() const { return state1.pot; }
const auto& space() const { return state1.spc; }
const auto& geometry() const { return state1.spc.geo; }
const auto& particles() const { return state1.spc.p; }
double drift() {
Change c; c.all=true;
double ufinal = state1.pot.energy(c);
// NOTE(review): divides by uinit — yields inf/NaN when the initial energy
// is zero; confirm callers guard against this.
return ( ufinal-(uinit+dusum) ) / uinit;
} //!< Calculates the relative energy drift from initial configuration
MCSimulation(const json &j, MPI::MPIController &mpi) : state1(j), state2(j), moves(j, state2.spc, mpi) {
init();
}
void store(json &j) const {
j = state1.spc;
j["random-move"] = Move::Movebase::slump;
j["random-global"] = Faunus::random;
} // store system to json object
void restore(const json &j) {
state1.spc = j;
state2.spc = j;
Move::Movebase::slump = j["random-move"]; // restore move random number generator
Faunus::random = j["random-global"]; // restore global random number generator
reactions<Tpvec> = j.at("reactionlist").get<decltype(reactions<Tpvec>)>(); // should be handled by space
init();
} //!< restore system from previously store json object
void move() {
Change change;
for (int i=0; i<moves.repeat(); i++) {
auto mv = moves.sample(); // pick random move
if (mv != moves.end() ) {
change.clear();
(**mv).move(change);
if (!change.empty()) {
double unew, uold, du;
// Evaluate trial and current energies of the changed parts in parallel.
#pragma omp parallel sections
{
#pragma omp section
{ unew = state2.pot.energy(change); }
#pragma omp section
{ uold = state1.pot.energy(change); }
}
du = unew - uold;
// if any energy returns NaN (from i.e. division by zero), the
// configuration will always be rejected, or if moving from NaN
// to a finite energy, always accepted.
if (std::isnan(uold) and not std::isnan(unew))
du = -pc::infty; // accept
else if (std::isnan(unew))
du = pc::infty; // reject
// if the difference in energy is NaN (from i.e. infinity minus infinity), the
// configuration will always be accepted. This should be
// noted during equilibration.
else if (std::isnan(du))
du = 0; // accept
double bias = (**mv).bias(change, uold, unew) + Nchem( state2.spc, state1.spc , change);
if ( metropolis(du + bias) ) { // accept move
state1.sync( state2, change );
(**mv).accept(change);
}
else { // reject move
state2.sync( state1, change );
(**mv).reject(change);
du=0;
}
dusum+=du; // sum of all energy changes
}
}
}
}
void to_json(json &j) {
j = state1.spc.info();
j["temperature"] = pc::temperature / 1.0_K;
j["moves"] = moves;
j["energy"].push_back(state1.pot);
}
};
/** @brief json serialization — forwards to the member function so that
 *  `json j = mc;`-style conversion works via ADL. */
template<class Tgeometry, class Tparticle>
void to_json(json &j, MCSimulation<Tgeometry,Tparticle> &mc) { mc.to_json(j); }
/**
* @brief add documentation.....
*
* @f[
* \beta U = \ln ( \sum N_o!/N_n! \exp([N_n - N_o]\beta \mu) V^{N_n - N_o} )
* @f]
*
* @todo
* - Rename to something more descriptive
* - use exception message to suggest how to fix the problem
*/
/**
 * @brief Ideal-gas / chemical-potential bias for particle-number changes.
 *
 * Computes -ln(Pref) for a change in the number of molecules or atoms so
 * that exp{-beta dU + ln Pref} gives the correct grand-canonical acceptance.
 * Returns 0 when the change involves no particle-number fluctuation.
 */
template<typename Tspace>
double Nchem( Tspace &spc_n, Tspace &spc_o, const Change &change) {
using Tpvec = typename Tspace::Tpvec;
double NoverO=0;
if ( change.dNpart ) {// Have the number of any molecules changed
for ( auto &m : change.groups ) { // ToDo fix so that it works for dN > 1 for molecuar species
int N_o = 0;//spc_o.moltracker[spc_n.groups[m.index].id];
int N_n = 0;//spc_n.moltracker[spc_n.groups[m.index].id];
// int dN = N_n - N_o;
if (!m.dNpart)
if (!molecules<Tpvec>[ spc_n.groups[m.index].id ].atomic) { // Molecular species
// NOTE(review): m.index is a *group* index yet is passed where other call
// sites pass a molecule id — confirm findMolecules' expected argument here.
auto mollist_n = spc_n.findMolecules(m.index, Tspace::ACTIVE);
auto mollist_o = spc_o.findMolecules(m.index, Tspace::ACTIVE);
N_n=size(mollist_n);
N_o=size(mollist_o);
}
if ( m.dNpart ) {
// Atomic species: count particles inside the single atomic group.
auto mollist_n = spc_n.findMolecules(spc_n.groups[m.index].id, Tspace::ALL);
auto mollist_o = spc_o.findMolecules(spc_o.groups[m.index].id, Tspace::ALL);
if ( size(mollist_n) > 1 || size(mollist_o) > 1 )
throw std::runtime_error("Bad definition: One group per atomic molecule!");
// Below is safe due to the catches above
// add consistency criteria with m.atoms.size() == N
N_n = mollist_n.begin()->size();
N_o = mollist_o.begin()->size();
}
int dN = N_n - N_o;
// std::cout <<"old dN "<<dN<<" new dN"<< spc_n.moltracker[spc_n.groups[m.index].id]-spc_o.moltracker[spc_n.groups[m.index].id]<<std::endl
// <<"old N_n and N_o "<<N_n<<" "<<N_o<<std::endl
// <<"new N_n and N_o "<<spc_n.moltracker[spc_n.groups[m.index].id]<<" "<<spc_o.moltracker[spc_n.groups[m.index].id]<<std::endl;
if (dN!=0) {
double V_n = spc_n.geo.getVolume();
double V_o = spc_o.geo.getVolume();
double betamu = molecules<Tpvec>[ spc_n.groups[m.index].id ].activity;
// todo: add runtime error if activity <=0 ?
if (betamu > 1e-20)
betamu = std::log( betamu / 1.0_molar );
// Sum ln[(N+1)/(V*c0)] factors for each inserted/deleted particle.
if (dN>0)
for (int n=0; n < dN; n++)
NoverO += -std::log( (N_o + 1 + n) / ( V_n * 1.0_molar )) + betamu;
else if (dN<0)
for (int n=0; n < (-dN); n++)
NoverO += std::log( (N_o - n) / ( V_n * 1.0_molar )) - betamu;
}
}
}
return -NoverO; // negative sign since Pref exp{-beta(dU)} = exp{-beta(dU -ln(Pref)}
}
}//Faunus namespace
|
dependences.c | // RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8
#include "callback.h"
#include <omp.h>
#include <math.h>
#include <unistd.h>
/* OMPT lit test: verifies task_create/task_dependences/task_dependence
 * callbacks for an out->in dependence pair between two explicit tasks.
 * The comment lines below are FileCheck directives and must not change. */
int main()
{
int x = 0;
#pragma omp parallel num_threads(2)
{
#pragma omp master
{
print_ids(0);
#pragma omp task depend(out:x)
{
x++;
delay(100);
}
print_fuzzy_address(1);
print_ids(0);
#pragma omp task depend(in:x)
{
x = -1;
}
print_ids(0);
}
}
x++;
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_dependences'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_dependence'
// CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]
// make sure initial data pointers are null
// CHECK-NOT: 0: new_task_data initially not null
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT:0x[0-f]+]], reenter_frame=[[NULL]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]], parent_task_frame.reenter={{0x[0-f]+}}, new_task_id=[[FIRST_TASK:[0-f]+]], codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, task_type=ompt_task_explicit=4, has_dependences=yes
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_dependences: task_id=[[FIRST_TASK]], deps={{0x[0-f]+}}, ndeps=1
// CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]], parent_task_frame.reenter={{0x[0-f]+}}, new_task_id=[[SECOND_TASK:[0-f]+]], codeptr_ra={{0x[0-f]+}}, task_type=ompt_task_explicit=4, has_dependences=yes
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_dependences: task_id=[[SECOND_TASK]], deps={{0x[0-f]+}}, ndeps=1
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_dependence_pair: first_task_id=[[FIRST_TASK]], second_task_id=[[SECOND_TASK]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]]
return 0;
}
|
SparseLinear.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SparseLinear.c"
#else
#ifdef _OPENMP
#include <omp.h>
#endif
#define ROW_PTR2(t, r) (THTensor_(data)(t) + (r) * (t)->stride[0])
#define COL_PTR2(t, c) (THTensor_(data)(t) + (c) * (t)->stride[1])
/* Legacy sparse input: 3-d tensor (batch x nnz x 2) of (index, value) pairs. */
static bool THNN_(checkLegacyInput)(THTensor* t)
{
if (t->nDimension != 3)
return false;
return t->size[2] == 2;
}
/* COO sparse input: 2-d tensor (nnz x 3) of (row, col, value) triples. */
static bool THNN_(checkInput)(THTensor* t)
{
if (t->nDimension != 2)
return false;
return t->size[1] == 3;
}
/* True iff t is a 2-d tensor with exactly the requested shape. */
static bool THNN_(checkSize2D)(THTensor* t, int64_t size0, int64_t size1)
{
if (t->nDimension != 2)
return false;
return (t->size[0] == size0) && (t->size[1] == size1);
}
/* True iff t is a 1-d tensor of the requested length. */
static bool THNN_(checkSize1D)(THTensor* t, int64_t size0)
{
if (t->nDimension != 1)
return false;
return t->size[0] == size0;
}
/* Write `value` at element x0 of a 1-d tensor, honoring offset and stride. */
static void THNN_(set1d)(THTensor *t, int64_t x0, real value) {
const int64_t idx = t->storageOffset + x0 * t->stride[0];
THStorage_(set)(t->storage, idx, value);
}
/* Read element (x0, x1, x2) from a 3-d tensor via strided addressing. */
static real THNN_(get3d)(const THTensor *t, int64_t x0, int64_t x1, int64_t x2) {
int64_t idx = t->storageOffset;
idx += x0 * t->stride[0];
idx += x1 * t->stride[1];
idx += x2 * t->stride[2];
return THStorage_(get)(t->storage, idx);
}
/* Read element (x0, x1) from a 2-d tensor via strided addressing. */
static real THNN_(get2d)(const THTensor *t, int64_t x0, int64_t x1) {
int64_t idx = t->storageOffset;
idx += x0 * t->stride[0];
idx += x1 * t->stride[1];
return THStorage_(get)(t->storage, idx);
}
/* Forward pass: output = weight * sparse_input + bias.
 * Input is COO (nnz x 3) with 1-based (row, col, value) triples.
 * NOTE(review): the CSR construction below assumes the input rows are
 * sorted in ascending order — confirm callers guarantee this. */
void THNN_(SparseLinear_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
THTensor *weight,
THTensor *bias)
{
int64_t h, i, j, hp0, hp1;
int64_t outDim = THTensor_(size)(weight, 0);
int64_t inDim = THTensor_(size)(weight, 1);
int64_t batchSize = THTensor_(size)(output, 0);
THArgCheck(THNN_(checkInput)(input), 2, "input must be in coo format, nnz x 3");
THArgCheck(THTensor_(isContiguous)(output), 3, "output must be contiguous");
THArgCheck(THNN_(checkSize1D)(bias, outDim), 5, "bias size wrong");
int64_t nnz = THTensor_(size)(input, 0);
/* Build CSR row pointers: csr[h] .. csr[h+1] spans the nnz entries of row h. */
THLongTensor * csr = THLongTensor_newWithSize1d(batchSize+1);
THLongTensor_zero(csr);
weight = THTensor_(newContiguous)(weight);
//#pragma omp parallel for private(i, h, hp0, hp1) schedule(static) if (nnz > 10000)
for (i=0; i<nnz; i++) {
hp0 = (int64_t)(THNN_(get2d)(input, i, 0)) - 1;
hp1 = (i+1 == nnz) ?
batchSize :
(int64_t)(THNN_(get2d)(input, i+1, 0)) - 1;
if (hp0 != hp1) for (h = hp0; h < hp1; h++) {
THLongTensor_set1d(csr, h+1, i+1);
}
}
// output = weight * input + bias
THTensor_(zero)(output);
/* Rows are independent, so the batch loop parallelizes without conflicts. */
#pragma omp parallel for private(h, i) schedule(static) if (nnz > 10000)
for (h = 0; h < batchSize; h++) {
int64_t i_start = THLongTensor_get1d(csr, h);
int64_t i_end = THLongTensor_get1d(csr, h+1);
for (i = i_start; i < i_end; i++) {
real val = THNN_(get2d)(input, i, 2);
if (val == 0) {
continue;
}
/* Column indices are 1-based on the wire; convert and bounds-check. */
int64_t offset = (int64_t)(THNN_(get2d)(input, i, 1)) - 1;
if (offset >= 0 && offset < inDim) {
THBlas_(axpy)(outDim,
val,
COL_PTR2(weight, offset), weight->stride[0],
ROW_PTR2(output, h), output->stride[1]);
} else {
THError("index out of bound. updateOutput: %d not between 1 and %d",
offset + 1, inDim);
}
}
}
/* Add the bias to every output row. */
THTensor* output_row = THTensor_(new)();
for (h = 0; h < batchSize; h++) {
THTensor_(select)(output_row, output, 0, h);
THTensor_(cadd)(output_row, bias, 1.0, output_row);
}
THTensor_(free)(output_row);
THLongTensor_free(csr);
THTensor_(free)(weight);
}
/* Legacy forward pass: output = weight * sparse_input + bias, where input
 * is a dense batchsize x nnz x 2 tensor of 1-based (index, value) pairs. */
void THNN_(SparseLinear_legacyUpdateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
THTensor *weight,
THTensor *bias)
{
int64_t h, i;
int64_t outDim = THTensor_(size)(weight, 0);
int64_t inDim = THTensor_(size)(weight, 1);
THArgCheck(THNN_(checkLegacyInput)(input), 2, "input size must be batchsize x nnz x 2");
THArgCheck(THTensor_(isContiguous)(output), 3, "output must be contiguous");
THArgCheck(THNN_(checkSize1D)(bias, outDim), 5, "bias size wrong");
weight = THTensor_(newContiguous)(weight);
int64_t batchSize = THTensor_(size)(input, 0);
int64_t nnz = THTensor_(size)(input, 1);
THTensor_(resize2d)(output, batchSize, outDim);
// output = weight * input + bias
THTensor_(zero)(output);
/* Each thread owns one batch row, so axpy writes never overlap. */
#pragma omp parallel for private(h, i) schedule(static) if ( \
batchSize > 1 && batchSize * nnz * outDim > 10000)
for (h = 0; h < batchSize; h++) {
for (i = 0; i < nnz; i++) {
real val = THNN_(get3d)(input, h, i, 1);
if (val == 0) {
continue;
}
/* Indices are 1-based on the wire; convert and bounds-check. */
int64_t offset = (int64_t)(THNN_(get3d)(input, h, i, 0)) - 1;
if (offset >= 0 && offset < inDim) {
THBlas_(axpy)(outDim,
val,
COL_PTR2(weight, offset), weight->stride[0],
ROW_PTR2(output, h), output->stride[1]);
} else {
THError("index out of bound. updateOutput: %d not between 1 and %d",
offset + 1, inDim);
}
}
}
/* Add the bias to every output row. */
THTensor* output_row = THTensor_(new)();
for (h = 0; h < batchSize; h++) {
THTensor_(select)(output_row, output, 0, h);
THTensor_(cadd)(output_row, bias, 1.0, output_row);
}
THTensor_(free)(output_row);
THTensor_(free)(weight);
}
/* Accumulate gradients for COO input:
 * gradWeight += scale * gradOutput^T * input, gradBias += scale * sum(gradOutput),
 * plus optional L2 term weightDecay * weight.
 * NOTE(review): the CSC construction below assumes the input entries are
 * sorted by column — confirm callers guarantee this. */
void THNN_(SparseLinear_accGradParameters)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradWeight,
THTensor *gradBias,
THTensor *weight,
THTensor *bias,
accreal weightDecay_,
accreal scale_)
{
real weightDecay = TH_CONVERT_ACCREAL_TO_REAL(weightDecay_);
real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
int64_t h, i, col, hp0, hp1;
int64_t outDim = THTensor_(size)(weight, 0);
int64_t inDim = THTensor_(size)(weight, 1);
THArgCheck(THNN_(checkInput)(input), 2,
"input must be in coo format, nnz x 3");
THArgCheck(THNN_(checkSize2D)(gradWeight, outDim, inDim), 4,
"gradWeight size wrong");
THArgCheck(THNN_(checkSize1D)(gradBias, outDim), 5,
"gradBias size wrong");
THArgCheck(THTensor_(isContiguous)(gradOutput), 1,
"gradOutput must be contiguous");
int64_t nnz = THTensor_(size)(input, 0);
/* Build CSC column pointers: csc[c] .. csc[c+1] spans the entries of column c.
 * Each iteration writes a disjoint range of csc entries, so the loop is safe
 * to parallelize. */
THLongTensor* csc = THLongTensor_newWithSize1d(inDim+1);
THLongTensor_zero(csc);
weight = THTensor_(newContiguous)(weight);
#pragma omp parallel for private(i, h, hp0, hp1) schedule(static) if (nnz > 10000)
for (i = 0; i < nnz; i++) {
hp0 = (int64_t)(THNN_(get2d)(input, i, 1)) - 1;
hp1 = (i+1 == nnz) ?
inDim :
(int64_t)(THNN_(get2d)(input, i+1, 1)) - 1;
if (hp0 != hp1) for (h = hp0; h < hp1; h++) {
THLongTensor_set1d(csc, h+1, i+1);
}
}
// gradWeight += gradOutput * input
/* Each thread owns one gradWeight column, so accumulation never overlaps. */
#pragma omp parallel for private(h, i, col) schedule(static) if (nnz > 10000)
for (col = 0; col < inDim; col++) {
int64_t i_start = THLongTensor_get1d(csc, col);
int64_t i_end = THLongTensor_get1d(csc, col+1);
for (i = i_start; i < i_end; i++) {
real val = scale * THNN_(get2d)(input, i, 2);
h = (int64_t)(THNN_(get2d)(input, i, 0)) - 1;
int64_t offset = (int64_t)(THNN_(get2d)(input, i, 1)) - 1;
if (offset >= 0 && offset < inDim) {
THBlas_(axpy)(outDim,
val,
ROW_PTR2(gradOutput, h), gradOutput->stride[1],
COL_PTR2(gradWeight, offset), gradWeight->stride[0]);
} else {
THError(
"index out of bound. accGradParameters: %d not between 1 and %d",
offset + 1,
inDim);
}
}
}
// gradBias += gradOutput
THTensor* buf = THTensor_(new)();
THTensor_(sum)(buf, gradOutput, 0, 1);
THTensor_(cadd)(gradBias, gradBias, scale, buf);
THTensor_(free)(buf);
THLongTensor_free(csc);
if (weightDecay != 0) {
THTensor_(cadd)(gradWeight, gradWeight, weightDecay, weight);
}
THTensor_(free)(weight);
}
/* Legacy gradient accumulation for batchsize x nnz x 2 input:
 * gradWeight += scale * gradOutput^T * input, gradBias += scale * sum(gradOutput),
 * plus optional L2 term weightDecay * weight. */
void THNN_(SparseLinear_legacyAccGradParameters)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradWeight,
THTensor *gradBias,
THTensor *weight,
THTensor *bias,
accreal weightDecay_,
accreal scale_)
{
real weightDecay = TH_CONVERT_ACCREAL_TO_REAL(weightDecay_);
real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
int64_t h, i;
int64_t outDim = THTensor_(size)(weight, 0);
int64_t inDim = THTensor_(size)(weight, 1);
THArgCheck(THNN_(checkLegacyInput)(input), 2,
"input size must be batchsize x nnz x 2");
THArgCheck(THNN_(checkSize2D)(gradWeight, outDim, inDim), 4,
"gradWeight size wrong");
THArgCheck(THNN_(checkSize1D)(gradBias, outDim), 5,
"gradBias size wrong");
THArgCheck(THTensor_(isContiguous)(gradOutput), 1,
"gradOutput must be contiguous");
int64_t batchSize = THTensor_(size)(input, 0);
int64_t nnz = THTensor_(size)(input, 1);
THTensor_(resize2d)(gradOutput, batchSize, outDim);
// gradWeight += gradOutput * input
/* NOTE(review): this parallelizes over nnz, but two different i can carry
 * the same column offset across the batch, making the axpy into that
 * gradWeight column a potential data race — confirm inputs preclude this. */
#pragma omp parallel for private(h, i) schedule(static) if (\
batchSize * nnz * outDim > 10000)
for (i = 0; i < nnz; i++) {
for (h = 0; h < batchSize; h++) {
real val = scale * THNN_(get3d)(input, h, i, 1);
if (val == 0) {
continue;
}
/* Indices are 1-based on the wire; convert and bounds-check. */
int64_t offset = (int64_t)(THNN_(get3d)(input, h, i, 0)) - 1;
if (offset >= 0 && offset < inDim) {
THBlas_(axpy)(outDim,
val,
ROW_PTR2(gradOutput, h), gradOutput->stride[1],
COL_PTR2(gradWeight, offset), gradWeight->stride[0]);
} else {
THError(
"index out of bound. accGradParameters: %d not between 1 and %d",
offset + 1,
inDim);
}
}
}
// gradBias += gradOutput
THTensor* gradOutput_row = THTensor_(new)();
for (h = 0; h < batchSize; h++) {
THTensor_(select)(gradOutput_row, gradOutput, 0, h);
THTensor_(cadd)(gradBias, gradBias, scale, gradOutput_row);
}
THTensor_(free)(gradOutput_row);
if (weightDecay != 0) {
THTensor_(cadd)(gradWeight, gradWeight, weightDecay, weight);
}
}
/* SGD parameter update for a sparse linear layer (coo input, nnz x 3):
 * bias -= learningRate * gradBias, and for each weight column touched by
 * the last input, weight[:,col] -= learningRate * gradWeight[:,col].
 * Columns are deduplicated via sort + unique so each is updated once.
 * Fix: the early return for "no nonzero entries" previously leaked the
 * `offsets` tensor; it is now freed on that path. */
void THNN_(SparseLinear_updateParameters)(
  THNNState *state,
  THTensor *weight,
  THTensor *bias,
  THTensor *gradWeight,
  THTensor *gradBias,
  THTensor *lastInput,
  accreal learningRate_)
{
  real learningRate = TH_CONVERT_ACCREAL_TO_REAL(learningRate_);
  int64_t i;  /* (removed an unused `h` local) */
  int64_t outDim = weight->size[0];
  int64_t inDim = weight->size[1];
  THArgCheck(THNN_(checkSize2D)(gradWeight, outDim, inDim), 4,
             "gradWeight size wrong");
  THArgCheck(THNN_(checkSize1D)(bias, outDim), 3, "bias size wrong");
  THArgCheck(THNN_(checkSize1D)(gradBias, outDim), 5, "gradBias size wrong");
  THArgCheck(THNN_(checkInput)(lastInput), 6,
             "input must be in coo format, nnz x 3");
  int64_t nnz = THTensor_(size)(lastInput, 0);
  // collect unique offsets of non-0 val in input
  THTensor* offsets = THTensor_(newWithSize1d)(nnz);
  int64_t cnt = 0;
  for (i = 0; i < nnz; i++) {
    real val = THNN_(get2d)(lastInput, i, 2);
    if (val == 0) {
      continue;
    }
    /* stored indices are 1-based */
    int64_t offset = (int64_t)(THNN_(get2d)(lastInput, i, 1)) - 1;
    if (offset >= 0 && offset < inDim) {
      THNN_(set1d)(offsets, cnt++, offset);
    } else {
      THError(
        "index out of bound. updateParameters: %d not between 1 and %d",
        offset + 1,
        inDim);
    }
  }
  if (cnt == 0) {
    /* BUG FIX: free `offsets` before bailing out — the original early
     * return leaked it. */
    THTensor_(free)(offsets);
    return;
  }
  THTensor_(resize1d)(offsets, cnt);
  /* Sort, then compact duplicates in place so each column is updated once. */
  THTensor* uniqueOffsets = THTensor_(new)();
  THLongTensor* ri = THLongTensor_new();
  THTensor_(sort)(uniqueOffsets, ri, offsets, 0, 0);
  THLongTensor_free(ri);
  THTensor_(free)(offsets);
  cnt = 1;
  real* uniqueOffsets_p = THTensor_(data)(uniqueOffsets);
  for (i = 1; i < THTensor_(size)(uniqueOffsets, 0); i++) {
    if (uniqueOffsets_p[i] != uniqueOffsets_p[i - 1]) {
      uniqueOffsets_p[cnt++] = uniqueOffsets_p[i];
    }
  }
  THTensor_(resize1d)(uniqueOffsets, cnt);
  // weight += -learningRate * gradWeight (bias first, then touched columns)
  THTensor_(cadd)(bias, bias, -learningRate, gradBias);
#pragma omp parallel for private(i) schedule(static) if (cnt * outDim > 10000)
  for (i = 0; i < cnt; i++) {
    int64_t offset = (int64_t)uniqueOffsets_p[i];
    THBlas_(axpy)(outDim,
                  -learningRate,
                  COL_PTR2(gradWeight, offset), gradWeight->stride[0],
                  COL_PTR2(weight, offset), weight->stride[0]);
  }
  THTensor_(free)(uniqueOffsets);
}
/* SGD parameter update, legacy input layout (batchsize x nnz x 2 with
 * (1-based column index, value) pairs). Deduplicates touched columns via
 * sort + unique, then applies weight[:,col] -= learningRate * gradWeight[:,col]
 * and bias -= learningRate * gradBias.
 * Fix: when no nonzero entries exist the original still set cnt = 1 and
 * read an uninitialized element of `uniqueOffsets`, producing an arbitrary
 * column offset and an out-of-bounds axpy. We now return early (freeing
 * `offsets`), matching the non-legacy SparseLinear_updateParameters. */
void THNN_(SparseLinear_legacyUpdateParameters)(
  THNNState *state,
  THTensor *weight,
  THTensor *bias,
  THTensor *gradWeight,
  THTensor *gradBias,
  THTensor *lastInput,
  accreal learningRate_)
{
  real learningRate = TH_CONVERT_ACCREAL_TO_REAL(learningRate_);
  int64_t h, i;
  int64_t outDim = weight->size[0];
  int64_t inDim = weight->size[1];
  THArgCheck(THNN_(checkSize2D)(gradWeight, outDim, inDim), 4,
             "gradWeight size wrong");
  THArgCheck(THNN_(checkSize1D)(bias, outDim), 3, "bias size wrong");
  THArgCheck(THNN_(checkSize1D)(gradBias, outDim), 5, "gradBias size wrong");
  THArgCheck(THNN_(checkLegacyInput)(lastInput), 6,
             "input size must be batchsize x nnz x 2");
  int64_t batchSize = THTensor_(size)(lastInput, 0);
  int64_t nnz = THTensor_(size)(lastInput, 1);
  // collect unique offsets of non-0 val in input
  THTensor* offsets = THTensor_(newWithSize1d)(batchSize * nnz);
  int64_t cnt = 0;
  for (h = 0; h < batchSize; h++) {
    for (i = 0; i < nnz; i++) {
      real val = THNN_(get3d)(lastInput, h, i, 1);
      if (val == 0 ) {
        continue;
      }
      /* stored indices are 1-based */
      int64_t offset = (int64_t)(THNN_(get3d)(lastInput, h, i, 0)) - 1;
      if (offset >= 0 && offset < inDim) {
        THNN_(set1d)(offsets, cnt++, offset);
      } else {
        THError(
          "index out of bound. updateParameters: %d not between 1 and %d",
          offset + 1,
          inDim);
      }
    }
  }
  if (cnt == 0) {
    /* BUG FIX: nothing to update — bail out instead of reading an
     * uninitialized offset below. */
    THTensor_(free)(offsets);
    return;
  }
  THTensor_(resize1d)(offsets, cnt);
  /* Sort, then compact duplicates in place. */
  THTensor* uniqueOffsets = THTensor_(new)();
  THLongTensor* ri = THLongTensor_new();
  THTensor_(sort)(uniqueOffsets, ri, offsets, 0, 0);
  THLongTensor_free(ri);
  THTensor_(free)(offsets);
  cnt = 1;
  real* uniqueOffsets_p = THTensor_(data)(uniqueOffsets);
  for (i = 1; i < THTensor_(size)(uniqueOffsets, 0); i++) {
    if (uniqueOffsets_p[i] != uniqueOffsets_p[i - 1]) {
      uniqueOffsets_p[cnt++] = uniqueOffsets_p[i];
    }
  }
  THTensor_(resize1d)(uniqueOffsets, cnt);
  // weight += -learningRate * gradWeight (bias first, then touched columns)
  THTensor_(cadd)(bias, bias, -learningRate, gradBias);
#pragma omp parallel for private(i) schedule(static) if (cnt * outDim > 10000)
  for (i = 0; i < cnt; i++) {
    int64_t offset = (int64_t)uniqueOffsets_p[i];
    THBlas_(axpy)(outDim,
                  -learningRate,
                  COL_PTR2(gradWeight, offset), gradWeight->stride[0],
                  COL_PTR2(weight, offset), weight->stride[0]);
  }
  THTensor_(free)(uniqueOffsets);
}
/* Zero gradBias entirely, and zero only the gradWeight columns touched by
 * the last coo-format input (lastInput is nnz x 3: batch, 1-based column
 * index, value; zero-valued entries are skipped).
 * Change: removed the unused local `h`; logic untouched. */
void THNN_(SparseLinear_zeroGradParameters)(
  THNNState *state,
  THTensor *gradWeight,
  THTensor *gradBias,
  THTensor *lastInput)
{
  int64_t i, j;  /* `h` was declared but never used */
  int64_t outDim = gradWeight->size[0];
  int64_t inDim = gradWeight->size[1];
  THArgCheck(THNN_(checkSize1D)(gradBias, outDim), 3, "gradBias size wrong");
  THArgCheck(THNN_(checkInput)(lastInput), 4,
             "input must be in coo format, nnz x 3");
  THTensor_(zero)(gradBias);
  int64_t nnz = THTensor_(size)(lastInput, 0);
#pragma omp parallel for private(i, j) schedule(static) if ( \
  nnz * outDim > 10000)
  for (i = 0; i < nnz; i++) {
    if (THNN_(get2d)(lastInput, i, 2) == 0 ) {
      continue;
    }
    /* stored indices are 1-based */
    int64_t offset = (int64_t)(THNN_(get2d)(lastInput, i, 1)) - 1;
    if (offset >= 0 && offset < inDim) {
      real* pGradWeight = COL_PTR2(gradWeight, offset);
      if (gradWeight->stride[0] == 1) {
        /* contiguous column: vectorized fill */
        THVector_(fill)(pGradWeight, 0, outDim);
      } else {
        int64_t stride = gradWeight->stride[0];
        for (j = 0; j < outDim; ++j) {
          pGradWeight[j * stride] = 0;
        }
      }
    } else {
      /* NOTE(review): THError inside an OpenMP region aborts from a worker
       * thread — confirm this is acceptable upstream. */
      THError(
        "index out of bound. zeroGradParameters: %d not between 1 and %d",
        offset + 1,
        inDim);
    }
  }
}
/* Zero gradBias entirely, and zero only the gradWeight columns touched by
 * the last legacy-format input (batchsize x nnz x 2 of
 * (1-based column index, value); zero-valued entries are skipped). */
void THNN_(SparseLinear_legacyZeroGradParameters)(
THNNState *state,
THTensor *gradWeight,
THTensor *gradBias,
THTensor *lastInput)
{
int64_t h, i, j;
int64_t outDim = gradWeight->size[0];
int64_t inDim = gradWeight->size[1];
THArgCheck(THNN_(checkSize1D)(gradBias, outDim), 3, "gradBias size wrong");
THArgCheck(THNN_(checkLegacyInput)(lastInput), 4,
"input size must be batchsize x nnz x 2");
THTensor_(zero)(gradBias);
int64_t batchSize = THTensor_(size)(lastInput, 0);
int64_t nnz = THTensor_(size)(lastInput, 1);
// Parallel over batch rows only (batchSize > 1 guard); zeroing the same
// column twice is idempotent, so overlapping columns are harmless here.
#pragma omp parallel for private(h, i, j) schedule(static) if ( \
batchSize > 1 && batchSize * nnz * outDim > 10000)
for (h = 0; h < batchSize; h++) {
for (i = 0; i < nnz; i++) {
if (THNN_(get3d)(lastInput, h, i, 1) == 0 ) {
continue;
}
// stored indices are 1-based
int64_t offset = (int64_t)(THNN_(get3d)(lastInput, h, i, 0)) - 1;
if (offset >= 0 && offset < inDim) {
real* pGradWeight = COL_PTR2(gradWeight, offset);
if (gradWeight->stride[0] == 1) {
// contiguous column: vectorized fill
THVector_(fill)(pGradWeight, 0, outDim);
} else {
int64_t stride = gradWeight->stride[0];
for (j = 0; j < outDim; ++j) {
pGradWeight[j * stride] = 0;
}
}
} else {
// NOTE(review): THError inside an OpenMP region aborts from a worker
// thread — confirm this is acceptable upstream.
THError(
"index out of bound. zeroGradParameters: %d not between 1 and %d",
offset + 1,
inDim);
}
}
}
}
#undef ROW_PTR2
#undef COL_PTR2
#endif
|
2018-ordered-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
two dimensional array computation with ordered loops.
ordered (n) will make 'omp for' affect more levels of loops.
only 1 level loop is affected. j is shared, causing data races.
*/
int a[100][100], b[100][100], c[100][100];
/* DataRaceBench kernel: the `ordered` clause without (n) applies only to
 * the outer i-loop, so the inner loop variable j is SHARED across threads.
 * The resulting race on j is INTENTIONAL — this file is a positive
 * (race-containing) benchmark case; do not "fix" it.
 * NOTE(review): printf is used without a visible #include <stdio.h> —
 * relies on implicit declaration (pre-C99 behavior). */
int main()
{
int i,j;
#pragma omp parallel for ordered
for (i=0;i<100;i++)
for (j=0;j<100;j++)  /* j shared: the deliberate data race */
{
a[i][j]=b[i][j]*c[i][j];
#pragma omp ordered
printf ("debug here\n");
}
return 0;
}
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for `struct timeval` values.
 * CAUTION: normalizes *y in place while performing the borrow/carry, so the
 * caller's y may be modified (this matches the classic glibc-manual idiom).
 * Returns 1 when the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y's microsecond field so that
     x->tv_usec - y->tv_usec cannot go negative. */
  if (x->tv_usec < y->tv_usec) {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Symmetric carry when the microsecond gap exceeds one second. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* After normalization tv_usec is certainly non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
/* Driver for a time-tiled order-2, 3D 25-point stencil (PLUTO/CLooG
 * generated loop nest). Allocates two time planes A[0]/A[1] plus a roc2
 * coefficient array, runs TESTS timed sweeps, and reports the best time.
 * The generated loop nest below must not be hand-edited. */
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
/* NOTE(review): Nx/Ny/Nz (and Nt) stay UNINITIALIZED when too few
 * command-line arguments are supplied — the allocations below then read
 * indeterminate values. Confirm callers always pass 4 arguments. */
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
/* NOTE(review): this first roc2 allocation is immediately overwritten by
 * the sized allocation below and therefore leaked. */
double ***roc2 = (double ***) malloc(sizeof(double**));
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 16;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
/* NOTE(review): loops start at 1, so the index-0 planes/rows/columns are
 * never initialized — confirm the stencil's halo handling expects this. */
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
/* 25-point stencil coefficients: center plus 4 rings along each axis. */
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
/* Machine-generated time-skewed, tiled loop nest: t1 iterates over time
 * wavefronts, t2..t4 over space tiles (parallelized across t2), t5..t8
 * over points. Do not modify by hand — regenerate with PLUTO/CLooG. */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=2*Nt-2;t1++) {
lbp=ceild(t1+2,2);
ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(ceild(t1-4,8),ceild(4*t2-Nz-3,16));t3<=min(min(floord(4*Nt+Ny-9,16),floord(2*t1+Ny-3,16)),floord(4*t2+Ny-9,16));t3++) {
for (t4=max(max(ceild(t1-12,16),ceild(4*t2-Nz-19,32)),ceild(16*t3-Ny-19,32));t4<=min(min(min(floord(4*Nt+Nx-9,32),floord(2*t1+Nx-3,32)),floord(4*t2+Nx-9,32)),floord(16*t3+Nx+3,32));t4++) {
for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(16*t3-Ny+5,4)),ceild(32*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) {
for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(16*t3,4*t5+4);t7<=min(16*t3+15,4*t5+Ny-5);t7++) {
lbv=max(32*t4,4*t5+4);
ubv=min(32*t4+31,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
return 0;
}
|
remarks_parallel_in_target_state_machine.c | // RUN: %clang_cc1 -verify=host -Rpass=openmp-opt -Rpass-analysis=openmp-opt -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -Rpass=openmp-opt -Rpass-analysis=openmp-opt -fopenmp -O2 -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t.out
// host-no-diagnostics
void baz(void) __attribute__((assume("omp_no_openmp")));
/* Lit-test fixture: a parallel region reachable from unknown callers, so
 * the OpenMP optimizer must not rewrite the kernel state machine (OMP101).
 * Remark anchors (#1) are label-based; keep them intact. */
void bar(void) {
#pragma omp parallel // #1 \
// expected-remark@#1 {{Parallel region is used in unknown ways. Will not attempt to rewrite the state machine. [OMP101]}}
{
}
}
/* Lit-test fixture: a generic-mode target region that gets a customized
 * state machine (OMP131); baz() carries the omp_no_openmp assumption and
 * blocks SPMD-ization (OMP121). Remark anchors are label/same-line based. */
void foo(void) {
#pragma omp target teams // #2
// expected-remark@#2 {{Rewriting generic-mode kernel with a customized state machine. [OMP131]}}
{
baz(); // expected-remark {{Value has potential side effects preventing SPMD-mode execution. Add `__attribute__((assume("ompx_spmd_amenable")))` to the called function to override. [OMP121]}}
#pragma omp parallel
{
}
bar();
#pragma omp parallel
{
}
}
}
/* Lit-test fixture: SPMD-amenable target regions — intentionally expects
 * NO state-machine or side-effect remarks here. */
void spmd(void) {
// Verify we do not emit the remarks above for "SPMD" regions.
#pragma omp target teams
#pragma omp parallel
{
}
#pragma omp target teams distribute parallel for
for (int i = 0; i < 100; ++i) {
}
}
// expected-remark@* {{OpenMP runtime call __kmpc_global_thread_num deduplicated. [OMP170]}}
|
symv_c_csr_n_lo_conj.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include <memory.h>
#include<stdlib.h>
/* y = alpha * conj(A) * x + beta * y for a symmetric CSR matrix whose
 * LOWER triangle is stored (entries above the diagonal are skipped).
 * Each off-diagonal entry contributes to both y[i] and y[col].
 * Per-thread partial vectors (y_local) avoid write races; they are summed
 * into y afterwards. Requires A to be square. */
static alphasparse_status_t
symv_x_csr_n_lo_conj_omp(const ALPHA_Number alpha,
const ALPHA_SPMAT_CSR *A,
const ALPHA_Number *x,
const ALPHA_Number beta,
ALPHA_Number *y)
{
const ALPHA_INT m = A->rows;
const ALPHA_INT n = A->cols;
if(m != n) return ALPHA_SPARSE_STATUS_INVALID_VALUE;
ALPHA_INT num_threads = alpha_get_thread_num();
// y *= beta (scale the existing output first)
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
for(ALPHA_INT i = 0; i < m; ++i)
{
alpha_mule(y[i], beta);
}
// One zeroed scratch vector of length m per thread.
// NOTE(review): alpha_memalign results are not checked for NULL —
// confirm allocation failure is impossible/handled upstream.
ALPHA_Number **y_local = alpha_memalign(num_threads * sizeof(ALPHA_Number *), DEFAULT_ALIGNMENT);
for(ALPHA_INT i = 0; i < num_threads; i++)
{
y_local[i] = alpha_memalign(m * sizeof(ALPHA_Number), DEFAULT_ALIGNMENT);
memset(y_local[i], '\0', sizeof(ALPHA_Number) * m);
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
for(ALPHA_INT i = 0; i < m; ++i)
{
ALPHA_INT tid = alpha_get_thread_id();
ALPHA_Number tmp;
for(ALPHA_INT ai = A->rows_start[i]; ai < A->rows_end[i]; ++ai)
{
const ALPHA_INT col = A->col_indx[ai];
if(col > i)
{
// upper-triangle entry: not stored semantics, skip
continue;
}
else if(col == i)
{
// diagonal: contributes once to y[i]
alpha_setzero(tmp);
cmp_conj(tmp, A->values[ai]);
alpha_mul(tmp, alpha, tmp);
alpha_madde(y_local[tid][i], tmp, x[col]);
}
else
{
// strict lower entry: symmetric contribution to y[col] and y[i];
// writes go to this thread's private y_local, so no race.
alpha_setzero(tmp);
cmp_conj(tmp, A->values[ai]);
alpha_mul(tmp, alpha, tmp);
alpha_madde(y_local[tid][col], tmp, x[i]);
alpha_madde(y_local[tid][i], tmp, x[col]);
}
}
}
// Reduce the per-thread partials into y (parallel over rows).
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
for(ALPHA_INT row = 0; row < m; row++)
for(ALPHA_INT i = 0; i < num_threads; i++)
alpha_adde(y[row], y_local[i][row]);
for(ALPHA_INT i = 0; i < num_threads; i++)
{
alpha_free(y_local[i]);
}
alpha_free(y_local);
return ALPHA_SPARSE_STATUS_SUCCESS;
}
/* Public entry point (name generated by the ONAME macro): dispatches the
 * conjugated lower-triangular symmetric CSR product
 * y = alpha * conj(A) * x + beta * y to the OpenMP implementation. */
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
      const ALPHA_SPMAT_CSR *A,
      const ALPHA_Number *x,
      const ALPHA_Number beta,
      ALPHA_Number *y)
{
    const alphasparse_status_t status =
        symv_x_csr_n_lo_conj_omp(alpha, A, x, beta, y);
    return status;
}
|
vectors.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "timer.h"
int g_matrix_n, g_matrix_m, g_num_threads;
typedef struct matrix_t {
int** values;
int* row_size;
} matrix_t;
void print_matrix(matrix_t*);
/* Sum each row of the provided matrix, using the row_size to determine how many items are in a row */
/* Sum each row of `matrix`; row r holds matrix->row_size[r] entries.
 * Returns a freshly malloc'd vector of g_matrix_n row sums (caller frees),
 * or NULL when allocation fails. */
int* sum_rows(matrix_t* matrix)
{
    int* sums = malloc(sizeof(int) * g_matrix_n);
    if (!sums) {
        fprintf(stderr, "Failed to malloc sum_vector\n");
        return NULL;
    }
    /* One pass per row: accumulate locally, then store — a single parallel
     * region instead of a zero-fill pass followed by an accumulate pass. */
#pragma omp parallel for schedule(static) num_threads(g_num_threads)
    for (int row = 0; row < g_matrix_n; row++) {
        int acc = 0;
        for (int col = 0; col < matrix->row_size[row]; col++) {
            acc += matrix->values[row][col];
        }
        sums[row] = acc;
    }
    return sums;
}
/* Create a matrix, either triangular or non triangular and fill it with the appropriate values */
/* Allocate and fill a matrix of g_matrix_n rows. When is_triangular is
 * non-zero, row i holds g_matrix_n - i entries (triangular shape);
 * otherwise every row holds g_matrix_n entries. Values are deterministic:
 * n + m + (g_matrix_m - row_size). Returns NULL when a top-level
 * allocation fails (already-allocated parts are not freed — best effort,
 * matching the original). */
matrix_t* matrix(int is_triangular)
{
    /* BUG FIX: the original allocated sizeof(matrix_t*) — the size of a
     * POINTER, not of the struct — causing a heap buffer overflow when the
     * two members were written. Allocate the full struct. */
    matrix_t* matrix = malloc(sizeof(matrix_t));
    if (!matrix) {
        fprintf(stderr, "Failed to malloc struct matrix\n");
        return NULL;
    }
    /* FIX: rows are indexed 0..g_matrix_n-1 below, so size the row-pointer
     * array by g_matrix_n (the original used g_matrix_m; they are equal for
     * the square matrices main() builds, but n is the row count). */
    matrix->values = malloc(sizeof(int*) * g_matrix_n);
    if (!matrix->values) {
        fprintf(stderr, "Failed to malloc matrix\n");
        return NULL;
    }
    /* Malloc matrix row sizes */
    matrix->row_size = malloc(sizeof(int) * g_matrix_n);
    if (!matrix->row_size) {
        fprintf(stderr, "Failed to malloc row size\n");
        return NULL;
    }
    /* Malloc matrix columns */
#pragma omp parallel for schedule(static) num_threads(g_num_threads)
    for (int i = 0; i < g_matrix_n; i++) {
        matrix->row_size[i] = g_matrix_n - (i * is_triangular);
        matrix->values[i] = malloc(sizeof(int) * matrix->row_size[i]);
        if (!matrix->values[i]) {
            /* NOTE(review): failure is only reported; callers will later
             * dereference the NULL row — confirm acceptable for this
             * exercise. */
            fprintf(stderr, "Failed to malloc matrix[%d]\n", i);
        }
    }
    /* Matrix[n][m] n = vertical, m = horizontal. eg. Matrix[2][3] is 2nd row (from top) 3rd value. */
    /* n is vert size m = hori size */
#pragma omp parallel for schedule(static) num_threads(g_num_threads)
    for (int n = 0; n < g_matrix_n; n++) {
        for (int m = 0; m < matrix->row_size[n]; m++) {
            matrix->values[n][m] = n + (m + (g_matrix_m - matrix->row_size[n]));
        }
    }
    return matrix;
}
/* Build a full (non-triangular) matrix. */
matrix_t* init_matrix()
{
    matrix_t* full = matrix(0);
    return full;
}
/* Build a triangular matrix (row i holds g_matrix_n - i entries). */
matrix_t* init_matrix_triangular()
{
    matrix_t* tri = matrix(1);
    return tri;
}
/* Print a matrix */
/* Print the matrix row by row, padding after small values so columns
 * roughly line up.
 * NOTE(review): the four padding branches print what appears to be a
 * single space each here — the original likely used 4/3/2/1 spaces and may
 * have been whitespace-mangled; confirm intended widths. */
void print_matrix(matrix_t* matrix)
{
for (int n = 0; n < g_matrix_n; n++) {
for (int m = 0; m < matrix->row_size[n]; m++) {
printf("%d ", matrix->values[n][m]);
if (matrix->values[n][m] < 10) {
printf(" ");
}
else if (matrix->values[n][m] < 100) {
printf(" ");
}
else if (matrix->values[n][m] < 1000) {
printf(" ");
}
else if (matrix->values[n][m] < 10000) {
printf(" ");
}
}
printf("\n");
}
return;
}
/* Build a triangular matrix sized from argv, time sum_rows() over it, and
 * print "<n>, <threads>, <seconds>". */
int main(int argc, char* argv[])
{
    double time;
    int* sum_vector;
    /* BUG FIX: the original read argv[1]/argv[2] unchecked and crashed
     * when arguments were missing. */
    if (argc < 3) {
        fprintf(stderr, "usage: %s <matrix_size> <num_threads>\n", argv[0]);
        return EXIT_FAILURE;
    }
    /* We allow only square matrices */
    g_matrix_n = g_matrix_m = atoi(argv[1]);
    g_num_threads = atoi(argv[2]);
    matrix_t* matrix;
    matrix = init_matrix_triangular();
    if (!matrix) {
        return EXIT_FAILURE;
    }
    timer_start();
    sum_vector = sum_rows(matrix);
    if (!sum_vector) {
        return EXIT_FAILURE;
    }
    time = timer_end();
    printf("%d, %d, %lf\n", g_matrix_n, g_num_threads, time);
    /* print_matrix(matrix); */
    /* Release the matrix and the result vector. */
    for (int i = 0; i < g_matrix_n; i++) {
        free(matrix->values[i]);
    }
    free(matrix->values);
    free(matrix->row_size);
    free(matrix);
    free(sum_vector);
    return EXIT_SUCCESS;
}
|
vector_mul_mp.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <time.h>
#define n 100000
/* Fill two n-element vectors with random-scaled values in parallel,
 * compute their element-wise product, and print the elapsed wall time.
 * Fixes: random_a/random_b/omp_rank were function-scope (hence OpenMP
 * shared) and raced across threads — now loop-local; the inner j-loop
 * recomputed the identical product n-1 times — now a single assignment;
 * timing variables were float, truncating omp_get_wtime()'s double. */
int main()
{
    /* NOTE: ~2.4 MB of automatic arrays; assumes a large enough stack. */
    double a[n], b[n], c[n];
    double startTime, endTime, execTime; /* double: omp_get_wtime() returns double */
    int i;
    srand(time(0));
    startTime = omp_get_wtime();
#pragma omp parallel private (i) shared (a,b,c)
    {
#pragma omp for
        for (i = 0; i < n; i++)
        {
            /* Loop-local: each iteration gets private copies (the original
             * shared these across threads — a data race). */
            double random_a = rand(), random_b = rand();
            int omp_rank = omp_get_thread_num();
            (void)omp_rank; /* only used by the debug printf below */
            a[i] = i * random_a;
            b[i] = i * random_b;
            /* FIX: the original wrapped this in `for(j=1;j<n;j++)`,
             * recomputing the same product n-1 times. */
            c[i] = a[i] * b[i];
            /* NOTE(review): rand() is not guaranteed thread-safe; values may
             * interleave across threads — acceptable for this benchmark? */
            //printf("The value of a[%d] = %lf and b[%d] = %lf and result c[%d] = %lf Thread rank = %d\n", i, a[i], i, b[i], i, c[i], omp_rank);
        }
    }
    endTime = omp_get_wtime();
    execTime = endTime - startTime;
    printf("%f \n",execTime);
    return(0);
} |
hypre_merge_sort.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_utilities.h"
#include "hypre_hopscotch_hash.h"
#include "../seq_mv/HYPRE_seq_mv.h"
//#define DBG_MERGE_SORT
#ifdef DBG_MERGE_SORT
#include <algorithm>
#include <unordered_map>
#endif
#define SWAP(T, a, b) do { T tmp = a; a = b; b = tmp; } while (0)
/* union of two sorted (in ascending order) array arr1 and arr2 into arr3
* Assumption: no duplicates in arr1 and arr2
* arr3 should have enough space on entry
* map1 and map2 map arr1 and arr2 to arr3 */
/* Union of two ascending, duplicate-free arrays arr1 (n1 elems) and arr2
 * (n2 elems) into arr3; the union size is written to *n3. arr3 must have
 * room for n1 + n2 entries on entry. When non-NULL, map1[i] / map2[j]
 * receive the position in arr3 of arr1[i] / arr2[j] (a shared element gets
 * the same position in both maps). */
void hypre_union2(HYPRE_Int n1, HYPRE_BigInt *arr1, HYPRE_Int n2, HYPRE_BigInt *arr2, HYPRE_Int *n3, HYPRE_BigInt *arr3,
                  HYPRE_Int *map1, HYPRE_Int *map2)
{
   HYPRE_Int i1 = 0, i2 = 0, pos = 0;
   /* Single merge loop; the tail of either array is handled by the
    * exhaustion tests in the branch conditions. */
   while (i1 < n1 || i2 < n2)
   {
      if (i2 == n2 || (i1 < n1 && arr1[i1] < arr2[i2]))
      {
         /* next element comes from arr1 (or arr2 is exhausted) */
         if (map1) { map1[i1] = pos; }
         arr3[pos++] = arr1[i1++];
      }
      else if (i1 == n1 || arr2[i2] < arr1[i1])
      {
         /* next element comes from arr2 (or arr1 is exhausted) */
         if (map2) { map2[i2] = pos; }
         arr3[pos++] = arr2[i2++];
      }
      else
      {
         /* equal: emit once, both maps point at the shared slot */
         if (map1) { map1[i1] = pos; }
         if (map2) { map2[i2] = pos; }
         arr3[pos++] = arr1[i1++];
         i2++;
      }
   }
   *n3 = pos;
}
/* Stable merge of the ascending ranges [first1,last1) and [first2,last2)
 * into out; on ties the element from the first range is emitted first. */
static void hypre_merge(HYPRE_Int *first1, HYPRE_Int *last1, HYPRE_Int *first2, HYPRE_Int *last2, HYPRE_Int *out)
{
   /* Interleave while both ranges have elements. */
   while (first1 != last1 && first2 != last2)
   {
      if (*first2 < *first1)
      {
         *out++ = *first2++;
      }
      else
      {
         *out++ = *first1++;
      }
   }
   /* Drain whichever range remains. */
   while (first1 != last1)
   {
      *out++ = *first1++;
   }
   while (first2 != last2)
   {
      *out++ = *first2++;
   }
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
/* HYPRE_BigInt twin of hypre_merge: stable merge of two ascending ranges
 * into out, ties favoring the first range. */
static void hypre_big_merge(HYPRE_BigInt *first1, HYPRE_BigInt *last1, HYPRE_BigInt *first2, HYPRE_BigInt *last2, HYPRE_BigInt *out)
{
   /* Interleave while both ranges have elements. */
   while (first1 != last1 && first2 != last2)
   {
      if (*first2 < *first1)
      {
         *out++ = *first2++;
      }
      else
      {
         *out++ = *first1++;
      }
   }
   /* Drain whichever range remains. */
   while (first1 != last1)
   {
      *out++ = *first1++;
   }
   while (first2 != last2)
   {
      *out++ = *first2++;
   }
}
#endif
/* Binary-search helper for kth_element: find the split (i, j) with
 * i + j == k such that a1[0:i) and a2[0:j) together hold the k smallest
 * elements of the two sorted arrays. Searches over the split point i in
 * a1 within [left, right]; callers guarantee the search converges (see
 * the asserts). Writes the element counts to *out1 / *out2. */
static void kth_element_(
HYPRE_Int *out1, HYPRE_Int *out2,
HYPRE_Int *a1, HYPRE_Int *a2,
HYPRE_Int left, HYPRE_Int right, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k)
{
while (1)
{
HYPRE_Int i = (left + right)/2; // right < k -> i < k
HYPRE_Int j = k - i - 1;
#ifdef DBG_MERGE_SORT
hypre_assert(left <= right && right <= k);
hypre_assert(i < k); // i == k implies left == right == k that can never happen
hypre_assert(j >= 0 && j < n2);
#endif
// Split is valid when a1[i] falls between a2[j] and a2[j+1]
// (short-circuit handles the j == -1 / j == n2-1 boundaries).
if ((j == -1 || a1[i] >= a2[j]) && (j == n2 - 1 || a1[i] <= a2[j + 1]))
{
*out1 = i; *out2 = j + 1;
return;
}
// Mirror case: a2[j] falls between a1[i] and a1[i+1].
else if (j >= 0 && a2[j] >= a1[i] && (i == n1 - 1 || a2[j] <= a1[i + 1]))
{
*out1 = i + 1; *out2 = j;
return;
}
else if (a1[i] > a2[j] && j != n2 - 1 && a1[i] > a2[j+1])
{
// search in left half of a1
right = i - 1;
}
else
{
// search in right half of a1
left = i + 1;
}
}
}
/**
* Partition the input so that
* a1[0:*out1) and a2[0:*out2) contain the smallest k elements
*/
/**
 * Partition the input so that
 * a1[0:*out1) and a2[0:*out2) contain the smallest k elements
 *
 * Handles all degenerate cases (empty input, k covering everything, one
 * array entirely smaller than the other) before delegating the general
 * case to the kth_element_ binary search.
 */
static void kth_element(
HYPRE_Int *out1, HYPRE_Int *out2,
HYPRE_Int *a1, HYPRE_Int *a2, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k)
{
// either of the inputs is empty
if (n1 == 0)
{
*out1 = 0; *out2 = k;
return;
}
if (n2 == 0)
{
*out1 = k; *out2 = 0;
return;
}
if (k >= n1 + n2)
{
*out1 = n1; *out2 = n2;
return;
}
// one is greater than the other
if (k < n1 && a1[k] <= a2[0])
{
*out1 = k; *out2 = 0;
return;
}
if (k - n1 >= 0 && a2[k - n1] >= a1[n1 - 1])
{
*out1 = n1; *out2 = k - n1;
return;
}
if (k < n2 && a2[k] <= a1[0])
{
*out1 = 0; *out2 = k;
return;
}
if (k - n2 >= 0 && a1[k - n2] >= a2[n2 - 1])
{
*out1 = k - n2; *out2 = n2;
return;
}
// now k > 0
// faster to do binary search on the shorter sequence
// (swap pointers so a1 is the shorter array; out1/out2 swap with them)
if (n1 > n2)
{
SWAP(HYPRE_Int, n1, n2);
SWAP(HYPRE_Int *, a1, a2);
SWAP(HYPRE_Int *, out1, out2);
}
if (k < (n1 + n2)/2)
{
kth_element_(out1, out2, a1, a2, 0, hypre_min(n1 - 1, k), n1, n2, k);
}
else
{
// when k is big, faster to find (n1 + n2 - k)th biggest element
// by searching within the trimmed windows [offset1, ...) / [offset2, ...)
HYPRE_Int offset1 = hypre_max(k - n2, 0), offset2 = hypre_max(k - n1, 0);
HYPRE_Int new_k = k - offset1 - offset2;
HYPRE_Int new_n1 = hypre_min(n1 - offset1, new_k + 1);
HYPRE_Int new_n2 = hypre_min(n2 - offset2, new_k + 1);
kth_element_(out1, out2, a1 + offset1, a2 + offset2, 0, new_n1 - 1, new_n1, new_n2, new_k);
*out1 += offset1;
*out2 += offset2;
}
#ifdef DBG_MERGE_SORT
hypre_assert(*out1 + *out2 == k);
#endif
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
/* HYPRE_BigInt twin of kth_element_: binary search for the split (i, j),
 * i + j == k, such that a1[0:i) and a2[0:j) hold the k smallest elements.
 * Indices stay HYPRE_Int; only the element type widens. */
static void big_kth_element_(
HYPRE_Int *out1, HYPRE_Int *out2,
HYPRE_BigInt *a1, HYPRE_BigInt *a2,
HYPRE_Int left, HYPRE_Int right, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k)
{
while (1)
{
HYPRE_Int i = (left + right)/2; // right < k -> i < k
HYPRE_Int j = k - i - 1;
#ifdef DBG_MERGE_SORT
hypre_assert(left <= right && right <= k);
hypre_assert(i < k); // i == k implies left == right == k that can never happen
hypre_assert(j >= 0 && j < n2);
#endif
// Split is valid when a1[i] falls between a2[j] and a2[j+1].
if ((j == -1 || a1[i] >= a2[j]) && (j == n2 - 1 || a1[i] <= a2[j + 1]))
{
*out1 = i; *out2 = j + 1;
return;
}
// Mirror case: a2[j] falls between a1[i] and a1[i+1].
else if (j >= 0 && a2[j] >= a1[i] && (i == n1 - 1 || a2[j] <= a1[i + 1]))
{
*out1 = i + 1; *out2 = j;
return;
}
else if (a1[i] > a2[j] && j != n2 - 1 && a1[i] > a2[j+1])
{
// search in left half of a1
right = i - 1;
}
else
{
// search in right half of a1
left = i + 1;
}
}
}
/**
* Partition the input so that
* a1[0:*out1) and a2[0:*out2) contain the smallest k elements
*/
/**
 * Partition the input so that
 * a1[0:*out1) and a2[0:*out2) contain the smallest k elements
 *
 * HYPRE_BigInt twin of kth_element: same degenerate-case handling, then
 * delegates to the big_kth_element_ binary search.
 */
static void big_kth_element(
HYPRE_Int *out1, HYPRE_Int *out2,
HYPRE_BigInt *a1, HYPRE_BigInt *a2, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k)
{
// either of the inputs is empty
if (n1 == 0)
{
*out1 = 0; *out2 = k;
return;
}
if (n2 == 0)
{
*out1 = k; *out2 = 0;
return;
}
if (k >= n1 + n2)
{
*out1 = n1; *out2 = n2;
return;
}
// one is greater than the other
if (k < n1 && a1[k] <= a2[0])
{
*out1 = k; *out2 = 0;
return;
}
if (k - n1 >= 0 && a2[k - n1] >= a1[n1 - 1])
{
*out1 = n1; *out2 = k - n1;
return;
}
if (k < n2 && a2[k] <= a1[0])
{
*out1 = 0; *out2 = k;
return;
}
if (k - n2 >= 0 && a1[k - n2] >= a2[n2 - 1])
{
*out1 = k - n2; *out2 = n2;
return;
}
// now k > 0
// faster to do binary search on the shorter sequence
// (swap so a1 is shorter; out1/out2 swap with the arrays)
if (n1 > n2)
{
SWAP(HYPRE_Int, n1, n2);
SWAP(HYPRE_BigInt *, a1, a2);
SWAP(HYPRE_Int *, out1, out2);
}
if (k < (n1 + n2)/2)
{
big_kth_element_(out1, out2, a1, a2, 0, hypre_min(n1 - 1, k), n1, n2, k);
}
else
{
// when k is big, faster to find (n1 + n2 - k)th biggest element
HYPRE_Int offset1 = hypre_max(k - n2, 0), offset2 = hypre_max(k - n1, 0);
HYPRE_Int new_k = k - offset1 - offset2;
HYPRE_Int new_n1 = hypre_min(n1 - offset1, new_k + 1);
HYPRE_Int new_n2 = hypre_min(n2 - offset2, new_k + 1);
big_kth_element_(out1, out2, a1 + (HYPRE_BigInt)offset1, a2 + (HYPRE_BigInt)offset2, 0, new_n1 - 1, new_n1, new_n2, new_k);
*out1 += offset1;
*out2 += offset2;
}
#ifdef DBG_MERGE_SORT
hypre_assert(*out1 + *out2 == k);
#endif
}
#endif
/**
 * Merge the sorted ranges [first1,last1) and [first2,last2) into out, with
 * the merge split across a team of threads: each calling thread computes
 * (via kth_element) the slice of both inputs that yields its contiguous
 * chunk of the output, then merges just that slice sequentially.
 *
 * @param num_threads number of threads that participate in this merge
 * @param my_thread_num thread id (zero-based) among the threads that participate in this merge
 */
static void hypre_parallel_merge(
   HYPRE_Int *first1, HYPRE_Int *last1, HYPRE_Int *first2, HYPRE_Int *last2,
   HYPRE_Int *out,
   HYPRE_Int num_threads, HYPRE_Int my_thread_num)
{
   HYPRE_Int n1 = last1 - first1;
   HYPRE_Int n2 = last2 - first2;
   HYPRE_Int n = n1 + n2;
   // each thread produces an (almost) equal-sized contiguous chunk of out
   HYPRE_Int n_per_thread = (n + num_threads - 1)/num_threads;
   HYPRE_Int begin_rank = hypre_min(n_per_thread*my_thread_num, n);
   HYPRE_Int end_rank = hypre_min(begin_rank + n_per_thread, n);

#ifdef DBG_MERGE_SORT
   hypre_assert(std::is_sorted(first1, last1));
   hypre_assert(std::is_sorted(first2, last2));
#endif

   // partition both inputs at the begin_rank-th and end_rank-th smallest
   HYPRE_Int begin1, begin2, end1, end2;
   kth_element(&begin1, &begin2, first1, first2, n1, n2, begin_rank);
   kth_element(&end1, &end2, first1, first2, n1, n2, end_rank);

   // with duplicate keys the partition points are not unique; shift equal
   // elements between the two inputs until begin1 <= end1 and begin2 <= end2
   while (begin1 > end1 && begin1 > 0 && begin2 < n2 && first1[begin1 - 1] == first2[begin2])
   {
#ifdef DBG_MERGE_SORT
      printf("%s:%d\n", __FILE__, __LINE__);
#endif
      begin1--; begin2++;
   }
   while (begin2 > end2 && end1 > 0 && end2 < n2 && first1[end1 - 1] == first2[end2])
   {
#ifdef DBG_MERGE_SORT
      printf("%s:%d\n", __FILE__, __LINE__);
#endif
      end1--; end2++;
   }

#ifdef DBG_MERGE_SORT
   hypre_assert(begin1 <= end1);
   hypre_assert(begin2 <= end2);
#endif

   // sequential merge of this thread's slice into its chunk of out
   hypre_merge(
      first1 + begin1, first1 + end1,
      first2 + begin2, first2 + end2,
      out + begin1 + begin2);

#ifdef DBG_MERGE_SORT
   hypre_assert(std::is_sorted(out + begin1 + begin2, out + end1 + end2));
#endif
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
/**
 * HYPRE_BigInt variant of hypre_parallel_merge: merge two sorted BigInt
 * ranges into out, each participating thread handling one contiguous
 * chunk of the output (slice boundaries found via big_kth_element).
 *
 * @param num_threads number of threads that participate in this merge
 * @param my_thread_num thread id (zero-based) among the threads that participate in this merge
 */
static void hypre_big_parallel_merge(
   HYPRE_BigInt *first1, HYPRE_BigInt *last1, HYPRE_BigInt *first2, HYPRE_BigInt *last2,
   HYPRE_BigInt *out,
   HYPRE_Int num_threads, HYPRE_Int my_thread_num)
{
   HYPRE_Int n1 = (HYPRE_Int)(last1 - first1);
   HYPRE_Int n2 = (HYPRE_Int)(last2 - first2);
   HYPRE_Int n = n1 + n2;
   // each thread produces an (almost) equal-sized contiguous chunk of out
   HYPRE_Int n_per_thread = (n + num_threads - 1)/num_threads;
   HYPRE_Int begin_rank = hypre_min(n_per_thread*my_thread_num, n);
   HYPRE_Int end_rank = hypre_min(begin_rank + n_per_thread, n);

#ifdef DBG_MERGE_SORT
   hypre_assert(std::is_sorted(first1, last1));
   hypre_assert(std::is_sorted(first2, last2));
#endif

   // partition both inputs at the begin_rank-th and end_rank-th smallest
   HYPRE_Int begin1, begin2, end1, end2;
   big_kth_element(&begin1, &begin2, first1, first2, n1, n2, begin_rank);
   big_kth_element(&end1, &end2, first1, first2, n1, n2, end_rank);

   // with duplicate keys the partition points are not unique; shift equal
   // elements between the two inputs until begin1 <= end1 and begin2 <= end2
   while (begin1 > end1 && begin1 > 0 && begin2 < n2 && first1[begin1 - 1] == first2[begin2])
   {
#ifdef DBG_MERGE_SORT
      printf("%s:%d\n", __FILE__, __LINE__);
#endif
      begin1--; begin2++;
   }
   while (begin2 > end2 && end1 > 0 && end2 < n2 && first1[end1 - 1] == first2[end2])
   {
#ifdef DBG_MERGE_SORT
      printf("%s:%d\n", __FILE__, __LINE__);
#endif
      end1--; end2++;
   }

#ifdef DBG_MERGE_SORT
   hypre_assert(begin1 <= end1);
   hypre_assert(begin2 <= end2);
#endif

   // sequential merge of this thread's slice into its chunk of out
   hypre_big_merge(
      first1 + (HYPRE_BigInt)begin1, first1 + (HYPRE_BigInt)end1,
      first2 + (HYPRE_BigInt)begin2, first2 + (HYPRE_BigInt)end2,
      out + (HYPRE_BigInt)(begin1 + begin2));

#ifdef DBG_MERGE_SORT
   hypre_assert(std::is_sorted(out + begin1 + begin2, out + end1 + end2));
#endif
}
#endif
/**
 * Parallel merge sort of in[0:len).  'temp' is caller-provided scratch of
 * the same length.  On return *out points at the sorted data, which is
 * either 'in' or 'temp' depending on how many merge passes ran -- the
 * caller must check which buffer won (see hypre_sort_and_create_inverse_map).
 */
void hypre_merge_sort(HYPRE_Int *in, HYPRE_Int *temp, HYPRE_Int len, HYPRE_Int **out)
{
   if (0 == len) return;
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime();
#endif

#ifdef DBG_MERGE_SORT
   // reference result for the debug cross-check at the end
   HYPRE_Int *dbg_buf = new HYPRE_Int[len];
   std::copy(in, in + len, dbg_buf);
   std::sort(dbg_buf, dbg_buf + len);
#endif

   // HYPRE_Int thread_private_len[hypre_NumThreads()];
   // HYPRE_Int out_len = 0;

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
   {
      HYPRE_Int num_threads = hypre_NumActiveThreads();
      HYPRE_Int my_thread_num = hypre_GetThreadNum();

      // thread-private sort of an equal share of the input
      HYPRE_Int i_per_thread = (len + num_threads - 1)/num_threads;
      HYPRE_Int i_begin = hypre_min(i_per_thread*my_thread_num, len);
      HYPRE_Int i_end = hypre_min(i_begin + i_per_thread, len);

      hypre_qsort0(in, i_begin, i_end - 1);

      // merge sorted sequences: pair up thread groups, doubling the group
      // size each pass, ping-ponging the data between 'in' and 'temp'
      HYPRE_Int in_group_size;
      HYPRE_Int *in_buf = in;
      HYPRE_Int *out_buf = temp;
      for (in_group_size = 1; in_group_size < num_threads; in_group_size *= 2)
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
         // merge 2 in-groups into 1 out-group
         HYPRE_Int out_group_size = in_group_size*2;
         HYPRE_Int group_leader = my_thread_num/out_group_size*out_group_size;
         // HYPRE_Int group_sub_leader = hypre_min(group_leader + in_group_size, num_threads - 1);
         HYPRE_Int id_in_group = my_thread_num%out_group_size;
         HYPRE_Int num_threads_in_group =
            hypre_min(group_leader + out_group_size, num_threads) - group_leader;

         HYPRE_Int in_group1_begin = hypre_min(i_per_thread*group_leader, len);
         HYPRE_Int in_group1_end = hypre_min(in_group1_begin + i_per_thread*in_group_size, len);
         HYPRE_Int in_group2_begin = hypre_min(in_group1_begin + i_per_thread*in_group_size, len);
         HYPRE_Int in_group2_end = hypre_min(in_group2_begin + i_per_thread*in_group_size, len);

         hypre_parallel_merge(
            in_buf + in_group1_begin, in_buf + in_group1_end,
            in_buf + in_group2_begin, in_buf + in_group2_end,
            out_buf + in_group1_begin,
            num_threads_in_group,
            id_in_group);

         // swap the ping-pong buffers
         // (this local 'temp' intentionally shadows the parameter)
         HYPRE_Int *temp = in_buf;
         in_buf = out_buf;
         out_buf = temp;
      }

      // every thread computes the same in_buf, so this unsynchronized
      // store is benign
      *out = in_buf;
   } /* omp parallel */

#ifdef DBG_MERGE_SORT
   hypre_assert(std::equal(*out, *out + len, dbg_buf));

   delete[] dbg_buf;
#endif

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime();
#endif
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
/**
 * Sort in[0:len) (destroying/taking ownership of 'in') and build the
 * inverse map value -> position-in-sorted-order in 'inverse_map'.
 * On return *out points at the sorted array; whichever of the two buffers
 * ('in' or the internal scratch) did NOT become *out is freed here.
 * Values must be distinct: inserting a duplicate trips the assert below.
 */
void hypre_sort_and_create_inverse_map(
   HYPRE_Int *in, HYPRE_Int len, HYPRE_Int **out, hypre_UnorderedIntMap *inverse_map)
{
   if (len == 0)
   {
      return;
   }
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime();
#endif

   HYPRE_Int *temp = hypre_TAlloc(HYPRE_Int, len, HYPRE_MEMORY_HOST);
   hypre_merge_sort(in, temp, len, out);
   // 2x size to keep the hopscotch table sparsely loaded
   hypre_UnorderedIntMapCreate(inverse_map, 2*len, 16*hypre_NumThreads());
   HYPRE_Int i;
   // NOTE(review): this pragma is not wrapped in HYPRE_USING_OPENMP like
   // the ones in hypre_merge_sort; harmless when OpenMP is off (ignored),
   // but inconsistent -- confirm intent.
#pragma omp parallel for HYPRE_SMP_SCHEDULE
   for (i = 0; i < len; i++)
   {
      HYPRE_Int old = hypre_UnorderedIntMapPutIfAbsent(inverse_map, (*out)[i], i);
      hypre_assert(old == HYPRE_HOPSCOTCH_HASH_EMPTY);
#ifdef DBG_MERGE_SORT
      if (hypre_UnorderedIntMapGet(inverse_map, (*out)[i]) != i)
      {
         fprintf(stderr, "%d %d\n", i, (*out)[i]);
         hypre_assert(false);
      }
#endif
   }

#ifdef DBG_MERGE_SORT
   std::unordered_map<HYPRE_Int, HYPRE_Int> inverse_map2(len);
   for (HYPRE_Int i = 0; i < len; ++i) {
      inverse_map2[(*out)[i]] = i;
      if (hypre_UnorderedIntMapGet(inverse_map, (*out)[i]) != i)
      {
         fprintf(stderr, "%d %d\n", i, (*out)[i]);
         hypre_assert(false);
      }
   }
   hypre_assert(hypre_UnorderedIntMapSize(inverse_map) == len);
#endif

   // free whichever buffer did not end up holding the sorted data
   if (*out == in)
   {
      hypre_TFree(temp, HYPRE_MEMORY_HOST);
   }
   else
   {
      hypre_TFree(in, HYPRE_MEMORY_HOST);
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime();
#endif
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
/**
 * Parallel merge sort of the HYPRE_BigInt array in[0:len).  'temp' is
 * caller-provided scratch of the same length.  On return *out points at
 * the sorted data, which is either 'in' or 'temp' depending on how many
 * merge passes ran -- the caller must check which buffer won.
 *
 * Fix: the DBG_MERGE_SORT reference buffer was declared HYPRE_Int[],
 * truncating 64-bit HYPRE_BigInt values before the std::equal cross-check;
 * it is now HYPRE_BigInt[].
 */
void hypre_big_merge_sort(HYPRE_BigInt *in, HYPRE_BigInt *temp, HYPRE_Int len, HYPRE_BigInt **out)
{
   if (0 == len) return;
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime();
#endif

#ifdef DBG_MERGE_SORT
   // reference result for the debug cross-check at the end
   HYPRE_BigInt *dbg_buf = new HYPRE_BigInt[len];
   std::copy(in, in + len, dbg_buf);
   std::sort(dbg_buf, dbg_buf + len);
#endif

   // HYPRE_Int thread_private_len[hypre_NumThreads()];
   // HYPRE_Int out_len = 0;

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
   {
      HYPRE_Int num_threads = hypre_NumActiveThreads();
      HYPRE_Int my_thread_num = hypre_GetThreadNum();

      // thread-private sort of an equal share of the input
      HYPRE_Int i_per_thread = (len + num_threads - 1)/num_threads;
      HYPRE_Int i_begin = hypre_min(i_per_thread*my_thread_num, len);
      HYPRE_Int i_end = hypre_min(i_begin + i_per_thread, len);

      hypre_BigQsort0(in, i_begin, i_end - 1);

      // merge sorted sequences: pair up thread groups, doubling the group
      // size each pass, ping-ponging the data between 'in' and 'temp'
      HYPRE_Int in_group_size;
      HYPRE_BigInt *in_buf = in;
      HYPRE_BigInt *out_buf = temp;
      for (in_group_size = 1; in_group_size < num_threads; in_group_size *= 2)
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
         // merge 2 in-groups into 1 out-group
         HYPRE_Int out_group_size = in_group_size*2;
         HYPRE_Int group_leader = my_thread_num/out_group_size*out_group_size;
         // HYPRE_Int group_sub_leader = hypre_min(group_leader + in_group_size, num_threads - 1);
         HYPRE_Int id_in_group = my_thread_num%out_group_size;
         HYPRE_Int num_threads_in_group =
            hypre_min(group_leader + out_group_size, num_threads) - group_leader;

         HYPRE_Int in_group1_begin = hypre_min(i_per_thread*group_leader, len);
         HYPRE_Int in_group1_end = hypre_min(in_group1_begin + i_per_thread*in_group_size, len);
         HYPRE_Int in_group2_begin = hypre_min(in_group1_begin + i_per_thread*in_group_size, len);
         HYPRE_Int in_group2_end = hypre_min(in_group2_begin + i_per_thread*in_group_size, len);

         hypre_big_parallel_merge(
            in_buf + (HYPRE_BigInt)in_group1_begin, in_buf + (HYPRE_BigInt)in_group1_end,
            in_buf + (HYPRE_BigInt)in_group2_begin, in_buf + (HYPRE_BigInt)in_group2_end,
            out_buf + (HYPRE_BigInt)in_group1_begin,
            num_threads_in_group,
            id_in_group);

         // swap the ping-pong buffers
         // (this local 'temp' intentionally shadows the parameter)
         HYPRE_BigInt *temp = in_buf;
         in_buf = out_buf;
         out_buf = temp;
      }

      // every thread computes the same in_buf, so this unsynchronized
      // store is benign
      *out = in_buf;
   } /* omp parallel */

#ifdef DBG_MERGE_SORT
   hypre_assert(std::equal(*out, *out + len, dbg_buf));

   delete[] dbg_buf;
#endif

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime();
#endif
}
/**
 * Sort the HYPRE_BigInt array in[0:len) (taking ownership of 'in') and
 * build the inverse map value -> position-in-sorted-order in 'inverse_map'.
 * On return *out points at the sorted array; whichever of the two buffers
 * ('in' or the internal scratch) did NOT become *out is freed here.
 * Values must be distinct: inserting a duplicate trips the assert below.
 *
 * Fixes (debug code only): HYPRE_BigInt values were printed with "%d"
 * (undefined behavior when HYPRE_BigInt is 64-bit) -- now cast to
 * long long and printed with "%lld"; and the debug shadow map was keyed
 * by HYPRE_Int, truncating BigInt keys -- now keyed by HYPRE_BigInt.
 */
void hypre_big_sort_and_create_inverse_map(
   HYPRE_BigInt *in, HYPRE_Int len, HYPRE_BigInt **out, hypre_UnorderedBigIntMap *inverse_map)
{
   if (len == 0)
   {
      return;
   }
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime();
#endif

   HYPRE_BigInt *temp = hypre_TAlloc(HYPRE_BigInt, len, HYPRE_MEMORY_HOST);
   hypre_big_merge_sort(in, temp, len, out);
   // 2x size to keep the hopscotch table sparsely loaded
   hypre_UnorderedBigIntMapCreate(inverse_map, 2*len, 16*hypre_NumThreads());
   HYPRE_Int i;
#pragma omp parallel for HYPRE_SMP_SCHEDULE
   for (i = 0; i < len; i++)
   {
      HYPRE_Int old = hypre_UnorderedBigIntMapPutIfAbsent(inverse_map, (*out)[i], i);
      hypre_assert(old == HYPRE_HOPSCOTCH_HASH_EMPTY);
#ifdef DBG_MERGE_SORT
      if (hypre_UnorderedBigIntMapGet(inverse_map, (*out)[i]) != i)
      {
         fprintf(stderr, "%d %lld\n", i, (long long)(*out)[i]);
         hypre_assert(false);
      }
#endif
   }

#ifdef DBG_MERGE_SORT
   std::unordered_map<HYPRE_BigInt, HYPRE_Int> inverse_map2(len);
   for (HYPRE_Int i = 0; i < len; ++i) {
      inverse_map2[(*out)[i]] = i;
      if (hypre_UnorderedBigIntMapGet(inverse_map, (*out)[i]) != i)
      {
         fprintf(stderr, "%d %lld\n", i, (long long)(*out)[i]);
         hypre_assert(false);
      }
   }
   hypre_assert(hypre_UnorderedBigIntMapSize(inverse_map) == len);
#endif

   // free whichever buffer did not end up holding the sorted data
   if (*out == in)
   {
      hypre_TFree(temp, HYPRE_MEMORY_HOST);
   }
   else
   {
      hypre_TFree(in, HYPRE_MEMORY_HOST);
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime();
#endif
}
#endif
#endif
/* vim: set tabstop=8 softtabstop=3 sw=3 expandtab: */
|
GB_bitmap_AxB_saxpy_A_bitmap_B_sparse_template.c | //------------------------------------------------------------------------------
// GB_bitmap_AxB_saxpy_A_bitmap_B_sparse: C<#M>+=A*B, C bitmap, M any format
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// C is bitmap, A is bitmap or full, B is sparse or hypersparse.
// M has any format.
{
    // This is a template body, #include'd with the GB_* macros bound by the
    // generated kernel: C is bitmap, A is bitmap or full, B is sparse or
    // hypersparse, M has any format.  Work proceeds one "metapanel" of
    // GB_PANEL_SIZE-row panels at a time: load A into G, compute H = G*B,
    // then scatter/accumulate H into C under the mask.

    //--------------------------------------------------------------------------
    // allocate workspace for each task
    //--------------------------------------------------------------------------

    // imeta = total number of rows of A and H in all panels
    int64_t imeta = naslice * GB_PANEL_SIZE ;

    // number of entries in one panel of G for A.
    #if GB_HAS_BITMAP_MULTADD && !GB_IS_ANY_PAIR_SEMIRING
    // Always load the A panel into G, since Ax [pA] has uninitialized values
    // where Ab [pA] == 0.  The GB_BITMAP_MULTADD update will access these
    // values, and they must be initialized.
    const bool load_apanel = true ;
    #else
    // only load the A panel into G if it consists of more than one panel
    const bool load_apanel = (avlen > GB_PANEL_SIZE) ;
    #endif

    // Each panel of G is GB_PANEL_SIZE-by-avdim, held by column.
    int64_t apanel_size = load_apanel ? (GB_PANEL_SIZE * avdim) : 0 ;
    int64_t afpanel_size = GB_A_IS_BITMAP ? (apanel_size) : 0 ;
    int64_t axpanel_size = A_is_pattern ? 0 : (apanel_size * GB_ASIZE) ;

    // each panel of H is GB_PANEL_SIZE-by-bnvec, held by column; note that
    // H has bnvec vectors, not bvdim.  The C bitmap has bvdim vectors,
    // and bnvec <= bvdim if B is hypersparse.
    int64_t hpanel_size = GB_PANEL_SIZE * bnvec ;

    //--------------------------------------------------------------------------
    // allocate the panels
    //--------------------------------------------------------------------------

    // The G panels are not needed if A would fit into a single panel.
    // In that case A is used in place and not copied into G.
    int64_t wafsize = naslice * afpanel_size ;
    int64_t waxsize = naslice * axpanel_size ;
    int64_t wcsize  = naslice * hpanel_size ;
    int64_t wcxsize = GB_IS_ANY_PAIR_SEMIRING ? 0 : (wcsize * GB_CSIZE) ;
    // Wf holds all Gb panels followed by all Hf panels
    Wf  = GB_MALLOC (wafsize + wcsize, int8_t) ;
    Wax = GB_MALLOC (waxsize, GB_void) ;
    Wcx = GB_MALLOC (wcxsize, GB_void) ;
    if (Wf == NULL || Wax == NULL || Wcx == NULL)
    {
        // out of memory
        GB_FREE_WORK ;
        return (GrB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // initialize the panels
    //--------------------------------------------------------------------------

    // for all semirings: set the bitmaps Gb and Hf to zero
    GB_memset (Wf, 0, wafsize + wcsize, nthreads_max) ;

    #if GB_HAS_BITMAP_MULTADD && !GB_IS_ANY_PAIR_SEMIRING
    {
        // Initialize the Hx workspace to identity, if this semiring has a
        // concise bitmap multiply-add expression.  For the any_pair semiring,
        // the numerical values are not needed so Hx is not allocated.
        #if GB_HAS_IDENTITY_BYTE
        // the identity value can be assigned via memset
        GB_memset (Wcx, GB_IDENTITY_BYTE, wcxsize, nthreads_max) ;
        #else
        // an explicit loop is required to set Hx to identity
        // TODO: should each task initialize its own Hf and Hx,
        // and use a static schedule here and for H=G*B?
        GB_CTYPE *GB_RESTRICT Hx = (GB_CTYPE *) Wcx ;
        int64_t pH ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (pH = 0 ; pH < wcsize ; pH++)
        {
            Hx [pH] = GB_IDENTITY ;
        }
        #endif
    }
    #endif

    //--------------------------------------------------------------------------
    // C<#M>=A*B, one metapanel at a time
    //--------------------------------------------------------------------------

    int tid ;
    for (int64_t iouter = 0 ; iouter < avlen ; iouter += imeta)
    {

        //----------------------------------------------------------------------
        // C<#M>(metapanel,:) += A (metapanel,:)*B
        //----------------------------------------------------------------------

        // The rows in this metapanel are iouter:iouter+imeta-1.

        //----------------------------------------------------------------------
        // load the metapanel: G = A (iouter:iouter+imeta-1,:)
        //----------------------------------------------------------------------

        if ((GB_A_IS_BITMAP || !A_is_pattern) && load_apanel)
        {
            // Loading the panel into G keeps its storage order.  A is not
            // transposed when loaded into the G panels.  However, the leading
            // dimension is reduced.  A is avlen-by-avdim with a leading
            // dimension of avlen, which can be large.  G is np-by-avdim, with
            // np <= GB_PANEL_SIZE.  The loading of A into G can be skipped
            // if all of A can be used in-place.

            #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
            for (tid = 0 ; tid < ntasks ; tid++)
            {

                //--------------------------------------------------------------
                // get the panel for this task
                //--------------------------------------------------------------

                int a_tid = tid / nbslice ;
                int b_tid = tid % nbslice ;
                int64_t istart = iouter + a_tid * GB_PANEL_SIZE ;
                int64_t iend   = iouter + (a_tid+1) * GB_PANEL_SIZE ;
                iend = GB_IMIN (iend, avlen) ;
                int64_t np = iend - istart ;
                if (np <= 0) continue ;
                int64_t kstart, kend ;
                GB_PARTITION (kstart, kend, avdim, b_tid, nbslice) ;
                int8_t *GB_RESTRICT Gb = Wf + (a_tid * afpanel_size) ;
                GB_ATYPE *GB_RESTRICT Gx = (GB_ATYPE *)
                    (Wax + (a_tid * axpanel_size)) ;

                //--------------------------------------------------------------
                // load A for this panel
                //--------------------------------------------------------------

                #if ( GB_A_IS_BITMAP )
                {

                    //----------------------------------------------------------
                    // A is bitmap
                    //----------------------------------------------------------

                    if (!A_is_pattern)
                    {
                        // load Ab and Ax into Gb and Gx
                        for (int64_t k = kstart ; k < kend ; k++)
                        {
                            for (int64_t ii = 0 ; ii < np ; ii++)
                            {
                                // Gb (ii,k) = Ab (istart+ii,k)
                                const int64_t pG = ii + k*np ;
                                const int64_t pA = istart + ii + k*avlen ;
                                const int8_t gb = Ab [pA] ;
                                Gb [pG] = gb ;
                                if (gb)
                                {
                                    // Gx (ii,k) = Ax (istart+ii,k)
                                    GB_LOADA (Gx, pG, Ax, pA) ;
                                }
                                #if GB_HAS_BITMAP_MULTADD
                                else
                                {
                                    // Gx (ii,k) = 0, so GB_BITMAP_MULTADD
                                    // never reads an uninitialized value
                                    Gx [pG] = GB_ATYPE_CAST (0, 0) ;
                                }
                                #endif
                            }
                        }
                    }
                    else
                    {
                        // just load the Ab bitmap into Gb, not the values
                        for (int64_t k = kstart ; k < kend ; k++)
                        {
                            for (int64_t ii = 0 ; ii < np ; ii++)
                            {
                                // Gb (ii,k) = Ab (istart+ii,k)
                                const int64_t pG = ii + k*np ;
                                const int64_t pA = istart + ii + k*avlen ;
                                Gb [pG] = Ab [pA] ;
                            }
                        }
                    }
                }
                #else
                {

                    //----------------------------------------------------------
                    // A is full
                    //----------------------------------------------------------

                    if (!A_is_pattern)
                    {
                        for (int64_t k = kstart ; k < kend ; k++)
                        {
                            for (int64_t ii = 0 ; ii < np ; ii++)
                            {
                                // Gx (ii,k) = Ax (istart+ii,k)
                                const int64_t pG = ii + k*np ;
                                const int64_t pA = istart + ii + k*avlen ;
                                GB_LOADA (Gx, pG, Ax, pA) ;
                            }
                        }
                    }
                }
                #endif
            }
        }

        //----------------------------------------------------------------------
        // H = G*B
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (tid = 0 ; tid < ntasks ; tid++)
        {

            //------------------------------------------------------------------
            // get the panel of H and G for this task
            //------------------------------------------------------------------

            int a_tid = tid / nbslice ;
            int b_tid = tid % nbslice ;
            int64_t istart = iouter + a_tid * GB_PANEL_SIZE ;
            int64_t iend   = iouter + (a_tid+1) * GB_PANEL_SIZE ;
            iend = GB_IMIN (iend, avlen) ;
            int64_t np = iend - istart ;
            if (np <= 0) continue ;
            const int8_t *GB_RESTRICT Gb ;
            const GB_ATYPE *GB_RESTRICT Gx ;
            if (load_apanel)
            {
                // A has been loaded into the G panel
                Gb = Wf + (a_tid * afpanel_size) ;
                Gx = (GB_ATYPE *) (Wax + (a_tid * axpanel_size)) ;
            }
            else
            {
                // use A in-place
                Gb = Ab ;
                Gx = (GB_ATYPE *) Ax ;
            }
            int8_t *GB_RESTRICT Hf = Wf + (a_tid * hpanel_size) + wafsize ;
            GB_CTYPE *GB_RESTRICT Hx = (GB_CTYPE *)
                (Wcx + (a_tid * hpanel_size) * GB_CSIZE) ;
            GB_XINIT ;      // for plus, bor, band, and bxor monoids only

            //------------------------------------------------------------------
            // H_panel (:,kfirst:klast-1) = G_panel * B (:, kfirst:klast-1)
            //------------------------------------------------------------------

            int64_t kfirst = B_slice [b_tid] ;
            int64_t klast  = B_slice [b_tid + 1] ;
            for (int64_t kk = kfirst ; kk < klast ; kk++)
            {

                //--------------------------------------------------------------
                // H_panel (:,kk) = G_panel * B (:,kk)
                //--------------------------------------------------------------

                // H and B are indexed in the compact space kk = 0:bnvec-1,
                // not by the names j = 0:bvdim-1.  When B is sparse, these are
                // the same.  If B is hypersparse, j is Bh [kk].  However, j is
                // needed for the SECONDJ and SECONDJ1 multipliers.

                int64_t j = GBH (Bh, kk) ;
                int64_t pB = Bp [kk] ;
                int64_t pB_end = Bp [kk+1] ;
                int64_t pH = kk * np ;

                #if GB_IS_SECONDJ_MULTIPLIER
                // t = j or j+1 for SECONDJ and SECONDJ1 multipliers
                GB_CIJ_DECLARE (t) ;
                GB_MULT (t, ignore, ignore, ignore, ignore, j) ;
                #endif

                #undef  GB_MULT_G_iik_B_kj
                #if GB_IS_PAIR_MULTIPLIER
                // t = G(ii,k) * B(k,j) is always equal to 1
                #define GB_MULT_G_iik_B_kj(ii)
                #elif ( GB_IS_FIRSTJ_MULTIPLIER || GB_IS_SECONDJ_MULTIPLIER )
                // t is already defined for these multipliers
                #define GB_MULT_G_iik_B_kj(ii)
                #else
                // t = G(ii,k) * B(k,j)
                #define GB_MULT_G_iik_B_kj(ii)                          \
                    GB_GETA (giik, Gx, pG + ii) ;                       \
                    GB_CIJ_DECLARE (t) ;                                \
                    GB_MULT (t, giik, bkj, istart + ii, k, j)
                #endif

                for ( ; pB < pB_end ; pB++)
                {
                    int64_t k = Bi [pB] ;       // get B(k,j)
                    int64_t pG = k * np ;       // get G(:,k)
                    GB_GET_B_kj ;               // bkj = B(k,j)
                    GB_XLOAD (bkj) ;            // X [1] = bkj (plus_times only)
                    // H_panel (:,j) = G_panel (:,k) * B(k,j)
                    for (int64_t ii = 0 ; ii < np ; ii++)
                    {
                        #if GB_HAS_BITMAP_MULTADD
                        {
                            // branch-free fused update:
                            // if (Gb (ii,k))
                            //      if (Hf (ii,j) == 0)
                            //          Hx (ii,j) = G (ii,k) * B(k,j) ;
                            //          Hf (ii,j) = 1
                            //      else
                            //          Hx (ii,j) += G (ii,k) * B(k,j) ;
                            #if GB_IS_FIRSTI_MULTIPLIER
                            int64_t i = istart + ii ;
                            #endif
                            #if GB_A_IS_BITMAP
                            GB_BITMAP_MULTADD (
                                Hf [pH+ii], Hx [pH+ii],
                                Gb [pG+ii], Gx [pG+ii], bkj) ;
                            #else
                            // A is full: every G entry is present
                            GB_BITMAP_MULTADD (
                                Hf [pH+ii], Hx [pH+ii],
                                1, Gx [pG+ii], bkj) ;
                            #endif
                        }
                        #else
                        {
                            #if GB_A_IS_BITMAP
                            if (Gb [pG+ii])
                            #endif
                            {
                                // t = G(ii,k) * B(k,j)
                                GB_MULT_G_iik_B_kj (ii) ;
                                if (Hf [pH+ii] == 0)
                                {
                                    // H (ii,j) is a new entry
                                    GB_HX_WRITE (pH+ii, t) ;    // Hx (ii,j)=t
                                    Hf [pH+ii] = 1 ;
                                }
                                else
                                {
                                    // H (ii,j) is already present
                                    GB_HX_UPDATE (pH+ii, t) ;   // Hx (ii,j)+=t
                                }
                            }
                        }
                        #endif
                    }
                }
                #undef GB_MULT_G_iik_B_kj
            }
        }

        //----------------------------------------------------------------------
        // C (metapanel,:) += H
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(+:cnvals)
        for (tid = 0 ; tid < ntasks ; tid++)
        {

            //------------------------------------------------------------------
            // get the panel of H and G for this task
            //------------------------------------------------------------------

            int a_tid = tid / nbslice ;
            int b_tid = tid % nbslice ;
            int64_t istart = iouter + a_tid * GB_PANEL_SIZE ;
            int64_t iend   = iouter + (a_tid+1) * GB_PANEL_SIZE ;
            iend = GB_IMIN (iend, avlen) ;
            int64_t np = iend - istart ;
            if (np <= 0) continue ;
            int64_t task_cnvals = 0 ;
            int64_t kstart, kend ;
            GB_PARTITION (kstart, kend, bnvec, b_tid, nbslice) ;
            int8_t *GB_RESTRICT Hf = Wf + (a_tid * hpanel_size) + wafsize ;
            GB_CTYPE *GB_RESTRICT Hx = (GB_CTYPE *)
                (Wcx + (a_tid * hpanel_size) * GB_CSIZE) ;

            //------------------------------------------------------------------
            // C<#M>(metapanel,j1:j2-1) += H (:,kstart:kend-1)
            //------------------------------------------------------------------

            // If B is hypersparse, the kk-th vector of H is the jth vector
            // of C, where j = Bh [kk].

            for (int64_t kk = kstart ; kk < kend ; kk++)
            {
                int64_t j = GBH (Bh, kk) ;      // j is the range j1:j2-1
                int64_t pC_start = istart + j * avlen ; // get C(istart,j)
                int64_t pH_start = kk * np ;            // get H(:,kk)
                for (int64_t ii = 0 ; ii < np ; ii++)
                {
                    int64_t pC = pC_start + ii ;    // get C(i,j)
                    int64_t pH = pH_start + ii ;    // get H(ii,kk)
                    if (!Hf [pH]) continue ;
                    Hf [pH] = 0 ;                   // clear the panel
                    int8_t cb = Cb [pC] ;

                    //----------------------------------------------------------
                    // check M(i,j)
                    //----------------------------------------------------------

                    #undef GB_IF_MIJ
                    #if GB_MASK_IS_SPARSE_OR_HYPER
                    // M is sparse or hypersparse (bit 2 of Cb caches M(i,j))
                    bool mij = ((cb & 2) != 0) ^ Mask_comp ;
                    cb = (cb & 1) ;
                    #define GB_IF_MIJ if (mij)
                    #elif GB_MASK_IS_BITMAP_OR_FULL
                    // M is bitmap or full
                    GB_GET_M_ij (pC) ;
                    mij = mij ^ Mask_comp ;
                    #define GB_IF_MIJ if (mij)
                    #else
                    // no mask: update unconditionally
                    #define GB_IF_MIJ
                    #endif

                    //----------------------------------------------------------
                    // C(i,j) += H(ii,kk)
                    //----------------------------------------------------------

                    GB_IF_MIJ
                    {
                        if (cb == 0)
                        {
                            // C(i,j) = H(ii,kk)
                            #if GB_IS_ANY_PAIR_SEMIRING
                            Cx [pC] = GB_CTYPE_CAST (1,0) ; // C(i,j) = 1
                            #else
                            GB_CIJ_GATHER (pC, pH) ;
                            #endif
                            Cb [pC] = keep ;
                            task_cnvals++ ;
                        }
                        else
                        {
                            // Currently, the matrix C is a newly allocated
                            // matrix, not the C_in input matrix to GrB_mxm.
                            // As a result, this condition is not used.  It
                            // will be in the future when this method is
                            // modified to modify C in-place.
                            ASSERT (GB_DEAD_CODE) ;
                            // C(i,j) += H(ii,kk)
                            GB_CIJ_GATHER_UPDATE (pC, pH) ;
                        }
                    }

                    //----------------------------------------------------------
                    // clear the panel
                    //----------------------------------------------------------

                    #if GB_HAS_BITMAP_MULTADD && !GB_IS_ANY_PAIR_SEMIRING
                    {
                        // H(ii,kk) = identity
                        Hx [pH] = GB_IDENTITY ;
                    }
                    #endif
                }
            }
            cnvals += task_cnvals ;
        }
    }
}

#undef GB_IF_MIJ
|
4.race1.c | // RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s
#include <omp.h>
#define N 4
int main() {
  // 9-dimensional work array: 4^9 ints (~1 MiB on the stack --
  // NOTE(review): close to the default stack limit on some systems).
  int A[N][N][N][N][N][N][N][N][N];
  // The update reads A[i-1][...] that another thread may be writing:
  // a deliberate loop-carried dependence across the parallelized i loop,
  // so the checker must report a data race (see the CHECK line below).
#pragma omp parallel for
  for (int i = 1; i < N; i++)
    for (int j = 1; j < N; j++)
      for (int k = 1; k < N; k++)
        for (int l = 1; l < N; l++)
          for (int m = 1; m < N; m++)
            for (int n = 1; n < N; n++)
              for (int o = 1; o < N; o++)
                for (int p = 1; p < N; p++)
                  for (int q = 1; q < N; q++)
                    A[i][j][k][l][m][n][o][p][q] +=
                        A[i - 1][j][k][l][m][n][o][p][q];
}
// CHECK: Data Race detected
// END
|
jtnormal_intel.c | #include <complex.h>
#include <fftw/fftw3.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <assert.h>
#include <string.h>
#include <omp.h>
#include <xmmintrin.h>
#include <mkl.h>
#define BLK 4
// Transpose one BLK-by-BLK tile: B = A^T.  A and B are dense BLK*BLK
// buffers and must not alias (declared __restrict__).
//
// Fix: 'static' added.  In C99, a plain 'inline' definition in a .c file
// provides no external definition of the function, so any call the
// compiler chooses not to inline fails at link time; 'static inline'
// always gives the translation unit a usable definition.
static inline void TransposeBLKxBLK(complex float * __restrict__ A, complex float * __restrict__ B) {
    int i, j;
    for (i = 0; i < BLK; i++)
        for (j = 0; j < BLK; j++)
            B[i*BLK + j] = A[j*BLK + i];
}
// Transpose a dim0-by-_p panel (column-major, leading dimension dim0) from
// cor_out into cor_out2 (leading dimension dim1), tile by tile via
// TransposeBLKxBLK.  'tid' staggers the starting tile per thread
// (mine = (bb+tid)%nblk0), presumably to spread concurrent accesses across
// memory -- confirm against the callers.  Remainder rows and columns that
// do not fill a whole BLK tile are handled by the scalar tail loops.
//
// Fix: 'static' added -- a plain C99 'inline' definition in a .c file
// emits no external definition, so a non-inlined call fails to link.
static inline void TransposePanel(complex float * __restrict__ cor_out,
                                  complex float * __restrict__ cor_out2,
                                  int _p,
                                  int tid,
                                  int dim0,
                                  int dim1)
{
    int nblk0 = dim0 / BLK;   // full tiles along the rows of cor_out
    int nblk1 = _p / BLK;     // full tiles along the columns of the panel

    for(int cc = 0 ; cc < nblk1 ; cc++)
    {
        for(int bb = 0 ; bb < nblk0 ; bb++)
        {
            int mine = (bb+tid)%nblk0;
            int b = mine * BLK;
            int c = cc * BLK;
            complex float buf1[BLK*BLK];
            complex float buf2[BLK*BLK];
            // gather the BLK x BLK tile into contiguous buf1
            for(int i = 0 ; i < BLK ; i++)
            {
#pragma simd
                for(int j = 0 ; j < BLK ; j++)
                {
                    buf1[j + i*BLK] = cor_out[b + j + (c+i)*dim0];
                }
            }
            TransposeBLKxBLK(buf1, buf2);
            // scatter the transposed tile to its destination
            for(int i = 0 ; i < BLK ; i++)
            {
#pragma simd
                for(int j = 0 ; j < BLK ; j++)
                {
                    cor_out2[c + j + (b+i)*dim1] = buf2[j + i*BLK];
                }
            }
        }
    }

    // remainder columns of the panel (cc not filling a whole tile)
    for(int cc = nblk1*BLK ; cc < _p ; cc++)
    {
#pragma simd
        for(int i = 0 ; i < dim0 ; i++)
        {
            cor_out2[cc + i*dim1] = cor_out[i + cc*dim0];
        }
    }

    // Do extra columns
    for(int bb = nblk0*BLK ; bb < dim0 ; bb++)
    {
        for(int cc = 0 ; cc < _p ; cc++)
        {
            cor_out2[cc + bb*dim1] = cor_out[bb + cc*dim0];
        }
    }
}
/*
 * Apply the joint-model "normal" operator, coil by coil:
 *   1) multiply the ncfimg input images (src) by coil sensitivity maps
 *      (1 or 2 map sets), scale by sc, 1-D FFT along dim0 (plan1d_0),
 *      transpose into cfksp4;
 *   2) 1-D FFT along dim1 (plan1d_1), combine the ncfimg images via the
 *      real kernel stkern_mat (indexed so only one triangle is read --
 *      apparently symmetric storage; confirm with the layout producer),
 *      inverse FFT, transpose back into cfksp3;
 *   3) inverse FFT along dim0 and accumulate conj(map) * value into dst
 *      (dst is zeroed on the first coil).
 *
 * cfksp3 / cfksp4 are caller-provided scratch of ncfimg*dim0*dim1 each.
 * Thread decomposition: rows of dim1 (P per thread) in phases 1 and 3,
 * rows of dim0 (P0 per thread) in phase 2; each phase ends at the implicit
 * barrier of its parallel region.
 *
 * NOTE(review): 'start'/'end' timers are declared but never used; the
 * per-thread malloc of stkern_tmp is not NULL-checked.
 */
void jtmodel_normal_benchmark_fast_parallel(
    const complex float * __restrict__ sens, const float * __restrict__ stkern_mat,
    complex float * dst, const complex float * src,
    const unsigned long dim0,
    const unsigned long dim1,
    const unsigned long ncoils,
    const unsigned long nmaps,
    const unsigned long ncfimg,
    DFTI_DESCRIPTOR_HANDLE plan1d_0, DFTI_DESCRIPTOR_HANDLE plan1d_1,
    complex float * cfksp3,
    complex float * cfksp4) {

    struct timeval start, end;
    int nthr = omp_get_max_threads();
    int P  = (dim1 + nthr-1) / nthr;    // dim1 rows per thread (phases 1 and 3)
    int P0 = (dim0 + nthr-1) / nthr;    // dim0 rows per thread (phase 2)
    float sc = 1.0 / sqrt((double)dim0 * dim1);   // FFT normalization, applied twice

    assert(nmaps == 1 || nmaps == 2);

    for(int coil = 0 ; coil < ncoils ; coil++)
    {
        //------------------------------------------------------------------
        // phase 1: sensitivity multiply + FFT along dim0 + transpose
        //------------------------------------------------------------------
#pragma omp parallel num_threads(nthr)
        {
            int tid = omp_get_thread_num();
            int row_start = tid * P;
            int row_end = (tid+1) * P;
            if(row_end > dim1) row_end = dim1;
            for(int cfimg = 0 ; cfimg < ncfimg ; cfimg++)
            {
                for(int row = row_start ; row < row_end; row++)
                {
                    const complex float *map0 = sens + coil * dim1 * dim0 + dim0 * row;
                    const complex float *map1 = NULL;
                    if (nmaps == 2)
                        map1 = sens + coil * dim1 * dim0 + ncoils * dim0 * dim1 + dim0 * row;
                    const complex float *cfimg0 = src + cfimg * dim0 * dim1 * nmaps + dim0 * row;
                    const complex float *cfimg1 = NULL;
                    if (nmaps == 2)
                        cfimg1 = src + dim0 * dim1 + cfimg * dim0 * dim1 * nmaps + dim0 * row;
                    complex float *cor_out =
                        cfksp3 + cfimg * dim1 * dim0 + dim0 * row;
                    // cor_out = (map0*img0 [+ map1*img1]) * sc, one dim0 row
#pragma simd
                    for (int i = 0; i < dim0; i++) {
                        if (nmaps == 2)
                            cor_out[i] = (map0[i] * cfimg0[i] + map1[i] * cfimg1[i]) * sc;
                        else
                            cor_out[i] = (map0[i] * cfimg0[i]) * sc;
                    }
                    // in-place FFT of this row along dim0
                    DftiComputeForward(plan1d_0, cor_out, cor_out);
                }
                // transpose this thread's row block into cfksp4
                complex float *cor_out =
                    cfksp3 + cfimg * dim1 * dim0 + dim0 * row_start;
                complex float *cor_out2 =
                    cfksp4 + cfimg * dim1 * dim0 + row_start;
                TransposePanel(cor_out, cor_out2, row_end-row_start, tid, dim0, dim1);
            }
        }

        //------------------------------------------------------------------
        // phase 2: FFT along dim1, apply stkern_mat across images, inverse
        // FFT, transpose back
        //------------------------------------------------------------------
#pragma omp parallel num_threads(nthr)
        {
            int tid = omp_get_thread_num();
            int row_start = tid * P0;
            int row_end = (tid+1) * P0;
            if(row_end > dim0) row_end = dim0;
            // per-thread accumulator: ncfimg rows of length dim1
            complex float * stkern_tmp = (complex float*) malloc(dim1 * ncfimg * sizeof(complex float));
            for (int row = row_start ; row < row_end ; row++) {
                for(int cfimg = 0 ; cfimg < ncfimg ; cfimg++)
                {
                    complex float *cor_out =
                        cfksp4 + cfimg * dim1 * dim0 + dim1 * row;
                    DftiComputeForward(plan1d_1, cor_out, cor_out);
                }
                for(int cfimg_i = 0 ; cfimg_i < ncfimg ; cfimg_i++)
                {
                    complex float *tmp = stkern_tmp + cfimg_i * dim1;
                    for (int cfimg_j = 0; cfimg_j < ncfimg; cfimg_j++) {
                        complex float *cfimg_in = cfksp4 +
                            cfimg_j * dim0 * dim1 + row * dim1;
                        // pick the stored triangle of the (i,j) kernel block
                        const float *mat = (cfimg_i > cfimg_j) ? stkern_mat + cfimg_i * dim1 * dim0 + cfimg_j * dim1 * dim0 * ncfimg + row * dim1 :
                            stkern_mat + cfimg_j * dim1 * dim0 + cfimg_i * dim1 * dim0 * ncfimg + row * dim1;
                        if(cfimg_j == 0)
                        {
                            // first term initializes the accumulator
#pragma simd
                            for (int pix = 0; pix < dim1; pix++) {
                                tmp[pix] = (cfimg_in[pix] * mat[pix]);
                            }
                        }
                        else
                        {
#pragma simd
                            for (int pix = 0; pix < dim1; pix++) {
                                tmp[pix] += (cfimg_in[pix] * mat[pix]);
                            }
                        }
                    }
                    DftiComputeBackward(plan1d_1, tmp, tmp);
                }
                // write the combined rows back over cfksp4
                for(int cfimg_i = 0 ; cfimg_i < ncfimg ; cfimg_i++)
                {
                    complex float *cfimg_in = cfksp4 +
                        cfimg_i * dim0 * dim1 + row * dim1;
#pragma simd
                    for (int pix = 0; pix < dim1; pix++) {
                        cfimg_in[pix] = stkern_tmp[pix + cfimg_i*dim1];
                    }
                }
            }
            free(stkern_tmp);
            // transpose this thread's block back into cfksp3
            for(int cfimg_i = 0 ; cfimg_i < ncfimg ; cfimg_i++)
            {
                complex float *cfimg_in = cfksp4 +
                    cfimg_i * dim0 * dim1 + row_start * dim1;
                complex float *cfimg_in2 = cfksp3 +
                    cfimg_i * dim0 * dim1 + row_start;
                TransposePanel(cfimg_in, cfimg_in2, row_end-row_start, tid, dim1, dim0);
            }
        }

        //------------------------------------------------------------------
        // phase 3: inverse FFT along dim0 + conj(sens) accumulate into dst
        //------------------------------------------------------------------
#pragma omp parallel num_threads(nthr)
        {
            int tid = omp_get_thread_num();
            int row_start = tid * P;
            int row_end = (tid+1) * P;
            if(row_end > dim1) row_end = dim1;
            for (int row = row_start ; row < row_end ; row++) {
                for (int cfimg = 0; cfimg < ncfimg; cfimg++) {
                    const complex float *map0 = sens + coil*dim1*dim0 + row * dim0;
                    const complex float *map1 = NULL;
                    if (nmaps == 2)
                        map1 = sens + coil*dim1*dim0 + ncoils *dim0 * dim1 + row * dim0;
                    complex float *cor0 = dst + cfimg *dim1*dim0*nmaps + row * dim0;
                    complex float* cor1 = NULL;
                    if (nmaps == 2)
                        cor1 = dst + dim1*dim0+cfimg*dim1*dim0*nmaps + row * dim0;
                    complex float *cfimg_in = cfksp3 + cfimg*dim0*dim1 + row * dim0;
                    DftiComputeBackward(plan1d_0, cfimg_in, cfimg_in);
                    // dst accumulates over coils; zero it on the first coil
                    if(coil == 0)
                    {
#pragma simd
                        for (int i = 0; i < dim0; i++) {
                            cor0[i] = 0;
                            if (nmaps == 2)
                                cor1[i] = 0;
                        }
                    }
                    // accumulate conj(map) * value, scaled by sc:
                    // (r - i*I)*(_r + _i*I) = (r*_r + i*_i) + (r*_i - i*_r)*I
#pragma simd
                    for (int i = 0; i < dim0; i++) {
                        float r0 = __real__ map0[i];
                        float i0 = __imag__ map0[i];
                        float r1 = 0;
                        float i1 = 0;
                        if (nmaps == 2) {
                            r1 = __real__ map1[i];
                            i1 = __imag__ map1[i];
                        }
                        float _r = __real__ cfimg_in[i];
                        float _i = __imag__ cfimg_in[i];
                        cor0[i] += ((r0 * _r + i0 * _i) + (r0 * _i - i0 * _r) * _Complex_I) * sc;
                        if (nmaps == 2)
                            cor1[i] += ((r1 * _r + i1 * _i) + (r1 * _i - i1 * _r) * _Complex_I) * sc;
                    }
                }
            }
        }
    }
}
/*
 * Apply the adjoint operator: for every coil and image, inverse 2-D FFT
 * the k-space data (plan2d, out-of-place into scratch cfksp3), then
 * accumulate conj(sensitivity map) * value, scaled by sc, into dst.
 * dst is zeroed on the first coil; supports 1 or 2 map sets (asserted).
 *
 * NOTE(review): 'src' is cast away from const before DftiComputeBackward;
 * the transform is out-of-place (result lands in cfksp3), so src appears
 * to be read-only here -- but MKL's prototype requires a non-const input
 * pointer.  Confirm the descriptor is configured DFTI_NOT_INPLACE.
 */
void jtmodel_adjoint_benchmark_fast_parallel(
    const complex float * __restrict__ sens,
    complex float * dst, const complex float * src,
    const unsigned long dim0,
    const unsigned long dim1,
    const unsigned long ncoils,
    const unsigned long nmaps,
    const unsigned long ncfimg,
    DFTI_DESCRIPTOR_HANDLE plan2d,
    complex float * cfksp3)
{
    assert(nmaps == 1 || nmaps == 2);
    float sc = 1.0 / sqrt((double)dim0 * dim1);   // FFT normalization

    for(int coil = 0 ; coil < ncoils ; coil++)
    {
        const complex float * map0 = sens + coil * dim0 * dim1;
        const complex float * map1 = NULL;
        if (nmaps == 2)
            map1 = sens + coil * dim0 * dim1 + ncoils * dim0*dim1;
        for(int cfimg = 0 ; cfimg < ncfimg ; cfimg++)
        {
            // inverse 2-D FFT of this coil/image k-space into scratch
            complex float * ksp = (complex float*)src + coil*dim0*dim1 + cfimg*ncoils*dim0*dim1;
            DftiComputeBackward(plan2d, ksp, cfksp3);

            complex float * cor0 = dst + nmaps * cfimg * dim0 * dim1;
            complex float * cor1 = NULL;
            if (nmaps == 2)
                cor1 = dst + nmaps * cfimg * dim0 * dim1 + dim0*dim1;

            // dst accumulates over coils; zero it on the first coil
            if(coil == 0)
            {
#pragma omp parallel for
#pragma simd
                for (int i = 0; i < dim0*dim1; i++) {
                    cor0[i] = 0;
                    if (nmaps == 2)
                        cor1[i] = 0;
                }
            }

            // accumulate conj(map) * value, scaled by sc:
            // (r - i*I)*(_r + _i*I) = (r*_r + i*_i) + (r*_i - i*_r)*I
#pragma omp parallel for
#pragma simd
            for (int i = 0; i < dim0*dim1; i++) {
                float r0 = __real__ map0[i];
                float i0 = __imag__ map0[i];
                float r1 = 0.;
                float i1 = 0;
                if (nmaps == 2) {
                    r1 = __real__ map1[i];
                    i1 = __imag__ map1[i];
                }
                float _r = __real__ cfksp3[i];
                float _i = __imag__ cfksp3[i];
                cor0[i] += ((r0 * _r + i0 * _i) + (r0 * _i - i0 * _r) * _Complex_I) * sc;
                if (nmaps == 2)
                    cor1[i] += ((r1 * _r + i1 * _i) + (r1 * _i - i1 * _r) * _Complex_I) * sc;
            }
        }
    }
}
|
data.h | /*!
* Copyright (c) 2015 by Contributors
* \file data.h
* \brief The input data structure of tsoobgx.
* \author Tianqi Chen
*/
#ifndef TSOOBGX_DATA_H_
#define TSOOBGX_DATA_H_
#include <dmlc/base.h>
#include <dmlc/data.h>
#include <rabit/rabit.h>
#include <cstring>
#include <memory>
#include <numeric>
#include <algorithm>
#include <string>
#include <vector>
#include "./base.h"
#include "../../src/common/span.h"
#include "../../src/common/group_data.h"
#include "../../src/common/host_device_vector.h"
namespace tsoobgx {
// forward declare learner.
class LearnerImpl;
/*! \brief data type accepted by tsoobgx interface */
enum DataType {
kFloat32 = 1,  /*!< 32-bit floating point */
kDouble = 2,   /*!< 64-bit floating point */
kUInt32 = 3,   /*!< 32-bit unsigned integer */
kUInt64 = 4    /*!< 64-bit unsigned integer */
};
/*!
* \brief Meta information about dataset, always sit in memory.
*/
class MetaInfo {
public:
/*! \brief number of rows in the data */
uint64_t num_row_{0};
/*! \brief number of columns in the data */
uint64_t num_col_{0};
/*! \brief number of nonzero entries in the data */
uint64_t num_nonzero_{0};
/*! \brief label of each instance */
HostDeviceVector<bst_float> labels_;
/*!
* \brief specified root index of each instance,
* can be used for multi task setting
*/
std::vector<bst_uint> root_index_;
/*!
* \brief the index of begin and end of a group
* needed when the learning task is ranking.
*/
std::vector<bst_uint> group_ptr_;
/*! \brief weights of each instance, optional */
HostDeviceVector<bst_float> weights_;
/*! \brief session-id of each instance, optional */
std::vector<uint64_t> qids_;
/*!
* \brief initialized margins,
* if specified, tsoobgx will start from this init margin
* can be used to specify initial prediction to boost from.
*/
HostDeviceVector<bst_float> base_margin_;
/*! \brief version flag, used to check version of this info */
static const int kVersion = 2;
/*! \brief version that introduced qid field */
static const int kVersionQidAdded = 2;
/*! \brief default constructor */
MetaInfo() = default;
/*!
* \brief Get weight of each instances.
* \param i Instance index.
* \return The weight; defaults to 1.0f when no weights were provided.
*/
inline bst_float GetWeight(size_t i) const {
return weights_.Size() != 0 ? weights_.HostVector()[i] : 1.0f;
}
/*!
* \brief Get the root index of i-th instance.
* \param i Instance index.
* \return The pre-defined root index of i-th instance; 0 when unspecified.
*/
inline unsigned GetRoot(size_t i) const {
return root_index_.size() != 0 ? root_index_[i] : 0U;
}
/*! \brief get sorted indexes (argsort) of labels by absolute value (used by cox loss) */
inline const std::vector<size_t>& LabelAbsSort() const {
// Reuse the cached ordering while its length still matches the labels.
// NOTE(review): the cache is only invalidated by a size change; replacing
// labels with a same-length vector would return a stale order -- confirm
// callers always go through SetInfo which resizes.
if (label_order_cache_.size() == labels_.Size()) {
return label_order_cache_;
}
label_order_cache_.resize(labels_.Size());
std::iota(label_order_cache_.begin(), label_order_cache_.end(), 0);
const auto& l = labels_.HostVector();
TSOOBGX_PARALLEL_SORT(label_order_cache_.begin(), label_order_cache_.end(),
[&l](size_t i1, size_t i2) {return std::abs(l[i1]) < std::abs(l[i2]);});
return label_order_cache_;
}
/*! \brief clear all the information */
void Clear();
/*!
* \brief Load the Meta info from binary stream.
* \param fi The input stream
*/
void LoadBinary(dmlc::Stream* fi);
/*!
* \brief Save the Meta info to binary stream
* \param fo The output stream.
*/
void SaveBinary(dmlc::Stream* fo) const;
/*!
* \brief Set information in the meta info.
* \param key The key of the information.
* \param dptr The data pointer of the source array.
* \param dtype The type of the source data.
* \param num Number of elements in the source array.
*/
void SetInfo(const char* key, const void* dptr, DataType dtype, size_t num);
private:
/*! \brief argsort of labels, lazily built by LabelAbsSort() */
mutable std::vector<size_t> label_order_cache_;
};
/*! \brief Element from a sparse vector */
struct Entry {
/*! \brief feature index */
bst_uint index;
/*! \brief feature value */
bst_float fvalue;
/*! \brief default constructor */
Entry() = default;
/*!
* \brief constructor with index and value
* \param index The feature or row index.
* \param fvalue The feature value.
*/
Entry(bst_uint index, bst_float fvalue) : index(index), fvalue(fvalue) {}
/*! \brief reversely compare feature values */
inline static bool CmpValue(const Entry& a, const Entry& b) {
return a.fvalue < b.fvalue;
}
inline bool operator==(const Entry& other) const {
return (this->index == other.index && this->fvalue == other.fvalue);
}
};
/*!
* \brief In-memory storage unit of sparse batch, stored in CSR format.
*/
class SparsePage {
public:
// Offset for each row.
HostDeviceVector<size_t> offset;
/*! \brief the data of the segments */
HostDeviceVector<Entry> data;
size_t base_rowid;
/*! \brief an instance of sparse vector in the batch */
using Inst = common::Span<Entry const>;
/*! \brief get i-th row from the batch */
inline Inst operator[](size_t i) const {
const auto& data_vec = data.HostVector();
const auto& offset_vec = offset.HostVector();
size_t size;
// in distributed mode, some partitions may not get any instance for a feature. Therefore
// we should set the size as zero
if (rabit::IsDistributed() && i + 1 >= offset_vec.size()) {
size = 0;
} else {
size = offset_vec[i + 1] - offset_vec[i];
}
return {data_vec.data() + offset_vec[i],
static_cast<Inst::index_type>(size)};
}
/*! \brief constructor */
SparsePage() {
this->Clear();
}
/*! \return number of instance in the page */
inline size_t Size() const {
return offset.Size() - 1;
}
/*! \return estimation of memory cost of this page */
inline size_t MemCostBytes() const {
return offset.Size() * sizeof(size_t) + data.Size() * sizeof(Entry);
}
/*! \brief clear the page */
inline void Clear() {
base_rowid = 0;
auto& offset_vec = offset.HostVector();
offset_vec.clear();
offset_vec.push_back(0);
data.HostVector().clear();
}
SparsePage GetTranspose(int num_columns) const {
SparsePage transpose;
common::ParallelGroupBuilder<Entry> builder(&transpose.offset.HostVector(),
&transpose.data.HostVector());
const int nthread = omp_get_max_threads();
builder.InitBudget(num_columns, nthread);
long batch_size = static_cast<long>(this->Size()); // NOLINT(*)
#pragma omp parallel for schedule(static)
for (long i = 0; i < batch_size; ++i) { // NOLINT(*)
int tid = omp_get_thread_num();
auto inst = (*this)[i];
for (bst_uint j = 0; j < inst.size(); ++j) {
builder.AddBudget(inst[j].index, tid);
}
}
builder.InitStorage();
#pragma omp parallel for schedule(static)
for (long i = 0; i < batch_size; ++i) { // NOLINT(*)
int tid = omp_get_thread_num();
auto inst = (*this)[i];
for (bst_uint j = 0; j < inst.size(); ++j) {
builder.Push(
inst[j].index,
Entry(static_cast<bst_uint>(this->base_rowid + i), inst[j].fvalue),
tid);
}
}
return transpose;
}
void SortRows() {
auto ncol = static_cast<bst_omp_uint>(this->Size());
#pragma omp parallel for schedule(dynamic, 1)
for (bst_omp_uint i = 0; i < ncol; ++i) {
if (this->offset.HostVector()[i] < this->offset.HostVector()[i + 1]) {
std::sort(
this->data.HostVector().begin() + this->offset.HostVector()[i],
this->data.HostVector().begin() + this->offset.HostVector()[i + 1],
Entry::CmpValue);
}
}
}
/*!
* \brief Push row block into the page.
* \param batch the row batch.
*/
void Push(const dmlc::RowBlock<uint32_t>& batch);
/*!
* \brief Push a sparse page
* \param batch the row page
*/
void Push(const SparsePage &batch);
/*!
* \brief Push a SparsePage stored in CSC format
* \param batch The row batch to be pushed
*/
void PushCSC(const SparsePage& batch);
/*!
* \brief Push one instance into page
* \param inst an instance row
*/
inline void Push(const Inst &inst) {
auto& data_vec = data.HostVector();
auto& offset_vec = offset.HostVector();
offset_vec.push_back(offset_vec.back() + inst.size());
size_t begin = data_vec.size();
data_vec.resize(begin + inst.size());
if (inst.size() != 0) {
std::memcpy(dmlc::BeginPtr(data_vec) + begin, inst.data(),
sizeof(Entry) * inst.size());
}
}
size_t Size() { return offset.Size() - 1; }
};
/*! \brief Abstract implementation behind BatchIterator; subclasses supply
 *  the actual batch source. Clone() must return a deep, independent copy. */
class BatchIteratorImpl {
public:
virtual ~BatchIteratorImpl() {}
/*! \brief Deep-copy this iterator state (caller owns the result). */
virtual BatchIteratorImpl* Clone() = 0;
/*! \brief Access the current batch. */
virtual SparsePage& operator*() = 0;
virtual const SparsePage& operator*() const = 0;
/*! \brief Advance to the next batch. */
virtual void operator++() = 0;
/*! \brief Whether the iterator is exhausted. */
virtual bool AtEnd() const = 0;
};
/*! \brief Forward iterator over SparsePage batches, wrapping a cloneable
 *  BatchIteratorImpl. Intended for range-based for loops via BatchSet. */
class BatchIterator {
public:
using iterator_category = std::forward_iterator_tag;
/*! \brief Takes ownership of the supplied implementation (may be nullptr
 *  for the end() sentinel). */
explicit BatchIterator(BatchIteratorImpl* impl) { impl_.reset(impl); }
/*! \brief Copying clones the underlying implementation (deep copy). */
BatchIterator(const BatchIterator& other) {
if (other.impl_) {
impl_.reset(other.impl_->Clone());
} else {
impl_.reset();
}
}
void operator++() {
CHECK(impl_ != nullptr);
++(*impl_);
}
SparsePage& operator*() {
CHECK(impl_ != nullptr);
return *(*impl_);
}
const SparsePage& operator*() const {
CHECK(impl_ != nullptr);
return *(*impl_);
}
/*! \brief Inequality deliberately ignores rhs: it only reports whether
 *  *this* iterator is exhausted, which is sufficient for comparison
 *  against the end() sentinel in a range-based for loop (the only
 *  supported usage pattern). */
bool operator!=(const BatchIterator& rhs) const {
CHECK(impl_ != nullptr);
return !impl_->AtEnd();
}
bool AtEnd() const {
CHECK(impl_ != nullptr);
return impl_->AtEnd();
}
private:
std::unique_ptr<BatchIteratorImpl> impl_;
};
/*! \brief Range object over batches: begin() yields the stored iterator and
 *  end() an empty sentinel (only its AtEnd() state matters, see
 *  BatchIterator::operator!=). */
class BatchSet {
public:
explicit BatchSet(BatchIterator begin_iter) : begin_iter_(begin_iter) {}
BatchIterator begin() { return begin_iter_; }
BatchIterator end() { return BatchIterator(nullptr); }
private:
BatchIterator begin_iter_;
};
/*!
* \brief This is data structure that user can pass to DMatrix::Create
* to create a DMatrix for training, user can create this data structure
* for customized Data Loading on single machine.
*
* On distributed setting, usually an customized dmlc::Parser is needed instead.
*/
class DataSource : public dmlc::DataIter<SparsePage> {
public:
/*!
* \brief Meta information about the dataset
* The subclass need to be able to load this correctly from data.
*/
MetaInfo info;
};
/*!
* \brief A vector-like structure to represent set of rows.
* But saves the memory when all rows are in the set (common case in bgx)
*/
class RowSet {
public:
/*! \return i-th row index (identity mapping while the set is dense) */
inline bst_uint operator[](size_t i) const;
/*! \return the size of the set. */
inline size_t Size() const;
/*! \brief push the index back to the set */
inline void PushBack(bst_uint i);
/*! \brief clear the set */
inline void Clear();
/*!
* \brief save rowset to file.
* \param fo The file to be saved.
*/
inline void Save(dmlc::Stream* fo) const;
/*!
* \brief Load rowset from file.
* \param fi The file to be loaded.
* \return if read is successful.
*/
inline bool Load(dmlc::Stream* fi);
/*! \brief constructor */
RowSet() = default;
private:
/*! \brief The internal data structure of size */
uint64_t size_{0};
/*! \brief Explicit row indices; kept empty while the set is exactly
* [0, size_) -- the common dense case -- to save memory. */
std::vector<bst_uint> rows_;
};
/*!
* \brief Internal data structured used by tsooBGX during training.
* There are two ways to create a customized DMatrix that reads in user defined-format.
*
* - Provide a dmlc::Parser and pass into the DMatrix::Create
* - Alternatively, if data can be represented by an URL, define a new dmlc::Parser and register by DMLC_REGISTER_DATA_PARSER;
* - This works best for user defined data input source, such as data-base, filesystem.
* - Provide a DataSource, that can be passed to DMatrix::Create
* This can be used to re-use inmemory data structure into DMatrix.
*/
class DMatrix {
public:
/*! \brief default constructor */
DMatrix() = default;
/*! \brief meta information of the dataset */
virtual MetaInfo& Info() = 0;
/*! \brief meta information of the dataset */
virtual const MetaInfo& Info() const = 0;
/**
* \brief Gets row batches. Use range based for loop over BatchSet to access individual batches.
*/
virtual BatchSet GetRowBatches() = 0;
/*! \brief Gets column batches with entries sorted by feature value. */
virtual BatchSet GetSortedColumnBatches() = 0;
/*! \brief Gets column (CSC) batches. */
virtual BatchSet GetColumnBatches() = 0;
// the following are column meta data, should be able to answer them fast.
/*! \return Whether the data columns are stored in a single column block. */
virtual bool SingleColBlock() const = 0;
/*! \brief get column density */
virtual float GetColDensity(size_t cidx) = 0;
/*! \brief virtual destructor */
virtual ~DMatrix() = default;
/*!
* \brief Save DMatrix to local file.
* The saved file only works for non-sharded dataset(single machine training).
* This API is deprecated and its use is discouraged.
* \param fname The file name to be saved.
*/
virtual void SaveToLocalFile(const std::string& fname);
/*!
* \brief Load DMatrix from URI.
* \param uri The URI of input.
* \param silent Whether print information during loading.
* \param load_row_split Flag to read in part of rows, divided among the workers in distributed mode.
* \param file_format The format type of the file, used for dmlc::Parser::Create.
* By default "auto" will be able to load local binary files.
* \param page_size Page size for external memory.
* \return The created DMatrix.
*/
static DMatrix* Load(const std::string& uri,
bool silent,
bool load_row_split,
const std::string& file_format = "auto",
const size_t page_size = kPageSize);
/*!
* \brief create a new DMatrix, by wrapping a row_iterator, and meta info.
* \param source The source iterator of the data, the create function takes ownership of the source.
* \param cache_prefix The path to prefix of temporary cache file of the DMatrix when used in external memory mode.
* This can be nullptr for common cases, and in-memory mode will be used.
* \return a Created DMatrix.
*/
static DMatrix* Create(std::unique_ptr<DataSource>&& source,
const std::string& cache_prefix = "");
/*!
* \brief Create a DMatrix by loading data from parser.
* Parser can later be deleted after the DMatrix is created.
* \param parser The input data parser
* \param cache_prefix The path to prefix of temporary cache file of the DMatrix when used in external memory mode.
* This can be nullptr for common cases, and in-memory mode will be used.
* \param page_size Page size for external memory.
* \sa dmlc::Parser
* \note dmlc-core provides efficient distributed data parser for libsvm format.
* User can create and register customized parser to load their own format using DMLC_REGISTER_DATA_PARSER.
* See "dmlc-core/include/dmlc/data.h" for detail.
* \return A created DMatrix.
*/
static DMatrix* Create(dmlc::Parser<uint32_t>* parser,
const std::string& cache_prefix = "",
const size_t page_size = kPageSize);
/*! \brief page size 32 MB */
static const size_t kPageSize = 32UL << 20UL;
};
// implementation of inline functions
/*! \brief Fetch the i-th row index; identity mapping while the set is dense. */
inline bst_uint RowSet::operator[](size_t i) const {
  if (rows_.empty()) {
    return static_cast<bst_uint>(i);
  }
  return rows_[i];
}
/*! \brief Number of rows in the set (maintained by PushBack/Clear). */
inline size_t RowSet::Size() const {
return size_;
}
/*! \brief Reset to an empty set; also restores the dense representation. */
inline void RowSet::Clear() {
  rows_.clear();
  size_ = 0;
}
/*!
 * \brief Append row index i to the set.
 * While indices arrive consecutively the set stays in its implicit dense
 * form (just a counter); the first out-of-order index materializes the
 * explicit vector. The inner loop variable was renamed from `i` to `j`,
 * which shadowed the parameter (flagged by -Wshadow); behavior unchanged.
 */
inline void RowSet::PushBack(bst_uint i) {
  if (rows_.size() == 0) {
    // Dense fast path: the next consecutive row extends [0, size_).
    if (i == size_) {
      ++size_;
      return;
    }
    // Switch to the explicit representation: materialize 0..size_-1 first.
    rows_.resize(size_);
    for (size_t j = 0; j < size_; ++j) {
      rows_[j] = static_cast<bst_uint>(j);
    }
  }
  rows_.push_back(i);
  ++size_;
}
/*! \brief Serialize the set: the explicit row vector first, then the raw
 *  size_ counter. Both fields are always written. */
inline void RowSet::Save(dmlc::Stream* fo) const {
fo->Write(rows_);
fo->Write(&size_, sizeof(size_));
}
/*!
 * \brief Load rowset from file.
 * Save() always writes rows_ followed by size_, so both fields are read
 * back unconditionally. (Previously size_ was skipped whenever rows_ came
 * back non-empty, which left size_ stale and the stream positioned in the
 * middle of the record.)
 * \param fi The file to be loaded.
 * \return if read is successful.
 */
inline bool RowSet::Load(dmlc::Stream* fi) {
  if (!fi->Read(&rows_)) return false;
  return fi->Read(&size_, sizeof(size_)) == sizeof(size_);
}
} // namespace tsoobgx
namespace dmlc {
DMLC_DECLARE_TRAITS(is_pod, tsoobgx::Entry, true);
DMLC_DECLARE_TRAITS(has_saveload, tsoobgx::RowSet, true);
}
#endif // TSOOBGX_DATA_H_
|
build_list.h | #pragma once
/******************************************************************************
*
* mfmm
* A high-performance fast multipole method library using C++.
*
* A fork of ExaFMM (BSD-3-Clause lisence).
* Originally copyright Wang, Yokota and Barba.
*
* Modifications copyright HJA Bird.
*
******************************************************************************/
#ifndef INCLUDE_MFMM_BUILD_LIST_H_
#define INCLUDE_MFMM_BUILD_LIST_H_
#include <queue>
#include <set>
#include <unordered_map>
#include <unordered_set>
#include "fmm.h"
#include "geometry.h"
#include "mfmm.h"
#include "octree_location.h"
namespace mfmm {
/** Generate the mapping from Morton keys to node indices in the tree.
* @param nodes Tree.
* @return Keys to indices mapping.
*/
template <typename T>
std::unordered_map<octree_location, size_t> get_key2id(const Nodes<T>& nodes) {
std::unordered_map<octree_location, size_t> key2id;
for (size_t i = 0; i < nodes.size(); ++i) {
key2id[nodes[i].location()] = nodes[i].index();
}
return key2id;
}
/** Generate the set of keys of all leaf nodes.
* @param nodes Tree.
* @return Set of all leaf keys with level offset.
*/
template <typename T>
std::unordered_set<octree_location> get_leaf_keys(const Nodes<T>& nodes) {
// we cannot use leafs to generate leaf keys, since it does not include
// empty leaf nodes where ntrgs and nsrcs are 0.
std::unordered_set<octree_location> leafKeys;
for (size_t i = 0; i < nodes.size(); ++i) {
if (nodes[i].is_leaf()) {
leafKeys.insert(nodes[i].location());
}
}
return leafKeys;
}
/** Given the 3D index of an octant and its depth, return the key of
* the leaf that contains the octant. If such leaf does not exist, return the
* key of the original octant.
* @param iX Integer index of the octant.
* @param level The level of the octant.
* @return Morton index with level offset.
*/
// 'inline' is required here: this is a non-template function defined in a
// header, so without it every translation unit including this header emits
// its own definition, violating the one-definition rule at link time.
inline octree_location find_key(const ivec3& iX, int level,
                                const std::unordered_set<octree_location>& leafKeys) {
  octree_location originalKey(iX, level);
  octree_location currentKey = originalKey;
  // Walk up the tree until an existing leaf containing the octant is found.
  while (level > 0) {
    if (leafKeys.find(currentKey) != leafKeys.end()) {  // key is a leaf
      return currentKey;
    } else {  // else go 1 level up
      currentKey = currentKey.parent();
      level--;
    }
  }
  // No enclosing leaf exists: fall back to the original octant's key.
  return originalKey;
}
/** Build lists for P2P, P2L and M2P operators for a given node.
* @param node Node.
* @param nodes Tree.
* @param leafKeys The set of all leaf keys.
* @param key2id The mapping from a node's key to its index in the tree.
*/
template <typename FmmT>
void build_other_list(
    Node<typename FmmT::potential_t>* node,
    Nodes<typename FmmT::potential_t>& nodes, const FmmT& fmm,
    const std::unordered_set<octree_location>& leafKeys,
    const std::unordered_map<octree_location, size_t>& key2id) {
  using node_t = Node<typename FmmT::potential_t>;
  // Sets de-duplicate nodes discovered through multiple search directions.
  std::set<node_t*> p2pSet, m2pSet, p2lSet;
  node_t& currentNode = *node;
  if (currentNode.location() != octree_location(0, 0)) {  // root has no neighbors
    node_t* parent = currentNode.parent();
    ivec3 min3dIdx = {0, 0, 0};
    ivec3 max3dIdx = ivec3::Ones(3) * (1 << node->location().level());
    ivec3 current3dIdx = currentNode.location().get_3D_index();
    ivec3 parent3dIdx = parent->location().get_3D_index();
    // Search the 6x6x6 block of same-level octants spanned by the parent's
    // neighborhood (every octant that can interact with this node).
    for (int i = -2; i < 4; i++) {
      for (int j = -2; j < 4; j++) {
        for (int k = -2; k < 4; k++) {
          ivec3 direction{i, j, k};
          direction += parent3dIdx * 2;
          if ((direction.array() >= min3dIdx.array()).all() &&
              (direction.array() < max3dIdx.array()).all() &&
              direction != current3dIdx) {
            octree_location resKey =
                find_key(direction, currentNode.location().level(), leafKeys);
            bool adj = resKey.is_adjacent(currentNode.location());
            node_t& res = nodes[key2id.at(resKey)];
            if (res.location().level() <
                currentNode.location().level()) {  // res is a coarser leaf
              if (adj) {
                if (currentNode.is_leaf()) {
                  p2pSet.insert(&res);
                }
              } else {
                // Non-adjacent coarse leaf: direct P2P only when the target
                // count is small enough, otherwise translate via P2L.
                if (currentNode.is_leaf() &&
                    currentNode.num_targets() <= fmm.m_numSurf) {
                  p2pSet.insert(&res);
                } else {
                  p2lSet.insert(&res);
                }
              }
            }
            if (res.location().level() ==
                currentNode.location().level()) {  // res is a colleague
              if (adj) {
                if (currentNode.is_leaf()) {
                  // Breadth-first descent into the adjacent colleague: leaves
                  // and non-adjacent small nodes go to P2P, other
                  // non-adjacent descendants to M2P.
                  std::queue<node_t*> buffer;
                  buffer.push(&res);
                  while (!buffer.empty()) {
                    node_t& temp = *buffer.front();
                    buffer.pop();
                    if (!temp.location().is_adjacent(currentNode.location())) {
                      if (temp.is_leaf() &&
                          temp.num_sources() <= fmm.m_numSurf) {
                        p2pSet.insert(&temp);
                      } else {
                        m2pSet.insert(&temp);
                      }
                    } else {
                      if (temp.is_leaf()) {
                        p2pSet.insert(&temp);
                      } else {
                        // Loop variable renamed from `i` (shadowed the outer
                        // direction index).
                        for (int c = 0; c < NCHILD; c++) {
                          if (temp.has_child(c)) {
                            buffer.push(&temp.child(c));
                          }
                        }
                      }
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
  }
  if (currentNode.is_leaf()) {
    // A leaf always interacts with itself directly. (Fixed mojibake: the
    // original text "&currentNode" had been mangled into "¤tNode" by an
    // HTML-entity round trip.)
    p2pSet.insert(&currentNode);
  }
  // Copy the de-duplicated sets into the node's interaction lists.
  for (auto it = p2pSet.begin(); it != p2pSet.end(); it++) {
    if ((*it) != nullptr) {
      currentNode.P2Plist().push_back(*it);
    }
  }
  for (auto it = p2lSet.begin(); it != p2lSet.end(); it++) {
    if ((*it) != nullptr) {
      currentNode.P2Llist().push_back(*it);
    }
  }
  for (auto it = m2pSet.begin(); it != m2pSet.end(); it++) {
    if ((*it) != nullptr) {
      currentNode.M2Plist().push_back(*it);
    }
  }
}
/** Build M2L interaction list for a given node.
* @param node Node.
* @param nodes Tree.
* @param key2id The mapping from a node's key to its index in the tree.
*/
template <typename T>
void build_M2L_list(Node<T>* node, Nodes<T>& nodes,
const std::unordered_map<octree_location, size_t>& key2id) {
using node_t = Node<T>;
// The M2L list is positional: slot index = REL_COORD_M2L.hash(relativeCoord);
// unused slots stay nullptr.
node->M2Llist().resize(REL_COORD_M2L.size(), nullptr);
node_t& currentNode = *node;
ivec3 min3dIdx = {0, 0, 0};
ivec3 max3dIdx = ivec3::Ones(3) * (1 << currentNode.location().level());
// Only non-leaf nodes receive M2L contributions here.
if (!currentNode.is_leaf()) {
ivec3 current3dIdx = currentNode.location().get_3D_index();
// Examine the 26 same-level neighbors (3x3x3 cube minus the center).
for (int i = -1; i <= 1; i++) {
for (int j = -1; j <= 1; j++) {
for (int k = -1; k <= 1; k++) {
if (i || j || k) { // exclude current node itself
ivec3 relativeCoord{i, j, k};
ivec3 nearby3dIdx = current3dIdx + relativeCoord;
// Skip neighbors that fall outside the domain at this level.
if ((nearby3dIdx.array() >= min3dIdx.array()).all() &&
(nearby3dIdx.array() < max3dIdx.array()).all()) {
octree_location nearbyLoc(nearby3dIdx,
currentNode.location().level());
// The neighbor octant may be empty (absent from the tree).
if (key2id.find(nearbyLoc) != key2id.end()) {
node_t& nearbyNode = nodes[key2id.at(nearbyLoc)];
if (!nearbyNode.is_leaf()) {
size_t idx = REL_COORD_M2L.hash(relativeCoord);
currentNode.M2Llist()[idx] = &nearbyNode;
}
}
}
}
}
}
}
}
}
/** Build lists for all operators for all nodes in the tree.
* @param nodes Tree.
* @param fmm The FMM instance.
*/
/** Construct every interaction list (M2L, P2P, P2L, M2P) for each tree node.
 * Nodes are processed independently, so the loop is parallelized with
 * dynamic scheduling to balance the uneven per-node work.
 */
template <typename FmmT>
void build_list(Nodes<typename FmmT::potential_t>& nodes, const FmmT& fmm) {
  using node_t = Node<typename FmmT::potential_t>;
  const std::unordered_map<octree_location, size_t> key2id = get_key2id(nodes);
  const std::unordered_set<octree_location> leafKeys = get_leaf_keys(nodes);
#pragma omp parallel for schedule(dynamic)
  for (int idx = 0; idx < static_cast<int>(nodes.size()); idx++) {
    node_t* current = &nodes[idx];
    build_M2L_list(current, nodes, key2id);
    build_other_list(current, nodes, fmm, leafKeys, key2id);
  }
}
} // namespace mfmm
#endif // INCLUDE_MFMM_BUILD_LIST_H_
|
exchange_boundary.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
// perform a (intra-level) ghost zone exchange on vector id
// NOTE exchange_boundary() only exchanges the boundary.
// It will not enforce any boundary conditions
// BC's are either the responsibility of a separate function or should be fused into the stencil
// The argument shape indicates which of faces, edges, and corners on each box must be exchanged
// If the specified shape exceeds the range of defined shapes, the code will default to STENCIL_SHAPE_BOX (i.e. exchange faces, edges, and corners)
//#if CD
//#include "cd.h"
//#endif
/*
 * Ghost-zone exchange for vector `id` on `level`.
 * Order is deliberate: prepost Irecv's, pack + Isend, then do the purely
 * local block copies to overlap with MPI progress, and finally wait and
 * unpack. blocks[0] = pack into send buffers, blocks[1] = local box-to-box
 * copies, blocks[2] = unpack from receive buffers.
 */
void exchange_boundary(level_type * level, int id, int shape){
double _timeCommunicationStart = getTime();
double _timeStart,_timeEnd;
if(shape>=STENCIL_MAX_SHAPES)shape=STENCIL_SHAPE_BOX; // shape must be < STENCIL_MAX_SHAPES in order to safely index into exchange_ghosts[]
// embed the shape in the low 4 bits of the tag so concurrent exchanges with
// different shapes cannot match each other's messages
int my_tag = (level->tag<<4) | shape;
int buffer=0;
int n;
#ifdef USE_MPI
// requests[] holds the receive requests first, then the send requests, so a
// single Waitall below covers both
int nMessages = level->exchange_ghosts[shape].num_recvs + level->exchange_ghosts[shape].num_sends;
MPI_Request *recv_requests = level->exchange_ghosts[shape].requests;
MPI_Request *send_requests = level->exchange_ghosts[shape].requests + level->exchange_ghosts[shape].num_recvs;
// loop through packed list of MPI receives and prepost Irecv's...
if(level->exchange_ghosts[shape].num_recvs>0){
_timeStart = getTime();
#ifdef USE_MPI_THREAD_MULTIPLE
#pragma omp parallel for schedule(dynamic,1)
#endif
for(n=0;n<level->exchange_ghosts[shape].num_recvs;n++){
MPI_Irecv(level->exchange_ghosts[shape].recv_buffers[n],
level->exchange_ghosts[shape].recv_sizes[n],
MPI_DOUBLE,
level->exchange_ghosts[shape].recv_ranks[n],
my_tag,
MPI_COMM_WORLD,
&recv_requests[n]
);
}
_timeEnd = getTime();
level->timers.ghostZone_recv += (_timeEnd-_timeStart);
}
// pack MPI send buffers...
if(level->exchange_ghosts[shape].num_blocks[0]){
_timeStart = getTime();
PRAGMA_THREAD_ACROSS_BLOCKS(level,buffer,level->exchange_ghosts[shape].num_blocks[0])
for(buffer=0;buffer<level->exchange_ghosts[shape].num_blocks[0];buffer++){
CopyBlock(level,id,&level->exchange_ghosts[shape].blocks[0][buffer]);
}
_timeEnd = getTime();
level->timers.ghostZone_pack += (_timeEnd-_timeStart);
}
// loop through MPI send buffers and post Isend's...
if(level->exchange_ghosts[shape].num_sends>0){
_timeStart = getTime();
#ifdef USE_MPI_THREAD_MULTIPLE
#pragma omp parallel for schedule(dynamic,1)
#endif
for(n=0;n<level->exchange_ghosts[shape].num_sends;n++){
MPI_Isend(level->exchange_ghosts[shape].send_buffers[n],
level->exchange_ghosts[shape].send_sizes[n],
MPI_DOUBLE,
level->exchange_ghosts[shape].send_ranks[n],
my_tag,
MPI_COMM_WORLD,
&send_requests[n]
);
}
_timeEnd = getTime();
level->timers.ghostZone_send += (_timeEnd-_timeStart);
}
#endif
// exchange locally... try and hide within Isend latency...
if(level->exchange_ghosts[shape].num_blocks[1]){
_timeStart = getTime();
PRAGMA_THREAD_ACROSS_BLOCKS(level,buffer,level->exchange_ghosts[shape].num_blocks[1])
for(buffer=0;buffer<level->exchange_ghosts[shape].num_blocks[1];buffer++){
CopyBlock(level,id,&level->exchange_ghosts[shape].blocks[1][buffer]);
}
_timeEnd = getTime();
level->timers.ghostZone_local += (_timeEnd-_timeStart);
}
// wait for MPI to finish...
#ifdef USE_MPI
if(nMessages){
_timeStart = getTime();
MPI_Waitall(nMessages,level->exchange_ghosts[shape].requests,level->exchange_ghosts[shape].status);
_timeEnd = getTime();
level->timers.ghostZone_wait += (_timeEnd-_timeStart);
}
// unpack MPI receive buffers
if(level->exchange_ghosts[shape].num_blocks[2]){
_timeStart = getTime();
PRAGMA_THREAD_ACROSS_BLOCKS(level,buffer,level->exchange_ghosts[shape].num_blocks[2])
for(buffer=0;buffer<level->exchange_ghosts[shape].num_blocks[2];buffer++){
CopyBlock(level,id,&level->exchange_ghosts[shape].blocks[2][buffer]);
}
_timeEnd = getTime();
level->timers.ghostZone_unpack += (_timeEnd-_timeStart);
}
#endif
level->timers.ghostZone_total += (double)(getTime()-_timeCommunicationStart);
}
|
resize.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% RRRR EEEEE SSSSS IIIII ZZZZZ EEEEE %
% R R E SS I ZZ E %
% RRRR EEE SSS I ZZZ EEE %
% R R E SS I ZZ E %
% R R EEEEE SSSSS IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Image Resize Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/draw.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resize-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/version.h"
#if defined(MAGICKCORE_LQR_DELEGATE)
#include <lqr.h>
#endif
/*
Typedef declarations.
*/
struct _ResizeFilter
{
double
(*filter)(const double,const ResizeFilter *),
(*window)(const double,const ResizeFilter *),
support, /* filter region of support - the filter support limit */
window_support, /* window support, usually equal to support (expert only) */
scale, /* dimension scaling to fit window support (usually 1.0) */
blur, /* x-scale (blur-sharpen) */
coefficient[7]; /* cubic coefficients for BC-cubic filters */
ResizeWeightingFunctionType
filterWeightingType,
windowWeightingType;
size_t
signature; /* NOTE(review): presumably the MagickCoreSignature sanity
marker used to validate the struct -- confirm */
};
/*
Forward declaractions.
*/
static double
I0(double x),
BesselOrderOne(double),
Sinc(const double, const ResizeFilter *),
SincFast(const double, const ResizeFilter *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F i l t e r F u n c t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% These are the various filter and windowing functions that are provided.
%
% They are internal to this module only. See AcquireResizeFilterInfo() for
% details of the access to these functions, via the GetResizeFilterSupport()
% and GetResizeFilterWeight() API interface.
%
% The individual filter functions have this format...
%
% static MagickRealtype *FilterName(const double x,const double support)
%
% A description of each parameter follows:
%
% o x: the distance from the sampling point generally in the range of 0 to
% support. The GetResizeFilterWeight() ensures this a positive value.
%
% o resize_filter: current filter information. This allows function to
% access support, and possibly other pre-calculated information defining
% the functions.
%
*/
static double Blackman(const double x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
Blackman: 2nd order cosine windowing function:
0.42 + 0.5 cos(pi x) + 0.08 cos(2pi x)
Refactored by Chantal Racette and Nicolas Robidoux to one trig call and
five flops: substituting cos(2 pi x) = 2 cos^2(pi x) - 1 collapses the
window to 0.34 + 0.5 c + 0.16 c^2 with c = cos(pi x), evaluated below
in Horner form.
*/
const double cosine=cos((double) (MagickPI*x));
magick_unreferenced(resize_filter);
return(0.34+cosine*(0.5+cosine*0.16));
}
static double Bohman(const double x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
Bohman: 2nd order cosine windowing function:
(1-x) cos(pi x) + sin(pi x) / pi.
Refactored by Nicolas Robidoux to one trig call, one sqrt call, and 7 flops,
taking advantage of the fact that the support of Bohman is 1.0 (so that we
know that sin(pi x) >= 0, allowing sin to be recovered as
sqrt(1 - cos^2(pi x))).
*/
const double cosine=cos((double) (MagickPI*x));
const double sine=sqrt(1.0-cosine*cosine);
magick_unreferenced(resize_filter);
return((1.0-x)*cosine+(1.0/MagickPI)*sine);
}
static double Box(const double magick_unused(x),
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(x);
  magick_unreferenced(resize_filter);
  /*
    Box filter: a constant weighting function (all weights equal).  The
    result is deliberately NOT limited by the support, otherwise resize
    point sampling would break when it requests points beyond the nominal
    0.0 support size.
  */
  return(1.0);
}
static double Cosine(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Cosine window function: cos((pi/2)*x).
  */
  const double weight=cos((double) (MagickPI2*x));
  magick_unreferenced(resize_filter);
  return((double) weight);
}
static double CubicBC(const double x,const ResizeFilter *resize_filter)
{
  /*
    Cubic Filters using B,C determined values:
       Mitchell-Netravali  B = 1/3 C = 1/3  "Balanced" cubic spline filter
       Catmull-Rom         B = 0   C = 1/2  Interpolatory and exact on linears
       Spline              B = 1   C = 0    B-Spline Gaussian approximation
       Hermite             B = 0   C = 0    B-Spline interpolator

    See paper by Mitchell and Netravali, Reconstruction Filters in Computer
    Graphics Computer Graphics, Volume 22, Number 4, August 1988
    http://www.cs.utexas.edu/users/fussell/courses/cs384g/lectures/mitchell/
    Mitchell.pdf.

    Coefficents are determined from B,C values:
       P0 = (  6 - 2*B       )/6 = coeff[0]
       P1 =  0
       P2 = (-18 +12*B + 6*C )/6 = coeff[1]
       P3 = ( 12 - 9*B - 6*C )/6 = coeff[2]
       Q0 = (      8*B +24*C )/6 = coeff[3]
       Q1 = (    -12*B -48*C )/6 = coeff[4]
       Q2 = (      6*B +30*C )/6 = coeff[5]
       Q3 = (    - 1*B - 6*C )/6 = coeff[6]

    which are used to define the filter:
       P0 + P1*x + P2*x^2 + P3*x^3      0 <= x < 1
       Q0 + Q1*x + Q2*x^2 + Q3*x^3      1 <= x < 2

    which ensures function is continuous in value and derivative (slope).

    The coefficients are pre-computed from B,C in AcquireResizeFilter(), so
    only two Horner evaluations are needed here.  Note that P1 = 0, which is
    why the first polynomial below has no linear term (the inner factor
    starts at x^2).  x is expected to be non-negative (per the module notes,
    GetResizeFilterWeight() ensures a positive distance).
  */
  if (x < 1.0)
    return(resize_filter->coefficient[0]+x*(x*
      (resize_filter->coefficient[1]+x*resize_filter->coefficient[2])));
  if (x < 2.0)
    return(resize_filter->coefficient[3]+x*(resize_filter->coefficient[4]+x*
      (resize_filter->coefficient[5]+x*resize_filter->coefficient[6])));
  return(0.0);
}
static double CubicSpline(const double x,const ResizeFilter *resize_filter)
{
  /*
    Unified 2-, 3-, and 4-lobe cubic spline filters; the variant is chosen
    from the configured filter support size.  Each lobe is a cubic in
    (x - lobe_start), evaluated in Horner form with exact rational
    coefficients.
  */
  if (resize_filter->support <= 2.0)
    {
      /*
        2-lobe Spline filter.
      */
      if (x < 1.0)
        return(((x-9.0/5.0)*x-1.0/5.0)*x+1.0);
      if (x < 2.0)
        return(((-1.0/3.0*(x-1.0)+4.0/5.0)*(x-1.0)-7.0/15.0)*(x-1.0));
      return(0.0);
    }
  if (resize_filter->support <= 3.0)
    {
      /*
        3-lobe Spline filter.
      */
      if (x < 1.0)
        return(((13.0/11.0*x-453.0/209.0)*x-3.0/209.0)*x+1.0);
      if (x < 2.0)
        return(((-6.0/11.0*(x-1.0)+270.0/209.0)*(x-1.0)-156.0/209.0)*(x-1.0));
      if (x < 3.0)
        return(((1.0/11.0*(x-2.0)-45.0/209.0)*(x-2.0)+26.0/209.0)*(x-2.0));
      return(0.0);
    }
  /*
    4-lobe Spline filter.
  */
  if (x < 1.0)
    return(((49.0/41.0*x-6387.0/2911.0)*x-3.0/2911.0)*x+1.0);
  if (x < 2.0)
    return(((-24.0/41.0*(x-1.0)+4032.0/2911.0)*(x-1.0)-2328.0/2911.0)*(x-1.0));
  if (x < 3.0)
    return(((6.0/41.0*(x-2.0)-1008.0/2911.0)*(x-2.0)+582.0/2911.0)*(x-2.0));
  if (x < 4.0)
    return(((-1.0/41.0*(x-3.0)+168.0/2911.0)*(x-3.0)-97.0/2911.0)*(x-3.0));
  return(0.0);
}
static double Gaussian(const double x,const ResizeFilter *resize_filter)
{
  /*
    Gaussian with sigma = 1/2 (or as user specified):
      1D:      exp(-(x^2)/(2*sigma^2)) / (sqrt(2*PI)*sigma^2)
      2D/radial: exp(-(r^2)/(2*sigma^2)) / (PI*sigma^2)
    Only the normalization multiplier differs between the 1D and radial
    forms, and normalization is neither needed nor used when the Gaussian
    serves as a filter, so just the exponential is evaluated here.

    Pre-calculated constants (see AcquireResizeFilter()):
      coefficient[0] = sigma                    (informative only)
      coefficient[1] = 1/(2*sigma^2)            (the only one used here)
      coefficient[2] = 1/(sqrt(2*PI)*sigma^2)   (informative only)

    Keeping the gaussian 'sigma' value separate from the 'blur/support'
    settings allows special 'small sigma' gaussians without the filter
    'missing' pixels because the support becomes too small.
  */
  const double xx=x*x;
  return(exp((double) (-resize_filter->coefficient[1]*xx)));
}
static double Hann(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Hann: cosine window function: 0.5 + 0.5*cos(pi*x).
  */
  const double c=cos((double) (MagickPI*x));
  const double weight=0.5+0.5*c;
  magick_unreferenced(resize_filter);
  return(weight);
}
static double Hamming(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Hamming: offset cosine window function: 0.54 + 0.46*cos(pi*x).
  */
  const double c=cos((double) (MagickPI*x));
  const double weight=0.54+0.46*c;
  magick_unreferenced(resize_filter);
  return(weight);
}
static double Jinc(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Jinc(x) = BesselJ1(pi*x)/x, the cylindrical analog of sinc().  See
    Pratt "Digital Image Processing" p.97 for Jinc/Bessel functions,
    http://mathworld.wolfram.com/JincFunction.html and page 11 of
    http://www.ph.ed.ac.uk/%7ewjh/teaching/mo/slides/lens/lens.pdf

    The original "zoom" program by Paul Heckbert called this "Bessel", but
    "Jinc" is the more accurate name.  At x == 0 the limit value pi/2 is
    returned directly.
  */
  if (x != 0.0)
    return(BesselOrderOne(MagickPI*x)/x);
  return(0.5*MagickPI);
}
static double Kaiser(const double x,const ResizeFilter *resize_filter)
{
  /*
    Kaiser windowing function (Bessel window):
      I0(beta*sqrt(1-x^2)) / I0(beta)
    beta (coefficient[0]) is a free value from 5 to 8 (defaults to 6.5),
    though it is typically defined in terms of alpha*PI.  The normalization
    factor coefficient[1] = 1/I0(beta) is not strictly needed, but without
    it the filter has a large value at x=0, making it hard to compare with
    the other windowing functions.
  */
  const double beta=resize_filter->coefficient[0];
  const double normalize=resize_filter->coefficient[1];
  return(normalize*I0(beta*sqrt((double) (1.0-x*x))));
}
static double Lagrange(const double x,const ResizeFilter *resize_filter)
{
  double
    value;

  register ssize_t
    i;

  ssize_t
    n,
    order;

  /*
    Lagrange piecewise polynomial fit of sinc: N is the 'order' of the
    lagrange function and depends on the overall support window size of the
    filter. That is: for a support of 2, it gives a lagrange-4 (piecewise
    cubic function).

    "n" identifies the piece of the piecewise polynomial.

    See Survey: Interpolation Methods, IEEE Transactions on Medical Imaging,
    Vol 18, No 11, November 1999, p1049-1075, -- Equation 27 on p1064.
  */
  if (x > resize_filter->support)
    return(0.0);
  order=(ssize_t) (2.0*resize_filter->window_support);  /* number of pieces */
  n=(ssize_t) (resize_filter->window_support+x);        /* which piece */
  value=1.0;  /* double literal; was the float literal 1.0f */
  for (i=0; i < order; i++)
    if (i != n)
      value*=(n-i-x)/(n-i);  /* standard Lagrange basis product */
  return(value);
}
static double Quadratic(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    2nd order (quadratic) B-Spline approximation of a Gaussian.
  */
  if (x < 0.5)
    return(0.75-x*x);
  if (x < 1.5)
    {
      const double d=x-1.5;
      return(0.5*d*d);
    }
  return(0.0);
}
static double Sinc(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Scaled sinc(x) function using a trig call:
      sinc(x) == sin(pi*x)/(pi*x),
    with the removable singularity at x == 0 handled explicitly.
  */
  if (x == 0.0)
    return((double) 1.0);
  {
    const double pix=(double) (MagickPI*x);
    return(sin((double) pix)/pix);
  }
}
static double SincFast(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Approximations of the sinc function sin(pi x)/(pi x) over the interval
    [-4,4] constructed by Nicolas Robidoux and Chantal Racette with funding
    from the Natural Sciences and Engineering Research Council of Canada.

    Although the approximations are polynomials (for low order of
    approximation) and quotients of polynomials (for higher order of
    approximation) and consequently are similar in form to Taylor polynomials /
    Pade approximants, the approximations are computed with a completely
    different technique.

    Summary: These approximations are "the best" in terms of bang (accuracy)
    for the buck (flops). More specifically: Among the polynomial quotients
    that can be computed using a fixed number of flops (with a given "+ - * /
    budget"), the chosen polynomial quotient is the one closest to the
    approximated function with respect to maximum absolute relative error over
    the given interval.

    The Remez algorithm, as implemented in the boost library's minimax package,
    is the key to the construction: http://www.boost.org/doc/libs/1_36_0/libs/
    math/doc/sf_and_dist/html/math_toolkit/backgrounders/remez.html

    If outside of the interval of approximation, use the standard trig formula.
  */
  if (x > 4.0)
    {
      /* Outside [-4,4]: fall back to the exact sin(pi x)/(pi x) formula. */
      const double alpha=(double) (MagickPI*x);
      return(sin((double) alpha)/alpha);
    }
  {
    /*
      The approximations only depend on x^2 (sinc is an even function).
      The accuracy tier is selected at compile time from the quantum depth;
      higher depths need a tighter approximation.  In every tier the factor
      (xx-1)(xx-4)(xx-9)(xx-16) forces exact zeros at x = 1, 2, 3, 4, just
      like the true sinc.
    */
    const double xx = x*x;
#if MAGICKCORE_QUANTUM_DEPTH <= 8
    /*
      Maximum absolute relative error 6.3e-6 < 1/2^17.
    */
    const double c0 = 0.173610016489197553621906385078711564924e-2L;
    const double c1 = -0.384186115075660162081071290162149315834e-3L;
    const double c2 = 0.393684603287860108352720146121813443561e-4L;
    const double c3 = -0.248947210682259168029030370205389323899e-5L;
    const double c4 = 0.107791837839662283066379987646635416692e-6L;
    const double c5 = -0.324874073895735800961260474028013982211e-8L;
    const double c6 = 0.628155216606695311524920882748052490116e-10L;
    const double c7 = -0.586110644039348333520104379959307242711e-12L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#elif MAGICKCORE_QUANTUM_DEPTH <= 16
    /*
      Max. abs. rel. error 2.2e-8 < 1/2^25.
    */
    const double c0 = 0.173611107357320220183368594093166520811e-2L;
    const double c1 = -0.384240921114946632192116762889211361285e-3L;
    const double c2 = 0.394201182359318128221229891724947048771e-4L;
    const double c3 = -0.250963301609117217660068889165550534856e-5L;
    const double c4 = 0.111902032818095784414237782071368805120e-6L;
    const double c5 = -0.372895101408779549368465614321137048875e-8L;
    const double c6 = 0.957694196677572570319816780188718518330e-10L;
    const double c7 = -0.187208577776590710853865174371617338991e-11L;
    const double c8 = 0.253524321426864752676094495396308636823e-13L;
    const double c9 = -0.177084805010701112639035485248501049364e-15L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*(c7+xx*(c8+xx*c9))))))));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#else
    /*
      Max. abs. rel. error 1.2e-12 < 1/2^39.  This tier uses a rational
      approximation: numerator p divided by denominator q.
    */
    const double c0 = 0.173611111110910715186413700076827593074e-2L;
    const double c1 = -0.289105544717893415815859968653611245425e-3L;
    const double c2 = 0.206952161241815727624413291940849294025e-4L;
    const double c3 = -0.834446180169727178193268528095341741698e-6L;
    const double c4 = 0.207010104171026718629622453275917944941e-7L;
    const double c5 = -0.319724784938507108101517564300855542655e-9L;
    const double c6 = 0.288101675249103266147006509214934493930e-11L;
    const double c7 = -0.118218971804934245819960233886876537953e-13L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    const double d0 = 1.0L;
    const double d1 = 0.547981619622284827495856984100563583948e-1L;
    const double d2 = 0.134226268835357312626304688047086921806e-2L;
    const double d3 = 0.178994697503371051002463656833597608689e-4L;
    const double d4 = 0.114633394140438168641246022557689759090e-6L;
    const double q = d0+xx*(d1+xx*(d2+xx*(d3+xx*d4)));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)/q*p);
#endif
  }
}
static double Triangle(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    1st order (linear) B-Spline; bilinear interpolation; Tent 1D filter; or
    a Bartlett 2D Cone filter.  Also used as the Bartlett windowing function
    for Sinc().
  */
  return(x < 1.0 ? 1.0-x : 0.0);
}
static double Welch(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Welch parabolic windowing filter: 1-x^2 inside the unit support.
  */
  return(x < 1.0 ? 1.0-x*x : 0.0);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e R e s i z e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireResizeFilter() allocates the ResizeFilter structure. Choose from
% these filters:
%
% FIR (Finite impulse Response) Filters
% Box Triangle Quadratic
% Spline Hermite Catrom
% Mitchell
%
% IIR (Infinite impulse Response) Filters
% Gaussian Sinc Jinc (Bessel)
%
% Windowed Sinc/Jinc Filters
% Blackman Bohman Lanczos
% Hann Hamming Cosine
% Kaiser Welch Parzen
% Bartlett
%
% Special Purpose Filters
% Cubic SincFast LanczosSharp Lanczos2 Lanczos2Sharp
% Robidoux RobidouxSharp
%
% The users "-filter" selection is used to lookup the default 'expert'
% settings for that filter from a internal table. However any provided
% 'expert' settings (see below) may override this selection.
%
% FIR filters are used as is, and are limited to that filters support window
% (unless over-ridden). 'Gaussian' while classed as an IIR filter, is also
% simply clipped by its support size (currently 1.5 or approximately 3*sigma
% as recommended by many references)
%
%  The special 'cylindrical' filter flag will promote the default 4-lobed
%  Windowed Sinc filter to a 3-lobed Windowed Jinc equivalent, which is better
%  suited to this style of image resampling. This typically happens when using
%  such a filter for image distortions.
%
% SPECIFIC FILTERS:
%
% Directly requesting 'Sinc', 'Jinc' function as a filter will force the use
% of function without any windowing, or promotion for cylindrical usage. This
% is not recommended, except by image processing experts, especially as part
% of expert option filter function selection.
%
% Two forms of the 'Sinc' function are available: Sinc and SincFast. Sinc is
% computed using the traditional sin(pi*x)/(pi*x); it is selected if the user
% specifically specifies the use of a Sinc filter. SincFast uses highly
% accurate (and fast) polynomial (low Q) and rational (high Q) approximations,
% and will be used by default in most cases.
%
% The Lanczos filter is a special 3-lobed Sinc-windowed Sinc filter (promoted
% to Jinc-windowed Jinc for cylindrical (Elliptical Weighted Average) use).
% The Sinc version is the most popular windowed filter.
%
% LanczosSharp is a slightly sharpened (blur=0.9812505644269356 < 1) form of
% the Lanczos filter, specifically designed for EWA distortion (as a
% Jinc-Jinc); it can also be used as a slightly sharper orthogonal Lanczos
% (Sinc-Sinc) filter. The chosen blur value comes as close as possible to
% satisfying the following condition without changing the character of the
% corresponding EWA filter:
%
% 'No-Op' Vertical and Horizontal Line Preservation Condition: Images with
% only vertical or horizontal features are preserved when performing 'no-op"
% with EWA distortion.
%
% The Lanczos2 and Lanczos2Sharp filters are 2-lobe versions of the Lanczos
% filters. The 'sharp' version uses a blur factor of 0.9549963639785485,
% again chosen because the resulting EWA filter comes as close as possible to
% satisfying the above condition.
%
% Robidoux is another filter tuned for EWA. It is the Keys cubic filter
% defined by B=(228 - 108 sqrt(2))/199. Robidoux satisfies the "'No-Op'
% Vertical and Horizontal Line Preservation Condition" exactly, and it
% moderately blurs high frequency 'pixel-hash' patterns under no-op. It turns
% out to be close to both Mitchell and Lanczos2Sharp. For example, its first
% crossing is at (36 sqrt(2) + 123)/(72 sqrt(2) + 47), almost the same as the
% first crossing of Mitchell and Lanczos2Sharp.
%
%  RobidouxSharp is a slightly sharper version of Robidoux, some believe it
%  is too sharp. It is designed to minimize the maximum possible change in
%  a pixel value which is at one of the extremes (e.g., 0 or 255) under no-op
%  conditions. Amazingly Mitchell falls roughly between Robidoux and
%  RobidouxSharp, though this seems to have been pure coincidence.
%
% 'EXPERT' OPTIONS:
%
% These artifact "defines" are not recommended for production use without
% expert knowledge of resampling, filtering, and the effects they have on the
% resulting resampled (resized or distorted) image.
%
% They can be used to override any and all filter default, and it is
% recommended you make good use of "filter:verbose" to make sure that the
% overall effect of your selection (before and after) is as expected.
%
% "filter:verbose" controls whether to output the exact results of the
% filter selections made, as well as plotting data for graphing the
% resulting filter over the filters support range.
%
% "filter:filter" select the main function associated with this filter
% name, as the weighting function of the filter. This can be used to
% set a windowing function as a weighting function, for special
% purposes, such as graphing.
%
% If a "filter:window" operation has not been provided, a 'Box'
% windowing function will be set to denote that no windowing function is
% being used.
%
% "filter:window" Select this windowing function for the filter. While any
% filter could be used as a windowing function, using the 'first lobe' of
% that filter over the whole support window, using a non-windowing
% function is not advisible. If no weighting filter function is specified
% a 'SincFast' filter is used.
%
% "filter:lobes" Number of lobes to use for the Sinc/Jinc filter. This a
% simpler method of setting filter support size that will correctly
% handle the Sinc/Jinc switch for an operators filtering requirements.
% Only integers should be given.
%
% "filter:support" Set the support size for filtering to the size given.
% This not recommended for Sinc/Jinc windowed filters (lobes should be
% used instead). This will override any 'filter:lobes' option.
%
%    "filter:win-support" Scale windowing function to this size instead. This
%       causes the windowing (or self-windowing Lagrange filter) to act as if
%       the support window is much larger than what is actually supplied to
%       the calling operator. The filter however is still clipped to the
%       real support size given, by the support range supplied to the caller.
%       If unset this will equal the normal filter support size.
%
% "filter:blur" Scale the filter and support window by this amount. A value
% of > 1 will generally result in a more blurred image with more ringing
% effects, while a value <1 will sharpen the resulting image with more
% aliasing effects.
%
% "filter:sigma" The sigma value to use for the Gaussian filter only.
% Defaults to '1/2'. Using a different sigma effectively provides a
% method of using the filter as a 'blur' convolution. Particularly when
% using it for Distort.
%
%    "filter:b"
%    "filter:c" Override the preset B,C values for a Cubic filter.
%       If only one of these is given, it is assumed to be a 'Keys' type of
%       filter such that B+2C=1, where Keys 'alpha' value = C.
%
% Examples:
%
%    Set a true un-windowed Sinc filter with 8 lobes (very slow):
% -define filter:filter=Sinc
% -define filter:lobes=8
%
% Set an 8 lobe Lanczos (Sinc or Jinc) filter:
% -filter Lanczos
% -define filter:lobes=8
%
% The format of the AcquireResizeFilter method is:
%
% ResizeFilter *AcquireResizeFilter(const Image *image,
% const FilterType filter_type,const MagickBooleanType cylindrical,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filter: the filter type, defining a preset filter, window and support.
% The artifact settings listed above will override those selections.
%
% o blur: blur the filter by this amount, use 1.0 if unknown. Image
% artifact "filter:blur" will override this API call usage, including any
% internal change (such as for cylindrical usage).
%
% o radial: use a 1D orthogonal filter (Sinc) or 2D cylindrical (radial)
% filter (Jinc).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate ResizeFilter *AcquireResizeFilter(const Image *image,
const FilterType filter,const MagickBooleanType cylindrical,
ExceptionInfo *exception)
{
const char
*artifact;
FilterType
filter_type,
window_type;
double
B,
C,
value;
register ResizeFilter
*resize_filter;
/*
Table Mapping given Filter, into Weighting and Windowing functions.
    A 'Box' windowing function means it is a simple non-windowed filter.
An 'SincFast' filter function could be upgraded to a 'Jinc' filter if a
"cylindrical" is requested, unless a 'Sinc' or 'SincFast' filter was
specifically requested by the user.
WARNING: The order of this table must match the order of the FilterType
enumeration specified in "resample.h", or the filter names will not match
the filter being setup.
You can check filter setups with the "filter:verbose" expert setting.
*/
static struct
{
FilterType
filter,
window;
} const mapping[SentinelFilter] =
{
{ UndefinedFilter, BoxFilter }, /* Undefined (default to Box) */
{ PointFilter, BoxFilter }, /* SPECIAL: Nearest neighbour */
{ BoxFilter, BoxFilter }, /* Box averaging filter */
{ TriangleFilter, BoxFilter }, /* Linear interpolation filter */
{ HermiteFilter, BoxFilter }, /* Hermite interpolation filter */
{ SincFastFilter, HannFilter }, /* Hann -- cosine-sinc */
{ SincFastFilter, HammingFilter }, /* Hamming -- '' variation */
{ SincFastFilter, BlackmanFilter }, /* Blackman -- 2*cosine-sinc */
{ GaussianFilter, BoxFilter }, /* Gaussian blur filter */
{ QuadraticFilter, BoxFilter }, /* Quadratic Gaussian approx */
{ CubicFilter, BoxFilter }, /* General Cubic Filter, Spline */
{ CatromFilter, BoxFilter }, /* Cubic-Keys interpolator */
{ MitchellFilter, BoxFilter }, /* 'Ideal' Cubic-Keys filter */
{ JincFilter, BoxFilter }, /* Raw 3-lobed Jinc function */
{ SincFilter, BoxFilter }, /* Raw 4-lobed Sinc function */
{ SincFastFilter, BoxFilter }, /* Raw fast sinc ("Pade"-type) */
{ SincFastFilter, KaiserFilter }, /* Kaiser -- square root-sinc */
{ LanczosFilter, WelchFilter }, /* Welch -- parabolic (3 lobe) */
{ SincFastFilter, CubicFilter }, /* Parzen -- cubic-sinc */
{ SincFastFilter, BohmanFilter }, /* Bohman -- 2*cosine-sinc */
{ SincFastFilter, TriangleFilter }, /* Bartlett -- triangle-sinc */
{ LagrangeFilter, BoxFilter }, /* Lagrange self-windowing */
{ LanczosFilter, LanczosFilter }, /* Lanczos Sinc-Sinc filters */
{ LanczosSharpFilter, LanczosSharpFilter }, /* | these require */
{ Lanczos2Filter, Lanczos2Filter }, /* | special handling */
{ Lanczos2SharpFilter, Lanczos2SharpFilter },
{ RobidouxFilter, BoxFilter }, /* Cubic Keys tuned for EWA */
{ RobidouxSharpFilter, BoxFilter }, /* Sharper Cubic Keys for EWA */
{ LanczosFilter, CosineFilter }, /* Cosine window (3 lobes) */
{ SplineFilter, BoxFilter }, /* Spline Cubic Filter */
{ LanczosRadiusFilter, LanczosFilter }, /* Lanczos with integer radius */
{ CubicSplineFilter, BoxFilter }, /* CubicSpline (2/3/4 lobes) */
};
/*
Table mapping the filter/window from the above table to an actual function.
The default support size for that filter as a weighting function, the range
to scale with to use that function as a sinc windowing function, (typ 1.0).
Note that the filter_type -> function is 1 to 1 except for Sinc(),
SincFast(), and CubicBC() functions, which may have multiple filter to
function associations.
See "filter:verbose" handling below for the function -> filter mapping.
*/
static struct
{
double
(*function)(const double,const ResizeFilter*),
support, /* Default lobes/support size of the weighting filter. */
scale, /* Support when function used as a windowing function
Typically equal to the location of the first zero crossing. */
B,C; /* BC-spline coefficients, ignored if not a CubicBC filter. */
ResizeWeightingFunctionType weightingFunctionType;
} const filters[SentinelFilter] =
{
/* .--- support window (if used as a Weighting Function)
| .--- first crossing (if used as a Windowing Function)
| | .--- B value for Cubic Function
| | | .---- C value for Cubic Function
| | | | */
{ Box, 0.5, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Undefined (default to Box) */
{ Box, 0.0, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Point (special handling) */
{ Box, 0.5, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Box */
{ Triangle, 1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Triangle */
{ CubicBC, 1.0, 1.0, 0.0, 0.0, CubicBCWeightingFunction }, /* Hermite (cubic B=C=0) */
{ Hann, 1.0, 1.0, 0.0, 0.0, HannWeightingFunction }, /* Hann, cosine window */
{ Hamming, 1.0, 1.0, 0.0, 0.0, HammingWeightingFunction }, /* Hamming, '' variation */
{ Blackman, 1.0, 1.0, 0.0, 0.0, BlackmanWeightingFunction }, /* Blackman, 2*cosine window */
{ Gaussian, 2.0, 1.5, 0.0, 0.0, GaussianWeightingFunction }, /* Gaussian */
{ Quadratic, 1.5, 1.5, 0.0, 0.0, QuadraticWeightingFunction },/* Quadratic gaussian */
{ CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* General Cubic Filter */
{ CubicBC, 2.0, 1.0, 0.0, 0.5, CubicBCWeightingFunction }, /* Catmull-Rom (B=0,C=1/2) */
{ CubicBC, 2.0, 8.0/7.0, 1./3., 1./3., CubicBCWeightingFunction }, /* Mitchell (B=C=1/3) */
{ Jinc, 3.0, 1.2196698912665045, 0.0, 0.0, JincWeightingFunction }, /* Raw 3-lobed Jinc */
{ Sinc, 4.0, 1.0, 0.0, 0.0, SincWeightingFunction }, /* Raw 4-lobed Sinc */
{ SincFast, 4.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Raw fast sinc ("Pade"-type) */
{ Kaiser, 1.0, 1.0, 0.0, 0.0, KaiserWeightingFunction }, /* Kaiser (square root window) */
{ Welch, 1.0, 1.0, 0.0, 0.0, WelchWeightingFunction }, /* Welch (parabolic window) */
{ CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* Parzen (B-Spline window) */
{ Bohman, 1.0, 1.0, 0.0, 0.0, BohmanWeightingFunction }, /* Bohman, 2*Cosine window */
{ Triangle, 1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Bartlett (triangle window) */
{ Lagrange, 2.0, 1.0, 0.0, 0.0, LagrangeWeightingFunction }, /* Lagrange sinc approximation */
{ SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 3-lobed Sinc-Sinc */
{ SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Sharpened */
{ SincFast, 2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 2-lobed */
{ SincFast, 2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos2, sharpened */
/* Robidoux: Keys cubic close to Lanczos2D sharpened */
{ CubicBC, 2.0, 1.1685777620836932,
0.37821575509399867, 0.31089212245300067, CubicBCWeightingFunction },
/* RobidouxSharp: Sharper version of Robidoux */
{ CubicBC, 2.0, 1.105822933719019,
0.2620145123990142, 0.3689927438004929, CubicBCWeightingFunction },
{ Cosine, 1.0, 1.0, 0.0, 0.0, CosineWeightingFunction }, /* Low level cosine window */
{ CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* Cubic B-Spline (B=1,C=0) */
{ SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Interger Radius */
{ CubicSpline,2.0, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Spline Lobes 2-lobed */
};
/*
The known zero crossings of the Jinc() or more accurately the Jinc(x*PI)
function being used as a filter. It is used by the "filter:lobes" expert
setting and for 'lobes' for Jinc functions in the previous table. This way
users do not have to deal with the highly irrational lobe sizes of the Jinc
filter.
Values taken from
http://cose.math.bas.bg/webMathematica/webComputing/BesselZeros.jsp
using Jv-function with v=1, then dividing by PI.
*/
static double
jinc_zeros[16] =
{
1.2196698912665045,
2.2331305943815286,
3.2383154841662362,
4.2410628637960699,
5.2427643768701817,
6.2439216898644877,
7.2447598687199570,
8.2453949139520427,
9.2458926849494673,
10.246293348754916,
11.246622794877883,
12.246898461138105,
13.247132522181061,
14.247333735806849,
15.247508563037300,
16.247661874700962
};
/*
Allocate resize filter.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(UndefinedFilter < filter && filter < SentinelFilter);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
(void) exception;
resize_filter=(ResizeFilter *) AcquireCriticalMemory(sizeof(*resize_filter));
(void) memset(resize_filter,0,sizeof(*resize_filter));
/*
Defaults for the requested filter.
*/
filter_type=mapping[filter].filter;
window_type=mapping[filter].window;
resize_filter->blur=1.0;
/* Promote 1D Windowed Sinc Filters to a 2D Windowed Jinc filters */
if ((cylindrical != MagickFalse) && (filter_type == SincFastFilter) &&
(filter != SincFastFilter))
filter_type=JincFilter; /* 1D Windowed Sinc => 2D Windowed Jinc filters */
/* Expert filter setting override */
artifact=GetImageArtifact(image,"filter:filter");
if (IsStringTrue(artifact) != MagickFalse)
{
ssize_t
option;
option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
if ((UndefinedFilter < option) && (option < SentinelFilter))
{ /* Raw filter request - no window function. */
filter_type=(FilterType) option;
window_type=BoxFilter;
}
/* Filter override with a specific window function. */
artifact=GetImageArtifact(image,"filter:window");
if (artifact != (const char *) NULL)
{
option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
if ((UndefinedFilter < option) && (option < SentinelFilter))
window_type=(FilterType) option;
}
}
else
{
/* Window specified, but no filter function? Assume Sinc/Jinc. */
artifact=GetImageArtifact(image,"filter:window");
if (artifact != (const char *) NULL)
{
ssize_t
option;
option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
if ((UndefinedFilter < option) && (option < SentinelFilter))
{
filter_type= cylindrical != MagickFalse ? JincFilter
: SincFastFilter;
window_type=(FilterType) option;
}
}
}
/* Assign the real functions to use for the filters selected. */
resize_filter->filter=filters[filter_type].function;
resize_filter->support=filters[filter_type].support;
resize_filter->filterWeightingType=filters[filter_type].weightingFunctionType;
resize_filter->window=filters[window_type].function;
resize_filter->windowWeightingType=filters[window_type].weightingFunctionType;
resize_filter->scale=filters[window_type].scale;
resize_filter->signature=MagickCoreSignature;
/* Filter Modifications for orthogonal/cylindrical usage */
if (cylindrical != MagickFalse)
switch (filter_type)
{
case BoxFilter:
/* Support for Cylindrical Box should be sqrt(2)/2 */
resize_filter->support=(double) MagickSQ1_2;
break;
case LanczosFilter:
case LanczosSharpFilter:
case Lanczos2Filter:
case Lanczos2SharpFilter:
case LanczosRadiusFilter:
resize_filter->filter=filters[JincFilter].function;
resize_filter->window=filters[JincFilter].function;
resize_filter->scale=filters[JincFilter].scale;
/* number of lobes (support window size) remain unchanged */
break;
default:
break;
}
/* Global Sharpening (regardless of orthoginal/cylindrical) */
switch (filter_type)
{
case LanczosSharpFilter:
resize_filter->blur *= 0.9812505644269356;
break;
case Lanczos2SharpFilter:
resize_filter->blur *= 0.9549963639785485;
break;
/* case LanczosRadius: blur adjust is done after lobes */
default:
break;
}
/*
Expert Option Modifications.
*/
/* User Gaussian Sigma Override - no support change */
if ((resize_filter->filter == Gaussian) ||
(resize_filter->window == Gaussian) ) {
value=0.5; /* guassian sigma default, half pixel */
artifact=GetImageArtifact(image,"filter:sigma");
if (artifact != (const char *) NULL)
value=StringToDouble(artifact,(char **) NULL);
/* Define coefficents for Gaussian */
resize_filter->coefficient[0]=value; /* note sigma too */
resize_filter->coefficient[1]=PerceptibleReciprocal(2.0*value*value); /* sigma scaling */
resize_filter->coefficient[2]=PerceptibleReciprocal(Magick2PI*value*value);
/* normalization - not actually needed or used! */
if ( value > 0.5 )
resize_filter->support *= 2*value; /* increase support linearly */
}
/* User Kaiser Alpha Override - no support change */
if ((resize_filter->filter == Kaiser) ||
(resize_filter->window == Kaiser) ) {
value=6.5; /* default beta value for Kaiser bessel windowing function */
artifact=GetImageArtifact(image,"filter:alpha"); /* FUTURE: depreciate */
if (artifact != (const char *) NULL)
value=StringToDouble(artifact,(char **) NULL);
artifact=GetImageArtifact(image,"filter:kaiser-beta");
if (artifact != (const char *) NULL)
value=StringToDouble(artifact,(char **) NULL);
artifact=GetImageArtifact(image,"filter:kaiser-alpha");
if (artifact != (const char *) NULL)
value=StringToDouble(artifact,(char **) NULL)*MagickPI;
/* Define coefficents for Kaiser Windowing Function */
resize_filter->coefficient[0]=value; /* alpha */
resize_filter->coefficient[1]=PerceptibleReciprocal(I0(value));
/* normalization */
}
/* Support Overrides */
artifact=GetImageArtifact(image,"filter:lobes");
if (artifact != (const char *) NULL)
{
ssize_t
lobes;
lobes=(ssize_t) StringToLong(artifact);
if (lobes < 1)
lobes=1;
resize_filter->support=(double) lobes;
}
if (resize_filter->filter == Jinc)
{
/*
Convert a Jinc function lobes value to a real support value.
*/
if (resize_filter->support > 16)
resize_filter->support=jinc_zeros[15]; /* largest entry in table */
else
resize_filter->support=jinc_zeros[((long) resize_filter->support)-1];
/*
Blur this filter so support is a integer value (lobes dependant).
*/
if (filter_type == LanczosRadiusFilter)
resize_filter->blur*=floor(resize_filter->support)/
resize_filter->support;
}
/*
Expert blur override.
*/
artifact=GetImageArtifact(image,"filter:blur");
if (artifact != (const char *) NULL)
resize_filter->blur*=StringToDouble(artifact,(char **) NULL);
if (resize_filter->blur < MagickEpsilon)
resize_filter->blur=(double) MagickEpsilon;
/*
Expert override of the support setting.
*/
artifact=GetImageArtifact(image,"filter:support");
if (artifact != (const char *) NULL)
resize_filter->support=fabs(StringToDouble(artifact,(char **) NULL));
/*
Scale windowing function separately to the support 'clipping' window
that calling operator is planning to actually use. (Expert override)
*/
resize_filter->window_support=resize_filter->support; /* default */
artifact=GetImageArtifact(image,"filter:win-support");
if (artifact != (const char *) NULL)
resize_filter->window_support=fabs(StringToDouble(artifact,(char **) NULL));
/*
Adjust window function scaling to match windowing support for weighting
function. This avoids a division on every filter call.
*/
resize_filter->scale/=resize_filter->window_support;
/*
* Set Cubic Spline B,C values, calculate Cubic coefficients.
*/
B=0.0;
C=0.0;
if ((resize_filter->filter == CubicBC) ||
(resize_filter->window == CubicBC) )
{
B=filters[filter_type].B;
C=filters[filter_type].C;
if (filters[window_type].function == CubicBC)
{
B=filters[window_type].B;
C=filters[window_type].C;
}
artifact=GetImageArtifact(image,"filter:b");
if (artifact != (const char *) NULL)
{
B=StringToDouble(artifact,(char **) NULL);
C=(1.0-B)/2.0; /* Calculate C to get a Keys cubic filter. */
artifact=GetImageArtifact(image,"filter:c"); /* user C override */
if (artifact != (const char *) NULL)
C=StringToDouble(artifact,(char **) NULL);
}
else
{
artifact=GetImageArtifact(image,"filter:c");
if (artifact != (const char *) NULL)
{
C=StringToDouble(artifact,(char **) NULL);
B=1.0-2.0*C; /* Calculate B to get a Keys cubic filter. */
}
}
{
const double
twoB = B+B;
/*
Convert B,C values into Cubic Coefficents. See CubicBC().
*/
resize_filter->coefficient[0]=1.0-(1.0/3.0)*B;
resize_filter->coefficient[1]=-3.0+twoB+C;
resize_filter->coefficient[2]=2.0-1.5*B-C;
resize_filter->coefficient[3]=(4.0/3.0)*B+4.0*C;
resize_filter->coefficient[4]=-8.0*C-twoB;
resize_filter->coefficient[5]=B+5.0*C;
resize_filter->coefficient[6]=(-1.0/6.0)*B-C;
}
}
/*
Expert Option Request for verbose details of the resulting filter.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp master
{
#endif
if (IsStringTrue(GetImageArtifact(image,"filter:verbose")) != MagickFalse)
{
double
support,
x;
/*
Set the weighting function properly when the weighting function
may not exactly match the filter of the same name. EG: a Point
filter really uses a Box weighting function with a different
support than is typically used.
*/
if (resize_filter->filter == Box) filter_type=BoxFilter;
if (resize_filter->filter == Sinc) filter_type=SincFilter;
if (resize_filter->filter == SincFast) filter_type=SincFastFilter;
if (resize_filter->filter == Jinc) filter_type=JincFilter;
if (resize_filter->filter == CubicBC) filter_type=CubicFilter;
if (resize_filter->window == Box) window_type=BoxFilter;
if (resize_filter->window == Sinc) window_type=SincFilter;
if (resize_filter->window == SincFast) window_type=SincFastFilter;
if (resize_filter->window == Jinc) window_type=JincFilter;
if (resize_filter->window == CubicBC) window_type=CubicFilter;
/*
Report Filter Details.
*/
support=GetResizeFilterSupport(resize_filter); /* practical_support */
(void) FormatLocaleFile(stdout,
"# Resampling Filter (for graphing)\n#\n");
(void) FormatLocaleFile(stdout,"# filter = %s\n",
CommandOptionToMnemonic(MagickFilterOptions,filter_type));
(void) FormatLocaleFile(stdout,"# window = %s\n",
CommandOptionToMnemonic(MagickFilterOptions,window_type));
(void) FormatLocaleFile(stdout,"# support = %.*g\n",
GetMagickPrecision(),(double) resize_filter->support);
(void) FormatLocaleFile(stdout,"# window-support = %.*g\n",
GetMagickPrecision(),(double) resize_filter->window_support);
(void) FormatLocaleFile(stdout,"# scale-blur = %.*g\n",
GetMagickPrecision(),(double) resize_filter->blur);
if ((filter_type == GaussianFilter) || (window_type == GaussianFilter))
(void) FormatLocaleFile(stdout,"# gaussian-sigma = %.*g\n",
GetMagickPrecision(),(double) resize_filter->coefficient[0]);
if ( filter_type == KaiserFilter || window_type == KaiserFilter )
(void) FormatLocaleFile(stdout,"# kaiser-beta = %.*g\n",
GetMagickPrecision(),(double) resize_filter->coefficient[0]);
(void) FormatLocaleFile(stdout,"# practical-support = %.*g\n",
GetMagickPrecision(), (double) support);
if ((filter_type == CubicFilter) || (window_type == CubicFilter))
(void) FormatLocaleFile(stdout,"# B,C = %.*g,%.*g\n",
GetMagickPrecision(),(double) B,GetMagickPrecision(),(double) C);
(void) FormatLocaleFile(stdout,"\n");
/*
Output values of resulting filter graph -- for graphing filter result.
*/
for (x=0.0; x <= support; x+=0.01f)
(void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",x,
GetMagickPrecision(),(double)
GetResizeFilterWeight(resize_filter,x));
/*
A final value so gnuplot can graph the 'stop' properly.
*/
(void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",support,
GetMagickPrecision(),0.0);
}
/* Output the above once only for each image - remove setting */
(void) DeleteImageArtifact((Image *) image,"filter:verbose");
#if defined(MAGICKCORE_OPENMP_SUPPORT)
}
#endif
return(resize_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveResizeImage() adaptively resize image with pixel resampling.
%
% This is shortcut function for a fast interpolative resize using mesh
% interpolation. It works well for small resizes of less than +/- 50%
% of the original image size. For larger resizing on images a full
% filtered and slower resize function should be used instead.
%
% The format of the AdaptiveResizeImage method is:
%
% Image *AdaptiveResizeImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: an interpolative resize with mesh interpolation,
    suitable for small size changes (see banner comment above).
  */
  return(InterpolativeResizeImage(image,columns,rows,MeshInterpolatePixel,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ B e s s e l O r d e r O n e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BesselOrderOne() computes the Bessel function of x of the first kind of
% order 0. This is used to create the Jinc() filter function below.
%
% Reduce x to |x| since j1(x)= -j1(-x), and for x in (0,8]
%
% j1(x) = x*j1(x);
%
% For x in (8,inf)
%
% j1(x) = sqrt(2/(pi*x))*(p1(x)*cos(x1)-q1(x)*sin(x1))
%
% where x1 = x-3*pi/4. Compute sin(x1) and cos(x1) as follow:
%
% cos(x1) = cos(x)cos(3pi/4)+sin(x)sin(3pi/4)
% = 1/sqrt(2) * (sin(x) - cos(x))
% sin(x1) = sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
% = -1/sqrt(2) * (sin(x) + cos(x))
%
% The format of the BesselOrderOne method is:
%
% double BesselOrderOne(double x)
%
% A description of each parameter follows:
%
% o x: double value.
%
*/
#undef I0
static double I0(double x)
{
  /*
    Modified Bessel function of the first kind, order zero, computed from
    its power series: I0(x) = sum_{k>=0} (x^2/4)^k/(k!)^2.  Terms are
    accumulated until they drop below MagickEpsilon.
  */
  double
    result,
    term,
    y;

  register ssize_t
    k;

  result=1.0;
  y=x*x/4.0;
  term=y;
  for (k=2; term > MagickEpsilon; k++)
  {
    result+=term;
    term*=y/((double) k*k);
  }
  return(result);
}
#undef J1
static double J1(double x)
{
  /*
    Rational polynomial approximation to j1(x)/x on (0,8]: numerator and
    denominator are degree-8 polynomials in x^2 evaluated by Horner's
    rule.  BesselOrderOne() multiplies the result by x to recover j1(x).
  */
  double
    den,
    num;

  register ssize_t
    k;

  static const double
    Pone[] =
    {
      0.581199354001606143928050809e+21,
      -0.6672106568924916298020941484e+20,
      0.2316433580634002297931815435e+19,
      -0.3588817569910106050743641413e+17,
      0.2908795263834775409737601689e+15,
      -0.1322983480332126453125473247e+13,
      0.3413234182301700539091292655e+10,
      -0.4695753530642995859767162166e+7,
      0.270112271089232341485679099e+4
    },
    Qone[] =
    {
      0.11623987080032122878585294e+22,
      0.1185770712190320999837113348e+20,
      0.6092061398917521746105196863e+17,
      0.2081661221307607351240184229e+15,
      0.5243710262167649715406728642e+12,
      0.1013863514358673989967045588e+10,
      0.1501793594998585505921097578e+7,
      0.1606931573481487801970916749e+4,
      0.1e+1
    };

  num=Pone[8];
  den=Qone[8];
  for (k=7; k >= 0; k--)
  {
    num=num*x*x+Pone[k];
    den=den*x*x+Qone[k];
  }
  return(num/den);
}
#undef P1
static double P1(double x)
{
  /*
    Asymptotic rational approximation P1(x) used by BesselOrderOne() for
    x >= 8: numerator and denominator are degree-5 polynomials in
    (8/x)^2 evaluated by Horner's rule; the value tends to 1 as x grows.

    The loop-invariant quotient 8.0/x is hoisted out of the loop
    (removing two divisions per iteration); the multiplication order
    p*z*z matches the original p*(8.0/x)*(8.0/x), so the results are
    bit-identical.
  */
  double
    p,
    q,
    z;

  register ssize_t
    i;

  static const double
    Pone[] =
    {
      0.352246649133679798341724373e+5,
      0.62758845247161281269005675e+5,
      0.313539631109159574238669888e+5,
      0.49854832060594338434500455e+4,
      0.2111529182853962382105718e+3,
      0.12571716929145341558495e+1
    },
    Qone[] =
    {
      0.352246649133679798068390431e+5,
      0.626943469593560511888833731e+5,
      0.312404063819041039923015703e+5,
      0.4930396490181088979386097e+4,
      0.2030775189134759322293574e+3,
      0.1e+1
    };

  z=8.0/x;
  p=Pone[5];
  q=Qone[5];
  for (i=4; i >= 0; i--)
  {
    p=p*z*z+Pone[i];
    q=q*z*z+Qone[i];
  }
  return(p/q);
}
#undef Q1
static double Q1(double x)
{
  /*
    Asymptotic rational approximation Q1(x) used by BesselOrderOne() for
    x >= 8: numerator and denominator are degree-5 polynomials in
    (8/x)^2 evaluated by Horner's rule; the value tends to 3/64 as x
    grows.

    The loop-invariant quotient 8.0/x is hoisted out of the loop
    (removing two divisions per iteration); the multiplication order
    p*z*z matches the original p*(8.0/x)*(8.0/x), so the results are
    bit-identical.
  */
  double
    p,
    q,
    z;

  register ssize_t
    i;

  static const double
    Pone[] =
    {
      0.3511751914303552822533318e+3,
      0.7210391804904475039280863e+3,
      0.4259873011654442389886993e+3,
      0.831898957673850827325226e+2,
      0.45681716295512267064405e+1,
      0.3532840052740123642735e-1
    },
    Qone[] =
    {
      0.74917374171809127714519505e+4,
      0.154141773392650970499848051e+5,
      0.91522317015169922705904727e+4,
      0.18111867005523513506724158e+4,
      0.1038187585462133728776636e+3,
      0.1e+1
    };

  z=8.0/x;
  p=Pone[5];
  q=Qone[5];
  for (i=4; i >= 0; i--)
  {
    p=p*z*z+Pone[i];
    q=q*z*z+Qone[i];
  }
  return(p/q);
}
static double BesselOrderOne(double x)
{
  /*
    Bessel function of the first kind, order one.  Uses the rational fit
    J1() (which approximates j1(x)/x) for |x| < 8 and the asymptotic
    P1()/Q1() expansion otherwise; since j1(-x) = -j1(x), the sign of the
    argument is restored at the end.
  */
  double
    magnitude,
    signed_x,
    value;

  if (x == 0.0)
    return(0.0);
  signed_x=x;
  magnitude=fabs(x);
  if (magnitude < 8.0)
    return(signed_x*J1(magnitude));
  value=sqrt((double) (2.0/(MagickPI*magnitude)))*(P1(magnitude)*
    (1.0/sqrt(2.0)*(sin((double) magnitude)-cos((double) magnitude)))-
    8.0/magnitude*Q1(magnitude)*(-1.0/sqrt(2.0)*(sin((double) magnitude)+
    cos((double) magnitude))));
  if (signed_x < 0.0)
    value=(-value);
  return(value);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y R e s i z e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyResizeFilter() destroy the resize filter.
%
% The format of the DestroyResizeFilter method is:
%
% ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
% o resize_filter: the resize filter.
%
*/
MagickPrivate ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
{
  /*
    Invalidate the signature before releasing the memory so stale
    pointers to this filter trip the asserts in the accessor functions.
  */
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  resize_filter->signature=(~MagickCoreSignature);
  return((ResizeFilter *) RelinquishMagickMemory(resize_filter));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t R e s i z e F i l t e r S u p p o r t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetResizeFilterSupport() return the current support window size for this
% filter. Note that this may have been enlarged by filter:blur factor.
%
% The format of the GetResizeFilterSupport method is:
%
% double GetResizeFilterSupport(const ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
%    o resize_filter: the resize filter.
%
*/
/* Return the filter's coefficient array (for cubic filters these are the
   CubicBC() expansion coefficients computed in AcquireResizeFilter()). */
MagickPrivate double *GetResizeFilterCoefficient(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return((double *) resize_filter->coefficient);
}
/* Return the filter's blur factor (support scaling; 1.0 means none). */
MagickPrivate double GetResizeFilterBlur(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->blur);
}
/* Return the scale applied to the windowing function's argument. */
MagickPrivate double GetResizeFilterScale(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->scale);
}
/* Return the support size of the windowing (not the weighting) function. */
MagickPrivate double GetResizeFilterWindowSupport(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->window_support);
}
/* Return the enum identifying the weighting function in use. */
MagickPrivate ResizeWeightingFunctionType GetResizeFilterWeightingType(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->filterWeightingType);
}
/* Return the enum identifying the windowing function in use. */
MagickPrivate ResizeWeightingFunctionType GetResizeFilterWindowWeightingType(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->windowWeightingType);
}
/* Return the effective support: the base support enlarged by the blur
   factor (see the banner comment above). */
MagickPrivate double GetResizeFilterSupport(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->support*resize_filter->blur);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t R e s i z e F i l t e r W e i g h t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetResizeFilterWeight evaluates the specified resize filter at the point x
% which usually lies between zero and the filter's current 'support' and
% returns the weight of the filter function at that point.
%
% The format of the GetResizeFilterWeight method is:
%
% double GetResizeFilterWeight(const ResizeFilter *resize_filter,
% const double x)
%
% A description of each parameter follows:
%
% o filter: the filter type.
%
% o x: the point.
%
*/
MagickPrivate double GetResizeFilterWeight(const ResizeFilter *resize_filter,
  const double x)
{
  double
    window_weight,
    x_blur;

  /*
    Evaluate the filter at the (blur-scaled) offset x: the weighting
    function is modulated by the windowing function, except for Point or
    Box windows, where the window contributes a constant factor of one
    (this also avoids dividing by a zero window support).
  */
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  x_blur=fabs((double) x)/resize_filter->blur;
  if ((resize_filter->window_support < MagickEpsilon) ||
      (resize_filter->window == Box))
    window_weight=1.0;
  else
    window_weight=resize_filter->window(x_blur*resize_filter->scale,
      resize_filter);
  return(window_weight*resize_filter->filter(x_blur,resize_filter));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p o l a t i v e R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpolativeResizeImage() resizes an image using the specified
% interpolation method.
%
% The format of the InterpolativeResizeImage method is:
%
% Image *InterpolativeResizeImage(const Image *image,const size_t columns,
% const size_t rows,const PixelInterpolateMethod method,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *InterpolativeResizeImage(const Image *image,
  const size_t columns,const size_t rows,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
#define InterpolativeResizeImageTag "Resize/Image"

  CacheView
    *image_view,
    *resize_view;

  Image
    *resize_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    scale;

  ssize_t
    y;

  /*
    Interpolatively resize image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  /* Same geometry: return a plain clone, no resampling needed. */
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (resize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(resize_image,DirectClass,exception) == MagickFalse)
    {
      resize_image=DestroyImage(resize_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
  /*
    Per-axis ratio used to map destination pixel centers back into
    source coordinates (the +0.5/-0.5 below aligns pixel centers).
  */
  scale.x=(double) image->columns/resize_image->columns;
  scale.y=(double) image->rows/resize_image->rows;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,resize_image,resize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) resize_image->rows; y++)
  {
    PointInfo
      offset;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      continue;
    offset.y=((double) y+0.5)*scale.y-0.5;
    for (x=0; x < (ssize_t) resize_image->columns; x++)
    {
      register ssize_t
        i;

      /*
        NOTE(review): InterpolatePixelChannels() is invoked inside the
        per-channel loop even though its name suggests it interpolates
        all channels of the pixel at once; offset.x is also recomputed
        per channel.  Confirm against the callee whether a single call
        per destination pixel would suffice.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          resize_traits,
          traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        resize_traits=GetPixelChannelTraits(resize_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (resize_traits == UndefinedPixelTrait))
          continue;
        offset.x=((double) x+0.5)*scale.x-0.5;
        status=InterpolatePixelChannels(image,image_view,resize_image,method,
          offset.x,offset.y,q,exception);
        if (status == MagickFalse)
          break;
      }
      q+=GetPixelChannels(resize_image);
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,InterpolativeResizeImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    resize_image=DestroyImage(resize_image);
  return(resize_image);
}
#if defined(MAGICKCORE_LQR_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i q u i d R e s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LiquidRescaleImage() rescales image with seam carving.
%
% The format of the LiquidRescaleImage method is:
%
% Image *LiquidRescaleImage(const Image *image,const size_t columns,
% const size_t rows,const double delta_x,const double rigidity,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the rescaled image.
%
% o rows: the number of rows in the rescaled image.
%
% o delta_x: maximum seam transversal step (0 means straight seams).
%
% o rigidity: introduce a bias for non-straight seams (typically 0).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LiquidRescaleImage(const Image *image,const size_t columns,
  const size_t rows,const double delta_x,const double rigidity,
  ExceptionInfo *exception)
{
#define LiquidRescaleImageTag "Rescale/Image"

  CacheView
    *image_view,
    *rescale_view;

  gfloat
    *packet,
    *pixels;

  Image
    *rescale_image;

  int
    x_offset,
    y_offset;

  LqrCarver
    *carver;

  LqrRetVal
    lqr_status;

  MagickBooleanType
    status;

  MemoryInfo
    *pixel_info;

  register gfloat
    *q;

  ssize_t
    y;

  /*
    Liquid rescale image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  /* Degenerate target geometry: fall back to a filtered resize. */
  if ((columns <= 2) || (rows <= 2))
    return(ResizeImage(image,columns,rows,image->filter,exception));
  /*
    Export the image pixels as normalized 32-bit floats for liblqr.
  */
  pixel_info=AcquireVirtualMemory(image->columns,image->rows*MaxPixelChannels*
    sizeof(*pixels));
  if (pixel_info == (MemoryInfo *) NULL)
    return((Image *) NULL);
  pixels=(gfloat *) GetVirtualMemoryBlob(pixel_info);
  status=MagickTrue;
  q=pixels;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
        *q++=QuantumScale*p[i];
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  carver=lqr_carver_new_ext(pixels,(int) image->columns,(int) image->rows,
    (int) GetPixelChannels(image),LQR_COLDEPTH_32F);
  if (carver == (LqrCarver *) NULL)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Carve seams to reach the requested geometry.  NOTE(review): the
    LqrRetVal results of lqr_carver_init()/lqr_carver_resize() are
    discarded; confirm failures are surfaced via the NULL checks below.
  */
  lqr_carver_set_preserve_input_image(carver);
  lqr_status=lqr_carver_init(carver,(int) delta_x,rigidity);
  lqr_status=lqr_carver_resize(carver,(int) columns,(int) rows);
  (void) lqr_status;
  rescale_image=CloneImage(image,lqr_carver_get_width(carver),
    lqr_carver_get_height(carver),MagickTrue,exception);
  if (rescale_image == (Image *) NULL)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(rescale_image,DirectClass,exception) == MagickFalse)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      rescale_image=DestroyImage(rescale_image);
      return((Image *) NULL);
    }
  /*
    Import the carved result back into the rescaled image, one pixel
    packet per scan step.
  */
  rescale_view=AcquireAuthenticCacheView(rescale_image,exception);
  (void) lqr_carver_scan_reset(carver);
  while (lqr_carver_scan_ext(carver,&x_offset,&y_offset,(void **) &packet) != 0)
  {
    register Quantum
      *magick_restrict p;

    register ssize_t
      i;

    p=QueueCacheViewAuthenticPixels(rescale_view,x_offset,y_offset,1,1,
      exception);
    if (p == (Quantum *) NULL)
      break;
    for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
    {
      PixelChannel
        channel;

      PixelTrait
        rescale_traits,
        traits;

      channel=GetPixelChannelChannel(image,i);
      traits=GetPixelChannelTraits(image,channel);
      rescale_traits=GetPixelChannelTraits(rescale_image,channel);
      if ((traits == UndefinedPixelTrait) ||
          (rescale_traits == UndefinedPixelTrait))
        continue;
      SetPixelChannel(rescale_image,channel,ClampToQuantum(QuantumRange*
        packet[i]),p);
    }
    if (SyncCacheViewAuthenticPixels(rescale_view,exception) == MagickFalse)
      break;
  }
  rescale_view=DestroyCacheView(rescale_view);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  lqr_carver_destroy(carver);
  return(rescale_image);
}
#else
MagickExport Image *LiquidRescaleImage(const Image *image,
  const size_t magick_unused(columns),const size_t magick_unused(rows),
  const double magick_unused(delta_x),const double magick_unused(rigidity),
  ExceptionInfo *exception)
{
  /*
    Stub compiled when ImageMagick is built without the liblqr delegate:
    records a MissingDelegateError in 'exception' and returns no image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  (void) ThrowMagickException(exception,GetMagickModule(),MissingDelegateError,
    "DelegateLibrarySupportNotBuiltIn","'%s' (LQR)",image->filename);
  return((Image *) NULL);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g n i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagnifyImage() doubles the size of the image with a pixel art scaling
% algorithm.
%
% The format of the MagnifyImage method is:
%
% Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
{
#define MagnifyImageTag "Magnify/Image"

  CacheView
    *image_view,
    *magnify_view;

  Image
    *magnify_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize magnified image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  magnify_image=CloneImage(image,2*image->columns,2*image->rows,MagickTrue,
    exception);
  if (magnify_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Magnify image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  magnify_view=AcquireAuthenticCacheView(magnify_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,magnify_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* Each source row expands into two destination rows (queued here). */
    q=QueueCacheViewAuthenticPixels(magnify_view,0,2*y,magnify_image->columns,2,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Magnify this row of pixels.
    */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        intensity[9];

      register const Quantum
        *magick_restrict p;

      register Quantum
        *magick_restrict r;

      register ssize_t
        i;

      size_t
        channels;

      /*
        Fetch the 3x3 neighborhood centered on (x,y).  intensity[] is
        row-major: index 4 is the center, 1/7 the vertical neighbors,
        3/5 the horizontal neighbors.
      */
      p=GetCacheViewVirtualPixels(image_view,x-1,y-1,3,3,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      channels=GetPixelChannels(image);
      for (i=0; i < 9; i++)
        intensity[i]=GetPixelIntensity(image,p+i*channels);
      r=q;
      if ((fabs(intensity[1]-intensity[7]) < MagickEpsilon) ||
          (fabs(intensity[3]-intensity[5]) < MagickEpsilon))
        {
          /*
            Clone center pixel.
          */
          for (i=0; i < (ssize_t) channels; i++)
            r[i]=p[4*channels+i];
          r+=GetPixelChannels(magnify_image);
          for (i=0; i < (ssize_t) channels; i++)
            r[i]=p[4*channels+i];
          /* Advance r to the start of the second destination row. */
          r+=GetPixelChannels(magnify_image)*(magnify_image->columns-1);
          for (i=0; i < (ssize_t) channels; i++)
            r[i]=p[4*channels+i];
          r+=GetPixelChannels(magnify_image);
          for (i=0; i < (ssize_t) channels; i++)
            r[i]=p[4*channels+i];
        }
      else
        {
          /*
            Selectively clone pixel: each of the four destination pixels
            copies a neighbor when its two adjacent source intensities
            match, otherwise the center pixel.
          */
          if (fabs(intensity[1]-intensity[3]) < MagickEpsilon)
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[3*channels+i];
          else
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[4*channels+i];
          r+=GetPixelChannels(magnify_image);
          if (fabs(intensity[1]-intensity[5]) < MagickEpsilon)
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[5*channels+i];
          else
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[4*channels+i];
          r+=GetPixelChannels(magnify_image)*(magnify_image->columns-1);
          if (fabs(intensity[3]-intensity[7]) < MagickEpsilon)
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[3*channels+i];
          else
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[4*channels+i];
          r+=GetPixelChannels(magnify_image);
          if (fabs(intensity[5]-intensity[7]) < MagickEpsilon)
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[5*channels+i];
          else
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[4*channels+i];
        }
      q+=2*GetPixelChannels(magnify_image);
    }
    if (SyncCacheViewAuthenticPixels(magnify_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,MagnifyImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  magnify_view=DestroyCacheView(magnify_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    magnify_image=DestroyImage(magnify_image);
  return(magnify_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M i n i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MinifyImage() is a convenience method that scales an image proportionally to
% half its size.
%
% The format of the MinifyImage method is:
%
% Image *MinifyImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MinifyImage(const Image *image,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: scale the image to half size with a Spline
    filter resize.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  return(ResizeImage(image,image->columns/2,image->rows/2,SplineFilter,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s a m p l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResampleImage() resize image in terms of its pixel size, so that when
% displayed at the given resolution it will be the same size in terms of
% real world units as the original image at the original resolution.
%
% The format of the ResampleImage method is:
%
% Image *ResampleImage(Image *image,const double x_resolution,
% const double y_resolution,const FilterType filter,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be resized to fit the given resolution.
%
% o x_resolution: the new image x resolution.
%
% o y_resolution: the new image y resolution.
%
% o filter: Image filter to use.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ResampleImage(const Image *image,const double x_resolution,
  const double y_resolution,const FilterType filter,ExceptionInfo *exception)
{
#define ResampleImageTag "Resample/Image"

  double
    density_x,
    density_y;

  Image
    *resample_image;

  size_t
    height,
    width;

  /*
    Compute the pixel geometry that keeps the image the same physical
    size at the requested resolution; a zero stored resolution is taken
    as 72 DPI.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  density_x=image->resolution.x == 0.0 ? 72.0 : image->resolution.x;
  density_y=image->resolution.y == 0.0 ? 72.0 : image->resolution.y;
  width=(size_t) (x_resolution*image->columns/density_x+0.5);
  height=(size_t) (y_resolution*image->rows/density_y+0.5);
  resample_image=ResizeImage(image,width,height,filter,exception);
  if (resample_image != (Image *) NULL)
    {
      resample_image->resolution.x=x_resolution;
      resample_image->resolution.y=y_resolution;
    }
  return(resample_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResizeImage() scales an image to the desired dimensions, using the given
% filter (see AcquireFilterInfo()).
%
% If an undefined filter is given the filter defaults to Mitchell for a
% colormapped image, an image with a matte channel, or if the image is
% enlarged. Otherwise the filter defaults to a Lanczos.
%
% ResizeImage() was inspired by Paul Heckbert's "zoom" program.
%
% The format of the ResizeImage method is:
%
% Image *ResizeImage(Image *image,const size_t columns,const size_t rows,
% const FilterType filter,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o filter: Image filter to use.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  One filter-weight contribution: the weight applied to the source pixel
  at index 'pixel' when accumulating a single destination pixel.
*/
typedef struct _ContributionInfo
{
  double
    weight;

  ssize_t
    pixel;
} ContributionInfo;
static ContributionInfo **DestroyContributionThreadSet(
  ContributionInfo **contribution)
{
  register ssize_t
    n;

  /*
    Release each worker thread's contribution buffer, then the pointer
    table itself; NULL entries (from a partially built set) are skipped.
  */
  assert(contribution != (ContributionInfo **) NULL);
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
    if (contribution[n] != (ContributionInfo *) NULL)
      contribution[n]=(ContributionInfo *) RelinquishAlignedMemory(
        contribution[n]);
  return((ContributionInfo **) RelinquishMagickMemory(contribution));
}
static ContributionInfo **AcquireContributionThreadSet(const size_t count)
{
  ContributionInfo
    **contribution;

  register ssize_t
    n;

  size_t
    number_threads;

  /*
    Allocate one aligned contribution buffer of 'count' entries per
    worker thread.  The table is zeroed first so a partial failure can
    be unwound safely by DestroyContributionThreadSet().
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  contribution=(ContributionInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*contribution));
  if (contribution == (ContributionInfo **) NULL)
    return((ContributionInfo **) NULL);
  (void) memset(contribution,0,number_threads*sizeof(*contribution));
  for (n=0; n < (ssize_t) number_threads; n++)
  {
    contribution[n]=(ContributionInfo *) MagickAssumeAligned(
      AcquireAlignedMemory(count,sizeof(**contribution)));
    if (contribution[n] == (ContributionInfo *) NULL)
      return(DestroyContributionThreadSet(contribution));
  }
  return(contribution);
}
/*
  HorizontalFilter() resamples 'image' along the x axis into 'resize_image'
  (the column counts differ; row counts are expected to match).  For each
  destination column, a window of source columns is weighted with the resize
  filter, optionally alpha-weighted, and accumulated.  'span' and 'progress'
  drive the two-pass progress monitor.  Returns MagickTrue on success.
*/
static MagickBooleanType HorizontalFilter(
  const ResizeFilter *magick_restrict resize_filter,
  const Image *magick_restrict image,Image *magick_restrict resize_image,
  const double x_factor,const MagickSizeType span,
  MagickOffsetType *magick_restrict progress,ExceptionInfo *exception)
{
#define ResizeImageTag "Resize/Image"

  CacheView
    *image_view,
    *resize_view;

  ClassType
    storage_class;

  ContributionInfo
    **magick_restrict contributions;

  MagickBooleanType
    status;

  double
    scale,
    support;

  ssize_t
    x;

  /*
    Apply filter to resize horizontally from image to resize image.
    When minifying, the filter support widens by the shrink factor.
  */
  scale=MagickMax(1.0/x_factor+MagickEpsilon,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  storage_class=support > 0.5 ? DirectClass : image->storage_class;
  if (SetImageStorageClass(resize_image,storage_class,exception) == MagickFalse)
    return(MagickFalse);
  if (support < 0.5)
    {
      /*
        Support too small even for nearest neighbour: Reduce to point sampling.
      */
      support=(double) 0.5;
      scale=1.0;
    }
  /*
    2.0*support+3.0 bounds the widest contribution window any thread needs.
  */
  contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
  if (contributions == (ContributionInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  scale=PerceptibleReciprocal(scale);  /* filter is sampled at distance*scale */
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,resize_image,resize_image->columns,1)
#endif
  for (x=0; x < (ssize_t) resize_image->columns; x++)
  {
    const int
      id = GetOpenMPThreadId();

    double
      bisect,
      density;

    register const Quantum
      *magick_restrict p;

    register ContributionInfo
      *magick_restrict contribution;

    register Quantum
      *magick_restrict q;

    register ssize_t
      y;

    ssize_t
      n,
      start,
      stop;

    if (status == MagickFalse)
      continue;
    /*
      Map the destination column center back to source coordinates and clamp
      the contribution window [start,stop) to the image bounds.
    */
    bisect=(double) (x+0.5)/x_factor+MagickEpsilon;
    start=(ssize_t) MagickMax(bisect-support+0.5,0.0);
    stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->columns);
    density=0.0;
    contribution=contributions[id];
    for (n=0; n < (stop-start); n++)
    {
      contribution[n].pixel=start+n;
      contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
        ((double) (start+n)-bisect+0.5));
      density+=contribution[n].weight;
    }
    if (n == 0)
      continue;
    if ((density != 0.0) && (density != 1.0))
      {
        register ssize_t
          i;

        /*
          Normalize.
        */
        density=PerceptibleReciprocal(density);
        for (i=0; i < n; i++)
          contribution[i].weight*=density;
      }
    /*
      p is a window of (stop-start) source columns by image->rows rows;
      q is the single destination column x.
    */
    p=GetCacheViewVirtualPixels(image_view,contribution[0].pixel,0,(size_t)
      (contribution[n-1].pixel-contribution[0].pixel+1),image->rows,exception);
    q=QueueCacheViewAuthenticPixels(resize_view,x,0,1,resize_image->rows,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (y=0; y < (ssize_t) resize_image->rows; y++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          resize_traits,
          traits;

        register ssize_t
          j;

        ssize_t
          k;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        resize_traits=GetPixelChannelTraits(resize_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (resize_traits == UndefinedPixelTrait))
          continue;
        if (((resize_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(resize_image,q) <= (QuantumRange/2)))
          {
            /*
              Copy-only channel or write-masked pixel: take the nearest
              source column instead of filtering.  k indexes row y, column
              j-start inside the row-major window p.
            */
            j=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double)
              stop-1.0)+0.5);
            k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
              (contribution[j-start].pixel-contribution[0].pixel);
            SetPixelChannel(resize_image,channel,p[k*GetPixelChannels(image)+i],
              q);
            continue;
          }
        pixel=0.0;
        if ((resize_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (j=0; j < n; j++)
            {
              k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
                (contribution[j].pixel-contribution[0].pixel);
              alpha=contribution[j].weight;
              pixel+=alpha*p[k*GetPixelChannels(image)+i];
            }
            SetPixelChannel(resize_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /*
          Alpha blending: weight each sample by its normalized alpha and
          renormalize the sum by the accumulated alpha weight (gamma).
        */
        gamma=0.0;
        for (j=0; j < n; j++)
        {
          k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
            (contribution[j].pixel-contribution[0].pixel);
          alpha=contribution[j].weight*QuantumScale*
            GetPixelAlpha(image,p+k*GetPixelChannels(image));
          pixel+=alpha*p[k*GetPixelChannels(image)+i];
          gamma+=alpha;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(resize_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(resize_image);
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        (*progress)++;
        proceed=SetImageProgress(image,ResizeImageTag,*progress,span);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  contributions=DestroyContributionThreadSet(contributions);
  return(status);
}
/*
  VerticalFilter() is the y-axis counterpart of HorizontalFilter(): it
  resamples 'image' along the y axis into 'resize_image' (row counts differ;
  column counts are expected to match), weighting a window of source rows
  per destination row.  Returns MagickTrue on success.
*/
static MagickBooleanType VerticalFilter(
  const ResizeFilter *magick_restrict resize_filter,
  const Image *magick_restrict image,Image *magick_restrict resize_image,
  const double y_factor,const MagickSizeType span,
  MagickOffsetType *magick_restrict progress,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *resize_view;

  ClassType
    storage_class;

  ContributionInfo
    **magick_restrict contributions;

  double
    scale,
    support;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Apply filter to resize vertically from image to resize image.
    When minifying, the filter support widens by the shrink factor.
  */
  scale=MagickMax(1.0/y_factor+MagickEpsilon,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  storage_class=support > 0.5 ? DirectClass : image->storage_class;
  if (SetImageStorageClass(resize_image,storage_class,exception) == MagickFalse)
    return(MagickFalse);
  if (support < 0.5)
    {
      /*
        Support too small even for nearest neighbour: Reduce to point sampling.
      */
      support=(double) 0.5;
      scale=1.0;
    }
  /*
    2.0*support+3.0 bounds the widest contribution window any thread needs.
  */
  contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
  if (contributions == (ContributionInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  scale=PerceptibleReciprocal(scale);  /* filter is sampled at distance*scale */
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,resize_image,resize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) resize_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    double
      bisect,
      density;

    register const Quantum
      *magick_restrict p;

    register ContributionInfo
      *magick_restrict contribution;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      n,
      start,
      stop;

    if (status == MagickFalse)
      continue;
    /*
      Map the destination row center back to source coordinates and clamp
      the contribution window [start,stop) to the image bounds.
    */
    bisect=(double) (y+0.5)/y_factor+MagickEpsilon;
    start=(ssize_t) MagickMax(bisect-support+0.5,0.0);
    stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->rows);
    density=0.0;
    contribution=contributions[id];
    for (n=0; n < (stop-start); n++)
    {
      contribution[n].pixel=start+n;
      contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
        ((double) (start+n)-bisect+0.5));
      density+=contribution[n].weight;
    }
    if (n == 0)
      continue;
    if ((density != 0.0) && (density != 1.0))
      {
        register ssize_t
          i;

        /*
          Normalize.
        */
        density=PerceptibleReciprocal(density);
        for (i=0; i < n; i++)
          contribution[i].weight*=density;
      }
    /*
      p is a window of (stop-start) full source rows; q is the single
      destination row y.
    */
    p=GetCacheViewVirtualPixels(image_view,0,contribution[0].pixel,
      image->columns,(size_t) (contribution[n-1].pixel-contribution[0].pixel+1),
      exception);
    q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) resize_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          resize_traits,
          traits;

        register ssize_t
          j;

        ssize_t
          k;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        resize_traits=GetPixelChannelTraits(resize_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (resize_traits == UndefinedPixelTrait))
          continue;
        if (((resize_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(resize_image,q) <= (QuantumRange/2)))
          {
            /*
              Copy-only channel or write-masked pixel: take the nearest
              source row instead of filtering.  k indexes row j-start,
              column x inside the row-major window p.
            */
            j=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double)
              stop-1.0)+0.5);
            k=(ssize_t) ((contribution[j-start].pixel-contribution[0].pixel)*
              image->columns+x);
            SetPixelChannel(resize_image,channel,p[k*GetPixelChannels(image)+i],
              q);
            continue;
          }
        pixel=0.0;
        if ((resize_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (j=0; j < n; j++)
            {
              k=(ssize_t) ((contribution[j].pixel-contribution[0].pixel)*
                image->columns+x);
              alpha=contribution[j].weight;
              pixel+=alpha*p[k*GetPixelChannels(image)+i];
            }
            SetPixelChannel(resize_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /*
          Alpha blending: weight each sample by its normalized alpha and
          renormalize the sum by the accumulated alpha weight (gamma).
        */
        gamma=0.0;
        for (j=0; j < n; j++)
        {
          k=(ssize_t) ((contribution[j].pixel-contribution[0].pixel)*
            image->columns+x);
          alpha=contribution[j].weight*QuantumScale*GetPixelAlpha(image,p+k*
            GetPixelChannels(image));
          pixel+=alpha*p[k*GetPixelChannels(image)+i];
          gamma+=alpha;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(resize_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(resize_image);
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        (*progress)++;
        proceed=SetImageProgress(image,ResizeImageTag,*progress,span);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  contributions=DestroyContributionThreadSet(contributions);
  return(status);
}
MagickExport Image *ResizeImage(const Image *image,const size_t columns,
  const size_t rows,const FilterType filter,ExceptionInfo *exception)
{
  double
    x_factor,
    y_factor;

  FilterType
    filter_type;

  Image
    *filter_image,
    *resize_image;

  MagickOffsetType
    offset;

  MagickSizeType
    span;

  MagickStatusType
    status;

  ResizeFilter
    *resize_filter;

  /*
    Acquire resize image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows) &&
      (filter == UndefinedFilter))
    return(CloneImage(image,0,0,MagickTrue,exception));
  /*
    Acquire resize filter.  When the caller does not name a filter: Point
    for a no-op geometry, Mitchell for enlargements and for pseudo-class or
    alpha images, Lanczos otherwise.
  */
  x_factor=(double) columns/(double) image->columns;
  y_factor=(double) rows/(double) image->rows;
  filter_type=LanczosFilter;
  if (filter != UndefinedFilter)
    filter_type=filter;
  else
    if ((x_factor == 1.0) && (y_factor == 1.0))
      filter_type=PointFilter;
    else
      if ((image->storage_class == PseudoClass) ||
          (image->alpha_trait != UndefinedPixelTrait) ||
          ((x_factor*y_factor) > 1.0))
        filter_type=MitchellFilter;
  resize_filter=AcquireResizeFilter(image,filter_type,MagickFalse,exception);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  resize_image=AccelerateResizeImage(image,columns,rows,resize_filter,
    exception);
  if (resize_image != (Image *) NULL)
    {
      resize_filter=DestroyResizeFilter(resize_filter);
      return(resize_image);
    }
#endif
  resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (resize_image == (Image *) NULL)
    {
      resize_filter=DestroyResizeFilter(resize_filter);
      return(resize_image);
    }
  /*
    Two-pass resize through an intermediate image: the axis with the larger
    scale factor is filtered first, sizing the intermediate accordingly.
  */
  if (x_factor > y_factor)
    filter_image=CloneImage(image,columns,image->rows,MagickTrue,exception);
  else
    filter_image=CloneImage(image,image->columns,rows,MagickTrue,exception);
  if (filter_image == (Image *) NULL)
    {
      resize_filter=DestroyResizeFilter(resize_filter);
      return(DestroyImage(resize_image));
    }
  /*
    Resize image.  'span' is the total progress units across both passes;
    'offset' is the shared running progress counter.
  */
  offset=0;
  if (x_factor > y_factor)
    {
      span=(MagickSizeType) (filter_image->columns+rows);
      status=HorizontalFilter(resize_filter,image,filter_image,x_factor,span,
        &offset,exception);
      status&=VerticalFilter(resize_filter,filter_image,resize_image,y_factor,
        span,&offset,exception);
    }
  else
    {
      span=(MagickSizeType) (filter_image->rows+columns);
      status=VerticalFilter(resize_filter,image,filter_image,y_factor,span,
        &offset,exception);
      status&=HorizontalFilter(resize_filter,filter_image,resize_image,x_factor,
        span,&offset,exception);
    }
  /*
    Free resources.
  */
  filter_image=DestroyImage(filter_image);
  resize_filter=DestroyResizeFilter(resize_filter);
  if (status == MagickFalse)
    {
      resize_image=DestroyImage(resize_image);
      return((Image *) NULL);
    }
  resize_image->type=image->type;
  return(resize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S a m p l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SampleImage() scales an image to the desired dimensions with pixel
% sampling. Unlike other scaling methods, this method does not introduce
% any additional color into the scaled image.
%
% The format of the SampleImage method is:
%
% Image *SampleImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the sampled image.
%
% o rows: the number of rows in the sampled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SampleImage() scales the image to the requested geometry with plain pixel
  sampling (nearest source pixel); no new colors are introduced.  Returns
  the sampled image, or NULL on failure (errors in 'exception').

  Fix: the "sample:offset" artifact was parsed twice with ParseGeometry();
  the first, discarded call is removed.
*/
MagickExport Image *SampleImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define SampleImageTag "Sample/Image"

  CacheView
    *image_view,
    *sample_view;

  Image
    *sample_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    x1;

  ssize_t
    *x_offset,
    y;

  PointInfo
    sample_offset;

  /*
    Initialize sampled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  sample_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (sample_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Set the sampling offset, default is in the mid-point of sample regions.
  */
  sample_offset.x=sample_offset.y=0.5-MagickEpsilon;
  {
    const char
      *value;

    value=GetImageArtifact(image,"sample:offset");
    if (value != (char *) NULL)
      {
        GeometryInfo
          geometry_info;

        MagickStatusType
          flags;

        /*
          Offsets are percentages of a sample region: rho is x, and, when
          given, sigma is y.
        */
        flags=ParseGeometry(value,&geometry_info);
        sample_offset.x=sample_offset.y=geometry_info.rho/100.0-MagickEpsilon;
        if ((flags & SigmaValue) != 0)
          sample_offset.y=geometry_info.sigma/100.0-MagickEpsilon;
      }
  }
  /*
    Allocate scan line buffer and column offset buffers.
  */
  x_offset=(ssize_t *) AcquireQuantumMemory((size_t) sample_image->columns,
    sizeof(*x_offset));
  if (x_offset == (ssize_t *) NULL)
    {
      sample_image=DestroyImage(sample_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Precompute, per destination column, the source column to copy from.
  */
  for (x1=0; x1 < (ssize_t) sample_image->columns; x1++)
    x_offset[x1]=(ssize_t) ((((double) x1+sample_offset.x)*image->columns)/
      sample_image->columns);
  /*
    Sample each row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  sample_view=AcquireAuthenticCacheView(sample_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,sample_image,sample_image->rows,1)
#endif
  for (y=0; y < (ssize_t) sample_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      y_offset;

    if (status == MagickFalse)
      continue;
    y_offset=(ssize_t) ((((double) y+sample_offset.y)*image->rows)/
      sample_image->rows);
    p=GetCacheViewVirtualPixels(image_view,0,y_offset,image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(sample_view,0,y,sample_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Sample each column.
    */
    for (x=0; x < (ssize_t) sample_image->columns; x++)
    {
      register ssize_t
        i;

      if (GetPixelWriteMask(sample_image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(sample_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(sample_image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          image_traits,
          traits;

        channel=GetPixelChannelChannel(sample_image,i);
        traits=GetPixelChannelTraits(sample_image,channel);
        image_traits=GetPixelChannelTraits(image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (image_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(sample_image,channel,p[x_offset[x]*GetPixelChannels(
          image)+i],q);
      }
      q+=GetPixelChannels(sample_image);
    }
    if (SyncCacheViewAuthenticPixels(sample_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,SampleImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  sample_view=DestroyCacheView(sample_view);
  x_offset=(ssize_t *) RelinquishMagickMemory(x_offset);
  sample_image->type=image->type;
  if (status == MagickFalse)
    sample_image=DestroyImage(sample_image);
  return(sample_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleImage() changes the size of an image to the given dimensions.
%
% The format of the ScaleImage method is:
%
% Image *ScaleImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ScaleImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define ScaleImageTag "Scale/Image"

  CacheView
    *image_view,
    *scale_view;

  double
    alpha,
    pixel[CompositePixelChannel],
    *scale_scanline,
    *scanline,
    *x_vector,
    *y_vector;

  Image
    *scale_image;

  MagickBooleanType
    next_column,
    next_row,
    proceed,
    status;

  PixelTrait
    scale_traits;

  PointInfo
    scale,
    span;

  register ssize_t
    i;

  ssize_t
    n,
    number_rows,
    y;

  /*
    Initialize scaled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  scale_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (scale_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(scale_image,DirectClass,exception) == MagickFalse)
    {
      scale_image=DestroyImage(scale_image);
      return((Image *) NULL);
    }
  /*
    Allocate memory.  Note: when the row counts match, 'scanline' aliases
    'x_vector' (no separate y-accumulation buffer is needed).
  */
  x_vector=(double *) AcquireQuantumMemory((size_t) image->columns,
    MaxPixelChannels*sizeof(*x_vector));
  scanline=x_vector;
  if (image->rows != scale_image->rows)
    scanline=(double *) AcquireQuantumMemory((size_t) image->columns,
      MaxPixelChannels*sizeof(*scanline));
  scale_scanline=(double *) AcquireQuantumMemory((size_t) scale_image->columns,
    MaxPixelChannels*sizeof(*scale_scanline));
  y_vector=(double *) AcquireQuantumMemory((size_t) image->columns,
    MaxPixelChannels*sizeof(*y_vector));
  if ((scanline == (double *) NULL) || (scale_scanline == (double *) NULL) ||
      (x_vector == (double *) NULL) || (y_vector == (double *) NULL))
    {
      if ((image->rows != scale_image->rows) && (scanline != (double *) NULL))
        scanline=(double *) RelinquishMagickMemory(scanline);
      if (scale_scanline != (double *) NULL)
        scale_scanline=(double *) RelinquishMagickMemory(scale_scanline);
      if (x_vector != (double *) NULL)
        x_vector=(double *) RelinquishMagickMemory(x_vector);
      if (y_vector != (double *) NULL)
        y_vector=(double *) RelinquishMagickMemory(y_vector);
      scale_image=DestroyImage(scale_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Scale image.  span.y is the fraction of the current destination row
    still to fill; scale.y is the fraction of a source row each source
    scanline contributes.
  */
  number_rows=0;
  next_row=MagickTrue;
  span.y=1.0;
  scale.y=(double) scale_image->rows/(double) image->rows;
  (void) memset(y_vector,0,(size_t) MaxPixelChannels*image->columns*
    sizeof(*y_vector));
  n=0;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  scale_view=AcquireAuthenticCacheView(scale_image,exception);
  for (y=0; y < (ssize_t) scale_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      break;
    q=QueueCacheViewAuthenticPixels(scale_view,0,y,scale_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        break;
      }
    alpha=1.0;
    if (scale_image->rows == image->rows)
      {
        /*
          Read a new scanline.  Blended channels are stored alpha-
          premultiplied in x_vector.
        */
        p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1,
          exception);
        if (p == (const Quantum *) NULL)
          {
            status=MagickFalse;
            break;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
            {
              p+=GetPixelChannels(image);
              continue;
            }
          if (image->alpha_trait != UndefinedPixelTrait)
            alpha=QuantumScale*GetPixelAlpha(image,p);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits & BlendPixelTrait) == 0)
              {
                x_vector[x*GetPixelChannels(image)+i]=(double) p[i];
                continue;
              }
            x_vector[x*GetPixelChannels(image)+i]=alpha*p[i];
          }
          p+=GetPixelChannels(image);
        }
      }
    else
      {
        /*
          Scale Y direction: accumulate whole source rows into y_vector
          while they fit entirely inside the current destination row.
        */
        while (scale.y < span.y)
        {
          if ((next_row != MagickFalse) &&
              (number_rows < (ssize_t) image->rows))
            {
              /*
                Read a new scanline.
              */
              p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1,
                exception);
              if (p == (const Quantum *) NULL)
                {
                  status=MagickFalse;
                  break;
                }
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
                  {
                    p+=GetPixelChannels(image);
                    continue;
                  }
                if (image->alpha_trait != UndefinedPixelTrait)
                  alpha=QuantumScale*GetPixelAlpha(image,p);
                for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                {
                  PixelChannel channel = GetPixelChannelChannel(image,i);
                  PixelTrait traits = GetPixelChannelTraits(image,channel);
                  if ((traits & BlendPixelTrait) == 0)
                    {
                      x_vector[x*GetPixelChannels(image)+i]=(double) p[i];
                      continue;
                    }
                  x_vector[x*GetPixelChannels(image)+i]=alpha*p[i];
                }
                p+=GetPixelChannels(image);
              }
              number_rows++;
            }
          for (x=0; x < (ssize_t) image->columns; x++)
            for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              y_vector[x*GetPixelChannels(image)+i]+=scale.y*
                x_vector[x*GetPixelChannels(image)+i];
          span.y-=scale.y;
          scale.y=(double) scale_image->rows/(double) image->rows;
          next_row=MagickTrue;
        }
        if ((next_row != MagickFalse) && (number_rows < (ssize_t) image->rows))
          {
            /*
              Read a new scanline: this source row straddles the destination
              row boundary, so only span.y of it is consumed here.
            */
            p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1,
              exception);
            if (p == (const Quantum *) NULL)
              {
                status=MagickFalse;
                break;
              }
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
                {
                  p+=GetPixelChannels(image);
                  continue;
                }
              if (image->alpha_trait != UndefinedPixelTrait)
                alpha=QuantumScale*GetPixelAlpha(image,p);
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              {
                PixelChannel channel = GetPixelChannelChannel(image,i);
                PixelTrait traits = GetPixelChannelTraits(image,channel);
                if ((traits & BlendPixelTrait) == 0)
                  {
                    x_vector[x*GetPixelChannels(image)+i]=(double) p[i];
                    continue;
                  }
                x_vector[x*GetPixelChannels(image)+i]=alpha*p[i];
              }
              p+=GetPixelChannels(image);
            }
            number_rows++;
            next_row=MagickFalse;
          }
        /*
          Emit the finished destination row and reset the y accumulator.
        */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            pixel[i]=y_vector[x*GetPixelChannels(image)+i]+span.y*
              x_vector[x*GetPixelChannels(image)+i];
            scanline[x*GetPixelChannels(image)+i]=pixel[i];
            y_vector[x*GetPixelChannels(image)+i]=0.0;
          }
        }
        scale.y-=span.y;
        if (scale.y <= 0)
          {
            scale.y=(double) scale_image->rows/(double) image->rows;
            next_row=MagickTrue;
          }
        span.y=1.0;
      }
    if (scale_image->columns == image->columns)
      {
        /*
          Transfer scanline to scaled image.
        */
        for (x=0; x < (ssize_t) scale_image->columns; x++)
        {
          if (GetPixelWriteMask(scale_image,q) <= (QuantumRange/2))
            {
              q+=GetPixelChannels(scale_image);
              continue;
            }
          if (image->alpha_trait != UndefinedPixelTrait)
            {
              /* Un-premultiply blended channels by the scaled alpha. */
              alpha=QuantumScale*scanline[x*GetPixelChannels(image)+
                GetPixelChannelOffset(image,AlphaPixelChannel)];
              alpha=PerceptibleReciprocal(alpha);
            }
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            scale_traits=GetPixelChannelTraits(scale_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (scale_traits == UndefinedPixelTrait))
              continue;
            if ((traits & BlendPixelTrait) == 0)
              {
                SetPixelChannel(scale_image,channel,ClampToQuantum(
                  scanline[x*GetPixelChannels(image)+i]),q);
                continue;
              }
            SetPixelChannel(scale_image,channel,ClampToQuantum(alpha*scanline[
              x*GetPixelChannels(image)+i]),q);
          }
          q+=GetPixelChannels(scale_image);
        }
      }
    else
      {
        ssize_t
          t;

        /*
          Scale X direction: same accumulator scheme as Y, applied to the
          scanline; t is the current destination column.
        */
        for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          pixel[i]=0.0;
        next_column=MagickFalse;
        span.x=1.0;
        t=0;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          scale.x=(double) scale_image->columns/(double) image->columns;
          while (scale.x >= span.x)
          {
            if (next_column != MagickFalse)
              {
                for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                  pixel[i]=0.0;
                t++;
              }
            for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
            {
              PixelChannel channel = GetPixelChannelChannel(image,i);
              PixelTrait traits = GetPixelChannelTraits(image,channel);
              if (traits == UndefinedPixelTrait)
                continue;
              pixel[i]+=span.x*scanline[x*GetPixelChannels(image)+i];
              scale_scanline[t*GetPixelChannels(image)+i]=pixel[i];
            }
            scale.x-=span.x;
            span.x=1.0;
            next_column=MagickTrue;
          }
          if (scale.x > 0)
            {
              if (next_column != MagickFalse)
                {
                  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                    pixel[i]=0.0;
                  next_column=MagickFalse;
                  t++;
                }
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                pixel[i]+=scale.x*scanline[x*GetPixelChannels(image)+i];
              span.x-=scale.x;
            }
        }
        if (span.x > 0)
          {
            /* Flush the partially-filled final column. */
            for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              pixel[i]+=span.x*scanline[(x-1)*GetPixelChannels(image)+i];
          }
        if ((next_column == MagickFalse) &&
            (t < (ssize_t) scale_image->columns))
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
            scale_scanline[t*GetPixelChannels(image)+i]=pixel[i];
        /*
          Transfer scanline to scaled image.
        */
        for (x=0; x < (ssize_t) scale_image->columns; x++)
        {
          if (GetPixelWriteMask(scale_image,q) <= (QuantumRange/2))
            {
              q+=GetPixelChannels(scale_image);
              continue;
            }
          if (image->alpha_trait != UndefinedPixelTrait)
            {
              /* Un-premultiply blended channels by the scaled alpha. */
              alpha=QuantumScale*scale_scanline[x*GetPixelChannels(image)+
                GetPixelChannelOffset(image,AlphaPixelChannel)];
              alpha=PerceptibleReciprocal(alpha);
            }
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            scale_traits=GetPixelChannelTraits(scale_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (scale_traits == UndefinedPixelTrait))
              continue;
            if ((traits & BlendPixelTrait) == 0)
              {
                SetPixelChannel(scale_image,channel,ClampToQuantum(
                  scale_scanline[x*GetPixelChannels(image)+i]),q);
                continue;
              }
            SetPixelChannel(scale_image,channel,ClampToQuantum(alpha*
              scale_scanline[x*GetPixelChannels(image)+i]),q);
          }
          q+=GetPixelChannels(scale_image);
        }
      }
    if (SyncCacheViewAuthenticPixels(scale_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        break;
      }
    proceed=SetImageProgress(image,ScaleImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      {
        status=MagickFalse;
        break;
      }
  }
  scale_view=DestroyCacheView(scale_view);
  image_view=DestroyCacheView(image_view);
  /*
    Free allocated memory.  'scanline' is freed only when it is a distinct
    buffer (it aliases x_vector when the row counts match).
  */
  y_vector=(double *) RelinquishMagickMemory(y_vector);
  scale_scanline=(double *) RelinquishMagickMemory(scale_scanline);
  if (scale_image->rows != image->rows)
    scanline=(double *) RelinquishMagickMemory(scanline);
  x_vector=(double *) RelinquishMagickMemory(x_vector);
  scale_image->type=image->type;
  if (status == MagickFalse)
    scale_image=DestroyImage(scale_image);
  return(scale_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T h u m b n a i l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ThumbnailImage() changes the size of an image to the given dimensions and
% removes any associated profiles. The goal is to produce small low cost
% thumbnail images suited for display on the Web.
%
% The format of the ThumbnailImage method is:
%
% Image *ThumbnailImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ThumbnailImage() resizes the image to the requested geometry, strips
  non-color profiles, and attaches freedesktop.org Thumb::* properties.
  Returns the thumbnail, or NULL on failure (errors in 'exception').

  Fix: the original formatted attributes.st_mtime unconditionally after the
  GetPathAttributes() check, reading an uninitialized struct stat when the
  stat failed; the value was also immediately overwritten (dead store).
  That statement is removed.
*/
MagickExport Image *ThumbnailImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define SampleFactor 5

  char
    filename[MagickPathExtent],
    value[MagickPathExtent];

  const char
    *name;

  Image
    *thumbnail_image;

  double
    x_factor,
    y_factor;

  struct stat
    attributes;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    For large reductions, pre-shrink with cheap pixel sampling to
    SampleFactor times the target size before the filtered resize.
  */
  x_factor=(double) columns/(double) image->columns;
  y_factor=(double) rows/(double) image->rows;
  if ((x_factor*y_factor) > 0.1)
    thumbnail_image=ResizeImage(image,columns,rows,image->filter,exception);
  else
    if (((SampleFactor*columns) < 128) || ((SampleFactor*rows) < 128))
      thumbnail_image=ResizeImage(image,columns,rows,image->filter,exception);
    else
      {
        Image
          *sample_image;

        sample_image=SampleImage(image,SampleFactor*columns,SampleFactor*rows,
          exception);
        if (sample_image == (Image *) NULL)
          return((Image *) NULL);
        thumbnail_image=ResizeImage(sample_image,columns,rows,image->filter,
          exception);
        sample_image=DestroyImage(sample_image);
      }
  if (thumbnail_image == (Image *) NULL)
    return(thumbnail_image);
  (void) ParseAbsoluteGeometry("0x0+0+0",&thumbnail_image->page);
  if (thumbnail_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(thumbnail_image,OpaqueAlphaChannel,exception);
  thumbnail_image->depth=8;
  thumbnail_image->interlace=NoInterlace;
  /*
    Strip all profiles except color profiles.
  */
  ResetImageProfileIterator(thumbnail_image);
  for (name=GetNextImageProfile(thumbnail_image); name != (const char *) NULL; )
  {
    if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
      {
        (void) DeleteImageProfile(thumbnail_image,name);
        ResetImageProfileIterator(thumbnail_image);
      }
    name=GetNextImageProfile(thumbnail_image);
  }
  (void) DeleteImageProperty(thumbnail_image,"comment");
  /*
    Record freedesktop.org thumbnail properties.
  */
  (void) CopyMagickString(value,image->magick_filename,MagickPathExtent);
  if (strstr(image->magick_filename,"//") == (char *) NULL)
    (void) FormatLocaleString(value,MagickPathExtent,"file://%s",
      image->magick_filename);
  (void) SetImageProperty(thumbnail_image,"Thumb::URI",value,exception);
  GetPathComponent(image->magick_filename,TailPath,filename);
  (void) CopyMagickString(value,filename,MagickPathExtent);
  if (GetPathAttributes(image->filename,&attributes) != MagickFalse)
    {
      /*
        Only report the modification time when stat() succeeded.
      */
      (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
        attributes.st_mtime);
      (void) SetImageProperty(thumbnail_image,"Thumb::MTime",value,exception);
    }
  (void) FormatMagickSize(GetBlobSize(image),MagickFalse,"B",MagickPathExtent,
    value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Size",value,exception);
  (void) FormatLocaleString(value,MagickPathExtent,"image/%s",image->magick);
  LocaleLower(value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Mimetype",value,exception);
  (void) SetImageProperty(thumbnail_image,"software",MagickAuthoritativeURL,
    exception);
  (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
    image->magick_columns);
  (void) SetImageProperty(thumbnail_image,"Thumb::Image::Width",value,
    exception);
  (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
    image->magick_rows);
  (void) SetImageProperty(thumbnail_image,"Thumb::Image::Height",value,
    exception);
  (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
    GetImageListLength(image));
  (void) SetImageProperty(thumbnail_image,"Thumb::Document::Pages",value,
    exception);
  return(thumbnail_image);
}
|
ompnn.c | #include<stdio.h>
#include<stdlib.h>
#include<omp.h>
#include<time.h>
#include<math.h>
#define size 4000
float sigmoid(float x)
{
float exp_value;
float return_value;
exp_value = exp((float) -x);
return_value = 1 / (1 + exp_value);
return return_value;
}
/* Benchmark: one dense layer (size x size weight matrix times size x size
 * input) followed by a sigmoid on the first output column, parallelized
 * with OpenMP.  Returns 0 on success, 1 if a matrix allocation fails. */
int main()
{
    float f;
    int i;
    int j;

    /* Each size x size float matrix is ~61 MB at size == 4000; three of them
     * as automatic arrays (~183 MB) overflow any normal thread stack, so
     * allocate them on the heap.  Pointer-to-array keeps the [i][j] syntax. */
    float (*weight)[size] = malloc(sizeof(float[size][size]));
    float (*layer)[size] = malloc(sizeof(float[size][size]));
    float (*output)[size] = malloc(sizeof(float[size][size]));
    if (weight == NULL || layer == NULL || output == NULL) {
        fprintf(stderr, "matrix allocation failed\n");
        free(weight);
        free(layer);
        free(output);
        return 1;
    }

    /* Initialize buffers: weights drawn from (0, 1], outputs zeroed. */
    for (i = 0; i < size; ++i) {
        for (j = 0; j < size; ++j) {
            f = rand() % 1000 + 1;
            f = f / 1000;
            weight[i][j] = f;
            output[i][j] = 0.0f;
        }
    }
    /* Input layer: column 0 is random, every other column repeats it. */
    for (i = 0; i < size; ++i) {
        for (j = 0; j < size; ++j) {
            if (j == 0) {
                f = rand() % 1000 + 1;
                f = f / 1000;
                layer[i][j] = f;
            } else {
                layer[i][j] = layer[i][j - 1];
            }
        }
    }

    double time_spent = 0.0;
    /* NOTE(review): clock() sums CPU time across all OpenMP threads, so it
     * overstates wall time under parallelism; omp_get_wtime() would be the
     * right clock if the timing printf below is ever re-enabled. */
    clock_t begin = clock();

    /* output = weight * layer (naive O(size^3) matrix multiply); iterations
     * over i are independent, so the outer loop parallelizes race-free. */
#pragma omp parallel for default(none) shared(weight, layer, output)
    for (int i = 0; i < size; ++i) {
        for (int j = 0; j < size; ++j) {
            for (int k = 0; k < size; ++k) {
                output[i][j] += weight[i][k] * layer[k][j];
            }
        }
    }
    /* Activation applied to the first output column only. */
#pragma omp parallel for default(none) shared(output)
    for (int i = 0; i < size; ++i) {
        output[i][0] = sigmoid(output[i][0]);
    }

    clock_t end = clock();
    time_spent += (double)(end - begin) / CLOCKS_PER_SEC;
    //printf("Time elapsed is %f seconds", time_spent);

    free(layer);
    free(weight);
    free(output);
    return 0;
}
|
graph_generator.c | /* Copyright (C) 2009-2010 The Trustees of Indiana University. */
/* */
/* Use, modification and distribution is subject to the Boost Software */
/* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */
/* http://www.boost.org/LICENSE_1_0.txt) */
/* */
/* Authors: Jeremiah Willcock */
/* Andrew Lumsdaine */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>
#include "graph_generator.h"
#include "splittable_mrg.h"
#include "user_settings.h"
/* Initiator settings: for faster random number generation, the initiator
* probabilities are defined as fractions (a = INITIATOR_A_NUMERATOR /
* INITIATOR_DENOMINATOR, b = c = INITIATOR_BC_NUMERATOR /
* INITIATOR_DENOMINATOR, d = 1 - a - b - c. */
#define INITIATOR_A_NUMERATOR 5700
#define INITIATOR_BC_NUMERATOR 1900
#define INITIATOR_DENOMINATOR 10000
/* If this macro is defined to a non-zero value, use SPK_NOISE_LEVEL /
* INITIATOR_DENOMINATOR as the noise parameter to use in introducing noise
* into the graph parameters. The approach used is from "A Hitchhiker's Guide
* to Choosing Parameters of Stochastic Kronecker Graphs" by C. Seshadhri, Ali
* Pinar, and Tamara G. Kolda (http://arxiv.org/abs/1102.5046v1), except that
* the adjustment here is chosen based on the current level being processed
* rather than being chosen randomly. */
#define SPK_NOISE_LEVEL 0
/* #define SPK_NOISE_LEVEL 1000 -- in INITIATOR_DENOMINATOR units */
/* Draw one quadrant index (0..3) of the Kronecker initiator matrix with
 * probabilities a, b, c, d = 1-a-b-c (see the INITIATOR_* macros above).
 * Quadrant codes: 0 = a, 1 = b, 2 = c, 3 = d. */
static int generate_4way_bernoulli(mrg_state* st, int level, int nlevels) {
#if SPK_NOISE_LEVEL == 0
  /* Avoid warnings: level/nlevels only feed the noise adjustment. */
  (void)level;
  (void)nlevels;
#endif
  /* Generate a pseudorandom number in the range [0, INITIATOR_DENOMINATOR)
   * without modulo bias: draws below `limit` would make the final modulo
   * non-uniform, so they are rejected and redrawn. */
  static const uint32_t limit = (UINT32_C(0x7FFFFFFF) % INITIATOR_DENOMINATOR);
  uint32_t val = mrg_get_uint_orig(st);
  if (/* Unlikely */ val < limit) {
    do {
      val = mrg_get_uint_orig(st);
    } while (val < limit);
  }
#if SPK_NOISE_LEVEL == 0
  int spk_noise_factor = 0;
#else
  /* Deterministic per-level noise (Seshadhri/Pinar/Kolda; see header comment):
   * ramps from -SPK_NOISE_LEVEL to +SPK_NOISE_LEVEL across the levels. */
  int spk_noise_factor =
      2 * SPK_NOISE_LEVEL * level / nlevels - SPK_NOISE_LEVEL;
#endif
  unsigned int adjusted_bc_numerator =
      (unsigned int)(INITIATOR_BC_NUMERATOR + spk_noise_factor);
  val %= INITIATOR_DENOMINATOR;
  /* Quadrants b (1) and c (2) share the same adjusted probability. */
  if (val < adjusted_bc_numerator) return 1;
  val = (uint32_t)(val - adjusted_bc_numerator);
  if (val < adjusted_bc_numerator) return 2;
  val = (uint32_t)(val - adjusted_bc_numerator);
#if SPK_NOISE_LEVEL == 0
  if (val < INITIATOR_A_NUMERATOR) return 0;
#else
  /* Rescale a's share so the four probabilities still sum to 1 under noise. */
  if (val < INITIATOR_A_NUMERATOR *
                (INITIATOR_DENOMINATOR - 2 * INITIATOR_BC_NUMERATOR) /
                (INITIATOR_DENOMINATOR - 2 * adjusted_bc_numerator))
    return 0;
#endif
  /* Note: a second, redundant (void)level/(void)nlevels warning-suppression
   * block used to sit here; it duplicated the one at function entry and has
   * been removed. */
  return 3;
}
/* Reverse the order of the 64 bits in x; this should be optimized for
 * performance (including using bit- or byte-reverse intrinsics if your
 * platform has them). */
static inline uint64_t bitreverse(uint64_t x) {
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)
#define USE_GCC_BYTESWAP /* __builtin_bswap* are in 4.3 but not 4.2 */
#endif

#ifdef FAST_64BIT_ARITHMETIC
  /* Whole-word 64-bit version: reverse the byte order first, then the
   * bits within each byte. */
#ifdef USE_GCC_BYTESWAP
  x = __builtin_bswap64(x);
#else
  x = (x >> 32) | (x << 32);
  x = ((x >> 16) & UINT64_C(0x0000FFFF0000FFFF)) |
      ((x & UINT64_C(0x0000FFFF0000FFFF)) << 16);
  x = ((x >> 8) & UINT64_C(0x00FF00FF00FF00FF)) |
      ((x & UINT64_C(0x00FF00FF00FF00FF)) << 8);
#endif
  /* Swap nibbles, then bit pairs, then adjacent bits. */
  x = ((x >> 4) & UINT64_C(0x0F0F0F0F0F0F0F0F)) |
      ((x & UINT64_C(0x0F0F0F0F0F0F0F0F)) << 4);
  x = ((x >> 2) & UINT64_C(0x3333333333333333)) |
      ((x & UINT64_C(0x3333333333333333)) << 2);
  x = ((x >> 1) & UINT64_C(0x5555555555555555)) |
      ((x & UINT64_C(0x5555555555555555)) << 1);
  return x;
#else
  /* 32-bit version: reverse each half independently, then swap halves. */
  uint32_t hi = (uint32_t)(x >> 32);
  uint32_t lo = (uint32_t)(x & UINT32_MAX);
#ifdef USE_GCC_BYTESWAP
  hi = __builtin_bswap32(hi);
  lo = __builtin_bswap32(lo);
#else
  hi = (hi >> 16) | (hi << 16);
  hi = ((hi >> 8) & UINT32_C(0x00FF00FF)) | ((hi & UINT32_C(0x00FF00FF)) << 8);
  lo = (lo >> 16) | (lo << 16);
  lo = ((lo >> 8) & UINT32_C(0x00FF00FF)) | ((lo & UINT32_C(0x00FF00FF)) << 8);
#endif
  hi = ((hi >> 4) & UINT32_C(0x0F0F0F0F)) | ((hi & UINT32_C(0x0F0F0F0F)) << 4);
  hi = ((hi >> 2) & UINT32_C(0x33333333)) | ((hi & UINT32_C(0x33333333)) << 2);
  hi = ((hi >> 1) & UINT32_C(0x55555555)) | ((hi & UINT32_C(0x55555555)) << 1);
  lo = ((lo >> 4) & UINT32_C(0x0F0F0F0F)) | ((lo & UINT32_C(0x0F0F0F0F)) << 4);
  lo = ((lo >> 2) & UINT32_C(0x33333333)) | ((lo & UINT32_C(0x33333333)) << 2);
  lo = ((lo >> 1) & UINT32_C(0x55555555)) | ((lo & UINT32_C(0x55555555)) << 1);
  /* The reversed low half becomes the high half and vice versa. */
  return ((uint64_t)lo << 32) | hi;
#endif
}
/* Apply a pseudorandom permutation to scramble vertex numbers; a truly
 * random permutation is not used because applying one at scale is too
 * expensive.  Two rounds of (multiply by an odd constant derived from a
 * key, then keep the lgN high-order bits of the bit-reversed product)
 * map [0, 2^lgN) onto itself. */
static inline int64_t scramble(int64_t v0, int lgN, uint64_t val0,
                               uint64_t val1) {
  uint64_t v = (uint64_t)v0 + val0 + val1;
  int shift = 64 - lgN;
  /* Round 1: OR-ing with an odd constant guarantees an odd (invertible)
   * multiplier even when val0 is even. */
  v = bitreverse(v * (val0 | UINT64_C(0x4519840211493211))) >> shift;
  assert((v >> lgN) == 0);
  /* Round 2, same construction with the second key. */
  v = bitreverse(v * (val1 | UINT64_C(0x3050852102C843A5))) >> shift;
  assert((v >> lgN) == 0);
  return (int64_t)v;
}
/* Make a single graph edge using a pre-set MRG state.
 *
 * Walks down the Kronecker recursion: at each level one of the four
 * initiator quadrants is chosen, halving the remaining vertex range and
 * accumulating the source/target offsets.  While the two endpoints are
 * still in the same block, the quadrant is clip-and-flipped so that
 * base_src <= base_tgt, keeping the undirected distribution symmetric.
 * The final endpoints are scrambled to simulate a vertex permutation. */
static void make_one_edge(int64_t nverts, int level, int lgN, mrg_state* st,
                          packed_edge* result, uint64_t val0, uint64_t val1) {
  int64_t base_src = 0, base_tgt = 0;
  while (nverts > 1) {
    int square = generate_4way_bernoulli(st, level, lgN);
    int src_offset = square / 2;
    int tgt_offset = square % 2;
    assert(base_src <= base_tgt);
    if (base_src == base_tgt) {
      /* Clip-and-flip for undirected graph */
      if (src_offset > tgt_offset) {
        int temp = src_offset;
        src_offset = tgt_offset;
        tgt_offset = temp;
      }
    }
    nverts /= 2;
    ++level;
    base_src += nverts * src_offset;
    base_tgt += nverts * tgt_offset;
  }
  /* Apply random permutation to vertex numbers.  The leftover debug
   * fprintf of result->v0/v1 to stderr was removed: it serialized every
   * parallel worker on I/O and used %ld for int64_t, which is wrong on
   * platforms where long is 32-bit (PRId64 from <inttypes.h> would be
   * required). */
  write_edge(result, scramble(base_src, lgN, val0, val1),
             scramble(base_tgt, lgN, val0, val1));
}
/* Generate a range of edges (from start_edge to end_edge of the total graph),
 * writing into elements [0, end_edge - start_edge) of the edges array. This
 * code is parallel on OpenMP and XMT; it must be used with
 * separately-implemented SPMD parallelism for MPI. */
void generate_kronecker_range(
const uint_fast32_t seed[5] /* All values in [0, 2^31 - 1), not all zero */,
int logN /* In base 2 */, int64_t start_edge, int64_t end_edge,
packed_edge* edges
#ifdef SSSP
,
float* weights /* One weight per generated edge, indexed like edges */
#endif
) {
mrg_state state;
int64_t nverts = (int64_t)1 << logN; /* Total vertex count: 2^logN */
int64_t ei;
mrg_seed(&state, seed);
uint64_t val0, val1; /* Values for scrambling */
/* Derive the two scramble keys deterministically from the seed.  The
 * fixed skip (50, 7, 0) moves this derivation to a different point in
 * the RNG sequence than the per-edge substreams below — presumably to
 * keep them disjoint (TODO confirm against mrg_skip's stream layout).
 * Each key combines two generator draws; note the multiplier is
 * 0xFFFFFFFF (not a power-of-two shift), matching the reference code. */
{
mrg_state new_state = state;
mrg_skip(&new_state, 50, 7, 0);
val0 = mrg_get_uint_orig(&new_state);
val0 *= UINT64_C(0xFFFFFFFF);
val0 += mrg_get_uint_orig(&new_state);
val1 = mrg_get_uint_orig(&new_state);
val1 *= UINT64_C(0xFFFFFFFF);
val1 += mrg_get_uint_orig(&new_state);
}
/* Each iteration re-derives its own RNG state by skipping to substream
 * ei, so iterations are independent and the loop parallelizes with no
 * shared mutable state (state/val0/val1 are read-only here). */
#ifdef _OPENMP
#pragma omp parallel for
#endif
#ifdef __MTA__
#pragma mta assert parallel
#pragma mta block schedule
#endif
/* The commented-out lines below are disabled debug edge-list dumping;
 * comments between the pragmas and the for statement are harmless. */
//FILE *int_edge_list;
//int_edge_list = fopen("graph_e16_s","w");
//if(int_edge_list ==NULL){
// fprintf(stderr,"\nCould not create new edge file\n");
//}
for (ei = start_edge; ei < end_edge; ++ei) {
mrg_state new_state = state;
mrg_skip(&new_state, 0, (uint64_t)ei, 0);
make_one_edge(nverts, 0, logN, &new_state, edges + (ei - start_edge), val0,
val1);
#ifdef SSSP
weights[ei - start_edge] = mrg_get_float_orig(&new_state);
#endif
}
}
|
DRB048-firstprivate-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Example use of firstprivate()
*/
#include <omp.h>
#include <stdio.h>
/* Add the scalar g to each of the first n elements of a, in parallel.
 * n and g are firstprivate, so every thread starts from the caller's
 * values; the loop is race-free because each iteration writes a
 * distinct a[i]. */
void foo(int *a, int n, int g)
{
  int i;
#pragma omp parallel for private (i) firstprivate (n,g)
  for (i = 0; i < n; i++) {
    a[i] += g;
  }
}
/* Shared array operated on by main and foo(). */
int a[100];

/* Fill a[i] = i in parallel, add 7 to every element via foo(),
 * then print the results one per line. */
int main()
{
  int i;
#pragma omp parallel for private (i)
  for (i = 0; i < 100; i++) {
    a[i] = i;
  }
  foo(a, 100, 7);
  for (i = 0; i < 100; i++) {
    printf("%d\n", a[i]);
  }
  return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.